from urllib.parse import urlencode
import re
from requests.exceptions import RequestException
from bs4 import BeautifulSoup
import requests
import json
def get_page_index(offset, keyword):
    """Fetch one page of the Toutiao search-index Ajax feed as raw JSON text.

    Args:
        offset: pagination offset (0, 20, 40, ... as str or int).
        keyword: search term, e.g. '街拍'.

    Returns:
        The response body (JSON text) on HTTP 200, otherwise None.
        Prints a message and returns None on any requests failure.
    """
    params = {
        'offset': offset,
        'format': 'json',
        'keyword': keyword,
        'autoload': 'true',
        'count': '20',   # number of results per page
        'cur_tab': '3',  # tab 3 = image-gallery section; 1 = general search
    }
    # urlencode() turns the dict into URL query parameters.
    # BUG FIX: the original URL was relative ('/search_content/?...'), which
    # requests cannot fetch (MissingSchema, silently swallowed below) —
    # prepend the site host so the request is absolute.
    url = 'https://www.toutiao.com/search_content/?' + urlencode(params)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
    }
    try:
        # timeout keeps a stalled connection from hanging the crawler forever
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print('请求索引页出错')  # "error requesting the index page"
        return None
def parse_page_index(html):
    """Parse index-page JSON and yield each entry's detail-page URL.

    Args:
        html: JSON text as returned by get_page_index(), or None.

    Yields:
        The 'article_url' value of each dict in the top-level 'data' list
        (may yield None for entries lacking that key, as the original did).
    """
    # Robustness fix: get_page_index() returns None on failure, and the
    # endpoint can answer with non-JSON; the original crashed in json.loads
    # on both. A generator that yields nothing is the safe degradation.
    if not html:
        return
    try:
        data = json.loads(html)  # JSON text -> dict
    except json.JSONDecodeError:
        return
    if data and 'data' in data:
        # data['data'] is a list (up to 'count' entries) of dicts
        for item in data.get('data'):
            yield item.get('article_url')
def get_page_detail(url):
    """Download a detail page and return its HTML text, or None on failure."""
    try:
        resp = requests.get(url)
    except RequestException:
        print('请求详情页出错')  # "error requesting the detail page"
        return None
    return resp.text if resp.status_code == 200 else None
def parse_page_detail(html):
    """Extract and print the page <title>; return it.

    Args:
        html: HTML text of a detail page.

    Returns:
        The title string, or None when the page has no <title> element
        (the original indexed select(...)[0] unconditionally and raised
        IndexError on such pages).
    """
    soup = BeautifulSoup(html, 'lxml')
    nodes = soup.select('title')
    if not nodes:  # robustness: pages without a <title> no longer crash
        return None
    title = nodes[0].get_text()
    print(title)
    # NOTE(review): a commented-out gallery-regex experiment was removed here;
    # it was also broken ('pile(' — the 're.com' prefix had been lost).
    return title
def main():
    """Crawl the first index page and visit every detail page it lists."""
    index_html = get_page_index('0', '街拍')
    # parse_page_index() is a generator yielding one detail-page URL per step
    for detail_url in parse_page_index(index_html):
        print(detail_url)
        detail_html = get_page_detail(detail_url)
        if detail_html:
            parse_page_detail(detail_html)


if __name__ == '__main__':
    main()
# Source article: python爬虫今日头条街拍美图开发背景_分析Ajax请求并抓取今日头条街拍美图:爬取详情页的url与实际页面上显示不符...
# (stray pasted title, commented out so the file parses; notes that crawled
# detail-page URLs may differ from what the site actually displays)