
Conditionally Crawling Baidu Baike Entries and the IDs of Their Related Entries

Date: 2019-10-07 13:41:47



1. Rotate among several request headers when accessing Baidu Baike, to avoid being blocked (sketch 1 below)

2. Use CSS selectors to filter out entries whose pages contain specified content (sketch 2 below)

3. Specify a maximum recursion depth, to control how closely related the collected entries are (sketch 3 below)

4. Cache entries that have already been searched, so they are never searched twice (sketch 4 below)

5. Cache the IDs of qualifying entries that have already been recorded, so they are never recorded twice (sketch 5 below)

6. Specify keywords, at least one of which the page must contain (sketch 6 below)
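Sketch 1. A minimal illustration of point 1: attach a different random User-Agent to every request with fake_useragent, as the full script below does. The fetch helper name and the timeout value are illustrative, not part of the original script.

import urllib.request
from fake_useragent import UserAgent

ua = UserAgent()

def fetch(url, timeout=10):
    # A fresh random User-Agent per request makes the crawler harder to block.
    headers = {'User-Agent': ua.random}
    request = urllib.request.Request(url=url, headers=headers)
    with urllib.request.urlopen(request, timeout=timeout) as response:
        return response.read().decode('utf-8')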
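Sketch 2. Point 2 rejects any entry whose page matches one of a set of "blacklist" CSS selectors. The selectors here are shortened placeholders; the selects list in the full script holds the exact ones used against Baidu Baike's markup.

from bs4 import BeautifulSoup

# Shortened placeholder selectors; see the selects list in the full script.
BLACKLIST_SELECTORS = [
    'div.polysemant-list',      # disambiguation banner
    'div.authorityListPrompt',  # "content provided by ..." notice
]

def is_filtered(html):
    soup = BeautifulSoup(html, 'lxml')
    # Any match against a blacklisted selector filters the entry out.
    return any(soup.select(selector) for selector in BLACKLIST_SELECTORS)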
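Sketch 3. Point 3 limits how far the crawl follows related entries by passing the current depth through each recursive call and stopping past a configured maximum. find_related is a stand-in for the real search-result parsing.

MAX_DEPTH = 3

def find_related(word):
    # Placeholder: the real implementation scrapes the Baike search results page.
    return []

def crawl(word, depth=0):
    if depth > MAX_DEPTH:  # relevance falls off with distance from the seed keyword
        return
    for related in find_related(word):
        crawl(related, depth + 1)  # each hop from the seed increases the depth by one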
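Sketch 4. Point 4 keeps every searched keyword in a set backed by a text file, so a restarted crawl never repeats a search. The file name matches the wordUsed.txt cache used by the full script; the helper names are illustrative.

from os.path import exists

CACHE_FILE = 'wordUsed.txt'

def load_searched():
    # Rebuild the in-memory set from the cache file on startup.
    if not exists(CACHE_FILE):
        return set()
    with open(CACHE_FILE, encoding='utf-8') as f:
        return {line.strip() for line in f if line.strip()}

def mark_searched(word, searched):
    # Record the keyword both in memory and on disk.
    searched.add(word)
    with open(CACHE_FILE, 'a', encoding='utf-8') as f:
        f.write(word + '\n')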
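Sketch 5. Point 5 does the same for recorded results: the keywords already present in result.csv are loaded up front, so no entry ID is ever written twice. The word,id,url field order and the GBK encoding match the full script; the helper names are illustrative.

from os.path import exists

RESULT_FILE = 'result.csv'

def load_recorded():
    # The first field of each line is the entry keyword.
    if not exists(RESULT_FILE):
        return set()
    with open(RESULT_FILE, encoding='gbk') as f:
        return {line.split(',')[0] for line in f if line.strip()}

def record(word, entry_id, url, recorded):
    # Skip entries whose keyword is already in the result file.
    if word in recorded:
        return
    recorded.add(word)
    with open(RESULT_FILE, 'a', encoding='gbk') as f:
        f.write(word + ',' + entry_id + ',' + url + '\n')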
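Sketch 6. Point 6 accepts a page only if it contains at least one of the required keywords; leaving the list empty imposes no constraint, which is how the full script behaves with its contains list left empty.

REQUIRED_KEYWORDS = []  # e.g. ['食品', '安全']; an empty list means no keyword constraint

def contains_keyword(html):
    if not REQUIRED_KEYWORDS:
        return True
    return any(word in html for word in REQUIRED_KEYWORDS)

The full script, combining all six points, follows.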

#!/usr/bin/env python3
import urllib.request
from bs4 import BeautifulSoup
from time import sleep
import re
from urllib import parse
from os.path import exists, isfile
import socket
from fake_useragent import UserAgent

# hrefs on the Baike search page are relative; an absolute prefix is assumed here
# so that urlopen receives a full URL
baseUrl = 'https://baike.baidu.com'


def getlist(word='食品安全', nowDeep=0):
    '''Main routine: word is the search keyword, nowDeep the current search depth.'''
    global wordsUsed
    global existsWords
    global deep
    if word in wordsUsed:
        print(word + "\t" + 'already searched')
        return ''
    if nowDeep > deep:  # current depth exceeds the configured maximum
        return ''
    wordsUsed.append(word)
    wordUsed(word)
    select = '#body_wrapper > div.searchResult > dl dd > a'
    url = baseUrl + '/search?word=' + \
        parse.quote(word) + '&pn=0&rn=0&enc=utf8&sefr=sebtn'
    z_data = getHtmlText(url)
    soup = BeautifulSoup(z_data, 'lxml')
    all = soup.select(select)
    pat = re.compile('/item/' + '(.*?)' + r'\?sefr=sebtn', re.S)
    if len(all) > 0:
        for item in all:
            sleep(1)
            item_url = item.get('href')
            result = pat.findall(item_url)
            if len(result) > 0:
                result = result[0].split('/')
                if parse.unquote(result[0]) not in existsWords:
                    # print(result[0] + "\t" + result[1].replace('?', ''))
                    if len(result) == 2:
                        existsWords.append(parse.unquote(result[0]))
                        getlist(parse.unquote(result[0]), nowDeep + 1)
                        print("processing\t" + parse.unquote(result[0]))
                        text_html = getHtmlText(baseUrl + item_url)
                        if containWords(text_html) == True:
                            htmlTags = checkItem(text_html)
                            if len(htmlTags) > 0:
                                print(parse.unquote(result[0]) + "\t" + htmlTags)
                            else:
                                nums = write(parse.unquote(result[0]), result[1].replace('?', ''), parse.unquote(item_url))
                                print(parse.unquote(result[0]) + "\t" + str(nums))


def wordUsed(word):
    '''Append a searched keyword to the cache file.'''
    file = open('wordUsed.txt', 'a', encoding='utf-8')
    file.write(word + "\n")
    file.close()


def getWordUsed():
    '''Load the keywords that have already been searched.'''
    global wordUsedFile
    if exists(wordUsedFile) and isfile(wordUsedFile):
        words = []
        with open(wordUsedFile, encoding='utf-8') as file:
            for line in file:
                line = line.split()
                if len(line) > 0:
                    words.append(line[0])
        return words
    else:
        return []


def getHtmlText(url, code='UTF-8'):
    '''Fetch the HTML source for a URL, using a random User-Agent.'''
    global ua
    global timeWait
    socket.setdefaulttimeout(timeWait)
    result = ''
    try:
        user_agent = ua.random
        head = {'User-Agent': user_agent}
        request = urllib.request.Request(url=url, headers=head)
        response = urllib.request.urlopen(request).read()
        result = response.decode(code)
    except Exception as err:
        print(str(err))
    finally:
        return result


def checkItem(text_html):
    '''Check whether the entry's page matches any of the blacklisted CSS selectors.'''
    global selects
    for select in selects:
        htmlTags = BeautifulSoup(text_html, 'lxml').select(select)
        if len(htmlTags) > 0:
            return str(htmlTags)
    return ''


def containWords(text_html):
    '''Check whether the entry's page contains at least one required keyword.'''
    global contains
    if len(contains) == 0:  # an empty list imposes no keyword constraint
        return True
    for word in contains:
        if word in text_html:
            return True
    return False


def write(word, id, url):
    '''Append a record to the result file and count the records written.'''
    global nums
    global resulFile
    file = open(resulFile, 'a', encoding='gbk')
    file.write(word + ',' + id + ',' + url + "\n")
    file.close()
    # print(word + ',' + id + ',' + url)
    nums = nums + 1
    return nums


def getExistWords():
    '''Load the keywords already stored in the result file.'''
    global resulFile
    if exists(resulFile) and isfile(resulFile):
        Words = []
        with open(resulFile, encoding='gbk') as file:
            for line in file:
                Words.append(line.split(',')[0])
        return Words
    else:
        return []


if __name__ == "__main__":
    ua = UserAgent()
    wordUsedFile = 'wordUsed.txt'  # cache of keywords already searched
    wordsUsed = getWordUsed()
    timeWait = 10  # timeout (seconds) when fetching a page
    resulFile = 'result.csv'  # result file
    existsWords = getExistWords()  # keywords already stored in the result file
    nums = 0  # number of qualifying records written
    deep = 3  # maximum search depth
    selects = [
        'body > div.body-wrapper > div.before-content > div.polysemant-list.polysemant-list-normal > div > div',  # polysemous (disambiguation) entries
        'body > div.body-wrapper > div.content-wrapper > div > div.main-content > a',  # expert contributions
        'body > div.body-wrapper.feature.feature_small.medical > div.feature_poster > div > div.poster-left > div.poster-top > div.authorityListPrompt > div',  # "content provided by ..."
        'body > div.body-wrapper > div.content-wrapper > div > div.main-content > div.authorityListPrompt > div',  # "reviewed by ..."
        '#spe-mod-scienceWord > div.sWord-link'  # "the above content approved and published by ..."
    ]  # any entry whose page matches one of these CSS selectors is filtered out
    contains = []  # keywords the page must contain (at least one)
    getlist(word='食品安全与健康')
