1200字范文,内容丰富有趣,写作的好帮手!
1200字范文 > python通过selenium获取网页信息可以绕过反扒系统

python通过selenium获取网页信息可以绕过反爬系统

时间:2022-08-12 14:12:01

相关推荐

python通过selenium获取网页信息可以绕过反爬系统

加载selenium控件:

pip install selenium; pip show selenium

web下载地址:

chrome浏览器,chrome插件,谷歌浏览器下载,谈笑有鸿儒 ()

配置环境变量path:C:\Program Files\Python38

完整代码:

"""Fetch a stock page with Selenium (to get past anti-crawler checks) and
save the rendered HTML to rrBand.html for later offline parsing."""
import time

from bs4 import BeautifulSoup
from selenium.webdriver import Chrome, ChromeOptions

opt = ChromeOptions()          # Chrome launch options
opt.headless = True            # headless mode: no visible browser window
driver = Chrome(options=opt)

# NOTE(review): the URL lost its scheme/host when this article was scraped —
# restore the full "https://..." address before running.
driver.get('/PC_HSF10/OperationsRequired/Index?type=web&code=SH601600')

# Wait for the page's JavaScript to render BEFORE grabbing the source.
# (The original parsed first and slept afterwards, which defeats the wait.)
time.sleep(2)
source = driver.page_source

# Persist the rendered HTML; utf-8 avoids UnicodeEncodeError on Windows,
# whose default locale encoding cannot represent the page's Chinese text.
with open('rrBand.html', 'w', encoding='utf-8') as f:
    f.write(source)

html = BeautifulSoup(source, 'html.parser')
div_list = html.find_all('div', attrs={'class': 'sckrox'})
print(div_list)

driver.quit()

代码到这里写不下去了,pandas还是没有json好用:

"""Parse the saved quote page (rrBand.html) and append one record to a CSV."""
import csv

import pandas as pd
from bs4 import BeautifulSoup


def data_a(containers):
    """Scrape price/valuation fields from the page's main stock <div>.

    ``containers`` is a list of bs4 Tags (the ``stock__main`` divs).
    Returns a one-row DataFrame; only the FIRST container is processed
    (the original returned from inside the loop).
    """
    df = pd.DataFrame()
    for item in containers:
        # attrs must be a dict ({'class': value}); the original passed a set,
        # which bs4 cannot match against.
        name_text = item.find_all('div', attrs={'class': 'stock-name'})[0].text
        band_name = name_text.strip('()')   # e.g. "中国铝业(SH601600"
        # One-element lists create a single data row on an empty frame (the
        # original relied on trailing-comma tuples for the same effect).
        df['序号'] = ['']
        df['股票'] = [name_text.strip('()')[:band_name.find('(')].strip(')')]
        df['代码'] = [name_text[5:].strip(')')]
        df['股价'] = [item.find_all('div', attrs={'class': 'stock-current'})[0].text.strip('¥')]
        for table in item.find_all('table', attrs={'class': 'quote-info'}):
            spans = table.find_all('span')
            # NOTE(review): positional span indexes (18/16/10/20) are tied to
            # the site's current layout — confirm against the live page.
            # (The original rewrote these columns inside a pointless
            # ``for i in range(21)`` loop; one pass is equivalent.)
            df['总市值(亿)'] = [spans[18].text.strip('亿')]
            df['总股本(亿)'] = ['']
            df['营业额'] = ['']
            df['EPS每股收益'] = [spans[16].text]
            df['分红'] = ['']
            df['分红率'] = ['']
            df['营市比'] = ['']
            df['PE市盈率'] = [spans[10].text]
            df['PB市净率'] = [str(float(df['股价'][0]) / float(spans[20].text))[:4]]
        return df  # first container only
    return df


def data_b(containers):
    """Placeholder second pass: fills 营业额/负债率 with dummy values.

    Same input/return contract as :func:`data_a`.
    """
    df = pd.DataFrame()
    for item in containers:
        name_text = item.find_all('div', attrs={'class': 'stock-name'})[0].text
        band_name = name_text.strip('()')
        df['营业额'] = ['13']
        df['股票'] = [name_text.strip('()')[:band_name.find('(')].strip(')')]
        df['代码'] = [name_text[5:].strip(')')]
        df['负债率'] = ['12']
        return df  # first container only
    return df


if __name__ == "__main__":
    # Parse the page saved earlier by the Selenium fetch script.
    with open('rrBand.html', 'r', encoding='utf-8') as f:
        html = BeautifulSoup(f, 'lxml')
    containers = html.find_all('div', attrs={'class': 'container-sm float-left stock__main'})

    band_n = ['序号', '股票', '代码', '股价', '总市值(亿)', '总股本(亿)', '营业额',
              'EPS每股收益', '分红', '分红率', '营市比', 'PE市盈率', 'PB市净率', '负债率']
    df_a = data_a(containers)
    df_b = data_b(containers)
    # DataFrame.append() was removed in pandas 2.0 — concat is the supported spelling.
    df = pd.concat([df_a, df_b])
    print(df)

    # mode 'a+' appends a header plus one data row per run, exactly as the
    # original did; ``with`` already closes the file (the original's explicit
    # f.close() inside the with-block was redundant).
    with open('fundWedb.csv', mode='a+', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(band_n)
        writer.writerow([df[col].values[0] for col in band_n])

完整代码:写入json

"""Scrape a xueqiu stock page live with Selenium, dump the record to JSON,
and write the fields into an Excel workbook via xlwings."""
import json
import time

import pandas as pd
import xlwings as xw
from bs4 import BeautifulSoup
from selenium.webdriver import Chrome, ChromeOptions


def data_a(containers):
    """Scrape basic quote fields (price, market cap, EPS, PE, PB) into a
    one-row DataFrame.

    ``containers`` is a list of bs4 Tags (the ``stock__main`` divs).
    """
    df = pd.DataFrame()
    for i, item in enumerate(containers):
        try:
            name_text = item.find_all('div', attrs={'class': 'stock-name'})[0].text
            band_name = name_text.strip('()')
            # One-element lists create a single data row on an empty frame
            # (the original relied on trailing-comma tuples for this).
            df['序号'] = ['']
            df['股票'] = [name_text.strip('()')[:band_name.find('(')].strip(')')]
            df['代码'] = [name_text[5:].strip(')').replace(':', '')]
            df['股价'] = [item.find_all('div', attrs={'class': 'stock-current'})[0].text.strip('¥')]
            print(df['股价'])
            # attrs must be a dict; the original passed a set, which bs4
            # cannot match against.
            for table in item.find_all('table', attrs={'class': 'quote-info'}):
                spans = table.find_all('span')
                # NOTE(review): positional indexes (18/16/10/20) depend on the
                # site's current layout — confirm against the live page.
                df['总市值(亿)'] = [spans[18].text.strip('亿')]
                df['总股本(亿)'] = ['']
                df['EPS每股收益'] = [spans[16].text]
                df['分红率'] = ['']
                df['营市比'] = ['']
                df['PE市盈率'] = [spans[10].text]
                df['PB市净率'] = [str(float(df['股价'][0]) / float(spans[20].text))[:4]]
            print(str(i), "第一模块写入正常")
        except Exception as exc:   # was a bare except; keep the log, show the cause
            print(str(i), "第一模块写入异常", exc)
    return df


def data_b(tbodies):
    """Collect every <td><p> text from the indicator tables into a flat list,
    then pick operating income (index 0) and debt ratio (index 85) from it.

    ``tbodies`` is the list of <tbody> Tags from the key-indicator page.
    """
    df = pd.DataFrame()
    band_income = []
    for tbody in tbodies:
        for row in tbody.find_all('tr'):
            # Distinct loop names: the original reused ``i``/``item`` at all
            # three nesting levels, shadowing the outer variables.
            for i, cell in enumerate(row.find_all('td')):
                try:
                    band_income.append(cell.find_all('p')[0].contents[0])
                    df['分红'] = ['']
                    df['负债率'] = ['']
                    print(str(i), "第二模块写入正常")
                except Exception:
                    print(str(i), "第二模块写入异常")
    # NOTE(review): flat-list indexes 0 and 85 are tied to the table layout —
    # confirm against the live page.
    df['营业额'] = [band_income[0].strip('亿')]
    df['负债率'] = [band_income[85]]
    print(df['营业额'])
    return df


if __name__ == "__main__":
    # --- Excel workbook (xlwings) ---
    app = xw.App(visible=False, add_book=False)
    wb = app.books.open('fundWebd.xlsx')
    sh = wb.sheets['worksheet']
    rng = [v for v in sh.range("b:b").value if v is not None]   # used cells of column B
    j = sh.range('a1').expand('table').rows.count               # current table row count
    app.display_alerts = False
    app.screen_updating = False

    # --- Browser ---
    opt = ChromeOptions()
    opt.headless = False   # visible window (the original toggled this off too)
    driver = Chrome(options=opt)
    # NOTE(review): these URLs lost their scheme/host when the article was
    # scraped — restore the full "https://xueqiu.com..." addresses.
    xueqiu_url = '/S/SH601600'                            # quote page
    xueqiu_url_a = '/snowman/S/SH601600/detail#/ZYCWZB'   # key-indicator page

    driver.get(xueqiu_url)
    time.sleep(1)          # let the page render BEFORE grabbing the source
    html = BeautifulSoup(driver.page_source, 'html.parser')
    containers = html.find_all('div', attrs={'class': 'container-sm float-left stock__main'})
    df_a = data_a(containers)

    time.sleep(2)
    driver.get(xueqiu_url_a)
    time.sleep(2)
    # Click the second button of the indicator tab bar.
    # find_element_by_xpath is the Selenium-3 API this script was written for.
    driver.find_element_by_xpath(".//div[contains(@class,'stock-info-btn-list')]/span[2]").click()
    time.sleep(4)          # wait for the table to reload after the click
    html = BeautifulSoup(driver.page_source, 'html.parser')
    df_b = data_b(html.find_all('tbody'))

    # DataFrame.append() was removed in pandas 2.0 — concat rows instead.
    df = pd.concat([df_a, df_b])
    print(df)
    df.to_json('fundWebd.json', orient='records', force_ascii=False)
    with open('fundWebd.json', 'r', encoding='utf-8') as f:
        data = json.load(f)

    band_n = ['序号', '股票', '代码', '股价', '总市值(亿)', '总股本(亿)', '营业额',
              'EPS每股收益', '分红', '分红率', '营市比', 'PE市盈率', 'PB市净率', '负债率']
    # The original looped range(len(data)) but always wrote data[0] into the
    # same row — a single pass over the columns is equivalent.
    for col, key in enumerate(band_n):
        sh.cells[1, col].value = data[0][key]

    wb.save('fundWebd.xlsx')
    app.quit()
    driver.quit()   # quit() ends the session; the original's extra close() after it would raise

本内容不代表本网观点和政治立场,如有侵犯你的权益请联系我们处理。
网友评论
网友评论仅供其表达个人看法,并不表明网站立场。