
Scraping news site data with Spring Boot + jsoup

Posted: 2024-02-22 05:39:13


Steps:

(1) Fetch the HTML page from the given URL

(2) Parse the HTML page, extract all of its list data, and save it to the database (MongoDB)

(3) Iterate over the saved records and update each one with its detail data
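For a page whose markup is served statically, steps (1) and (2) need nothing but jsoup. A minimal sketch follows; the URL and the "a.title" selector are placeholders I made up, and the ifeng list page itself is JS-rendered, which is exactly why the full example below falls back to HtmlUnit:

// Minimal sketch of steps (1)-(2) for a *static* page, using only jsoup.
// Runs inside any method that declares "throws IOException".
Document doc = Jsoup.connect("https://example.com/news")   // placeholder URL
        .userAgent("Mozilla/5.0")
        .timeout(10_000)
        .get();
for (Element link : doc.select("a.title")) {               // placeholder selector
    System.out.println(link.text() + " -> " + link.attr("href"));
}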

1. Import the jar packages

<!-- httpclient -->
<dependency>
    <groupId>org.apache.httpcomponents</groupId>
    <artifactId>httpclient</artifactId>
    <version>4.5.2</version>
</dependency>
<!-- htmlunit -->
<dependency>
    <groupId>net.sourceforge.htmlunit</groupId>
    <artifactId>htmlunit</artifactId>
    <version>2.33</version>
</dependency>
<dependency>
    <groupId>net.sf.json-lib</groupId>
    <artifactId>json-lib</artifactId>
    <version>2.4</version>
    <classifier>jdk15</classifier>
</dependency>
<dependency>
    <groupId>com.googlecode.juniversalchardet</groupId>
    <artifactId>juniversalchardet</artifactId>
    <version>1.0.3</version>
</dependency>
<dependency>
    <groupId>org.jsoup</groupId>
    <artifactId>jsoup</artifactId>
    <version>1.10.3</version>
</dependency>
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-data-mongodb</artifactId>
</dependency>
<dependency>
    <groupId>org.mongodb</groupId>
    <artifactId>mongo-java-driver</artifactId>
    <version>3.0.4</version>
</dependency>
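One caveat: the parsing code below calls JSONObject.parseObject and JSONArray.parseArray, which come from Alibaba's fastjson rather than the json-lib artifact above, so you will likely also need a fastjson dependency, for example:

<!-- fastjson: provides the JSONObject.parseObject / JSONArray.parseArray used below -->
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>fastjson</artifactId>
    <version>1.2.83</version>
</dependency>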

2. Parse the ifeng.com news pages

/**
 * Crawl the hot military news list from ifeng.com
 */
@GetMapping("/saveNewStream")
public void saveNewStream(String url) throws IOException {
    log.info("Program started...");
    long startTime = new Date().getTime();
    // 1. Load the page, including its dynamically generated JS content
    Document document = HtmlUtils.getHtmlunit(url);
    // 2. Grab the inline <script> that carries the page data
    String html = document.getElementsByTag("script").get(2).html();
    html = html.replace("//<![CDATA[", "");
    html = html.replace("//]]>", "");
    String[] data = html.split("var");
    String sp = "allData =";
    // 3. Extract the JSON payload
    List<NewsStream> newsStreamArrayList = new ArrayList<>();
    for (String variable : data) {
        if (variable.contains(sp)) {
            variable = variable.replace(sp, "").trim();
            // Drop the trailing ';'
            variable = variable.substring(0, variable.length() - 1);
            JSONObject jsonObject = JSONObject.parseObject(variable);
            // 4. Pull out the news list we need
            JSONArray newsstream = jsonObject.getJSONArray("newsstream");
            // Convert the JSONArray into domain objects
            List<NewsStream> newsStreams = JSONArray.parseArray(newsstream.toString(), NewsStream.class);
            newsStreamArrayList.addAll(newsStreams);
            // 5. Recursively fetch the following pages
            List<NewsStream> listThree = HtmlUtils.buildTree(newsStreams);
            newsStreamArrayList.addAll(listThree);
            // 6. Save the data
            int i = newsStreamService.saveNewsStream(newsStreamArrayList);
        }
    }
    // Fetch the detail data for every news item
    newsStreamArrayList.forEach(n -> {
        NewsStream newsStream = new NewsStream();
        String articleUrl = n.getUrl();
        newsStream.setId(n.getId());
        Document doc;
        // Load the article detail page
        try {
            String htmlInfo = Requests.get(articleUrl);
            doc = Jsoup.parse(htmlInfo);
        } catch (MalformedURLException e) {
            e.printStackTrace();
            return; // skip this item if the page could not be fetched
        }
        // Detail block: title + content + images
        Elements elements = doc.select("div[id=root]").select("div[class=artical-25JfwmD5]");
        // Source and publish time
        Elements span = elements.select("div[class=info-3Ht6Fk1n clearfix]").select("span");
        // Publish time
        String time = span.first().text();
        newsStream.setNewsTime(time);
        // Source
        String source = span.select("a[href]").text();
        newsStream.setSource(source);
        // Content + images
        Elements contentImg = elements.select("div[class=main_content-r5RGqegj]").select("div[class=text-3w2e3DBc]");
        // Content
        String content = contentImg.text();
        newsStream.setContent(content);
        // Images
        Elements img = contentImg.select("p").select("[src]");
        List<String> list = new ArrayList<>();
        for (Element element : img) {
            // Read the src attribute of each image
            list.add(element.attr("src"));
        }
        newsStream.setImages(list);
        // Update the MongoDB record by id
        newsStreamService.updateNewsStream(newsStream);
    });
    long endTime = new Date().getTime();
    log.info("******** Finished in " + (endTime - startTime) + " ms ********");
}
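To make the script-scraping step easier to follow in isolation, here is a self-contained toy version of it. The embedded HTML string is invented for illustration and far simpler than the real ifeng page, but the stripping logic is the same:

import com.alibaba.fastjson.JSONObject;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;

public class ScriptExtractDemo {
    public static void main(String[] args) {
        // Toy page: a script tag holding "var allData = {...};"
        String html = "<html><body><script>var allData = {\"newsstream\":[]};</script></body></html>";
        Document doc = Jsoup.parse(html);
        String script = doc.getElementsByTag("script").first().html();
        // Strip the "var allData =" prefix and the trailing ';'
        String json = script.replace("var allData =", "").trim();
        json = json.substring(0, json.length() - 1);
        JSONObject data = JSONObject.parseObject(json);
        System.out.println(data.getJSONArray("newsstream").size()); // prints 0
    }
}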

Getting dynamically generated JS content for jsoup

@Slf4j
public class HtmlUtils {

    /**
     * Use HtmlUnit to load a page whose content is generated by JS,
     * then hand the rendered markup to jsoup.
     */
    public static Document getHtmlunit(String url) throws IOException {
        // Build a browser client that emulates Chrome
        final WebClient webClient = new WebClient(BrowserVersion.CHROME);
        // Don't throw when JS on the page errors out
        webClient.getOptions().setThrowExceptionOnScriptError(false);
        // Don't throw on non-200 HTTP status codes
        webClient.getOptions().setThrowExceptionOnFailingStatusCode(false);
        webClient.getOptions().setActiveXNative(false);
        // CSS is not needed since the page is never rendered on screen
        webClient.getOptions().setCssEnabled(false);
        // Important: enable JS
        webClient.getOptions().setJavaScriptEnabled(true);
        // Important: support AJAX
        webClient.setAjaxController(new NicelyResynchronizingAjaxController());
        String pageXml = null;
        try {
            HtmlPage page = webClient.getPage(url);
            // Async JS takes time, so block for up to 30 seconds until it finishes
            // (this must happen before the client is closed, not after)
            webClient.waitForBackgroundJavaScript(30000);
            // Serialize the fully loaded page into an XML string
            pageXml = page.asXml();
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            webClient.close();
        }
        // From here on it is ordinary string work: parse with jsoup
        return Jsoup.parse(pageXml);
    }

    // URL fragment of the "load more" request
    private static String getViewUrl = "/shanklist/_/getColumnInfo/_/default/";
    // JSONP callback parameter
    private static String callback = "getColumnInfoCallback";

    /**
     * Fetch the "view more" chunk of the list.
     */
    public static String getViewMore(String id, String nesTime, Long nowTime) throws MalformedURLException {
        StringBuffer viewUrl = new StringBuffer();
        viewUrl.append(getViewUrl).append(id).append("/").append(nesTime)
                .append("/20/14-35083-/").append(callback)
                .append("?callback=").append(callback)
                .append("&_").append(nowTime);
        return Requests.get(viewUrl.toString());
    }

    /**
     * Walk the list page by page, collecting every news item.
     */
    public static List<NewsStream> buildTree(List<NewsStream> list) {
        List<NewsStream> newsStreamList = new ArrayList<>();
        list.forEach(n -> {
            // Only the last entry of the current page carries the cursor to the next page
            if (n.equals(list.get(list.size() - 1))) {
                String viewMore = null;
                // The request URL needs the item's publish time as a timestamp
                String nesTime = DateUtils.date2TimeStamp(n.getNewsTime(), "yyyy-MM-dd HH:mm:ss");
                try {
                    viewMore = HtmlUtils.getViewMore(n.getId(), nesTime, new Date().getTime());
                    // Strip the JSONP wrapper: getColumnInfoCallback(...) -> ...
                    viewMore = viewMore.replace("getColumnInfoCallback(", "").trim();
                    viewMore = viewMore.substring(0, viewMore.length() - 1);
                } catch (MalformedURLException e) {
                    e.printStackTrace();
                }
                // Parse the response into JSON
                JSONObject view = JSONObject.parseObject(viewMore);
                // Take the "data" element
                String image = view.getString("data");
                JSONObject object = JSONObject.parseObject(image);
                String newsstreamList = object.getString("newsstream");
                JSONArray jsonArrayNewsstreamList = JSONArray.parseArray(newsstreamList);
                // Convert the JSONArray into domain objects
                List<NewsStream> newsStreams = JSONArray.parseArray(jsonArrayNewsstreamList.toString(), NewsStream.class);
                newsStreamList.addAll(newsStreams);
                // Recurse into the next page
                List<NewsStream> streams = buildTree(newsStreams);
                newsStreamList.addAll(streams);
            }
        });
        return newsStreamList;
    }
}
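A quick way to smoke-test the helper; the URL here is illustrative, since the post never spells out the exact list URL it crawls:

import org.jsoup.nodes.Document;
import java.io.IOException;

public class HtmlUtilsSmokeTest {
    public static void main(String[] args) throws IOException {
        // Illustrative URL, not taken from the original post
        Document doc = HtmlUtils.getHtmlunit("https://mil.ifeng.com/");
        System.out.println(doc.title());
        System.out.println(doc.getElementsByTag("script").size() + " script tags found");
    }
}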

service

package com.ddtj.crawl.service;

import com.ddtj.crawl.domain.NewsStream;
import org.springframework.data.mongodb.core.query.Query;

import java.util.List;

public interface NewsStreamService {

    /**
     * Save the crawled news list.
     */
    int saveNewsStream(List<NewsStream> newsStream);

    /**
     * Update a single news record.
     */
    void updateNewsStream(NewsStream newsStream);

    /**
     * Query the news list (declared here so the implementation's
     * @Override below compiles).
     */
    List<NewsStream> getNewsStreamList(NewsStream newsStream, Query query);
}

serviceImpl

package com.ddtj.crawl.service.impl;

import com.ddtj.crawl.domain.NewsStream;
import com.ddtj.crawl.service.NewsStreamService;
import com.mongodb.client.result.UpdateResult;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.domain.Sort;
import org.springframework.data.mongodb.core.MongoTemplate;
import org.springframework.data.mongodb.core.query.Criteria;
import org.springframework.data.mongodb.core.query.Query;
import org.springframework.data.mongodb.core.query.Update;
import org.springframework.stereotype.Service;

import java.util.List;
import java.util.regex.Pattern;

@Service
@Slf4j
public class NewsStreamServiceImpl implements NewsStreamService {

    @Autowired
    MongoTemplate mongoTemplate;

    /**
     * Save the crawled news list.
     */
    @Override
    public int saveNewsStream(List<NewsStream> newsStream) {
        log.info("Inserting " + newsStream.size() + " records into MongoDB.........");
        newsStream.forEach(n -> {
            // Either save or insert works here. The difference when "_id" already exists:
            //   insert throws a duplicate-key exception;
            //   save performs an update instead.
            // In short: save = insert if absent, update if present.
            // NewsStream insert = mongoTemplate.insert(n);
            mongoTemplate.save(n);
        });
        return 0;
    }

    /**
     * Update a single news record.
     */
    @Override
    public void updateNewsStream(NewsStream newsStream) {
        Query query = new Query(Criteria.where("id").is(newsStream.getId()));
        Update update = new Update();
        update.set("source", newsStream.getSource());
        update.set("content", newsStream.getContent());
        update.set("images", newsStream.getImages());
        UpdateResult result = mongoTemplate.updateFirst(query, update, NewsStream.class);
    }

    /**
     * Query the news list.
     */
    @Override
    public List<NewsStream> getNewsStreamList(NewsStream newsStream, Query query) {
        // Fuzzy match on title
        if (null != newsStream.getTitle()) {
            Pattern patternTitle = Pattern.compile("^.*" + newsStream.getTitle() + ".*$", Pattern.CASE_INSENSITIVE);
            query.addCriteria(Criteria.where("title").regex(patternTitle));
        }
        // Fuzzy match on source
        if (null != newsStream.getSource()) {
            Pattern patternSource = Pattern.compile("^.*" + newsStream.getSource() + ".*$", Pattern.CASE_INSENSITIVE);
            query.addCriteria(Criteria.where("source").regex(patternSource));
        }
        // Fuzzy match on url
        if (null != newsStream.getUrl()) {
            Pattern patternUrl = Pattern.compile("^.*" + newsStream.getUrl() + ".*$", Pattern.CASE_INSENSITIVE);
            query.addCriteria(Criteria.where("url").regex(patternUrl));
        }
        // Newest first
        query.with(Sort.by(Sort.Order.desc("newsTime")));
        return mongoTemplate.find(query, NewsStream.class);
    }
}
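For completeness, a hedged sketch of how the fuzzy query might be called; the paging via PageRequest is my own addition, not something the original post shows:

// Hypothetical caller of getNewsStreamList; PageRequest.of comes from
// org.springframework.data.domain and is an assumption, not from the post.
Query query = new Query().with(PageRequest.of(0, 20)); // first page, 20 items
NewsStream filter = new NewsStream();
filter.setSource("凤凰网"); // fuzzy, case-insensitive match on "source"
List<NewsStream> page = newsStreamService.getNewsStreamList(filter, query);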

mapper

public interface NewsStreamMapper extends MongoRepository<NewsStream,String> {}
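Extending MongoRepository already gives this interface save, findAll, findById and the rest for free. If more is needed, Spring Data can derive queries from method names; the two methods below are hypothetical additions, not part of the original code:

import org.springframework.data.mongodb.repository.MongoRepository;
import java.util.List;

public interface NewsStreamMapper extends MongoRepository<NewsStream, String> {
    // Hypothetical derived queries: Spring Data generates the implementations from the names
    List<NewsStream> findBySource(String source);
    List<NewsStream> findByTitleContaining(String keyword);
}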

domain

package com.ddtj.crawl.domain;

// The next two import paths were garbled in the original post ("mon.core...");
// the reconstruction below assumes the project's own common module and may need adjusting.
import com.ddtj.common.core.annotation.Excel;
import com.ddtj.common.core.web.domain.BaseEntity;
import lombok.AllArgsConstructor;
import lombok.Builder; // the original imported groovy.transform.builder.Builder, which does not work with Lombok
import lombok.Data;
import lombok.NoArgsConstructor;
import org.springframework.data.mongodb.core.mapping.Document;

import java.util.List;

// Collection name in MongoDB
@Document(collection = "news_stream")
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class NewsStream extends BaseEntity {

    // id
    @Excel(name = "News ID")
    private String id;

    // Title
    @Excel(name = "Title")
    private String title;

    // Publish time
    @Excel(name = "News time")
    private String newsTime;

    // Source
    @Excel(name = "Source")
    private String source;

    // Detail content
    @Excel(name = "Content")
    private String content;

    // Original thumbnail
    @Excel(name = "Thumbnail")
    private String thumbnails;

    // Images inside the article detail
    @Excel(name = "Detail images")
    private List<String> images;

    // Article detail url
    @Excel(name = "Detail URL")
    private String url;
}
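Since the class carries Lombok's @Builder, records can also be constructed fluently; a short sketch with invented values:

// Values below are purely illustrative, to show the generated builder
NewsStream item = NewsStream.builder()
        .id("20240222001")
        .title("Sample headline")
        .newsTime("2024-02-22 05:39:13")
        .url("https://mil.ifeng.com/c/sample")
        .build();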
