Table of Contents
- 1. The data acquisition workflow
- 2. Fetching the list page and extracting article URLs
- 3. Fetching and parsing the article detail pages
- Full code
1. The data acquisition workflow
We start by capturing the site's network traffic, which leads us straight to the API that serves the data we need.
The request carries five parameters, and only the first and the fifth ever change: the first is the page number, and the fifth is a number generated from a timestamp (it can be omitted).
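The post does not show how that fifth parameter is built, only that it comes from a timestamp and that the request works without it. A common pattern for such parameters is a millisecond timestamp; the sketch below is only a guess at that scheme:

```python
import time

# Guess: the optional fifth query parameter looks like a millisecond timestamp.
# This is an assumption, and the request also works with no such parameter.
ts_param = str(int(time.time() * 1000))
print(ts_param)  # e.g. '1720569600123'
```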
From the response we can extract each article's title and its concrete URL.
That is the entire workflow.
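As a quick illustration, here is a minimal sketch of one list-page request, just to show the response shape. The `title` field name inside each list item is a hypothetical placeholder; only `contentStaticPage` is confirmed by the code in the next section:

```python
import requests

# One list-page request, only to inspect the response shape.
res = requests.get(
    'https://cms.offshoremedia.net/front/list/latest',
    params={'pageNum': '1', 'pageSize': '15',
            'siteId': '694841922577108992',
            'channelId': '780811183157682176'},
    headers={'User-Agent': 'Mozilla/5.0'},
).json()

for item in res["info"]["list"]:
    # "title" is a guessed field name, so use .get() to avoid a KeyError
    print(item.get("title"), item["contentStaticPage"])
```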
2. Fetching the list page and extracting article URLs
```python
import requests

url = 'https://cms.offshoremedia.net/front/list/latest'
parm = {
    'pageNum': '1',
    'pageSize': '15',
    'siteId': '694841922577108992',
    'channelId': '780811183157682176'
}
header = {
    'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0',
}
falidurl = []  # collects URLs that fail to parse later on

for page in range(1, 50):
    parm['pageNum'] = str(page)
    res = requests.get(url, headers=header, params=parm).json()
    for item in res["info"]["list"]:
        newurl = item["contentStaticPage"]  # the article's static detail-page URL
```
We send GET requests to https://cms.offshoremedia.net/front/list/latest and vary the value of pageNum to walk through the pages, collecting the concrete URL of every article.
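The loop above assumes 49 pages. If you do not know the page count in advance, one option is to keep paging until the API returns an empty list; this sketch assumes the endpoint behaves that way past the last page, which is untested:

```python
page = 1
article_urls = []
while True:
    parm['pageNum'] = str(page)
    res = requests.get(url, headers=header, params=parm).json()
    items = res["info"]["list"]
    if not items:  # assumption: an empty list means we ran out of pages
        break
    for item in items:
        article_urls.append(item["contentStaticPage"])
    page += 1
```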
3. Fetching and parsing the article detail pages
```python
import requests
from lxml import etree
import time

def xxl(url):
    head = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Accept-Encoding': 'gzip, deflate, br, zstd'
    }
    # Example detail page:
    # url = 'https://www.oushinet.com/static/content/china/chinanews/2024-07-10/1260625203458775997.html'
    res = requests.get(url, headers=head)
    res.encoding = 'utf-8'
    etrees = etree.HTML(res.text)
    id = url.split('/')[-1].split('.')[0]  # the article id is the file name in the URL
    clas = url.split('/')[5]               # the category segment of the URL
    title = etrees.xpath(f'//*[@id="{id}"]/text()')[0]
    timee = etrees.xpath('/html/body/div[1]/div[2]/div/div[1]/div[1]/div[1]/div[3]/span[1]/i/text()')[0]
    now = int(timee)                       # millisecond timestamp
    timeArray = time.localtime(now / 1000)
    otherStyleTime = time.strftime("%Y-%m-%d", timeArray)
    Released = "发布时间:" + otherStyleTime  # "publication date: YYYY-MM-DD"
    imgurl = etrees.xpath('/html/body/div[1]/div[2]/div/div[1]/div[1]/div[1]//img/@src')
    if imgurl == []:
        imgurl = "无图片"                   # "no image"
    # the <b> tag holds the image source/credit
    Imageannotations = etrees.xpath('/html/body/div[1]/div[2]/div/div[1]/div[1]/div[1]/div[4]/div/p/b/text()')
    if Imageannotations == []:
        Imageannotations = "无图片注释"     # "no image caption"
    text = etrees.xpath('/html/body/div[1]/div[2]/div/div[1]/div[1]/div[1]/div[4]/div/p[@style="text-indent:2em;"]/text()')
    summary = text[0]  # the first indented paragraph serves as the summary
    del text[0]
    body = ""
    for i in text:
        body = body + '\n' + i
    return [id, clas, title, otherStyleTime, Released, str(imgurl), str(Imageannotations), summary, body, url]
```
Here we use XPath to extract the data.
This sample code works for the great majority of pages on the site; a few pages use an unusual layout and are not handled here.
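A quick way to sanity-check the parser is to run it against the example article mentioned in the comment inside xxl(), assuming this runs in the same file (or after `from cs import xxl`):

```python
row = xxl('https://www.oushinet.com/static/content/china/chinanews/2024-07-10/1260625203458775997.html')
print(row[2])  # title
print(row[4])  # "发布时间:YYYY-MM-DD" (publication date)
print(row[7])  # summary (the first indented paragraph)
```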
Full code
A single thread is slow, so we run the scraper on a thread pool.
We also use the PooledDB module so that multiple threads can operate on the database safely.
Why use PooledDB: https://blog.csdn.net/zer_o_o/article/details/86742430
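The INSERT below expects a table named untitled with ten columns. The post does not show the schema, so the column types in this sketch are guesses inferred from the values xxl() returns:

```python
import pymysql

# Hypothetical schema for the `untitled` table; adjust types as needed.
conn = pymysql.connect(host='127.0.0.1', user='root', password='root',
                       port=3306, database='news', charset='utf8mb4')
with conn.cursor() as cursor:
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS untitled (
            id               VARCHAR(32) PRIMARY KEY,
            clas             VARCHAR(64),
            title            VARCHAR(512),
            otherStyleTime   VARCHAR(16),
            Released         VARCHAR(32),
            imgurl           TEXT,
            Imageannotations TEXT,
            summary          TEXT,
            body             LONGTEXT,
            url              VARCHAR(512)
        ) CHARACTER SET utf8mb4
    """)
conn.commit()
conn.close()
```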
```python
from multiprocessing.dummy import Pool
import requests
from cs import xxl  # xxl() is the detail-page parser above, saved as cs.py
import pymysql
from dbutils.pooled_db import PooledDB

# Database connection pool, shared by all worker threads
db_pool = PooledDB(
    creator=pymysql,
    maxconnections=32,
    mincached=10,
    blocking=True,
    ping=0,
    host='127.0.0.1',
    user='root',
    password='root',
    port=3306,
    database='news',
)

falidurl = []  # URLs that failed to download or parse

url = 'https://cms.offshoremedia.net/front/list/latest'
parm = {
    'pageNum': '3',  # overwritten in query()
    'pageSize': '15',
    'siteId': '694841922577108992',
    'channelId': '780811183157682176'
}
header = {
    'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0',
    'Origin': 'https://www.oushinet.com',
    'Referer': 'https://www.oushinet.com/',
    'Content-Type': 'application/json;charset=UTF-8'
}

def query(page):
    parm['pageNum'] = str(page)
    res = requests.get(url, headers=header, params=parm).json()
    for item in res["info"]["list"]:
        try:
            newurl = item["contentStaticPage"]
            row = xxl(newurl)
            with db_pool.connection() as conn:  # the with statement returns the connection to the pool
                with conn.cursor() as cursor:   # and closes the cursor automatically
                    sql = """INSERT INTO untitled
                                 (id, clas, title, otherStyleTime, Released, imgurl,
                                  Imageannotations, summary, body, url)
                             VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
                    cursor.execute(sql, row)
                conn.commit()
        except Exception as e:
            print(f"Failed to process URL {item['contentStaticPage']}: {str(e)}")
            falidurl.append(item["contentStaticPage"])

pages = list(range(1, 1000))
thread_pool = Pool(30)  # thread pool; named so it does not shadow db_pool
thread_pool.map(query, pages)
# No explicit connection cleanup is needed: the with statements already handle it
```
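Since failed URLs are collected in falidurl, a simple follow-up is a single-threaded retry pass after the pool finishes; this sketch just reuses xxl() and db_pool from above:

```python
# Retry the URLs that failed during the threaded run (sketch).
still_failed = []
for failed_url in falidurl:
    try:
        row = xxl(failed_url)
        with db_pool.connection() as conn:
            with conn.cursor() as cursor:
                cursor.execute(
                    """INSERT INTO untitled
                           (id, clas, title, otherStyleTime, Released, imgurl,
                            Imageannotations, summary, body, url)
                       VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""",
                    row)
            conn.commit()
    except Exception:
        still_failed.append(failed_url)
print("URLs that still fail:", still_failed)
```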