参考网址:
https://blog.csdn.net/m0_61981943/article/details/131262987
- 导入相关库,用 get 请求方式请求网页:
import requests
import parsel
import csv
import time
import random

# First page of the "hot" travel-guide list on qunar.com.
url = 'https://travel.qunar.com/travelbook/list.htm?page=1&order=hot_heat'
response = requests.get(url)
# The returned HTML content of the listing page.
html_data = response.text
print(html_data)
Selector类允许你创建一个对象,该对象可以用来从给定的 HTML 或 XML 文本中选择特定的元素。
# Wrap the raw HTML in a parsel Selector so CSS queries can be run against it.
selector = parsel.Selector(text=html_data)
print(selector)
selector.css('.b_strategy_list li h2 a::attr(href)'):
- selector是前面创建的parsel.Selector对象,它代表网页的 HTML 内容。
- .css()是parsel.Selector对象的一个方法,用于使用 CSS 选择器语法来选择网页中的元素。
- '.b_strategy_list li h2 a'是 CSS 选择器表达式,它的含义是选择类名为b_strategy_list的元素下所有< li >元素内< h2 >标签中的< a >标签。这个选择器的目的是找到网页中特定位置的链接元素。
- '::attr(href)'是一个 CSS 伪元素选择器,用于选择< a >标签的href属性。它的作用是提取这些链接元素的href属性值,也就是链接地址。
- .getall():
这是对前面选择结果的一个操作,用于获取所有满足选择条件的元素的href属性值,并以列表的形式返回。
所以,整行代码的作用是从网页的 HTML 内容中选择具有特定结构的链接元素,并提取它们的链接地址,存储在一个列表url_list中.
url_list = selector.css('.b_strategy_list li h2 a::attr(href)').getall()
保存到.csv文件里面
# Open the output CSV in append mode; newline='' avoids blank rows on Windows.
# `with` guarantees the file is closed (the original never closed it).
with open('旅游攻略.csv', mode='a', encoding='utf-8', newline='') as csv_qne:
    csv_writer = csv.writer(csv_qne)
    # Header row. NOTE(review): because the file is opened in append mode,
    # a header is written on every run — confirm this is intended.
    csv_writer.writerow(['地点', '标题', '出发时间', '天数', '人均消费', '人物', '玩法', '浏览量','点赞量' ,'作者'])
    # Visit each detail page found on the listing page.
    # (The original placed the two lines below BEFORE the loop, where
    # `detail_url` is not yet defined — they belong inside the loop body.)
    for detail_url in url_list:
        # Strip the '/youji/' prefix to obtain the note id.
        detail_id = detail_url.replace('/youji/', '')
        # Build the absolute URL of the travel-note detail page.
        url_1 = 'https://travel.qunar.com/travelbook/note/' + detail_id
        print(url_1)
        response_1 = requests.get(url_1).text
        selector_1 = parsel.Selector(response_1)
        # Breadcrumb: 3rd child of .b_crumb_cont holds the place name;
        # strip the trailing '旅游攻略' suffix.
        title = selector_1.css('.b_crumb_cont *:nth-child(3)::text').get().replace('旅游攻略', '')
        comment = selector_1.css('.title.white::text').get()
        date = selector_1.css('#js_mainleft > div.b_foreword > ul > li.f_item.when > p > span.data::text').get()
        days = selector_1.css('#js_mainleft > div.b_foreword > ul > li.f_item.howlong > p > span.data::text').get()
        money = selector_1.css('#js_mainleft > div.b_foreword > ul > li.f_item.howmuch > p > span.data::text').get()
        character = selector_1.css('#js_mainleft > div.b_foreword > ul > li.f_item.who > p > span.data::text').get()
        # "How to play" is a list of tags; join them with spaces.
        play_list = selector_1.css('#js_mainleft > div.b_foreword > ul > li.f_item.how > p > span.data span::text').getall()
        play = ' '.join(play_list)
        count = selector_1.css('.view_count::text').get()
        print(title, comment, date, days, money, character, play, count)
查找方法:
右键检查,查看css字段
title = selector_1.css('.b_crumb_cont *:nth-child(3)::text').get().replace('旅游攻略', '')
右键复制js路径
同理爬取其他数据
一个简单的例子
import requests
import parsel
import csv
import time
import random

# First page of the "hot" travel-guide list.
url = f'https://travel.qunar.com/travelbook/list.htm?page=1&order=hot_heat'
response = requests.get(url)
#print(response)
html_data = response.text
#print(html_data)
selector = parsel.Selector(html_data)
#print(selector)
# Collect the href of every guide entry on the listing page.
url_list = selector.css('.b_strategy_list li h2 a::attr(href)').getall()
#csv_writer.writerow(['地点', '作者','地点', '短评', '出发时间', '天数', '人均消费', '人物', '玩法', '浏览量', '点赞'])
#print(url_list)
for detail_url in url_list:
    # Remove the '/youji/' prefix to obtain the note id.
    detail_id = detail_url.replace('/youji/', '')
    # Build the absolute URL of the travel-note detail page.
    url_1 = 'https://travel.qunar.com/travelbook/note/' + detail_id
    print(url_1)
    response_1 = requests.get(url_1).text
    selector_1 = parsel.Selector(response_1)
    comment = selector_1.css('.title.white::text').get()
    # title = selector_1.css('.b_crumb_cont *:nth-child(3)::text').get().replace('旅游攻略', '')
    date = selector_1.css("#js_mainleft > div.b_foreword > ul > li.f_item.when > p > span.data::text").get()
    days = selector_1.css("#js_mainleft > div.b_foreword > ul > li.f_item.howlong > p > span.data::text").get()
    author = selector_1.css("body > div.qn_mainbox > div > div.left_bar > ul > li:nth-child(1) > p.user_info > span.intro > span.user_name > a::text").get()
    dianzan = selector_1.css("body > div.qn_mainbox > div > div.left_bar > ul > li:nth-child(1) > p.user_info > span.nums > span.icon_love > span::text").get()
    print(comment, date, days, author, dianzan)
如果数据不够:
import requests
import parsel
import csv
import time
import random

# Base URL of the site.
base_url = 'https://travel.qunar.com'
page_number = 1
data_count = 0

# Open the CSV file for appending and keep scraping pages until 100 rows
# have been written. `with` guarantees the file is flushed and closed.
with open('去哪儿.csv', mode='a', encoding='utf-8', newline='') as csv_qne:
    csv_writer = csv.writer(csv_qne)
    csv_writer.writerow(['地点', '短评', '出发时间', '天数', '人均消费', '人物', '玩法', '浏览量'])
    while data_count < 100:
        url = f'https://travel.qunar.com/travelbook/list.htm?page={page_number}&order=hot_heat'
        # Fetch the listing page.
        response = requests.get(url)
        html_data = response.text
        selector = parsel.Selector(html_data)
        # Collect the detail-page URLs on this listing page.
        url_list = selector.css('.b_strategy_list li h2 a::attr(href)').getall()
        for detail_url in url_list:
            # Remove the '/youji/' prefix to obtain the note id.
            detail_id = detail_url.replace('/youji/', '')
            url_1 = base_url + '/travelbook/note/' + detail_id
            print(url_1)
            # Fetch the detail page.
            response_1 = requests.get(url_1).text
            selector_1 = parsel.Selector(response_1)
            # Title: guard against a missing breadcrumb element.
            title_element = selector_1.css('.b_crumb_cont *:nth-child(3)::text').get()
            if title_element:
                title = title_element.replace('旅游攻略', '')
            else:
                title = None
            # Short comment / subtitle.
            comment = selector_1.css('.title.white::text').get()
            # Departure date.
            date = selector_1.css('#js_mainleft > div.b_foreword > ul > li.f_item.when > p > span.data::text').get()
            # Trip length in days.
            days = selector_1.css('#js_mainleft > div.b_foreword > ul > li.f_item.howlong > p > span.data::text').get()
            # Average cost per person.
            money = selector_1.css('#js_mainleft > div.b_foreword > ul > li.f_item.howmuch > p > span.data::text').get()
            # Travel companions.
            character = selector_1.css('#js_mainleft > div.b_foreword > ul > li.f_item.who > p > span.data::text').get()
            # Activities: a list of tags joined with spaces; None when absent.
            play_list = selector_1.css('#js_mainleft > div.b_foreword > ul > li.f_item.how > p > span.data span::text').getall()
            play = ' '.join(play_list) if play_list else None
            # View count.
            count = selector_1.css('.view_count::text').get()
            print(title, comment, date, days, money, character, play, count)
            # Write the row; None values become empty strings. The first row
            # gets a placeholder location when the title could not be parsed.
            if data_count == 0:
                row_data = [title or 'Sample Location', comment or '', date or '', days or '', money or '', character or '', play or '', count or '']
            else:
                row_data = [title or '', comment or '', date or '', days or '', money or '', character or '', play or '', count or '']
            csv_writer.writerow(row_data)
            data_count += 1
            if data_count >= 100:
                break
        # Pagination: extract the page numbers from the pager links.
        page_links = selector.css("body > div.qn_mainbox > div > div.left_bar > div.b_paging a::attr(href)").getall()
        page_link_numbers = [page_link.split('=')[-1] for page_link in page_links]
        page_numbers = [int(number) for number in page_link_numbers if number.isdigit()]
        if page_numbers:
            max_page = max(page_numbers)
            if page_number < max_page:
                page_number += 1
            else:
                # Last page reached: wrap around to page 1 and keep going
                # until 100 rows have been collected.
                page_number = 1
        else:
            print("未找到页码信息,可能出现问题,继续尝试下一页")
            page_number += 1
        # Random delay between listing pages to avoid hammering the server.
        time.sleep(random.randint(1, 3))