Weather site (www.weather.com.cn):
import requests
from bs4 import BeautifulSoup
import time
from pyecharts.charts import Bar
from pyecharts import options as opts

url_hb = 'http://www.weather.com.cn/textFC/hb.shtml'
url_db = 'http://www.weather.com.cn/textFC/db.shtml'
url_hd = 'http://www.weather.com.cn/textFC/hd.shtml'
url_hz = 'http://www.weather.com.cn/textFC/hz.shtml'
url_hn = 'http://www.weather.com.cn/textFC/hn.shtml'
url_xb = 'http://www.weather.com.cn/textFC/xb.shtml'
url_xn = 'http://www.weather.com.cn/textFC/xn.shtml'
url_gat = 'http://www.weather.com.cn/textFC/gat.shtml'

# All eight region pages to crawl
url_areas = [url_hb, url_db, url_hd, url_hz, url_hn, url_xb, url_xn, url_gat]

HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Referer': 'http://www.weather.com.cn/textFC/hb.shtml'
}

ALL_DATA = []


def paser_url(url):
    response = requests.get(url, headers=HEADERS)
    text = response.content.decode('utf-8')
    # html5lib tolerates the malformed markup on some of these pages
    soup = BeautifulSoup(text, 'html5lib')
    conMidtab = soup.find('div', class_='conMidtab')
    tabels = conMidtab.find_all('table')
    for tabel in tabels:
        trs = tabel.find_all('tr')[2:]  # skip the two header rows
        for index, tr in enumerate(trs):
            tds = tr.find_all('td')
            city_td = tds[0]
            if index == 0:
                # The first data row starts with the province cell, so the city is the second cell
                city_td = tds[1]
            city = list(city_td.stripped_strings)[0]
            min_temp_td = tds[-2]
            wind_td = tds[-3]
            tq_td = tds[-4]
            max_temp_td = tds[-5]
            soup = BeautifulSoup(str(wind_td), 'html.parser')
            spans = soup.find_all('span')
            wind_direction = spans[0].text            # e.g. '北风' (north wind)
            wind_level = spans[1].text.strip('<>级')  # e.g. '3'
            min_temp = list(min_temp_td.stripped_strings)[0]
            tq_data = list(tq_td.stripped_strings)[0]
            max_temp = list(max_temp_td.stripped_strings)[0]
            ALL_DATA.append({'city': city, 'max_temp': int(max_temp), 'min_temp': int(min_temp),
                             'wind_direction': wind_direction, 'wind_level': wind_level,
                             'tq_data': tq_data})


def spider():
    for index, url in enumerate(url_areas):
        paser_url(url)
        print('Region {} crawled'.format(index + 1))
        time.sleep(1)  # pause briefly between regions


def main():
    spider()
    print(ALL_DATA)


if __name__ == '__main__':
    main()
The paser_url(url) function parses one region page's HTML, extracts the city name, daytime high, overnight low, wind direction, wind level, and weather description, and appends them to the ALL_DATA list as a dictionary, one record per city (an illustrative record is shown below).
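For reference, each ALL_DATA entry ends up shaped roughly like this; the values below are illustrative only, not real scraped output:

# Illustrative only: the shape of one ALL_DATA record (values are made up)
example_record = {
    'city': '北京',             # city name
    'max_temp': 5,              # daytime high, °C
    'min_temp': -3,             # overnight low, °C
    'wind_direction': '北风',   # wind direction
    'wind_level': '3',          # wind force level (kept as a string)
    'tq_data': '晴'             # weather description
}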
The spider() function iterates over all region URLs, calls paser_url(url) for each one, and pauses for one second after each call.
The script uses the requests library to send the HTTP requests, BeautifulSoup to parse the HTML, and time to pause between requests; pyecharts' Bar and opts are imported at the top but never used in this snippet (a charting sketch follows below).
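Since Bar and opts are imported but unused, here is a minimal sketch of how the collected data might be charted with pyecharts. It assumes ALL_DATA has already been filled by spider(); render_top10 and the output filename are illustrative names, not part of the original script:

from pyecharts.charts import Bar
from pyecharts import options as opts


def render_top10(data, path='max_temp.html'):
    # Sort by daytime high and keep the ten hottest cities
    top = sorted(data, key=lambda d: d['max_temp'], reverse=True)[:10]
    chart = (
        Bar()
        .add_xaxis([d['city'] for d in top])
        .add_yaxis('max temp (°C)', [d['max_temp'] for d in top])
        .set_global_opts(title_opts=opts.TitleOpts(title='Top 10 daytime highs'))
    )
    chart.render(path)  # writes an interactive HTML chart

Calling render_top10(ALL_DATA) after spider() finishes would write the chart next to the script.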
Result:
Note: don't crawl too heavily; keep requests spaced out (a gentler fetch helper is sketched below).
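As a rough sketch of a gentler fetch, the helper below adds a timeout, a status check, and a simple back-off retry. polite_get is a hypothetical name, not part of the original code, and it assumes the HEADERS dict defined in the script above:

import time

import requests


def polite_get(url, retries=3):
    # Fetch a page politely: time out slow responses and back off before retrying.
    # HEADERS is the dict defined in the script above.
    for attempt in range(retries):
        try:
            response = requests.get(url, headers=HEADERS, timeout=10)
            response.raise_for_status()
            return response
        except requests.RequestException:
            time.sleep(2 ** attempt)  # wait 1s, 2s, 4s between attempts
    raise RuntimeError('failed to fetch {}'.format(url))

paser_url() could then call polite_get(url) in place of requests.get(), and the one-second sleep in spider() could be lengthened if the site starts refusing requests.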