Python crawler: scraping epidemic data

#coding=utf-8
import requests
import os
import json
from jsonpath import jsonpath
import time
url='https://api.inews.qq.com/newsqa/v1/query/inner/publish/modules/list?modules=statisGradeCityDetail,diseaseh5Shelf'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36'
}
response = requests.get(url, headers=headers)
response.encoding = 'utf8'
ff = response.content
data = json.loads(ff)
# The two jsonpath queries return parallel lists: region names and today's confirmed counts
kk = jsonpath(data, '$..diseaseh5Shelf..name')
ll = jsonpath(data, '$..today.confirm')
dic = dict(zip(kk, ll))
print(dic)
cc = '%Y-%m-%d'
c = time.strftime(cc, time.localtime())
# Output file is named by date, e.g. 2022-06-29疫情.txt ('疫情' means 'epidemic')
ft = open(c + '疫情' + '.txt', 'w', encoding='utf-8')
ft.write('疫情日期')  # header: epidemic date
ft.write(c)
ft.write('\n')
for i, j in dic.items():
    # Regions that currently report new confirmed cases
    if j > 0:
        print(str(i))
        print(str(j))
        ft.write(str(i))
        ft.write('--')
        ft.write(str(j))
        ft.write('\n')
 
    '''
    # Data for all regions (uncomment to write every region, not only those with cases)
    print(str(i))
    print(str(j))
    ft.write(str(i))
    ft.write('--')
    ft.write(str(j))
    ft.write('\n')
    '''

ft.close()
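
The key step above is how the two jsonpath queries are combined: both expressions recursively search the whole response, come back as parallel lists (region names and today's confirmed counts), and dict(zip(...)) pairs them up by position. The snippet below is a minimal, self-contained sketch of that technique against a made-up sample structure; the field layout and region names are assumptions for illustration only, not the real API payload.

from jsonpath import jsonpath

# Made-up sample mirroring the nesting the script relies on (illustrative only)
sample = {
    'diseaseh5Shelf': {
        'areaTree': [
            {'name': 'RegionA', 'today': {'confirm': 3}},
            {'name': 'RegionB', 'today': {'confirm': 0}},
        ]
    }
}

names = jsonpath(sample, '$..diseaseh5Shelf..name')   # ['RegionA', 'RegionB']
counts = jsonpath(sample, '$..today.confirm')         # [3, 0]
print(dict(zip(names, counts)))                       # {'RegionA': 3, 'RegionB': 0}

Note that this jsonpath function returns False rather than an empty list when an expression matches nothing, so checking kk and ll before zipping them would let the script fail more gracefully if the API ever renames its fields.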
