# Scraper for the Huangshan public resource trading site (ggzy.huangshan.gov.cn):
# fetches one page of the notice list API and prints the extracted fields.
import csv
from concurrent.futures import ThreadPoolExecutor

import requests
from lxml import etree
url = 'http://ggzy.huangshan.gov.cn/EWB-FRONT/rest/webbuilderserverforHeFZTB/getinfolistnew'
headers = {
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36 SE 2.X MetaSr 1.0",
    "Authorization": "Bearer c72f427e19182317f66b7bd0080e4a64",
}
# Query parameters for the notice list; pageSize/pageIndex control paging.
payload = {
    "categorynum": "004",   # category code of the list being scraped
    "xiaqucode": "",        # district code (empty = all districts)
    "title": "",            # title keyword filter
    "startdate": "",
    "enddate": "",
    "siteguid": "7eb5f7f1-9041-43ad-8e13-8fcb82ea831a",
    "pageSize": 10,
    "pageIndex": 14,
}
# The Content-Type header declares form-encoded data, so send the payload with
# data= rather than json= (json= would send a JSON body that contradicts the
# declared content type).
resp = requests.post(url, headers=headers, data=payload)
print(resp.text)
# The response body carries the result table as HTML; parse it and pull the
# fields out of each table body. (If the endpoint wraps the HTML in JSON,
# that field would need to be extracted before parsing.)
html = etree.HTML(resp.text)
tbodies = html.xpath("//table/tbody")
for tbody in tbodies:
    area = ""
    name = tbody.xpath(".//div/text()")        # notice titles
    date = tbody.xpath(".//td[4]/text()")      # publication dates
    wz = tbody.xpath(".//td[2]/a/@onclick")    # onclick handlers holding the detail link
    print(name)
    print(wz)
    print(date)
    # Leftover CSV export from an earlier Wuhu scraper: csvwriter is never created
    # and the base URL belongs to whsggzy.wuhu.gov.cn, not this site. A working
    # equivalent is sketched in export_pages_to_csv below.
    # txt = [(area, name[i], date[i], "http://whsggzy.wuhu.gov.cn" + wz[i]) for i in range(len(name)) if len(wz) >= len(date) >= len(name)]
    # csvwriter.writerows(txt)
    # print(url, "extraction complete")
print('All downloads complete.')
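
# A sketch of the CSV export the commented-out lines above were heading toward,
# combined with the otherwise unused ThreadPoolExecutor import to fetch several
# pages concurrently. The output filename, column layout, and the assumption that
# each row's title/date/onclick lists line up are guesses, not taken from the
# original script.
def export_pages_to_csv(page_indexes, out_path="huangshan_notices.csv"):
    with open(out_path, "w", newline="", encoding="utf-8-sig") as f:
        csvwriter = csv.writer(f)
        csvwriter.writerow(["area", "title", "date", "onclick"])
        # Fetch the requested pages in parallel; each worker returns raw HTML.
        with ThreadPoolExecutor(max_workers=4) as pool:
            pages = pool.map(fetch_page, page_indexes)
        for page in pages:
            doc = etree.HTML(page)
            if doc is None:
                continue
            for tbody in doc.xpath("//table/tbody"):
                names = tbody.xpath(".//div/text()")
                dates = tbody.xpath(".//td[4]/text()")
                clicks = tbody.xpath(".//td[2]/a/@onclick")
                # Only keep rows where the three lists can be zipped safely.
                for title, date, click in zip(names, dates, clicks):
                    csvwriter.writerow(["", title.strip(), date.strip(), click])

# Example: export_pages_to_csv(range(1, 6)) would pull pages 1-5 into one CSV.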