import xlwt
import requests
import re
from lxml import etree
import time
from fontTools.ttLib import TTFont # pip install fontTools
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36'
}
def get_html(url):
    response = requests.get(url, headers=headers)
    # Match the download URL of the custom font file in the page's @font-face CSS
    font_url = re.findall("; src: url\('(.*?)'\) format", response.text)[1]
    font_response = requests.get(font_url)
    with open('字体文件.woff', mode='wb') as f:
        f.write(font_response.content)  # save the font file
    with open('替换前的网页.html', mode='w', encoding='utf-8') as f:
        f.write(response.text)  # save the page before replacement; response.text is unicode text
    response.encoding = response.apparent_encoding
    return response.text
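# For reference, the regex above targets a declaration of the form (URL is hypothetical):
#   @font-face { font-family: ...; src: url('https://example.com/xxxx.woff') format('woff') }
# re.findall returns every captured URL on the page; index [1] keeps the second match.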
def font_tran(html_page):
    fi = TTFont('字体文件.woff')
    fi.saveXML('font.xml')
    # Get the font's character map: {code point: glyph name}
    font_map = fi['cmap'].getBestCmap()
    print(font_map)
    # Dictionary used to turn glyph names into the characters they stand for
    d = {'zero': 0, 'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5,
         'six': 6, 'seven': 7, 'eight': 8, 'nine': 9, 'period': '.'}
    for key in font_map:
        font_map[key] = d[font_map[key]]
    print(font_map)
    # Replace every numeric HTML entity built from a mapped code point with the real character
    for key in font_map:
        html_page = html_page.replace('&#' + str(key) + ';', str(font_map[key]))
    with open('替换后的网页.html', mode='w', encoding='utf-8') as f:
        f.write(html_page)
    return html_page
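# Illustration of the decoding step (values are hypothetical; the real ones come from
# the downloaded font): getBestCmap() might return {100123: 'seven', 100124: 'period'},
# which d turns into {100123: 7, 100124: '.'}, so an obfuscated word count written as
# '&#100123;&#100124;&#100123;' in the raw HTML becomes '7.7' after the replacement loop.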
all_info_list = []
def get_info(html_page):  # takes the decoded page text returned by font_tran, not a URL
    selector = etree.HTML(html_page)
    infos = selector.xpath('//ul[@class="all-img-list cf"]/li')
    for info in infos:
        title = info.xpath('div[2]/h4/a/text()')[0]
        author = info.xpath('div[2]/p[1]/a[1]/text()')[0]
        style1 = info.xpath('div[2]/p[1]/a[2]/text()')[0]
        style2 = info.xpath('div[2]/p[1]/span/text()')[0]
        style = style1 + '.' + style2
        complete = info.xpath('div[2]/p[1]/span/text()')[0]
        introduce = info.xpath('div[2]/p[2]/text()')[0].strip()
        word = info.xpath('div[2]/p[3]/span/span/text()')[0].strip('万字')
        info_list = [title, author, style, complete, introduce, word]
        all_info_list.append(info_list)
    return all_info_list
if __name__ == '__main__':
    urls = ['https://www.qidian.com/mm/all?orderId=&style=1&pageSize=20&siteid=1&pubflag=0&hiddenField=0&page={}'.format(str(i)) for i in range(1, 6)]
    page_crawled = 1
    for url in urls:
        html = get_html(url)           # fetch the page and download its font file
        decoded = font_tran(html)      # replace the obfuscated entities with real characters
        get_info(decoded)              # parse the decoded page into all_info_list
        print("{} pages crawled".format(str(page_crawled)))
        page_crawled += 1
        time.sleep(1)
    header = ['题目', '作者', '类型', '连载状态', '介绍', '字数']
    book = xlwt.Workbook(encoding='utf-8')
    sheet = book.add_sheet('Sheet1')
    for h in range(len(header)):
        sheet.write(0, h, header[h])
    i = 1
    for row in all_info_list:  # write one book per row, below the header row
        j = 0
        for data in row:
            sheet.write(i, j, data)
            j += 1
        i += 1
    book.save('字数.xls')
This is a scraper for Qidian (起点), which obfuscates numbers with a custom web font as an anti-scraping measure. The part that had me stuck was how to call these functions together.
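The core issue is that none of the return values are passed along: get_html returns the page text, font_tran needs that text (not an undefined html variable), and get_info needs the decoded text rather than the URL. A minimal sketch of the intended data flow for a single listing page, using illustrative variable names:

raw_html = get_html(url)          # page text, with the matching .woff saved to disk
clean_html = font_tran(raw_html)  # numeric entities decoded via the font's cmap
rows = get_info(clean_html)       # parsed book records appended to all_info_list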