kyuzz
xxxTenc
采纳率0%
2020-12-30 17:19

NameError: name 'xx' is not defined是什么问题?如何修改

import xlwt
import requests
import re
from lxml import etree
import time
from fontTools.ttLib import TTFont # pip install fontTools

# HTTP request headers: spoof a desktop Chrome user agent so qidian.com
# serves the normal page instead of blocking the scraper.
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36'
}
def get_html(url):
    """Download *url*, save the anti-crawl web font and the raw page, and
    return the correctly decoded HTML text.

    Side effects (files written in the current directory):
      - '字体文件.woff'       the obfuscation font used by the site
      - '替换前的网页.html'   the page before glyph replacement
    Raises ValueError when the expected font URL cannot be found.
    """
    response = requests.get(url, headers=headers)
    # BUG FIX: fix the encoding BEFORE the first access to response.text,
    # otherwise the saved page and the regex input may be mojibake.
    response.encoding = response.apparent_encoding
    # The page embeds several @font-face src urls; the custom digit font is
    # the second match — fail loudly if the page layout changed.
    font_urls = re.findall(r"; src: url\('(.*?)'\) format", response.text)
    if len(font_urls) < 2:
        raise ValueError('font url not found in page: {}'.format(url))
    font_response = requests.get(font_urls[1])
    with open('字体文件.woff', mode='wb') as f:
        f.write(font_response.content)  # save the font file
    with open('替换前的网页.html', mode='w', encoding='utf-8') as f:
        f.write(response.text)  # save the page before replacement
    return response.text
def font_tran(text):
    """Decode the site's font-based digit obfuscation in *text*.

    Reads the previously downloaded '字体文件.woff', maps each private-use
    codepoint to its real character via the font's cmap glyph names, then
    replaces every numeric character reference ('&#NNNNN;') in *text*.

    Side effects: writes 'font.xml' (debug dump) and '替换后的网页.html'.
    Returns the decoded HTML string.
    """
    font = TTFont('字体文件.woff')
    font.saveXML('font.xml')  # XML dump kept for manual inspection
    # cmap maps codepoint (int) -> glyph name, e.g. 57344 -> 'seven'
    font_map = font['cmap'].getBestCmap()
    # Glyph name -> real character used on the page.
    name_to_char = {'zero': 0, 'one': 1, 'two': 2, 'three': 3, 'four': 4,
                    'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9,
                    'period': '.'}
    for codepoint in font_map:
        font_map[codepoint] = name_to_char[font_map[codepoint]]
    # BUG FIX: the original replaced into an (undefined) local 'html_page'
    # instead of using the 'text' parameter, raising UnboundLocalError.
    html_page = text
    for codepoint in font_map:
        html_page = html_page.replace('&#' + str(codepoint) + ';',
                                      str(font_map[codepoint]))
    with open('替换后的网页.html', mode='w', encoding='utf-8') as f:
        f.write(html_page)  # save the decoded page
    return html_page

# Accumulates one [title, author, style, complete, introduce, word] row per book.
all_info_list = []

def get_info(url, page=None):
    """Parse one qidian.com listing page and append every book's fields to
    the module-level ``all_info_list``.

    Args:
        url:  listing-page URL (kept for interface compatibility; the
              parsing itself works on *page*).
        page: decoded HTML to parse. When None, falls back to the
              module-level global ``html_page`` which the caller must have
              set (e.g. ``html_page = font_tran(...)``).
    Returns:
        The (shared, mutated) ``all_info_list``.
    """
    html = page if page is not None else html_page
    selector = etree.HTML(html)
    infos = selector.xpath('//ul[@class="all-img-list cf"]/li')
    for info in infos:
        title = info.xpath('div[2]/h4/a/text()')[0]
        author = info.xpath('div[2]/p[1]/a[1]/text()')[0]
        style1 = info.xpath('div[2]/p[1]/a[2]/text()')[0]
        style2 = info.xpath('div[2]/p[1]/span/text()')[0]
        style = style1 + '.' + style2
        # NOTE(review): 'complete' reads the same node as style2 — looks like
        # the serialization-status span; verify the xpath against the page.
        complete = info.xpath('div[2]/p[1]/span/text()')[0]
        introduce = info.xpath('div[2]/p[2]/text()')[0].strip()
        word = info.xpath('div[2]/p[3]/span/span/text()')[0].strip('万字')
        all_info_list.append([title, author, style, complete, introduce, word])
    # BUG FIX: the original had an unreachable 'time.sleep(1)' AFTER the
    # return statement; dead code removed.
    return all_info_list
 
if __name__ == '__main__':
    # Crawl the first 5 listing pages of qidian.com's "all books" view.
    urls = ['https://www.qidian.com/mm/all?orderId=&style=1&pageSize=20&siteid=1&pubflag=0&hiddenField=0&page={}'.format(str(i)) for i in range(1, 6)]
    for page_crawled, url in enumerate(urls, start=1):
        # BUG FIX: the original discarded get_html()'s return value and then
        # called font_tran(html) with an undefined name 'html' (the NameError
        # the asker hit). Thread the data through explicitly, and bind the
        # module-level 'html_page' that get_info() reads.
        raw_html = get_html(url)
        html_page = font_tran(raw_html)
        get_info(url)
        print("{} pages has crawled".format(str(page_crawled)))

    # Dump everything collected into an .xls workbook.
    header = ['题目','作者','类型','连载状态','介绍','字数']
    book = xlwt.Workbook(encoding='utf-8')
    sheet = book.add_sheet('Sheet1')
    for col, title in enumerate(header):
        sheet.write(0, col, title)
    # BUG FIX: the original loop variable shadowed the builtin 'list'; use
    # enumerate instead of hand-maintained row/column counters.
    for row, info in enumerate(all_info_list, start=1):
        for col, cell in enumerate(info):
            sheet.write(row, col, cell)
    book.save('字数.xls')

这是一个起点中文网的爬虫,目标网站做了字体反爬。但调用函数的时候懵逼了。

  • 点赞
  • 写回答
  • 关注问题
  • 收藏
  • 复制链接分享
  • 邀请回答

1条回答

  • weixin_43178103 the file is not 3月前

    报错信息给看一下,大概是你运行该文件时候,当前目录下并没有该文件,如题中83行save(字数.xls)

    点赞 评论 复制链接分享