城枫墨凉 2017-12-07 09:21 采纳率: 20%
浏览 1822
已采纳

python爬虫,爬取百度百科python词条页面数据,是这个页面url的抓取不到还是其他原因?

控制台信息
1.URL管理器:
class UrlManager (object):

def __init__(self):
    self.new_urls = set()
    self.old_urls = set()

def add_new_url(self, url):
    if url is None:
        return  # 如果没有新的URL则不进行添加
    if url not in self.new_urls and url not in self.old_urls:
        self.new_urls.add(url)

def add_new_urls(self, urls):
    if urls is None or len(urls) == 0:
        return
    for url in urls:
        self.add_new_url(url)

def get_new_url(self):
    return len(self.new_urls) != 0

def has_new_url(self):
    new_url = self.new_urls.pop()
    self.old_urls.add(new_url)
    return new_url

2.网页下载器:
import urllib.request
import urllib.response
class HtmlDownloader(object):
def download(self, url):

    if url is None:
        return None
    response = urllib.request.urlopen(url)
    if response.getcode() != 200:
        return None
    return response.read()

3.网页解析器:
# coding:utf-8
from bs4 import BeautifulSoup
import re
import urllib.parse
class HtmlParser(object):
def parser(self, page_url, html_content):
if page_url is None or html_content is None:
return
soup = BeautifulSoup(html_content, 'html.parser', from_encoding='utf-8')
new_urls = self._get_new_urls(page_url, soup)
new_data = self._get_new_data(page_url, soup)
return new_urls, new_data

def _get_new_urls(self, page_url, soup):
new_urls = set()
# links = soup.find_all('a', href=re.compile(r"/item/\d+.htm"))
links = soup.find_all('a', href=re.compile(r"/item/(.*)"))
for link in links:
new_url = link['href']
new_full_url = urllib.parse.urljoin(page_url, new_url)
new_urls.add(new_url)
return new_urls

def get_new_data(self, page_url, soup):
res_data = {}
# url
res_data['url'] = page_url
# 标题

Python

(计算机程序设计语言)


# 简介

title_node = soup.find_all('dd', class='lemmaWgt-lemmaTitle-title').find('h1')
res_data['title'] = title_node.get_text()
summary_node = soup.find_all('div', class_='lemma-summary')
res_data['summmary'] = summary_node.get_text()
return res_data

4.页面输出:
class HtmlOutputer(object):

def __init__(self):
    self.datas=[]

def collectData(self, data):
    if data is None:
        return
    self.datas.append(data)

def output_html(self):

    fout = open('output.html', 'w')
    fout.write("<html>")
    fout.write("<body>")
    fout.write("<table>")
    for data in self.datas:
        fout.write("<tr>")
        fout.write("<td>%s</td>" % (data['url']).encode('utf-8'))
        fout.write("<td>%s</td>" % (data['title']).encode('utf-8'))
        fout.write("<td>%s</td>" % (data['summary']).encode('utf-8'))
        fout.write("</tr>")

    fout.write("</table>")
    fout.write("</body>")
    fout.write("</html>")
    fout.close()

  • 写回答

6条回答

  • raygenyang 2017-12-07 15:26
    关注

    def get_new_url(self):
    return len(self.new_urls) != 0

    def has_new_url(self):
    new_url = self.new_urls.pop()
    self.old_urls.add(new_url)
    return new_url

    这两个函数定义反了吧
    
    本回答被题主选为最佳回答 , 对您是否有帮助呢?
    评论
查看更多回答(5条)

报告相同问题?

悬赏问题

  • ¥15 求差集那个函数有问题,有无佬可以解决
  • ¥15 【提问】基于Invest的水源涵养
  • ¥20 微信网友居然可以通过vx号找到我绑的手机号
  • ¥15 寻一个支付宝扫码远程授权登录的软件助手app
  • ¥15 解riccati方程组
  • ¥15 display:none;样式在嵌套结构中的已设置了display样式的元素上不起作用?
  • ¥15 使用rabbitMQ 消息队列作为url源进行多线程爬取时,总有几个url没有处理的问题。
  • ¥15 Ubuntu在安装序列比对软件STAR时出现报错如何解决
  • ¥50 树莓派安卓APK系统签名
  • ¥65 汇编语言除法溢出问题