西门吹瓶哥 2017-10-19 09:26 · Acceptance rate: 100%
1351 views
Accepted

A question about crawling Baidu Baike with a spider; I'd appreciate any help. The code is below, and running it reports an error.

# -*- coding: utf-8 -*-

"""
Spyder Editor

This is a temporary script file.
"""
import urllib2
from bs4 import BeautifulSoup
import re
import urlparse

class UrlManager(object):
    # URL manager: keeps the URLs waiting to be crawled separate from those already crawled
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url

class HtmlDownloader(object):
    # Downloader: fetches a page and returns its HTML, or None on failure
    def download(self, url):
        if url is None:
            return None

        response = urllib2.urlopen(url)

        if response.getcode() != 200:
            return None

        return response.read()

class HtmlParser(object):
    # Parser: extracts new /item/ links and the title/summary of the current page
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        links = soup.find_all('a', href=re.compile(r'/item/(.*)'))
        for link in links:
            new_url = link['href']
            new_full_url = urlparse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}

        # url
        res_data['url'] = page_url

        # <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data['title'] = title_node.get_text()

        # <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()

        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return

        soup = BeautifulSoup(html_cont, 'html_parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

class HtmlOutputer(object):
    # Outputer: collects the parsed data and writes it to an HTML table
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('output.html', 'w')

        fout.write("<html>")
        fout.write("<body>")
        fout.write("<table>")

        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data['url'])
            fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
            fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
            fout.write("</tr>")

        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")

        fout.close()

# Main scheduler for the spider

class SpiderMain(object):
    # the constructor initializes the four objects: URL manager, HTML downloader, HTML parser, and outputer
    def __init__(self):
        # URL manager
        self.urls = UrlManager()
        # URL downloader
        self.downloader = HtmlDownloader()
        # URL parser
        self.parser = HtmlParser()
        # final output
        self.outputer = HtmlOutputer()

    # spider scheduling loop
    def craw(self, root_url):
        count = 1
        # add the entry URL
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                # take a new URL out of the manager
                new_url = self.urls.get_new_url()
                # download the page for that URL
                print("craw %d : %s" % (count, new_url))
                html_cont = self.downloader.download(new_url)
                # parse the page to get new links and the page content
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                # add the new URLs to the URL manager
                self.urls.add_new_urls(new_urls)
                # collect the parsed content
                self.outputer.collect_data(new_data)

                if count == 1000:  # crawling 1000 pages is enough
                    break
                count = count + 1

            except:
                print("craw fail")
        # finally write out everything that was crawled
        self.outputer.output_html()

# Entry point: start the spider

if __name__ == "__main__":
    # root_url = "http://baike.baidu.com/item/Python/407313?fr=aladdin"
    root_url = "http://baike.baidu.com/item/Python/407313?fr=aladdin"
    obj_Spider = SpiderMain()
    obj_Spider.craw(root_url)

The console output is:

runfile('C:/Users/sun/.spyder/temp.py', wdir='C:/Users/sun/.spyder')
craw 1 : http://baike.baidu.com/item/Python/407313?fr=aladdin
craw fail
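One thing that makes this hard to debug is the bare except: in craw(): it swallows the real exception and only prints "craw fail". A minimal, self-contained sketch of the idea of printing the full traceback instead (craw_step here is just a hypothetical stand-in for the loop body, not part of the code above):

import traceback

def craw_step():
    # hypothetical stand-in for the body of the while loop in craw()
    raise ValueError("simulated failure inside the loop")

try:
    craw_step()
except Exception:
    # instead of only print("craw fail"), show the full traceback
    print("craw fail")
    traceback.print_exc()

With the traceback visible, the line that actually fails inside download() or parse() can be identified directly.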

4 answers

  • eagle1024 2017-10-19 13:59

Start with something simpler first: check whether you can even access Baidu Baike with urlopen. Large sites generally have anti-scraping measures in place.
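A minimal sketch of that kind of test, assuming Python 2 to match the urllib2 import in the question; the User-Agent header and its value are assumptions, added because a bare script request is often rejected:

# -*- coding: utf-8 -*-
import urllib2

url = "http://baike.baidu.com/item/Python/407313?fr=aladdin"

# add a browser-like User-Agent, since a bare urlopen request may be rejected
# (the exact header value is an assumption; any common browser string should do)
request = urllib2.Request(url, headers={
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
})
response = urllib2.urlopen(request)
print(response.getcode())       # expect 200 if the page is reachable
print(response.read()[:200])    # first part of the HTML, just to confirm real content came back

If even this simple request does not return 200 (or raises an error), the problem is access to the site rather than the parsing code.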

This answer was selected by the asker as the best answer.

