A question about crawling Baidu Baike, asking everyone for advice. The code is below; running it reports an error.

# -*- coding: utf-8 -*-

"""
Spyder Editor

This is a temporary script file.
"""
import urllib2
from bs4 import BeautifulSoup
import re
import urlparse

class UrlManager(object):
    # keeps track of URLs waiting to be crawled and URLs already crawled
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url

class HtmlDownloader(object):
    # fetches a page with urllib2 and returns its raw HTML
    def download(self, url):
        if url is None:
            return None

        response = urllib2.urlopen(url)

        if response.getcode() != 200:
            return None

        return response.read()

class HtmlParser(object):
    # extracts follow-up /item/ links and the lemma title/summary from a page
    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        links = soup.find_all('a', href=re.compile(r'/item/(.*)'))
        for link in links:
            new_url = link['href']
            new_full_url = urlparse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}

        # url
        res_data['url'] = page_url

        # <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data['title'] = title_node.get_text()

        # <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()

        return res_data

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return

        # the built-in parser is named 'html.parser', not 'html_parser'
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

class HtmlOutputer(object):
    # collects parsed records and writes them out as an HTML table
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('output.html', 'w')

        fout.write("<html>")
        fout.write("<body>")
        fout.write("<table>")

        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data['url'])
            fout.write("<td>%s</td>" % data['title'].encode('utf-8'))
            fout.write("<td>%s</td>" % data['summary'].encode('utf-8'))
            fout.write("</tr>")

        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")

        fout.close()

The main spider scheduler:

class SpiderMain(object):
    # the constructor initializes four objects: the URL manager,
    # the HTML downloader, the HTML parser and the outputer
    def __init__(self):
        # URL manager
        self.urls = UrlManager()
        # URL downloader
        self.downloader = HtmlDownloader()
        # URL parser
        self.parser = HtmlParser()
        # final output
        self.outputer = HtmlOutputer()

    # spider scheduler
    def craw(self, root_url):
        count = 1
        # add the entry URL
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                # take out a new URL
                new_url = self.urls.get_new_url()
                # download the page behind that URL
                print("craw %d : %s" % (count, new_url))
                html_cont = self.downloader.download(new_url)
                # parse the page to get new links and new data
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                # add the new URLs to the URL manager
                self.urls.add_new_urls(new_urls)
                # collect the parsed content
                self.outputer.collect_data(new_data)

                if count == 1000:  # 1000 pages is enough
                    break
                count = count + 1

            except:
                print("craw fail")
        # finally write out the crawled content
        self.outputer.output_html()

The main function starts the spider:

if __name__ == "__main__":
    # root_url = "http://baike.baidu.com/item/Python/407313?fr=aladdin"
    root_url = "http://baike.baidu.com/item/Python/407313?fr=aladdin"
    obj_Spider = SpiderMain()
    obj_Spider.craw(root_url)

The console output is:

runfile('C:/Users/sun/.spyder/temp.py', wdir='C:/Users/sun/.spyder')
craw 1 : http://baike.baidu.com/item/Python/407313?fr=aladdin
craw fail

3 answers

Try something simpler first: check whether you can even reach Baidu Baike with urlopen. Big sites generally have anti-crawler measures in place.
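
A minimal sketch of that kind of smoke test, staying with Python 2 / urllib2 as in the question (the Mozilla/5.0 User-Agent string is only an illustrative value, not something the site is known to require):

import urllib2

def smoke_test(url):
    # send a browser-like User-Agent in case the default one is rejected
    request = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    response = urllib2.urlopen(request, timeout=10)
    print(response.getcode())       # expect 200
    print(len(response.read()))     # a non-zero length means the page came back

if __name__ == '__main__':
    smoke_test('http://baike.baidu.com/item/Python/407313?fr=aladdin')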

young951023
西门吹瓶哥 replying to eagle1024: Tracked it down: the problem is in writing the file, but I haven't fixed it yet. Thanks!
almost 3 years ago · Reply
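
Since the failure turned out to be in writing the file, one common cause in this kind of Python 2 code is mixing unicode and byte strings in fout.write. A minimal sketch of an output_html that sidesteps that by opening the file with an explicit encoding (it assumes data['title'] and data['summary'] are unicode, which is what get_text() returns):

import codecs

def output_html(self):
    # open with an explicit encoding so unicode values can be written directly,
    # without calling .encode('utf-8') on each field
    fout = codecs.open('output.html', 'w', encoding='utf-8')
    fout.write(u"<html><body><table>")
    for data in self.datas:
        fout.write(u"<tr>")
        fout.write(u"<td>%s</td>" % data['url'])
        fout.write(u"<td>%s</td>" % data['title'])
        fout.write(u"<td>%s</td>" % data['summary'])
        fout.write(u"</tr>")
    fout.write(u"</table></body></html>")
    fout.close()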
eagle1024
eagle1024 replying to young951023: If you can't pin it down, add print/log statements at the entry and exit of each function first, see which function the error is raised in, and then narrow it down from there.
almost 3 years ago · Reply
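
A minimal sketch of that entry/exit logging, plus printing the traceback that the bare except in craw currently swallows (the traced helper and where to apply it are suggestions, not part of the original code):

import functools
import logging
import traceback

logging.basicConfig(level=logging.INFO)

def traced(func):
    # log entry and exit so the failing call stands out in the console
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        logging.info("enter %s", func.__name__)
        result = func(*args, **kwargs)
        logging.info("exit %s", func.__name__)
        return result
    return wrapper

# e.g. wrap the suspect methods:
#   HtmlParser.parse = traced(HtmlParser.parse)
#   HtmlOutputer.output_html = traced(HtmlOutputer.output_html)
# and replace the bare `except:` in craw with
#   except Exception:
#       traceback.print_exc()   # shows exactly which line raised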
young951023
西门吹瓶哥: getcode() returns 200, and I have gone over the code several times, but I really can't tell where it goes wrong.
almost 3 years ago · Reply
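
If getcode() is 200 the download step looks fine, so the failure is somewhere in parsing or output; a quick way to confirm is to run the parser on a single page by itself (this assumes the classes from the question are defined in the same script):

downloader = HtmlDownloader()
parser = HtmlParser()

url = "http://baike.baidu.com/item/Python/407313?fr=aladdin"
html_cont = downloader.download(url)
new_urls, new_data = parser.parse(url, html_cont)
print(len(new_urls))                      # how many /item/ links were found
print(new_data['title'].encode('utf-8'))  # the lemma title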
young951023
西门吹瓶哥: Hi, I don't quite follow what you mean. Could you be more specific?
almost 3 years ago · Reply

https://zhidao.baidu.com/question/201346316433987885.html
