城枫墨凉
2017-12-07 09:21
采纳率: 20%
浏览 1.8k

python爬虫,爬取百度百科python词条页面数据,是这个页面url的抓取不到还是其他原因?

控制台信息
1.URL管理器:
class UrlManager (object):

def __init__(self):
    self.new_urls = set()
    self.old_urls = set()

def add_new_url(self, url):
    if url is None:
        return  # 如果没有新的URL则不进行添加
    if url not in self.new_urls and url not in self.old_urls:
        self.new_urls.add(url)

def add_new_urls(self, urls):
    if urls is None or len(urls) == 0:
        return
    for url in urls:
        self.add_new_url(url)

def get_new_url(self):
    return len(self.new_urls) != 0

def has_new_url(self):
    new_url = self.new_urls.pop()
    self.old_urls.add(new_url)
    return new_url

2.网页下载器:
import urllib.request
import urllib.response
class HtmlDownloader(object):
    """Fetches the raw bytes of a web page over HTTP."""

    def download(self, url):
        """Return the response body as bytes, or None if url is missing
        or the server does not answer with HTTP 200.

        Bug fix: the original never closed the urlopen response, leaking
        the underlying socket; the response is now used as a context
        manager so it is closed on every path.
        """
        if url is None:
            return None
        with urllib.request.urlopen(url) as response:
            if response.getcode() != 200:
                return None
            return response.read()

3.网页解析器:
# coding:utf-8
from bs4 import BeautifulSoup
import re
import urllib.parse
class HtmlParser(object):
    """Extracts follow-up /item/ links and lemma data from a Baidu Baike page."""

    def parser(self, page_url, html_content):
        """Parse html_content fetched from page_url.

        Returns (new_urls, new_data) — a set of absolute URLs to crawl next
        and a dict with 'url', 'title', 'summary' keys — or None when either
        argument is missing.
        """
        if page_url is None or html_content is None:
            return
        soup = BeautifulSoup(html_content, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        """Collect absolute URLs of all /item/ links found on the page."""
        new_urls = set()
        links = soup.find_all('a', href=re.compile(r"/item/(.*)"))
        for link in links:
            new_url = link['href']
            new_full_url = urllib.parse.urljoin(page_url, new_url)
            # Bug fix: the original added the relative href (new_url) and
            # threw the joined absolute URL away, so later downloads failed.
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        """Extract the entry title and summary text.

        Bug fixes vs the original:
        - defined as get_new_data but called as self._get_new_data -> renamed;
        - 'class' is a Python keyword, BeautifulSoup takes class_= instead;
        - find() (not find_all()) returns a single Tag, so .find('h1') and
          .get_text() can be chained; find_all() returns a ResultSet which
          has neither method;
        - result key was misspelled 'summmary' while HtmlOutputer reads
          data['summary'].
        """
        res_data = {}
        res_data['url'] = page_url
        # 标题 (title), e.g. "Python (计算机程序设计语言)"
        title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        res_data['title'] = title_node.get_text()
        # 简介 (summary paragraph)
        summary_node = soup.find('div', class_='lemma-summary')
        res_data['summary'] = summary_node.get_text()
        return res_data

4.页面输出:
class HtmlOutputer(object):
    """Accumulates parsed records and renders them as an HTML table file."""

    def __init__(self):
        self.datas = []  # collected record dicts with url/title/summary keys

    def collectData(self, data):
        """Store one parsed record; None is silently ignored."""
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        """Write every collected record into output.html as a table row.

        Bug fixes vs the original: the file is opened with an explicit
        utf-8 encoding and closed via a context manager even on error;
        values are written as str — the old "%s" % value.encode('utf-8')
        rendered Python 3 bytes reprs like b'...' into the page.
        """
        with open('output.html', 'w', encoding='utf-8') as fout:
            fout.write("<html>")
            fout.write("<body>")
            fout.write("<table>")
            for data in self.datas:
                fout.write("<tr>")
                fout.write("<td>%s</td>" % data['url'])
                fout.write("<td>%s</td>" % data['title'])
                fout.write("<td>%s</td>" % data['summary'])
                fout.write("</tr>")
            fout.write("</table>")
            fout.write("</body>")
            fout.write("</html>")

  • 写回答
  • 关注问题
  • 收藏
  • 邀请回答

6条回答 默认 最新

  • raygenyang 2017-12-07 15:26
    已采纳

    def get_new_url(self):
    return len(self.new_urls) != 0

    def has_new_url(self):
    new_url = self.new_urls.pop()
    self.old_urls.add(new_url)
    return new_url

    这两个函数定义反了吧
    
    已采纳该答案
    打赏 评论
  • 城枫墨凉 2017-12-07 09:26

    图片说明

    打赏 评论
  • 城枫墨凉 2017-12-07 09:26

    图片说明

    打赏 评论
  • 城枫墨凉 2017-12-07 09:31

    这是原来的url,一样的问题

    打赏 评论
  • 城枫墨凉 2017-12-07 09:33

    上图是原来的URL,一样的问题

    打赏 评论
  • L1514860026 2017-12-07 11:53

    百度可能反爬虫技术太强,不行就换成爬知乎试试

    打赏 评论

相关推荐 更多相似问题