Why does my spider fetch no data after I override Scrapy's start_requests method?
I've checked the User-Agent and it looks fine, and settings.py is configured as well.
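For reference, Spider.holiday.tool.SpiderHeader isn't shown below; it just exposes a list named headers holding User-Agent strings, roughly like this (simplified sketch, my real list has more entries):

# SpiderHeader.py (simplified sketch)
headers = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.0 Safari/605.1.15',
]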
Here is my spider file:
import random

import scrapy

from ..items import MaoyanItem
from Spider.holiday.tool.SpiderHeader import headers


class MaoyanSpider(scrapy.Spider):
    name = "maoyan"
    allowed_domains = ["www.maoyan.com"]
    header = {'User-Agent': random.choice(headers)}

    # Override start_requests() and hand every URL to the scheduler
    def start_requests(self):
        # Enqueue all the URLs with the scheduler in one go
        for page_num in range(1, 2):
            url = 'https://fz.lianjia.com/ershoufang/pg{}/'.format(page_num)
            yield scrapy.Request(
                url=url,
                headers=self.header,
                callback=self.parse_html  # parse callback
            )

    def parse_html(self, response):
        # Base XPath matching each listing node
        base_xpath = response.xpath(
            '//li[@class="clear LOGCLICKDATA"]/div[@class="info clear"]')
        for cd in base_xpath:
            # Create an item (class MaoyanItem() defined in items.py)
            item = MaoyanItem()
            # .get() extracts the first match as a plain string
            item['title'] = cd.xpath('./div[@class="title"]/a/text()').get()
            item['location'] = cd.xpath('./div[@class="flood"]/div[@class="positionInfo"]/a[@target="_blank"]/text()').get()
            item['price'] = cd.xpath('./div[@class="priceInfo"]/div[@class="totalPrice totalPrice2"]/span/text()').get()
            # Hand the scraped item to the pipeline
            yield item
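As I understand it, overriding start_requests() simply replaces the stock implementation, which (going by the Scrapy source) is roughly the sketch below, so my version should only change where the URLs come from:

# Roughly what the default Spider.start_requests does:
def start_requests(self):
    for url in self.start_urls:
        # note the default yields requests with dont_filter=True
        yield scrapy.Request(url, dont_filter=True)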
settings.py
import random
from Spider.holiday.tool.SpiderHeader import headers
BOT_NAME = "Maoyan"
SPIDER_MODULES = ["Maoyan.spiders"]
NEWSPIDER_MODULE = "Maoyan.spiders"
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = "Maoyan (+http://www.yourdomain.com)"
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# LOG_LEVEL = 'INFO'
# LOG_FILE = 'maoyan.log'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "en",
    "User-Agent": random.choice(headers),
}
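# NOTE: random.choice(headers) above runs once, when settings are loaded,
# so every request in a given run reuses that single User-Agent value.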
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# "Maoyan.middlewares.MaoyanSpiderMiddleware": 543,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# "Maoyan.middlewares.MaoyanDownloaderMiddleware": 543,
# }
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# "scrapy.extensions.telnet.TelnetConsole": None,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    # 300 is the priority (1-1000); the smaller the number, the higher the priority
    "Maoyan.pipelines.MaoyanPipeline": 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = "httpcache"
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"
# Set settings whose default value is deprecated to a future-proof value
REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
FEED_EXPORT_ENCODING = "utf-8"
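One thing I noticed while writing this up: since random.choice in DEFAULT_REQUEST_HEADERS runs only once at settings load, rotating the UA per request would need a downloader middleware instead. A hypothetical sketch (RandomUAMiddleware is not in my project, just illustrating):

# middlewares.py -- hypothetical per-request UA rotation
import random
from Spider.holiday.tool.SpiderHeader import headers

class RandomUAMiddleware:
    def process_request(self, request, spider):
        # assign a fresh User-Agent to every outgoing request
        request.headers['User-Agent'] = random.choice(headers)
        return None  # let the request continue through the chain

It would be enabled via DOWNLOADER_MIDDLEWARES, e.g. {"Maoyan.middlewares.RandomUAMiddleware": 543}.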
items.py
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy


class MaoyanItem(scrapy.Item):
    # define the fields for your item here like:
    title = scrapy.Field()
    location = scrapy.Field()
    price = scrapy.Field()
pipelines.py
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class MaoyanPipeline(object):
    def process_item(self, item, spider):
        print(item['title'])
        print(item['location'])
        print(item['price'])
        return item
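For completeness, I run the spider from the project root with:

scrapy crawl maoyan

and none of the pipeline's print() output ever appears.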