Problem: I am using the Scrapy framework to crawl an AJAX-driven page, taking the history query of the China Earthquake Networks Center (CEIC) as an example. I want to fetch all records within a given time range, but my code does not achieve that. I have looked at some related questions and ended up with the code below, which still does not work. Could anyone suggest what is wrong? (It is a Scrapy project; the rest of the configuration, items, pipelines, settings, has been tested and is fine.)
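For reference, the AJAX endpoint can first be checked outside Scrapy, e.g. with the requests library. This is only a sketch and assumes the endpoint also accepts the same parameters as a GET query string (the spider below sends them as POST form data instead):

# quick endpoint check (sketch):
import requests

params = {
    'page': '1',
    'start': '2019-03-25', 'end': '2020-03-25',
    'jingdu1': '', 'jingdu2': '',
    'weidu1': '', 'weidu2': '',
    'height1': '', 'height2': '',
    'zhenji1': '', 'zhenji2': '',
}
resp = requests.get('http://www.ceic.ac.cn/ajax/search', params=params)
print(resp.status_code)
print(resp.text[:300])  # inspect whether the body is plain JSON or wrapped in "(...)"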
# spider file:
# -*- coding: utf-8 -*-
import json

import scrapy

from EarthquakeScrapy.items import EarthquakescrapyItem


class TestScrapy(scrapy.Spider):
    name = 'test'  # spider name
    allowed_domains = ['ceic.ac.cn']  # domains only, no URL path
    # AJAX search endpoint; all query parameters are sent as form data in start_requests
    search_url = 'http://www.ceic.ac.cn/ajax/search'
    # number of result pages to crawl (the front end reports 57 pages in total; set to 3 here for testing)
    all_page = 3

    def start_requests(self):
        # one request per result page, pages [1, all_page]
        for i in range(1, self.all_page + 1):
            yield scrapy.FormRequest(
                self.search_url,
                formdata={
                    # the page number must be the string of the loop variable;
                    # passing the literal 'i' makes every request fetch the same page
                    'page': str(i),
                    'start': '2019-03-25', 'end': '2020-03-25',
                    'jingdu1': '', 'jingdu2': '',
                    'weidu1': '', 'weidu2': '',
                    'height1': '', 'height2': '',
                    'zhenji1': '', 'zhenji2': '',
                    'callback': ''},
                callback=self.parse,
                dont_filter=True)  # request the corresponding page

    def parse(self, response):
        # the endpoint returns JSON; parse it with json.loads rather than eval
        # (if the body arrives wrapped in a JSONP-style "(...)", strip the parentheses first)
        text = response.text.strip()
        if text.startswith('(') and text.endswith(')'):
            text = text[1:-1]
        result = json.loads(text)
        records = result['shuju']
        for record in records:
            # build a fresh item per record and yield it so the pipeline receives every row
            item = EarthquakescrapyItem()
            item['level'] = record['M']
            item['time'] = record['O_TIME']
            item['longitude'] = record['EPI_LON']
            item['latitude'] = record['EPI_LAT']
            item['depth'] = record['EPI_DEPTH']
            item['address'] = record['LOCATION_C']
            yield item
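The item class imported by the spider has to define the six fields assigned in parse(); below is a minimal sketch of EarthquakeScrapy/items.py under that assumption, with the field names taken from the spider above:

# items file (sketch):
import scrapy

class EarthquakescrapyItem(scrapy.Item):
    level = scrapy.Field()      # magnitude, from record['M']
    time = scrapy.Field()       # origin time, from record['O_TIME']
    longitude = scrapy.Field()  # epicenter longitude, from record['EPI_LON']
    latitude = scrapy.Field()   # epicenter latitude, from record['EPI_LAT']
    depth = scrapy.Field()      # focal depth, from record['EPI_DEPTH']
    address = scrapy.Field()    # location description, from record['LOCATION_C']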