weixin_45399468
2019-08-07 15:17 · 303 views

Using Scrapy to crawl Zhihu follower information and store it in MySQL.

pipeline.py source code:

import pymysql

class ZhihuuserPipeline(object):
    def process_item(self, item, spider):
        yield item

class MysqlPipeline():
    def __init__(self, host, database, user, password, port):
        self.host = host
        self.database = database
        self.user = user
        self.password = password
        self.port = port

    @classmethod
    def from_crawler(cls, crawler):
        return cls(host=crawler.settings.get('MYSQL_HOST'),
                   database=crawler.settings.get('MYSQL_DATABASE'),
                   user=crawler.settings.get('MYSQL_USER'),
                   password=crawler.settings.get('MYSQL_PASSWORD'),
                   port=crawler.settings.get('MYSQL_PORT'),
                   )

    def open_spider(self, spider):
        self.db = pymysql.connect(host=self.host, user=self.user, password=self.password,
                                  database=self.database, charset='utf8', port=self.port)
        self.cursor = self.db.cursor()  # get a cursor for executing SQL statements

    def close_spider(self, spider):
        self.db.close()

    def process_item(self, item, spider):
        data = dict(item)
        keys = ', '.join(data.keys())
        # join the column names into a single comma-separated string
        values = ', '.join(['%s'] * len(data))  # one '%s' placeholder per column
        sql = 'insert into %s (%s) values (%s)' % (item.table, keys, values)
        # item.table is the table name defined on the Item class
        self.cursor.execute(sql, tuple(data.values()))
        self.db.commit()

        return item
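For reference, the pipeline above pulls its connection parameters from the project settings via from_crawler. A minimal sketch of the matching settings.py entries (the values are placeholders, and the module path zhihuuser.pipelines is an assumption about the project layout):

MYSQL_HOST = 'localhost'
MYSQL_DATABASE = 'zhihuuser'   # placeholder database name
MYSQL_USER = 'root'            # placeholder credentials
MYSQL_PASSWORD = '******'
MYSQL_PORT = 3306

ITEM_PIPELINES = {
    'zhihuuser.pipelines.ZhihuuserPipeline': 300,
    'zhihuuser.pipelines.MysqlPipeline': 301,
}

With this ordering, ZhihuuserPipeline runs first, so whatever it returns is what MysqlPipeline receives.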

The error raised is: ValueError: dictionary update sequence element #0 has length 6; 2 is required

How should this be fixed?
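A likely cause, judging from the code above: ZhihuuserPipeline.process_item uses yield instead of return, which turns the method into a generator function. Scrapy then passes the generator object, not the item, on to MysqlPipeline, where dict(item) tries to build a dict from it; the first element it pulls out of the generator is the Item itself, and its 6 fields explain the "length 6; 2 is required" in the message. A minimal sketch of the fix, assuming both pipelines stay enabled:

class ZhihuuserPipeline(object):
    def process_item(self, item, spider):
        # return, not yield: yielding makes this a generator function, so the
        # next pipeline receives a generator object instead of the item, and
        # dict(item) then fails with exactly this ValueError
        return item

Alternatively, if ZhihuuserPipeline does nothing else, it can simply be removed from ITEM_PIPELINES.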

