class YourPipeline(object):
    def __init__(self):
        # Buffer of scraped items; flushed to the database in batches.
        self.items = []

    def process_item(self, item, spider):
        self.items.append(item)
        # Once the buffer holds 50 or more items, write them all to the DB.
        if len(self.items) >= 50:
            self.insert_current_items(spider)
        return item

    def insert_current_items(self, spider):
        for item in self.items:
            # Build "`column` = %s" pairs for the UPDATE statement, keeping
            # keys and values in the same order.
            keys = list(item.keys())
            values = list(item.values())
            update_query = ', '.join("`" + key + "` = %s" for key in keys)

            # Check whether this ASIN already exists in the table (requires a
            # DictCursor so the row can be accessed by column name).
            query = "SELECT asin FROM " + spider.tbl_name + " WHERE asin = %s LIMIT 1"
            spider.cursor.execute(query, (item['asin'],))
            existing = spider.cursor.fetchone()

            if existing:
                # Row exists: update it and refresh the date_update column.
                query = ("UPDATE " + spider.tbl_name + " SET " + update_query +
                         ", date_update = CURRENT_TIMESTAMP WHERE asin = %s")
                update_query_vals = values + [existing['asin']]
                try:
                    spider.cursor.execute(query, update_query_vals)
                except Exception as e:
                    # Reconnect and retry once if the connection timed out.
                    if 'MySQL server has gone away' in str(e):
                        spider.connectDB()
                        spider.cursor.execute(query, update_query_vals)
                    else:
                        raise
            else:
                # This ELSE is unlikely to be executed because we are not
                # scraping ASINs from the Amazon website; we just import
                # ASINs into the DB from another script.
                try:
                    placeholders = ', '.join(['%s'] * len(item))
                    columns = ', '.join(keys)
                    query = "INSERT INTO %s ( %s ) VALUES ( %s )" % (spider.tbl_name, columns, placeholders)
                    spider.cursor.execute(query, values)
                except Exception as e:
                    if 'MySQL server has gone away' in str(e):
                        spider.connectDB()
                        spider.cursor.execute(query, values)
                    else:
                        raise
        # Clear the buffer once everything has been written.
        self.items = []

    def close_spider(self, spider):
        # Flush whatever is left in the buffer when the spider closes.
        self.insert_current_items(spider)
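For Scrapy to route items through this class, it also has to be enabled in the project settings. A minimal sketch, assuming the pipeline lives in a module called myproject/pipelines.py (the module path and the priority value 300 are placeholders to adapt to your project):

    # settings.py -- register the pipeline (module path is a placeholder)
    ITEM_PIPELINES = {
        'myproject.pipelines.YourPipeline': 300,
    }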
Keep a list of scraped items inside the pipeline, and once the size of that list exceeds N, call the DB function to save the data. The pipeline code above is taken from my project and works 100%. See close_spider(): when the spider closes, self.items may still contain fewer than N items, so any data left in the self.items list is also saved to the DB at that point.
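The pipeline also assumes the spider exposes tbl_name, cursor, and a connectDB() method, none of which are shown in the answer. Below is a minimal sketch of what that spider-side setup might look like, assuming pymysql with a DictCursor (needed so existing['asin'] can be read by column name); the spider name, connection parameters, and table name are placeholders, not part of the original code:

    # Sketch of the spider-side attributes the pipeline relies on (assumption).
    import pymysql
    import scrapy

    class AsinSpider(scrapy.Spider):
        name = "asin_spider"
        tbl_name = "products"  # table referenced by the pipeline's queries

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.connectDB()

        def connectDB(self):
            # (Re)open the connection; the pipeline calls this again when it
            # sees a "MySQL server has gone away" error.
            self.conn = pymysql.connect(
                host="localhost",
                user="user",
                password="password",
                database="mydb",
                cursorclass=pymysql.cursors.DictCursor,  # rows come back as dicts
                autocommit=True,
            )
            self.cursor = self.conn.cursor()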