from scrapy.item import Item, Field
class DoubanItem(Item):
    """One discussion-board row scraped from the Douban group listing."""
    title = Field()  # post title, taken from a/@title in the spider
    link = Field()   # post URL, taken from a/@href in the spider
    # resp = Field()  # reserved for later use (currently disabled)
    # dateT = Field()  # reserved for later use (currently disabled)

pipelines.py（定义你自己的 Pipeline；中文转码问题可在此处解决）
# -*- coding: utf-8 -*-
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import codecs
class TutorialPipeline(object):
    """Append each scraped item to items.json, one JSON object per line.

    Chinese text is written literally (not as backslash-u escapes) and the
    file is GBK-encoded, matching the original output format.
    """

    def __init__(self):
        # codecs.open applies the GBK encoding transparently on every write.
        self.file = codecs.open('items.json', 'wb', encoding='gbk')

    def process_item(self, item, spider):
        # ensure_ascii=False emits non-ASCII characters directly. This
        # replaces the old line.decode("unicode_escape") round-trip, which
        # corrupted any legitimate backslash contained in the data.
        line = json.dumps(dict(item), ensure_ascii=False) + '\n'
        self.file.write(line)
        return item

    def close_spider(self, spider):
        # Close the output file when the crawl finishes; the original
        # leaked the handle and relied on interpreter shutdown to flush.
        self.file.close()
在 settings.py 中加入相应的 ITEM_PIPELINES 属性（以下 ITEM_PIPELINES 一段为新加部分）：
# -*- coding: utf-8 -*-
# Scrapy settings for tutorial project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
# Project identity and where Scrapy discovers spider modules.
BOT_NAME = 'tutorial'

SPIDER_MODULES = ['tutorial.spiders']
NEWSPIDER_MODULE = 'tutorial.spiders'

# Route every scraped item through TutorialPipeline. The number is the
# pipeline's order value (lower runs first when several are enabled).
ITEM_PIPELINES = {
    'tutorial.pipelines.TutorialPipeline': 300,
}

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tutorial (+http://www.yourdomain.com)'
接下来是spider.py
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from tutorial.items import DoubanItem
class DoubanSpider(BaseSpider):
    """Crawl the first nine listing pages of the Shanghai-rental Douban group.

    Every table cell is inspected; each yields one DoubanItem carrying the
    post title and link extracted from the cell's anchor.
    """

    name = "douban"
    allowed_domains = ["douban.com"]
    # Listing pages are paginated 25 posts apart: start=0, 25, ..., 200.
    start_urls = [
        "http://www.douban.com/group/shanghaizufang/discussion?start=%d" % offset
        for offset in range(0, 225, 25)
    ]

    def parse(self, response):
        selector = HtmlXPathSelector(response)
        scraped = []
        for cell in selector.xpath('//tr/td'):
            entry = DoubanItem()
            # extract() returns a list of matched strings (possibly empty).
            entry['title'] = cell.xpath('a/@title').extract()
            entry['link'] = cell.xpath('a/@href').extract()
            # resp / dateT fields intentionally left unpopulated for now.
            scraped.append(entry)
        return scraped
用JSON数据方式导出: