一、创建工程
scrapy startproject dmoz
二、建立dmoz_spider.py
from scrapy.spider import Spider
from scrapy.selector import Selector

from dmoz.items import DmozItem


class DmozSpider(Spider):
    """Spider that scrapes name/url/description from dmoz.org Python listings."""

    name = "dmoz"
    allowed_domains = ["dmoz.org"]
    start_urls = [
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/",
    ]

    def parse(self, response):
        """Parse a listing page into DmozItem objects.

        The lines below form a spider contract. For more info see:
        http://doc.scrapy.org/en/latest/topics/contracts.html

        @url http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/
        @scrapes name
        """
        sel = Selector(response)
        # Each <li> under the "directory-url" list is one directory entry.
        sites = sel.xpath('//ul[@class="directory-url"]/li')
        items = []
        for site in sites:
            item = DmozItem()
            item['name'] = site.xpath('a/text()').extract()
            item['url'] = site.xpath('a/@href').extract()
            # Raw string so \s and \r are regex escapes, not Python escapes.
            item['description'] = site.xpath('text()').re(r'-\s[^\n]*\r')
            items.append(item)
        return items
三、改写items.py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

from scrapy.item import Item, Field


class DmozItem(Item):
    """Container for one dmoz.org directory entry."""

    name = Field()         # link text of the entry
    description = Field()  # trailing description text
    url = Field()          # href of the entry's link
四、改写pipelines.py
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/item-pipeline.html


class DmozPipeline(object):
    """Minimal pass-through pipeline for scraped DmozItem objects."""

    def process_item(self, item, spider):
        # Return the item unchanged so later pipeline stages (or the
        # feed exporter) receive it; add cleaning/validation here.
        return item
五、在dmoz文件夹根目录执行
scrapy crawl dmoz -o dmoz.json
运行spider
时间: 2024-10-29 22:26:39