Ebrun (亿邦动力) scraping example, continuously updated

# -*- coding: utf-8 -*-
import scrapy
from ybdlspider.items import YbdlspiderItem
import re
class YbSpider(scrapy.Spider):
    name = 'yb'
    allowed_domains = ['ebrun.com']
    start_urls = ['http://www.ebrun.com/retail/1']  # first page of the listing
    num = 1

    def parse(self, response):  # extract titles and detail-page URLs from the list page
        url_list = response.xpath('//div/a[@eb="com_chan_lcol_fylb"]')
        for i in url_list:
            item = YbdlspiderItem()
            item["title"] = i.xpath("./@title").extract_first()
            item["href"] = i.xpath("./@href").extract_first()

            yield scrapy.Request(item["href"], callback=self.parse_detail, meta={"item": item})

        # pagination: read the current page number from the URL and request the next page
        beforeurl = response.url
        pat1 = r"/retail/(\d+)"
        page = re.search(pat1, beforeurl).group(1)
        page = int(page) + 1
        if page < 3:  # pagination limit
            nexturl = "http://www.ebrun.com/retail/" + str(page)
            yield scrapy.Request(nexturl, callback=self.parse)

    def parse_detail(self, response):  # detail-page body text and publish time
        item = response.meta["item"]
        item["content"] = response.xpath('//section/article/div[@class="post-text"]//p/text()').extract()
        item["time"] = response.xpath('//html/body/main/section/article/div/p/span[@class="f-right"]').extract_first()
        print(item)
        yield item
        

The code above is the project's spider (the yb spider in the ybdlspider.spiders module).
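The spider imports YbdlspiderItem from ybdlspider.items, which the original post does not show. A minimal sketch of that item class, assuming it only declares the four fields the spider assigns (title, href, content, time):

# -*- coding: utf-8 -*-
# ybdlspider/items.py -- reconstructed sketch; field names are taken from the spider above
import scrapy

class YbdlspiderItem(scrapy.Item):
    title = scrapy.Field()    # article title from the list page
    href = scrapy.Field()     # detail-page URL
    content = scrapy.Field()  # list of paragraph texts from the detail page
    time = scrapy.Field()     # publish-time span from the detail page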

# -*- coding: utf-8 -*-

# Scrapy settings for ybdlspider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'ybdlspider'

SPIDER_MODULES = ['ybdlspider.spiders']
NEWSPIDER_MODULE = 'ybdlspider.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'ybdlspider (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
LOG_LEVEL = "WARNING"
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
USER_AGENT = 'Mozilla/5.0 (Linux; U; Android 8.0.0; zh-CN; MHA-AL00 Build/HUAWEIMHA-AL00) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.108 UCBrowser/12.1.4.994 Mobile Safari/537.36'
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# DEFAULT_REQUEST_HEADERS = {
#     'User-Agent': 'Mozilla/5.0 (Linux; U; Android 8.0.0; zh-CN; MHA-AL00 Build/HUAWEIMHA-AL00) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.108 UCBrowser/12.1.4.994 Mobile Safari/537.36',
#     }
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'ybdlspider.middlewares.YbdlspiderSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'ybdlspider.middlewares.YbdlspiderDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'ybdlspider.pipelines.YbdlspiderPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

The code above is the project's settings.py.
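ITEM_PIPELINES enables ybdlspider.pipelines.YbdlspiderPipeline, which the original post also omits. A minimal sketch of that pipeline, assuming it simply appends each item to a JSON-lines file (the output file name and serialization are illustrative choices, not from the source):

# -*- coding: utf-8 -*-
# ybdlspider/pipelines.py -- reconstructed sketch; the real pipeline is not shown in the post
import json

class YbdlspiderPipeline(object):
    def open_spider(self, spider):
        # hypothetical output file; the original post does not say where items are stored
        self.file = open('ebrun_items.jl', 'a', encoding='utf-8')

    def process_item(self, item, spider):
        self.file.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item

    def close_spider(self, spider):
        self.file.close()

With the spider, item, settings, and pipeline in place, the crawl is started from the project directory with the usual Scrapy command:

scrapy crawl yb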

Original article: https://www.cnblogs.com/lizhen2020/p/12286014.html
