Crawling the entire Jianshu site, with asynchronous MySQL storage

# Jianshu (jianshu.com)
# Save the data in MySQL; integrate selenium + chromedriver into Scrapy; crawl the whole site
# Scrape ajax-loaded data

# Spider file
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from jianshu_spider.items import ArticleItem

class JsSpider(CrawlSpider):
    name = 'js'
    allowed_domains = ['jianshu.com']
    start_urls = ['https://www.jianshu.com/']  # start crawling from the homepage

    rules = (
        # On a detail page, the recommended articles below it link directly to /p/...
        Rule(LinkExtractor(allow=r'.*/p/[0-9a-z]{12}.*'),
             callback='parse_detail', follow=True),
    )

    def parse_detail(self, response):
        # print(response.text)
        title = response.xpath("//div[@class='note']/div[@class='post']/div[@class='article']/h1[@class='title']/text()").get()
        # print(title)
        avatar = response.xpath("//a[@class='avatar']/img/@src").get()
        # print(avatar)
        author = response.xpath("//span[@class='name']/a/text()").get()
        # print(author)
        pub_time = response.xpath("//span[@class='publish-time']/text()").get().replace("*", "")
        # print(pub_time)

        # Normally the url contains at most one "?"
        url = response.url
        url1 = url.split("?")[0]
        article_id = url1.split("/")[-1]
        # print(article_id)

        # Grab the content together with its html tags, so it can be rendered later
        content = response.xpath("//div[@class='show-content']").get()
        # print(content)
        item = ArticleItem(
            title=title,
            avatar=avatar,
            author=author,
            pub_time=pub_time,
            origin_url=response.url,
            article_id=article_id,
            content=content
        )
        yield item
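
With the project laid out as above (project name jianshu_spider as used in the import, spider name js), the crawl can be started from the project root with:

scrapy crawl js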

# Items file
import scrapy

class ArticleItem(scrapy.Item):
    # define the fields for your item here like:
    title = scrapy.Field()
    content = scrapy.Field()
    article_id = scrapy.Field()
    origin_url = scrapy.Field()
    author = scrapy.Field()
    avatar = scrapy.Field()
    pub_time = scrapy.Field()
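
The ajax section further down also yields word_count, read_count, comment_count, like_count and subject, so the item class needs those extra fields as well; a sketch of the extended item:

class ArticleItem(scrapy.Item):
    title = scrapy.Field()
    content = scrapy.Field()
    article_id = scrapy.Field()
    origin_url = scrapy.Field()
    author = scrapy.Field()
    avatar = scrapy.Field()
    pub_time = scrapy.Field()
    # extra fields filled in by the selenium/ajax version of parse_detail below
    word_count = scrapy.Field()
    read_count = scrapy.Field()
    comment_count = scrapy.Field()
    like_count = scrapy.Field()
    subject = scrapy.Field()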

# Pipelines file: save the data in MySQL
import pymysql
from twisted.enterprise import adbapi       # Twisted's module for database handling
from pymysql import cursors

class JianshuSpiderPipeline(object):
    def __init__(self):
        dbparams = {
            'host': '127.0.0.1',
            'port': 3306,
            'user': 'root',
            'password': '',
            'database': 'jianshu',
            'charset': 'utf8'
        }
        self.conn = pymysql.connect(**dbparams)
        # **dbparams is equivalent to writing host='127.0.0.1', ... directly inside the parentheses

        self.cursor = self.conn.cursor()
        self._sql = None

    def process_item(self, item, spider):
        self.cursor.execute(self.sql, (item['title'], item['content'], item['author'], item['avatar'],
                                       item['pub_time'], item['origin_url'], item['article_id']))
        self.conn.commit()  # this commit is synchronous, which is slow
        return item

    @property
    def sql(self):
        if not self._sql:  # build the statement only once
            self._sql = '''
            insert into article2(id,title,content,author,avatar,pub_time,
            origin_url,article_id) values(null,%s,%s,%s,%s,%s,%s,%s)
            '''
        return self._sql
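
Both pipelines assume that the jianshu database and the article2 table already exist. The column names come from the insert statement above; the column types below are an assumption, so adjust them to your own schema:

# create_table.py -- a minimal sketch for creating the assumed article2 table
import pymysql

conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
                       password='', database='jianshu', charset='utf8')
with conn.cursor() as cursor:
    cursor.execute('''
        create table if not exists article2(
            id int primary key auto_increment,
            title varchar(255),
            content longtext,
            author varchar(64),
            avatar varchar(255),
            pub_time varchar(64),
            origin_url varchar(255),
            article_id varchar(32)
        ) default charset=utf8
    ''')
conn.commit()
conn.close()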
# Optimize the pipeline above so that saving happens asynchronously
# Use the ConnectionPool provided by twisted (adbapi) to turn the inserts into asynchronous operations (a good point to bring up in interviews)

# The storage above is synchronous and therefore slow; optimize it to be asynchronous
class JianshuTwistedPipeline(object):
    def __init__(self):
        # create the connection pool
        dbparams = {
            'host': '127.0.0.1',
            'port': 3306,
            'user': 'root',
            'password': '',
            'database': 'jianshu',
            'charset': 'utf8',
            'cursorclass': cursors.DictCursor
        }
        self.dbpool = adbapi.ConnectionPool('pymysql', **dbparams)
        self._sql = None

    @property
    def sql(self):
        if not self._sql:  # build the statement only once
            self._sql = '''
            insert into article2(id,title,content,author,avatar,pub_time,
            origin_url,article_id) values(null,%s,%s,%s,%s,%s,%s,%s)
            '''
        return self._sql

    def process_item(self, item, spider):
        # runInteraction runs insert_item asynchronously on the pool's thread pool
        defer = self.dbpool.runInteraction(self.insert_item, item)
        defer.addErrback(self.handle_error, item, spider)
        return item  # return the item so later pipelines still receive it

    def insert_item(self, cursor, item):  # insert into the database
        cursor.execute(self.sql, (item['title'], item['content'], item['author'], item['avatar'],
                                  item['pub_time'], item['origin_url'], item['article_id']))

    def handle_error(self, error, item, spider):
        print('=' * 20)
        print("error:", error)
        print('=' * 20)

# Update the pipeline setting in settings.py
ITEM_PIPELINES = {
   # 'jianshu_spider.pipelines.JianshuSpiderPipeline': 300,
   'jianshu_spider.pipelines.JianshuTwistedPipeline': 300,  # save data asynchronously
}
# Handle dynamic data, i.e. the data loaded in via ajax
# Use selenium + chromedriver for this
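
The selenium + chromedriver integration mentioned at the top is done with a downloader middleware; the original post does not show it, so the following is only a minimal sketch (the class name SeleniumDownloadMiddleware, the one-second wait and the priority 543 are assumptions, and chromedriver must be on the PATH):

# middlewares.py
import time

from scrapy.http import HtmlResponse
from selenium import webdriver

class SeleniumDownloadMiddleware(object):
    def __init__(self):
        self.driver = webdriver.Chrome()

    def process_request(self, request, spider):
        self.driver.get(request.url)
        time.sleep(1)  # crude wait so the ajax-loaded counters have time to render
        source = self.driver.page_source
        # returning an HtmlResponse short-circuits the normal download,
        # so parse_detail receives the page as rendered by Chrome
        return HtmlResponse(url=self.driver.current_url, body=source,
                            request=request, encoding='utf-8')

# and enable it in settings.py
DOWNLOADER_MIDDLEWARES = {
    'jianshu_spider.middlewares.SeleniumDownloadMiddleware': 543,
}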

# Spider file: also extract the view count, like count, word count, collections (subject) and comment count, and store them in the item
    def parse_detail(self, response):
        # print(response.text)
        title = response.xpath("//div[@class='note']/div[@class='post']/div[@class='article']/h1[@class='title']/text()").get()
        print(title)
        avatar = response.xpath("//a[@class='avatar']/img/@src").get()
        # print(avatar)
        author = response.xpath("//span[@class='name']/a/text()").get()
        # print(author)
        pub_time = response.xpath("//span[@class='publish-time']/text()").get().replace("*", "")
        # print(pub_time)

        # Normally the url contains at most one "?"
        url = response.url
        url1 = url.split("?")[0]
        article_id = url1.split("/")[-1]
        # print(article_id)

        # Grab the content together with its html tags, so it can be rendered later
        content = response.xpath("//div[@class='show-content']").get()
        # print(content)

        # the data below is loaded dynamically (rendered via selenium)
        word_count = response.xpath("//span[@class='wordage']/text()").get().split(" ")[-1]
        read_count = response.xpath("//span[@class='views-count']/text()").get().split(" ")[-1]
        comment_count = response.xpath("//span[@class='comments-count']/text()").get().split(" ")[-1]
        like_count = response.xpath("//span[@class='likes-count']/text()").get().split(" ")[-1]
        subject = response.xpath("//div[@class='include-collection']/a/div/text()").getall()
        # subject comes back as a list, which MySQL cannot store directly, so join it into a string
        subject = ",".join(subject)

        item = ArticleItem(
            title=title,
            avatar=avatar,
            author=author,
            pub_time=pub_time,
            origin_url=response.url,
            article_id=article_id,
            content=content,

            word_count=word_count,
            read_count=read_count,
            comment_count=comment_count,
            like_count=like_count,
            subject=subject,
        )
        yield item

# Pipelines file
# The same asynchronous pipeline as above, extended with the new fields
class JianshuTwistedPipeline(object):
    def __init__(self):
        # create the connection pool
        dbparams = {
            'host': '127.0.0.1',
            'port': 3306,
            'user': 'root',
            'password': '',
            'database': 'jianshu',
            'charset': 'utf8',
            'cursorclass': cursors.DictCursor
        }
        self.dbpool = adbapi.ConnectionPool('pymysql', **dbparams)
        self._sql = None

    @property
    def sql(self):
        if not self._sql:  # build the statement only once
            self._sql = '''
            insert into article2(id,title,content,author,avatar,pub_time,
            origin_url,article_id,read_count,word_count,like_count,comment_count,subject)
            values(null,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)
            '''
        return self._sql

    def process_item(self, item, spider):
        # runInteraction runs insert_item asynchronously on the pool's thread pool
        defer = self.dbpool.runInteraction(self.insert_item, item)
        defer.addErrback(self.handle_error, item, spider)
        return item  # return the item so later pipelines still receive it

    def insert_item(self, cursor, item):  # insert into the database
        cursor.execute(self.sql, (item['title'], item['content'], item['author'], item['avatar'],
                                  item['pub_time'], item['origin_url'], item['article_id'],
                                  item['read_count'], item['word_count'], item['like_count'],
                                  item['comment_count'], item['subject']))

    def handle_error(self, error, item, spider):
        print('=' * 20 + 'error' + '=' * 20)
        print("error:", error)
        print('=' * 20 + 'error' + '=' * 20)
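
The extended insert statement assumes article2 also has the five new columns; a hedged sketch of adding them (the varchar types are an assumption, since the counts are scraped as text):

# alter_table.py -- a sketch for extending the assumed article2 table
import pymysql

conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
                       password='', database='jianshu', charset='utf8')
with conn.cursor() as cursor:
    cursor.execute('''
        alter table article2
            add column read_count varchar(16),
            add column word_count varchar(16),
            add column like_count varchar(16),
            add column comment_count varchar(16),
            add column subject varchar(255)
    ''')
conn.commit()
conn.close()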

Source: https://www.cnblogs.com/kenD/p/11123696.html
