Python 3.5 crawler: scraping the Baidu Baike "歼击机" (fighter jet) entry

1. spider_main

# coding:utf8
from Spider_Test import url_manager, html_downloader, html_parser, html_outputer

class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutPuter()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():  # the method must be called; a bare method reference is always truthy
            try:
                new_url = self.urls.get_new_url()
                print("craw %d:%s" % (count,new_url))
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 7:
                    break

                count = count + 1
            except Exception as e:
                print("craw failed: %s" % e)

        self.outputer.output_html()           

if __name__ == "__main__":
    root_url = "http://baike.baidu.com/view/114149.htm"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
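
Before a full crawl, a quick one-page smoke test (a sketch, assuming the same Spider_Test package layout as above) can exercise the download-and-parse pipeline by hand:

# minimal smoke test: fetch and parse the root page only
from Spider_Test import html_downloader, html_parser

downloader = html_downloader.HtmlDownloader()
parser = html_parser.HtmlParser()
url = "http://baike.baidu.com/view/114149.htm"
cont = downloader.download(url)
new_urls, data = parser.parse(url, cont)
print(data["title"])           # entry title
print(len(new_urls), "links")  # candidate URLs for the frontier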

2. url_manager

# coding:utf8

class UrlManager(object):
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)  # queue only URLs not seen in either set

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)   # reuse add_new_url so the deduplication check applies

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
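
A short sketch of how the two sets cooperate: a URL can be queued only once, and once crawled it can never re-enter the queue.

# deduplication in action
manager = UrlManager()
manager.add_new_url("http://baike.baidu.com/view/114149.htm")
manager.add_new_url("http://baike.baidu.com/view/114149.htm")  # ignored: already queued
print(manager.has_new_url())  # True
url = manager.get_new_url()   # pops the URL and records it in old_urls
manager.add_new_url(url)      # ignored: already crawled
print(manager.has_new_url())  # False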

3. html_downloader

# coding:utf8
from urllib import request

class HtmlDownloader(object):    

    def download(self, url):
        if url is None:
            return None

        # In Python 3, urlopen lives in urllib.request (it was urllib2 in Python 2.7)
        response = request.urlopen(url)
        if response.getcode() != 200:
            return None

        return response.read().decode('utf-8', 'ignore')
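
For comparison, the Python 2.7 version of the same download went through the urllib2 module, which Python 3 merged into urllib.request:

# Python 2.7 equivalent (read() returns a byte str; Python 2 code usually kept it as-is)
import urllib2

response = urllib2.urlopen("http://baike.baidu.com/view/114149.htm")
html = response.read()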

4. html_parser

# coding:utf8
import re
import urllib
from bs4 import BeautifulSoup

class HtmlParser(object):    

    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        links = soup.find_all("a", href=re.compile(r"/view/\d+\.htm"))
        for link in links:
            new_url = link['href']
            # urljoin moved into urllib.parse in Python 3 (urlparse.urljoin in Python 2.7)
            new_full_url = urllib.parse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)

        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        # the title sits in: <dl class="lemmaWgt-lemmaTitle lemmaWgt-lemmaTitle-">
        title_node = soup.find('dl', class_="lemmaWgt-lemmaTitle lemmaWgt-lemmaTitle-").find("h1")
        res_data['title'] = title_node.get_text()  # the key must match what the outputer reads, or a KeyError is raised there
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data                

    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return None, None  # keep the return shape consistent so the caller can always unpack

        soup = BeautifulSoup(html_cont, "html.parser")  # html_cont is already a decoded str, so from_encoding is unnecessary
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data
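
The two helpers are easy to verify in isolation. A quick sketch (the entry id 123456 below is made up for illustration) shows how the href pattern matches relative links and how urljoin resolves them against the page URL:

import re
from urllib import parse

pattern = re.compile(r"/view/\d+\.htm")
print(pattern.search("/view/123456.htm") is not None)  # True
print(parse.urljoin("http://baike.baidu.com/view/114149.htm", "/view/123456.htm"))
# -> http://baike.baidu.com/view/123456.htm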

5. html_outputer

# coding:utf8
class HtmlOutPuter(object):

    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)  # accumulate parsed records in a list

    def output_html(self):
        fout = open('output.html', 'w')

        fout.write("<html>")
        fout.write("<body>")
        fout.write("<table>")
        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data["url"])
            fout.write("<td>%s</td>" % data["title"])  #  .encode(‘utf-8‘)不能编码成utf-8
            #  fout.write("<td>%s</td>" % data["summary"].encode(‘utf-8‘).decode(‘gbk‘,‘ignore‘))
            fout.write("<td>")
            fout.write(data["summary"].encode(‘utf-8‘).decode(‘gbk‘,‘ignore‘)) # this place can‘t display some characters
            fout.write("</td>")
            fout.write("</tr>")

        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html")
        fout.close()

In the generated HTML, some characters do not display correctly. Online advice suggests running cmd /K chcp 65001, but querying the code page from the console afterwards shows it unchanged; this appears to be a limitation of the Windows console, so it is left as-is for now.
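
A simpler way around the problem, assuming the goal is a readable output.html rather than correct console output, is to skip the encode/decode round-trip entirely: open the file with an explicit UTF-8 encoding and declare the charset in the page, so the browser renders the summaries correctly regardless of the Windows console code page. A sketch of output_html rewritten along those lines (a drop-in replacement inside HtmlOutPuter):

    def output_html(self):
        # write UTF-8 directly and tell the browser about it; no gbk round-trip needed
        with open('output.html', 'w', encoding='utf-8') as fout:
            fout.write('<html><head><meta charset="utf-8"></head><body><table>')
            for data in self.datas:
                fout.write('<tr><td>%s</td><td>%s</td><td>%s</td></tr>'
                           % (data['url'], data['title'], data['summary']))
            fout.write('</table></body></html>')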
