吴裕雄 Python Web Scraping (2)

import requests
from bs4 import BeautifulSoup

# Fetch a page and parse it with the built-in html.parser
url = 'http://www.baidu.com'
html = requests.get(url)
sp = BeautifulSoup(html.text, 'html.parser')
print(sp)
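
A note on robustness: requests does not raise on HTTP errors by itself, and it guesses the charset from the response headers, which are often wrong for Chinese sites. A minimal hedged sketch of a safer fetch (the timeout value is just an example):

import requests
from bs4 import BeautifulSoup

url = 'http://www.baidu.com'
html = requests.get(url, timeout=10)    # fail fast instead of hanging forever
html.raise_for_status()                 # raise HTTPError on 4xx/5xx responses
html.encoding = html.apparent_encoding  # guess the charset from the body, not the header
sp = BeautifulSoup(html.text, 'html.parser')
print(sp.title)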

html_doc = """
<html><head><title>页标题</title></head>

<p class="title"><b>文件标题</b></p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""

from bs4 import BeautifulSoup

sp = BeautifulSoup(html_doc, 'html.parser')

print(sp.find('b'))  # output: <b>文件标题</b>

print(sp.find_all('a'))  # output: a list of all three <a class="sister"> tags

print(sp.find_all("a", {"class": "sister"}))  # the same three tags, filtered by class

data1 = sp.find("a", {"href": "http://example.com/elsie"})
print(data1.text)  # output: Elsie

data2 = sp.find("a", {"id": "link2"})
print(data2.text)  # output: Lacie

data3 = sp.select("#link3")  # select() takes a CSS selector and returns a list
print(data3[0].text)  # output: Tillie

print(sp.find_all(['title', 'a']))  # all <title> and <a> tags together

data1 = sp.find("a", {"id": "link1"})
print(data1.get("href"))  # output: http://example.com/elsie
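
For comparison, each find/find_all call above has a CSS-selector equivalent through select(), which always returns a list. A short sketch against the same html_doc:

# CSS-selector equivalents of the calls above
print(sp.select('b'))                # like sp.find_all('b')
print(sp.select('a.sister'))         # <a> tags with class="sister"
print(sp.select('a#link2')[0].text)  # output: Lacie
print(sp.select('a[href="http://example.com/elsie"]')[0].text)  # output: Elsie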

import requests
from bs4 import BeautifulSoup

url = 'http://www.wsbookshow.com/'
html = requests.get(url)
html.encoding = "gbk"  # the site is served in GBK, not UTF-8

sp = BeautifulSoup(html.text, "html.parser")
links = sp.find_all(["a", "img"])  # collect <a> and <img> tags in one pass
for link in links:
    href = link.get("href")  # read the href attribute; None if absent
    # keep only absolute links that start with http://
    if href is not None and href.startswith("http://"):
        print(href)
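
The filter above keeps only absolute http:// links, so relative paths such as /a/b.html are silently dropped. A hedged sketch that resolves them with urllib.parse.urljoin instead (assuming sp and url from the block above):

from urllib.parse import urljoin

for link in sp.find_all(["a", "img"]):
    href = link.get("href")
    if href:  # skip tags without an href attribute
        print(urljoin(url, href))  # resolves relative paths against the base URL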

import requests
from bs4 import BeautifulSoup

url = 'http://www.taiwanlottery.com.tw/'
html = requests.get(url)
sp = BeautifulSoup(html.text, 'html.parser')

# Narrow the search step by step: page -> right column -> draw block -> balls
data1 = sp.select("#rightdown")
print(data1)

data2 = data1[0].find('div', {'class': 'contents_box02'})
print(data2)
print()

data3 = data2.find_all('div', {'class': 'ball_tx'})
print(data3)
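
The ball_tx divs hold the drawn numbers as plain text. A minimal sketch for printing them (hedged: the class names above reflect the site's markup when this was written and may have changed since):

for block in data3:
    print(block.text.strip())  # each ball_tx div holds one draw's numbers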

import requests
from bs4 import BeautifulSoup

url1 = 'http://www.pm25x.com/'  # home page URL
html = requests.get(url1)  # fetch the home page
sp1 = BeautifulSoup(html.text, 'html.parser')  # parse the response

city = sp1.find("a", {"title": "北京PM2.5"})  # find the tag whose title attribute is "北京PM2.5"
print(city)
citylink = city.get("href")  # read the href attribute of that tag
print(citylink)
url2 = url1 + citylink  # build the full URL of the city page
print(url2)

html2 = requests.get(url2)  # fetch the city page
sp2 = BeautifulSoup(html2.text, "html.parser")  # parse it
# print(sp2)
data1 = sp2.select(".aqivalue")  # the element with class aqivalue holds Beijing's PM2.5 reading
pm25 = data1[0].text  # extract the reading from the tag
print("Beijing's current PM2.5 value is: " + pm25)

import requests, os
from bs4 import BeautifulSoup
from urllib.request import urlopen

url = 'http://www.tooopen.com/img/87.aspx'

html = requests.get(url)
html.encoding = "utf-8"

sp = BeautifulSoup(html.text, 'html.parser')

# Create the images directory if it does not exist yet
images_dir = "E:\\images\\"
if not os.path.exists(images_dir):
    os.mkdir(images_dir)

# Collect all <a> and <img> tags
all_links = sp.find_all(['a', 'img'])
for link in all_links:
    # read both the src and href attributes
    src = link.get('src')
    href = link.get('href')
    attrs = [src, href]  # check both attributes, not src twice
    for attr in attrs:
        # keep only .jpg and .png files
        if attr is not None and ('.jpg' in attr or '.png' in attr):
            full_path = attr
            filename = full_path.split('/')[-1]  # file name with extension
            ext = filename.split('.')[-1]  # extension
            filename = filename.split('.')[-2]  # base name without extension
            if 'jpg' in ext:
                filename = filename + '.jpg'
            else:
                filename = filename + '.png'
            print(attr)
            # download and save the image
            try:
                image = urlopen(full_path)
                with open(os.path.join(images_dir, filename), 'wb') as f:
                    f.write(image.read())
            except Exception:
                print("{} could not be downloaded!".format(filename))

Original article: https://www.cnblogs.com/tszr/p/10149956.html
