import aiohttp
import asyncio
import aiofiles
import async_timeout
from bs4 import BeautifulSoup
import time
import os
async def aitaotu(sem, urllist):
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4',
        'Host': 'www.aitaotu.com',
        'Referer': 'https://www.aitaotu.com/guonei/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36',
    }
    # Reuse one session for all listing pages; opening a new ClientSession
    # per request is an aiohttp anti-pattern.
    async with aiohttp.ClientSession() as session:
        for url in urllist:
            async with session.get(url, ssl=False, headers=headers) as resp:
                if resp.status == 200:
                    respdata = await resp.text()
                    page = BeautifulSoup(respdata, 'lxml')
                    hrefs = page.select('#infinite_scroll > div > div.item_t > div > a')
                    # Thumbnail <img> nodes are selected only to keep zip() aligned below.
                    img_urls = page.select('#infinite_scroll > div > div.item_t > div > a > img')
                    titles = page.select('#infinite_scroll > div > div.item_b.clearfix > div.title > span > a')
                    for href1, img_url1, title1 in zip(hrefs, img_urls, titles):
                        href = href1.get('href')
                        title = title1.get_text()
                        href_url = 'https://www.aitaotu.com' + href
                        # Follow each album link and pull the full-size image URLs.
                        async with session.get(href_url, ssl=False, headers=headers) as contentresp:
                            if contentresp.status == 200:
                                contentrespdata = await contentresp.text()
                                content_page = BeautifulSoup(contentrespdata, 'lxml')
                                images = content_page.select('#big-pic > p > a > img')
                                for image1 in images:
                                    image = image1.get('src')
                                    print(image)
                                    async with sem:
                                        await download_coroutine(image, title)
                                    print('Image downloaded.')
                else:
                    print('Blocked, status code:', resp.status)
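Because the loop above awaits download_coroutine inline, the Semaphore(100) never gates more than one transfer at a time and the crawl is effectively sequential. A minimal sketch of one way to get real download concurrency (the bounded_download helper is hypothetical, not part of the original script):

async def bounded_download(sem, image, title):
    # Hypothetical helper: taking the semaphore inside each task lets up to
    # 100 downloads be in flight at once instead of one after another.
    async with sem:
        await download_coroutine(image, title)

# Inside the inner loop, schedule tasks instead of awaiting each one:
#     tasks.append(asyncio.ensure_future(bounded_download(sem, image, title)))
# and after the loop:
#     await asyncio.gather(*tasks)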
async def download_coroutine(url, title):
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4',
        'Host': 'img.aitaotu.cc:8089',
        'Referer': 'https://www.aitaotu.com/taotu/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36',
    }
    try:
        # async_timeout.timeout() is an asynchronous context manager and must
        # be entered with "async with" on current versions of the library.
        async with async_timeout.timeout(30):
            async with aiohttp.ClientSession() as session:
                async with session.get(url, ssl=False, headers=headers) as response:
                    # makedirs(..., exist_ok=True) avoids the check-then-create
                    # race that os.path.exists() + os.mkdir() has.
                    os.makedirs(title, exist_ok=True)
                    filename = os.path.basename(url)
                    async with aiofiles.open(os.path.join(title, filename), 'wb') as fd:
                        # Stream the body in 1 KB chunks so a large image is
                        # never held in memory all at once.
                        while True:
                            chunk = await response.content.read(1024)
                            if not chunk:
                                break
                            await fd.write(chunk)
    except Exception as e:
        print(e)
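As a side note, aiohttp's StreamReader also exposes iter_chunked(), which yields fixed-size pieces until the body is exhausted, so the manual read/break loop could be written more compactly (same behaviour, just a sketch):

    async with aiofiles.open(os.path.join(title, filename), 'wb') as fd:
        # iter_chunked() ends the loop automatically at EOF.
        async for chunk in response.content.iter_chunked(1024):
            await fd.write(chunk)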
if __name__ == '__main__':
    urllist = (
        ['https://www.aitaotu.com/guonei/list_{}.html'.format(x) for x in range(605)]
        + ['https://www.aitaotu.com/rihan/list_{}.html'.format(x) for x in range(202)]
        + ['https://www.aitaotu.com/gangtai/list_{}.html'.format(x) for x in range(33)]
        + ['https://www.aitaotu.com/meinv/list_{}.html'.format(x) for x in range(89)]
    )
    start = time.time()
    sem = asyncio.Semaphore(100)
    loop = asyncio.get_event_loop()
    # loop = asyncio.ProactorEventLoop()
    # asyncio.set_event_loop(loop)
    loop.run_until_complete(aitaotu(sem, urllist))
    print('Total time: %s s' % (time.time() - start))
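On Python 3.7+ the event-loop boilerplate can be replaced with asyncio.run(), which creates and closes the loop itself; a sketch assuming the same entry point (the semaphore is created inside the coroutine so it binds to the running loop on newer Python versions):

async def main():
    # Create the semaphore while the loop is running, then hand off.
    sem = asyncio.Semaphore(100)
    await aitaotu(sem, urllist)

asyncio.run(main())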
Original article: http://blog.51cto.com/wenguonideshou/2061714