Bored over the weekend, so I decided to have a little fun...
# coding: utf-8
# Scrape the stock ranking pages from quote.stockstar.com
import random
import time

import requests
from bs4 import BeautifulSoup

# Pool of User-Agent strings; one is picked at random for every request
user_agent = [
    "Mozilla/5.0 (Windows NT 10.0; WOW64)",
    "Mozilla/5.0 (Windows NT 6.3; WOW64)",
    "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
    "Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko",
    "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.95 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; rv:11.0) like Gecko",
    "Mozilla/5.0 (Windows; U; Windows NT 5.2) Gecko/2008070208 Firefox/3.0.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070309 Firefox/2.0.0.3",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1) Gecko/20070803 Firefox/1.5.0.12",
    "Opera/9.27 (Windows NT 5.2; U; zh-cn)",
    "Mozilla/5.0 (Macintosh; PPC Mac OS X; U; en) Opera 8.0",
    "Opera/8.0 (Macintosh; PPC Mac OS X; U; en)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.12) Gecko/20080219 Firefox/2.0.0.12 Navigator/9.0.0.6",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Win64; x64; Trident/4.0)",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E)",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Maxthon/4.0.6.2000 Chrome/26.0.1410.43 Safari/537.1",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.2; .NET4.0C; .NET4.0E; QQBrowser/7.3.9825.400)",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:21.0) Gecko/20100101 Firefox/21.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.92 Safari/537.1 LBBROWSER",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0; BIDUBrowser 2.x)",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/3.0 Safari/536.11",
]

# Section name -> number of pages to fetch from that ranking list
moduledic = {"ranklist_a": 111, "ranklist_b": 4}

for module in moduledic:
    for page in range(1, moduledic[module]):
        url = "http://quote.stockstar.com/stock/" + module + "_3_1_" + str(page) + ".html"
        try:
            # Custom request header with a randomly chosen User-Agent
            response = requests.get(url, headers={"User-Agent": random.choice(user_agent)})
        except requests.RequestException:
            print("request failed, continuing")
            continue
        response.encoding = "gb2312"
        soup = BeautifulSoup(response.text, "lxml")
        # Sleep 1-2 seconds after each page; adjust the range to suit the situation
        time.sleep(random.randrange(1, 3))

        # Each <tr> is one quote row; collect its <td> cells and print the first 12 columns
        for row in soup.find_all("tr"):
            datalist = [cell.string for cell in row.find_all("td")]
            # Skip rows that lack the 12 expected columns or contain empty cells
            if len(datalist) < 12 or None in datalist[:12]:
                continue
            print(" ".join(datalist[:12]))
A partial screenshot of the output (image not included here):
I originally meant to store the results in a database for some data analysis later, but I suddenly lost interest, so I'll leave it at this for now.
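For the record, here is a minimal sketch of what that persistence step could have looked like. It assumes the 12-column datalist rows produced by the script above; the file name stocks.db, the table name quotes, and the column names are all hypothetical, not part of the original code.

# Sketch only: save each scraped row into a local SQLite database instead of printing it.
import sqlite3

conn = sqlite3.connect("stocks.db")  # hypothetical database file
conn.execute(
    "CREATE TABLE IF NOT EXISTS quotes ("
    " col1 TEXT, col2 TEXT, col3 TEXT, col4 TEXT, col5 TEXT, col6 TEXT,"
    " col7 TEXT, col8 TEXT, col9 TEXT, col10 TEXT, col11 TEXT, col12 TEXT)"
)

def save_row(datalist):
    # Insert one 12-column row scraped from a <tr> element
    conn.execute("INSERT INTO quotes VALUES (?,?,?,?,?,?,?,?,?,?,?,?)", datalist[:12])
    conn.commit()

Inside the row loop you would then call save_row(datalist) instead of (or in addition to) print().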
I just want to say: most websites have essentially no anti-scraping measures in place. If I wanted to, I could probably crawl an entire site in a day or two; the script above took only about half an hour. Isn't data money? Crawling everything is effectively an indirect database dump, isn't it?
Posted: 2024-10-16 14:36:35