Bored this morning... woke up at 7 and suddenly felt like writing a page-view booster, so let's get to it.
For testing only; I don't recommend actually inflating view counts!
The idea is very simple: step one, harvest proxy IPs; step two, simulate visits through them.
Extracting HTTP proxy IPs
There are plenty of proxy-IP sites online, both paid and free, for example the free list at https://www.kuaidaili.com/free/inha/ used below. Whichever site you pick, the job is the same: scrape the IPs and ports and collect them together. How you scrape depends on the site's structure; on kuaidaili the IP and port each sit in a <td> tag whose data-title attribute reads "IP" or "PORT", so bs4 handles it easily. Here's the script:
## Fetch the proxy IPs
def Get_proxy_ip():
    print("========== Batch-scrape proxy IPs to boost cnblogs views  By 卿 ==========")
    print("           Blogs:https://www.cnblogs.com/-qing-/")
    print("                      Started!")
    global proxy_list
    proxy_list = []
    headers = {
        "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Encoding":"gzip, deflate, sdch, br",
        "Accept-Language":"zh-CN,zh;q=0.8",
        "Cache-Control":"max-age=0",
        "Connection":"keep-alive",
        "Cookie":"channelid=0; sid=1561681200472193; _ga=GA1.2.762166746.1561681203; _gid=GA1.2.971407760.1561681203; _gat=1; Hm_lvt_7ed65b1cc4b810e9fd37959c9bb51b31=1561681203; Hm_lpvt_7ed65b1cc4b810e9fd37959c9bb51b31=1561681203",
        "Host":"www.kuaidaili.com",
        "Upgrade-Insecure-Requests":"1",
        "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0",
    }
    for i in range(1, 100):
        url = "https://www.kuaidaili.com/free/inha/" + str(i)
        html = requests.get(url=url, headers=headers).content
        soup = BeautifulSoup(html, 'html.parser')
        ip_list = ''
        port_list = ''
        for td in soup.find_all('td'):
            data_title = td.get('data-title') or ''  # not every <td> carries data-title
            if "IP" in data_title:
                ip_list = td.get_text()      # grab the IP
            if "PORT" in data_title:
                port_list = td.get_text()    # grab the port
            if ip_list != '' and port_list != '':
                proxy_list.append(ip_list + ":" + port_list)
                ip_list = ''
                port_list = ''
        iv_main()          # visit the article through this page's proxies
        time.sleep(2)
        proxy_list = []    # reset before scraping the next page
That puts the scraped IPs and ports into the proxy_list.
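Free proxies die quickly, so before hammering the article it can pay to weed out the dead ones first. A minimal sketch of such a pre-check (the check_proxies helper and the httpbin.org test URL are my own additions, not part of the original script):

import requests

def check_proxies(proxy_list, test_url="https://httpbin.org/ip", timeout=5):
    """Keep only the proxies that can actually complete a request."""
    alive = []
    for proxy in proxy_list:
        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy}
        try:
            requests.get(test_url, proxies=proxies, timeout=timeout)
            alive.append(proxy)
        except requests.RequestException:
            pass  # dead or too slow; drop it
    return alive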
Simulating visits to the cnblogs article
This part is simple: iterate over the proxy list from above and fetch the article with the requests module.
def iv_main():
    requests.packages.urllib3.disable_warnings()
    url = 'https://www.cnblogs.com/-qing-/p/11080845.html'
    for proxy_ip in proxy_list:
        headers2 = {
            'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'accept-encoding':'gzip, deflate, sdch, br',
            'accept-language':'zh-CN,zh;q=0.8',
            'cache-control':'max-age=0',
            'cookie':'__gads=ID=8c6fd85d91262bb1:T=1561554219:S=ALNI_MZwz0CMKQJK-L19DrX5DPDtYvp63Q; _gat=1; _ga=GA1.2.359634670.1561535095; _gid=GA1.2.1087331661.1561535095',
            'if-modified-since':'Fri, 28 Jun 2019 02:10:23 GMT',
            'referer':'https://www.cnblogs.com/',
            'upgrade-insecure-requests':'1',
            'user-agent':random.choice(user_agent_list),  # random UA per request
        }
        # requests matches proxies by lowercase scheme; set both keys so the
        # https:// target actually goes through the proxy
        proxies = {'http': 'http://' + proxy_ip, 'https': 'http://' + proxy_ip}
        try:
            # verify=False skips SSL certificate checks; timeout drops dead proxies
            requests.get(url, headers=headers2, proxies=proxies, verify=False, timeout=10)
            print("[*] " + proxy_ip + " visit succeeded!")
        except requests.RequestException:
            print("[-] " + proxy_ip + " visit failed!")
It's best to send a randomly chosen User-Agent header with each request:
user_agent_list = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
]
Optimizing and integrating
We can optimize this a little by adding thread and queue parallelism (although with Python's GIL this buys less than you'd hope). The integrated version below just runs the scraper in several plain threads; a queue-based sketch follows it. The final code:
# -*- coding:utf-8 -*-
#By 卿
#Blog:https://www.cnblogs.com/-qing-/
import requests
from bs4 import BeautifulSoup
import re
import time
import random
import threading
print("==========批量提取ip刷博客園訪問量 By 卿=========")
print(" Blogs:https://www.cnblogs.com/-qing-/")
print(" Started! ")
user_agent_list = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
]
def iv_main():
    requests.packages.urllib3.disable_warnings()
    url = 'https://www.cnblogs.com/-qing-/p/11080845.html'
    for proxy_ip in proxy_list:
        headers2 = {
            'accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'accept-encoding':'gzip, deflate, sdch, br',
            'accept-language':'zh-CN,zh;q=0.8',
            'cache-control':'max-age=0',
            'cookie':'__gads=ID=8c6fd85d91262bb1:T=1561554219:S=ALNI_MZwz0CMKQJK-L19DrX5DPDtYvp63Q; _gat=1; _ga=GA1.2.359634670.1561535095; _gid=GA1.2.1087331661.1561535095',
            'if-modified-since':'Fri, 28 Jun 2019 02:10:23 GMT',
            'referer':'https://www.cnblogs.com/',
            'upgrade-insecure-requests':'1',
            'user-agent':random.choice(user_agent_list),  # random UA per request
        }
        # requests matches proxies by lowercase scheme; set both keys so the
        # https:// target actually goes through the proxy
        proxies = {'http': 'http://' + proxy_ip, 'https': 'http://' + proxy_ip}
        try:
            # verify=False skips SSL certificate checks; timeout drops dead proxies
            requests.get(url, headers=headers2, proxies=proxies, verify=False, timeout=10)
            print("[*] " + proxy_ip + " visit succeeded!")
        except requests.RequestException:
            print("[-] " + proxy_ip + " visit failed!")
## Fetch the proxy IPs
def Get_proxy_ip():
    global proxy_list
    proxy_list = []
    headers = {
        "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Encoding":"gzip, deflate, sdch, br",
        "Accept-Language":"zh-CN,zh;q=0.8",
        "Cache-Control":"max-age=0",
        "Connection":"keep-alive",
        "Cookie":"channelid=0; sid=1561681200472193; _ga=GA1.2.762166746.1561681203; _gid=GA1.2.971407760.1561681203; _gat=1; Hm_lvt_7ed65b1cc4b810e9fd37959c9bb51b31=1561681203; Hm_lpvt_7ed65b1cc4b810e9fd37959c9bb51b31=1561681203",
        "Host":"www.kuaidaili.com",
        "Upgrade-Insecure-Requests":"1",
        "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0",
    }
    for i in range(1, 100):
        url = "https://www.kuaidaili.com/free/inha/" + str(i)
        html = requests.get(url=url, headers=headers).content
        soup = BeautifulSoup(html, 'html.parser')
        ip_list = ''
        port_list = ''
        for td in soup.find_all('td'):
            data_title = td.get('data-title') or ''  # not every <td> carries data-title
            if "IP" in data_title:
                ip_list = td.get_text()      # grab the IP
            if "PORT" in data_title:
                port_list = td.get_text()    # grab the port
            if ip_list != '' and port_list != '':
                proxy_list.append(ip_list + ":" + port_list)
                ip_list = ''
                port_list = ''
        iv_main()          # visit the article through this page's proxies
        time.sleep(2)
        proxy_list = []    # reset before scraping the next page
# Note: all ten threads run the same Get_proxy_ip and share the global proxy_list.
th = []
th_num = 10
for x in range(th_num):
    t = threading.Thread(target=Get_proxy_ip)
    th.append(t)
for x in range(th_num):
    th[x].start()
for x in range(th_num):
    th[x].join()
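The "queue" half of the optimization isn't actually in the code above: the ten threads each run the whole scraper in parallel and stomp on the shared proxy_list. A hedged sketch of what a queue-based split could look like, with one producer scraping pages and several workers consuming proxies (producer, worker, and the thread counts are my own illustrative choices, not the original design; requests, BeautifulSoup, time, random, and user_agent_list come from the script above):

import queue
import threading

proxy_queue = queue.Queue()

def producer(page_count=10):
    """Scrape kuaidaili pages and push 'ip:port' strings onto the queue."""
    for i in range(1, page_count + 1):
        url = "https://www.kuaidaili.com/free/inha/" + str(i)
        html = requests.get(url=url, headers={'user-agent': random.choice(user_agent_list)}).content
        soup = BeautifulSoup(html, 'html.parser')
        ip_addr = port = ''
        for td in soup.find_all('td'):
            data_title = td.get('data-title') or ''
            if "IP" in data_title:
                ip_addr = td.get_text()
            if "PORT" in data_title:
                port = td.get_text()
            if ip_addr and port:
                proxy_queue.put(ip_addr + ":" + port)
                ip_addr = port = ''
        time.sleep(2)

def worker():
    """Pull proxies off the queue and visit the article through each one."""
    url = 'https://www.cnblogs.com/-qing-/p/11080845.html'
    while True:
        try:
            proxy_ip = proxy_queue.get(timeout=10)  # give up once the queue stays empty
        except queue.Empty:
            return
        proxies = {'http': 'http://' + proxy_ip, 'https': 'http://' + proxy_ip}
        try:
            requests.get(url, headers={'user-agent': random.choice(user_agent_list)},
                         proxies=proxies, verify=False, timeout=10)
            print("[*] " + proxy_ip + " visit succeeded!")
        except requests.RequestException:
            print("[-] " + proxy_ip + " visit failed!")
        finally:
            proxy_queue.task_done()

threads = [threading.Thread(target=producer)] + \
          [threading.Thread(target=worker) for _ in range(5)]
for t in threads:
    t.start()
for t in threads:
    t.join()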
Results

That's all for this post; hopefully it helps with your own learning.