Background: one day I suddenly wanted to read a novel, but the site hosting it offered no download and was plastered with ads, so...
Approach:
–Analyze the site's structure
–Scrape the table of contents (chapter titles and links)
–Fetch the chapter pages in parallel (multiprocessing)
–Extract the content with regular expressions
–Write failures to an error log
–Merge the per-chapter txt files into a single file
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 2 18:23:49 2019
@author: 24709
"""
import urllib.request
import multiprocessing
from bs4 import BeautifulSoup
import re
import os
import time

# Novel home page: http://m.wenxuedu.com/html/208249
dirPath = "C:/Users/24709/Desktop/txtxt/"
# Folder holding the per-chapter txt files (mind the difference between \ and /);
# the merged result is written to outfile.txt
titles = []  # all chapter titles
urls = []    # all chapter URLs
################ Scrape chapter titles and URLs from the TOC pages ##################################
def geturl():
    print('Loading chapter list.....')
    for index in range(5, 6):  # (5,151) would scrape TOC pages 5 through 150
        for attempt in range(2):  # one retry on failure
            try:
                request = urllib.request.Request("http://m.wenxuedu.com/html/208249_" + str(index))
                response = urllib.request.urlopen(request, timeout=8)
                content = response.read()
                data = content.decode('utf-8')
                # convert to soup
                soup = BeautifulSoup(data, "html.parser")
                i = 0
                for link in soup.findAll('li'):
                    # read the link text and href; keep only entries whose text
                    # contains 章 (chapter), skipping the first five such matches
                    # (apparently non-chapter links at the top of the page)
                    if re.search(r'章', str(link.a.string)):
                        if i > 4:
                            print(str(link.a.string))
                            titles.append(str(link.a.string))
                            urls.append(str(link.a.get('href')))
                        i = i + 1
                break  # success, no retry needed
            except Exception:
                if attempt == 1:
                    # the TOC page failed twice; write its index to the error log
                    writefile('error_log', "index:{} \n".format(index))
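# --- Illustration only (never called by the script): how the <li> filter in
# geturl() behaves. The HTML fragment below is invented to stand in for the
# site's real TOC markup.
def _demo_toc_filter():
    sample = BeautifulSoup(
        "<ul><li><a href='/sort/1'>玄幻小說</a></li>"
        "<li><a href='/html/208249/1'>第一章 開端</a></li></ul>",
        "html.parser")
    for link in sample.findAll('li'):
        if re.search(r'章', str(link.a.string)):  # '玄幻小說' is filtered out
            print(link.a.string, link.a.get('href'))  # -> 第一章 開端 /html/208249/1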
########################### Download chapter content by URL ##################################
def getcontent(url):
    request = urllib.request.Request("http://m.wenxuedu.com" + url)
    response = urllib.request.urlopen(request)
    content = response.read()
    data = content.decode('utf-8')
    # convert to soup
    soup = BeautifulSoup(data, "html.parser")
    # drop newlines and the surrounding <p>...</p> tags
    a = str(soup.find(id='novelcontent').p).replace('\n', '')[3:-4]
    try:
        # try to strip the redundant header (title) and trailer
        TEMP = int(re.search(r"[)]", a).span()[0]) + 1
        a = a[TEMP:-1 * (re.search('\u3000\u3000', a[::-1]).span()[0] + 2)]
    except Exception:
        pass
    c = a  # fall back to page one alone if there is no second page
    try:
        # a long chapter may continue on a second page at <first-page URL>_2
        request = urllib.request.Request("http://m.wenxuedu.com" + url[:-1] + "_2")
        response = urllib.request.urlopen(request)
        content = response.read()
        data = content.decode('utf-8')
        # convert to soup
        soup = BeautifulSoup(data, "html.parser")
        b = str(soup.find(id='novelcontent').p).replace('\n', '')[3:-4]
        try:
            # try to strip the redundant header (title) from the second page
            TEMP = int(re.search(r"[)]", b).span()[0]) + 1
            c = a + b[TEMP:-1]
        except Exception:
            c = a + b
    except Exception:
        # this chapter has no second page
        pass
    return c
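# --- Illustration only (never called by the script): the slicing done in
# getcontent(). A page body starts with a parenthesized title and carries
# trailing boilerplate after the last pair of ideographic spaces; the sample
# string here is invented for the demo.
def _demo_trim():
    a = "(第五章)\u3000\u3000正文第一段\u3000\u3000廣告"
    TEMP = int(re.search(r"[)]", a).span()[0]) + 1  # index just past ")"
    a = a[TEMP:-1 * (re.search('\u3000\u3000', a[::-1]).span()[0] + 2)]
    print(a)  # -> "\u3000\u3000正文第一段"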
############################ Write to a file ###########################################
def writefile(title, content):
    # append mode, so repeated calls (e.g. to the error log) accumulate
    with open(dirPath + title + ".txt", 'a', encoding='utf-8') as f:
        f.write(content)
####################### Try a download; log failures ##############################
def download(title_url):
    try:
        writefile(title_url[0], getcontent(title_url[1]))
    except Exception:
        writefile('error_log', "title:{} url:{} \n".format(title_url[0], title_url[1]))
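# --- Optional helper, a sketch not present in the original flow: re-run the
# chapters recorded in the error log. It assumes the "title:{} url:{}" line
# format written by download() above; "index:" lines from geturl() are ignored.
def retry_failed():
    with open(dirPath + "error_log.txt", 'r', encoding='utf-8') as f:
        for line in f:
            m = re.match(r'title:(.+) url:(\S+)', line)
            if m:
                download([m.group(1), m.group(2)])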
########################## Merge the txt files ##############################################
def linkTheBook():
    print("------------------- Merging txt -------------------")
    start0 = time.time()
    files = []
    for name in os.listdir(dirPath):
        # keep only files whose names contain a chapter number
        if re.search(r'(\d+)', name):
            files.append(name)
    # sort by the chapter number extracted with a regex
    files.sort(key=lambda i: int(re.search(r'(\d+)', i)[0]))
    res = ""
    for name in files:
        if name.endswith(".txt"):
            title = name[0:len(name) - 4]
            with open(dirPath + name, "r", encoding='utf-8') as f:
                content = f.read()
            res += "\n%s\n\n%s" % (title, content)
    with open(dirPath + "outfile.txt", "w", encoding='utf-8') as outFile:
        outFile.write(res)
    end0 = time.time()
    print("------------------- txt merge complete -------------------")
    print("The book has " + str(len(files)) + " chapters and " + str(len(res)) + " characters")
    print('Elapsed time %0.2f s.' % (end0 - start0))
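# --- Illustration only (never called by the script): why linkTheBook() sorts
# with a numeric key. A plain string sort would put 第10章 before 第2章; the
# file names below are made up.
def _demo_sort():
    files = ["第10章.txt", "第2章.txt", "第1章.txt"]
    files.sort(key=lambda i: int(re.search(r'(\d+)', i)[0]))
    print(files)  # -> ['第1章.txt', '第2章.txt', '第10章.txt']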
###############################################################################
if __name__ == "__main__":
    start = time.time()
    geturl()
    # scrape chapter titles and URLs from the TOC into titles and urls
    print("------------------- Starting download -------------------")
    p = []
    print('Main process PID: %s' % os.getpid())
    for [title, url] in zip(titles, urls):
        # one process per chapter, so different chapters download concurrently
        p.append(multiprocessing.Process(target=download, args=([title, url],)))
    print("Waiting for all processes to finish........")
    for i in p:
        i.start()
    for i in p:
        i.join()
    end = time.time()
    print("------------------- All downloads complete -------------------")
    print('Elapsed time %0.2f s.' % (end - start))
    ###############################################################################
    # linkTheBook()
    pass
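Note: linkTheBook() is left commented out in the main block above, presumably so the merge can be run as a second pass once error_log.txt has been checked and any failed chapters re-fetched.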
This post is a record of my own learning; comments and corrections are welcome.