I recently came across a good novel and scraped it as an exercise. The script below does not work on every site, but with small tweaks to match the page content it handles most pages.

#!/usr/bin/python
# -*- coding:utf-8 -*-
# Author: Playon
# Time: 2020/8/18 9:57

import os
import random
import re

import requests
from bs4 import BeautifulSoup

def finder(url, book):
    """
    Scrape one chapter page, append its text to the output file,
    then recurse into the "next page" link.
    :param url: URL of the chapter page to start from
    :param book: path of the output text file
    :return:
    """
    # Collect the chapter titles already saved, so a re-run skips
    # chapters that were downloaded before.
    title = []
    rec = re.compile(r'第\d+章')
    if os.path.exists(book):
        with open(book, 'r', encoding='utf-8') as f:
            for t in f.readlines():
                t = t.strip()
                if re.findall(rec, t) and t not in title:
                    title.append(t)
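    # A pool of desktop User-Agent strings; one is picked at random per
    # request so the traffic looks a little less like a single bot.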
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
        "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
        ]
    ua = random.choice(user_agent_list)
    headers = {
        'User-Agent': ua,
        # 'Referer': 'Referer',
        # 'Connection': 'close',
    }
    # The site is GBK-encoded, so decode manually and drop undecodable bytes.
    res = requests.get(url, headers=headers, timeout=10).content.decode('gbk', 'ignore')
    soup = BeautifulSoup(res, 'html.parser')

    # Chapter title: take the <h1> inside div.bookname and strip site boilerplate.
    bookname = soup.find('div', attrs={'class': 'bookname'}).h1.text
    bookname = re.sub('文章正文 |所有章节目录 ', '', bookname)
    if bookname.strip() not in title:

        # Chapter body: every text node inside div.box_con, whitespace-stripped.
        content = soup.find('div', attrs={'class': 'box_con'}).stripped_strings
        with open(book, 'a', encoding='utf-8') as f:
            # Strip "(1/3)"-style page markers and site boilerplate from the title.
            rec1 = re.compile(r'\(\d/\d\)|文章正文 |所有章节目录 ')
            bookname = re.sub(rec1, '', bookname)
            f.write('\n' + bookname.title() + '\n')

            # Drop lines that are site navigation / ads rather than story text.
            rec2 = re.compile(r'^textselect|大神小说网|^>|^投推荐票|^[上下]一章|^添加便签|^荣耀巅峰|^←|^→|^章节|^推荐诸位读者|\(.*?\)   |^文章正文|^第\d+章|^所有章节目录')
            for i in content:
                if not re.findall(rec2, i):
                    # .title() capitalizes English words; restore the KPL acronym afterwards.
                    f.write(re.sub('Kpl', 'KPL', i.title() + '\n'))

        print(url)

    # Next page: walk sibling by sibling from the first link in div.bottem1
    # to reach the "next chapter" <a> tag.
    next_page = 'http://www.dashenxiaoshuo.com' + soup.find('div', attrs={'class': 'bottem1'}).a.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling['href']

    # Chapter URLs end in digits + ".html"; anything else (e.g. a link back
    # to the book index) means the last chapter has been reached.
    rec = re.compile(r'\d+\.html$')
    if re.findall(rec, next_page):
        finder(next_page, book)
    else:
        return 'End'
url="http://www.dashenxiaoshuo.com/html/32/32313/15350782.html"
book='../booklist/rydf.txt'
finder(url,book)
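
One caveat: finder calls itself once per chapter, and CPython's default recursion limit is roughly 1000 frames, so a novel with more chapters than that will raise RecursionError. A minimal iterative sketch, assuming you change the tail of finder to return next_page instead of calling itself (and to return None once the pattern no longer matches):

def crawl(url, book):
    # Follow "next page" links in a loop instead of recursing, so the
    # chapter count no longer matters. Assumes the modified finder()
    # described above, which returns the next URL or None at the end.
    while url:
        url = finder(url, book)

This keeps the call stack flat no matter how long the book is.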

When using it yourself, change url and the save path in book.
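
If you run it often, a small command-line wrapper saves editing the source each time. This is a sketch only; the argument names url and book are my own choice, not part of the original script:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Scrape a novel chapter by chapter')
    parser.add_argument('url', help='URL of the first chapter page')
    parser.add_argument('book', help='path of the output .txt file')
    args = parser.parse_args()
    finder(args.url, args.book)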
