# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import re
import urllib.request
import urllib.error
import xlwt
import sqlite3


def main():
    # The page number is appended to this base URL in getdata()
    baseurl = "https://search.bilibili.com/all?keyword=roblox&from_source=webtop_search&spm_id_from=333.851&page="
    # 1. Scrape and parse the pages
    datalist = getdata(baseurl)
    savepath = "biliroblox.xls"
    # 3. Save the data
    savedata(datalist, savepath)
    # askurl("https://search.bilibili.com/all?keyword=roblox&from_source=webtop_search&spm_id_from=333.851")


# Regular-expression rules for the pieces of each video entry
findlink = re.compile(r'href="//(.*?)"')  # link to the video detail page
findtitle = re.compile(
    r'target="_blank" title="(.*)"><div class="img">',
    re.S)  # video title; re.S lets "." match newlines as well
findup = re.compile(r'target="_blank">(.*)</a></span></div></div></li>', re.S)  # uploader (UP)
findguankan = re.compile(
    r'<i class="icon-playtime"></i>(.*)</span><span class="so-icon hide" title="弹幕">',
    re.S)  # view count
finddanmu = re.compile(
    r'<i class="icon-subtitle"></i>(.*)</span><span class="so-icon time" title="上传时间">',
    re.S)  # danmaku (bullet comment) count
findtime = re.compile(r'<i class="icon-date"></i>(.*?)</span>', re.S)  # upload date
findjianjie = re.compile(r'<div class="des hide">(.*?)</div>', re.S)  # description
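# Quick sanity check of one pattern against a made-up fragment (the real page
# markup may differ; "demo title" below is a placeholder, not scraped data):
#   >>> re.findall(findtitle, '... target="_blank" title="demo title"><div class="img"> ...')
#   ['demo title']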

# Scrape and parse the result pages


def getdata(baseurl):
    datalist = []
    for i in range(0, 50):  # fetch 50 result pages
        url = baseurl + str(i + 1)
        html = askurl(url)  # raw HTML of the page
        # 2. Parse the data item by item
        soup = BeautifulSoup(html, "html.parser")
        for item in soup.find_all(
                'li', class_="video-item matrix"):  # every matching search-result entry
            # print(item)  # debugging: inspect one raw item
            data = []  # all fields of a single video
            item = str(item)

            title = re.findall(findtitle, item)[0]  # video title
            data.append(title)
            up = re.findall(findup, item)[0]  # uploader (UP)
            data.append(up)
            guankan = re.findall(findguankan, item)[0].replace("\n", "")  # view count
            data.append(guankan)
            danmu = re.findall(finddanmu, item)[0].replace("\n", "")  # danmaku count
            data.append(danmu)
            time = re.findall(findtime, item)[0].replace("\n", "")  # upload date
            data.append(time)
            jianjie = re.findall(findjianjie, item)[0].replace("\n", "")  # description
            data.append(jianjie)
            link = re.findall(findlink, item)[0]  # link to the video page
            data.append(link)

            datalist.append(data)  # collect the parsed entry
    return datalist

# Fetch the HTML content of a single URL


def askurl(url):
    # Pretend to be a regular browser so the server does not reject the request
    head = {
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36 Edg/90.0.818.42"}
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html


def savedata(datalist, savepath):
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)  # create the workbook
    sheet = book.add_sheet('bilibili', cell_overwrite_ok=True)  # create the worksheet
    # column headers: title, uploader, views, danmaku count, upload date, description, link
    col = ("标题", "up主", "观看量", "弹幕量", "上传时间", "内容简介", "网页链接")
    for i in range(0, 7):
        sheet.write(0, i, col[i])  # header row
    for i in range(0, len(datalist)):  # write every scraped entry
        print(f"Writing row {i}......")
        data = datalist[i]
        for j in range(0, 7):
            sheet.write(i + 1, j, data[j])

    book.save(savepath)  # 3. Save the data
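
# sqlite3 is imported above but never used. As an optional sketch (not part of
# the original flow), the same rows could be written to a SQLite database
# instead of an .xls file; the table and column names below are made up for
# illustration.
def savedata2db(datalist, dbpath):
    conn = sqlite3.connect(dbpath)
    cur = conn.cursor()
    cur.execute("""
        CREATE TABLE IF NOT EXISTS bili_roblox (
            title TEXT, up TEXT, views TEXT, danmu TEXT,
            upload_time TEXT, description TEXT, link TEXT
        )
    """)
    # each entry in datalist has exactly 7 fields, matching the columns above
    cur.executemany(
        "INSERT INTO bili_roblox VALUES (?, ?, ?, ?, ?, ?, ?)", datalist)
    conn.commit()
    conn.close()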


if __name__ == "__main__":
    # run the scraper
    main()
