100字范文,内容丰富有趣,生活中的好帮手!
100字范文 > python爬虫豆瓣TOP250电影信息并写入数据库

python爬虫豆瓣TOP250电影信息并写入数据库

时间:2023-10-26 21:06:53

相关推荐

python爬虫豆瓣TOP250电影信息并写入数据库

初步完成一个页面的数据爬取与写入数据库

import requests
import pymysql
from loguru import logger
from lxml import etree

# Connect to a local MySQL server (database `db`) — credentials are
# hard-coded for this tutorial script; verify before running.
db = pymysql.connect(host='localhost', port=3306, user='root', passwd='1234',
                     db='db', charset='utf8')
logger.info("正在连接到数据库")
cursor = db.cursor()

# Recreate the `movie` table from scratch on every run.
cursor.execute("DROP TABLE IF EXISTS movie")
# BUG FIX: original read "CREATE TABLEmovie" (missing space) — invalid SQL.
sql = ('CREATE TABLE movie(name CHAR(255), year INT, country CHAR(255), '
       'director CHAR(255), type CHAR(20), sorce CHAR(20), notes CHAR(255))')
cursor.execute(sql)
logger.info("创建表格")


def get_html():
    """Fetch the first page of the Douban TOP250 list and return its HTML text."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/92.0.4515.159 Safari/537.36'
    }
    # BUG FIX: original used the bare path '/top250'; requests needs an
    # absolute URL including the scheme and host.
    url = 'https://movie.douban.com/top250'
    resp = requests.get(url, headers=headers)
    html = resp.text
    resp.close()
    return html


if __name__ == '__main__':
    logger.info("执行主程序...")
    xhtml = etree.HTML(get_html())
    logger.info("...")

    # Absolute XPaths against the page layout — fragile if Douban changes
    # its markup; verify the paths still match before relying on them.
    names = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[1]/a/span[1]/text()')
    logger.info("已完成电影名字的抓取...")
    notes = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[2]/p[2]/span/text()')
    logger.info("已完成电影标语的抓取...")
    sorce = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[2]/div/span[2]/text()')
    logger.info("已完成电影评分的抓取...")
    # One text node per film containing "year / country / genre"; the year is
    # extracted by a fixed slice [29:33] and country/genre by split("/") —
    # this assumes the node's leading whitespace is constant (TODO: confirm).
    years = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[2]/p[1]/text()[2]')
    logger.info("已完成电影年份,国家,类型的抓取...")
    directors = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[2]/p[1]/text()[1]')
    logger.info("已完成电影导演的抓取...")

    for i in range(len(sorce)):
        sql = ("insert into movie(name,year,country,sorce,director,type,notes) "
               "values(%s,%s,%s,%s,%s,%s,%s)")
        par = (names[i],
               years[i][29:33],                       # year: fixed-position slice
               years[i].split("/")[-1],               # country: last "/"-field
               sorce[i],
               (directors[i].split(':')[1])[:-2],     # director: text after "导演:", trailing chars trimmed
               years[i].split("/")[-2],               # genre: second-to-last "/"-field
               notes[i])
        cursor.execute(sql, par)
    # BUG FIX: original had the mangled call `mit()` — without db.commit()
    # nothing is persisted. One commit after the loop is sufficient.
    db.commit()
    logger.info("数据已经写入表格中...")
    db.close()

以上面为模板,找到豆瓣翻页后网址的规律,重新封装函数

去掉了 notes 字段

import requests
import pymysql
from loguru import logger
from lxml import etree
from faker import Faker  # kept: original imports it for (commented-out) UA rotation

# Connect to a local MySQL server (database `db`) — credentials are
# hard-coded for this tutorial script; verify before running.
db = pymysql.connect(host='localhost', port=3306, user='root', passwd='1234',
                     db='db', charset='utf8')
logger.info("正在连接到数据库")
cursor = db.cursor()

# Recreate the `movie` table from scratch on every run.  This version drops
# the `notes` column and renames the score column to `score`.
cursor.execute("DROP TABLE IF EXISTS movie")
# BUG FIX: original read "CREATE TABLEmovie" (missing space) — invalid SQL.
sql = ('CREATE TABLE movie(name CHAR(255), year INT, country CHAR(255), '
       'director CHAR(255), type VARCHAR(255), score CHAR(20))')
cursor.execute(sql)
logger.info("创建表格")


def get_xhtml(num=0):
    """Fetch one page of the Douban TOP250 list and return it as an lxml tree.

    num -- the `start` offset in the list URL (0, 25, 50, ... per page).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/92.0.4515.159 Safari/537.36',
        'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,"
                  "application/signed-exchange;v=b3;q=0.9 ",
        'origin': ''
    }
    # BUG FIX: original used the bare path '/top250?...'; requests needs an
    # absolute URL including the scheme and host.
    url = f'https://movie.douban.com/top250?start={num}&filter='
    logger.info("正在发送请求...")
    resp = requests.get(url, headers=headers)
    logger.info("正在响应...")
    html = resp.text
    resp.close()
    return etree.HTML(html)


if __name__ == '__main__':
    for i in range(10):  # 10 pages x 25 films = TOP 250
        logger.info("执行主程序...")
        xhtml = get_xhtml(25 * i)
        logger.info(f"-----正在抓取第{i + 1}页内容-----")

        # Absolute XPaths against the page layout — fragile if Douban changes
        # its markup; verify the paths still match before relying on them.
        names = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[1]/a/span[1]/text()')
        print(names)
        logger.info("已完成电影名字的抓取...")
        # `notes` is still scraped but no longer stored (column removed).
        notes = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[2]/p[2]/span/text()')
        logger.info("已完成电影标语的抓取...")
        sorce = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[2]/div/span[2]/text()')
        logger.info("已完成电影评分的抓取...")
        # One text node per film containing "year / country / genre"; year is
        # taken by fixed slice [29:33] — assumes constant leading whitespace
        # (TODO: confirm across all pages).
        years = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[2]/p[1]/text()[2]')
        logger.info("已完成电影年份,国家,类型的抓取...")
        directors = xhtml.xpath('/html/body/div[3]/div[1]/div/div[1]/ol/li/div/div[2]/div[2]/p[1]/text()[1]')
        logger.info("已完成电影导演的抓取...")

        for j in range(len(sorce)):
            print(names[j])
            print(years[j][29:33], years[j].split("/")[-1], sorce[j],
                  (directors[j].split(':')[1])[:-2], years[j].split("/")[-2])
            sql = ("insert into movie(name,year,country,score,director,type) "
                   "values(%s,%s,%s,%s,%s,%s)")
            par = (names[j],
                   years[j][29:33],                    # year: fixed-position slice
                   years[j].split("/")[-1],            # country: last "/"-field
                   sorce[j],
                   (directors[j].split(':')[1])[:-2],  # director: text after "导演:", trimmed
                   years[j].split("/")[-2])            # genre: second-to-last "/"-field
            cursor.execute(sql, par)
        # BUG FIX: original had the mangled call `mit()` — without db.commit()
        # nothing is persisted. Commit once per page.
        db.commit()
        logger.info("数据已经写入表格中...")
    # Close the connection after all pages are written (the original left
    # this commented out, which leaks the connection).
    db.close()

本内容不代表本网观点和政治立场,如有侵犯你的权益请联系我们处理。
网友评论
网友评论仅供其表达个人看法,并不表明网站立场。