# Baidu ranking query (百度排名查询)
# -*- coding=utf-8 -*-
import requests
import re
import time
# Shared HTTP request headers: a desktop Chrome User-Agent so Baidu serves
# the normal desktop result page instead of blocking the scripted client.
headers = {
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"
}
# Build the list of Baidu search-result page URLs for a keyword.
def get_search_url(keyword, num):
    """Return one Baidu SERP URL per result page for `keyword`.

    keyword: search term (inserted into the query string as-is).
    num:     number of result pages to query; Baidu paginates via `pn`
             in steps of 10 results per page.

    Returns a list of `num` absolute URLs ([] when num == 0).
    """
    # NOTE(review): the original used a schemeless "/s?wd=..." path, which
    # requests.get() rejects with MissingSchema; the Baidu host is restored.
    return [
        f"https://www.baidu.com/s?wd={keyword}&ie=UTF-8&pn={page * 10}"
        for page in range(num)
    ]
# Fetch the search-result page content and split it into result fragments.
# NOTE(review): this region was damaged in extraction and is NOT valid
# Python as it stands — the raw-string regex literals assigned to
# `search_contentze` and `search_objectze` are truncated (their HTML-tag
# patterns were stripped), and the `def` line plus the `if` branch of the
# redirect-resolving helper (`get_website_url`, called elsewhere in this
# file) are missing between the two halves below. The original patterns
# and the helper's header must be recovered before this can run.
def get_search_object(search_url):
    search_results=[]
    # Download the raw HTML of one Baidu result page.
    req=requests.get(search_url,headers=headers,timeout=10).text
    search_contentze=r'
(.+?)
    search_content=re.findall(search_contentze,req,re.S)
    search_content=search_content[0]
    search_objectze=r'
zsurl = f'{baidu_url}'
# Resolve the real destination address behind the Baidu redirect link.
else:
    r = requests.head(baidu_url, stream=True)
    # Baidu redirect links report the real target URL in the Location header.
    website_url = r.headers['Location']
    #print(website_url)
    return website_url
# Extract ranking data from the parsed search-result fragments.
def get_search_data(search_objects, search_website):
    """Scan each result fragment for rank id, title and link; keep entries
    whose resolved target URL contains `search_website`.

    search_objects:  list of HTML fragments, one per Baidu result entry.
    search_website:  substring matched against the resolved target URL.

    Returns the matching report lines joined with newlines ('' if none).

    NOTE(review): relies on the module-level global `keyword` (set in the
    __main__ section) when formatting each output line.
    """
    search_data = ''
    # Rank id appears as id="NN" followed by either `srcid` or `tpl`
    # depending on the result template.
    baidu_idze1 = r'id="([0-9]{1,4})" srcid'
    baidu_idze2 = r'id="([0-9]{1,4})" tpl'
    # NOTE(review): the original pattern r'target="_blank"(.+?)' captured a
    # single stray character; the closing ">" and "</a>" were almost
    # certainly lost in extraction, so the intended pattern is restored.
    titleze = r'target="_blank">(.+?)</a>'
    # Some templates emit `href = "..."`, others the compact `href="..."`.
    baidu_urlze1 = r'href = "(.+?)"'
    baidu_urlze2 = r'href="(.+?)"'
    for search_object in search_objects:
        # Rank: try the `srcid` template first, then fall back to `tpl`.
        # (The original bare `except:` pair crashed with an uncaught
        # IndexError when both patterns missed; skip such fragments instead.)
        id_matches = (re.findall(baidu_idze1, search_object, re.S)
                      or re.findall(baidu_idze2, search_object, re.S))
        if not id_matches:
            continue
        baidu_id = id_matches[0]
        # Title: take the anchor text and strip tag remnants / whitespace.
        titles = re.findall(titleze, search_object, re.S)
        if not titles:
            continue
        title = re.sub(r'[\n\t> ]', "", titles[0])
        # Link: spaced `href = ` form first, then the compact form.
        urls = (re.findall(baidu_urlze1, search_object, re.S)
                or re.findall(baidu_urlze2, search_object, re.S))
        if not urls:
            continue
        baidu_url = urls[0]
        # Follow the Baidu redirect link to the real site address.
        website_url = get_website_url(baidu_url)
        if search_website in website_url:
            search_result = (f'关键词:{keyword},排名:{baidu_id},标题:{title},'
                             f'网址:{website_url},百度链接:{baidu_url}')
            print(search_result)
            search_data = '%s%s%s' % (search_data, search_result, '\n')
        time.sleep(3)  # throttle so Baidu does not block repeated lookups
    return search_data
# Script entry point: read the query parameters from stdin, then walk the
# Baidu result pages one by one and print a framed report of all matches.
if __name__ == '__main__':
    keyword = input('请输入关键词>>')
    num = int(input('请输入最大查询页数>>'))
    search_website = input('请输入网址(建议输入不带www网址)>>')
    collected = []
    for page_no, search_url in enumerate(get_search_url(keyword, num), start=1):
        print(f'正在查询{page_no}页排名...')
        page_objects = get_search_object(search_url)
        collected.append(get_search_data(page_objects, search_website))
    search_datas = ''.join(collected)
    print('========================查询结果========================\n\n')
    print(search_datas)
    print('\n\n========================查询结束========================\n')
# 360 Search ranking query (360搜索排名查询)
# -*- coding=utf-8 -*-
# NOTE(review): this file concatenates two standalone scripts; the imports
# and the `headers` dict below redefine (shadow) the ones declared at the
# top of the Baidu script above.
import requests
from lxml import etree
# Desktop Chrome User-Agent so so.com serves the normal result page.
headers={
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
}
# Query 360 Search (so.com) rankings for a keyword.
def ss360(keyword, num, search_url):
    """Scan so.com result pages 1..num for `keyword` and report every
    result whose link contains `search_url`.

    keyword:    search term.
    num:        number of result pages to scan.
    search_url: substring matched against each result link.

    Returns the matching report lines joined with newlines ('' if none).
    """
    search_datas = ''
    for x in range(1, num + 1):
        print(f"正在查询第{x}页搜索结果...")
        # NOTE(review): the original used a schemeless "/s?q=..." path,
        # which requests.get() rejects with MissingSchema; the so.com host
        # is restored. timeout added for consistency with the Baidu script.
        url = f"https://www.so.com/s?q={keyword}&pn={x}"
        html = requests.get(url, headers=headers, timeout=10).text
        con = etree.HTML(html)
        # NOTE(review): the original expression '///h3/a' is not valid
        # XPath (lxml raises XPathEvalError); '//h3/a' is the evident
        # intent. An unused title-only xpath query was removed.
        datas = con.xpath('//h3/a')
        for data in datas:
            # On-page position is carried in the data-res attribute as a
            # Python/JSON dict literal.
            try:
                data_res = data.attrib['data-res']
                # WARNING: eval() on attacker-controlled page markup is
                # dangerous; ast.literal_eval is the safe equivalent.
                data_res = eval(data_res)
                pos = data_res['pos']
                print(pos)
            except Exception:  # narrowed from bare except: missing attr / bad literal
                pos = ''
            # Prefer the real target in data-url; fall back to href.
            try:
                data_url = data.attrib['data-url']
            except KeyError:
                data_url = data.attrib['href']
            # Skip internal static assets and javascript pseudo-links.
            if "/static/" not in data_url and "javascript:" not in data_url:
                print(data_url)
                print('\r')
                if search_url in data_url:
                    # Overall rank = 10 results per earlier page + on-page
                    # position; blank when the position could not be parsed
                    # (the original raised TypeError in that case).
                    pm = (x - 1) * 10 + pos if pos != '' else ''
                    search_data = f'第{x}页,排名:{pos}/{pm},链接:{data_url}'
                    print(search_data)
                    search_datas = '%s%s%s' % (search_datas, search_data, '\n')
                    print(search_datas)
    return search_datas
# Entry point for the 360 ranking lookup: read the query parameters from
# stdin, run the page-by-page search, then print the framed result report.
if __name__ == "__main__":
    keyword = input('请输入关键词>>')
    num = int(input('请输入最大查询页数>>'))
    search_website = input('请输入网址(建议输入不带www网址)>>')
    search_datas = ss360(keyword, num, search_website)
    print('========================查询结果========================\n\n')
    print(search_datas)
    print('\n\n========================查询结束========================\n')