
import re
import json
import requests
from bs4 import BeautifulSoup


def get_link(url):
    headers = {
        'Accept-Language': 'zh-CN,zh;q=0.9,en-CN;q=0.8,en;q=0.7,zh-TW;q=0.6',
        'Cookie': 'rewardsn=; wxtokenkey=777',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36'
    }
    # Request the album JSON and extract the WeChat article link from it
    response = requests.get(url, headers=headers)
    data = json.loads(response.text)
    link = data['getalbum_resp']['article_list'][0]['url']
    return link


def get_zip_url(link):
    # Request the WeChat article link and parse the page
    response = requests.get(link)
    soup = BeautifulSoup(response.text, 'html.parser')
    # Extract zip links from the article text: regex-match links starting with https:// and ending in .zip
    content = soup.find('div', {'id': 'js_content'}).get_text()
    zip_url = re.findall(r'https://.*?\.zip', content)
    return zip_url


if __name__ == '__main__':
    # Album JSON feed listing the release article for the latest IP database issue
    url = 'https://mp.weixin.qq.com/mp/appmsgalbum?__biz=Mzg3Mzc0NTA3NA==&action=getalbum&album_id=2329805780276838401&f=json'
    try:
        link = get_link(url)
        if link:
            zip_url = get_zip_url(link)
            if zip_url:
                print(zip_url[0])
                # Download the zip file
                response = requests.get(zip_url[0])
                with open('cz88.zip', 'wb') as f:
                    f.write(response.content)
                print('cz88.zip has been downloaded')
            else:
                print("No zip link found")
        else:
            print("No WeChat article link found")
    except Exception as e:
        print("Error occurred:", e)