#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Author: Wos
# The website has many domain aliases; change the `premain` variable if the default stops working.
# When saving a page range, <= 10 pages is recommended; you can also save a single page by entering one page number.
# You may need to adjust the webbrowser.open() calls for the browser you use (I use "surf" on Linux), so set that to whatever suits you.
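#
# Example session (illustrative; the script filename and the page count are assumptions):
#   $ python3 fsporn.py
#   [exit()]Search: some keyword
#   (total: 12 pages. Enter a range to save, ...): 1~3
#   -> pages 1~3 are saved to fsporn.html and opened in the browser;
#      right-click-copy any video link there and mpv plays it automatically.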
import requests
import os
import urllib.parse
import sys
import pyperclip
import time
import webbrowser
tlist = []   # video titles
tlink = []   # video page links
timage = []  # thumbnail image URLs
premain = "https://fs81.xyz"
hdr = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0 Waterfox/78.13.0"
}
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
html_path = os.path.join(BASE_DIR, "fsporn.html")
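# Fetch the current domain alias from the GitHub Pages list below and update the
# global `premain`; the hard-coded value is kept if that fails.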
def get_premain():
    global premain
    url = "https://v88avnetwork.github.io/feise.html"
    try:
        res = requests.get(url, timeout=10)
        # The exact split marker in front of the link was lost in this copy of the
        # script; the alias page is a tiny HTML file, so the first href is assumed
        # to point at the current working domain.
        premain = res.text.split("href=\"")[1].split("\"")[0]
        return
    except Exception:
        # Keep the hard-coded premain if the alias page cannot be fetched or parsed.
        return
def GetDesktopPath():
return os.path.join(os.path.expanduser("~"), 'Desktop')
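# Resolve a video page URL to the absolute URL of its m3u8 stream.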
def get_fsporn(url):
    while True:
        try:
            r = requests.get(url, headers=hdr)
            # The video page embeds the stream location in a "m3u8_url" JSON field.
            url2 = r.text.split("\"m3u8_url\": \"")[1].split("\"")[0]
            result = premain + str(url2)
            return result
        except Exception:
            time.sleep(1)  # brief pause so parse/network failures do not hot-loop
            continue
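# Clipboard watcher: whenever a link on the current domain is copied, resolve
# its stream with get_fsporn() and play it in mpv; Ctrl+C returns to the caller.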
def automode():
    pyperclip.copy("")
    while True:
        os.system('clear')
        print("Right-click to copy a video link you are interested in, then wait 1~2 seconds")
        print("Ctrl+C to close")
        try:
            url = pyperclip.paste()
            if str(url).strip() == "":
                time.sleep(3)
                continue
            elif str(url).startswith(premain):
                result = get_fsporn(url)
                pyperclip.copy("")
                # Play the resolved stream in mpv; the call blocks until mpv exits.
                cmdline = ("mpv --really-quiet --ontop \"%s\"" % (result))
                os.system(cmdline)
                continue
            else:
                time.sleep(3)
                continue
        except KeyboardInterrupt:
            return
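# Write the collected titles/links/thumbnails to fsporn.html, open it via
# webbrowser, then reset the three global lists.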
def save_to_html():
    global tlink, tlist, timage
    if len(tlink) == 0:
        tlink, tlist, timage = [], [], []
        return
    file_obj = open(html_path, 'w', encoding='UTF-8', newline='')
    # The original HTML template was lost in this copy of the script; the markup
    # below is a minimal reconstruction that lists every collected entry as a
    # linked thumbnail followed by its title.
    file_obj.write("<!DOCTYPE html>\n<html>\n<head>\n<meta charset=\"UTF-8\">\n"
                   "<title>fsporn results</title>\n</head>\n<body>\n")
    for i in range(len(tlink)):
        if str(tlink[i]) != "":
            file_obj.write("<p><a href=\"%s\" target=\"_blank\">"
                           "<img src=\"%s\" loading=\"lazy\"><br>%s</a></p>\n"
                           % (tlink[i], timage[i], tlist[i]))
    file_obj.write("</body>\n</html>\n")
    file_obj.close()
    webbrowser.open(html_path)
    tlink, tlist, timage = [], [], []
# Parse one search-result page: each result card contributes a title, a
# thumbnail URL and an absolute video-page link to the global lists.
def get_link_list(url):
    global tlist, tlink, timage
    res = requests.get(url, headers=hdr)
    try:
        tmp = res.text.split("class=\"column is-one-quarter avdata\">")
        i = 1
        while i < len(tmp):
            # Reset per entry so a failed parse cannot reuse values from the previous card.
            l, t, img = "", "", ""
            try:
                l = premain + tmp[i].split("href=\"")[1].split("\"")[0]
            except Exception:
                pass
            try:
                t = tmp[i].split("alt=\"")[1].split("\"")[0]
            except Exception:
                pass
            print(t)
            try:
                img = "https:" + tmp[i].split("data-src=\"")[1].split("\"")[0]
            except Exception:
                pass
            tlist.append(t)
            timage.append(img)
            tlink.append(l)
            i += 1
    except Exception:
        pass
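# Estimate how many result pages a search has (the code assumes 28 results per
# page); falls back to 3 pages when the counter cannot be parsed.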
def get_result_max(url):
    while True:
        try:
            try:
                res = requests.get(url, headers=hdr)
            except Exception:
                # one crude retry on a transient network error
                res = requests.get(url, headers=hdr)
            # The exact HTML markers around the result counter were lost in this
            # copy of the script; "total-results" and "</span>" are placeholders --
            # adjust them to whatever element wraps the total number of results.
            num = int(res.text.split("total-results")[1].split("</span>")[0]
                      .split(">")[-2].split("<")[0].replace(",", "")) // 28 + 1
            return num
        except Exception:
            # Fall back to 3 pages when the counter cannot be parsed.
            num = 3
            return num
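# Interactive saver for one search keyword: ask which page range to save,
# scrape those pages, write the HTML index, then hand control to automode().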
def get_fsporn_link(keyword):
    url_ = ""
    url = ""
    if str(keyword) != "":
        url_ = premain + "/search/" + urllib.parse.quote(keyword).replace("%2B", " ") + "/"
        url = premain + "/search/" + urllib.parse.quote(keyword).replace("%2B", " ") + "/2"
    else:
        return search()
    num_ = get_result_max(url)
    while True:
        try:
            os.system('clear')
            ulist = []
            sid = ""
            print("Current search keyword: " + str(keyword))
            sid = input("(total: " + str(num_) + " pages. Enter a range to save, e.g. 1~15, or a single page number; 0 returns to the search page): ")
            if sid == "0":
                return search()
            elif sid == "":
                continue
            if len(sid.split("~")) == 2:
                m = int(sid.split("~")[0])
                n = int(sid.split("~")[1])
                if 0 < m <= n <= num_:
                    for k in range(m, n + 1):
                        ulist.append(url_ + str(k))
                    # Pages are scraped one at a time so that the three global
                    # lists (titles, links, thumbnails) stay index-aligned.
                    for i in ulist:
                        get_link_list(i)
                    save_to_html()
                    automode()
                    continue
            elif sid.isdigit() and 0 < int(sid) <= num_:
                ulist.append(url_ + str(int(sid)))
                for i in ulist:
                    get_link_list(i)
                save_to_html()
                automode()
                continue
        except Exception:
            continue
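# Top-level prompt: ask for a search keyword, or "auto" to reuse the last saved
# HTML page, or "exit()" to quit.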
def search():
    os.system('clear')
    key = ""
    print("Type auto and press Enter to go straight to the automatic parser mode")
    print("The mpv player is required to use this script")
    print("Download: https://mpv.io/installation/")
    print("Resources come from " + premain)
    key = input("[exit()]Search: ")
    if key == "exit()":
        os.system('clear')
        return
    if str(key).strip() == "auto":
        # Reuse the previously saved result page if there is one, otherwise just
        # open the site itself, then start the clipboard watcher.
        if os.path.exists(html_path):
            webbrowser.open(html_path)
        else:
            webbrowser.open(premain)
        automode()
        return search()
    keyword = key
    try:
        # Multi-word searches are joined with "+" before being URL-quoted.
        key = "+".join(str(key).split(" "))
        keyword = key
    except Exception:
        pass
    get_fsporn_link(keyword)
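# Entry point: refresh the domain alias, then start the interactive search loop.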
if __name__ == '__main__':
get_premain()
search()