
Commit 9b794cb

add modification
1 parent 831b8fb commit 9b794cb

4 files changed: +222 -171 lines changed

raspamb.py

Lines changed: 141 additions & 166 deletions
@@ -1,201 +1,176 @@
-import os
-import re
-import platform
-import importlib
-from pathlib import Path
-import subprocess
+import requests.packages.urllib3.util.connection as urllib3_cn
+from urllib.parse import unquote
+from vlc import start_stream
+from servers import zshared
+from thefuzz import fuzz
+import lxml.html
+import requests
+import socket
+import base64


-def bible(lib):
+def allowed_gai_family():
+    return socket.AF_INET
+
+
+urllib3_cn.allowed_gai_family = allowed_gai_family
+
+
+def get_index(input_text:str, size: int):
+    index = None
+
     try:
-        if importlib.import_module(lib):
-            return importlib.import_module(lib)
-    except:
-        try:
-            os.system(f'pip install {lib}')
-            return importlib.import_module(lib)
-        except:
-            os.system(f'sudo pip install {lib}')
-            return importlib.import_module(lib)
+        index = int(input(input_text)) - 1
+    except ValueError:
+        pass
+
+    # CASO SEJA PASSADO UMA LETRA
+    if index == None:
+        print('!! Use apenas números !!')
+        return True
+
+    # CASO SEJA USADO -1 PARA VOLTAR
+    elif index == -2:
+        # break
+        return False
+
+    # SE FOR MAIOR QUE SIZE OU MENOR QUE 0
+    elif index <= -1 or index >= size:
+        print('!! Número inválido !!')
+        return True
+
+    else:
+        return index
+

+def download_episode(download_link: str) -> None:
+    print('[+] Iniciando download do episódio...')
+    video_content = requests.get(download_link, stream=True)

-requests = bible('requests')
-bs4 = bible('bs4')
-selenium = bible('selenium')
-webdriver_manager = bible('webdriver_manager')
-from webdriver_manager.microsoft import EdgeChromiumDriverManager
-from webdriver_manager.chrome import ChromeDriverManager
-from bs4 import BeautifulSoup
-from selenium import webdriver
-from urllib.request import urlopen
-from selenium.common.exceptions import WebDriverException
+    if 'Content-Disposition' in video_content.headers:
+        videoname = video_content.headers['Content-Disposition'].split("'")[-1]
+    else:
+        videoname = 'anime.mkv'

+    with open(videoname, "wb") as r:
+        r.write(video_content.content)
+
+    print('[+] Download concluído !!')
+    print(f'[+] Download salvo como: {videoname}')


-def popen(cmd):
-    startupinfo = subprocess.STARTUPINFO()
-    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
-    process = subprocess.Popen(cmd, shell=True, startupinfo=startupinfo, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
-    return process.stdout.read()
+class Raspamb:
+    def __init__(self):
+        self.URL_ANIMES_LIST = 'https://www.anbient.com/anime/lista'
+        self.URL_ANBIENT = 'https://www.anbient.com'

+        self.animes_list = self.get_anime_list()

-def get_path_exe(exe_name, thirty_two=True):
-    '''
-    Retorna a path de um executável
-    '''
-    path_exe = None
-    DISCO = popen('echo %WINDIR%').decode().split(':')[0]
+        self.matchs: list[list[str], int] = None
+        self.selected: list[str] = None
+        self.episodes: list[str] = None

-
-    if thirty_two:
-        path_program_files = rf'{DISCO}:\Program Files (x86)'
-    else:
-        path_program_files = rf'{DISCO}:\Program Files'

+    def get_anime_list(self) -> list[list[str, str]]:
+        response = requests.get(self.URL_ANIMES_LIST)
+        parser = lxml.html.fromstring(response.text)
+        elements = parser.xpath('//td/a')
+        animes_list = []

-    for path in Path(path_program_files).rglob(exe_name):
-        path_exe = path
+        for x in elements:
+            href = x.get('href')
+            title = x.text
+            animes_list.append([title, href])

-    return path_exe
+        return animes_list


-def return_driver():
-    '''
-    Esta função baixa e retorna o driver no computador do usuário
-    '''
+    def search(self, anime_name: str) -> None:
+        matchs = []
+
+        for x in self.animes_list:
+            ratio = fuzz.partial_ratio(anime_name, x[0])
+            if ratio > 70:
+                matchs.append([x, ratio])

-    if get_path_exe('chrome.exe', False):
-        return webdriver.Chrome(ChromeDriverManager().install())
+        matchs.sort(key = lambda x: x[1], reverse = True)
+        self.matchs = matchs


-    if get_path_exe('msedge.exe'):
-        return webdriver.Edge(EdgeChromiumDriverManager().install())
+    def select_anime(self) -> None:
+        size = len(self.matchs)
+        cont = 1

+        for x in self.matchs:
+            print([cont], x[0][0])
+            cont += 1
+
+        while True:
+            index = get_index(input_text='\nEscolha o anime: (-1 voltar): ', size=size)
+
+            if type(index) == int:
+                self.selected = self.matchs[index][0]
+                break

-    print("Você precisa ter o Google Chrome ou Edge instalado")
-    input("")
-    quit()
+            elif not index:
+                return 'continue'


-def links_zippyshare():
-    soup = BeautifulSoup(driver.page_source, 'html.parser')
-    global lista_com_links
-    lista_com_links = []
+    def get_episode_list(self) -> None:
+        url_of_title = self.URL_ANBIENT + self.selected[1]
+        response_of_title = requests.get(url_of_title)
+        parser_of_title = lxml.html.fromstring(response_of_title.text)
+        url_base64 = parser_of_title.xpath('//a[@class="ajax padrao"]')[0].get('href')

-    for link in soup.find_all(href=re.compile('/zippyshare/')):
-        lista_com_links.append(link['href'])
+        url_of_episodes = self.URL_ANBIENT + base64.b64decode(url_base64).decode()

-    if len(lista_com_links) == 0:
-        for link in soup.find_all(href=re.compile('zippyshare.com')):
-            lista_com_links.append(link['href'])
+        SERVER = 'zippyshare'
+        response_episodes = requests.get(url_of_episodes)
+        parser_episodes = lxml.html.fromstring(response_episodes.text)
+
+        episodes = parser_episodes.xpath(f'//div[contains(@class, "servidor {SERVER}") or contains(@class, "servidor {SERVER} active")]/li/a/@href')

-    return lista_com_links
+        if len(episodes) == 0:
+            print('ERRRROO AO EXTRAIR EPISÓDIOS')
+            exit()

+        self.episodes = episodes

-print('A execução do código pode demorar de acordo com a internet\n')
-url = 'https://www.anbient.com/anime/lista'
-html = urlopen(url)
-bs = BeautifulSoup(html, 'html.parser')
-data = bs.find(class_="list")
-dat = data.find_all("a")
-tv = data.find_all("a", href=True)
-# epi = data.find_all("td", {'class': 'epi'})

-lista = []
+    def select_episode(self):
+        size = len(self.episodes)

-for c in range(0, len(dat)):
-    d = dat[c].text
-    lista.append(str(d).lower())
+        cont = 1
+        for x in self.episodes:
+            print([cont], x)
+            cont += 1

+        while True:
+            index = get_index(input_text='\nSelecione o episódio: (-1 voltar): ', size=size)

-def retornar_busca():
-    global driver
-    global list_animes
-
-    quantidade_anime = 0
-    list_animes = []
-    tv_anbient = []
-    while quantidade_anime == 0:
-        anime = input('Nome do anime: ').lower().strip()
-
-        for c in range(0, len(lista)):
-            names = lista[c].find(anime)
-            if names != (-1):
-                list_animes.append(lista[c])
-                tv_anbient.append(tv[c].get('href'))
-        quantidade_anime = len(list_animes)
-        if len(list_animes) == 0:
-            print('\nCertifique-se que o nome está correto!\n')
-
-
-    # Imprime a lista de animes
-    for i in range(0, len(list_animes)):
-        print(f'[{i + 1}] {list_animes[i].title()}')
-    print()
-
-    lista_numero_animes = []
-    while True:
-        try:
-            numero = int(input('Digite um número (-1 para voltar): '))
-
-            if numero == -1:
-                print()
-                retornar_busca()
-            if (numero - 1) < len(list_animes):
-                link = 'https://www.anbient.com{}'.format(tv_anbient[numero - 1])
-                # print(link)
-                break
-            else:
-                print('Numero invalido!!!\n')
-                print()
-        except ValueError:
-            print('!!!!! USE APENAS NUMEROS !!!!!!')
-            print()
-        except Exception as e:
-            print('Tem outra coisa dando bosta aq')
-            print(e)
-
-    print('Capturando links dos episódios...')
-    print('Recomenda-se que o chromedriver esteja na mesma pasta que este script')
+            if type(index) == int:
+                download_link = zshared(self.episodes[index])
+                print('\n[+] Direct_Link: ', download_link)
+                print('[+] Tentando iniciar strem com VLC')
+                if not start_stream(download_link):
+                    download_episode(download_link)

-    try:
-        driver = return_driver()
-        driver.get(link)
-    except WebDriverException as e:
-        print('Ocorreu um erro')
-        print(e)
-        exit()
-
-    lista_links = links_zippyshare()
-    # Imprime a lista de animes
-    print()
-    for i in range(0, len(lista_links)):
-        print(f'[{i + 1}] {lista_links[i]}')
+            elif not index:
+                return 'continue'
+
+
+if __name__ == '__main__':
+    animes = Raspamb()

     while True:
-        # Le o numero do episódio que ira baixar
-        while True:
-            try:
-                numero_episodio = int(input('Número do episódio (-1 para voltar): '))
-                if numero_episodio == -1:
-                    retornar_busca()
-                elif numero_episodio <= len(lista_links):
-                    link = lista_links[numero_episodio - 1]
-                    break
-                else:
-                    print('Episódio invalido, escolha um numero entre 1 e {}'.format(len(lista_links)))
-            except ValueError:
-                print('''!!!! Atenção !!!! Erro no número''')
-
-        print('Iniciando o download\n')
-        driver.get(link)
-        episode = driver.find_element_by_xpath('//a[@id="dlbutton"]').get_attribute('href')
-
-
-        # path_vlc = get_path_exe("vlc.exe", False)
-        # if path_vlc:
-        # popen(f'"{path_vlc}" {episode}')
-
-        driver.get(episode)
-
-        retornar_busca()
+        anime_name = input('\nDigite um anime: ')
+        animes.search(anime_name)
+
+        if animes.select_anime() == 'continue':
+            continue
+
+        animes.get_episode_list()
+        if animes.select_episode() == 'continue':
+            continue
+
+

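Note: the rewritten raspamb.py imports start_stream from a local vlc module and zshared from servers.py, but vlc.py does not appear in the diffs shown above. A minimal, hypothetical sketch only — assuming start_stream just hands the direct link to a VLC binary found on PATH and reports whether the player could be launched; the actual module may differ:

    # Hypothetical sketch of vlc.start_stream; the real vlc.py is not shown in this commit.
    import shutil
    import subprocess

    def start_stream(url: str) -> bool:
        vlc_path = shutil.which('vlc')      # assumed: VLC is installed and on PATH as 'vlc'
        if vlc_path is None:
            return False                    # no VLC found: caller falls back to download_episode()
        subprocess.Popen([vlc_path, url])   # open the stream URL in VLC and return immediately
        return True

In select_episode, only the boolean matters: when start_stream returns False, the episode is downloaded with download_episode instead.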
requirements.txt

Lines changed: 8 additions & 5 deletions
@@ -1,5 +1,8 @@
-beautifulsoup4==4.7.1
-bs4==0.0.1
-selenium==3.141.0
-soupsieve==1.9.1
-urllib3==1.25.3
+certifi==2021.10.8
+charset-normalizer==2.0.12
+idna==3.3
+lxml==4.7.1
+python-Levenshtein==0.12.2
+requests==2.27.1
+thefuzz==0.19.0
+urllib3==1.26.8

servers.py

Lines changed: 19 additions & 0 deletions
@@ -0,0 +1,19 @@
+import lxml.html
+from pytest import importorskip
+import requests
+import re
+
+
+compilado = re.compile('("[\/A-z0-9\+(%)\-." ]+)')
+# (\([1-9 % +]+\))
+
+def zshared(link: str) -> str:
+    response = requests.get(link)
+    URL = response.url.split('/v')[0]
+    parser = lxml.html.fromstring(response.text)
+
+    javascript = parser.xpath('//script[@type="text/javascript"]')[5].text
+    javascript = re.search(compilado, javascript).group(0).replace('+ (', '+ str(')
+    download_path = eval(javascript)
+    return URL + download_path
+
