four_anime.py
import bs4
from anime_base import AnimeBaseC
from logger import logger
from urllib.parse import urlencode, quote_plus


class FourAnimeC(AnimeBaseC):
    def __init__(self, url, fillerList):
        super().__init__(url, fillerList)

    def Name(self):
        return "4Anime"

    def Grab(self, epNum):
        """Return (download URL, file name) for episode epNum, or ("", "")."""
        logger.Print("Getting URL ... ")
        pageResponse = self.__get_episode_page(epNum)
        if pageResponse:
            epUrl = self.__get_episode_download_url(pageResponse)
            epName = self.get_episode_name(epNum, epUrl)
            return epUrl, epName
        return "", ""

    def Search(self, text):
        """Search 4anime.to for `text`; return the chosen result URL or ""."""
        query = urlencode({'s': text}, quote_via=quote_plus)
        response = self.get_response("https://4anime.to/?{0}".format(query))
        if response:
            soup = bs4.BeautifulSoup(response.text, 'html.parser')
            # Each search result sits in a div with id "headerDIV_2";
            # map result URL -> title for the selection prompt.
            table = soup.find_all('div', {"id": "headerDIV_2"})
            pages = {}
            for tbl in table:
                for link in tbl.find_all('a'):
                    href = link.get('href')
                    if href and len(href) > 1:
                        title = link.find('div')
                        if title:
                            pages[href] = title.text
            keys, values = list(pages.keys()), list(pages.values())
            if len(keys) == 0:
                return ""
            if len(keys) == 1:
                return keys[0]
            # Multiple hits: ask the user to pick one (selection is 1-indexed).
            user_sel = self.get_user_selection(values)
            if user_sel == -1:
                return ""
            return keys[user_sel - 1]
        return ""

    def __get_episode_page(self, epNum):
        # epNum is 1-indexed; use the cached episode list when it covers
        # the requested episode, otherwise build the URL via the base class.
        if epNum <= len(self.m_episodes):
            return self.get_response(self.m_episodes[epNum - 1])
        url = self.m_url.replace("/anime/", "/")
        return super().get_episode_page(url, epNum)

    def __get_episode_download_url(self, pageResponse):
        if pageResponse:
            soup = bs4.BeautifulSoup(pageResponse.text, 'html.parser')
            # The download link is embedded in an inline script that mentions
            # "mirror_dl"; the URL is the second quoted string in that script.
            for script in soup.find_all('script', {"type": "text/javascript"}):
                if "mirror_dl" in script.text:
                    return script.text.split('"')[3].replace('\\', '')
        return ""

    def collect_episodes(self):
        """Populate self.m_episodes with the per-episode page URLs."""
        response = self.get_response(self.m_url)
        if response:
            soup = bs4.BeautifulSoup(response.text, 'html.parser')
            for ul in soup.find_all('ul', {"class": "episodes"}):
                for link in ul.find_all('a'):
                    self.m_episodes.append(link.get('href'))
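

# --- Usage sketch (illustrative; not part of the original file) ---
# Assumptions: the anime URL below is a placeholder, an empty list is
# acceptable for fillerList (its real shape comes from AnimeBaseC, which
# is not shown here), and episode numbers are 1-indexed, as the indexing
# in Grab and __get_episode_page suggests.
if __name__ == "__main__":
    anime = FourAnimeC("https://4anime.to/anime/example-show", [])
    anime.collect_episodes()
    ep_url, ep_name = anime.Grab(1)
    if ep_url:
        print("Episode 1: {0} -> {1}".format(ep_name, ep_url))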