diff --git a/anime_downloader/sites/__init__.py b/anime_downloader/sites/__init__.py
index a0d0d3b..e555a56 100644
--- a/anime_downloader/sites/__init__.py
+++ b/anime_downloader/sites/__init__.py
@@ -3,6 +3,7 @@ from importlib import import_module
 ALL_ANIME_SITES = [
     # ('filename', 'sitename', 'classname')
     # ('_4anime', '4anime', 'Anime4'),
+    ('kawaiifu', 'kawaiifu', 'Kawaiifu'),
     ('anitube', 'anitube', 'AniTube'),
     ('animtime', 'animtime', 'AnimTime'),
     ('anime8', 'anime8', 'Anime8'),
diff --git a/anime_downloader/sites/kawaiifu.py b/anime_downloader/sites/kawaiifu.py
new file mode 100644
index 0000000..dfffc6d
--- /dev/null
+++ b/anime_downloader/sites/kawaiifu.py
@@ -0,0 +1,52 @@
+from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
+from anime_downloader.sites import helpers
+
+from urllib.parse import quote_plus
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class Kawaiifu(Anime, sitename='kawaiifu'):
+    """Anime scraper for kawaiifu.com."""
+    sitename = 'kawaiifu'
+
+    @classmethod
+    def search(cls, query):
+        """Return a list of SearchResult for *query*."""
+        # quote_plus() so multi-word / special-character queries survive
+        # interpolation into the URL query string.
+        soup = helpers.soupify(helpers.get(
+            "https://kawaiifu.com/search-movie?keyword={}&cat-get=".format(
+                quote_plus(query))))
+
+        items = soup.find_all("div", "item")
+        # Index [1]: each result card carries more than one <h4><a>; the
+        # second link holds the title text and the series URL.
+        return [
+            SearchResult(
+                title=item.select("h4 > a")[1].text.strip(),
+                url=item.select("h4 > a")[1]['href']
+            ) for item in items
+        ]
+
+    def _scrape_episodes(self):
+        """Return the list of episode-page URLs for this series."""
+        soup = helpers.soupify(helpers.get(self.url))
+        # The series page links to a server-specific page; follow the first
+        # server link before collecting the full episode list.
+        redirect = soup.select(".list-ep > li > a")[0]["href"]
+        soup = helpers.soupify(helpers.get(redirect))
+
+        return [item["href"] for item in soup.select(".list-ep > li > a")]
+
+    def _scrape_metadata(self):
+        """Populate self.title from the series page."""
+        soup = helpers.soupify(helpers.get(self.url))
+        self.title = soup.find_all("h2", "title")[0].text
+
+
+class KawaiifuEpisode(AnimeEpisode, sitename="kawaiifu"):
+    def _get_sources(self):
+        soup = helpers.soupify(helpers.get(self.url))
+        video = soup.find_all("video")[0]
+
+        # NOTE(review): "atr_id" is an unusual attribute name -- confirm
+        # against kawaiifu's <video> markup (a plain "src" is more common).
+        return [('no_extractor', video["atr_id"])]