Initial commit

PapaTutuWawa 2023-12-16 15:48:26 +01:00
commit b32243a67d
9 changed files with 1547 additions and 0 deletions

3
.gitignore vendored Normal file

@@ -0,0 +1,3 @@
venv/
*.egg-info
**/__pycache__/

1348
LICENSE Normal file

File diff suppressed because it is too large

9
README.md Normal file

@@ -0,0 +1,9 @@
# akibapass_downloader

A library for logging into the Akibapass site and parsing it so you can download
your library. It has only been tested on (and probably only works with) anime
series you have bought (not rented).

## License

See [`LICENSE`](./LICENSE).
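
As a quick orientation, here is a minimal end-to-end sketch of how the modules introduced in this commit fit together. The credentials and series URL are placeholders, and the `episode`/`login` module paths are inferred from the imports further down:

```python
from pathlib import Path

from akibapass_downloader.episode import list_episodes
from akibapass_downloader.login import login

# Placeholder credentials; login() returns None on failure (see login.py below).
cookies = login("user@example.com", "hunter2")

dest = Path("downloads")
dest.mkdir(exist_ok=True)

# Placeholder series URL.
for episode in list_episodes("https://akibapasstv.vhx.tv/some-series"):
    for dl in episode.get_downloads(cookies):
        dl.download(cookies, dest)
```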

0
akibapass_downloader/__init__.py Normal file

2
akibapass_downloader/const.py Normal file

@@ -0,0 +1,2 @@
# Base URL of the Akibapass service.
BASE_URL = "https://akibapasstv.vhx.tv"

28
akibapass_downloader/download.py Normal file

@@ -0,0 +1,28 @@
from dataclasses import dataclass
from pathlib import Path

import requests


@dataclass
class Download:
    url: str
    quality: str
    filename: str

    def download(self, cookies: dict[str, str], destination_dir: Path):
        """Downloads the episode into the directory @destination_dir. The cookies
        from @cookies are used during the request."""
        with requests.get(
            self.url,
            cookies=cookies,
            headers={
                # Note: the standard HTTP header is spelled "Referer".
                "Referer": self.url,
            },
            stream=True,
        ) as r:
            r.raise_for_status()

            # Stream the response to disk chunk by chunk so large video
            # files are never held in memory in full.
            with open(destination_dir / self.filename, "wb") as f:
                for chunk in r.iter_content(chunk_size=8196):
                    f.write(chunk)
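
Because the request is made with `stream=True` and written out via `iter_content`, memory use stays flat no matter how large the video is. A `Download` can also be driven by hand; a sketch with made-up values:

```python
from pathlib import Path

from akibapass_downloader.download import Download

# All values are hypothetical; real instances come from Episode.get_downloads().
dl = Download(
    url="https://akibapasstv.vhx.tv/download/12345",
    quality="1080p",
    filename="Episode 1 (1080p).mp4",
)
dl.download(cookies={"_session": "..."}, destination_dir=Path("."))
```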

102
akibapass_downloader/episode.py Normal file

@@ -0,0 +1,102 @@
from enum import Enum
from dataclasses import dataclass
import re

from bs4 import BeautifulSoup
import requests

from akibapass_downloader.const import BASE_URL
from akibapass_downloader.download import Download


class Quality(Enum):
    SD_240P = "240p"
    SD_360P = "360p"
    SD_540P = "540p"
    HD_720P = "720p"
    HD_1080P = "1080p"
    UHD_1440P = "1440p"


@dataclass
class Episode:
    # The URL of the episode.
    url: str

    # The title of the episode.
    name: str

    # The episode number.
    episode_nr: int

    def get_downloads(
        self, cookies: dict[str, str], filter_quality: Quality | None = None
    ) -> list[Download]:
        """Requests the episode's page and extracts the download links.
        @cookies are sent with the request."""
        r = requests.get(
            self.url,
            cookies=cookies,
            headers={
                "Referer": self.url,
            },
        )
        assert r.status_code == 200

        soup = BeautifulSoup(r.text, "html.parser")

        # Find the episode title.
        title = soup.find_all("h1", class_="video-title")[0].strong.string

        # Find the available downloads in the quality dropdown.
        dropdown = soup.find_all("div", class_="dropdown-list")[0]
        scroll = dropdown.ul
        assert scroll.attrs["class"] == ["scrolling"]

        downloads = []
        for download in scroll.find_all("li"):
            a = download.a
            # Reduce the link text (e.g. "HD 1080p ...") to just the resolution.
            # The separator passed to split() was lost in extraction; a
            # non-breaking space is assumed here.
            quality = (
                a.string.strip()
                .split("\xa0")[0]
                .rstrip()
                .replace("SD ", "")
                .replace("HD ", "")
            )

            if filter_quality is not None:
                if quality != filter_quality.value:
                    continue

            downloads.append(
                Download(
                    url=f"{BASE_URL}{a.attrs['href']}",
                    quality=quality,
                    filename=f"{title} ({quality}).mp4",
                )
            )

        return downloads


def list_episodes(base_url: str) -> list[Episode]:
    """Fetches the episodes listed at @base_url."""
    r = requests.get(base_url)
    assert r.status_code == 200

    soup = BeautifulSoup(r.text, "html.parser")
    # The episode number is embedded in the thumbnail's alt text, e.g. "S01E03".
    episode_nr_matcher = re.compile(r".*S[0-9]+E([0-9]+).*")
    episodes = []
    for li in soup.find_all("li", class_="item-type-video"):
        url = li.find_all("a", class_="browse-item-link")[0]
        img = url.find_all("img")[0]
        name_raw = img.attrs["alt"]
        match = episode_nr_matcher.match(name_raw)
        episodes.append(
            Episode(
                url=f"{BASE_URL}{url.attrs['href']}",
                name=name_raw,
                episode_nr=int(match.group(1)),
            )
        )

    return episodes
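
Since `filter_quality` is compared against each entry's parsed resolution, a caller can restrict the result to a single `Quality`. A sketch (the series slug is a placeholder, and the session cookie would come from `login()`):

```python
from akibapass_downloader.const import BASE_URL
from akibapass_downloader.episode import Quality, list_episodes

cookies = {"_session": "..."}  # placeholder; obtained via login()

episodes = list_episodes(f"{BASE_URL}/placeholder-series")
for ep in sorted(episodes, key=lambda e: e.episode_nr):
    # Entries whose resolution is not 1080p are skipped inside get_downloads().
    links = ep.get_downloads(cookies, filter_quality=Quality.HD_1080P)
    print(ep.episode_nr, [dl.filename for dl in links])
```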

38
akibapass_downloader/login.py Normal file

@@ -0,0 +1,38 @@
import requests
from bs4 import BeautifulSoup

from akibapass_downloader.const import BASE_URL


def login(email: str, password: str) -> dict[str, str] | None:
    """Performs a login on the Akibapass site and returns a set of cookies
    to use while performing other requests. Returns None on failure."""
    login_page_req = requests.get(f"{BASE_URL}/login")
    if login_page_req.status_code != 200:
        return None

    page = BeautifulSoup(login_page_req.text, "html.parser")
    # Find the CSRF token's value.
    csrf_token = page.find("meta", attrs={"name": "csrf-token"}).attrs["content"]
    # Find the CSRF token's name.
    csrf_param = page.find("meta", attrs={"name": "csrf-param"}).attrs["content"]

    r = requests.post(
        f"{BASE_URL}/login",
        cookies={
            # Use the session we received on the login page.
            "_session": login_page_req.cookies["_session"],
        },
        data={
            "email": email,
            "password": password,
            csrf_param: csrf_token,
            "utf8": "",
        },
    )
    if r.status_code != 200:
        return None

    return {
        "_session": r.cookies["_session"],
    }
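
The function echoes back the CSRF parameter the login page embeds and replays the page's `_session` cookie; on any non-200 response it returns `None`, so callers should check for that before reusing the cookies. A sketch with placeholder credentials:

```python
from akibapass_downloader.login import login

cookies = login("user@example.com", "placeholder-password")
if cookies is None:
    raise SystemExit("Login failed; check your credentials.")

# The returned {"_session": ...} dict can now be passed to
# Episode.get_downloads() and Download.download().
```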

17
pyproject.toml Normal file

@@ -0,0 +1,17 @@
[project]
name = "akibapass_downloader"
version = "0.1.0"
dependencies = [
    "beautifulsoup4",
    "requests"
]

[project.optional-dependencies]
dev = [
    "black",
    "pylint"
]

[tool.pylint."MESSAGES CONTROL"]
max-line-length = 120
disable = "missing-class-docstring,missing-module-docstring,missing-function-docstring"