rcdl 3.0.0b18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rcdl/__init__.py +10 -0
- rcdl/__main__.py +37 -0
- rcdl/core/__init__.py +0 -0
- rcdl/core/adapters.py +241 -0
- rcdl/core/api.py +76 -0
- rcdl/core/config.py +212 -0
- rcdl/core/db.py +283 -0
- rcdl/core/db_queries.py +97 -0
- rcdl/core/downloader.py +307 -0
- rcdl/core/downloader_subprocess.py +366 -0
- rcdl/core/file_io.py +41 -0
- rcdl/core/fuse.py +127 -0
- rcdl/core/models.py +105 -0
- rcdl/core/opti.py +90 -0
- rcdl/core/parser.py +282 -0
- rcdl/gui/__init__.py +0 -0
- rcdl/gui/__main__.py +5 -0
- rcdl/gui/db_viewer.py +41 -0
- rcdl/gui/gui.py +54 -0
- rcdl/gui/video_manager.py +170 -0
- rcdl/interface/__init__.py +0 -0
- rcdl/interface/cli.py +216 -0
- rcdl/interface/ui.py +194 -0
- rcdl/utils.py +180 -0
- rcdl-3.0.0b18.dist-info/METADATA +122 -0
- rcdl-3.0.0b18.dist-info/RECORD +28 -0
- rcdl-3.0.0b18.dist-info/WHEEL +4 -0
- rcdl-3.0.0b18.dist-info/entry_points.txt +3 -0
rcdl/core/opti.py
ADDED
@@ -0,0 +1,90 @@
# core/opti.py

"""
Optimize media to reduce disk storage utilisation
"""

import os

from rcdl.core.config import Config
from rcdl.core.models import Status, Media
from rcdl.core.db import DB
from rcdl.core.downloader_subprocess import handbrake_optimized
from rcdl.interface.ui import UI, NestedProgress
from rcdl.utils import get_media_metadata, get_date_now


def update_db(media: Media, user: str, result: int):
    """Update DB if optimisation successful with new file_size, etc..."""
    if result == 0:
        path = os.path.join(Config.creator_folder(user), media.file_path)
        _, file_size, checksum = get_media_metadata(path)
        media.status = Status.OPTIMIZED
        media.checksum = checksum
        media.created_at = get_date_now()
        media.file_size = file_size

        with DB() as db:
            db.update_media(media)


def optimize():
    """Optimize all medias in DB with DOWNLOADED
    status that are not part of a fuse group"""
    # get all videos to optimize
    with DB() as db:
        medias = db.query_media_by_status(Status.DOWNLOADED)
        if Config.DEBUG:
            medias.extend(db.query_media_by_status(Status.OPTIMIZED))

    # progress
    progress = NestedProgress(UI.console)
    progress.start(
        total=len(medias),
        total_label="Optimizing videos",
        current_label="Current video",
    )

    for media in medias:
        # check media is not in a fuse group
        with DB() as db:
            fuse = db.query_fuses_by_id(media.post_id)
        if fuse is not None:
            progress.advance_total()
            continue

        # get post info
        with DB() as db:
            post = db.query_post_by_id(media.post_id)
        if post is None:
            UI.error(f"Could not match media {media.post_id} to a post by id")
            progress.advance_total()
            continue

        result = handbrake_optimized(media, post.user, progress)

        folder_path = Config.creator_folder(post.user)
        video_path = os.path.join(folder_path, media.file_path)
        output_path = video_path + ".opti.mp4"

        if result == 0:
            try:
                os.replace(output_path, video_path)
                update_db(media, post.user, result)
            except FileNotFoundError as e:
                UI.error(
                    f"FileNotFoundError: Could not replace {video_path} "
                    f"with {output_path} due to: {e}"
                )
            except PermissionError as e:
                UI.error(
                    f"PermissionError: Could not replace {video_path} "
                    f"with {output_path} due to: {e}"
                )
            except OSError as e:
                UI.error(
                    f"OSError: Failed to replace {video_path} with {output_path} due to: {e}"
                )
            finally:
                progress.advance_total()
    progress.close()
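
For reference, optimize() above leaves HandBrake's output next to the source as <video>.opti.mp4 and only promotes it on a zero exit code via os.replace. The standalone sketch below illustrates that swap pattern only; the sample path and the shutil.copyfile standing in for the real transcode are hypothetical and not part of rcdl.

import os
import shutil
import tempfile


def swap_in_optimized(video_path: str) -> None:
    """Write the optimized file next to the original, then swap it in."""
    output_path = video_path + ".opti.mp4"
    # Placeholder for the real transcode step (rcdl shells out to HandBrake here).
    shutil.copyfile(video_path, output_path)
    # os.replace is atomic on the same filesystem, so a crash never leaves
    # video_path half-written; failures surface as OSError.
    os.replace(output_path, video_path)


if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tmp:
        sample = os.path.join(tmp, "2024-01-01_demo_p0.mp4")
        with open(sample, "wb") as f:
            f.write(b"fake video bytes")
        swap_in_optimized(sample)
        print("swapped:", os.path.exists(sample))
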
rcdl/core/parser.py
ADDED
@@ -0,0 +1,282 @@
# core/parser.py

"""Helper functions to parse posts and files"""

import logging
from pathvalidate import sanitize_filename

from rcdl.interface.ui import UI
from rcdl.core.models import Media, Creator, Post, CreatorStatus
from rcdl.core.file_io import load_json, load_txt, write_txt
from rcdl.core.config import Config


COOMER_PAYSITES = ["onlyfans", "fansly", "candfans"]
KEMONO_PAYSITES = [
    "patreon",
    "fanbox",
    "fantia",
    "boosty",
    "gumroad",
    "subscribestar",
    "dlsite",
]


def get_domain(arg: str | dict | Media) -> str:
    """From a service get the domain (coomer or kemono)
    Input is either: service(str), post(dict), video(models.Video)
    """

    def _service(service: str) -> str:
        if service in COOMER_PAYSITES:
            return "coomer"
        if service in KEMONO_PAYSITES:
            return "kemono"
        logging.error("Service %s not associated to any domain", service)
        return ""

    if isinstance(arg, dict):
        return _service(arg["service"])
    if isinstance(arg, Media):
        return _service(arg.service)

    return _service(arg)


def get_title(post: Post) -> str:
    """From a Post Model return the title"""
    title = post.title
    if title == "":
        title = post.substring
    if title == "":
        title = post.id
    return sanitize_filename(title)


def get_title_json(post: dict) -> str:
    """Extract title from a post(dict)"""
    title = post["title"]
    if title == "":
        title = post["substring"]
    if title == "":
        title = post["id"]
    return sanitize_filename(title)


def get_date(post: dict) -> str:
    """Extract date from a post(dict)"""
    if "published" in post:
        date = post["published"][0:10]
    elif "added" in post:
        date = post["added"][0:10]
    else:
        logging.error("Could not extract date from %s", post["id"])
        date = "NA"
    return date


def get_part(post: dict, url: str) -> int:
    """
    For posts containing multiple video urls. Each url is considered a part,
    so all videos from the same post will simply have a different part number
    """
    urls = extract_video_urls(post)
    part = 0
    if len(urls) == 1:
        return 0

    for u in urls:
        if u == url:
            return part
        part += 1

    logging.error(
        "Could not extract part number for post id %s with url %s", post["id"], url
    )
    return -1


def get_filename(post: dict, url: str) -> str:
    """Get filename from post dict and url"""
    title = get_title_json(post)
    date = get_date(post)
    part = get_part(post, url)
    file_title = f"{date}_{title}".replace("'", " ").replace('"', "")
    filename = f"{file_title}_p{part}.mp4"
    return filename


def get_filename_fuse(post: Post) -> str:
    """Get filename for fuse output from Post Model
    Fuse output has 'X' as part number"""
    title = get_title(post)
    date = post.published[0:10]
    part = "X"
    file_title = f"{date}_{title}".replace("'", " ").replace('"', "")
    filename = f"{file_title}_p{part}.mp4"
    return filename


def extract_video_urls(post: dict) -> list:
    """Extract all video urls from a dict post"""
    video_extensions = (".mp4", ".webm", ".mov", ".avi", ".mkv", ".flv", ".wmv", ".m4v")
    urls = set()

    # Check main file
    if post["file"]:
        if post["file"]["path"]:
            path = post["file"]["path"]
            if path.endswith(video_extensions):
                urls.add(f"{path}")

    if post["attachments"]:
        attachments = post["attachments"]
        for attachment in attachments:
            if attachment["path"]:
                if attachment["path"].endswith(video_extensions):
                    urls.add(f"{attachment['path']}")

    return list(urls)


def filter_posts_with_videos_from_list(data: list[dict]) -> list[dict]:
    """Return posts with video urls from a list of posts"""

    posts_with_videos = []
    for post in data:
        if len(extract_video_urls(post)) > 0:
            posts_with_videos.append(post)
    return posts_with_videos


def filter_posts_with_videos_from_json(path: str) -> list:
    """Return posts with video urls from a json containing a list of posts"""
    posts = load_json(path)

    posts_with_videos = []
    for post in posts:
        if len(extract_video_urls(post)) > 0:
            posts_with_videos.append(post)
    return posts_with_videos


def valid_service(service: str) -> bool:
    """Check if a service is valid (within list of DOMAIN services)"""
    if service in COOMER_PAYSITES:
        return True
    if service in KEMONO_PAYSITES:
        return True
    return False


def _default_creator(_id: str, service: str, domain: str):
    return Creator(
        id=_id,
        service=service,
        domain=domain,
        name="",
        indexed="",
        updated="",
        favorited=1,
        status=CreatorStatus.NA,
        max_date="",
        max_posts=1,
        max_size=1,
        min_date="",
    )


def get_creator_from_line(line: str) -> Creator | None:
    """
    Convert a line into a Creator model
    arg: line -> 'service/creator'
    This is the format of creators.txt
    """

    parts = line.split("/")
    if valid_service(parts[0].strip()):
        return _default_creator(
            parts[1].strip(), parts[0].strip(), get_domain(parts[0].strip())
        )
    if valid_service(parts[1].strip()):
        return _default_creator(
            parts[0].strip(), parts[1].strip(), get_domain(parts[1].strip())
        )

    UI.error(
        f"Creator file not valid: {line} can not be interpreted."
        f" Format is: 'service/creator_id'"
    )
    return None


def get_creators() -> list[Creator]:
    """
    Load creators.txt and return a list of models.Creator
    """
    lines = load_txt(Config.CREATORS_FILE)
    creators = []
    for line in lines:
        creator = get_creator_from_line(line)
        if creator is None:
            continue
        creators.append(creator)
    if len(creators) < 1:
        UI.error(f"Could not find any creators. Check {Config.CREATORS_FILE}")
    return creators


def get_creators_from_posts(posts: list[dict]) -> list[Creator]:
    """Extract a list of Creator models from a list of dict posts"""
    creators = []
    seen = set()

    for post in posts:
        key = (post["user"], post["service"], "coomer")
        if key in seen:
            continue

        seen.add(key)
        creators.append(_default_creator(post["user"], post["service"], "coomer"))
    return creators


def parse_creator_input(value: str) -> tuple[str | None, str]:
    """Parse user input in cli to extract creator id & service"""
    value = value.strip()

    # url
    if "://" in value:
        parts = value.replace("https://", "").strip().split("/")
        logging.info(
            "From %s extracted service %s and creator %s", value, parts[1], parts[3]
        )
        return parts[1], parts[3]  # service, creator_id

    # creators.txt format
    if "/" in value:
        c = get_creator_from_line(value)
        if c is not None:
            logging.info(
                "From %s extracted service %s and creator %s",
                value,
                c.service,
                c.id,
            )
            return c.service, c.id

    logging.info("From %s extracted service None and creator %s", value, value)
    return None, value


def append_creator(creator: Creator):
    """Append a creator to the creators.txt file
    creators.txt holds all creators used in the refresh command"""
    line = f"{creator.service}/{creator.id}"
    lines = load_txt(Config.CREATORS_FILE)

    if line in lines:
        return
    lines.append(line)
    write_txt(Config.CREATORS_FILE, line, mode="a")
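
The naming helpers above produce files of the form <date>_<title>_p<part>.mp4, where the part number is the url's position among the post's video urls. The standalone sketch below mirrors that scheme with a hypothetical post dict; it skips pathvalidate's sanitize_filename for brevity and is not the package's own code.

def build_filename(post: dict, url: str, urls: list[str]) -> str:
    # Fall back from title to substring to id, as get_title_json does above.
    title = post.get("title") or post.get("substring") or post["id"]
    # Keep only the YYYY-MM-DD prefix of the published/added timestamp.
    date = (post.get("published") or post.get("added") or "NA")[0:10]
    # Single-video posts are always part 0; otherwise use the url's index.
    part = 0 if len(urls) == 1 else urls.index(url)
    file_title = f"{date}_{title}".replace("'", " ").replace('"', "")
    return f"{file_title}_p{part}.mp4"


sample_post = {
    "id": "12345",
    "title": "beach day",
    "published": "2024-06-01T12:00:00",
}
sample_urls = ["/data/a.mp4", "/data/b.mp4"]
print(build_filename(sample_post, "/data/b.mp4", sample_urls))
# -> 2024-06-01_beach day_p1.mp4
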
rcdl/gui/__init__.py
ADDED
File without changes
rcdl/gui/__main__.py
ADDED
rcdl/gui/db_viewer.py
ADDED
@@ -0,0 +1,41 @@
# gui/db_viewer.py

import streamlit as st
import sqlite3

import pandas as pd

from rcdl.core.config import Config

TABLES = ["medias", "posts", "fuses"]


def get_table_columns(table_name):
    conn = sqlite3.connect(Config.DB_PATH)
    cur = conn.cursor()
    cur.execute(f"PRAGMA table_info({table_name})")
    columns = [info[1] for info in cur.fetchall()]
    conn.close()
    return columns


def get_table_data(table_name, sort_by=None, ascending=True):
    conn = sqlite3.connect(Config.DB_PATH)
    df = pd.read_sql_query(f"SELECT * FROM {table_name}", conn)
    conn.close()
    if sort_by and sort_by in df.columns:
        df = df.sort_values(by=sort_by, ascending=ascending)
    return df


def run_db_viewer():
    st.set_page_config(page_title="DB Viewer", layout="wide")
    st.title("Database Viewer")

    table_name = st.selectbox("Select Table", TABLES)

    # Load data
    df = get_table_data(table_name, sort_by=None, ascending=True)

    st.write(f"Showing `{table_name}` table ({len(df)} rows)")
    st.dataframe(df, width="stretch")
rcdl/gui/gui.py
ADDED
@@ -0,0 +1,54 @@
# gui/gui.py

import streamlit as st

from rcdl.gui.db_viewer import run_db_viewer
from rcdl.gui.video_manager import video_manager

st.markdown(
    """
    <style>
    /* Remove top padding */
    .block-container {
        padding-top: 1rem !important;
    }

    /* Optional: remove Streamlit header */
    header[data-testid="stHeader"] {
        display: none;
    }

    /* Optional: remove footer */
    footer {
        display: none;
    }
    </style>
    """,
    unsafe_allow_html=True,
)


def run_gui():
    """
    Launches the Streamlit GUI.
    This function can be called from a CLI command.
    """
    # Streamlit code
    st.set_page_config(page_title="RCDL", layout="wide")

    # Sidebar navigation
    page = st.sidebar.radio("Go to", ["Home", "Manage Videos", "View DB"])

    if page == "Home":
        st.header("Home Page")
        st.write("Developed by - ritonun -")

    elif page == "Manage Videos":
        video_manager()

    elif page == "View DB":
        run_db_viewer()


if __name__ == "__main__":
    run_gui()
rcdl/gui/video_manager.py
ADDED
@@ -0,0 +1,170 @@
# gui/video_manager.py

import os

import streamlit as st

from rcdl.core.config import Config
from rcdl.core.models import Status, Media
from rcdl.core.db import DB
from rcdl.utils import format_seconds


previous_statuses = {}


def set_status(media: Media, status: Status):
    key = media.post_id + media.url
    previous_statuses[key] = media.status
    media.status = status
    with DB() as db:
        db.update_media(media)
    print(f"Set {media.post_id} to {status.value}")

    for m in st.session_state.medias:
        if m.post_id == media.post_id and m.url == media.url:
            m.status = status
            break


def video_manager():
    st.title("Video Manager")

    # Filter & Sorting UI
    with st.expander("Filters & Sorting", expanded=True):
        col1, col2, col3 = st.columns(3)
        with col1:
            sort_by = st.selectbox(
                "Sort By",
                options=["file_size", "service", "duration", "file_path"],
                index=0,
            )
        with col2:
            ascending = st.radio(
                "Order",
                options=[True, False],
                format_func=lambda x: "Ascending" if x else "Descending",
                horizontal=True,
            )
        with col3:
            creator_filter = st.text_input(
                "Creator ID(user)", placeholder="Leave empty for all"
            )
        status_filter = st.multiselect(
            "Status",
            options=list(Status),
            default=[Status.DOWNLOADED, Status.OPTIMIZED],
        )

        reload = st.button("Apply")

    # load db
    if reload or "medias" not in st.session_state:
        with DB() as db:
            medias = db.query_medias_by_status_sorted(
                status_filter,
                sort_by=sort_by,
                ascending=ascending,
            )

            # creator filter (also skips media that belong to a fuse group)
            if creator_filter:
                filtered = []
                for m in medias:
                    post = db.query_post_by_id(m.post_id)
                    if post and post.user == creator_filter:
                        # check if in a fuse group
                        fm = db.query_fuses_by_id(m.post_id)
                        if fm is None:
                            filtered.append(m)
                medias = filtered

        st.session_state.medias = medias
        st.session_state.media_index = 0

    medias = st.session_state.medias
    if not medias:
        st.info("No media found")
        return

    # session state
    if "media_index" not in st.session_state:
        st.session_state.media_index = 0

    idx = st.session_state.media_index
    media = medias[idx]

    # media info
    st.subheader(f"Media {idx + 1} / {len(medias)}")

    with DB() as db:
        post = db.query_post_by_id(media.post_id)
    if post is None:
        st.info("No matching post found")
        return

    col_video, col_info = st.columns([1, 2])
    with col_info:
        col1, col2 = st.columns(2)
        with col1:
            st.write("**Post ID:**", media.post_id)
            st.write("**Service:**", media.service)
            st.write("**User:**", post.user)
            st.write("**Duration:**", format_seconds(media.duration))
            st.write("**Sequence:**", media.sequence)
            st.write("**Size:**", round(media.file_size / (1024 * 1024), 1), "MB")
            st.write("**Status:**", media.status)
            key = media.post_id + media.url
            if key in previous_statuses:
                st.write("**PREV STATUS:**", previous_statuses[key])
            st.write("**Path:**", media.file_path)
            st.write("**Created at**:", media.created_at[0:16])

        with col2:
            # controls
            c1, c2, c3 = st.columns([1, 1, 2])
            with c1:
                if st.button("⏮ Prev", disabled=idx == 0):
                    st.session_state.media_index -= 1
                    st.rerun()
                if st.button("⏭ Next", disabled=idx >= len(medias) - 1):
                    st.session_state.media_index += 1
                    st.rerun()
            with c2:
                if st.button("Remove"):
                    set_status(media, Status.TO_BE_DELETED)
                    st.rerun()
                if st.button("Revert Status"):
                    key = media.post_id + media.url
                    if key in previous_statuses:
                        set_status(media, previous_statuses[key])
                    else:
                        print("Not in previous status")
                    st.rerun()
            with c3:
                chosen_status = st.selectbox(
                    "Set Status",
                    options=list(Status),
                    index=list(Status).index(media.status)
                    if media.status in list(Status)
                    else 0,
                )
                if st.button("Apply Status"):
                    set_status(media, chosen_status)
                    st.rerun()

    # video player
    full_path = os.path.join(Config.creator_folder(post.user), media.file_path)
    if os.path.exists(full_path):
        with col_video:
            with st.container():
                if media.file_size > 199 * 1024 * 1024:  # 199MB
                    with open(full_path, "rb") as f:
                        st.video(f.read(), autoplay=True, loop=True)
                else:
                    st.video(full_path, autoplay=True, loop=True)
    else:
        st.error(f"Video file {full_path} not found on disk")