warp-beacon 1.1.2-py3-none-any.whl → 1.2.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- warp_beacon/__version__.py +1 -1
- warp_beacon/jobs/__init__.py +1 -0
- warp_beacon/jobs/abstract.py +2 -0
- warp_beacon/mediainfo/video.py +4 -18
- warp_beacon/scraper/__init__.py +18 -5
- warp_beacon/scraper/exceptions.py +3 -1
- warp_beacon/scraper/instagram.py +14 -7
- warp_beacon/scraper/youtube/shorts.py +20 -9
- warp_beacon/storage/__init__.py +10 -1
- warp_beacon/warp_beacon.py +24 -2
- {warp_beacon-1.1.2.dist-info → warp_beacon-1.2.3.dist-info}/METADATA +1 -1
- {warp_beacon-1.1.2.dist-info → warp_beacon-1.2.3.dist-info}/RECORD +16 -16
- {warp_beacon-1.1.2.dist-info → warp_beacon-1.2.3.dist-info}/top_level.txt +3 -0
- {warp_beacon-1.1.2.dist-info → warp_beacon-1.2.3.dist-info}/LICENSE +0 -0
- {warp_beacon-1.1.2.dist-info → warp_beacon-1.2.3.dist-info}/WHEEL +0 -0
- {warp_beacon-1.1.2.dist-info → warp_beacon-1.2.3.dist-info}/entry_points.txt +0 -0
warp_beacon/__version__.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "1.1.2"
+__version__ = "1.2.3"
 
warp_beacon/jobs/__init__.py
CHANGED
warp_beacon/jobs/abstract.py
CHANGED
@@ -25,6 +25,7 @@ class JobSettings(TypedDict):
 	save_items: bool
 	media_collection: list
 	job_origin: Origin
+	canonical_name: str
 
 class AbstractJob(ABC):
 	job_id: uuid.UUID = None
@@ -46,6 +47,7 @@ class AbstractJob(ABC):
 	save_items: bool = False
 	media_collection: list = []
 	job_origin: Origin = Origin.UNKNOWN
+	canonical_name: str = ""
 
 	def __init__(self, **kwargs: Unpack[JobSettings]) -> None:
 		if kwargs:
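The new canonical_name field lets a human-readable title travel with a job from scraper to uploader; later in this diff it becomes the Telegram audio filename. A hedged sketch of how a job might carry it, assuming DownloadJob subclasses AbstractJob (per warp_beacon/jobs/download_job.py) and that url is among the JobSettings keys:

	# Illustrative only; names other than canonical_name are assumptions.
	from warp_beacon.jobs.download_job import DownloadJob

	job = DownloadJob(
		url="https://music.youtube.com/watch?v=VIDEO_ID",  # placeholder id
		canonical_name="Artist - Track",  # new in 1.2.3; defaults to ""
	)
	print(job.canonical_name)  # Artist - Track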
warp_beacon/mediainfo/video.py
CHANGED
@@ -2,20 +2,17 @@ import io, os
 
 from typing import Union
 from PIL import Image
-
+
+from warp_beacon.mediainfo.abstract import MediaInfoAbstract
 
 import logging
 
-class VideoInfo(object):
+class VideoInfo(MediaInfoAbstract):
 	width = 0
 	height = 0
-	duration = 0.0
-	filename = ""
-	container = None
 
 	def __init__(self, filename: str) -> None:
-		self.filename
-		self.container = av.open(file=self.filename, mode='r')
+		super(VideoInfo, self).__init__(filename)
 
 		if self.container:
 			stream = self.container.streams.video[0]
@@ -31,21 +28,10 @@ class VideoInfo(object):
 			self.height = frame.height
 		# restore original position after previous frame search
 		self.container.seek(0, backward=False, stream=stream)
-
-	def __del__(self) -> None:
-		if self.container:
-			self.container.close()
 
 	def get_demensions(self) -> dict:
 		return {"width": self.width, "height": self.height}
 
-	def get_duration(self) -> float:
-		return self.duration
-
-	@staticmethod
-	def get_filesize(filename: str) -> float:
-		return os.stat(filename).st_size
-
 	def get_finfo(self, except_info: tuple=()) -> dict:
 		res = {}
 		res.update(self.get_demensions())
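This refactor hoists the container lifecycle (open on init, duration, filesize, close on delete) out of VideoInfo, presumably into MediaInfoAbstract so the new AudioInfo can share it; VideoInfo still reads self.container after calling super().__init__, and scraper code below still calls VideoInfo.get_filesize, so both must now resolve through the base class. A minimal sketch of that base under those assumptions (not the package's actual abstract.py):

	import os
	import av  # PyAV, already used by the package for media probing

	class MediaInfoAbstract:
		def __init__(self, filename: str) -> None:
			self.filename = filename
			self.duration = 0.0
			# open once here so every subclass (video, audio) shares the handle
			self.container = av.open(file=self.filename, mode='r')

		def __del__(self) -> None:
			if self.container:
				self.container.close()

		def get_duration(self) -> float:
			return self.duration

		@staticmethod
		def get_filesize(filename: str) -> float:
			return os.stat(filename).st_size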
warp_beacon/scraper/__init__.py
CHANGED
@@ -7,6 +7,7 @@ from queue import Empty
 
 from warp_beacon.scraper.exceptions import NotFound, UnknownError, TimeOut, Unavailable
 from warp_beacon.mediainfo.video import VideoInfo
+from warp_beacon.mediainfo.audio import AudioInfo
 from warp_beacon.compress.video import VideoCompress
 from warp_beacon.uploader import AsyncUploader
 from warp_beacon.jobs import Origin
@@ -38,14 +39,18 @@ class AsyncDownloader(object):
 			self.workers.append(proc)
 			proc.start()
 
-	def get_media_info(self, path: str, fr_media_info: dict={}) -> Optional[dict]:
+	def get_media_info(self, path: str, fr_media_info: dict={}, media_type: str = "video") -> Optional[dict]:
 		media_info = None
 		try:
 			if path:
-
-
-
-
+				if media_type == "video":
+					video_info = VideoInfo(path)
+					media_info = video_info.get_finfo(tuple(fr_media_info.keys()))
+					media_info.update(fr_media_info)
+					media_info["thumb"] = video_info.generate_thumbnail()
+				elif media_type == "audio":
+					audio_info = AudioInfo(path)
+					media_info = audio_info.get_finfo(tuple(fr_media_info.keys()))
 		except Exception as e:
 			logging.error("Failed to process media info!")
 			logging.exception(e)
@@ -73,6 +78,9 @@ class AsyncDownloader(object):
 				elif job.job_origin is Origin.YT_SHORTS:
 					from warp_beacon.scraper.youtube.shorts import YoutubeShortsScraper
 					actor = YoutubeShortsScraper()
+				elif job.job_origin is Origin.YT_MUSIC:
+					from warp_beacon.scraper.youtube.music import YoutubeMusicScraper
+					actor = YoutubeMusicScraper()
 				while True:
 					try:
 						logging.info("Downloading URL '%s'", job.url)
@@ -136,6 +144,9 @@ class AsyncDownloader(object):
 							item["local_compressed_media_path"] = new_filepath
 							media_info["filesize"] = VideoInfo.get_filesize(new_filepath)
 							logging.info("New file size of compressed file is '%.3f'", media_info["filesize"])
+					elif item["media_type"] == "audio":
+						media_info = self.get_media_info(item["local_media_path"], item.get("media_info", {}), "audio")
+						logging.info("Final media info: %s", media_info)
 					elif item["media_type"] == "collection":
 						for v in item["items"]:
 							if v["media_type"] == "video":
@@ -153,6 +164,8 @@ class AsyncDownloader(object):
 					if item.get("local_compressed_media_path", None):
 						job_args["local_media_path"] = item.get("local_compressed_media_path", None)
 
+					job_args["canonical_name"] = item.get("canonical_name", "")
+
 					logging.debug("local_media_path: '%s'", job_args.get("local_media_path", ""))
 					logging.debug("media_collection: '%s'", str(job_args.get("media_collection", {})))
 					upload_job = job.to_upload_job(**job_args)
warp_beacon/scraper/exceptions.py
CHANGED
@@ -30,7 +30,9 @@ class UnknownError(ScraperError):
 
 def extract_exception_message(e: Exception) -> str:
 	msg = ""
-	if hasattr(e, "error_string"):
+	if hasattr(e, "expected"):
+		msg = "Expected bytes: %d" % int(e.expected)
+	elif hasattr(e, "error_string"):
 		msg = e.error_string
 	elif hasattr(e, "message"):
 		msg = e.message
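The new first branch reports truncated downloads before the generic attribute probes; exceptions such as http.client.IncompleteRead expose an expected attribute (the byte count the server promised), which is presumably the motivation. A quick illustration with a stand-in exception class (the return statement is assumed; the diff does not show the function's tail):

	# Stand-in exception type for illustration only.
	class FakeIncompleteRead(Exception):
		def __init__(self, expected: int) -> None:
			self.expected = expected  # bytes promised but never received

	def extract_exception_message(e: Exception) -> str:
		msg = ""
		if hasattr(e, "expected"):
			msg = "Expected bytes: %d" % int(e.expected)
		elif hasattr(e, "error_string"):
			msg = e.error_string
		elif hasattr(e, "message"):
			msg = e.message
		return msg  # assumed fall-through

	print(extract_exception_message(FakeIncompleteRead(4096)))  # Expected bytes: 4096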
warp_beacon/scraper/instagram.py
CHANGED
@@ -1,14 +1,17 @@
 import os
 import time
+
+import socket
+import ssl
+
 from typing import Callable, Optional, Union
+
 from pathlib import Path
 import json
 
 import requests
-from requests.exceptions import ConnectTimeout, HTTPError
 import urllib3
 from urllib.parse import urljoin, urlparse
-import logging
 
 from instagrapi.mixins.story import Story
 from instagrapi.types import Media
@@ -18,6 +21,8 @@ from instagrapi.exceptions import LoginRequired, PleaseWaitFewMinutes, MediaNotFound
 from warp_beacon.scraper.exceptions import NotFound, UnknownError, TimeOut, extract_exception_message
 from warp_beacon.scraper.abstract import ScraperAbstract
 
+import logging
+
 INST_SESSION_FILE = "/var/warp_beacon/inst_session.json"
 
 class InstagramScraper(ScraperAbstract):
@@ -95,16 +100,18 @@ class InstagramScraper(ScraperAbstract):
 			try:
 				ret_val = func(*args, **kwargs)
 				break
-			except (
+			except (socket.timeout,
+				ssl.SSLError,
+				requests.exceptions.ConnectionError,
 				requests.exceptions.ReadTimeout,
+				requests.exceptions.ConnectTimeout,
+				requests.exceptions.HTTPError,
 				urllib3.exceptions.ReadTimeoutError,
-				urllib3.exceptions.ConnectionError
-				ConnectTimeout,
-				HTTPError) as e:
+				urllib3.exceptions.ConnectionError) as e:
 				logging.warning("Instagram read timeout! Retrying in 2 seconds ...")
 				logging.info("Your `IG_MAX_RETRIES` values is %d", max_retries)
 				logging.exception(e)
-				if max_retries
+				if max_retries <= retries:
 					raise TimeOut(extract_exception_message(e))
 				retries += 1
 				time.sleep(2)
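Both scrapers now funnel transient network failures through a widened exception tuple and the same bounded retry loop. A condensed sketch of that pattern, with names simplified for illustration (the real method is _download_hndlr and the real tuple also covers socket, ssl, and urllib3 errors):

	import os
	import time
	import logging
	from typing import Callable

	import requests

	RETRYABLE = (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout)

	def with_retries(func: Callable, *args, **kwargs):
		max_retries = int(os.environ.get("IG_MAX_RETRIES", default=5))
		retries = 0
		while True:
			try:
				return func(*args, **kwargs)
			except RETRYABLE as e:
				logging.exception(e)
				if max_retries <= retries:
					raise  # give up; the real code wraps this in TimeOut(...)
				retries += 1
				time.sleep(2)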
warp_beacon/scraper/youtube/shorts.py
CHANGED
@@ -2,18 +2,19 @@ import os
 import pathlib
 import time
 
+import socket
+import ssl
+
 from typing import Callable, Union
 
-
-
-
-from urllib.error import URLError
-from http.client import HTTPException
+import requests
+import urllib
+import http.client
 
 from pytubefix import YouTube
 from pytubefix.exceptions import VideoUnavailable, VideoPrivate, MaxRetriesExceeded
 
-from warp_beacon.scraper.exceptions import NotFound, UnknownError, TimeOut, extract_exception_message
+from warp_beacon.scraper.exceptions import NotFound, UnknownError, TimeOut, Unavailable, extract_exception_message
 from warp_beacon.scraper.abstract import ScraperAbstract
 
 import logging
@@ -25,6 +26,11 @@ class YoutubeShortsScraper(ScraperAbstract):
 	def __del__(self) -> None:
 		pass
 
+	def remove_tmp_files(self) -> None:
+		for i in os.listdir(DOWNLOAD_DIR):
+			if "yt_download_" in i:
+				os.unlink("%s/%s" % (DOWNLOAD_DIR, i))
+
 	def _download_hndlr(self, func: Callable, *args: tuple[str], **kwargs: dict[str]) -> Union[str, dict]:
 		ret_val = ''
 		max_retries = int(os.environ.get("YT_MAX_RETRIES", default=8))
@@ -37,11 +43,14 @@ class YoutubeShortsScraper(ScraperAbstract):
 			except MaxRetriesExceeded:
 				# do noting, not interested
 				pass
-			except (timeout, SSLError, HTTPException, RequestException, URLError) as e:
+			except (socket.timeout, ssl.SSLError, http.client.HTTPException, requests.RequestException, urllib.error.URLError) as e:
+				if hasattr(e, "code") and int(e.code) == 403:
+					raise Unavailable(extract_exception_message(e))
 				logging.warning("Youtube read timeout! Retrying in %d seconds ...", pause_secs)
 				logging.info("Your `YT_MAX_RETRIES` values is %d", max_retries)
 				logging.exception(extract_exception_message(e))
-				if max_retries
+				if max_retries <= retries:
+					self.remove_tmp_files()
 					raise TimeOut(extract_exception_message(e))
 				retries += 1
 				time.sleep(pause_secs)
@@ -74,8 +83,10 @@ class YoutubeShortsScraper(ScraperAbstract):
 				output_path="/tmp",
 				max_retries=0,
 				timeout=timeout,
-				skip_existing=False
+				skip_existing=False,
+				filename_prefix="yt_download_"
 			)
+			logging.debug("Temp filename: '%s'", local_file)
 			res.append({"local_media_path": self.rename_local_file(local_file), "media_type": "video"})
 
 		return res
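Two behavioral changes here: HTTP 403 responses now surface immediately as Unavailable instead of burning through retries, and once retries are exhausted the partially downloaded yt_download_* files are swept from the download directory (the new filename_prefix argument is what makes them identifiable). A small sketch of the cleanup idea in isolation, with DOWNLOAD_DIR standing in for the package's constant:

	import os

	DOWNLOAD_DIR = "/tmp"  # stand-in for the package's configured download dir

	def remove_tmp_files() -> None:
		# delete leftovers from aborted pytubefix downloads
		for name in os.listdir(DOWNLOAD_DIR):
			if "yt_download_" in name:
				os.unlink(os.path.join(DOWNLOAD_DIR, name))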
warp_beacon/storage/__init__.py
CHANGED
@@ -2,7 +2,7 @@ import os
 #from typing import Optional
 import logging
 
-from urllib.parse import urlparse
+from urllib.parse import urlparse, parse_qs
 
 from pymongo import MongoClient
 
@@ -28,6 +28,15 @@ class Storage(object):
 
 	@staticmethod
 	def compute_uniq(url: str) -> str:
+		if "music.youtube.com" in url:
+			qs = parse_qs(urlparse(url).query)
+			yt_vid_id = qs.get('v', None)
+			if yt_vid_id:
+				path = urlparse(url).path.strip('/').replace("watch", "yt_music")
+				return "%s/%s" % (path, yt_vid_id)
+			else:
+				raise ValueError("Failed to generate uniq_id for url '%s'", url)
+
 		path = urlparse(url).path.strip('/')
 		return path
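Since every YouTube Music track shares the /watch path, the video id from the query string is what actually disambiguates the stored uniq_id. A runnable sketch of the new branch (a copy of the logic above, not an import of the module):

	from urllib.parse import urlparse, parse_qs

	def compute_uniq(url: str) -> str:
		if "music.youtube.com" in url:
			qs = parse_qs(urlparse(url).query)
			yt_vid_id = qs.get('v', None)
			if yt_vid_id:
				path = urlparse(url).path.strip('/').replace("watch", "yt_music")
				return "%s/%s" % (path, yt_vid_id)
			raise ValueError("Failed to generate uniq_id for url '%s'" % url)
		return urlparse(url).path.strip('/')

	# parse_qs() yields a list per key, so the id arrives wrapped in list syntax:
	print(compute_uniq("https://music.youtube.com/watch?v=abc123"))  # yt_music/['abc123']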
warp_beacon/warp_beacon.py
CHANGED
@@ -10,7 +10,7 @@ from io import BytesIO
 from urlextract import URLExtract
 
 import telegram
-from telegram import Bot, ForceReply, Update, Chat, error, InputMediaVideo, InputMediaPhoto, MessageEntity, InlineKeyboardMarkup, InlineKeyboardButton
+from telegram import Bot, ForceReply, Update, Chat, error, InputMediaVideo, InputMediaPhoto, InputMediaAudio, MessageEntity, InlineKeyboardMarkup, InlineKeyboardButton
 from telegram.ext import Application, CommandHandler, ContextTypes, MessageHandler, filters
 from telegram.constants import ParseMode
 
@@ -234,6 +234,17 @@ def build_tg_args(update: Update, context: ContextTypes.DEFAULT_TYPE, job: UploadJob
 				media=open(job.local_media_path, 'rb'),
 				filename="downloaded_via_warp_beacon_bot%s" % (os.path.splitext(job.local_media_path)[-1])
 			)
+	elif job.media_type == "audio":
+		if job.tg_file_id:
+			if job.placeholder_message_id:
+				args["media"] = InputMediaAudio(media=job.tg_file_id.replace(":audio", ''))
+			else:
+				args["audio"] = job.tg_file_id.replace(":audio", '')
+		else:
+			args["media"] = InputMediaAudio(
+				media=open(job.local_media_path, 'rb'),
+				filename="%s%s" % (job.canonical_name, os.path.splitext(job.local_media_path)[-1])
+			)
 	elif job.media_type == "collection":
 		if job.tg_file_id:
 			args["media"] = []
@@ -306,6 +317,14 @@ async def upload_job(update: Update, context: ContextTypes.DEFAULT_TYPE, job: UploadJob
 			if message.photo:
 				tg_file_ids.append(message.photo[-1].file_id)
 				job.tg_file_id = message.photo[-1].file_id
+		elif job.media_type == "audio":
+			if job.placeholder_message_id:
+				message = await context.bot.edit_message_media(**build_tg_args(update, context, job))
+			else:
+				message = await update.message.reply_audio(**build_tg_args(update, context, job))
+			if message.audio:
+				tg_file_ids.append(message.audio.file_id)
+				job.tg_file_id = message.audio.file_id
 		elif job.media_type == "collection":
 			sent_messages = await update.message.reply_media_group(**build_tg_args(update, context, job))
 			if job.placeholder_message_id:
@@ -371,6 +390,9 @@ def extract_origin(url: str) -> Origin:
 	if "youtube.com/" in url and "shorts/" in url:
 		return Origin.YT_SHORTS
 
+	if "youtube.com/" in url and "music." in url:
+		return Origin.YT_MUSIC
+
 	return Origin.UNKNOWN
 
 async def handler(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
@@ -388,7 +410,7 @@ async def handler(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
 	for url in urls:
 		origin = extract_origin(url)
 		if origin is Origin.UNKNOWN:
-			logging.info("Only Instagram and YouTube
+			logging.info("Only Instagram, YouTube Shorts and YouTube Music are now supported. Skipping.")
 			continue
 		entities, tg_file_ids = [], []
 		uniq_id = Storage.compute_uniq(url)
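Routing a URL to the new origin is a plain substring check, mirroring the existing Shorts detection. A sketch of the dispatch as the diff leaves it (the function's earlier Instagram branch is omitted here, and Origin.YT_MUSIC is assumed to be the one-line addition to warp_beacon/jobs/__init__.py listed in the summary):

	from warp_beacon.jobs import Origin

	def extract_origin(url: str) -> Origin:
		# shorts/ is tested before the music. subdomain
		if "youtube.com/" in url and "shorts/" in url:
			return Origin.YT_SHORTS
		if "youtube.com/" in url and "music." in url:
			return Origin.YT_MUSIC
		return Origin.UNKNOWN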
{warp_beacon-1.1.2.dist-info → warp_beacon-1.2.3.dist-info}/RECORD
CHANGED
@@ -2,30 +2,30 @@ etc/warp_beacon/warp_beacon.conf,sha256=1gGvh36cnFr0rU4mVomfy66hQz9EvugaNzeH6_tm
 lib/systemd/system/warp_beacon.service,sha256=lPmHqLqcI2eIV7nwHS0qcALQrznixqJuwwPfa2mDLUA,372
 var/warp_beacon/placeholder.gif,sha256=cE5CGJVaop4Sx21zx6j4AyoHU0ncmvQuS2o6hJfEH88,6064
 warp_beacon/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-warp_beacon/__version__.py,sha256=
-warp_beacon/warp_beacon.py,sha256=
+warp_beacon/__version__.py,sha256=FoCfbP1cOqI5xxyfhI-2bmJFC6d3DUpIK7513kJdTxI,23
+warp_beacon/warp_beacon.py,sha256=DeENFTvlwa8qWhQrLPvwReaOF9LcC3rzdR2_nouEiRs,20910
 warp_beacon/compress/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 warp_beacon/compress/video.py,sha256=_PDMVYCyzLYxHv1uZmmzGcG_8rjaZr7BTXsXTTy_oS4,2846
-warp_beacon/jobs/__init__.py,sha256=
-warp_beacon/jobs/abstract.py,sha256=
+warp_beacon/jobs/__init__.py,sha256=99x1MDo8GLfnKtqiNExNnrWCP6_rKa4WclQDQhzjSxY,136
+warp_beacon/jobs/abstract.py,sha256=zHkh31JT4YL-607hmqswAlI3kwhuXwUZafjYOBqpEqw,1807
 warp_beacon/jobs/download_job.py,sha256=wfZrKUerfYIjWkRxPzfl5gwIlcotIMH7OpTUM9ae8NY,736
 warp_beacon/jobs/upload_job.py,sha256=Vaogc4vbpAfyaT4VkIHEPLFRELmM44TDqkmnPYh3Ymc,740
 warp_beacon/mediainfo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 warp_beacon/mediainfo/abstract.py,sha256=O9ZROcW1cDlLMqBrUW2KI06tHCQ_iflDJknZA3iFaHE,591
 warp_beacon/mediainfo/audio.py,sha256=ous88kwQj4bDIChN5wnGil5LqTs0IQHH0d-nyrL0-ZM,651
-warp_beacon/mediainfo/video.py,sha256=
-warp_beacon/scraper/__init__.py,sha256=
+warp_beacon/mediainfo/video.py,sha256=A0CZX3wdL9i1M4j_YW8hbg7betGA2UXwY1RKbiOL-FY,2381
+warp_beacon/scraper/__init__.py,sha256=lb6ehjcrEt_YF_urpNobnO44RD7nKH5jqcSpMkSkNsE,8609
 warp_beacon/scraper/abstract.py,sha256=um4wUthO_7IsoXjKiUTWyBBbKlf-N01aZJK9N2UQI9I,408
-warp_beacon/scraper/exceptions.py,sha256=
-warp_beacon/scraper/instagram.py,sha256=
+warp_beacon/scraper/exceptions.py,sha256=BdC9JRXRSuvZR8nlfYG62SITMeg3DGvRLsCsBr_ACy0,1055
+warp_beacon/scraper/instagram.py,sha256=atYkK--4ctvSfTqiPQnYHsJ4_q5fCYfNj9CwWr9tvRM,7809
 warp_beacon/scraper/youtube/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 warp_beacon/scraper/youtube/music.py,sha256=htxLJW-RaRhgxqJ9276ibaLnuStNtw26-pKJwfaQpgY,3335
-warp_beacon/scraper/youtube/shorts.py,sha256=
-warp_beacon/storage/__init__.py,sha256=
+warp_beacon/scraper/youtube/shorts.py,sha256=yihisHSVDDT-4MflfjYkHWpnp8PrlyNo21ICKSf-rYU,2890
+warp_beacon/storage/__init__.py,sha256=ljeEP_zKDxKVBXWXdhJL1c2hTKWhP8ubazOkojAkjZs,2724
 warp_beacon/uploader/__init__.py,sha256=auD1arKpJdN1eFUbTFoa9Gmv-ZYZNesMoT193__pDz8,4507
-warp_beacon-1.
-warp_beacon-1.
-warp_beacon-1.
-warp_beacon-1.
-warp_beacon-1.
-warp_beacon-1.
+warp_beacon-1.2.3.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+warp_beacon-1.2.3.dist-info/METADATA,sha256=kooNNS-QQyjIXZRjkZDJ7S7WyI3gt6MiN6GDESaZHXA,18244
+warp_beacon-1.2.3.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
+warp_beacon-1.2.3.dist-info/entry_points.txt,sha256=eSB61Rb89d56WY0O-vEIQwkn18J-4CMrJcLA_R_8h3g,119
+warp_beacon-1.2.3.dist-info/top_level.txt,sha256=-jYi-GhbnF8nFw16lQwExFZqTiFRW62R8HztQDNQ1po,566
+warp_beacon-1.2.3.dist-info/RECORD,,
{warp_beacon-1.1.2.dist-info → warp_beacon-1.2.3.dist-info}/top_level.txt
CHANGED
@@ -7,12 +7,15 @@ warp_beacon/jobs/abstract
 warp_beacon/jobs/download_job
 warp_beacon/jobs/upload_job
 warp_beacon/mediainfo
+warp_beacon/mediainfo/abstract
+warp_beacon/mediainfo/audio
 warp_beacon/mediainfo/video
 warp_beacon/scraper
 warp_beacon/scraper/abstract
 warp_beacon/scraper/exceptions
 warp_beacon/scraper/instagram
 warp_beacon/scraper/youtube
+warp_beacon/scraper/youtube/music
 warp_beacon/scraper/youtube/shorts
 warp_beacon/storage
 warp_beacon/uploader
{warp_beacon-1.1.2.dist-info → warp_beacon-1.2.3.dist-info}/LICENSE
File without changes
{warp_beacon-1.1.2.dist-info → warp_beacon-1.2.3.dist-info}/WHEEL
File without changes
{warp_beacon-1.1.2.dist-info → warp_beacon-1.2.3.dist-info}/entry_points.txt
File without changes