warp-beacon 1.1.1-py3-none-any.whl → 1.2.3-py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
@@ -1,2 +1,2 @@
- __version__ = "1.1.1"
+ __version__ = "1.2.3"
 
@@ -3,4 +3,5 @@ from enum import Enum
  class Origin(Enum):
      INSTAGRAM = "instagram"
      YT_SHORTS = "yt_shorts"
+     YT_MUSIC = "yt_music"
      UNKNOWN = "unknown"
@@ -25,6 +25,7 @@ class JobSettings(TypedDict):
      save_items: bool
      media_collection: list
      job_origin: Origin
+     canonical_name: str
 
  class AbstractJob(ABC):
      job_id: uuid.UUID = None
@@ -46,6 +47,7 @@ class AbstractJob(ABC):
      save_items: bool = False
      media_collection: list = []
      job_origin: Origin = Origin.UNKNOWN
+     canonical_name: str = ""
 
      def __init__(self, **kwargs: Unpack[JobSettings]) -> None:
          if kwargs:
@@ -0,0 +1,28 @@
+ import os
+ from abc import ABC, abstractmethod
+ 
+ import av
+ 
+ class MediaInfoAbstract(ABC):
+     filename = ""
+     container = None
+     duration = 0.0
+ 
+     def __init__(self, filename: str) -> None:
+         self.filename = filename
+         self.container = av.open(file=self.filename, mode='r')
+ 
+     def __del__(self) -> None:
+         if self.container:
+             self.container.close()
+ 
+     def get_duration(self) -> float:
+         return self.duration
+ 
+     @staticmethod
+     def get_filesize(filename: str) -> float:
+         return os.stat(filename).st_size
+ 
+     @abstractmethod
+     def get_finfo(cls, except_info: tuple=()) -> dict:
+         raise NotImplementedError
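
The new file above (listed in the RECORD below as warp_beacon/mediainfo/abstract.py) pulls the shared PyAV container handling out of VideoInfo into a base class. A minimal sketch of the same probing step, assuming PyAV is installed and a local file such as sample.mp3 exists:

```python
# Sketch only: the file name is an assumption, not part of the package.
import av

with av.open("sample.mp3", mode="r") as container:
    stream = container.streams.audio[0]                     # first audio stream
    duration = float(stream.duration * stream.time_base)    # stream.duration is in time_base units
    print(round(duration), "seconds")
```
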
@@ -0,0 +1,19 @@
+ from warp_beacon.mediainfo.abstract import MediaInfoAbstract
+ 
+ class AudioInfo(MediaInfoAbstract):
+     def __init__(self, filename: str) -> None:
+         super(AudioInfo, self).__init__(filename)
+         if self.container:
+             stream_list = self.container.streams.get(audio=0)
+             if stream_list:
+                 stream = stream_list[0]
+                 time_base = stream.time_base
+                 self.duration = float(stream.duration * time_base)
+ 
+     def get_finfo(self, except_info: tuple=()) -> dict:
+         res = {}
+         if "duration" not in except_info:
+             res["duration"] = round(self.get_duration())
+         if "filesize" not in except_info:
+             res["filesize"] = AudioInfo.get_filesize(self.filename)
+         return res
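
A hedged usage sketch of the new AudioInfo helper (the path is illustrative; assumes the package and PyAV are installed). The except_info tuple lets a caller skip keys it already knows from upstream metadata:

```python
from warp_beacon.mediainfo.audio import AudioInfo

info = AudioInfo("/tmp/track.mp3")
print(info.get_finfo())               # e.g. {"duration": 213, "filesize": 3405122}
print(info.get_finfo(("filesize",)))  # only {"duration": ...}
```
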
@@ -2,20 +2,17 @@ import io, os
 
  from typing import Union
  from PIL import Image
- import av
+ 
+ from warp_beacon.mediainfo.abstract import MediaInfoAbstract
 
  import logging
 
- class VideoInfo(object):
+ class VideoInfo(MediaInfoAbstract):
      width = 0
      height = 0
-     duration = 0.0
-     filename = ""
-     container = None
 
      def __init__(self, filename: str) -> None:
-         self.filename = filename
-         self.container = av.open(file=self.filename, mode='r')
+         super(VideoInfo, self).__init__(filename)
 
          if self.container:
              stream = self.container.streams.video[0]
@@ -31,21 +28,10 @@ class VideoInfo(object):
              self.height = frame.height
          # restore original position after previous frame search
          self.container.seek(0, backward=False, stream=stream)
- 
-     def __del__(self) -> None:
-         if self.container:
-             self.container.close()
 
      def get_demensions(self) -> dict:
          return {"width": self.width, "height": self.height}
 
-     def get_duration(self) -> float:
-         return self.duration
- 
-     @staticmethod
-     def get_filesize(filename: str) -> float:
-         return os.stat(filename).st_size
- 
      def get_finfo(self, except_info: tuple=()) -> dict:
          res = {}
          res.update(self.get_demensions())
@@ -7,6 +7,7 @@ from queue import Empty
 
  from warp_beacon.scraper.exceptions import NotFound, UnknownError, TimeOut, Unavailable
  from warp_beacon.mediainfo.video import VideoInfo
+ from warp_beacon.mediainfo.audio import AudioInfo
  from warp_beacon.compress.video import VideoCompress
  from warp_beacon.uploader import AsyncUploader
  from warp_beacon.jobs import Origin
@@ -38,14 +39,18 @@ class AsyncDownloader(object):
              self.workers.append(proc)
              proc.start()
 
-     def get_media_info(self, path: str, fr_media_info: dict={}) -> Optional[dict]:
+     def get_media_info(self, path: str, fr_media_info: dict={}, media_type: str = "video") -> Optional[dict]:
          media_info = None
          try:
              if path:
-                 video_info = VideoInfo(path)
-                 media_info = video_info.get_finfo(tuple(fr_media_info.keys()))
-                 media_info.update(fr_media_info)
-                 media_info["thumb"] = video_info.generate_thumbnail()
+                 if media_type == "video":
+                     video_info = VideoInfo(path)
+                     media_info = video_info.get_finfo(tuple(fr_media_info.keys()))
+                     media_info.update(fr_media_info)
+                     media_info["thumb"] = video_info.generate_thumbnail()
+                 elif media_type == "audio":
+                     audio_info = AudioInfo(path)
+                     media_info = audio_info.get_finfo(tuple(fr_media_info.keys()))
          except Exception as e:
              logging.error("Failed to process media info!")
              logging.exception(e)
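
The hunk above routes metadata extraction by media_type. A standalone sketch of the same dispatch (probe() is an illustrative name, not part of the package; assumes warp_beacon is installed):

```python
from warp_beacon.mediainfo.video import VideoInfo
from warp_beacon.mediainfo.audio import AudioInfo

def probe(path: str, media_type: str = "video") -> dict:
    # Videos get dimensions, duration and filesize; audio gets duration and filesize only.
    if media_type == "video":
        return VideoInfo(path).get_finfo()
    elif media_type == "audio":
        return AudioInfo(path).get_finfo()
    return {}
```
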
@@ -73,6 +78,9 @@ class AsyncDownloader(object):
                  elif job.job_origin is Origin.YT_SHORTS:
                      from warp_beacon.scraper.youtube.shorts import YoutubeShortsScraper
                      actor = YoutubeShortsScraper()
+                 elif job.job_origin is Origin.YT_MUSIC:
+                     from warp_beacon.scraper.youtube.music import YoutubeMusicScraper
+                     actor = YoutubeMusicScraper()
                  while True:
                      try:
                          logging.info("Downloading URL '%s'", job.url)
@@ -136,6 +144,9 @@ class AsyncDownloader(object):
                      item["local_compressed_media_path"] = new_filepath
                      media_info["filesize"] = VideoInfo.get_filesize(new_filepath)
                      logging.info("New file size of compressed file is '%.3f'", media_info["filesize"])
+             elif item["media_type"] == "audio":
+                 media_info = self.get_media_info(item["local_media_path"], item.get("media_info", {}), "audio")
+                 logging.info("Final media info: %s", media_info)
              elif item["media_type"] == "collection":
                  for v in item["items"]:
                      if v["media_type"] == "video":
@@ -153,6 +164,8 @@ class AsyncDownloader(object):
                  if item.get("local_compressed_media_path", None):
                      job_args["local_media_path"] = item.get("local_compressed_media_path", None)
 
+                 job_args["canonical_name"] = item.get("canonical_name", "")
+ 
                  logging.debug("local_media_path: '%s'", job_args.get("local_media_path", ""))
                  logging.debug("media_collection: '%s'", str(job_args.get("media_collection", {})))
                  upload_job = job.to_upload_job(**job_args)
@@ -30,7 +30,9 @@ class UnknownError(ScraperError):
 
  def extract_exception_message(e: Exception) -> str:
      msg = ""
-     if hasattr(e, "error_string"):
+     if hasattr(e, "expected"):
+         msg = "Expected bytes: %d" % int(e.expected)
+     elif hasattr(e, "error_string"):
          msg = e.error_string
      elif hasattr(e, "message"):
          msg = e.message
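
The new "expected" branch appears to target http.client.IncompleteRead, which the YouTube scrapers below now catch; its instances carry .partial (bytes received) and .expected (bytes still missing). A small self-contained check (values are illustrative):

```python
import http.client

try:
    raise http.client.IncompleteRead(b"abc", expected=1024)
except http.client.IncompleteRead as e:
    if hasattr(e, "expected"):
        print("Expected bytes: %d" % int(e.expected))  # Expected bytes: 1024
```
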
@@ -1,14 +1,17 @@
  import os
  import time
+ 
+ import socket
+ import ssl
+ 
  from typing import Callable, Optional, Union
+ 
  from pathlib import Path
  import json
 
  import requests
- from requests.exceptions import ConnectTimeout, HTTPError
  import urllib3
  from urllib.parse import urljoin, urlparse
- import logging
 
  from instagrapi.mixins.story import Story
  from instagrapi.types import Media
@@ -18,6 +21,8 @@ from instagrapi.exceptions import LoginRequired, PleaseWaitFewMinutes, MediaNotF
  from warp_beacon.scraper.exceptions import NotFound, UnknownError, TimeOut, extract_exception_message
  from warp_beacon.scraper.abstract import ScraperAbstract
 
+ import logging
+ 
  INST_SESSION_FILE = "/var/warp_beacon/inst_session.json"
 
  class InstagramScraper(ScraperAbstract):
@@ -95,16 +100,18 @@ class InstagramScraper(ScraperAbstract):
              try:
                  ret_val = func(*args, **kwargs)
                  break
-             except (requests.exceptions.ConnectionError,
+             except (socket.timeout,
+                 ssl.SSLError,
+                 requests.exceptions.ConnectionError,
                  requests.exceptions.ReadTimeout,
+                 requests.exceptions.ConnectTimeout,
+                 requests.exceptions.HTTPError,
                  urllib3.exceptions.ReadTimeoutError,
-                 urllib3.exceptions.ConnectionError,
-                 ConnectTimeout,
-                 HTTPError) as e:
+                 urllib3.exceptions.ConnectionError) as e:
                  logging.warning("Instagram read timeout! Retrying in 2 seconds ...")
                  logging.info("Your `IG_MAX_RETRIES` values is %d", max_retries)
                  logging.exception(e)
-                 if max_retries == retries:
+                 if max_retries <= retries:
                      raise TimeOut(extract_exception_message(e))
                  retries += 1
                  time.sleep(2)
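
The widened except clause and the switch from `==` to `<=` make the retry budget strict: once retries reaches IG_MAX_RETRIES the handler re-raises as TimeOut instead of looping again. A generic sketch of the same pattern (retry_call and the exception tuple are illustrative, not package API):

```python
import time

def retry_call(func, max_retries: int = 3, pause_secs: float = 2.0):
    retries = 0
    while True:
        try:
            return func()
        except (ConnectionError, TimeoutError):
            if max_retries <= retries:   # same bound as the diff: give up once the budget is spent
                raise
            retries += 1
            time.sleep(pause_secs)
```
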
@@ -0,0 +1,110 @@
+ import os
+ import pathlib
+ import time
+ 
+ import socket
+ import ssl
+ 
+ from typing import Callable, Union
+ 
+ import requests
+ import urllib
+ import http.client
+ 
+ from pytubefix import YouTube
+ from pytubefix.exceptions import VideoUnavailable, VideoPrivate, MaxRetriesExceeded
+ 
+ from warp_beacon.scraper.exceptions import NotFound, UnknownError, TimeOut, Unavailable, extract_exception_message
+ from warp_beacon.scraper.abstract import ScraperAbstract
+ 
+ import logging
+ 
+ DOWNLOAD_DIR = "/tmp"
+ 
+ class YoutubeMusicScraper(ScraperAbstract):
+ 
+     def __init__(self) -> None:
+         pass
+ 
+     def __del__(self) -> None:
+         pass
+ 
+     def remove_tmp_files(self) -> None:
+         for i in os.listdir(DOWNLOAD_DIR):
+             if "yt_download_" in i:
+                 os.unlink("%s/%s" % (DOWNLOAD_DIR, i))
+ 
+     def _download_hndlr(self, func: Callable, *args: tuple[str], **kwargs: dict[str]) -> Union[str, dict]:
+         ret_val = ''
+         max_retries = int(os.environ.get("YT_MUSIC_MAX_RETRIES", default=6))
+         pause_secs = int(os.environ.get("YT_MUSIC_PAUSE_BEFORE_RETRY", default=3))
+         timeout = int(os.environ.get("YT_MUSIC_TIMEOUT", default=60))
+         timeout_increment = int(os.environ.get("YT_MUSIC_TIMEOUT_INCREMENT", default=60))
+         retries = 0
+         while max_retries >= retries:
+             try:
+                 kwargs["timeout"] = timeout
+                 ret_val = func(*args, **kwargs)
+                 break
+             except MaxRetriesExceeded:
+                 # do noting, not interested
+                 pass
+             #except http.client.IncompleteRead as e:
+             except (socket.timeout,
+                 ssl.SSLError,
+                 http.client.IncompleteRead,
+                 http.client.HTTPException,
+                 requests.RequestException,
+                 urllib.error.URLError,
+                 urllib.error.HTTPError) as e:
+                 if hasattr(e, "code") and int(e.code) == 403:
+                     raise Unavailable(extract_exception_message(e))
+                 logging.warning("Youtube read timeout! Retrying in %d seconds ...", pause_secs)
+                 logging.info("Your `YT_MUSIC_MAX_RETRIES` values is %d", max_retries)
+                 logging.exception(extract_exception_message(e))
+                 if max_retries <= retries:
+                     self.remove_tmp_files()
+                     raise TimeOut(extract_exception_message(e))
+                 retries += 1
+                 timeout += timeout_increment
+                 time.sleep(pause_secs)
+             except (VideoUnavailable, VideoPrivate) as e:
+                 raise Unavailable(extract_exception_message(e))
+ 
+         return ret_val
+ 
+     def rename_local_file(self, filename: str) -> str:
+         if not os.path.exists(filename):
+             raise NameError("No file provided")
+         path_info = pathlib.Path(filename)
+         ext = path_info.suffix
+         old_filename = path_info.stem
+         time_name = str(time.time()).replace('.', '_')
+         new_filename = "%s%s" % (time_name, ext)
+         new_filepath = "%s/%s" % (os.path.dirname(filename), new_filename)
+ 
+         os.rename(filename, new_filepath)
+ 
+         return new_filepath
+ 
+     def _download(self, url: str, timeout: int = 0) -> list:
+         res = []
+         yt = YouTube(url)
+         stream = yt.streams.get_audio_only()
+         if stream:
+             logging.info("Operation timeout is '%d'", timeout)
+             local_file = stream.download(
+                 output_path=DOWNLOAD_DIR,
+                 max_retries=0,
+                 timeout=timeout,
+                 skip_existing=False,
+                 filename_prefix='yt_download_',
+                 mp3=True
+             )
+             logging.info("Temp filename: '%s'", local_file)
+             res.append({"local_media_path": self.rename_local_file(local_file), "canonical_name": stream.title, "media_type": "audio"})
+ 
+         return res
+ 
+     def download(self, url: str) -> list:
+         return self._download_hndlr(self._download, url)
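
The new YoutubeMusicScraper reads its retry and timeout budget from environment variables; a deployment might configure them like this (the variable names come from the hunk above, the values are illustrative):

```python
import os

os.environ.setdefault("YT_MUSIC_MAX_RETRIES", "6")          # attempts before TimeOut is raised
os.environ.setdefault("YT_MUSIC_PAUSE_BEFORE_RETRY", "3")   # seconds between attempts
os.environ.setdefault("YT_MUSIC_TIMEOUT", "60")             # initial per-download timeout
os.environ.setdefault("YT_MUSIC_TIMEOUT_INCREMENT", "60")   # added to the timeout on every retry
```
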
@@ -2,18 +2,19 @@ import os
  import pathlib
  import time
 
+ import socket
+ import ssl
+ 
  from typing import Callable, Union
 
- from socket import timeout
- from ssl import SSLError
- from requests.exceptions import RequestException
- from urllib.error import URLError
- from http.client import HTTPException
+ import requests
+ import urllib
+ import http.client
 
  from pytubefix import YouTube
  from pytubefix.exceptions import VideoUnavailable, VideoPrivate, MaxRetriesExceeded
 
- from warp_beacon.scraper.exceptions import NotFound, UnknownError, TimeOut, extract_exception_message
+ from warp_beacon.scraper.exceptions import NotFound, UnknownError, TimeOut, Unavailable, extract_exception_message
  from warp_beacon.scraper.abstract import ScraperAbstract
 
  import logging
@@ -25,6 +26,11 @@ class YoutubeShortsScraper(ScraperAbstract):
      def __del__(self) -> None:
          pass
 
+     def remove_tmp_files(self) -> None:
+         for i in os.listdir(DOWNLOAD_DIR):
+             if "yt_download_" in i:
+                 os.unlink("%s/%s" % (DOWNLOAD_DIR, i))
+ 
      def _download_hndlr(self, func: Callable, *args: tuple[str], **kwargs: dict[str]) -> Union[str, dict]:
          ret_val = ''
          max_retries = int(os.environ.get("YT_MAX_RETRIES", default=8))
@@ -37,11 +43,14 @@ class YoutubeShortsScraper(ScraperAbstract):
              except MaxRetriesExceeded:
                  # do noting, not interested
                  pass
-             except (timeout, SSLError, HTTPException, RequestException, URLError) as e:
+             except (socket.timeout, ssl.SSLError, http.client.HTTPException, requests.RequestException, urllib.error.URLError) as e:
+                 if hasattr(e, "code") and int(e.code) == 403:
+                     raise Unavailable(extract_exception_message(e))
                  logging.warning("Youtube read timeout! Retrying in %d seconds ...", pause_secs)
                  logging.info("Your `YT_MAX_RETRIES` values is %d", max_retries)
                  logging.exception(extract_exception_message(e))
-                 if max_retries >= retries:
+                 if max_retries <= retries:
+                     self.remove_tmp_files()
                      raise TimeOut(extract_exception_message(e))
                  retries += 1
                  time.sleep(pause_secs)
@@ -74,8 +83,10 @@ class YoutubeShortsScraper(ScraperAbstract):
                  output_path="/tmp",
                  max_retries=0,
                  timeout=timeout,
-                 skip_existing=False
+                 skip_existing=False,
+                 filename_prefix="yt_download_"
              )
+             logging.debug("Temp filename: '%s'", local_file)
              res.append({"local_media_path": self.rename_local_file(local_file), "media_type": "video"})
 
          return res
@@ -2,7 +2,7 @@ import os
  #from typing import Optional
  import logging
 
- from urllib.parse import urlparse
+ from urllib.parse import urlparse, parse_qs
 
  from pymongo import MongoClient
 
@@ -28,6 +28,15 @@ class Storage(object):
 
      @staticmethod
      def compute_uniq(url: str) -> str:
+         if "music.youtube.com" in url:
+             qs = parse_qs(urlparse(url).query)
+             yt_vid_id = qs.get('v', None)
+             if yt_vid_id:
+                 path = urlparse(url).path.strip('/').replace("watch", "yt_music")
+                 return "%s/%s" % (path, yt_vid_id)
+             else:
+                 raise ValueError("Failed to generate uniq_id for url '%s'", url)
+ 
          path = urlparse(url).path.strip('/')
          return path
 
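
A worked example of the new uniq-id rule for YouTube Music links (the URL is illustrative). Note that parse_qs returns a list of values, so the generated id embeds the list repr:

```python
from urllib.parse import urlparse, parse_qs

url = "https://music.youtube.com/watch?v=dQw4w9WgXcQ"
qs = parse_qs(urlparse(url).query)
path = urlparse(url).path.strip('/').replace("watch", "yt_music")
print("%s/%s" % (path, qs.get('v')))  # yt_music/['dQw4w9WgXcQ']
```
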
@@ -10,7 +10,7 @@ from io import BytesIO
  from urlextract import URLExtract
 
  import telegram
- from telegram import Bot, ForceReply, Update, Chat, error, InputMediaVideo, InputMediaPhoto, MessageEntity, InlineKeyboardMarkup, InlineKeyboardButton
+ from telegram import Bot, ForceReply, Update, Chat, error, InputMediaVideo, InputMediaPhoto, InputMediaAudio, MessageEntity, InlineKeyboardMarkup, InlineKeyboardButton
  from telegram.ext import Application, CommandHandler, ContextTypes, MessageHandler, filters
  from telegram.constants import ParseMode
 
@@ -234,6 +234,17 @@ def build_tg_args(update: Update, context: ContextTypes.DEFAULT_TYPE, job: Uploa
              media=open(job.local_media_path, 'rb'),
              filename="downloaded_via_warp_beacon_bot%s" % (os.path.splitext(job.local_media_path)[-1])
          )
+ elif job.media_type == "audio":
+     if job.tg_file_id:
+         if job.placeholder_message_id:
+             args["media"] = InputMediaAudio(media=job.tg_file_id.replace(":audio", ''))
+         else:
+             args["audio"] = job.tg_file_id.replace(":audio", '')
+     else:
+         args["media"] = InputMediaAudio(
+             media=open(job.local_media_path, 'rb'),
+             filename="%s%s" % (job.canonical_name, os.path.splitext(job.local_media_path)[-1])
+         )
  elif job.media_type == "collection":
      if job.tg_file_id:
          args["media"] = []
@@ -306,6 +317,14 @@ async def upload_job(update: Update, context: ContextTypes.DEFAULT_TYPE, job: Up
              if message.photo:
                  tg_file_ids.append(message.photo[-1].file_id)
                  job.tg_file_id = message.photo[-1].file_id
+         elif job.media_type == "audio":
+             if job.placeholder_message_id:
+                 message = await context.bot.edit_message_media(**build_tg_args(update, context, job))
+             else:
+                 message = await update.message.reply_audio(**build_tg_args(update, context, job))
+             if message.audio:
+                 tg_file_ids.append(message.audio.file_id)
+                 job.tg_file_id = message.audio.file_id
          elif job.media_type == "collection":
              sent_messages = await update.message.reply_media_group(**build_tg_args(update, context, job))
              if job.placeholder_message_id:
@@ -371,6 +390,9 @@ def extract_origin(url: str) -> Origin:
      if "youtube.com/" in url and "shorts/" in url:
          return Origin.YT_SHORTS
 
+     if "youtube.com/" in url and "music." in url:
+         return Origin.YT_MUSIC
+ 
      return Origin.UNKNOWN
 
  async def handler(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
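
A standalone sketch of the updated routing, limited to the branches visible in the hunk above (classify() and the string return values are illustrative stand-ins for the Origin enum):

```python
def classify(url: str) -> str:
    if "youtube.com/" in url and "shorts/" in url:
        return "yt_shorts"
    if "youtube.com/" in url and "music." in url:
        return "yt_music"
    return "unknown"

assert classify("https://music.youtube.com/watch?v=abc123") == "yt_music"
assert classify("https://youtube.com/shorts/abc123") == "yt_shorts"
```
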
@@ -388,7 +410,7 @@ async def handler(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
      for url in urls:
          origin = extract_origin(url)
          if origin is Origin.UNKNOWN:
-             logging.info("Only Instagram and YouTube Shorts are now supported. Skipping.")
+             logging.info("Only Instagram, YouTube Shorts and YouTube Music are now supported. Skipping.")
              continue
          entities, tg_file_ids = [], []
          uniq_id = Storage.compute_uniq(url)
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: warp_beacon
- Version: 1.1.1
+ Version: 1.2.3
  Summary: Telegram bot for expanding external media links
  Home-page: https://github.com/sb0y/warp_beacon
  Author: Andrey Bagrintsev
@@ -0,0 +1,31 @@
+ etc/warp_beacon/warp_beacon.conf,sha256=1gGvh36cnFr0rU4mVomfy66hQz9EvugaNzeH6_tmBM0,266
+ lib/systemd/system/warp_beacon.service,sha256=lPmHqLqcI2eIV7nwHS0qcALQrznixqJuwwPfa2mDLUA,372
+ var/warp_beacon/placeholder.gif,sha256=cE5CGJVaop4Sx21zx6j4AyoHU0ncmvQuS2o6hJfEH88,6064
+ warp_beacon/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ warp_beacon/__version__.py,sha256=FoCfbP1cOqI5xxyfhI-2bmJFC6d3DUpIK7513kJdTxI,23
+ warp_beacon/warp_beacon.py,sha256=DeENFTvlwa8qWhQrLPvwReaOF9LcC3rzdR2_nouEiRs,20910
+ warp_beacon/compress/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ warp_beacon/compress/video.py,sha256=_PDMVYCyzLYxHv1uZmmzGcG_8rjaZr7BTXsXTTy_oS4,2846
+ warp_beacon/jobs/__init__.py,sha256=99x1MDo8GLfnKtqiNExNnrWCP6_rKa4WclQDQhzjSxY,136
+ warp_beacon/jobs/abstract.py,sha256=zHkh31JT4YL-607hmqswAlI3kwhuXwUZafjYOBqpEqw,1807
+ warp_beacon/jobs/download_job.py,sha256=wfZrKUerfYIjWkRxPzfl5gwIlcotIMH7OpTUM9ae8NY,736
+ warp_beacon/jobs/upload_job.py,sha256=Vaogc4vbpAfyaT4VkIHEPLFRELmM44TDqkmnPYh3Ymc,740
+ warp_beacon/mediainfo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ warp_beacon/mediainfo/abstract.py,sha256=O9ZROcW1cDlLMqBrUW2KI06tHCQ_iflDJknZA3iFaHE,591
+ warp_beacon/mediainfo/audio.py,sha256=ous88kwQj4bDIChN5wnGil5LqTs0IQHH0d-nyrL0-ZM,651
+ warp_beacon/mediainfo/video.py,sha256=A0CZX3wdL9i1M4j_YW8hbg7betGA2UXwY1RKbiOL-FY,2381
+ warp_beacon/scraper/__init__.py,sha256=lb6ehjcrEt_YF_urpNobnO44RD7nKH5jqcSpMkSkNsE,8609
+ warp_beacon/scraper/abstract.py,sha256=um4wUthO_7IsoXjKiUTWyBBbKlf-N01aZJK9N2UQI9I,408
+ warp_beacon/scraper/exceptions.py,sha256=BdC9JRXRSuvZR8nlfYG62SITMeg3DGvRLsCsBr_ACy0,1055
+ warp_beacon/scraper/instagram.py,sha256=atYkK--4ctvSfTqiPQnYHsJ4_q5fCYfNj9CwWr9tvRM,7809
+ warp_beacon/scraper/youtube/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ warp_beacon/scraper/youtube/music.py,sha256=htxLJW-RaRhgxqJ9276ibaLnuStNtw26-pKJwfaQpgY,3335
+ warp_beacon/scraper/youtube/shorts.py,sha256=yihisHSVDDT-4MflfjYkHWpnp8PrlyNo21ICKSf-rYU,2890
+ warp_beacon/storage/__init__.py,sha256=ljeEP_zKDxKVBXWXdhJL1c2hTKWhP8ubazOkojAkjZs,2724
+ warp_beacon/uploader/__init__.py,sha256=auD1arKpJdN1eFUbTFoa9Gmv-ZYZNesMoT193__pDz8,4507
+ warp_beacon-1.2.3.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ warp_beacon-1.2.3.dist-info/METADATA,sha256=kooNNS-QQyjIXZRjkZDJ7S7WyI3gt6MiN6GDESaZHXA,18244
+ warp_beacon-1.2.3.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
+ warp_beacon-1.2.3.dist-info/entry_points.txt,sha256=eSB61Rb89d56WY0O-vEIQwkn18J-4CMrJcLA_R_8h3g,119
+ warp_beacon-1.2.3.dist-info/top_level.txt,sha256=-jYi-GhbnF8nFw16lQwExFZqTiFRW62R8HztQDNQ1po,566
+ warp_beacon-1.2.3.dist-info/RECORD,,
@@ -7,12 +7,15 @@ warp_beacon/jobs/abstract
  warp_beacon/jobs/download_job
  warp_beacon/jobs/upload_job
  warp_beacon/mediainfo
+ warp_beacon/mediainfo/abstract
+ warp_beacon/mediainfo/audio
  warp_beacon/mediainfo/video
  warp_beacon/scraper
  warp_beacon/scraper/abstract
  warp_beacon/scraper/exceptions
  warp_beacon/scraper/instagram
  warp_beacon/scraper/youtube
+ warp_beacon/scraper/youtube/music
  warp_beacon/scraper/youtube/shorts
  warp_beacon/storage
  warp_beacon/uploader
@@ -1,28 +0,0 @@
- etc/warp_beacon/warp_beacon.conf,sha256=1gGvh36cnFr0rU4mVomfy66hQz9EvugaNzeH6_tmBM0,266
- lib/systemd/system/warp_beacon.service,sha256=lPmHqLqcI2eIV7nwHS0qcALQrznixqJuwwPfa2mDLUA,372
- var/warp_beacon/placeholder.gif,sha256=cE5CGJVaop4Sx21zx6j4AyoHU0ncmvQuS2o6hJfEH88,6064
- warp_beacon/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- warp_beacon/__version__.py,sha256=Y1WL3jwTYIGv_JONH9hc9ULjDsNL4lbh4ucw3BlkBYs,23
- warp_beacon/warp_beacon.py,sha256=Z3zY7MTMZp3zKRderhNOVe6XvM-93Djo0DIsiaoucmo,20012
- warp_beacon/compress/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- warp_beacon/compress/video.py,sha256=_PDMVYCyzLYxHv1uZmmzGcG_8rjaZr7BTXsXTTy_oS4,2846
- warp_beacon/jobs/__init__.py,sha256=KsSaS0KlCNyffNnWKuvqmdfgyfKyn3niXYyZ38-exQ8,113
- warp_beacon/jobs/abstract.py,sha256=PCr8RXzocKi-sTsi2Y1_spiv6D95G1NlzZ2wD2WJXRc,1760
- warp_beacon/jobs/download_job.py,sha256=wfZrKUerfYIjWkRxPzfl5gwIlcotIMH7OpTUM9ae8NY,736
- warp_beacon/jobs/upload_job.py,sha256=Vaogc4vbpAfyaT4VkIHEPLFRELmM44TDqkmnPYh3Ymc,740
- warp_beacon/mediainfo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- warp_beacon/mediainfo/video.py,sha256=8h7p4k0w45Vm0lPQNlaQaUjaDTBybX3RcKgL1QQbioA,2638
- warp_beacon/scraper/__init__.py,sha256=c7NySK5Krm-zlWQckFs-uN4fD3J19A0pTS4CByXjmMs,7918
- warp_beacon/scraper/abstract.py,sha256=um4wUthO_7IsoXjKiUTWyBBbKlf-N01aZJK9N2UQI9I,408
- warp_beacon/scraper/exceptions.py,sha256=qra_Jx53RVCnrCKvw2OxvEHl4cXJCrPDa_yef3cvGXM,978
- warp_beacon/scraper/instagram.py,sha256=s7slumqdqVVWQYpnVphx-dOLIAWvgA_UdqkTQVp6GsI,7758
- warp_beacon/scraper/youtube/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- warp_beacon/scraper/youtube/shorts.py,sha256=xAOeHNT3_Ayjjglid00UqGqCgSMfbJua26PNrbbDYUo,2565
- warp_beacon/storage/__init__.py,sha256=NhD3V7UNRiZNf61yQEAjXOfi-tfA2LaJa7a7kvbkmtE,2402
- warp_beacon/uploader/__init__.py,sha256=auD1arKpJdN1eFUbTFoa9Gmv-ZYZNesMoT193__pDz8,4507
- warp_beacon-1.1.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- warp_beacon-1.1.1.dist-info/METADATA,sha256=grW-AA1vEjTZEt30ShVny1vomQY-0M-HGMGuOGOZdCc,18244
- warp_beacon-1.1.1.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
- warp_beacon-1.1.1.dist-info/entry_points.txt,sha256=eSB61Rb89d56WY0O-vEIQwkn18J-4CMrJcLA_R_8h3g,119
- warp_beacon-1.1.1.dist-info/top_level.txt,sha256=QuN6MynevEblMhPPAVeMrNAkcyqYUpYDholtIRq8-ew,473
- warp_beacon-1.1.1.dist-info/RECORD,,