warp-beacon 1.2.5__py3-none-any.whl → 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,111 +1,15 @@
-import os
-import io
-import pathlib
-import time
-
-import socket
-import ssl
-
-from typing import Callable, Union
-
-import requests
-import urllib
-import http.client
-
-from PIL import Image
+from warp_beacon.jobs.types import JobType
+from warp_beacon.scraper.youtube.abstract import YoutubeAbstract
 
 from pytubefix import YouTube
-from pytubefix.exceptions import VideoUnavailable, VideoPrivate, MaxRetriesExceeded
-
-from warp_beacon.mediainfo.abstract import MediaInfoAbstract
-from warp_beacon.scraper.exceptions import NotFound, UnknownError, TimeOut, Unavailable, FileTooBig, extract_exception_message
-from warp_beacon.scraper.abstract import ScraperAbstract
 
 import logging
 
-DOWNLOAD_DIR = "/tmp"
-
-class YoutubeMusicScraper(ScraperAbstract):
-
-	def __init__(self) -> None:
-		pass
-
-	def __del__(self) -> None:
-		pass
-
-	def remove_tmp_files(self) -> None:
-		for i in os.listdir(DOWNLOAD_DIR):
-			if "yt_download_" in i:
-				os.unlink("%s/%s" % (DOWNLOAD_DIR, i))
-
-	def _download_hndlr(self, func: Callable, *args: tuple[str], **kwargs: dict[str]) -> Union[str, dict]:
-		ret_val = ''
-		max_retries = int(os.environ.get("YT_MUSIC_MAX_RETRIES", default=6))
-		pause_secs = int(os.environ.get("YT_MUSIC_PAUSE_BEFORE_RETRY", default=3))
-		timeout = int(os.environ.get("YT_MUSIC_TIMEOUT", default=60))
-		timeout_increment = int(os.environ.get("YT_MUSIC_TIMEOUT_INCREMENT", default=60))
-		retries = 0
-		while max_retries >= retries:
-			try:
-				kwargs["timeout"] = timeout
-				ret_val = func(*args, **kwargs)
-				break
-			except MaxRetriesExceeded:
-				# do noting, not interested
-				pass
-			#except http.client.IncompleteRead as e:
-			except (socket.timeout,
-				ssl.SSLError,
-				http.client.IncompleteRead,
-				http.client.HTTPException,
-				requests.RequestException,
-				urllib.error.URLError,
-				urllib.error.HTTPError) as e:
-				if hasattr(e, "code") and int(e.code) == 403:
-					raise Unavailable(extract_exception_message(e))
-				logging.warning("Youtube read timeout! Retrying in %d seconds ...", pause_secs)
-				logging.info("Your `YT_MUSIC_MAX_RETRIES` values is %d", max_retries)
-				logging.exception(extract_exception_message(e))
-				if max_retries <= retries:
-					self.remove_tmp_files()
-					raise TimeOut(extract_exception_message(e))
-				retries += 1
-				timeout += timeout_increment
-				time.sleep(pause_secs)
-			except (VideoUnavailable, VideoPrivate) as e:
-				raise Unavailable(extract_exception_message(e))
-
-		return ret_val
-
-	def rename_local_file(self, filename: str) -> str:
-		if not os.path.exists(filename):
-			raise NameError("No file provided")
-		path_info = pathlib.Path(filename)
-		ext = path_info.suffix
-		old_filename = path_info.stem
-		time_name = str(time.time()).replace('.', '_')
-		new_filename = "%s%s" % (time_name, ext)
-		new_filepath = "%s/%s" % (os.path.dirname(filename), new_filename)
-
-		os.rename(filename, new_filepath)
-
-		return new_filepath
-
-	def download_thumbnail(self, url: str) -> Union[io.BytesIO, None]:
-		try:
-			reply = requests.get(url, stream=True)
-			if reply.ok and reply.status_code == 200:
-				image = Image.open(io.BytesIO(reply.content))
-				image = MediaInfoAbstract.shrink_image_to_fit(image)
-				io_buf = io.BytesIO()
-				image.save(io_buf, format='JPEG')
-				io_buf.seek(0)
-				return io_buf
-		except Exception as e:
-			logging.error("Failed to download download thumbnail!")
-			logging.exception(e)
-
-		return None
+class YoutubeMusicScraper(YoutubeAbstract):
+	YT_MAX_RETRIES_DEFAULT = 6
+	YT_PAUSE_BEFORE_RETRY_DEFAULT = 3
+	YT_TIMEOUT_DEFAULT = 2
+	YT_TIMEOUT_INCREMENT_DEFAULT = 60
 
 	def _download(self, url: str, timeout: int = 0) -> list:
 		res = []
@@ -116,12 +20,12 @@ class YoutubeMusicScraper(ScraperAbstract):
 		stream = yt.streams.get_audio_only()
 		if stream:
 			logging.info("Announced audio file size: '%d'", stream.filesize)
-			if stream.filesize > 5e+7:
-				logging.warning("Downloading size reported by YouTube is over than 50 mb!")
-				raise FileTooBig("YouTube file is larger than 50 mb")
+			if stream.filesize > 2e+9:
+				logging.warning("Downloading size reported by YouTube is over than 2 GB!")
+				raise FileTooBig("YouTube file is larger than 2 GB")
 			logging.info("Operation timeout is '%d'", timeout)
 			local_file = stream.download(
-				output_path=DOWNLOAD_DIR,
+				output_path=self.DOWNLOAD_DIR,
 				max_retries=0,
 				timeout=timeout,
 				skip_existing=False,
@@ -134,7 +38,7 @@ class YoutubeMusicScraper(ScraperAbstract):
 				"performer": yt.author,
 				"thumb": thumbnail,
 				"canonical_name": stream.title,
-				"media_type": "audio"
+				"media_type": JobType.AUDIO
 			})
 
 		return res
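
The YoutubeMusicScraper above (and the Shorts and plain-YouTube scrapers further down) now inherit the helpers that 1.2.5 duplicated in each class — temporary-file cleanup, the retry wrapper, file renaming and thumbnail fetching — from the new warp_beacon.scraper.youtube.abstract.YoutubeAbstract base class, which this diff imports but never shows. A minimal sketch of what that base class plausibly consolidates, reconstructed only from the removed 1.2.5 code, is given below; the exception handling, any environment-variable overrides and the omitted download_thumbnail() are simplified assumptions, not the actual 2.0.0 implementation.

import os
import pathlib
import time
from typing import Callable, Union

from warp_beacon.scraper.abstract import ScraperAbstract

class YoutubeAbstract(ScraperAbstract):
	# Shared by YoutubeMusicScraper, YoutubeShortsScraper and YoutubeScraper;
	# subclasses override the class attributes below (see the diffs in this report).
	DOWNLOAD_DIR = "/tmp"
	YT_MAX_RETRIES_DEFAULT = 8
	YT_PAUSE_BEFORE_RETRY_DEFAULT = 3
	YT_TIMEOUT_DEFAULT = 2
	YT_TIMEOUT_INCREMENT_DEFAULT = 60

	def remove_tmp_files(self) -> None:
		# Drop partially downloaded "yt_download_*" files left in DOWNLOAD_DIR.
		for i in os.listdir(self.DOWNLOAD_DIR):
			if "yt_download_" in i:
				os.unlink("%s/%s" % (self.DOWNLOAD_DIR, i))

	def rename_local_file(self, filename: str) -> str:
		# Give the downloaded file a unique, timestamp-based name.
		if not os.path.exists(filename):
			raise NameError("No file provided")
		ext = pathlib.Path(filename).suffix
		new_filepath = "%s/%s%s" % (os.path.dirname(filename), str(time.time()).replace('.', '_'), ext)
		os.rename(filename, new_filepath)
		return new_filepath

	def _download_hndlr(self, func: Callable, *args, **kwargs) -> Union[str, dict, list]:
		# Retry wrapper: passes a growing timeout to func and sleeps between attempts.
		ret_val = ''
		retries, timeout = 0, self.YT_TIMEOUT_DEFAULT
		while self.YT_MAX_RETRIES_DEFAULT >= retries:
			try:
				kwargs["timeout"] = timeout
				ret_val = func(*args, **kwargs)
				break
			except Exception:  # 1.2.5 caught specific socket/ssl/http/urllib and pytubefix errors here
				if retries >= self.YT_MAX_RETRIES_DEFAULT:
					self.remove_tmp_files()
					raise
				retries += 1
				timeout += self.YT_TIMEOUT_INCREMENT_DEFAULT
				time.sleep(self.YT_PAUSE_BEFORE_RETRY_DEFAULT)
		return ret_val
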
@@ -1,93 +1,40 @@
-import os
-import pathlib
-import time
-
-import socket
-import ssl
-
-from typing import Callable, Union
-
-import requests
-import urllib
-import http.client
+from warp_beacon.jobs.types import JobType
+from warp_beacon.scraper.youtube.abstract import YoutubeAbstract
 
 from pytubefix import YouTube
-from pytubefix.exceptions import VideoUnavailable, VideoPrivate, MaxRetriesExceeded
-
-from warp_beacon.scraper.exceptions import NotFound, UnknownError, TimeOut, Unavailable, extract_exception_message
-from warp_beacon.scraper.abstract import ScraperAbstract
 
 import logging
 
-class YoutubeShortsScraper(ScraperAbstract):
-	def __init__(self) -> None:
-		pass
-
-	def __del__(self) -> None:
-		pass
-
-	def remove_tmp_files(self) -> None:
-		for i in os.listdir(DOWNLOAD_DIR):
-			if "yt_download_" in i:
-				os.unlink("%s/%s" % (DOWNLOAD_DIR, i))
-
-	def _download_hndlr(self, func: Callable, *args: tuple[str], **kwargs: dict[str]) -> Union[str, dict]:
-		ret_val = ''
-		max_retries = int(os.environ.get("YT_MAX_RETRIES", default=8))
-		pause_secs = int(os.environ.get("YT_PAUSE_BEFORE_RETRY", default=3))
-		retries = 0
-		while max_retries >= retries:
-			try:
-				ret_val = func(*args, **kwargs)
-				break
-			except MaxRetriesExceeded:
-				# do noting, not interested
-				pass
-			except (socket.timeout, ssl.SSLError, http.client.HTTPException, requests.RequestException, urllib.error.URLError) as e:
-				if hasattr(e, "code") and int(e.code) == 403:
-					raise Unavailable(extract_exception_message(e))
-				logging.warning("Youtube read timeout! Retrying in %d seconds ...", pause_secs)
-				logging.info("Your `YT_MAX_RETRIES` values is %d", max_retries)
-				logging.exception(extract_exception_message(e))
-				if max_retries <= retries:
-					self.remove_tmp_files()
-					raise TimeOut(extract_exception_message(e))
-				retries += 1
-				time.sleep(pause_secs)
-			except (VideoUnavailable, VideoPrivate) as e:
-				raise Unavailable(extract_exception_message(e))
-
-		return ret_val
-
-	def rename_local_file(self, filename: str) -> str:
-		if not os.path.exists(filename):
-			raise NameError("No file provided")
-		path_info = pathlib.Path(filename)
-		ext = path_info.suffix
-		old_filename = path_info.stem
-		time_name = str(time.time()).replace('.', '_')
-		new_filename = "%s%s" % (time_name, ext)
-		new_filepath = "%s/%s" % (os.path.dirname(filename), new_filename)
-
-		os.rename(filename, new_filepath)
-
-		return new_filepath
+class YoutubeShortsScraper(YoutubeAbstract):
+	YT_MAX_RETRIES_DEFAULT = 8
+	YT_PAUSE_BEFORE_RETRY_DEFAULT = 3
+	YT_TIMEOUT_DEFAULT = 2
+	YT_TIMEOUT_INCREMENT_DEFAULT = 60
 
-	def _download(self, url: str) -> list:
+	def _download(self, url: str, timeout: int = 0) -> list:
 		res = []
-		timeout = int(os.environ.get("YT_TIMEOUT", default=2))
+		thumbnail = None
 		yt = YouTube(url)
 		stream = yt.streams.get_highest_resolution()
+		if yt and yt.thumbnail_url:
+			logging.debug("Generation thumb for Shorts ...")
+			thumbnail = self.download_thumbnail(yt.thumbnail_url)
 		if stream:
 			local_file = stream.download(
-				output_path="/tmp",
+				output_path=self.DOWNLOAD_DIR,
 				max_retries=0,
 				timeout=timeout,
 				skip_existing=False,
 				filename_prefix="yt_download_"
 			)
 			logging.debug("Temp filename: '%s'", local_file)
-			res.append({"local_media_path": self.rename_local_file(local_file), "media_type": "video"})
+			res.append({
+				"local_media_path": self.rename_local_file(local_file),
+				"performer": yt.author,
+				"thumb": thumbnail,
+				"canonical_name": stream.title,
+				"media_type": JobType.VIDEO
+			})
 
 		return res
 
@@ -0,0 +1,41 @@
+from warp_beacon.jobs.types import JobType
+from warp_beacon.scraper.youtube.abstract import YoutubeAbstract
+
+from pytubefix import YouTube
+
+import logging
+
+class YoutubeScraper(YoutubeAbstract):
+	YT_MAX_RETRIES_DEFAULT = 8
+	YT_PAUSE_BEFORE_RETRY_DEFAULT = 3
+	YT_TIMEOUT_DEFAULT = 2
+	YT_TIMEOUT_INCREMENT_DEFAULT = 60
+
+	def _download(self, url: str, timeout: int = 0) -> list:
+		res = []
+		thumbnail = None
+		yt = YouTube(url)
+		if yt and yt.thumbnail_url:
+			thumbnail = self.download_thumbnail(yt.thumbnail_url)
+		stream = yt.streams.get_highest_resolution()
+		if stream:
+			local_file = stream.download(
+				output_path=self.DOWNLOAD_DIR,
+				max_retries=0,
+				timeout=timeout,
+				skip_existing=False,
+				filename_prefix="yt_download_"
+			)
+			logging.debug("Temp filename: '%s'", local_file)
+			res.append({
+				"local_media_path": self.rename_local_file(local_file),
+				"performer": yt.author,
+				"thumb": thumbnail,
+				"canonical_name": stream.title,
+				"media_type": JobType.VIDEO
+			})
+
+		return res
+
+	def download(self, url: str) -> list:
+		return self._download_hndlr(self._download, url)
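
The new plain-YouTube scraper exposes the same public download() entry point as the other scrapers: the inherited retry wrapper drives _download() and the result is a list of media dicts. A hypothetical direct call is sketched below; in the application these jobs are normally dispatched by AsyncDownloader, and the module path, constructor arguments and URL are only illustrative assumptions.

from warp_beacon.scraper.youtube.youtube import YoutubeScraper  # module path assumed, not shown in this diff

scraper = YoutubeScraper()
items = scraper.download("https://www.youtube.com/watch?v=dQw4w9WgXcQ")  # example URL
for item in items:
	# keys come from the _download() result shown above
	print(item["local_media_path"], item["media_type"])
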
@@ -1,11 +1,19 @@
 import os
 #from typing import Optional
-import logging
+from enum import Enum
 
 from urllib.parse import urlparse, parse_qs
 
 from pymongo import MongoClient
 
+import logging
+
+class UrlParseMode(Enum):
+	OTHER = 0
+	YT_MUSIC = 1
+	YT_SHORTS = 2
+	YOUTUBE = 3
+
 VIDEO_STORAGE_DIR = os.environ.get("VIDEO_STORAGE_DIR", default="/var/warp_beacon/videos")
 
 class Storage(object):
@@ -28,12 +36,22 @@ class Storage(object):
 
 	@staticmethod
 	def compute_uniq(url: str) -> str:
-		if "music.youtube.com" in url:
-			qs = parse_qs(urlparse(url).query)
-			yt_vid_id = qs.get('v', None)
+		parse_mode = UrlParseMode.OTHER
+		if "music.youtube.com/" in url:
+			parse_mode = UrlParseMode.YT_MUSIC
+		elif "youtube.com/shorts/" in url:
+			parse_mode = UrlParseMode.YT_SHORTS
+		elif "youtube.com/" in url:
+			parse_mode = UrlParseMode.YOUTUBE
+
+		if parse_mode is not UrlParseMode.OTHER and parse_mode is not UrlParseMode.YT_SHORTS:
+			purl = urlparse(url)
+			qs = parse_qs(purl.query)
+			yt_vid_id_list = qs.get('v', None)
+			yt_vid_id = yt_vid_id_list.pop() if yt_vid_id_list else ""
 			if yt_vid_id:
-				path = urlparse(url).path.strip('/').replace("watch", "yt_music")
-				return "%s/%s" % (path, yt_vid_id)
+				path = urlparse(url).path.strip('/').replace("watch", ("yt_music" if parse_mode is UrlParseMode.YT_MUSIC else "youtube"))
+				return ("%s/%s" % (path, yt_vid_id)).strip('/')
 			else:
 				raise ValueError("Failed to generate uniq_id for url '%s'", url)
 
@@ -65,6 +83,9 @@ class Storage(object):
 		uniq_id = self.compute_uniq(media_url)
 		media_ids = []
 		for tg_file_id in tg_file_ids:
+			if not tg_file_id:
+				logging.warning("Passed empty `tg_file_id`! Skipping.")
+				continue
 			if self.db_lookup_id(uniq_id):
 				logging.info("Detected existing uniq_id, skipping storage write operation")
 				continue
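
For illustration, the reworked compute_uniq() now derives a uniq_id for both YouTube Music and plain YouTube watch URLs, while Shorts URLs are flagged as YT_SHORTS and handled outside the hunk shown above. A small usage sketch follows; the URLs and the video id are hypothetical examples and the expected outputs are inferred from the code in this hunk.

from warp_beacon.storage import Storage

# Hypothetical URLs; "dQw4w9WgXcQ" is only an example video id.
print(Storage.compute_uniq("https://music.youtube.com/watch?v=dQw4w9WgXcQ"))
# expected: yt_music/dQw4w9WgXcQ
print(Storage.compute_uniq("https://www.youtube.com/watch?v=dQw4w9WgXcQ"))
# expected: youtube/dQw4w9WgXcQ
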
File without changes
@@ -0,0 +1,318 @@
+import os, io
+import signal
+
+import uvloop
+import asyncio
+
+from pyrogram import Client, filters
+from pyrogram.enums import ParseMode
+from pyrogram.handlers import MessageHandler
+from pyrogram.types import Message, InputMedia, InputMediaAudio, InputMediaPhoto, InputMediaVideo, InputMediaAnimation, InputMediaDocument, InlineKeyboardButton, InlineKeyboardMarkup
+from pyrogram.errors import RPCError, FloodWait, NetworkMigrate, BadRequest, MultiMediaTooLong
+
+from warp_beacon.__version__ import __version__
+from warp_beacon.telegram.handlers import Handlers
+import warp_beacon.scraper
+from warp_beacon.telegram.placeholder_message import PlaceholderMessage
+from warp_beacon.storage import Storage
+from warp_beacon.uploader import AsyncUploader
+from warp_beacon.jobs.download_job import DownloadJob
+from warp_beacon.jobs.upload_job import UploadJob
+from warp_beacon.jobs import Origin
+from warp_beacon.jobs.types import JobType
+from warp_beacon.telegram.utils import Utils
+
+import logging
+
+class Bot(object):
+	storage = Storage()
+	uploader = None
+	downloader = None
+	allow_loop = True
+	client = None
+	handlers = None
+	placeholder = None
+
+	def __init__(self, tg_bot_name: str, tg_token: str, tg_api_id: str, tg_api_hash: str) -> None:
+		# Enable logging
+		logging.basicConfig(
+			format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
+		)
+
+		logging.info(f"Starting Warp Beacon version '{__version__}' ...")
+
+		workers_amount = min(32, os.cpu_count() + 4)
+
+		uvloop.install()
+		self.client = Client(
+			name=tg_bot_name,
+			app_version=__version__,
+			bot_token=tg_token,
+			api_id=tg_api_id,
+			api_hash=tg_api_hash,
+			workdir='/',
+			workers=int(os.environ.get("TG_WORKERS_POOL_SIZE", default=workers_amount))
+		)
+
+		this = self
+		def __terminator() -> None:
+			this.stop()
+
+		stop_signals = (signal.SIGINT, signal.SIGTERM, signal.SIGABRT)
+		for sig in stop_signals:
+			self.client.loop.add_signal_handler(sig, __terminator)
+
+		self.uploader = AsyncUploader(
+			storage=self.storage,
+			pool_size=int(os.environ.get("UPLOAD_POOL_SIZE", default=workers_amount)),
+			loop=self.client.loop
+		)
+		self.downloader = warp_beacon.scraper.AsyncDownloader(
+			workers_count=int(os.environ.get("WORKERS_POOL_SIZE", default=workers_amount)),
+			uploader=self.uploader
+		)
+
+		self.downloader.start()
+		self.uploader.start()
+
+		self.handlers = Handlers(self)
+
+		self.client.add_handler(MessageHandler(self.handlers.start, filters.command("start")))
+		self.client.add_handler(MessageHandler(self.handlers.help, filters.command("help")))
+		self.client.add_handler(MessageHandler(self.handlers.random, filters.command("random")))
+		self.client.add_handler(MessageHandler(self.handlers.handler))
+
+		self.placeholder = PlaceholderMessage(self)
+
+		self.client.run()
+
+	def __del__(self) -> None:
+		self.stop()
+		logging.info("Warp Beacon terminated.")
+
+	def start(self) -> None:
+		self.client.run()
+
+	def stop(self) -> None:
+		logging.info("Warp Beacon terminating. This may take a while ...")
+		self.downloader.stop_all()
+		self.uploader.stop_all()
+		#self.client.stop()
+
+	async def send_text(self, chat_id: int, text: str, reply_id: int = None) -> int:
+		try:
+			message_reply = await self.client.send_message(
+				chat_id=chat_id,
+				text=text,
+				parse_mode=ParseMode.MARKDOWN,
+				reply_to_message_id=reply_id
+			)
+			return message_reply.id
+		except Exception as e:
+			logging.error("Failed to send text message!")
+			logging.exception(e)
+
+		return 0
+
+	def build_tg_args(self, job: UploadJob) -> dict:
+		args = {}
+		if job.media_type == JobType.VIDEO:
+			if job.tg_file_id:
+				if job.placeholder_message_id:
+					args["media"] = InputMediaVideo(media=job.tg_file_id.replace(":video", ''), supports_streaming=True)
+				else:
+					args["video"] = job.tg_file_id.replace(":video", '')
+			else:
+				args["media"] = InputMediaVideo(
+					media=job.local_media_path,
+					supports_streaming=True,
+					width=job.media_info["width"],
+					height=job.media_info["height"],
+					duration=int(job.media_info["duration"]),
+					thumb=job.media_info["thumb"]
+				)
+				args["file_name"] = "downloaded_via_warp_beacon_bot%s" % (os.path.splitext(job.local_media_path)[-1])
+		elif job.media_type == JobType.IMAGE:
+			if job.tg_file_id:
+				if job.placeholder_message_id:
+					args["media"] = InputMediaPhoto(media=job.tg_file_id.replace(":image", ''))
+				else:
+					args["photo"] = job.tg_file_id.replace(":image", '')
+			else:
+				args["media"] = InputMediaPhoto(
+					media=job.local_media_path
+				)
+		elif job.media_type == JobType.AUDIO:
+			if job.tg_file_id:
+				if job.placeholder_message_id:
+					args["media"] = InputMediaAudio(
+						media=job.tg_file_id.replace(":audio", '')
+					)
+				else:
+					args["audio"] = job.tg_file_id.replace(":audio", '')
+			else:
+				args["media"] = InputMediaAudio(
+					media=job.local_media_path,
+					performer=job.media_info["performer"],
+					thumb=job.media_info["thumb"],
+					duration=job.media_info["duration"],
+					title=job.canonical_name,
+				)
+			#args["file_name"] = "%s%s" % (job.canonical_name, os.path.splitext(job.local_media_path)[-1]),
+		elif job.media_type == JobType.ANIMATION:
+			if job.tg_file_id:
+				if job.placeholder_message_id:
+					args["media"] = InputMediaAnimation(
+						media=job.tg_file_id.replace(":animation", '')
+					)
+				else:
+					args["animation"] = job.tg_file_id.replace(":animation", '')
+			else:
+				args["media"] = InputMediaAudio(
+					media=job.local_media_path,
+					performer=job.media_info["performer"],
+					thumb=job.media_info["thumb"],
+					duration=job.media_info["duration"],
+					title=job.canonical_name,
+				)
+		elif job.media_type == JobType.COLLECTION:
+			if job.tg_file_id:
+				args["media"] = []
+				for chunk in Utils.chunker(job.tg_file_id.split(','), 10):
+					tg_chunk = []
+					for i in chunk:
+						tg_id, mtype = i.split(':')
+						ctype = JobType[mtype.upper()]
+						ptr = None
+						if ctype == JobType.VIDEO:
+							ptr = InputMediaVideo(media=tg_id)
+						elif ctype == JobType.IMAGE:
+							ptr = InputMediaPhoto(media=tg_id)
+						elif ctype == JobType.ANIMATION:
+							ptr = InputMediaAnimation(media=tg_id)
+						tg_chunk.append(ptr)
+
+					args["media"].append(tg_chunk)
+			else:
+				mediafs = []
+				for chunk in job.media_collection:
+					tg_chunk = []
+					for j in chunk:
+						if j.media_type == JobType.VIDEO:
+							vid = InputMediaVideo(
+								media=j.local_media_path,
+								supports_streaming=True,
+								width=j.media_info["width"],
+								height=j.media_info["height"],
+								duration=int(j.media_info["duration"]),
+								thumb=j.media_info["thumb"],
+							)
+							tg_chunk.append(vid)
+						elif j.media_type == JobType.IMAGE:
+							photo = InputMediaPhoto(
+								media=j.local_media_path
+							)
+							tg_chunk.append(photo)
+					mediafs.append(tg_chunk)
+				args["media"] = mediafs
+
+		args["chat_id"] = job.chat_id
+
+		# common args
+		if job.placeholder_message_id and job.media_type != JobType.COLLECTION:
+			args["message_id"] = job.placeholder_message_id
+		else:
+			args["disable_notification"] = True
+			args["reply_to_message_id"] = job.message_id
+
+		if os.environ.get("ENABLE_DONATES", None) == "true" and job.media_type != JobType.COLLECTION:
+			args["reply_markup"] = InlineKeyboardMarkup([[InlineKeyboardButton("❤ Donate", url=os.environ.get("DONATE_LINK", "https://pay.cryptocloud.plus/pos/W5BMtNQt5bJFoW2E"))]])
+
+		return args
+
+	async def upload_job(self, job: UploadJob) -> list[str]:
+		timeout = int(os.environ.get("TG_WRITE_TIMEOUT", default=120))
+		tg_file_ids = []
+		try:
+			retry_amount = 0
+			max_retries = int(os.environ.get("TG_MAX_RETRIES", default=5))
+			while not retry_amount >= max_retries:
+				try:
+					reply_message = None
+					if job.media_type in (JobType.VIDEO, JobType.IMAGE, JobType.AUDIO):
+						if job.placeholder_message_id:
+							reply_message = await self.client.edit_message_media(**self.build_tg_args(job))
+						else:
+							send_funcs = {
+								JobType.VIDEO: self.client.send_video,
+								JobType.IMAGE: self.client.send_photo,
+								JobType.AUDIO: self.client.send_audio,
+								JobType.ANIMATION: self.client.send_animation
+							}
+							try:
+								reply_message = await send_funcs[job.media_type](**self.build_tg_args(job))
+							except ValueError as e:
+								err_text = str(e)
+								if "Expected" in err_text:
+									logging.warning("Expectations exceeded reality.")
+									logging.warning(err_text)
+									expectation, reality = Utils.parse_expected_patronum_error(err_text)
+									job_args = self.build_tg_args(job)
+									job_args[reality.value.lower()] = job_args.pop(expectation.value.lower())
+									reply_message = await send_funcs[reality](**job_args)
+
+						tg_file_id = Utils.extract_file_id(reply_message)
+						tg_file_ids.append(tg_file_id)
+						job.tg_file_id = tg_file_id
+						logging.info("Uploaded media file with type '%s' tg_file_id is '%s'", job.media_type.value, job.tg_file_id)
+					elif job.media_type == JobType.COLLECTION:
+						col_job_args = self.build_tg_args(job)
+						sent_messages = []
+						for i, media_chunk in enumerate(col_job_args["media"]):
+							messages = await self.client.send_media_group(
+								chat_id=job.chat_id,
+								reply_to_message_id=job.message_id,
+								media=media_chunk,
+							)
+							sent_messages += messages
+							if job.media_collection:
+								for j, chunk in enumerate(media_chunk):
+									tg_file_id = Utils.extract_file_id(messages[j])
+									if tg_file_id:
+										job.media_collection[i][j].tg_file_id = tg_file_id
+							if i == 0 and job.placeholder_message_id:
+								await self.placeholder.remove(job.chat_id, job.placeholder_message_id)
+						for msg in sent_messages:
+							if msg.video:
+								tg_file_ids.append(msg.video.file_id + ':video')
+							elif msg.photo:
+								tg_file_ids.append(msg.photo.file_id + ':image')
+					logging.info("Uploaded to Telegram")
+					break
+				except MultiMediaTooLong as e:
+					logging.error("Failed to upload due telegram limitations :(")
+					logging.exception(e)
+					await self.placeholder.remove(job.chat_id, job.placeholder_message_id)
+					await self.send_text(job.chat_id, e.MESSAGE, job.message_id)
+					break
+				except (NetworkMigrate, BadRequest) as e:
+					logging.error("Network error. Check you Internet connection.")
+					logging.exception(e)
+
+					if retry_amount+1 >= max_retries:
+						msg = ""
+						if e.MESSAGE:
+							msg = "Telegram error: %s" % str(e.MESSAGE)
+						else:
+							msg = "Unfortunately, Telegram limits were exceeded. Your media size is %.2f MB." % job.media_info["filesize"]
+						await self.placeholder.remove(job.chat_id, job.placeholder_message_id)
+						await self.send_text(job.chat_id, msg, job.message_id)
+						break
+				retry_amount += 1
+		except Exception as e:
+			logging.error("Error occurred!")
+			logging.exception(e)
+		finally:
+			job.remove_files()
+
+		return tg_file_ids
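
The new Bot class wires the Pyrogram client, the AsyncDownloader and the AsyncUploader together and starts polling directly from its constructor (note the self.client.run() call at the end of __init__). A minimal launch sketch under that assumption follows; the module path and the environment-variable names used for the credentials are hypothetical and not taken from this diff.

import os

from warp_beacon.telegram.bot import Bot  # assumed module path; not shown in this diff

if __name__ == "__main__":
	# Credential sources are hypothetical; __init__ blocks in client.run()
	# until SIGINT/SIGTERM/SIGABRT triggers Bot.stop().
	Bot(
		tg_bot_name=os.environ["TG_BOT_NAME"],
		tg_token=os.environ["TG_TOKEN"],
		tg_api_id=os.environ["TG_API_ID"],
		tg_api_hash=os.environ["TG_API_HASH"]
	)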