warp-beacon 1.0.6__tar.gz → 1.0.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. {warp_beacon-1.0.6/warp_beacon.egg-info → warp_beacon-1.0.7}/PKG-INFO +3 -2
  2. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/pyproject.toml +2 -1
  3. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/setup.py +3 -1
  4. warp_beacon-1.0.7/warp_beacon/__version__.py +2 -0
  5. warp_beacon-1.0.7/warp_beacon/compress/video.py +91 -0
  6. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon/jobs/abstract.py +7 -1
  7. warp_beacon-1.0.7/warp_beacon/mediainfo/__init__.py +0 -0
  8. warp_beacon-1.0.7/warp_beacon/mediainfo/video.py +77 -0
  9. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon/scrapler/__init__.py +24 -3
  10. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon/uploader/__init__.py +35 -24
  11. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon/warp_beacon.py +33 -21
  12. {warp_beacon-1.0.6 → warp_beacon-1.0.7/warp_beacon.egg-info}/PKG-INFO +3 -2
  13. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon.egg-info/SOURCES.txt +2 -0
  14. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon.egg-info/requires.txt +2 -1
  15. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon.egg-info/top_level.txt +2 -0
  16. warp_beacon-1.0.6/warp_beacon/__version__.py +0 -2
  17. warp_beacon-1.0.6/warp_beacon/mediainfo/video.py +0 -80
  18. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/LICENSE +0 -0
  19. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/MANIFEST.in +0 -0
  20. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/README.md +0 -0
  21. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/assets/placeholder.gif +0 -0
  22. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/etc/.gitignore +0 -0
  23. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/etc/warp_beacon.conf +0 -0
  24. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/etc/warp_beacon.service +0 -0
  25. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/setup.cfg +0 -0
  26. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon/__init__.py +0 -0
  27. {warp_beacon-1.0.6/warp_beacon/jobs → warp_beacon-1.0.7/warp_beacon/compress}/__init__.py +0 -0
  28. {warp_beacon-1.0.6/warp_beacon/mediainfo → warp_beacon-1.0.7/warp_beacon/jobs}/__init__.py +0 -0
  29. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon/jobs/download_job.py +0 -0
  30. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon/jobs/upload_job.py +0 -0
  31. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon/scrapler/abstract.py +0 -0
  32. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon/scrapler/instagram.py +0 -0
  33. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon/storage/__init__.py +0 -0
  34. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon.egg-info/dependency_links.txt +0 -0
  35. {warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon.egg-info/entry_points.txt +0 -0
{warp_beacon-1.0.6/warp_beacon.egg-info → warp_beacon-1.0.7}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: warp_beacon
-Version: 1.0.6
+Version: 1.0.7
 Summary: Telegram bot for expanding external media links
 Home-page: https://github.com/sb0y/warp_beacon
 Author: Andrey Bagrintsev
@@ -224,8 +224,9 @@ Classifier: Programming Language :: Python :: 3.10
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
+Requires-Dist: ffmpeg-python
 Requires-Dist: python-telegram-bot
-Requires-Dist: opencv-python
+Requires-Dist: av
 Requires-Dist: urlextract
 Requires-Dist: pillow
 Requires-Dist: pymongo
{warp_beacon-1.0.6 → warp_beacon-1.0.7}/pyproject.toml

@@ -13,8 +13,9 @@ readme = "README.md"
 license = {file = "LICENSE"}
 requires-python = ">=3.10"
 dependencies = [
+	"ffmpeg-python",
 	"python-telegram-bot",
-	"opencv-python",
+	"av",
 	"urlextract",
 	"pillow",
 	"pymongo",
{warp_beacon-1.0.6 → warp_beacon-1.0.7}/setup.py

@@ -50,7 +50,8 @@ setup(
 		'warp_beacon/storage',
 		'warp_beacon/scrapler',
 		'warp_beacon/mediainfo',
-		'warp_beacon/jobs'
+		'warp_beacon/jobs',
+		'warp_beacon/compress'
 	],
 	py_modules=[
 		"warp_beacon/__version__",
@@ -59,6 +60,7 @@ setup(
 		"warp_beacon/jobs/download_job",
 		"warp_beacon/jobs/upload_job",
 		"warp_beacon/mediainfo/video",
+		"warp_beacon/compress/video",
 		"warp_beacon/scrapler/abstract",
 		"warp_beacon/scrapler/instagram"
 	],
warp_beacon-1.0.7/warp_beacon/__version__.py (new file)

@@ -0,0 +1,2 @@
+__version__ = "1.0.7"
+
warp_beacon-1.0.7/warp_beacon/compress/video.py (new file)

@@ -0,0 +1,91 @@
+import os
+import pathlib
+
+import ffmpeg
+
+import logging
+
+class VideoCompress(object):
+	video_full_path = ""
+	min_audio_bitrate = 32000
+	max_audio_bitrate = 256000
+	duration = 0.0
+	size = 0
+	audio_bitrate = 0.0
+	video_bitrate = 0.0
+	probe = None
+	ffmpeg = None
+
+	def __init__(self, file_path: str) -> None:
+		self.video_full_path = file_path
+		self.ffmpeg = ffmpeg
+		self.probe = self.ffmpeg.probe(file_path)
+		format_section = self.probe.get("format", {})
+		self.duration = float(format_section.get("duration", 0.0))
+		self.size = int(format_section.get("size", 0))
+		self.audio_bitrate = float(next((s for s in self.probe['streams'] if s['codec_type'] == 'audio'), None).get("bit_rate", 0.0))
+		self.video_bitrate = float(next((s for s in self.probe['streams'] if s['codec_type'] == 'video'), None).get("bit_rate", 0.0))
+
+	def __del__(self) -> None:
+		pass
+
+	def generate_filepath(self, base_filepath: str) -> str:
+		path_info = pathlib.Path(base_filepath)
+		ext = path_info.suffix
+		old_filename = path_info.stem
+		new_filename = "%s_compressed%s" % (old_filename, ext)
+		new_filepath = "%s/%s" % (os.path.dirname(base_filepath), new_filename)
+
+		return new_filepath
+
+	def get_size(self) -> int:
+		return self.size
+
+	def get_resolution(self) -> tuple:
+		video_info = next((s for s in self.probe['streams'] if s['codec_type'] == 'video'), None)
+		if video_info:
+			return (int(video_info.get("width", 0)), int(video_info.get("height", 0)))
+
+		return (0, 0)
+
+	def get_duration(self) -> float:
+		return self.duration
+
+	def compress_to(self, output_file_name: str, target_size: int) -> bool:
+		try:
+			#if self.size > 50.0:
+			#	best_min_size = (32000 + 100000) * (1.073741824 * self.duration) / (8 * 1024)
+			#	target_size = best_min_size
+
+			# Target total bitrate, in bps.
+			target_total_bitrate = (target_size * 1024 * 8) / (1.073741824 * self.duration)
+
+			audio_bitrate = self.audio_bitrate
+			# Target audio bitrate, in bps
+			if 10 * audio_bitrate > target_total_bitrate:
+				audio_bitrate = target_total_bitrate / 10
+				if audio_bitrate < self.min_audio_bitrate < target_total_bitrate:
+					audio_bitrate = self.min_audio_bitrate
+				elif audio_bitrate > self.max_audio_bitrate:
+					audio_bitrate = self.max_audio_bitrate
+			# Target video bitrate, in bps.
+			video_bitrate = target_total_bitrate - audio_bitrate
+
+			i = ffmpeg.input(self.video_full_path)
+			#ffmpeg.output(
+			#	i,
+			#	os.devnull,
+			#	**{'c:v': 'libx264', 'b:v': video_bitrate, 'pass': 1, 'f': 'mp4'}
+			#).overwrite_output().run()
+			ffmpeg.output(
+				i,
+				output_file_name,
+				**{'preset': 'medium', 'c:v': 'libx264', 'b:v': video_bitrate, 'c:a': 'aac', 'b:a': audio_bitrate}
+			).overwrite_output().run()
+
+			return True
+		except Exception as e:
+			logging.error("Failed to compress video '%s'!", self.video_full_path)
+			logging.exception(e)
+
+		return False
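A minimal usage sketch of the new VideoCompress class (not part of the released files; the file paths and target size are illustrative, and ffmpeg-python plus an ffmpeg binary are assumed to be installed):

from warp_beacon.compress.video import VideoCompress

compressor = VideoCompress(file_path="/tmp/example_video.mp4")  # hypothetical input file
print("duration (s):", compressor.get_duration())
print("resolution:", compressor.get_resolution())

# compress_to() derives a total bitrate from target_size, which the formula above treats as KB;
# 50 * 1000 KB is roughly the 50 MB budget used by AsyncDownloader further down in this diff.
target_path = compressor.generate_filepath("/tmp/example_video.mp4")  # -> /tmp/example_video_compressed.mp4
if compressor.compress_to(target_path, target_size=50 * 1000):
	print("compressed file written to", target_path)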
{warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon/jobs/abstract.py

@@ -8,14 +8,17 @@ class JobSettings(TypedDict):
 	message_id: int
 	placeholder_message_id: int
 	local_media_path: str
+	local_compressed_media_path: str
 	media_info: dict
 	url: str
 	uniq_id: str
 	tg_file_id: str
 	in_process: bool
-	job_failed: bool
 	media_type: str
+	job_failed: bool
 	job_failed_msg: str
+	job_warning: bool
+	job_warning_message: str
 	effective_url: str
 	save_items: bool
 	media_collection: list
@@ -25,12 +28,15 @@ class AbstractJob(ABC):
 	message_id: int = 0
 	placeholder_message_id: int = 0
 	local_media_path: str = ""
+	local_compressed_media_path: str = ""
 	media_info: dict = {}
 	url: str = ""
 	uniq_id: str = ""
 	tg_file_id: str = ""
 	media_type: str = "video"
 	in_process: bool = False
+	job_warning: bool = False
+	job_warning_message: str = ""
 	job_failed: bool = False
 	job_failed_msg: str = ""
 	effective_url: str = ""
warp_beacon-1.0.7/warp_beacon/mediainfo/video.py (new file)

@@ -0,0 +1,77 @@
+import io, os
+
+from typing import Union
+from PIL import Image
+import av
+
+import logging
+
+class VideoInfo(object):
+	width = 0
+	height = 0
+	duration = 0.0
+	ffmpeg = None
+	filename = ""
+
+	def __init__(self, filename: str) -> None:
+		self.filename = filename
+		with av.open(file=self.filename, mode='r') as container:
+			stream = container.streams.video[0]
+			time_base = stream.time_base
+			self.duration = float(stream.duration * time_base)
+			framerate = stream.average_rate
+			frame_container_pts = round((1 / framerate) / time_base)
+			container.seek(frame_container_pts, backward=True, stream=stream)
+			frame = next(container.decode(video=0))
+			self.width = frame.width
+			self.height = frame.height
+
+	def __del__(self) -> None:
+		pass
+
+	def get_demensions(self) -> dict:
+		return {"width": self.width, "height": self.height}
+
+	def get_duration(self) -> float:
+		return self.duration
+
+	@staticmethod
+	def get_filesize(filename: str) -> float:
+		return os.stat(filename).st_size / 1024 / 1024
+
+	def get_finfo(self, except_info: tuple=()) -> dict:
+		res = {}
+		res.update(self.get_demensions())
+		if "duration" not in except_info:
+			res["duration"] = self.get_duration()
+		if "filesize" not in except_info:
+			res["filesize"] = VideoInfo.get_filesize(self.filename)
+		return res
+
+	def shrink_image_to_fit(self, image: Image, size: tuple = (320, 320)) -> Image:
+		image.thumbnail(size, Image.Resampling.LANCZOS)
+		return image
+
+	def generate_thumbnail(self) -> Union[io.BytesIO, None]:
+		try:
+			image = None
+			with av.open(file=self.filename, mode='r') as container:
+				# Signal that we only want to look at keyframes.
+				stream = container.streams.video[0]
+				stream.codec_context.skip_frame = "NONKEY"
+				time_base = stream.time_base
+				framerate = stream.average_rate
+				frame_container_pts = round((60 / framerate) / time_base)
+				container.seek(frame_container_pts, backward=True, stream=stream)
+				frame = next(container.decode(video=0))
+				image = frame.to_image()
+				image = self.shrink_image_to_fit(image)
+				io_buf = io.BytesIO()
+				io_buf.seek(0)
+				image.save(io_buf, format='JPEG')
+				return io_buf
+		except Exception as e:
+			logging.error("Failed to generate thumbnail!")
+			logging.exception(e)
+
+		return None
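A similar illustrative sketch for the rewritten VideoInfo class, which now probes dimensions and duration through PyAV and renders the thumbnail with Pillow instead of OpenCV (file names are hypothetical):

from warp_beacon.mediainfo.video import VideoInfo

info = VideoInfo("/tmp/example_video.mp4")                # hypothetical input file
print(info.get_finfo())                                   # width, height, duration and filesize (in MiB)
print(VideoInfo.get_filesize("/tmp/example_video.mp4"))   # staticmethod: st_size / 1024 / 1024

thumb = info.generate_thumbnail()                         # io.BytesIO holding a JPEG fitted into 320x320, or None
if thumb is not None:
	with open("/tmp/example_thumb.jpg", "wb") as fh:
		fh.write(thumb.getvalue())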
{warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon/scrapler/__init__.py

@@ -1,14 +1,18 @@
+import os
+import time
+
 from typing import Optional
 import multiprocessing
-import time
-import logging
 from requests.exceptions import ConnectTimeout, HTTPError
 from instagrapi.exceptions import MediaNotFound, UnknownError, ClientNotFoundError, UserNotFound
 
 from warp_beacon.mediainfo.video import VideoInfo
+from warp_beacon.compress.video import VideoCompress
 from warp_beacon.uploader import AsyncUploader
 from warp_beacon.jobs.download_job import DownloadJob
 
+import logging
+
 CONST_CPU_COUNT = multiprocessing.cpu_count()
 
 class AsyncDownloader(object):
@@ -39,8 +43,8 @@ class AsyncDownloader(object):
 				video_info = VideoInfo(path)
 				media_info = video_info.get_finfo(tuple(fr_media_info.keys()))
 				media_info.update(fr_media_info)
-				logging.info("Media file info: %s", media_info)
 				media_info["thumb"] = video_info.generate_thumbnail()
+				logging.info("Media file info: %s", media_info)
 			except Exception as e:
 				logging.error("Failed to process media info!")
 				logging.exception(e)
@@ -103,6 +107,21 @@ class AsyncDownloader(object):
 					media_info = {"filesize": 0}
 					if item["media_type"] == "video":
 						media_info = self.get_media_info(item["local_media_path"], item["media_info"])
+						if media_info["filesize"] > 50.0:
+							logging.info("Detected big file. Starting compressing with ffmpeg ...")
+							self.uploader.queue_task(job.to_upload_job(
+								job_warning=True,
+								job_warning_msg="Downloaded file size is bigger than Telegram limits\! Performing video compression\. This may take a while\.")
+							)
+							ffmpeg = VideoCompress(file_path=item["local_media_path"])
+							new_filepath = ffmpeg.generate_filepath(base_filepath=item["local_media_path"])
+							if ffmpeg.compress_to(new_filepath, target_size=50 * 1000):
+								logging.info("Successfully compressed file '%s'", new_filepath)
+								os.unlink(item["local_media_path"])
+								item["local_media_path"] = new_filepath
+								item["local_compressed_media_path"] = new_filepath
+								media_info["filesize"] = VideoInfo.get_filesize(new_filepath)
+								logging.info("New file size of compressed file is '%.3f'", media_info["filesize"])
 					elif item["media_type"] == "collection":
 						for v in item["items"]:
 							if v["media_type"] == "video":
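For orientation on the numbers in this hunk: media_info["filesize"] comes from VideoInfo.get_filesize() and is expressed in megabytes, so the 50.0 threshold corresponds to Telegram's bot upload limit, while target_size=50 * 1000 passes the same budget to VideoCompress.compress_to() in kilobytes. A rough, illustrative calculation of the bitrate target this produces (the duration is an example value only, not taken from the diff):

duration = 120.0                    # seconds, example value only
target_size = 50 * 1000             # KB, as passed by AsyncDownloader above
target_total_bitrate = (target_size * 1024 * 8) / (1.073741824 * duration)
print(round(target_total_bitrate))  # ~3178914 bps, about 3.2 Mbit/s shared between audio and video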
@@ -117,6 +136,8 @@ class AsyncDownloader(object):
 						job_args["save_items"] = item.get("save_items", False)
 					else:
 						job_args["local_media_path"] = item["local_media_path"]
+						if item.get("local_compressed_media_path", None):
+							job_args["local_media_path"] = item.get("local_compressed_media_path", None)
 
 					logging.debug("local_media_path: '%s'", job_args.get("local_media_path", ""))
 					logging.debug("media_collection: '%s'", str(job_args.get("media_collection", {})))
{warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon/uploader/__init__.py

@@ -40,12 +40,13 @@ class AsyncUploader(object):
 	def add_callback(self, message_id: int, callback: Callable, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
 		def callback_wrap(*args, **kwargs) -> None:
 			ret = callback(*args, **kwargs)
-			self.remove_callback(message_id)
+			#self.remove_callback(message_id)
 			return ret
 		self.callbacks[message_id] = {"callback": callback_wrap, "update": update, "context": context}
 
 	def remove_callback(self, message_id: int) -> None:
 		if message_id in self.callbacks:
+			logging.debug("Removing callback with message id #%d", message_id)
 			del self.callbacks[message_id]
 
 	def stop_all(self) -> None:
@@ -85,34 +86,44 @@ class AsyncUploader(object):
 			in_process = job.in_process
 			uniq_id = job.uniq_id
 			message_id = job.placeholder_message_id
-			if not in_process:
+			if not in_process and not job.job_failed and not job.job_warning:
 				logging.info("Accepted upload job, file(s): '%s'", path)
 			try:
-				for m_id in self.callbacks.copy():
-					if m_id == message_id:
-						if job.job_failed:
-							logging.info("URL '%s' download failed. Skipping upload job ...", job.url)
-							if job.job_failed_msg: # we want to say something to user
-								asyncio.ensure_future(self.callbacks[m_id]["callback"](job), loop=self.loop)
+				if message_id in self.callbacks:
+					if job.job_failed:
+						logging.info("URL '%s' download failed. Skipping upload job ...", job.url)
+						if job.job_failed_msg: # we want to say something to user
+							asyncio.ensure_future(self.callbacks[message_id]["callback"](job), loop=self.loop)
+						self.process_done(uniq_id)
+						self.remove_callback(message_id)
+						continue
+					if job.job_warning:
+						logging.info("Job warning occurred ...")
+						if job.job_warning_msg:
+							asyncio.ensure_future(self.callbacks[message_id]["callback"](job), loop=self.loop)
+						continue
+					if in_process:
+						db_list_dicts = self.storage.db_lookup_id(uniq_id)
+						if db_list_dicts:
+							tg_file_ids = [i["tg_file_id"] for i in db_list_dicts]
+							dlds_len = len(db_list_dicts)
+							if dlds_len > 1:
+								job.tg_file_id = ",".join(tg_file_ids)
+								job.media_type = "collection"
+							elif dlds_len:
+								job.tg_file_id = ",".join(tg_file_ids)
+								job.media_type = db_list_dicts.pop()["media_type"]
+							asyncio.ensure_future(self.callbacks[message_id]["callback"](job), loop=self.loop)
 							self.process_done(uniq_id)
 							self.remove_callback(message_id)
-							continue
-						if in_process:
-							db_list_dicts = self.storage.db_lookup_id(uniq_id)
-							if db_list_dicts:
-								tg_file_ids = [i["tg_file_id"] for i in db_list_dicts]
-								dlds_len = len(db_list_dicts)
-								if dlds_len > 1:
-									job.tg_file_id = ",".join(tg_file_ids)
-									job.media_type = "collection"
-								elif dlds_len:
-									job.tg_file_id = ",".join(tg_file_ids)
-									job.media_type = db_list_dicts.pop()["media_type"]
-								asyncio.ensure_future(self.callbacks[m_id]["callback"](job), loop=self.loop)
-							else:
-								self.queue_task(job)
 						else:
-							asyncio.ensure_future(self.callbacks[m_id]["callback"](job), loop=self.loop)
+							self.queue_task(job)
+					else:
+						asyncio.ensure_future(self.callbacks[message_id]["callback"](job), loop=self.loop)
+						self.process_done(uniq_id)
+						self.remove_callback(message_id)
+				else:
+					logging.info("No callback no call!!")
 			except Exception as e:
 				logging.exception(e)
 		except multiprocessing.Queue.empty:
{warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon/warp_beacon.py

@@ -69,6 +69,23 @@ async def remove_placeholder(update: Update, context: ContextTypes.DEFAULT_TYPE,
 		logging.error("Failed to remove placeholder message!")
 		logging.exception(e)
 
+async def update_placeholder_text(update: Update, context: ContextTypes.DEFAULT_TYPE, placeholder_message_id: int, placeholder_text: str) -> None:
+	try:
+		timeout = int(os.environ.get("TG_WRITE_TIMEOUT", default=120))
+		await context.bot.edit_message_caption(
+			chat_id=update.message.chat_id,
+			message_id=placeholder_message_id,
+			parse_mode="MarkdownV2",
+			caption=" ⚠️ *%s*" % placeholder_text,
+			show_caption_above_media=True,
+			write_timeout=timeout,
+			read_timeout=timeout,
+			connect_timeout=timeout
+		)
+	except Exception as e:
+		logging.error("Failed to update placeholder message!")
+		logging.exception(e)
+
 async def send_text(update: Update, context: ContextTypes.DEFAULT_TYPE, reply_id: int, text: str) -> int:
 	try:
 		reply = await update.message.reply_text(
@@ -200,7 +217,8 @@ def build_tg_args(update: Update, context: ContextTypes.DEFAULT_TYPE, job: Uploa
 				width=job.media_info["width"],
 				height=job.media_info["height"],
 				duration=int(job.media_info["duration"]),
-				thumbnail=job.media_info["thumb"]
+				thumbnail=job.media_info["thumb"],
+				filename="downloaded_via_warp_beacon_bot%s" % (os.path.splitext(job.local_media_path)[-1])
 			)
 		elif job.media_type == "image":
 			if job.tg_file_id:
@@ -209,9 +227,9 @@ def build_tg_args(update: Update, context: ContextTypes.DEFAULT_TYPE, job: Uploa
 			else:
 				args["photo"] = job.tg_file_id.replace(":image", '')
 		else:
-			#args["photo"] = open(job.local_media_path, 'rb')
 			args["media"] = InputMediaPhoto(
-				media=open(job.local_media_path, 'rb')
+				media=open(job.local_media_path, 'rb'),
+				filename="downloaded_via_warp_beacon_bot%s" % (os.path.splitext(job.local_media_path)[-1])
 			)
 	elif job.media_type == "collection":
 		if job.tg_file_id:
@@ -234,12 +252,14 @@ def build_tg_args(update: Update, context: ContextTypes.DEFAULT_TYPE, job: Uploa
 					width=j.media_info["width"],
 					height=j.media_info["height"],
 					duration=int(j.media_info["duration"]),
-					thumbnail=j.media_info["thumb"]
+					thumbnail=j.media_info["thumb"],
+					filename="downloaded_via_warp_beacon_bot%s" % (os.path.splitext(j.local_media_path)[-1])
 				)
 				mediafs.append(vid)
 			elif j.media_type == "image":
 				photo = InputMediaPhoto(
-					media=open(j.local_media_path, 'rb')
+					media=open(j.local_media_path, 'rb'),
+					filename="downloaded_via_warp_beacon_bot%s" % (os.path.splitext(job.local_media_path)[-1])
 				)
 				mediafs.append(photo)
 			args["media"] = mediafs
@@ -274,6 +294,7 @@ async def upload_job(update: Update, context: ContextTypes.DEFAULT_TYPE, job: Up
 					message = await update.message.reply_video(**build_tg_args(update, context, job))
 				tg_file_ids.append(message.video.file_id)
 				job.tg_file_id = message.video.file_id
+				logging.info("Uploaded video file tg_file_id is '%s'", job.tg_file_id)
 			elif job.media_type == "image":
 				if job.placeholder_message_id:
 					message = await context.bot.edit_message_media(**build_tg_args(update, context, job))
@@ -297,19 +318,8 @@ async def upload_job(update: Update, context: ContextTypes.DEFAULT_TYPE, job: Up
 					job.media_collection[i].tg_file_id = msg.photo[-1].file_id + ':image'
 			logging.info("Uploaded to Telegram")
 			break
-		except error.TimedOut as e:
-			logging.error("TG timeout error!")
-			logging.exception(e)
-			await remove_placeholder(update, context, job.placeholder_message_id)
-			await send_text(
-				update,
-				context,
-				job.message_id,
-				"Telegram timeout error occurred! Your configuration timeout value is `%d`" % timeout
-			)
-			break
-		except error.NetworkError as e:
-			logging.error("Failed to upload due telegram limits :(")
+		except (error.NetworkError, error.TimedOut) as e:
+			logging.error("Failed to upload due telegram limitations :(")
 			logging.exception(e)
 			if not "Request Entity Too Large" in e.message:
 				logging.info("TG upload will be retried. Configuration `TG_MAX_RETRIES` values is %d.", max_retries)
@@ -345,6 +355,9 @@ async def upload_job(update: Update, context: ContextTypes.DEFAULT_TYPE, job: Up
 	else:
 		if os.path.exists(job.local_media_path):
 			os.unlink(job.local_media_path)
+		if job.local_compressed_media_path:
+			if os.path.exists(job.local_compressed_media_path):
+				os.unlink(job.local_compressed_media_path)
 
 	return tg_file_ids
 
@@ -403,6 +416,8 @@ async def handler(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
 			if job.placeholder_message_id:
 				await remove_placeholder(update, context, job.placeholder_message_id)
 			return await send_text(update, context, reply_id=job.message_id, text=job.job_failed_msg)
+		if job.job_warning and job.job_warning_msg:
+			return await update_placeholder_text(update, context, job.placeholder_message_id, job.job_warning_msg)
 		tg_file_ids = await upload_job(update, context, job)
 		if tg_file_ids:
 			if job.media_type == "collection" and job.save_items:
@@ -413,9 +428,6 @@ async def handler(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
 		except Exception as e:
 			logging.error("Exception occurred while performing upload callback!")
 			logging.exception(e)
-		finally:
-			uploader.process_done(job.uniq_id)
-			uploader.remove_callback(job.message_id)
 
 	try:
 		# create placeholder message for long download
{warp_beacon-1.0.6 → warp_beacon-1.0.7/warp_beacon.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: warp_beacon
-Version: 1.0.6
+Version: 1.0.7
 Summary: Telegram bot for expanding external media links
 Home-page: https://github.com/sb0y/warp_beacon
 Author: Andrey Bagrintsev
@@ -224,8 +224,9 @@ Classifier: Programming Language :: Python :: 3.10
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
+Requires-Dist: ffmpeg-python
 Requires-Dist: python-telegram-bot
-Requires-Dist: opencv-python
+Requires-Dist: av
 Requires-Dist: urlextract
 Requires-Dist: pillow
 Requires-Dist: pymongo
{warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon.egg-info/SOURCES.txt

@@ -17,6 +17,8 @@ warp_beacon.egg-info/dependency_links.txt
 warp_beacon.egg-info/entry_points.txt
 warp_beacon.egg-info/requires.txt
 warp_beacon.egg-info/top_level.txt
+warp_beacon/compress/__init__.py
+warp_beacon/compress/video.py
 warp_beacon/jobs/__init__.py
 warp_beacon/jobs/abstract.py
 warp_beacon/jobs/download_job.py
{warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon.egg-info/requires.txt

@@ -1,5 +1,6 @@
+ffmpeg-python
 python-telegram-bot
-opencv-python
+av
 urlextract
 pillow
 pymongo
{warp_beacon-1.0.6 → warp_beacon-1.0.7}/warp_beacon.egg-info/top_level.txt

@@ -1,5 +1,7 @@
 warp_beacon
 warp_beacon/__version__
+warp_beacon/compress
+warp_beacon/compress/video
 warp_beacon/jobs
 warp_beacon/jobs/abstract
 warp_beacon/jobs/download_job
warp_beacon-1.0.6/warp_beacon/__version__.py (removed)

@@ -1,2 +0,0 @@
-__version__ = "1.0.6"
-
warp_beacon-1.0.6/warp_beacon/mediainfo/video.py (removed)

@@ -1,80 +0,0 @@
-import io, os
-from typing import Optional
-import cv2
-
-class VideoInfo(object):
-	vid = None
-	# need for filesize
-	filename = ""
-
-	def __init__(self, filename: str) -> None:
-		self.vid = cv2.VideoCapture(filename)
-		self.filename = filename
-
-	def __del__(self) -> None:
-		self.vid.release()
-
-	def get_demensions(self) -> dict:
-		res = {"width": None, "height": None}
-		if self.vid.isOpened():
-			res["width"] = int(self.vid.get(cv2.CAP_PROP_FRAME_WIDTH))
-			res["height"] = int(self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
-		return res
-
-	def get_duration(self) -> int:
-		duration_in_seconds = None
-		if self.vid.isOpened():
-			fps = self.vid.get(cv2.CAP_PROP_FPS)
-			total_no_frames = self.vid.get(cv2.CAP_PROP_FRAME_COUNT)
-			duration_in_seconds = int(total_no_frames / fps)
-
-		return duration_in_seconds
-
-	def get_filesize(self) -> float:
-		size = os.path.getsize(self.filename)
-		return round(size/(pow(1024,2)), 2)
-
-	def get_finfo(self, except_info: tuple=()) -> dict:
-		res = {}
-		res.update(self.get_demensions())
-		if "duration" not in except_info:
-			res["duration"] = self.get_duration()
-		if "filesize" not in except_info:
-			res["filesize"] = self.get_filesize()
-		return res
-
-	def shrink_image_to_fit(self, img):
-		height, width = img.shape[:2]
-		max_height = 320
-		max_width = 320
-
-		# only shrink if img is bigger than required
-		if max_height < height or max_width < width:
-			# get scaling factor
-			scaling_factor = max_height / float(height)
-			if max_width/float(width) < scaling_factor:
-				scaling_factor = max_width / float(width)
-			# resize image
-			img = cv2.resize(img, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)
-
-		return img
-
-	def generate_thumbnail(self) -> Optional[io.BytesIO]:
-		if self.vid.isOpened():
-			count = 4
-			success = True
-			while success:
-				self.vid.set(cv2.CAP_PROP_POS_MSEC,(count*1000))
-				success, image = self.vid.read()
-				if success:
-					image = self.shrink_image_to_fit(image)
-					success, buffer = cv2.imencode(".jpg", image)
-					if success:
-						io_buf = io.BytesIO(buffer)
-						io_buf.seek(0)
-						#io_buf.name = "thumbnail.png"
-						return io_buf
-				count += 1
-
-		return None