warp-beacon 1.0.6__py3-none-any.whl → 1.0.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- warp_beacon/__version__.py +1 -1
- warp_beacon/compress/__init__.py +0 -0
- warp_beacon/compress/video.py +91 -0
- warp_beacon/jobs/abstract.py +7 -1
- warp_beacon/mediainfo/video.py +52 -55
- warp_beacon/scrapler/__init__.py +24 -3
- warp_beacon/uploader/__init__.py +35 -24
- warp_beacon/warp_beacon.py +33 -21
- {warp_beacon-1.0.6.dist-info → warp_beacon-1.0.7.dist-info}/METADATA +3 -2
- warp_beacon-1.0.7.dist-info/RECORD +25 -0
- {warp_beacon-1.0.6.dist-info → warp_beacon-1.0.7.dist-info}/WHEEL +1 -1
- {warp_beacon-1.0.6.dist-info → warp_beacon-1.0.7.dist-info}/top_level.txt +2 -0
- warp_beacon-1.0.6.dist-info/RECORD +0 -23
- {warp_beacon-1.0.6.dist-info → warp_beacon-1.0.7.dist-info}/LICENSE +0 -0
- {warp_beacon-1.0.6.dist-info → warp_beacon-1.0.7.dist-info}/entry_points.txt +0 -0
warp_beacon/__version__.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "1.0.6"
+__version__ = "1.0.7"

warp_beacon/compress/__init__.py
ADDED
File without changes

warp_beacon/compress/video.py
ADDED
@@ -0,0 +1,91 @@
+import os
+import pathlib
+
+import ffmpeg
+
+import logging
+
+class VideoCompress(object):
+	video_full_path = ""
+	min_audio_bitrate = 32000
+	max_audio_bitrate = 256000
+	duration = 0.0
+	size = 0
+	audio_bitrate = 0.0
+	video_bitrate = 0.0
+	probe = None
+	ffmpeg = None
+
+	def __init__(self, file_path: str) -> None:
+		self.video_full_path = file_path
+		self.ffmpeg = ffmpeg
+		self.probe = self.ffmpeg.probe(file_path)
+		format_section = self.probe.get("format", {})
+		self.duration = float(format_section.get("duration", 0.0))
+		self.size = int(format_section.get("size", 0))
+		self.audio_bitrate = float(next((s for s in self.probe['streams'] if s['codec_type'] == 'audio'), None).get("bit_rate", 0.0))
+		self.video_bitrate = float(next((s for s in self.probe['streams'] if s['codec_type'] == 'video'), None).get("bit_rate", 0.0))
+
+	def __del__(self) -> None:
+		pass
+
+	def generate_filepath(self, base_filepath: str) -> str:
+		path_info = pathlib.Path(base_filepath)
+		ext = path_info.suffix
+		old_filename = path_info.stem
+		new_filename = "%s_compressed%s" % (old_filename, ext)
+		new_filepath = "%s/%s" % (os.path.dirname(base_filepath), new_filename)
+
+		return new_filepath
+
+	def get_size(self) -> int:
+		return self.size
+
+	def get_resolution(self) -> tuple:
+		video_info = next((s for s in self.probe['streams'] if s['codec_type'] == 'video'), None)
+		if video_info:
+			return (int(video_info.get("width", 0)), int(video_info.get("height", 0)))
+
+		return (0, 0)
+
+	def get_duration(self) -> float:
+		return self.duration
+
+	def compress_to(self, output_file_name: str, target_size: int) -> bool:
+		try:
+			#if self.size > 50.0:
+			#	best_min_size = (32000 + 100000) * (1.073741824 * self.duration) / (8 * 1024)
+			#	target_size = best_min_size
+
+			# Target total bitrate, in bps.
+			target_total_bitrate = (target_size * 1024 * 8) / (1.073741824 * self.duration)
+
+			audio_bitrate = self.audio_bitrate
+			# Target audio bitrate, in bps
+			if 10 * audio_bitrate > target_total_bitrate:
+				audio_bitrate = target_total_bitrate / 10
+			if audio_bitrate < self.min_audio_bitrate < target_total_bitrate:
+				audio_bitrate = self.min_audio_bitrate
+			elif audio_bitrate > self.max_audio_bitrate:
+				audio_bitrate = self.max_audio_bitrate
+			# Target video bitrate, in bps.
+			video_bitrate = target_total_bitrate - audio_bitrate
+
+			i = ffmpeg.input(self.video_full_path)
+			#ffmpeg.output(
+			#	i,
+			#	os.devnull,
+			#	**{'c:v': 'libx264', 'b:v': video_bitrate, 'pass': 1, 'f': 'mp4'}
+			#).overwrite_output().run()
+			ffmpeg.output(
+				i,
+				output_file_name,
+				**{'preset': 'medium', 'c:v': 'libx264', 'b:v': video_bitrate, 'c:a': 'aac', 'b:a': audio_bitrate}
+			).overwrite_output().run()
+
+			return True
+		except Exception as e:
+			logging.error("Failed to compress video '%s'!", self.video_full_path)
+			logging.exception(e)
+
+			return False
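For context on the numbers above: compress_to() turns a size budget into bitrates — total bitrate = (target_size_KiB × 1024 × 8) / (1.073741824 × duration), at most a tenth of which goes to audio (clamped to the 32–256 kbps range), with the remainder given to video. The 1.073741824 divisor is 1024³/10⁹, which leaves roughly 7% headroom for container overhead. A standalone sketch of the same arithmetic (the 60-second duration and 128 kbps audio input are hypothetical):

MIN_AUDIO_BITRATE = 32000
MAX_AUDIO_BITRATE = 256000

def budget_bitrates(target_size_kib: int, duration_s: float, audio_bitrate: float) -> tuple:
	# Total bits available, spread over a padded duration.
	target_total = (target_size_kib * 1024 * 8) / (1.073741824 * duration_s)
	# Audio gets at most a tenth of the budget, clamped to sane bounds.
	if 10 * audio_bitrate > target_total:
		audio_bitrate = target_total / 10
	if audio_bitrate < MIN_AUDIO_BITRATE < target_total:
		audio_bitrate = MIN_AUDIO_BITRATE
	elif audio_bitrate > MAX_AUDIO_BITRATE:
		audio_bitrate = MAX_AUDIO_BITRATE
	# Whatever is left goes to video.
	return (target_total - audio_bitrate, audio_bitrate)

video_bps, audio_bps = budget_bitrates(50 * 1000, 60.0, 128000.0)
print("video: %.0f bps, audio: %.0f bps" % (video_bps, audio_bps))
# → roughly 6.23 Mbps video and 128 kbps audio for a 60 s clip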
warp_beacon/jobs/abstract.py
CHANGED
@@ -8,14 +8,17 @@ class JobSettings(TypedDict):
 	message_id: int
 	placeholder_message_id: int
 	local_media_path: str
+	local_compressed_media_path: str
 	media_info: dict
 	url: str
 	uniq_id: str
 	tg_file_id: str
 	in_process: bool
-	job_failed: bool
 	media_type: str
+	job_failed: bool
 	job_failed_msg: str
+	job_warning: bool
+	job_warning_message: str
 	effective_url: str
 	save_items: bool
 	media_collection: list
@@ -25,12 +28,15 @@ class AbstractJob(ABC):
 	message_id: int = 0
 	placeholder_message_id: int = 0
 	local_media_path: str = ""
+	local_compressed_media_path: str = ""
 	media_info: dict = {}
 	url: str = ""
 	uniq_id: str = ""
 	tg_file_id: str = ""
 	media_type: str = "video"
 	in_process: bool = False
+	job_warning: bool = False
+	job_warning_message: str = ""
 	job_failed: bool = False
 	job_failed_msg: str = ""
 	effective_url: str = ""
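These fields pair a non-fatal warning channel with the existing job_failed/job_failed_msg one, plus a slot for the compressed file path. Note that this file declares job_warning_message while the scrapler and handler code further down read and write job_warning_msg. A minimal sketch of flagging a job (the kwargs-based UploadJob constructor is assumed here, mirroring the to_upload_job() call below; the path is hypothetical):

from warp_beacon.jobs.upload_job import UploadJob

job = UploadJob(
	local_media_path="/tmp/media.mp4",  # hypothetical path
	job_warning=True,
	job_warning_msg="File exceeds Telegram limits, compressing ...",
)
# The uploader loop checks job.job_warning and, when a message is set,
# fires the placeholder-edit callback instead of uploading.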
warp_beacon/mediainfo/video.py
CHANGED
@@ -1,39 +1,43 @@
 import io, os
-
-import cv2
+
+from typing import Union
+from PIL import Image
+import av
+
+import logging
 
 class VideoInfo(object):
-
-
+	width = 0
+	height = 0
+	duration = 0.0
+	ffmpeg = None
 	filename = ""
 
 	def __init__(self, filename: str) -> None:
-		self.vid = cv2.VideoCapture(filename)
 		self.filename = filename
+		with av.open(file=self.filename, mode='r') as container:
+			stream = container.streams.video[0]
+			time_base = stream.time_base
+			self.duration = float(stream.duration * time_base)
+			framerate = stream.average_rate
+			frame_container_pts = round((1 / framerate) / time_base)
+			container.seek(frame_container_pts, backward=True, stream=stream)
+			frame = next(container.decode(video=0))
+			self.width = frame.width
+			self.height = frame.height
 
 	def __del__(self) -> None:
-
+		pass
 
 	def get_demensions(self) -> dict:
-
-		if self.vid.isOpened():
-			res["width"] = int(self.vid.get(cv2.CAP_PROP_FRAME_WIDTH))
-			res["height"] = int(self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
-		return res
+		return {"width": self.width, "height": self.height}
 
-	def get_duration(self) ->
-
-		if self.vid.isOpened():
-			fps = self.vid.get(cv2.CAP_PROP_FPS)
-			total_no_frames = self.vid.get(cv2.CAP_PROP_FRAME_COUNT)
-			duration_in_seconds = int(total_no_frames / fps)
+	def get_duration(self) -> float:
+		return self.duration
 
-
-
-
-		size = os.path.getsize(self.filename)
-		return round(size/(pow(1024,2)), 2)
+	@staticmethod
+	def get_filesize(filename: str) -> float:
+		return os.stat(filename).st_size / 1024 / 1024
 
 	def get_finfo(self, except_info: tuple=()) -> dict:
 		res = {}
@@ -41,40 +45,33 @@ class VideoInfo(object):
 		if "duration" not in except_info:
 			res["duration"] = self.get_duration()
 		if "filesize" not in except_info:
-			res["filesize"] =
+			res["filesize"] = VideoInfo.get_filesize(self.filename)
 		return res
 
-	def shrink_image_to_fit(self,
-
-
-		max_width = 320
-
-		# only shrink if img is bigger than required
-		if max_height < height or max_width < width:
-			# get scaling factor
-			scaling_factor = max_height / float(height)
-			if max_width/float(width) < scaling_factor:
-				scaling_factor = max_width / float(width)
-			# resize image
-			img = cv2.resize(img, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)
-
-		return img
+	def shrink_image_to_fit(self, image: Image, size: tuple = (320, 320)) -> Image:
+		image.thumbnail(size, Image.Resampling.LANCZOS)
+		return image
 
-	def generate_thumbnail(self) ->
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+	def generate_thumbnail(self) -> Union[io.BytesIO, None]:
+		try:
+			image = None
+			with av.open(file=self.filename, mode='r') as container:
+				# Signal that we only want to look at keyframes.
+				stream = container.streams.video[0]
+				stream.codec_context.skip_frame = "NONKEY"
+				time_base = stream.time_base
+				framerate = stream.average_rate
+				frame_container_pts = round((60 / framerate) / time_base)
+				container.seek(frame_container_pts, backward=True, stream=stream)
+				frame = next(container.decode(video=0))
+				image = frame.to_image()
+				image = self.shrink_image_to_fit(image)
+				io_buf = io.BytesIO()
+				io_buf.seek(0)
+				image.save(io_buf, format='JPEG')
+				return io_buf
+		except Exception as e:
+			logging.error("Failed to generate thumbnail!")
+			logging.exception(e)
 
 		return None
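The rewrite swaps OpenCV for PyAV: one pass over the container on construction yields width, height and duration, get_filesize() becomes a @staticmethod so other modules can size files without probing them, and thumbnails come from a keyframe around the 60th frame, shrunk with Pillow. A rough usage sketch (the sample file name is hypothetical):

from warp_beacon.mediainfo.video import VideoInfo

info = VideoInfo("sample.mp4")                # probes via PyAV on construction
print(info.get_demensions())                  # e.g. {'width': 1080, 'height': 1920}
print(info.get_duration())                    # duration in seconds, as float
print(VideoInfo.get_filesize("sample.mp4"))   # size in MiB, no instance needed

thumb = info.generate_thumbnail()             # io.BytesIO holding a JPEG, or None
if thumb:
	with open("thumb.jpg", "wb") as f:
		f.write(thumb.getbuffer())

Making get_filesize() static is what lets the download worker below re-measure the compressed file without constructing a second VideoInfo.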
warp_beacon/scrapler/__init__.py
CHANGED
@@ -1,14 +1,18 @@
+import os
+import time
+
 from typing import Optional
 import multiprocessing
-import time
-import logging
 from requests.exceptions import ConnectTimeout, HTTPError
 from instagrapi.exceptions import MediaNotFound, UnknownError, ClientNotFoundError, UserNotFound
 
 from warp_beacon.mediainfo.video import VideoInfo
+from warp_beacon.compress.video import VideoCompress
 from warp_beacon.uploader import AsyncUploader
 from warp_beacon.jobs.download_job import DownloadJob
 
+import logging
+
 CONST_CPU_COUNT = multiprocessing.cpu_count()
 
 class AsyncDownloader(object):
@@ -39,8 +43,8 @@
 			video_info = VideoInfo(path)
 			media_info = video_info.get_finfo(tuple(fr_media_info.keys()))
 			media_info.update(fr_media_info)
-			logging.info("Media file info: %s", media_info)
 			media_info["thumb"] = video_info.generate_thumbnail()
+			logging.info("Media file info: %s", media_info)
 		except Exception as e:
 			logging.error("Failed to process media info!")
 			logging.exception(e)
@@ -103,6 +107,21 @@
 					media_info = {"filesize": 0}
 					if item["media_type"] == "video":
 						media_info = self.get_media_info(item["local_media_path"], item["media_info"])
+						if media_info["filesize"] > 50.0:
+							logging.info("Detected big file. Starting compressing with ffmpeg ...")
+							self.uploader.queue_task(job.to_upload_job(
+								job_warning=True,
+								job_warning_msg="Downloaded file size is bigger than Telegram limits\! Performing video compression\. This may take a while\.")
+							)
+							ffmpeg = VideoCompress(file_path=item["local_media_path"])
+							new_filepath = ffmpeg.generate_filepath(base_filepath=item["local_media_path"])
+							if ffmpeg.compress_to(new_filepath, target_size=50 * 1000):
+								logging.info("Successfully compressed file '%s'", new_filepath)
+								os.unlink(item["local_media_path"])
+								item["local_media_path"] = new_filepath
+								item["local_compressed_media_path"] = new_filepath
+								media_info["filesize"] = VideoInfo.get_filesize(new_filepath)
+								logging.info("New file size of compressed file is '%.3f'", media_info["filesize"])
 					elif item["media_type"] == "collection":
 						for v in item["items"]:
 							if v["media_type"] == "video":
@@ -117,6 +136,8 @@
 						job_args["save_items"] = item.get("save_items", False)
 					else:
 						job_args["local_media_path"] = item["local_media_path"]
+						if item.get("local_compressed_media_path", None):
+							job_args["local_media_path"] = item.get("local_compressed_media_path", None)
 
 					logging.debug("local_media_path: '%s'", job_args.get("local_media_path", ""))
 					logging.debug("media_collection: '%s'", str(job_args.get("media_collection", {})))
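A note on units here: media_info["filesize"] is in MiB (see get_filesize above), so the > 50.0 check fires right at Telegram's 50 MB bot-upload ceiling, while target_size=50 * 1000 hands compress_to() its budget in KiB. Quick arithmetic confirming that budget undershoots the trigger:

target_size_kib = 50 * 1000               # what the worker passes in
budget_bytes = target_size_kib * 1024     # 51,200,000 bytes
budget_mib = budget_bytes / 1024 / 1024   # ≈ 48.83 MiB

# compress_to() also divides the bitrate by 1.073741824 (1024**3 / 10**9),
# so the nominal output lands smaller still:
effective_mib = budget_mib / 1.073741824  # ≈ 45.47 MiB
print("%.2f MiB budget, %.2f MiB after headroom" % (budget_mib, effective_mib))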
warp_beacon/uploader/__init__.py
CHANGED
@@ -40,12 +40,13 @@ class AsyncUploader(object):
 	def add_callback(self, message_id: int, callback: Callable, update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
 		def callback_wrap(*args, **kwargs) -> None:
 			ret = callback(*args, **kwargs)
-			self.remove_callback(message_id)
+			#self.remove_callback(message_id)
 			return ret
 		self.callbacks[message_id] = {"callback": callback_wrap, "update": update, "context": context}
 
 	def remove_callback(self, message_id: int) -> None:
 		if message_id in self.callbacks:
+			logging.debug("Removing callback with message id #%d", message_id)
 			del self.callbacks[message_id]
 
 	def stop_all(self) -> None:
@@ -85,34 +86,44 @@ class AsyncUploader(object):
 				in_process = job.in_process
 				uniq_id = job.uniq_id
 				message_id = job.placeholder_message_id
-				if not in_process:
+				if not in_process and not job.job_failed and not job.job_warning:
 					logging.info("Accepted upload job, file(s): '%s'", path)
 				try:
-
-					if
-
-
-
-
+					if message_id in self.callbacks:
+						if job.job_failed:
+							logging.info("URL '%s' download failed. Skipping upload job ...", job.url)
+							if job.job_failed_msg: # we want to say something to user
+								asyncio.ensure_future(self.callbacks[message_id]["callback"](job), loop=self.loop)
+							self.process_done(uniq_id)
+							self.remove_callback(message_id)
+							continue
+						if job.job_warning:
+							logging.info("Job warning occurred ...")
+							if job.job_warning_msg:
+								asyncio.ensure_future(self.callbacks[message_id]["callback"](job), loop=self.loop)
+							continue
+						if in_process:
+							db_list_dicts = self.storage.db_lookup_id(uniq_id)
+							if db_list_dicts:
+								tg_file_ids = [i["tg_file_id"] for i in db_list_dicts]
+								dlds_len = len(db_list_dicts)
+								if dlds_len > 1:
+									job.tg_file_id = ",".join(tg_file_ids)
+									job.media_type = "collection"
+								elif dlds_len:
+									job.tg_file_id = ",".join(tg_file_ids)
+									job.media_type = db_list_dicts.pop()["media_type"]
+								asyncio.ensure_future(self.callbacks[message_id]["callback"](job), loop=self.loop)
 								self.process_done(uniq_id)
 								self.remove_callback(message_id)
-							continue
-					if in_process:
-						db_list_dicts = self.storage.db_lookup_id(uniq_id)
-						if db_list_dicts:
-							tg_file_ids = [i["tg_file_id"] for i in db_list_dicts]
-							dlds_len = len(db_list_dicts)
-							if dlds_len > 1:
-								job.tg_file_id = ",".join(tg_file_ids)
-								job.media_type = "collection"
-							elif dlds_len:
-								job.tg_file_id = ",".join(tg_file_ids)
-								job.media_type = db_list_dicts.pop()["media_type"]
-							asyncio.ensure_future(self.callbacks[m_id]["callback"](job), loop=self.loop)
-						else:
-							self.queue_task(job)
 							else:
-
+								self.queue_task(job)
+						else:
+							asyncio.ensure_future(self.callbacks[message_id]["callback"](job), loop=self.loop)
+							self.process_done(uniq_id)
+							self.remove_callback(message_id)
+					else:
+						logging.info("No callback no call!!")
 				except Exception as e:
 					logging.exception(e)
 			except multiprocessing.Queue.empty:
warp_beacon/warp_beacon.py
CHANGED
@@ -69,6 +69,23 @@ async def remove_placeholder(update: Update, context: ContextTypes.DEFAULT_TYPE,
 		logging.error("Failed to remove placeholder message!")
 		logging.exception(e)
 
+async def update_placeholder_text(update: Update, context: ContextTypes.DEFAULT_TYPE, placeholder_message_id: int, placeholder_text: str) -> None:
+	try:
+		timeout = int(os.environ.get("TG_WRITE_TIMEOUT", default=120))
+		await context.bot.edit_message_caption(
+			chat_id=update.message.chat_id,
+			message_id=placeholder_message_id,
+			parse_mode="MarkdownV2",
+			caption=" ⚠️ *%s*" % placeholder_text,
+			show_caption_above_media=True,
+			write_timeout=timeout,
+			read_timeout=timeout,
+			connect_timeout=timeout
+		)
+	except Exception as e:
+		logging.error("Failed to update placeholder message!")
+		logging.exception(e)
+
 async def send_text(update: Update, context: ContextTypes.DEFAULT_TYPE, reply_id: int, text: str) -> int:
 	try:
 		reply = await update.message.reply_text(
@@ -200,7 +217,8 @@ def build_tg_args(update: Update, context: ContextTypes.DEFAULT_TYPE, job: Uploa
 				width=job.media_info["width"],
 				height=job.media_info["height"],
 				duration=int(job.media_info["duration"]),
-				thumbnail=job.media_info["thumb"]
+				thumbnail=job.media_info["thumb"],
+				filename="downloaded_via_warp_beacon_bot%s" % (os.path.splitext(job.local_media_path)[-1])
 			)
 	elif job.media_type == "image":
 		if job.tg_file_id:
@@ -209,9 +227,9 @@ def build_tg_args(update: Update, context: ContextTypes.DEFAULT_TYPE, job: Uploa
 			else:
 				args["photo"] = job.tg_file_id.replace(":image", '')
 		else:
-			#args["photo"] = open(job.local_media_path, 'rb')
 			args["media"] = InputMediaPhoto(
-				media=open(job.local_media_path, 'rb')
+				media=open(job.local_media_path, 'rb'),
+				filename="downloaded_via_warp_beacon_bot%s" % (os.path.splitext(job.local_media_path)[-1])
 			)
 	elif job.media_type == "collection":
 		if job.tg_file_id:
@@ -234,12 +252,14 @@ def build_tg_args(update: Update, context: ContextTypes.DEFAULT_TYPE, job: Uploa
 						width=j.media_info["width"],
 						height=j.media_info["height"],
 						duration=int(j.media_info["duration"]),
-						thumbnail=j.media_info["thumb"]
+						thumbnail=j.media_info["thumb"],
+						filename="downloaded_via_warp_beacon_bot%s" % (os.path.splitext(j.media_info["thumb"] if False else j.local_media_path)[-1]) if False else "downloaded_via_warp_beacon_bot%s" % (os.path.splitext(j.local_media_path)[-1])
 					)
 					mediafs.append(vid)
 				elif j.media_type == "image":
 					photo = InputMediaPhoto(
-						media=open(j.local_media_path, 'rb')
+						media=open(j.local_media_path, 'rb'),
+						filename="downloaded_via_warp_beacon_bot%s" % (os.path.splitext(job.local_media_path)[-1])
 					)
 					mediafs.append(photo)
 			args["media"] = mediafs
@@ -274,6 +294,7 @@ async def upload_job(update: Update, context: ContextTypes.DEFAULT_TYPE, job: Up
 				message = await update.message.reply_video(**build_tg_args(update, context, job))
 				tg_file_ids.append(message.video.file_id)
 				job.tg_file_id = message.video.file_id
+				logging.info("Uploaded video file tg_file_id is '%s'", job.tg_file_id)
 			elif job.media_type == "image":
 				if job.placeholder_message_id:
 					message = await context.bot.edit_message_media(**build_tg_args(update, context, job))
@@ -297,19 +318,8 @@ async def upload_job(update: Update, context: ContextTypes.DEFAULT_TYPE, job: Up
 						job.media_collection[i].tg_file_id = msg.photo[-1].file_id + ':image'
 			logging.info("Uploaded to Telegram")
 			break
-		except error.TimedOut as e:
-			logging.error("
-			logging.exception(e)
-			await remove_placeholder(update, context, job.placeholder_message_id)
-			await send_text(
-				update,
-				context,
-				job.message_id,
-				"Telegram timeout error occurred! Your configuration timeout value is `%d`" % timeout
-			)
-			break
-		except error.NetworkError as e:
-			logging.error("Failed to upload due telegram limits :(")
+		except (error.NetworkError, error.TimedOut) as e:
+			logging.error("Failed to upload due telegram limitations :(")
 			logging.exception(e)
 			if not "Request Entity Too Large" in e.message:
 				logging.info("TG upload will be retried. Configuration `TG_MAX_RETRIES` values is %d.", max_retries)
@@ -345,6 +355,9 @@ async def upload_job(update: Update, context: ContextTypes.DEFAULT_TYPE, job: Up
 	else:
 		if os.path.exists(job.local_media_path):
 			os.unlink(job.local_media_path)
+		if job.local_compressed_media_path:
+			if os.path.exists(job.local_compressed_media_path):
+				os.unlink(job.local_compressed_media_path)
 
 	return tg_file_ids
 
@@ -403,6 +416,8 @@ async def handler(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
 			if job.placeholder_message_id:
 				await remove_placeholder(update, context, job.placeholder_message_id)
 			return await send_text(update, context, reply_id=job.message_id, text=job.job_failed_msg)
+		if job.job_warning and job.job_warning_msg:
+			return await update_placeholder_text(update, context, job.placeholder_message_id, job.job_warning_msg)
 		tg_file_ids = await upload_job(update, context, job)
 		if tg_file_ids:
 			if job.media_type == "collection" and job.save_items:
@@ -413,9 +428,6 @@ async def handler(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
 		except Exception as e:
 			logging.error("Exception occurred while performing upload callback!")
 			logging.exception(e)
-		finally:
-			uploader.process_done(job.uniq_id)
-			uploader.remove_callback(job.message_id)
 
 	try:
 		# create placeholder message for long download
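One detail ties update_placeholder_text() to the warning string queued in scrapler/__init__.py: the caption is sent with parse_mode="MarkdownV2", where characters such as ! and . are reserved, which is why the message arrives pre-escaped ("limits\!", "compression\."). A hypothetical helper showing that escaping (the package does it by hand instead):

import re

def escape_markdown_v2(text: str) -> str:
	# Telegram MarkdownV2 reserves these characters outside entities.
	return re.sub(r"([_*\[\]()~`>#+\-=|{}.!])", r"\\\1", text)

print(escape_markdown_v2("Performing video compression. This may take a while!"))
# → Performing video compression\. This may take a while\!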
{warp_beacon-1.0.6.dist-info → warp_beacon-1.0.7.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: warp_beacon
-Version: 1.0.6
+Version: 1.0.7
 Summary: Telegram bot for expanding external media links
 Home-page: https://github.com/sb0y/warp_beacon
 Author: Andrey Bagrintsev
@@ -224,8 +224,9 @@ Classifier: Programming Language :: Python :: 3.10
 Requires-Python: >=3.10
 Description-Content-Type: text/markdown
 License-File: LICENSE
+Requires-Dist: ffmpeg-python
 Requires-Dist: python-telegram-bot
-Requires-Dist:
+Requires-Dist: av
 Requires-Dist: urlextract
 Requires-Dist: pillow
 Requires-Dist: pymongo
warp_beacon-1.0.7.dist-info/RECORD
ADDED
@@ -0,0 +1,25 @@
+etc/warp_beacon/warp_beacon.conf,sha256=De80YgoU2uZ5-v2s3QX3etAGZ4bZxaBqV0d2xk872RQ,240
+lib/systemd/system/warp_beacon.service,sha256=lPmHqLqcI2eIV7nwHS0qcALQrznixqJuwwPfa2mDLUA,372
+var/warp_beacon/placeholder.gif,sha256=cE5CGJVaop4Sx21zx6j4AyoHU0ncmvQuS2o6hJfEH88,6064
+warp_beacon/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+warp_beacon/__version__.py,sha256=8CgA6h1iA3Gr69GIpESCtEIeDKOjL04M-1IxwIOBBo8,23
+warp_beacon/warp_beacon.py,sha256=agUu6_Qt1UnGTSqYqXQZtHeeWqRkhlIHt_Repph1G3k,19581
+warp_beacon/compress/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+warp_beacon/compress/video.py,sha256=_PDMVYCyzLYxHv1uZmmzGcG_8rjaZr7BTXsXTTy_oS4,2846
+warp_beacon/jobs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+warp_beacon/jobs/abstract.py,sha256=-eXvrRgrUBed1z9iYZd0RxZ8fi0Okq3r0i7PDN31FkY,1666
+warp_beacon/jobs/download_job.py,sha256=wfZrKUerfYIjWkRxPzfl5gwIlcotIMH7OpTUM9ae8NY,736
+warp_beacon/jobs/upload_job.py,sha256=Vaogc4vbpAfyaT4VkIHEPLFRELmM44TDqkmnPYh3Ymc,740
+warp_beacon/mediainfo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+warp_beacon/mediainfo/video.py,sha256=hhTbbIT9cTXIdpTh-A6CYDuW6OcXr_CI9Qj1scl8HHQ,2244
+warp_beacon/scrapler/__init__.py,sha256=J1kRZfkCuTucRnklElXnrMUZUGpg8wIGYWGrfY5a1xc,7335
+warp_beacon/scrapler/abstract.py,sha256=MJxpEovCWDYq2SwbbMsRDfp77WTwvbXXKiQxKWoj0ZQ,304
+warp_beacon/scrapler/instagram.py,sha256=8CF_Zdxn1hStz_PgLxTc0FTt5heI84d-Ks0XzmD7-_o,7248
+warp_beacon/storage/__init__.py,sha256=NhD3V7UNRiZNf61yQEAjXOfi-tfA2LaJa7a7kvbkmtE,2402
+warp_beacon/uploader/__init__.py,sha256=9qQAuYisXiVIjQghxcxpF4WAdW7lm7HmpkOXQjGNJXk,4346
+warp_beacon-1.0.7.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+warp_beacon-1.0.7.dist-info/METADATA,sha256=iLttbOyvKGllD60KmYiUAyYgZM78vNlXOwc4y0XTDCA,18219
+warp_beacon-1.0.7.dist-info/WHEEL,sha256=rWxmBtp7hEUqVLOnTaDOPpR-cZpCDkzhhcBce-Zyd5k,91
+warp_beacon-1.0.7.dist-info/entry_points.txt,sha256=eSB61Rb89d56WY0O-vEIQwkn18J-4CMrJcLA_R_8h3g,119
+warp_beacon-1.0.7.dist-info/top_level.txt,sha256=510sqsM4LLO-DC4HbUkwdVKmYY_26lbnvJwSq_RLT00,382
+warp_beacon-1.0.7.dist-info/RECORD,,
warp_beacon-1.0.6.dist-info/RECORD
REMOVED
@@ -1,23 +0,0 @@
-etc/warp_beacon/warp_beacon.conf,sha256=De80YgoU2uZ5-v2s3QX3etAGZ4bZxaBqV0d2xk872RQ,240
-lib/systemd/system/warp_beacon.service,sha256=lPmHqLqcI2eIV7nwHS0qcALQrznixqJuwwPfa2mDLUA,372
-var/warp_beacon/placeholder.gif,sha256=cE5CGJVaop4Sx21zx6j4AyoHU0ncmvQuS2o6hJfEH88,6064
-warp_beacon/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-warp_beacon/__version__.py,sha256=KunIFMMdy9Pr0jimIawsybWFdVLbq6gm7t27g1kPr0E,23
-warp_beacon/warp_beacon.py,sha256=2wNFO1WFg9_1RXUar5STqSpGBysxBXUBfIGLlOafvvc,18668
-warp_beacon/jobs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-warp_beacon/jobs/abstract.py,sha256=8iKHelnddpnKtF-FzNkJ2zDcXrEmBO7q5ZoIV2Qby3o,1490
-warp_beacon/jobs/download_job.py,sha256=wfZrKUerfYIjWkRxPzfl5gwIlcotIMH7OpTUM9ae8NY,736
-warp_beacon/jobs/upload_job.py,sha256=Vaogc4vbpAfyaT4VkIHEPLFRELmM44TDqkmnPYh3Ymc,740
-warp_beacon/mediainfo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-warp_beacon/mediainfo/video.py,sha256=wYPf1_55PW_x6ifSsgXTKVNbTHU_31IumVpUk1ua5dY,2172
-warp_beacon/scrapler/__init__.py,sha256=ilydR4N5SPNYTCGqAHEeFkcXvHY466YNk8K-mLivh0Q,6100
-warp_beacon/scrapler/abstract.py,sha256=MJxpEovCWDYq2SwbbMsRDfp77WTwvbXXKiQxKWoj0ZQ,304
-warp_beacon/scrapler/instagram.py,sha256=8CF_Zdxn1hStz_PgLxTc0FTt5heI84d-Ks0XzmD7-_o,7248
-warp_beacon/storage/__init__.py,sha256=NhD3V7UNRiZNf61yQEAjXOfi-tfA2LaJa7a7kvbkmtE,2402
-warp_beacon/uploader/__init__.py,sha256=dz4If8kHTp_eWMm6v3Zp9lwZHR8eDNcNQRIooj9eGgc,3836
-warp_beacon-1.0.6.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-warp_beacon-1.0.6.dist-info/METADATA,sha256=a8hgXjbD2ZzU9JrhlXy95XrX62URZYX2vfVSns1zNBc,18201
-warp_beacon-1.0.6.dist-info/WHEEL,sha256=-oYQCr74JF3a37z2nRlQays_SX2MqOANoqVjBBAP2yE,91
-warp_beacon-1.0.6.dist-info/entry_points.txt,sha256=eSB61Rb89d56WY0O-vEIQwkn18J-4CMrJcLA_R_8h3g,119
-warp_beacon-1.0.6.dist-info/top_level.txt,sha256=VZcz1AU0_EdW3t-74hnT1eazFqSyWgGdF5divvARwmM,334
-warp_beacon-1.0.6.dist-info/RECORD,,
{warp_beacon-1.0.6.dist-info → warp_beacon-1.0.7.dist-info}/LICENSE
File without changes

{warp_beacon-1.0.6.dist-info → warp_beacon-1.0.7.dist-info}/entry_points.txt
File without changes