warp-beacon 2.6.96-py3-none-any.whl → 2.6.97-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- warp_beacon/__version__.py +1 -1
- warp_beacon/scheduler/instagram_human.py +134 -29
- warp_beacon/scraper/instagram/instagram.py +5 -1
- warp_beacon/scraper/instagram/wb_instagrapi.py +32 -2
- {warp_beacon-2.6.96.dist-info → warp_beacon-2.6.97.dist-info}/METADATA +1 -1
- {warp_beacon-2.6.96.dist-info → warp_beacon-2.6.97.dist-info}/RECORD +10 -10
- {warp_beacon-2.6.96.dist-info → warp_beacon-2.6.97.dist-info}/WHEEL +1 -1
- {warp_beacon-2.6.96.dist-info → warp_beacon-2.6.97.dist-info}/entry_points.txt +0 -0
- {warp_beacon-2.6.96.dist-info → warp_beacon-2.6.97.dist-info}/licenses/LICENSE +0 -0
- {warp_beacon-2.6.96.dist-info → warp_beacon-2.6.97.dist-info}/top_level.txt +0 -0
warp_beacon/__version__.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "2.6.96"
+__version__ = "2.6.97"
warp_beacon/scheduler/instagram_human.py
CHANGED
@@ -1,5 +1,6 @@
 import time
 import random
+from typing import Optional
 from datetime import datetime
 
 import logging
@@ -8,33 +9,109 @@ from instagrapi.types import UserShort
 from warp_beacon.scraper.instagram.instagram import InstagramScraper
 
 class InstagramHuman(object):
-    scrapler = None
     default_profiles = ["nasa", "natgeo", "9gag", "spotify", "nba"]
-    operations_count = 0
 
     def __init__(self, scrapler: InstagramScraper) -> None:
         self.scrapler = scrapler
         self.operations_count = 0
 
+    def browse_timeline(self) -> Optional[dict]:
+        feed = None
+        items = []
+        try:
+            reason = random.choice(["cold_start_fetch", "pull_to_refresh"])
+            feed = self.scrapler.cl.get_timeline_feed(reason=reason)
+            self.operations_count += 1
+            items = feed.get("feed_items", [])
+        except Exception as e:
+            logging.warning("Failed to get timeline feed!", exc_info=e)
+            return
+
+        seen = []
+        if items:
+            for item in items:
+                media = item.get("media_or_ad")
+                logging.info("Item content: %s", media)
+                if not media:
+                    continue
+                media_id = media.get("id")
+                #user_id = media.get("user", {}).get("pk")
+
+                if media_id:
+                    seen.append(str(media_id))
+                if random.random() < 0.5:
+                    try:
+                        #self.scrapler.cl.media_like(media_id)
+                        self.scrapler.cl.media_comments(media_id)
+                        self.operations_count += 1
+                    except Exception as e:
+                        logging.warning("Failed to see comments to media '%s'", media_id, exc_info=e)
+                self.random_pause()
+
+        if seen:
+            try:
+                self.scrapler.cl.media_seen(seen)
+                self.operations_count += 1
+            except Exception as e:
+                logging.warning("Failed to mark timeline feed as seen", exc_info=e)
+
+        return feed
+
+    def watch_stories(self) -> None:
+        logging.info("Simulating stories watch ...")
+        stories = None
+        try:
+            stories = self.scrapler.cl.user_stories_v1(self.scrapler.cl.user_id)
+            self.operations_count += 1
+        except Exception as e:
+            logging.warning("Failed to get user stories!", exc_info=e)
+
+        if not stories:
+            return
+
+        seen = []
+        for m in stories[:random.randint(1, len(stories))]:
+            try:
+                logging.info("Wathing story with pk '%s'", str(m.pk))
+                seen.append(str(m.id))
+                self.random_pause()
+            except Exception as e:
+                logging.warning("Exception while watching content", exc_info=e)
+
+        if seen:
+            try:
+                self.scrapler.cl.media_seen(seen)
+                self.operations_count += 1
+                logging.info("Marked %d stories as seen", len(seen))
+            except Exception as e:
+                logging.warning("Failed to mark seen watched watch stories!", exc_info=e)
+
     def watch_content(self, media: list) -> None:
         if not media:
             return
+        seen = []
         for m in media[:random.randint(1, len(media))]:
             try:
                 logging.info("Wathing content with pk '%s'", str(m.pk))
                 content = self.scrapler.cl.media_info_v1(m.pk)
+                seen.append(str(content.id))
                 logging.info("Watched content with id '%s'", str(content.pk))
                 self.operations_count += 1
                 self.random_pause()
             except Exception as e:
                 logging.warning("Exception while watching content")
                 logging.exception(e)
+        try:
+            self.scrapler.cl.media_seen(seen)
+        except Exception as e:
+            logging.warning("Failed to mark seen watched videos!", exc_info=e)
 
     def scroll_content(self, last_pk: int) -> None:
         timeline_initialized = False
         if random.random() > 0.5:
             timeline_initialized = True
-            self.scrapler.timeline_cursor = self.scrapler.download_hndlr(self.scrapler.cl.get_timeline_feed, reason="cold_start_fetch")
+            #self.scrapler.timeline_cursor = self.scrapler.download_hndlr(self.scrapler.cl.get_timeline_feed, reason="cold_start_fetch")
+            self.scrapler.timeline_cursor = self.browse_timeline()
         logging.info("Starting to watch related reels with media_pk '%d'", last_pk)
         media = self.scrapler.download_hndlr(self.scrapler.cl.reels, amount=random.randint(4, 10), last_media_pk=last_pk)
         self.operations_count += 1
@@ -43,7 +120,8 @@ class InstagramHuman(object):
         if random.random() > 0.7:
             time.sleep(random.uniform(2, 20))
         if not timeline_initialized:
-            self.scrapler.timeline_cursor = self.scrapler.download_hndlr(self.scrapler.cl.get_timeline_feed, reason="cold_start_fetch")
+            #self.scrapler.timeline_cursor = self.scrapler.download_hndlr(self.scrapler.cl.get_timeline_feed, reason="cold_start_fetch")
+            self.scrapler.timeline_cursor = self.browse_timeline()
         logging.info("Starting to explore reels with media_pk '%d'", last_pk)
         media = self.scrapler.download_hndlr(self.scrapler.cl.explore_reels, amount=random.randint(4, 10), last_media_pk=last_pk)
         self.operations_count += 1
@@ -65,15 +143,13 @@ class InstagramHuman(object):
     def morning_routine(self) -> None:
         try:
             logging.info("Starting morning activity simulation")
-            self.scrapler.timeline_cursor = self.scrapler.download_hndlr(self.scrapler.cl.get_timeline_feed, "pull_to_refresh", self.scrapler.timeline_cursor.get("next_max_id"))
-            self.operations_count += 1
+            #self.scrapler.timeline_cursor = self.scrapler.download_hndlr(self.scrapler.cl.get_timeline_feed, "pull_to_refresh", self.scrapler.timeline_cursor.get("next_max_id"))
+            #self.operations_count += 1
+            self.scrapler.timeline_cursor = self.browse_timeline()
             time.sleep(random.uniform(3, 7))
             if random.random() > 0.5:
-
-
-                self.operations_count += 1
-                time.sleep(random.uniform(2, 5))
-            if random.random() > 0.3:
+                self.check_direct()
+            if random.random() > 0.6:
                 self.scrapler.download_hndlr(self.scrapler.cl.notification_like_and_comment_on_photo_user_tagged, "everyone")
                 self.operations_count += 1
                 self.random_pause()
@@ -82,6 +158,8 @@ class InstagramHuman(object):
                 self.scrapler.download_hndlr(self.scrapler.cl.get_reels_tray_feed, "pull_to_refresh")
                 self.operations_count += 1
                 self.random_pause()
+            if random.random() > 0.4:
+                self.watch_stories()
             if random.random() > 0.8:
                 self.profile_view()
         except Exception as e:
@@ -91,13 +169,19 @@ class InstagramHuman(object):
     def daytime_routine(self) -> None:
         try:
             logging.info("Starting day fast check activity simulation")
-            self.scrapler.download_hndlr(self.scrapler.cl.get_timeline_feed, "pull_to_refresh")
-            self.operations_count += 1
+            #self.scrapler.download_hndlr(self.scrapler.cl.get_timeline_feed, "pull_to_refresh")
+            #self.operations_count += 1
+            self.browse_timeline()
             time.sleep(random.uniform(2, 5))
             if random.random() > 0.5:
                 self.scrapler.download_hndlr(self.scrapler.cl.get_reels_tray_feed, "pull_to_refresh")
                 self.operations_count += 1
                 self.random_pause()
+
+            if random.random() > 0.4:
+                self.watch_stories()
+                self.random_pause()
+
             if random.random() > 0.4:
                 logging.info("Watching reels ...")
                 reels = self.scrapler.download_hndlr(self.scrapler.cl.reels, amount=random.randint(4, 15))
@@ -111,21 +195,18 @@ class InstagramHuman(object):
     def evening_routine(self) -> None:
         try:
             logging.info("Starting evening active user simulation")
-            self.
-            self.operations_count += 1
+            self.browse_timeline()
             time.sleep(random.uniform(2, 5))
-            self.scrapler.download_hndlr(self.scrapler.cl.get_reels_tray_feed, "pull_to_refresh")
-            self.operations_count += 1
-            time.sleep(random.uniform(2, 5))
-            if random.random() > 0.5:
-                self.scrapler.download_hndlr(self.scrapler.cl.direct_active_presence)
-                self.operations_count += 1
-                time.sleep(random.uniform(2, 5))
             if random.random() > 0.5:
+                self.check_direct()
+            if random.random() > 0.6:
                 logging.info("Checking notifications, tags ...")
                 self.scrapler.download_hndlr(self.scrapler.cl.notification_like_and_comment_on_photo_user_tagged, "everyone")
                 self.operations_count += 1
                 self.random_pause()
+            if random.random() > 0.4:
+                self.watch_stories()
+                self.random_pause()
             if random.random() > 0.4:
                 logging.info("Watching reels ...")
                 reels = self.scrapler.download_hndlr(self.scrapler.cl.reels, amount=random.randint(4, 10))
@@ -144,9 +225,10 @@ class InstagramHuman(object):
         try:
             logging.info("Starting night activity simulation")
             if random.random() > 0.7:
-                self.
-
-                self.
+                self.check_direct()
+            if random.random() > 0.7:
+                self.watch_stories()
+                self.random_pause()
             if random.random() > 0.5:
                 logging.info("Watching reels ...")
                 reels = self.scrapler.download_hndlr(self.scrapler.cl.reels, amount=random.randint(4, 15))
@@ -162,6 +244,30 @@ class InstagramHuman(object):
         logging.info("Pause for '%.2f' sec ...", round(pause, 2))
         time.sleep(pause)
 
+    def check_direct(self) -> None:
+        logging.info("Checking direct ...")
+        self.scrapler.download_hndlr(self.scrapler.cl.direct_active_presence)
+        self.operations_count += 1
+        self.random_pause()
+        threads = self.scrapler.download_hndlr(self.scrapler.cl.direct_threads, amount=random.randint(3, 7))
+        self.operations_count += 1
+        for thread in threads:
+            try:
+                messages = self.scrapler.cl.direct_messages(thread.id, amount=random.randint(5, 15))
+                self.operations_count += 1
+                if not messages:
+                    continue
+                msg_sample = random.sample(messages, k=random.randint(1, min(len(messages), 5)))
+                for msg in msg_sample:
+                    if random.random() < 0.85:
+                        self.scrapler.cl.direct_message_seen(msg.thread_id, msg.id)
+                        self.operations_count += 1
+                        self.random_pause()
+                self.random_pause()
+            except Exception as e:
+                logging.warning("Failed to read thread %s", thread.id)
+                logging.exception(e)
+
     def profile_view(self) -> None:
         try:
             logging.info("profile_view ...")
@@ -190,11 +296,10 @@ class InstagramHuman(object):
             self.operations_count += 1
             self.random_pause()
 
+            #self.scrapler.cl.explore_page_media_info
+
             if random.random() > 0.5:
-
-                self.scrapler.download_hndlr(self.scrapler.cl.direct_active_presence)
-                self.operations_count += 1
-                self.random_pause()
+                self.check_direct()
 
             if random.random() > 0.3:
                 logging.info("Checking notifications, tags ...")
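The hunks above add three reusable behaviors (browse_timeline, watch_stories, check_direct) and reroute the morning, daytime, evening and night routines through them instead of raw get_timeline_feed and direct_active_presence calls. Below is a minimal sketch of how a caller could drive these routines; it is illustrative only, not code from the package. The hour-based dispatch is an assumption (the real scheduling lives in warp_beacon/scheduler/scheduler.py, which is unchanged in this release), and the night-time method name night_routine is inferred from the "Starting night activity simulation" hunk.

from datetime import datetime

from warp_beacon.scraper.instagram.instagram import InstagramScraper
from warp_beacon.scheduler.instagram_human import InstagramHuman


def simulate_activity(scraper: InstagramScraper) -> int:
    """Run the routine matching the current hour; returns the number of API calls made."""
    human = InstagramHuman(scraper)
    hour = datetime.now().hour
    if 6 <= hour < 12:
        human.morning_routine()
    elif 12 <= hour < 18:
        human.daytime_routine()
    elif 18 <= hour < 23:
        human.evening_routine()
    else:
        human.night_routine()  # assumed name; only the method body appears in this diff
    return human.operations_count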
warp_beacon/scraper/instagram/instagram.py
CHANGED
@@ -282,7 +282,7 @@ class InstagramScraper(ScraperAbstract):
         if len(st_parts) > 1:
             effective_story_id = st_parts[0]
         logging.info("Effective story id is '%s'", effective_story_id)
-        effective_url = "https://www.instagram.com/stories
+        effective_url = f"https://www.instagram.com/stories/{story_info.user.username}/{effective_story_id}/"
         if story_info.media_type == 1: # photo
             path = str(self.download_hndlr(self.cl.story_download_by_url, url=story_info.thumbnail_url, folder='/tmp'))
             path_lowered = path.lower()
@@ -357,6 +357,10 @@ class InstagramScraper(ScraperAbstract):
                 res.append(self.download_photo(url=media_info.thumbnail_url, media_info=media_info))
             elif media_info.media_type == 8: # Album
                 res.append(self.download_album(media_info=media_info))
+            try:
+                self.cl.media_seen([media_info.id])
+            except Exception as e:
+                logging.warning("Failed to mark seen with id = '%s'", media_info.id, exc_info=e)
         elif scrap_type == "story":
             story_info = self.cl.story_info(media_id)
             logging.info("media_type for story is '%d'", story_info.media_type)
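The second instagram.py hunk adds a best-effort media_seen call after a post or album download, so a failed "seen" marker is logged but never aborts the download. The same pattern, pulled out into a standalone helper purely for illustration (a sketch, not package code; cl stands for an authenticated instagrapi Client):

import logging

from instagrapi import Client
from instagrapi.types import Media


def mark_seen_best_effort(cl: Client, media_info: Media) -> None:
    # Marking a download as "seen" is cosmetic, so failures are logged and swallowed.
    try:
        cl.media_seen([media_info.id])  # instagrapi expects a list of media ids
    except Exception as e:
        logging.warning("Failed to mark seen with id = '%s'", media_info.id, exc_info=e)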
warp_beacon/scraper/instagram/wb_instagrapi.py
CHANGED
@@ -1,11 +1,17 @@
 import logging
 from typing import Callable
+from copy import deepcopy
 from urllib.parse import urlparse, parse_qs, urlencode, urlunparse
 from pathlib import Path
 import requests
 
 from instagrapi import Client
-from instagrapi.
+from instagrapi.types import Media
+from instagrapi.exceptions import (
+    ClientError,
+    ClientLoginRequired,
+    VideoNotDownload
+)
 
 from warp_beacon.scraper.utils import ScraperUtils
 
@@ -160,4 +166,28 @@ class WBClient(Client):
             )
         except Exception as e:
             logging.warning("Progress callback raised an exception!", exc_info=e)
-        return path.resolve()
+        return path.resolve()
+
+    def media_info(self, media_pk: str, use_cache: bool = True) -> Media:
+        """
+        Get Media Information from PK
+
+        Parameters
+        ----------
+        media_pk: str
+            Unique identifier of the media
+        use_cache: bool, optional
+            Whether or not to use information from cache, default value is True
+
+        Returns
+        -------
+        Media
+            An object of Media type
+        """
+        media_pk = self.media_pk(media_pk)
+        if not use_cache or media_pk not in self._medias_cache:
+            media = self.media_info_v1(media_pk)
+            self._medias_cache[media_pk] = media
+        return deepcopy(
+            self._medias_cache[media_pk]
+        )  # return copy of cache (dict changes protection)
{warp_beacon-2.6.96.dist-info → warp_beacon-2.6.97.dist-info}/RECORD
CHANGED
@@ -4,7 +4,7 @@ var/warp_beacon/accounts.json,sha256=OsXdncs6h88xrF_AP6_WDCK1waGBn9SR-uYdIeK37GM
 var/warp_beacon/placeholder.gif,sha256=cE5CGJVaop4Sx21zx6j4AyoHU0ncmvQuS2o6hJfEH88,6064
 var/warp_beacon/proxies.json,sha256=VnjlQDXumOEq72ZFjbh6IqHS1TEHqn8HPYAZqWCeSIA,95
 warp_beacon/__init__.py,sha256=_rThNODmz0nDp_n4mWo_HKaNFE5jk1_7cRhHyYaencI,163
-warp_beacon/__version__.py,sha256=
+warp_beacon/__version__.py,sha256=TS8oRh0prYIEOMSCk2cuClcx0weLzgMtkvKhmNkLh6k,24
 warp_beacon/warp_beacon.py,sha256=ADCR30uGXIsDrt9WoiI9Ghu2QtWs0qZIK6x3pQKM_B4,1109
 warp_beacon/yt_auth.py,sha256=GUTKqYr_tzDC-07Lx_ahWXSag8EyLxXBUnQbDBIkEmk,6022
 warp_beacon/compress/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -20,7 +20,7 @@ warp_beacon/mediainfo/audio.py,sha256=ous88kwQj4bDIChN5wnGil5LqTs0IQHH0d-nyrL0-Z
 warp_beacon/mediainfo/silencer.py,sha256=qxMuViOoVwUYb60uCVvqHiGrqByR1_4_rqMT-XdMkwc,1813
 warp_beacon/mediainfo/video.py,sha256=UBZrhTN5IDI-aYu6tsJEILo9nFkjHhkldGVFmvV7tEI,2480
 warp_beacon/scheduler/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-warp_beacon/scheduler/instagram_human.py,sha256=
+warp_beacon/scheduler/instagram_human.py,sha256=BY2qtJE1QTr5oz1-8xC4wkqn9lU_tgy91OPM7QeVbBU,11169
 warp_beacon/scheduler/scheduler.py,sha256=Bf4sGXjX75Dox3q-yzUHhagtzUAj3hl5GzfnZya-_io,4995
 warp_beacon/scraper/__init__.py,sha256=k3M0X_m5f7b_DbBB3Ahk62ewEYr5AkqJL0PJXf0G4mI,20140
 warp_beacon/scraper/abstract.py,sha256=pWbaTu-gDZgi-iFjqMR_uGzPl5KLv-4gTdJ9w6cD4sk,3802
@@ -31,8 +31,8 @@ warp_beacon/scraper/link_resolver.py,sha256=Rc9ZuMyOo3iPywDHwjngy-WRQ2SXhJwxcg-5
 warp_beacon/scraper/utils.py,sha256=Kk_lDmdJiCSaHmOV80OFK05O1wRL2H0agH98JiO8dyg,1268
 warp_beacon/scraper/instagram/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 warp_beacon/scraper/instagram/captcha.py,sha256=9UYziuqB3Tsat_ET6ex-cnZDbi6yCnsXHSpmE8MuUHk,4651
-warp_beacon/scraper/instagram/instagram.py,sha256=
-warp_beacon/scraper/instagram/wb_instagrapi.py,sha256=
+warp_beacon/scraper/instagram/instagram.py,sha256=xDjKJa_TdXC-Xk8ji5oGKQmo7LrpRNSshLcuKuFJrCM,17863
+warp_beacon/scraper/instagram/wb_instagrapi.py,sha256=M5NCtLwdUvByjmDBZMWljgB275R0LSBFblsGpapluD0,5968
 warp_beacon/scraper/youtube/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 warp_beacon/scraper/youtube/abstract.py,sha256=AnKWFhfUvPikQ4tZflb0wvnZnkb8ums6eTZNx5iCbdM,15233
 warp_beacon/scraper/youtube/music.py,sha256=5AeSBQyUgVCJT2hoBCV2WvlyuV9US09SYJhmBG_P9F8,2755
@@ -52,9 +52,9 @@ warp_beacon/telegram/progress_file_reader.py,sha256=e3equyNKlKs764AD-iE9QRsh3YDH
 warp_beacon/telegram/types.py,sha256=Kvdng6uCF1HRoqQgGW1ZYYPJoVuYkFb-LDvMBbW5Hjk,89
 warp_beacon/telegram/utils.py,sha256=1Lq67aRylVJzbwSyvAgjPAGjJZFATkICvAj3TJGuJiM,4635
 warp_beacon/uploader/__init__.py,sha256=j3qcuKhpchseZLGzSsSiogqe6WdMbkK8d3I-ConhNRs,5687
-warp_beacon-2.6.
-warp_beacon-2.6.
-warp_beacon-2.6.
-warp_beacon-2.6.
-warp_beacon-2.6.
-warp_beacon-2.6.
+warp_beacon-2.6.97.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+warp_beacon-2.6.97.dist-info/METADATA,sha256=9H9zq1a_2KPH5HBpGZnMi4QHv38xQhZZhx0zAHA1SfE,23215
+warp_beacon-2.6.97.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+warp_beacon-2.6.97.dist-info/entry_points.txt,sha256=eSB61Rb89d56WY0O-vEIQwkn18J-4CMrJcLA_R_8h3g,119
+warp_beacon-2.6.97.dist-info/top_level.txt,sha256=5YQRN46STNg81V_3jdzZ6bftkMxhe1hTPSFvJugDu84,1405
+warp_beacon-2.6.97.dist-info/RECORD,,
Files without changes: entry_points.txt, licenses/LICENSE, top_level.txt (contents identical; only the dist-info directory name changes with the version bump).