cobweb-launcher 1.3.3__py3-none-any.whl → 1.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
cobweb/__init__.py CHANGED
@@ -1,2 +1,2 @@
1
- from .launchers import LauncherAir, LauncherPro, LauncherApi
1
+ from .launchers import LauncherPro, LauncherApi
2
2
  from .constant import CrawlerModel
@@ -1,88 +1,88 @@
1
- import time
2
-
3
- from cobweb.base import logger
4
- from cobweb.constant import LogTemplate
5
- from .launcher import Launcher, check_pause
6
-
7
-
8
- class LauncherAir(Launcher):
9
-
10
- # def _scheduler(self):
11
- # if self.start_seeds:
12
- # self.__LAUNCHER_QUEUE__['todo'].push(self.start_seeds)
13
-
14
- @check_pause
15
- def _insert(self):
16
- seeds = {}
17
- status = self.__LAUNCHER_QUEUE__['new'].length < self._new_queue_max_size
18
- for _ in range(self._new_queue_max_size):
19
- seed = self.__LAUNCHER_QUEUE__['new'].pop()
20
- if not seed:
21
- break
22
- seeds[seed.to_string] = seed.params.priority
23
- if seeds:
24
- self.__LAUNCHER_QUEUE__['todo'].push(seeds)
25
- if status:
26
- time.sleep(self._new_queue_wait_seconds)
27
-
28
- @check_pause
29
- def _delete(self):
30
- seeds = []
31
- status = self.__LAUNCHER_QUEUE__['done'].length < self._done_queue_max_size
32
-
33
- for _ in range(self._done_queue_max_size):
34
- seed = self.__LAUNCHER_QUEUE__['done'].pop()
35
- if not seed:
36
- break
37
- seeds.append(seed.to_string)
38
-
39
- if seeds:
40
- self._remove_doing_seeds(seeds)
41
-
42
- if status:
43
- time.sleep(self._done_queue_wait_seconds)
44
-
45
- def _polling(self):
46
-
47
- check_emtpy_times = 0
48
-
49
- while not self._stop.is_set():
50
-
51
- queue_not_empty_count = 0
52
- pooling_wait_seconds = 30
53
-
54
- for q in self.__LAUNCHER_QUEUE__.values():
55
- if q.length != 0:
56
- queue_not_empty_count += 1
57
-
58
- if queue_not_empty_count == 0:
59
- pooling_wait_seconds = 3
60
- if self._pause.is_set():
61
- check_emtpy_times = 0
62
- if not self._task_model:
63
- logger.info("Done! Ready to close thread...")
64
- self._stop.set()
65
- elif check_emtpy_times > 2:
66
- self.__DOING__ = {}
67
- self._pause.set()
68
- else:
69
- logger.info(
70
- "check whether the task is complete, "
71
- f"reset times {3 - check_emtpy_times}"
72
- )
73
- check_emtpy_times += 1
74
- elif self._pause.is_set():
75
- self._pause.clear()
76
- self._execute()
77
- else:
78
- logger.info(LogTemplate.launcher_air_polling.format(
79
- task=self.task,
80
- doing_len=len(self.__DOING__.keys()),
81
- todo_len=self.__LAUNCHER_QUEUE__['todo'].length,
82
- done_len=self.__LAUNCHER_QUEUE__['done'].length,
83
- upload_len=self.__LAUNCHER_QUEUE__['upload'].length,
84
- ))
85
-
86
- time.sleep(pooling_wait_seconds)
87
-
88
-
1
+ # import time
2
+ #
3
+ # from cobweb.base import logger
4
+ # from cobweb.constant import LogTemplate
5
+ # from .launcher import Launcher, check_pause
6
+ #
7
+ #
8
+ # class LauncherAir(Launcher):
9
+ #
10
+ # # def _scheduler(self):
11
+ # # if self.start_seeds:
12
+ # # self.__LAUNCHER_QUEUE__['todo'].push(self.start_seeds)
13
+ #
14
+ # @check_pause
15
+ # def _insert(self):
16
+ # seeds = {}
17
+ # status = self.__LAUNCHER_QUEUE__['new'].length < self._new_queue_max_size
18
+ # for _ in range(self._new_queue_max_size):
19
+ # seed = self.__LAUNCHER_QUEUE__['new'].pop()
20
+ # if not seed:
21
+ # break
22
+ # seeds[seed.to_string] = seed.params.priority
23
+ # if seeds:
24
+ # self.__LAUNCHER_QUEUE__['todo'].push(seeds)
25
+ # if status:
26
+ # time.sleep(self._new_queue_wait_seconds)
27
+ #
28
+ # @check_pause
29
+ # def _delete(self):
30
+ # seeds = []
31
+ # status = self.__LAUNCHER_QUEUE__['done'].length < self._done_queue_max_size
32
+ #
33
+ # for _ in range(self._done_queue_max_size):
34
+ # seed = self.__LAUNCHER_QUEUE__['done'].pop()
35
+ # if not seed:
36
+ # break
37
+ # seeds.append(seed.to_string)
38
+ #
39
+ # if seeds:
40
+ # self._remove_doing_seeds(seeds)
41
+ #
42
+ # if status:
43
+ # time.sleep(self._done_queue_wait_seconds)
44
+ #
45
+ # def _polling(self):
46
+ #
47
+ # check_emtpy_times = 0
48
+ #
49
+ # while not self._stop.is_set():
50
+ #
51
+ # queue_not_empty_count = 0
52
+ # pooling_wait_seconds = 30
53
+ #
54
+ # for q in self.__LAUNCHER_QUEUE__.values():
55
+ # if q.length != 0:
56
+ # queue_not_empty_count += 1
57
+ #
58
+ # if queue_not_empty_count == 0:
59
+ # pooling_wait_seconds = 3
60
+ # if self._pause.is_set():
61
+ # check_emtpy_times = 0
62
+ # if not self._task_model:
63
+ # logger.info("Done! Ready to close thread...")
64
+ # self._stop.set()
65
+ # elif check_emtpy_times > 2:
66
+ # self.__DOING__ = {}
67
+ # self._pause.set()
68
+ # else:
69
+ # logger.info(
70
+ # "check whether the task is complete, "
71
+ # f"reset times {3 - check_emtpy_times}"
72
+ # )
73
+ # check_emtpy_times += 1
74
+ # elif self._pause.is_set():
75
+ # self._pause.clear()
76
+ # self._execute()
77
+ # else:
78
+ # logger.info(LogTemplate.launcher_air_polling.format(
79
+ # task=self.task,
80
+ # doing_len=len(self.__DOING__.keys()),
81
+ # todo_len=self.__LAUNCHER_QUEUE__['todo'].length,
82
+ # done_len=self.__LAUNCHER_QUEUE__['done'].length,
83
+ # upload_len=self.__LAUNCHER_QUEUE__['upload'].length,
84
+ # ))
85
+ #
86
+ # time.sleep(pooling_wait_seconds)
87
+ #
88
+ #
@@ -1,161 +1,88 @@
1
1
  import time
2
- import threading
3
2
 
4
- from cobweb.db import ApiDB
5
- from cobweb.base import Seed, TaskQueue,logger, stop, pause
6
- from cobweb.constant import DealModel
3
+ from cobweb.base import TaskQueue, Decorators
4
+ from cobweb.schedulers import ApiScheduler
7
5
  from .launcher import Launcher
8
6
 
9
7
 
10
- class LauncherApi(Launcher):
8
+ class LauncherPro(Launcher):
11
9
 
12
10
  def __init__(self, task, project, custom_setting=None, **kwargs):
13
11
  super().__init__(task, project, custom_setting, **kwargs)
14
- self._db = ApiDB()
15
-
16
- self._todo_key = "{%s:%s}:todo" % (project, task)
17
- self._done_key = "{%s:%s}:done" % (project, task)
18
- self._fail_key = "{%s:%s}:fail" % (project, task)
19
- self._heartbeat_key = "heartbeat:%s_%s" % (project, task)
20
-
21
- self._statistics_done_key = "statistics:%s:%s:done" % (project, task)
22
- self._statistics_fail_key = "statistics:%s:%s:fail" % (project, task)
23
- self._speed_control_key = "speed_control:%s_%s" % (project, task)
24
-
25
- self._reset_lock_key = "lock:reset:%s_%s" % (project, task)
26
-
27
- self._heartbeat_start_event = threading.Event()
28
-
29
- @property
30
- def heartbeat(self):
31
- return self._db.exists(self._heartbeat_key)
32
-
33
- def statistics(self, key, count):
34
- if not self.task_model and not self._db.exists(key):
35
- self._db.setex(key, 86400 * 30, int(count))
36
- else:
37
- self._db.incrby(key, count)
38
-
39
- def _get_seed(self) -> Seed:
40
- """
41
- 从队列中获取种子(频控)
42
- 设置时间窗口为self._time_window(秒),判断在该窗口内的采集量是否满足阈值(self._spider_max_speed)
43
- :return: True -> 种子, False -> None
44
- """
45
- if TaskQueue.TODO.length and not self._db.auto_incr(
46
- self._speed_control_key,
47
- t=self.time_window,
48
- limit=self.spider_max_count
49
- ):
50
- expire_time = self._db.ttl(self._speed_control_key)
51
- logger.info(f"Too fast! Please wait {expire_time} seconds...")
52
- time.sleep(expire_time / 2)
53
- return None
54
- return TaskQueue.TODO.pop()
12
+ self._redis_download = "{%s:%s}:download" % (project, task)
13
+ self._redis_todo = "{%s:%s}:todo" % (project, task)
14
+ self._scheduler = ApiScheduler(task, project)
15
+
16
+ @Decorators.stop
17
+ def _schedule(self):
18
+ thread_sleep = self.scheduling_wait_time
19
+ for q, key, size, item_info in [
20
+ (TaskQueue.TODO, self._redis_todo, self.todo_queue_size, self._task_info["todo"]),
21
+ (TaskQueue.DOWNLOAD, self._redis_download, self.download_queue_size, self._task_info["download"]),
22
+ ]:
23
+ if q.length < size:
24
+ for member, priority in self._scheduler.schedule(key, self.scheduling_size):
25
+ q.push((member, priority), direct_insertion=True)
26
+ self.add_working_item(key.split(":")[-1], member, priority)
27
+ thread_sleep = 0.1
28
+ time.sleep(thread_sleep)
29
+
30
+ @Decorators.stop
31
+ def _heartbeat(self):
32
+ if self._scheduler.working.is_set():
33
+ self._scheduler.set_heartbeat()
34
+ time.sleep(3)
55
35
 
56
- @stop
36
+ @Decorators.stop
57
37
  def _reset(self):
58
- """
59
- 检查过期种子,重新添加到redis缓存中
60
- """
61
- if self._db.lock(self._reset_lock_key, t=120):
62
-
63
- _min = -int(time.time()) + self.seed_reset_seconds \
64
- if self.heartbeat else "-inf"
65
-
66
- self._db.members(self._todo_key, 0, _min=_min, _max="(0")
67
-
68
- if not self.heartbeat:
69
- self._heartbeat_start_event.set()
70
-
71
- self._db.delete(self._reset_lock_key)
72
-
38
+ self._scheduler.reset(
39
+ keys=[self._redis_todo, self._redis_download],
40
+ reset_time=self.seed_reset_seconds
41
+ )
73
42
  time.sleep(30)
74
43
 
75
- @stop
76
- def _refresh(self):
77
- """
78
- 刷新doing种子过期时间,防止reset重新消费
79
- """
80
- if self.doing_seeds:
81
- refresh_time = int(time.time())
82
- seeds = {k: -refresh_time - v / 1e3 for k, v in self.doing_seeds.items()}
83
- self._db.zadd(self._todo_key, item=seeds, xx=True)
84
- time.sleep(3)
85
-
86
- @stop
87
- def _scheduler(self):
88
- """
89
- 调度任务,获取redis队列种子,同时添加到doing字典中
90
- """
91
- if not self._db.zcount(self._todo_key, 0, "(1000"):
92
- time.sleep(self.scheduler_wait_seconds)
93
- elif TaskQueue.TODO.length >= self.todo_queue_size:
94
- time.sleep(self.todo_queue_full_wait_seconds)
95
- else:
96
- members = self._db.members(
97
- self._todo_key, int(time.time()),
98
- count=self.todo_queue_size,
99
- _min=0, _max="(1000"
100
- )
101
- for member, priority in members:
102
- seed = Seed(member, priority=priority)
103
- TaskQueue.TODO.push(seed)
104
- self.doing_seeds[seed.to_string] = seed.params.priority
105
-
106
- @pause
107
- def _heartbeat(self):
108
- if self._heartbeat_start_event.is_set():
109
- self._db.setex(self._heartbeat_key, t=5)
110
- time.sleep(3)
111
-
112
- @pause
44
+ @Decorators.pause
113
45
  def _insert(self):
114
- """
115
- 添加新种子到redis队列中
116
- """
117
- seeds = {}
118
- for _ in range(self.new_queue_max_size):
119
- if seed := TaskQueue.SEED.pop():
120
- seeds[seed.to_string] = seed.params.priority
121
- if seeds:
122
- self._db.zadd(self._todo_key, seeds, nx=True)
123
- if TaskQueue.SEED.length < self.new_queue_max_size:
124
- time.sleep(self.new_queue_wait_seconds)
125
-
126
- @pause
127
- def _delete(self):
128
- """
129
- 删除队列种子,根据状态添加至成功或失败队列,移除doing字典种子索引
130
- """
131
- seed_info = {"count": 0, "failed": [], "succeed": [], "common": []}
132
- status = TaskQueue.DONE.length < self.done_queue_max_size
133
-
134
- for _ in range(self.done_queue_max_size):
135
- seed = TaskQueue.DONE.pop()
136
- if not seed:
137
- break
138
- if seed.params.seed_status == DealModel.fail:
139
- seed_info["failed"].append(seed.to_string)
140
- elif self.done_model == 1:
141
- seed_info["succeed"].append(seed.to_string)
142
- else:
143
- seed_info["common"].append(seed.to_string)
144
- seed_info['count'] += 1
145
-
146
- if seed_info["count"]:
147
-
148
- succeed_count = int(self._db.zrem(self._todo_key, *seed_info["common"]) or 0)
149
- succeed_count += int(self._db.done([self._todo_key, self._done_key], *seed_info["succeed"]) or 0)
150
- failed_count = int(self._db.done([self._todo_key, self._fail_key], *seed_info["failed"]) or 0)
151
-
152
- if failed_count:
153
- self.statistics(self._statistics_fail_key, failed_count)
154
- if succeed_count:
155
- self.statistics(self._statistics_done_key, succeed_count)
156
-
157
- self._remove_doing_seeds(seed_info["common"] + seed_info["succeed"] + seed_info["failed"])
158
-
159
- if status:
160
- time.sleep(self.done_queue_wait_seconds)
161
-
46
+ thread_sleep = 0.1
47
+ for q, key, size in [
48
+ (TaskQueue.SEED, self._redis_todo, self.seed_queue_size),
49
+ (TaskQueue.REQUEST, self._redis_download, self.request_queue_size),
50
+ ]:
51
+ item_info = {}
52
+ while (item := q.pop()) and len(item_info.keys()) < self.inserting_size:
53
+ item_info[item.seed] = item.params.priority
54
+ if q.length >= size:
55
+ thread_sleep = self.inserting_wait_time
56
+ self._scheduler.insert(key, item_info)
57
+ time.sleep(thread_sleep)
58
+
59
+ @Decorators.pause
60
+ def _refresh(self):
61
+ self._scheduler.refresh(self._redis_todo, self._task_info["todo"])
62
+ self._scheduler.refresh(self._redis_download, self._task_info["download"])
63
+ time.sleep(10)
64
+
65
+ @Decorators.pause
66
+ def _remove(self):
67
+ thread_sleep = self.removing_wait_time
68
+ for q, key, size in [
69
+ (TaskQueue.DELETE, self._redis_todo, self.delete_queue_size),
70
+ (TaskQueue.DONE, self._redis_download, self.done_queue_size),
71
+ ]:
72
+ items = []
73
+ while (item := q.pop()) and len(items) < self.removing_size:
74
+ items.append(item)
75
+ self._scheduler.delete(key, items)
76
+ self.remove_working_items(key.split(":")[-1], items)
77
+ if q.length >= size:
78
+ thread_sleep = 0.1
79
+ time.sleep(thread_sleep)
80
+
81
+ def _init_schedule_thread(self):
82
+ self._add_thread(func=self._heartbeat)
83
+ self._add_thread(func=self._reset)
84
+ self._add_thread(func=self._refresh)
85
+ self._add_thread(func=self._schedule)
86
+ self._add_thread(func=self._insert)
87
+ self._add_thread(func=self._remove)
88
+ # self._add_thread(func=self._polling)
@@ -1 +1,3 @@
1
1
  from .scheduler_redis import RedisScheduler
2
+ from .scheduler_api import ApiScheduler
3
+
@@ -0,0 +1,69 @@
1
+ import threading
2
+ import time
3
+
4
+ # from cobweb.base import Seed
5
+ from cobweb.db import ApiDB
6
+
7
+
8
+ class ApiScheduler:
9
+
10
+ def __init__(self, task, project, scheduler_wait_seconds=30):
11
+ self._todo_key = "{%s:%s}:todo" % (project, task)
12
+ self._download_key = "{%s:%s}:download" % (project, task)
13
+ self._heartbeat_key = "heartbeat:%s_%s" % (project, task)
14
+ self._speed_control_key = "speed_control:%s_%s" % (project, task)
15
+ self._reset_lock_key = "lock:reset:%s_%s" % (project, task)
16
+ self._db = ApiDB()
17
+
18
+ self.scheduler_wait_seconds = scheduler_wait_seconds
19
+ self.working = threading.Event()
20
+
21
+ @property
22
+ def heartbeat(self):
23
+ return self._db.exists(self._heartbeat_key)
24
+
25
+ def set_heartbeat(self):
26
+ return self._db.setex(self._heartbeat_key, 5)
27
+
28
+ def schedule(self, key, count):
29
+ if not self._db.zcount(key, 0, "(1000"):
30
+ time.sleep(self.scheduler_wait_seconds)
31
+ else:
32
+ source = int(time.time())
33
+ members = self._db.members(key, source, count=count, _min=0, _max="(1000")
34
+ for member, priority in members:
35
+ # seed = Seed(member, priority=priority)
36
+ yield member.decode(), priority
37
+
38
+ def insert(self, key, items):
39
+ if items:
40
+ self._db.zadd(key, items, nx=True)
41
+
42
+ def reset(self, keys, reset_time=30):
43
+ if self._db.lock(self._reset_lock_key, t=120):
44
+
45
+ if isinstance(keys, str):
46
+ keys = [keys]
47
+
48
+ _min = reset_time - int(time.time()) if self.heartbeat else "-inf"
49
+
50
+ for key in keys:
51
+ self._db.members(key, 0, _min=_min, _max="(0")
52
+
53
+ if not self.heartbeat:
54
+ self.working.set()
55
+ time.sleep(10)
56
+
57
+ self._db.delete(self._reset_lock_key)
58
+
59
+ def refresh(self, key, items: dict[str, int]):
60
+ refresh_time = int(time.time())
61
+ its = {k: -refresh_time - v / 1000 for k, v in items}
62
+ self._db.zadd(key, item=its, xx=True)
63
+
64
+ def delete(self, key, values):
65
+ self._db.zrem(key, *values)
66
+
67
+
68
+
69
+
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: cobweb-launcher
3
- Version: 1.3.3
3
+ Version: 1.3.4
4
4
  Summary: spider_hole
5
5
  Home-page: https://github.com/Juannie-PP/cobweb
6
6
  Author: Juannie-PP
@@ -1,4 +1,4 @@
1
- cobweb/__init__.py,sha256=CBd2oByCfc5EmH2dCZYVHkxXYZG-oWrLyTtZU5sEoP0,96
1
+ cobweb/__init__.py,sha256=oaEfsGUuGP0s39UbFRwrnsjMUeuB6QvQIAwStKFyUTk,83
2
2
  cobweb/constant.py,sha256=eofONAntk9O6S-cb4KbYGYHL_u7nBlOqqFOw_HzJHAU,3588
3
3
  cobweb/setting.py,sha256=pY6LKsgWI3164GiGA1z_y26LVf5-3mpiEgmm86mKRdY,3135
4
4
  cobweb/base/__init__.py,sha256=Na385Hhl9l2S8aPhcdJVPjmb02wkVM969bWQ84bCSQs,5095
@@ -22,8 +22,8 @@ cobweb/exceptions/__init__.py,sha256=E9SHnJBbhD7fOgPFMswqyOf8SKRDrI_i25L0bSpohvk
22
22
  cobweb/exceptions/oss_db_exception.py,sha256=iP_AImjNHT3-Iv49zCFQ3rdLnlvuHa3h2BXApgrOYpA,636
23
23
  cobweb/launchers/__init__.py,sha256=uzfPkLbY2m0wsIR_s93VFxmO0U49GgUG7hXPzMYdye0,118
24
24
  cobweb/launchers/launcher.py,sha256=NFwpc_0Um0hbDm1A8glWA4fcW6mNYL1eon4t3JAQUlw,7411
25
- cobweb/launchers/launcher_air.py,sha256=KAk_M8F3029cXYe7m4nn3Nzyi89lbxJ2cqZjqW8iZ0E,2832
26
- cobweb/launchers/launcher_api.py,sha256=qPazoC7U-UmgebbiTkhl6f4yQmN34XMl6HawekhAhEo,5789
25
+ cobweb/launchers/launcher_air.py,sha256=yPr395HVIIHAq6lqRcYJu7c0KkfO9V8O-2sn0hC96p0,2990
26
+ cobweb/launchers/launcher_api.py,sha256=c0bnnZQCqkk_cX-WyFsjc6jpliCwZCuAJeGAvUATODk,3370
27
27
  cobweb/launchers/launcher_pro.py,sha256=2H-TcvQx-ga78GLNTa-GXMLYAj9nEeCJSWf8xl-1ISQ,3374
28
28
  cobweb/pipelines/__init__.py,sha256=zSUsGtx6smbs2iXBXvYynReKSgky-3gjqaAtKVnA_OU,105
29
29
  cobweb/pipelines/base_pipeline.py,sha256=fYnWf79GmhufXpcnMa3te18SbmnVeYLwxfyo-zLd9CY,1577
@@ -31,7 +31,8 @@ cobweb/pipelines/loghub_pipeline.py,sha256=cjPO6w6UJ0jNw2fVvdX0BCdlm58T7dmYXlxzX
31
31
  cobweb/pipelines/pipeline.py,sha256=Pycm22bHId9a3gdP81D5y7SsuMndYooTb5n4zQxP7dM,1321
32
32
  cobweb/pipelines/pipeline_console.py,sha256=NEh-4zhuVAQOqwXLsqeb-rcNZ9_KXFUpL3otUTL5qBs,754
33
33
  cobweb/pipelines/pipeline_loghub.py,sha256=xZ6D55BGdiM71WUv83jyLGbEyUwhBHLJRZoXthBxxTs,1019
34
- cobweb/schedulers/__init__.py,sha256=WJgBXsPE8zpJQ9L_-bHIUpBcaB2G4HmutDdWF3ud1Bs,44
34
+ cobweb/schedulers/__init__.py,sha256=y7Lv_7b0zfTl0OhIONb_8u1K1C9gVlBA-xz_XG_kI9g,85
35
+ cobweb/schedulers/scheduler_api.py,sha256=mC54QOS0PEu4SFvxfD5Qr9239hAxwMrKTg-33rirANE,2112
35
36
  cobweb/schedulers/scheduler_redis.py,sha256=Aw7de0sXigRAxJgqUhHWu30hMBzgEWjkj-3OXXqmldg,2118
36
37
  cobweb/utils/__init__.py,sha256=Ev2LZZ1-S56iQYDqFZrqadizEv4Gk8Of-DraH-_WnKY,109
37
38
  cobweb/utils/bloom.py,sha256=vng-YbKgh9HbtpAWYf_nkUSbfVTOj40aqUUejRYlsCU,1752
@@ -103,8 +104,8 @@ cobweb_new/utils/__init__.py,sha256=c9macpjc15hrCUCdzO5RR_sgK_B9kvJKreSGprZ1ld4,
103
104
  cobweb_new/utils/bloom.py,sha256=vng-YbKgh9HbtpAWYf_nkUSbfVTOj40aqUUejRYlsCU,1752
104
105
  cobweb_new/utils/oss.py,sha256=gyt8-UB07tVphZLQXMOf-JTJwU-mWq8KZkOXKkAf3uk,3513
105
106
  cobweb_new/utils/tools.py,sha256=5JEaaAwYoV9Sdla2UBIJn6faUBuXmxUMagm9ck6FVqs,1253
106
- cobweb_launcher-1.3.3.dist-info/LICENSE,sha256=z1rxSIGOyzcSb3orZxFPxzx-0C1vTocmswqBNxpKfEk,1063
107
- cobweb_launcher-1.3.3.dist-info/METADATA,sha256=1varWBOrUSs3SurWiD7VJHrkeocOPxfprgFx214HuoI,6509
108
- cobweb_launcher-1.3.3.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
109
- cobweb_launcher-1.3.3.dist-info/top_level.txt,sha256=A0GPGeX6QtxXg7AJno3SVRTHtVCCqeRIOrpwDoXg9qs,15
110
- cobweb_launcher-1.3.3.dist-info/RECORD,,
107
+ cobweb_launcher-1.3.4.dist-info/LICENSE,sha256=z1rxSIGOyzcSb3orZxFPxzx-0C1vTocmswqBNxpKfEk,1063
108
+ cobweb_launcher-1.3.4.dist-info/METADATA,sha256=AMoOPhP8ILf0uUUBrQUpn3_-S3qF-_-ef-_DSPQgJZA,6509
109
+ cobweb_launcher-1.3.4.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
110
+ cobweb_launcher-1.3.4.dist-info/top_level.txt,sha256=A0GPGeX6QtxXg7AJno3SVRTHtVCCqeRIOrpwDoXg9qs,15
111
+ cobweb_launcher-1.3.4.dist-info/RECORD,,