saq 0.23.0__py3-none-any.whl → 0.24.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only.
saq/__init__.py CHANGED
@@ -14,4 +14,4 @@ __all__ = [
     "Worker",
 ]
 
-__version__ = "0.23.0"
+__version__ = "0.24.0"
saq/job.py CHANGED
@@ -139,12 +139,19 @@ class Job:
 
     _EXCLUDE_NON_FULL = {
         "kwargs",
+        "timeout",
+        "heartbeat",
+        "retries",
+        "ttl",
+        "retry_delay",
+        "retry_backoff",
         "scheduled",
         "progress",
-        "total_ms",
         "result",
         "error",
         "status",
+        "priority",
+        "group_key",
         "meta",
     }
 
@@ -156,28 +163,23 @@ class Job:
             full: If true, will list the full kwargs for the Job, else an abridged version.
         """
         # Using an exclusion list preserves order for kwargs below
-        excluded = set() if full else self._EXCLUDE_NON_FULL
-        kwargs = ", ".join(
-            f"{k}={v}"
-            for k, v in {
-                "function": self.function,
-                "kwargs": self.kwargs,
-                "queue": self.get_queue().name,
-                "id": self.id,
-                "scheduled": self.scheduled,
-                "progress": self.progress,
-                "process_ms": self.duration("process"),
-                "start_ms": self.duration("start"),
-                "total_ms": self.duration("total"),
-                "attempts": self.attempts,
-                "result": self.result,
-                "error": self.error,
-                "status": self.status,
-                "meta": self.meta,
-            }.items()
-            if v is not None and k not in excluded
-        )
-        return f"Job<{kwargs}>"
+
+        kwargs = {}
+
+        for field in dataclasses.fields(self):
+            key = field.name
+            value = getattr(self, key)
+            if (full or key not in self._EXCLUDE_NON_FULL) and value != field.default:
+                kwargs[key] = value
+
+        if "queue" in kwargs:
+            kwargs["queue"] = kwargs["queue"].name
+
+        if not kwargs.get("meta"):
+            kwargs.pop("meta", None)
+
+        info = ", ".join(f"{k}: {v}" for k, v in kwargs.items())
+        return f"Job<{info}>"
 
     def __repr__(self) -> str:
         return self.info(True)
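
The rewritten info() above derives its output generically: it walks dataclasses.fields(self) and keeps only fields whose value differs from the declared default. A minimal standalone sketch of that pattern, using a hypothetical Point dataclass rather than saq's Job:

    import dataclasses

    @dataclasses.dataclass
    class Point:
        x: int = 0
        y: int = 0
        label: str = ""

    def info(obj: object) -> str:
        # Keep only fields whose current value differs from the declared default.
        shown = {
            f.name: getattr(obj, f.name)
            for f in dataclasses.fields(obj)
            if getattr(obj, f.name) != f.default
        }
        return f"{type(obj).__name__}<{', '.join(f'{k}: {v}' for k, v in shown.items())}>"

    print(info(Point(x=3)))  # Point<x: 3>
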
saq/queue/base.py CHANGED
@@ -110,7 +110,6 @@ class Queue(ABC):
     async def sweep(self, lock: int = 60, abort: float = 5.0) -> list[str]:
         pass
 
-    @abstractmethod
     async def notify(self, job: Job) -> None:
         pass
 
@@ -354,7 +353,18 @@ class Queue(ABC):
         job_keys: Iterable[str],
         callback: ListenCallback,
         timeout: float | None = 10,
+        poll_interval: float = 0.5,
     ) -> None:
+        """
+        Listen to updates on jobs.
+
+        Args:
+            job_keys: sequence of job keys
+            callback: callback function, if it returns truthy, break
+            timeout: if timeout is truthy, wait for timeout seconds
+            poll_interval: number of seconds in between poll attempts if needed
+        """
+
         async def listen() -> None:
             while True:
                 for job in await self.jobs(job_keys):
@@ -366,7 +376,7 @@ class Queue(ABC):
                     stop = callback(job.id, job.status)
                     if stop:
                         return
-                await asyncio.sleep(1)
+                await asyncio.sleep(poll_interval)
 
         if timeout:
             await asyncio.wait_for(listen(), timeout)
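
A hedged usage sketch of the new poll_interval parameter on Queue.listen; queue and keys are assumed to exist, and the callback signature matches the callback(job.id, job.status) call above:

    from saq.job import Status

    async def wait_for_completion(queue, keys):  # hypothetical helper
        def on_update(job_id: str, status: Status) -> bool:
            # Returning truthy stops the listener.
            return status == Status.COMPLETE

        # Poll every 100ms instead of the previously hard-coded 1s.
        await queue.listen(keys, on_update, timeout=10, poll_interval=0.1)
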
@@ -404,6 +414,7 @@ class Queue(ABC):
         iter_kwargs: Sequence[dict[str, t.Any]],
         timeout: float | None = None,
         return_exceptions: bool = False,
+        poll_interval: float = 0.5,
         **kwargs: t.Any,
     ) -> list[t.Any]:
         """
@@ -431,6 +442,7 @@ class Queue(ABC):
             return_exceptions: If False (default), an exception is immediately raised as soon as any jobs
                 fail. Other jobs won't be cancelled and will continue to run.
                 If True, exceptions are treated the same as successful results and aggregated in the result list.
+            poll_interval: number of seconds in between poll attempts
             kwargs: Default kwargs for all jobs. These will be overridden by those in iter_kwargs.
         """
         iter_kwargs = [
@@ -442,46 +454,24 @@ class Queue(ABC):
             }
             for kw in iter_kwargs
         ]
-        job_keys = [key["key"] for key in iter_kwargs]
-        pending_job_keys = set(job_keys)
-
-        def callback(job_key: str, status: Status) -> bool:
-            if status in TERMINAL_STATUSES:
-                pending_job_keys.discard(job_key)
-
-            if status in UNSUCCESSFUL_TERMINAL_STATUSES and not return_exceptions:
-                return True
-
-            if not pending_job_keys:
-                return True
 
-            return False
-
-        # Start listening before we enqueue the jobs.
-        # This ensures we don't miss any updates.
-        task = asyncio.create_task(self.listen(pending_job_keys, callback, timeout=None))
-
-        try:
-            await asyncio.gather(*(self.enqueue(job_or_func, **kw) for kw in iter_kwargs))
-        except Exception:
-            task.cancel()
-            raise
-
-        await asyncio.wait_for(task, timeout=timeout)
-
-        results = []
-
-        for job in await self.jobs(job_keys):
-            if job is None:
-                continue
-            if job.status in UNSUCCESSFUL_TERMINAL_STATUSES:
-                exc = JobError(job)
-                if not return_exceptions:
-                    raise exc
-                results.append(exc)
-            else:
-                results.append(job.result)
-        return results
+        await asyncio.gather(*(self.enqueue(job_or_func, **kw) for kw in iter_kwargs))
+        incomplete = object()
+        results = {key["key"]: incomplete for key in iter_kwargs}
+
+        while remaining := [k for k, v in results.items() if v is incomplete]:
+            for key, job in zip(remaining, await self.jobs(remaining)):
+                if not job:
+                    results[key] = None
+                elif job.status in UNSUCCESSFUL_TERMINAL_STATUSES:
+                    exc = JobError(job)
+                    if not return_exceptions:
+                        raise exc
+                    results[key] = exc
+                elif job.status in TERMINAL_STATUSES:
+                    results[key] = job.result
+            await asyncio.sleep(poll_interval)
+        return list(results.values())
 
     @asynccontextmanager
     async def batch(self) -> AsyncIterator[None]:
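
With the listener-based implementation gone, map() now enqueues everything up front and polls jobs() until each key reaches a terminal status. A hedged usage sketch, assuming a task named "add" is registered with the worker and queue is an already-connected saq queue:

    async def demo(queue):
        results = await queue.map(
            "add",
            [{"a": 1, "b": 2}, {"a": 3, "b": 4}],
            return_exceptions=True,  # failed jobs come back as JobError instead of raising
            poll_interval=0.25,      # seconds between status polls
        )
        # Results keep iter_kwargs order; jobs that disappear (e.g. swept) yield None.
        print(results)
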
saq/queue/postgres.py CHANGED
@@ -28,7 +28,6 @@ if t.TYPE_CHECKING:
 
     from saq.types import (
         CountKind,
-        ListenCallback,
         DumpType,
         LoadType,
         QueueInfo,
@@ -70,9 +69,6 @@ class PostgresQueue(Queue):
         max_size: maximum pool size. (default 20)
             If greater than 0, this limits the maximum number of connections to Postgres.
             Otherwise, maintain `min_size` number of connections.
-        poll_interval: how often to poll for jobs. (default 1)
-            If 0, the queue will not poll for jobs and will only rely on notifications from the server.
-            This mean cron jobs will not be picked up in a timely fashion.
         saq_lock_keyspace: The first of two advisory lock keys used by SAQ. (default 0)
             SAQ uses advisory locks for coordinating tasks between its workers, e.g. sweeping.
         job_lock_keyspace: The first of two advisory lock keys used for jobs. (default 1)
@@ -103,7 +99,6 @@ class PostgresQueue(Queue):
         load: LoadType | None = None,
         min_size: int = 4,
         max_size: int = 20,
-        poll_interval: int = 1,
         saq_lock_keyspace: int = 0,
         job_lock_keyspace: int = 1,
         job_lock_sweep: bool = True,
@@ -125,10 +120,12 @@ class PostgresQueue(Queue):
             check=AsyncConnectionPool.check_connection,
             open=False,
         )
+        if self.pool.kwargs.get("autocommit") is False:
+            raise ValueError("SAQ Connection pool must have autocommit enabled.")
+        self.pool.kwargs["autocommit"] = True
         self._is_pool_provided = pool is not None
        self.min_size = min_size
        self.max_size = max_size
-        self.poll_interval = poll_interval
        self.saq_lock_keyspace = saq_lock_keyspace
        self.job_lock_keyspace = job_lock_keyspace
        self.job_lock_sweep = job_lock_sweep
@@ -145,7 +142,7 @@ class PostgresQueue(Queue):
         self._connected = False
 
     async def init_db(self) -> None:
-        async with self.pool.connection() as conn, conn.cursor() as cursor, conn.transaction():
+        async with self._get_dequeue_conn() as conn, conn.cursor() as cursor, conn.transaction():
             await cursor.execute(
                 SQL("SELECT pg_try_advisory_lock(%(key1)s, 0)"),
                 {"key1": self.saq_lock_keyspace},
@@ -231,9 +228,12 @@ class PostgresQueue(Queue):
     async def disconnect(self) -> None:
         if not self._connected:
             return
+
         async with self._connection_lock:
             if self._dequeue_conn:
                 await self._dequeue_conn.cancel_safe()
+                async with self._dequeue_conn as conn:
+                    await conn.execute("SELECT pg_advisory_unlock_all()")
                 await self.pool.putconn(self._dequeue_conn)
                 self._dequeue_conn = None
         if not self._is_pool_provided:
@@ -359,7 +359,7 @@ class PostgresQueue(Queue):
 
         if not self._has_sweep_lock:
             # Attempt to get the sweep lock and hold on to it
-            async with self._get_dequeue_conn() as conn, conn.cursor() as cursor, conn.transaction():
+            async with self._get_dequeue_conn() as conn, conn.cursor() as cursor:
                 await cursor.execute(
                     SQL("SELECT pg_try_advisory_lock(%(key1)s, hashtext(%(queue)s))"),
                     {
@@ -446,6 +446,10 @@ class PostgresQueue(Queue):
                 continue
 
             swept.append(key)
+            logger.info(
+                "Sweeping %s, objid %s", job.info(logger.isEnabledFor(logging.DEBUG)), objid
+            )
+
             await self.abort(job, error=self.swept_error_message)
 
             try:
@@ -453,35 +457,12 @@ class PostgresQueue(Queue):
             except asyncio.TimeoutError:
                 logger.info("Could not abort job %s", key)
 
-            logger.info("Sweeping job %s", job.info(logger.isEnabledFor(logging.DEBUG)))
             if job.retryable:
                 await self.retry(job, error=self.swept_error_message)
             else:
                 await self.finish(job, Status.ABORTED, error=self.swept_error_message)
         return swept
 
-    async def listen(
-        self,
-        job_keys: Iterable[str],
-        callback: ListenCallback,
-        timeout: float | None = 10,
-    ) -> None:
-        if not job_keys:
-            return
-
-        async for message in self._listener.listen(*job_keys, timeout=timeout):
-            job_key = message["key"]
-            status = Status[message["data"].upper()]
-            if asyncio.iscoroutinefunction(callback):
-                stop = await callback(job_key, status)
-            else:
-                stop = callback(job_key, status)
-            if stop:
-                break
-
-    async def notify(self, job: Job, connection: AsyncConnection | None = None) -> None:
-        await self._notify(job.key, job.status, connection)
-
     async def _update(self, job: Job, status: Status | None = None, **kwargs: t.Any) -> None:
         expire_at = kwargs.pop("expire_at", -1)
         connection = kwargs.pop("connection", None)
@@ -518,7 +499,6 @@ class PostgresQueue(Queue):
                     "expire_at": expire_at,
                 },
             )
-            await self.notify(job, conn)
 
     async def job(self, job_key: str) -> Job | None:
         async with self.pool.connection() as conn, conn.cursor() as cursor:
@@ -629,7 +609,7 @@ class PostgresQueue(Queue):
         else:
             async with self._listen_lock:
                 async for payload in self._listener.listen(ENQUEUE, DEQUEUE, timeout=timeout):
-                    if payload["key"] == ENQUEUE:
+                    if payload == ENQUEUE:
                         await self._dequeue()
 
                     if not self._job_queue.empty():
@@ -650,7 +630,7 @@ class PostgresQueue(Queue):
             return
 
         async with self._dequeue_lock:
-            async with self._get_dequeue_conn() as conn, conn.cursor() as cursor, conn.transaction():
+            async with self._get_dequeue_conn() as conn, conn.cursor() as cursor:
                 if not self._waiting:
                     return
                 await cursor.execute(
@@ -838,14 +818,12 @@ class PostgresQueue(Queue):
     ) -> None:
         key = job.key
 
-        async with self.nullcontext(
-            connection
-        ) if connection else self.pool.connection() as conn, conn.cursor() as cursor:
+        async with self.nullcontext(connection) if connection else self.pool.connection() as conn:
             if job.ttl >= 0:
                 expire_at = now_seconds() + job.ttl if job.ttl > 0 else None
                 await self.update(job, status=status, expire_at=expire_at, connection=conn)
             else:
-                await cursor.execute(
+                await conn.execute(
                     SQL(
                         dedent(
                             """
@@ -856,22 +834,12 @@ class PostgresQueue(Queue):
                     ).format(jobs_table=self.jobs_table),
                     {"key": key},
                 )
-            await self.notify(job, conn)
         await self._release_job(key)
 
-    async def _notify(
-        self, key: str, data: t.Any | None = None, connection: AsyncConnection | None = None
-    ) -> None:
-        payload = {"key": key}
-
-        if data is not None:
-            payload["data"] = data
-
+    async def _notify(self, key: str, connection: AsyncConnection | None = None) -> None:
         async with self.nullcontext(connection) if connection else self.pool.connection() as conn:
             await conn.execute(
-                SQL("NOTIFY {channel}, {payload}").format(
-                    channel=Identifier(self._channel), payload=json.dumps(payload)
-                )
+                SQL("NOTIFY {channel}, {key}").format(channel=Identifier(self._channel), key=key)
             )
 
     @asynccontextmanager
@@ -917,7 +885,6 @@ class PostgresQueue(Queue):
                 ),
                 {"keys": self._releasing},
             )
-            await conn.commit()
             self._releasing.clear()
 
     @cached_property
@@ -934,8 +901,6 @@ class ListenMultiplexer(Multiplexer):
     async def _start(self) -> None:
         async with self.pool.connection() as conn:
             await conn.execute(SQL("LISTEN {}").format(Identifier(self.key)))
-            await conn.commit()
 
             async for notify in conn.notifies():
-                payload = json.loads(notify.payload)
-                self.publish(payload["key"], payload)
+                self.publish(notify.payload, notify.payload)
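
The NOTIFY payload is now the bare key string rather than a JSON document, so the multiplexer can publish notify.payload directly with no json.loads. A self-contained psycopg sketch of that wire format (DSN and channel name are placeholders, not saq's actual channel):

    import asyncio
    import psycopg

    async def main():
        async with await psycopg.AsyncConnection.connect(
            "postgres://localhost/saq_demo", autocommit=True
        ) as conn:
            await conn.execute('LISTEN "saq:demo:jobs"')
            await conn.execute("NOTIFY \"saq:demo:jobs\", 'job-key-123'")
            async for n in conn.notifies():
                print(n.payload)  # plain string: 'job-key-123'
                break

    asyncio.run(main())
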
saq/queue/redis.py CHANGED
@@ -365,15 +365,8 @@ class RedisQueue(Queue):
         job_keys: Iterable[str],
         callback: ListenCallback,
         timeout: float | None = 10,
+        poll_interval: float = 0.5,
     ) -> None:
-        """
-        Listen to updates on jobs.
-
-        Args:
-            job_keys: sequence of job keys
-            callback: callback function, if it returns truthy, break
-            timeout: if timeout is truthy, wait for timeout seconds
-        """
         job_ids = [self.job_id(job_key) for job_key in job_keys]
 
         if not job_ids:
saq/utils.py CHANGED
@@ -64,8 +64,12 @@ def exponential_backoff(
     return backoff
 
 
-async def cancel_tasks(tasks: Iterable[asyncio.Task]) -> None:
+async def cancel_tasks(tasks: Iterable[asyncio.Task], timeout: float = 1.0) -> None:
     """Cancel tasks and wait for all of them to finish"""
     for task in tasks:
         task.cancel()
-    await asyncio.gather(*tasks, return_exceptions=True)
+
+    try:
+        await asyncio.wait_for(asyncio.gather(*tasks, return_exceptions=True), timeout)
+    except (asyncio.TimeoutError, asyncio.CancelledError):
+        pass
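
cancel_tasks now bounds how long it waits for cancelled tasks to unwind. A small sketch of the effect on a task that swallows cancellation, which under the old unbounded gather could block forever:

    import asyncio
    from saq.utils import cancel_tasks

    async def main():
        async def stubborn():
            try:
                await asyncio.sleep(3600)
            except asyncio.CancelledError:
                # Misbehaving cleanup: keeps running after being cancelled.
                await asyncio.sleep(3600)

        task = asyncio.ensure_future(stubborn())
        await cancel_tasks([task], timeout=0.5)  # returns after ~0.5s instead of hanging

    asyncio.run(main())
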
saq/worker.py CHANGED
@@ -317,12 +317,12 @@ class Worker:
             else:
                 await job.retry("cancelled")
         except Exception as ex:
-            logger.exception("Error processing job %s", job)
-
             if context is not None:
                 context["exception"] = ex
 
             if job:
+                logger.exception("Error processing job %s", job)
+
                 error = traceback.format_exc()
 
                 if job.retryable:
saq-0.23.0.dist-info/METADATA → saq-0.24.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: saq
-Version: 0.23.0
+Version: 0.24.0
 Summary: Distributed Python job queue with asyncio and redis
 Home-page: https://github.com/tobymao/saq
 Author: Toby Mao
saq-0.23.0.dist-info/RECORD → saq-0.24.0.dist-info/RECORD CHANGED
@@ -1,18 +1,18 @@
-saq/__init__.py,sha256=P423_WovoYFfOmEd7hg_jjEZDpnvlNEYdbL7hS1uqQw,218
+saq/__init__.py,sha256=_czOeMHM4kMpuyk4QKHSBz-o8le6NYGS_hMj_VRL-L4,218
 saq/__main__.py,sha256=N4RNqnCcj7eZbM3OyYaC03_6Cot-y-SxW5Hwx6fuzKU,2440
 saq/errors.py,sha256=XPJw6J3caSAho4ZybuodIbeuGjboVabLuf3NFOEE-4Q,112
-saq/job.py,sha256=Hmr4g_RfMBSOtw9E4lOyEE6wVqaXvDxXf3j1Mipq-xo,11528
+saq/job.py,sha256=Pion_buhc4N-5mqnqfwfpzVjv-paP3HHqtMAKB6XIcE,11327
 saq/multiplexer.py,sha256=S_mjo7kObSBQ_f8epf0pT5Tjxg-LABW3fSH4dPfZxsE,2332
 saq/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 saq/types.py,sha256=GhIq2BIE_Z9hA-qS-NQXh_iPICNI0NZxOzjW0vcMgFU,3196
-saq/utils.py,sha256=S5QPutQibm435vsR8TTO9ehTsRvKsnsWxUg6kVkc4oE,1586
-saq/worker.py,sha256=mQofGvJ7Rig29MEtI2OVyT0rUHdteQZonVKQq8pnwV8,16668
+saq/utils.py,sha256=NdOycT-03zxjhKM8A1i0vzKnkv1UQxvy_Zt4GnO0Zd8,1721
+saq/worker.py,sha256=w0PARNGFChpVs1BwKzut6sxshU2XSZ2tG9cKkUa-ybA,16672
 saq/queue/__init__.py,sha256=5LgBHGylCVvrLDcjMCcI2dRRgh0BPdz2TKOdc8NMs2E,87
-saq/queue/base.py,sha256=eKpCme_PwTTuhSOHE9YmlVbNwkBwOVRGpD4VyD61KQc,15467
+saq/queue/base.py,sha256=KaRv9r0fmlRf2CH6Q72MGHXIk9e3vpPEWo4vt5DL1RA,15460
 saq/queue/http.py,sha256=V9S26gJbUt5AUIR2ETasSQy4Q_K30eGtguBYHpfcLGU,7739
-saq/queue/postgres.py,sha256=8stUQefR29l8zJ1rEKv9-TwOGdy7XVzj9GufeA0B5gY,35519
+saq/queue/postgres.py,sha256=UKnARw9at2b6gV3cynZ0jR46Hiir50o40dEd7gEClE4,34307
 saq/queue/postgres_migrations.py,sha256=gI6j-0TzlFFSWxji3Dy9aJ-llboJBm92J4tB_YZ7qI8,2080
-saq/queue/redis.py,sha256=ODtvqny5LmUWh-HqccV4GiR6BFUmWPD2PM7CzUv1dAw,17972
+saq/queue/redis.py,sha256=sa_wzUUlfPw-RZ-v_cnDEJWEFyUi3sy_3YTqG4UklOA,17754
 saq/web/__init__.py,sha256=NG9LfjgJQxNft0_iZuZ3LnX1I58SfxRwKpycjazBoGE,23
 saq/web/aiohttp.py,sha256=JklrRWt0aPSVkxRXbWC9l278lfsOSNo1TFjBOjsl03w,3925
 saq/web/common.py,sha256=U-TALY06werM4gIeGqW-V3HAu8Tko1EP0uA_4wUFmHY,997
@@ -20,9 +20,9 @@ saq/web/starlette.py,sha256=i38xuNcnQvWBY3jyHHu9Uo9ILSBzOwmk5Bq06c3CQzM,4432
 saq/web/static/app.js,sha256=i6PaRvBvt96LOINBdEuKkDvVeM-GA8lJiFg4jtQ3viY,7094
 saq/web/static/pico.min.css.gz,sha256=qCxIv3wWFMQ7MkvGSHQLwxio3121VvvieOkSjw6fv6o,9263
 saq/web/static/snabbdom.js.gz,sha256=zSO3Z761TB7bYNQFFEtypD0vCuqWesqPJeE5CuV4xRg,7603
-saq-0.23.0.dist-info/LICENSE,sha256=p208OXrLf_dMcvuRHpcinfsJdihCqKWbqtFXpw4kyW0,1065
-saq-0.23.0.dist-info/METADATA,sha256=mpfZ6BqjIgO-4zQR8I0WQTrlXoPg1ISCTNOcqPjZJzA,7472
-saq-0.23.0.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
-saq-0.23.0.dist-info/entry_points.txt,sha256=HkKOud1K15_DV7AEltn8G5Ua10VqIgHaZ4BQit4fdOk,42
-saq-0.23.0.dist-info/top_level.txt,sha256=FMrrc5EiGr4sQkEDtUMHIpomnWHL9i6xT7B6lvEh8xM,4
-saq-0.23.0.dist-info/RECORD,,
+saq-0.24.0.dist-info/LICENSE,sha256=p208OXrLf_dMcvuRHpcinfsJdihCqKWbqtFXpw4kyW0,1065
+saq-0.24.0.dist-info/METADATA,sha256=qBZMrHLODmbu30oyEVgdlT7rJkcs5S8MS0ObqnbyI34,7472
+saq-0.24.0.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+saq-0.24.0.dist-info/entry_points.txt,sha256=HkKOud1K15_DV7AEltn8G5Ua10VqIgHaZ4BQit4fdOk,42
+saq-0.24.0.dist-info/top_level.txt,sha256=FMrrc5EiGr4sQkEDtUMHIpomnWHL9i6xT7B6lvEh8xM,4
+saq-0.24.0.dist-info/RECORD,,