saq 0.23.0__py3-none-any.whl → 0.24.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- saq/__init__.py +1 -1
- saq/job.py +25 -23
- saq/queue/base.py +31 -41
- saq/queue/postgres.py +19 -54
- saq/queue/redis.py +1 -8
- saq/utils.py +6 -2
- saq/worker.py +4 -3
- {saq-0.23.0.dist-info → saq-0.24.3.dist-info}/METADATA +1 -1
- {saq-0.23.0.dist-info → saq-0.24.3.dist-info}/RECORD +13 -13
- {saq-0.23.0.dist-info → saq-0.24.3.dist-info}/LICENSE +0 -0
- {saq-0.23.0.dist-info → saq-0.24.3.dist-info}/WHEEL +0 -0
- {saq-0.23.0.dist-info → saq-0.24.3.dist-info}/entry_points.txt +0 -0
- {saq-0.23.0.dist-info → saq-0.24.3.dist-info}/top_level.txt +0 -0
saq/__init__.py
CHANGED
saq/job.py
CHANGED
@@ -139,12 +139,19 @@ class Job:
 
     _EXCLUDE_NON_FULL = {
         "kwargs",
+        "timeout",
+        "heartbeat",
+        "retries",
+        "ttl",
+        "retry_delay",
+        "retry_backoff",
         "scheduled",
         "progress",
-        "total_ms",
         "result",
         "error",
         "status",
+        "priority",
+        "group_key",
         "meta",
     }
 
@@ -156,28 +163,23 @@ class Job:
             full: If true, will list the full kwargs for the Job, else an abridged version.
         """
         # Using an exclusion list preserves order for kwargs below
-
-        kwargs = …
-        …
-                "meta": self.meta,
-            }.items()
-            if v is not None and k not in excluded
-        )
-        return f"Job<{kwargs}>"
+
+        kwargs = {}
+
+        for field in dataclasses.fields(self):
+            key = field.name
+            value = getattr(self, key)
+            if (full or key not in self._EXCLUDE_NON_FULL) and value != field.default:
+                kwargs[key] = value
+
+        if "queue" in kwargs:
+            kwargs["queue"] = kwargs["queue"].name
+
+        if not kwargs.get("meta"):
+            kwargs.pop("meta", None)
+
+        info = ", ".join(f"{k}: {v}" for k, v in kwargs.items())
+        return f"Job<{info}>"
 
     def __repr__(self) -> str:
         return self.info(True)
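Job.info now derives its output from dataclasses.fields(self) and skips any field still at its default, so the repr only shows what was explicitly set; info(False) additionally hides the _EXCLUDE_NON_FULL fields listed above. A minimal sketch of the effect, assuming Job is constructed directly with a function name (values are illustrative, not verified output):

    from saq.job import Job

    job = Job("tasks.add", kwargs={"a": 1}, timeout=30)

    print(repr(job))        # info(True): every non-default field, timeout included
    print(job.info(False))  # hides _EXCLUDE_NON_FULL fields such as timeout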
saq/queue/base.py
CHANGED
@@ -110,7 +110,6 @@ class Queue(ABC):
     async def sweep(self, lock: int = 60, abort: float = 5.0) -> list[str]:
         pass
 
-    @abstractmethod
     async def notify(self, job: Job) -> None:
         pass
 
@@ -354,7 +353,18 @@ class Queue(ABC):
         job_keys: Iterable[str],
         callback: ListenCallback,
         timeout: float | None = 10,
+        poll_interval: float = 0.5,
     ) -> None:
+        """
+        Listen to updates on jobs.
+
+        Args:
+            job_keys: sequence of job keys
+            callback: callback function, if it returns truthy, break
+            timeout: if timeout is truthy, wait for timeout seconds
+            poll_interval: number of seconds in between poll attempts if needed
+        """
+
         async def listen() -> None:
             while True:
                 for job in await self.jobs(job_keys):
@@ -366,7 +376,7 @@ class Queue(ABC):
                     stop = callback(job.id, job.status)
                     if stop:
                         return
-                await asyncio.sleep(…)
+                await asyncio.sleep(poll_interval)
 
         if timeout:
             await asyncio.wait_for(listen(), timeout)
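Queue.listen gains a poll_interval argument, and its docstring moves here from the Redis implementation; the base implementation re-polls Queue.jobs at that interval between updates. A usage sketch, assuming a connected queue and an already enqueued job (both hypothetical):

    from saq.job import Status

    def on_update(job_key: str, status: Status) -> bool:
        print(job_key, status)
        return status == Status.COMPLETE  # a truthy return stops listening

    await queue.listen([job.key], on_update, timeout=30, poll_interval=0.1)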
@@ -404,6 +414,7 @@ class Queue(ABC):
         iter_kwargs: Sequence[dict[str, t.Any]],
         timeout: float | None = None,
         return_exceptions: bool = False,
+        poll_interval: float = 0.5,
         **kwargs: t.Any,
     ) -> list[t.Any]:
         """
@@ -431,6 +442,7 @@ class Queue(ABC):
             return_exceptions: If False (default), an exception is immediately raised as soon as any jobs
                 fail. Other jobs won't be cancelled and will continue to run.
                 If True, exceptions are treated the same as successful results and aggregated in the result list.
+            poll_interval: number of seconds in between poll attempts
             kwargs: Default kwargs for all jobs. These will be overridden by those in iter_kwargs.
         """
         iter_kwargs = [
@@ -442,46 +454,24 @@ class Queue(ABC):
             }
             for kw in iter_kwargs
         ]
-        job_keys = [key["key"] for key in iter_kwargs]
-        pending_job_keys = set(job_keys)
-
-        def callback(job_key: str, status: Status) -> bool:
-            if status in TERMINAL_STATUSES:
-                pending_job_keys.discard(job_key)
-
-            if status in UNSUCCESSFUL_TERMINAL_STATUSES and not return_exceptions:
-                return True
-
-            if not pending_job_keys:
-                return True
 
-        …
-            if job is None:
-                continue
-            if job.status in UNSUCCESSFUL_TERMINAL_STATUSES:
-                exc = JobError(job)
-                if not return_exceptions:
-                    raise exc
-                results.append(exc)
-            else:
-                results.append(job.result)
-        return results
+        await asyncio.gather(*(self.enqueue(job_or_func, **kw) for kw in iter_kwargs))
+        incomplete = object()
+        results = {key["key"]: incomplete for key in iter_kwargs}
+
+        while remaining := [k for k, v in results.items() if v is incomplete]:
+            for key, job in zip(remaining, await self.jobs(remaining)):
+                if not job:
+                    results[key] = None
+                elif job.status in UNSUCCESSFUL_TERMINAL_STATUSES:
+                    exc = JobError(job)
+                    if not return_exceptions:
+                        raise exc
+                    results[key] = exc
+                elif job.status in TERMINAL_STATUSES:
+                    results[key] = job.result
+            await asyncio.sleep(poll_interval)
+        return list(results.values())
 
     @asynccontextmanager
     async def batch(self) -> AsyncIterator[None]:
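Queue.map no longer drives a listen callback: it enqueues everything with asyncio.gather, then polls Queue.jobs every poll_interval seconds until each job reaches a terminal status, preserving result order through the results dict. A usage sketch against a hypothetical connected queue:

    results = await queue.map(
        "tasks.add",  # hypothetical task name
        iter_kwargs=[{"a": 1, "b": 2}, {"a": 3, "b": 4}],
        return_exceptions=True,  # JobError values are collected instead of raised
        poll_interval=0.1,       # new: how often job status is re-checked
    )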
saq/queue/postgres.py
CHANGED
@@ -28,7 +28,6 @@ if t.TYPE_CHECKING:
 
     from saq.types import (
         CountKind,
-        ListenCallback,
         DumpType,
         LoadType,
         QueueInfo,
@@ -70,9 +69,6 @@ class PostgresQueue(Queue):
         max_size: maximum pool size. (default 20)
             If greater than 0, this limits the maximum number of connections to Postgres.
             Otherwise, maintain `min_size` number of connections.
-        poll_interval: how often to poll for jobs. (default 1)
-            If 0, the queue will not poll for jobs and will only rely on notifications from the server.
-            This mean cron jobs will not be picked up in a timely fashion.
         saq_lock_keyspace: The first of two advisory lock keys used by SAQ. (default 0)
             SAQ uses advisory locks for coordinating tasks between its workers, e.g. sweeping.
         job_lock_keyspace: The first of two advisory lock keys used for jobs. (default 1)
@@ -103,7 +99,6 @@ class PostgresQueue(Queue):
         load: LoadType | None = None,
         min_size: int = 4,
         max_size: int = 20,
-        poll_interval: int = 1,
         saq_lock_keyspace: int = 0,
         job_lock_keyspace: int = 1,
         job_lock_sweep: bool = True,
@@ -125,10 +120,12 @@ class PostgresQueue(Queue):
             check=AsyncConnectionPool.check_connection,
             open=False,
         )
+        if self.pool.kwargs.get("autocommit") is False:
+            raise ValueError("SAQ Connection pool must have autocommit enabled.")
+        self.pool.kwargs["autocommit"] = True
         self._is_pool_provided = pool is not None
         self.min_size = min_size
         self.max_size = max_size
-        self.poll_interval = poll_interval
         self.saq_lock_keyspace = saq_lock_keyspace
         self.job_lock_keyspace = job_lock_keyspace
         self.job_lock_sweep = job_lock_sweep
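PostgresQueue drops poll_interval and instead requires autocommit connections: a pool whose connection kwargs explicitly set autocommit=False is rejected, otherwise autocommit=True is forced into pool.kwargs. A construction sketch, assuming the pool is passed as the first constructor argument (the DSN is hypothetical):

    from psycopg_pool import AsyncConnectionPool
    from saq.queue.postgres import PostgresQueue

    pool = AsyncConnectionPool("postgresql://localhost/saq", open=False)
    queue = PostgresQueue(pool)  # pool.kwargs["autocommit"] is now forced to True

    # Rejected: AsyncConnectionPool(..., kwargs={"autocommit": False}, open=False)
    # raises ValueError("SAQ Connection pool must have autocommit enabled.")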
@@ -145,7 +142,7 @@ class PostgresQueue(Queue):
         self._connected = False
 
     async def init_db(self) -> None:
-        async with self.…
+        async with self._get_dequeue_conn() as conn, conn.cursor() as cursor, conn.transaction():
             await cursor.execute(
                 SQL("SELECT pg_try_advisory_lock(%(key1)s, 0)"),
                 {"key1": self.saq_lock_keyspace},
@@ -231,9 +228,12 @@ class PostgresQueue(Queue):
     async def disconnect(self) -> None:
         if not self._connected:
             return
+
         async with self._connection_lock:
             if self._dequeue_conn:
                 await self._dequeue_conn.cancel_safe()
+                async with self._dequeue_conn as conn:
+                    await conn.execute("SELECT pg_advisory_unlock_all()")
                 await self.pool.putconn(self._dequeue_conn)
                 self._dequeue_conn = None
             if not self._is_pool_provided:
@@ -359,7 +359,7 @@ class PostgresQueue(Queue):
 
         if not self._has_sweep_lock:
             # Attempt to get the sweep lock and hold on to it
-            async with self._get_dequeue_conn() as conn, conn.cursor() as cursor…
+            async with self._get_dequeue_conn() as conn, conn.cursor() as cursor:
                 await cursor.execute(
                     SQL("SELECT pg_try_advisory_lock(%(key1)s, hashtext(%(queue)s))"),
                     {
@@ -446,6 +446,10 @@ class PostgresQueue(Queue):
                 continue
 
             swept.append(key)
+            logger.info(
+                "Sweeping %s, objid %s", job.info(logger.isEnabledFor(logging.DEBUG)), objid
+            )
+
             await self.abort(job, error=self.swept_error_message)
 
             try:
@@ -453,35 +457,12 @@ class PostgresQueue(Queue):
             except asyncio.TimeoutError:
                 logger.info("Could not abort job %s", key)
 
-            logger.info("Sweeping job %s", job.info(logger.isEnabledFor(logging.DEBUG)))
             if job.retryable:
                 await self.retry(job, error=self.swept_error_message)
             else:
                 await self.finish(job, Status.ABORTED, error=self.swept_error_message)
         return swept
 
-    async def listen(
-        self,
-        job_keys: Iterable[str],
-        callback: ListenCallback,
-        timeout: float | None = 10,
-    ) -> None:
-        if not job_keys:
-            return
-
-        async for message in self._listener.listen(*job_keys, timeout=timeout):
-            job_key = message["key"]
-            status = Status[message["data"].upper()]
-            if asyncio.iscoroutinefunction(callback):
-                stop = await callback(job_key, status)
-            else:
-                stop = callback(job_key, status)
-            if stop:
-                break
-
-    async def notify(self, job: Job, connection: AsyncConnection | None = None) -> None:
-        await self._notify(job.key, job.status, connection)
-
     async def _update(self, job: Job, status: Status | None = None, **kwargs: t.Any) -> None:
         expire_at = kwargs.pop("expire_at", -1)
         connection = kwargs.pop("connection", None)
@@ -518,7 +499,6 @@ class PostgresQueue(Queue):
                 "expire_at": expire_at,
             },
         )
-        await self.notify(job, conn)
 
     async def job(self, job_key: str) -> Job | None:
         async with self.pool.connection() as conn, conn.cursor() as cursor:
@@ -629,7 +609,7 @@ class PostgresQueue(Queue):
         else:
             async with self._listen_lock:
                 async for payload in self._listener.listen(ENQUEUE, DEQUEUE, timeout=timeout):
-                    if payload…
+                    if payload == ENQUEUE:
                         await self._dequeue()
 
         if not self._job_queue.empty():
@@ -650,7 +630,7 @@ class PostgresQueue(Queue):
             return
 
         async with self._dequeue_lock:
-            async with self._get_dequeue_conn() as conn, conn.cursor() as cursor…
+            async with self._get_dequeue_conn() as conn, conn.cursor() as cursor:
                 if not self._waiting:
                     return
                 await cursor.execute(
@@ -838,14 +818,12 @@ class PostgresQueue(Queue):
     ) -> None:
         key = job.key
 
-        async with self.nullcontext(
-            connection
-        ) if connection else self.pool.connection() as conn, conn.cursor() as cursor:
+        async with self.nullcontext(connection) if connection else self.pool.connection() as conn:
             if job.ttl >= 0:
                 expire_at = now_seconds() + job.ttl if job.ttl > 0 else None
                 await self.update(job, status=status, expire_at=expire_at, connection=conn)
             else:
-                await …
+                await conn.execute(
                     SQL(
                         dedent(
                             """
@@ -856,22 +834,12 @@ class PostgresQueue(Queue):
                     ).format(jobs_table=self.jobs_table),
                     {"key": key},
                 )
-            await self.notify(job, conn)
             await self._release_job(key)
 
-    async def _notify(
-        self, key: str, data: t.Any | None = None, connection: AsyncConnection | None = None
-    ) -> None:
-        payload = {"key": key}
-
-        if data is not None:
-            payload["data"] = data
-
+    async def _notify(self, key: str, connection: AsyncConnection | None = None) -> None:
         async with self.nullcontext(connection) if connection else self.pool.connection() as conn:
             await conn.execute(
-                SQL("NOTIFY {channel}, {…").format(
-                    channel=Identifier(self._channel), payload=json.dumps(payload)
-                )
+                SQL("NOTIFY {channel}, {key}").format(channel=Identifier(self._channel), key=key)
             )
 
     @asynccontextmanager
@@ -917,7 +885,6 @@ class PostgresQueue(Queue):
             ),
             {"keys": self._releasing},
         )
-        await conn.commit()
         self._releasing.clear()
 
     @cached_property
@@ -934,8 +901,6 @@ class ListenMultiplexer(Multiplexer):
     async def _start(self) -> None:
         async with self.pool.connection() as conn:
             await conn.execute(SQL("LISTEN {}").format(Identifier(self.key)))
-            await conn.commit()
 
             async for notify in conn.notifies():
-                …
-                self.publish(payload["key"], payload)
+                self.publish(notify.payload, notify.payload)
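With _notify reduced to NOTIFY {channel}, {key} and ListenMultiplexer republishing notify.payload verbatim, the JSON envelope used in 0.23 ({"key": ..., "data": ...}) is gone. A sketch of what a raw psycopg listener on the same channel would now see (channel name and DSN are hypothetical):

    import psycopg

    async def watch() -> None:
        conn = await psycopg.AsyncConnection.connect(
            "postgresql://localhost/saq", autocommit=True
        )
        await conn.execute('LISTEN "saq:job"')  # hypothetical channel identifier
        async for notify in conn.notifies():
            print(notify.payload)  # 0.24.x: the bare job key, no JSON wrapper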
saq/queue/redis.py
CHANGED
@@ -365,15 +365,8 @@ class RedisQueue(Queue):
         job_keys: Iterable[str],
         callback: ListenCallback,
         timeout: float | None = 10,
+        poll_interval: float = 0.5,
     ) -> None:
-        """
-        Listen to updates on jobs.
-
-        Args:
-            job_keys: sequence of job keys
-            callback: callback function, if it returns truthy, break
-            timeout: if timeout is truthy, wait for timeout seconds
-        """
         job_ids = [self.job_id(job_key) for job_key in job_keys]
 
         if not job_ids:
saq/utils.py
CHANGED
@@ -64,8 +64,12 @@ def exponential_backoff(
     return backoff
 
 
-async def cancel_tasks(tasks: Iterable[asyncio.Task]) -> None:
+async def cancel_tasks(tasks: Iterable[asyncio.Task], timeout: float = 1.0) -> None:
     """Cancel tasks and wait for all of them to finish"""
     for task in tasks:
         task.cancel()
-    …
+
+    try:
+        await asyncio.wait_for(asyncio.gather(*tasks, return_exceptions=True), timeout)
+    except (asyncio.TimeoutError, asyncio.CancelledError):
+        pass
saq/worker.py
CHANGED
@@ -283,7 +283,8 @@ class Worker:
 
         if task and not task.done():
             task_data["aborted"] = "abort" if job.error is None else job.error
-            …
+            # abort should be a blocking operation
+            await cancel_tasks([task], 0)
 
         await self.queue.finish_abort(job)
 
@@ -317,12 +318,12 @@ class Worker:
             else:
                 await job.retry("cancelled")
         except Exception as ex:
-            logger.exception("Error processing job %s", job)
-
             if context is not None:
                 context["exception"] = ex
 
             if job:
+                logger.exception("Error processing job %s", job)
+
                 error = traceback.format_exc()
 
                 if job.retryable:
{saq-0.23.0.dist-info → saq-0.24.3.dist-info}/RECORD
CHANGED
@@ -1,18 +1,18 @@
-saq/__init__.py,sha256=…
+saq/__init__.py,sha256=Qn50Cd572uzUjWhCPkgGI9F7rrYXBOlP6LOZuSCJk3Y,218
 saq/__main__.py,sha256=N4RNqnCcj7eZbM3OyYaC03_6Cot-y-SxW5Hwx6fuzKU,2440
 saq/errors.py,sha256=XPJw6J3caSAho4ZybuodIbeuGjboVabLuf3NFOEE-4Q,112
-saq/job.py,sha256=…
+saq/job.py,sha256=Pion_buhc4N-5mqnqfwfpzVjv-paP3HHqtMAKB6XIcE,11327
 saq/multiplexer.py,sha256=S_mjo7kObSBQ_f8epf0pT5Tjxg-LABW3fSH4dPfZxsE,2332
 saq/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 saq/types.py,sha256=GhIq2BIE_Z9hA-qS-NQXh_iPICNI0NZxOzjW0vcMgFU,3196
-saq/utils.py,sha256=…
-saq/worker.py,sha256=…
+saq/utils.py,sha256=NdOycT-03zxjhKM8A1i0vzKnkv1UQxvy_Zt4GnO0Zd8,1721
+saq/worker.py,sha256=wd8LTfF7YQRRrOrxqvZcOpEF4KISD9Ws5WDXm3BFfnY,16730
 saq/queue/__init__.py,sha256=5LgBHGylCVvrLDcjMCcI2dRRgh0BPdz2TKOdc8NMs2E,87
-saq/queue/base.py,sha256=…
+saq/queue/base.py,sha256=KaRv9r0fmlRf2CH6Q72MGHXIk9e3vpPEWo4vt5DL1RA,15460
 saq/queue/http.py,sha256=V9S26gJbUt5AUIR2ETasSQy4Q_K30eGtguBYHpfcLGU,7739
-saq/queue/postgres.py,sha256=…
+saq/queue/postgres.py,sha256=UKnARw9at2b6gV3cynZ0jR46Hiir50o40dEd7gEClE4,34307
 saq/queue/postgres_migrations.py,sha256=gI6j-0TzlFFSWxji3Dy9aJ-llboJBm92J4tB_YZ7qI8,2080
-saq/queue/redis.py,sha256=…
+saq/queue/redis.py,sha256=sa_wzUUlfPw-RZ-v_cnDEJWEFyUi3sy_3YTqG4UklOA,17754
 saq/web/__init__.py,sha256=NG9LfjgJQxNft0_iZuZ3LnX1I58SfxRwKpycjazBoGE,23
 saq/web/aiohttp.py,sha256=JklrRWt0aPSVkxRXbWC9l278lfsOSNo1TFjBOjsl03w,3925
 saq/web/common.py,sha256=U-TALY06werM4gIeGqW-V3HAu8Tko1EP0uA_4wUFmHY,997
@@ -20,9 +20,9 @@ saq/web/starlette.py,sha256=i38xuNcnQvWBY3jyHHu9Uo9ILSBzOwmk5Bq06c3CQzM,4432
 saq/web/static/app.js,sha256=i6PaRvBvt96LOINBdEuKkDvVeM-GA8lJiFg4jtQ3viY,7094
 saq/web/static/pico.min.css.gz,sha256=qCxIv3wWFMQ7MkvGSHQLwxio3121VvvieOkSjw6fv6o,9263
 saq/web/static/snabbdom.js.gz,sha256=zSO3Z761TB7bYNQFFEtypD0vCuqWesqPJeE5CuV4xRg,7603
-saq-0.23.0.dist-info/LICENSE,sha256=…
-saq-0.23.0.dist-info/METADATA,sha256=…
-saq-0.23.0.dist-info/WHEEL,sha256=…
-saq-0.23.0.dist-info/entry_points.txt,sha256=…
-saq-0.23.0.dist-info/top_level.txt,sha256=…
-saq-0.23.0.dist-info/RECORD,,
+saq-0.24.3.dist-info/LICENSE,sha256=p208OXrLf_dMcvuRHpcinfsJdihCqKWbqtFXpw4kyW0,1065
+saq-0.24.3.dist-info/METADATA,sha256=gZztgr-2voE3tpOPdufquUPrSV-Y2RFmbXynMcpFHWQ,7472
+saq-0.24.3.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+saq-0.24.3.dist-info/entry_points.txt,sha256=HkKOud1K15_DV7AEltn8G5Ua10VqIgHaZ4BQit4fdOk,42
+saq-0.24.3.dist-info/top_level.txt,sha256=FMrrc5EiGr4sQkEDtUMHIpomnWHL9i6xT7B6lvEh8xM,4
+saq-0.24.3.dist-info/RECORD,,
{saq-0.23.0.dist-info → saq-0.24.3.dist-info}/LICENSE
File without changes
{saq-0.23.0.dist-info → saq-0.24.3.dist-info}/WHEEL
File without changes
{saq-0.23.0.dist-info → saq-0.24.3.dist-info}/entry_points.txt
File without changes
{saq-0.23.0.dist-info → saq-0.24.3.dist-info}/top_level.txt
File without changes