python-pq 0.5.0__tar.gz → 0.5.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. {python_pq-0.5.0 → python_pq-0.5.2}/PKG-INFO +29 -1
  2. {python_pq-0.5.0 → python_pq-0.5.2}/README.md +28 -0
  3. {python_pq-0.5.0 → python_pq-0.5.2}/pyproject.toml +1 -1
  4. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/__init__.py +1 -1
  5. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/client.py +13 -3
  6. python_pq-0.5.2/src/pq/migrations/versions/20260205T180000Z_b7c8d9e0f1a2_add_periodic_key.py +38 -0
  7. python_pq-0.5.2/src/pq/migrations/versions/20260217T120000Z_c3d4e5f6a7b8_add_periodic_active.py +32 -0
  8. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/models.py +6 -1
  9. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/worker.py +1 -0
  10. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/config.py +0 -0
  11. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/logging.py +0 -0
  12. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/migrations/README +0 -0
  13. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/migrations/__init__.py +0 -0
  14. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/migrations/env.py +0 -0
  15. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/migrations/script.py.mako +0 -0
  16. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/migrations/versions/20260109T055839Z_476683af098d_initial_schema.py +0 -0
  17. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/migrations/versions/20260109T063747Z_2483bec70083_add_client_id.py +0 -0
  18. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/migrations/versions/20260205T120000Z_a1b2c3d4e5f6_add_max_concurrent.py +0 -0
  19. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/migrations/versions/__init__.py +0 -0
  20. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/priority.py +0 -0
  21. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/registry.py +0 -0
  22. {python_pq-0.5.0 → python_pq-0.5.2}/src/pq/serialization.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: python-pq
3
- Version: 0.5.0
3
+ Version: 0.5.2
4
4
  Summary: Postgres-backed job queue for Python
5
5
  Author: ricwo
6
6
  Author-email: ricwo <r@cogram.com>
@@ -166,6 +166,34 @@ pq.schedule(fast_idempotent_task, run_every=timedelta(seconds=30), max_concurren
166
166
 
167
167
  The lock auto-expires after `max_runtime` seconds (or 1 hour by default) for crash safety.
168
168
 
169
+ ### Pausing & Resuming
170
+
171
+ Disable a periodic task without removing it:
172
+
173
+ ```python
174
+ # Pause - task stays in the database but won't run
175
+ pq.schedule(sync_inventory, run_every=timedelta(minutes=5), active=False)
176
+
177
+ # Resume
178
+ pq.schedule(sync_inventory, run_every=timedelta(minutes=5), active=True)
179
+ ```
180
+
181
+ New schedules are active by default.
182
+
183
+ ### Multiple Schedules (Key)
184
+
185
+ Use `key` to register the same function multiple times with different configurations:
186
+
187
+ ```python
188
+ pq.schedule(sync_data, run_every=timedelta(hours=1), key="us", region="us")
189
+ pq.schedule(sync_data, run_every=timedelta(hours=2), key="eu", region="eu")
190
+
191
+ # Unschedule only the US schedule
192
+ pq.unschedule(sync_data, key="us")
193
+ ```
194
+
195
+ Omitting `key` defaults to `""` — backward-compatible with single-schedule usage.
196
+
169
197
  ### Unscheduling
170
198
 
171
199
  ```python
@@ -145,6 +145,34 @@ pq.schedule(fast_idempotent_task, run_every=timedelta(seconds=30), max_concurren
145
145
 
146
146
  The lock auto-expires after `max_runtime` seconds (or 1 hour by default) for crash safety.
147
147
 
148
+ ### Pausing & Resuming
149
+
150
+ Disable a periodic task without removing it:
151
+
152
+ ```python
153
+ # Pause - task stays in the database but won't run
154
+ pq.schedule(sync_inventory, run_every=timedelta(minutes=5), active=False)
155
+
156
+ # Resume
157
+ pq.schedule(sync_inventory, run_every=timedelta(minutes=5), active=True)
158
+ ```
159
+
160
+ New schedules are active by default.
161
+
162
+ ### Multiple Schedules (Key)
163
+
164
+ Use `key` to register the same function multiple times with different configurations:
165
+
166
+ ```python
167
+ pq.schedule(sync_data, run_every=timedelta(hours=1), key="us", region="us")
168
+ pq.schedule(sync_data, run_every=timedelta(hours=2), key="eu", region="eu")
169
+
170
+ # Unschedule only the US schedule
171
+ pq.unschedule(sync_data, key="us")
172
+ ```
173
+
174
+ Omitting `key` defaults to `""` — backward-compatible with single-schedule usage.
175
+
148
176
  ### Unscheduling
149
177
 
150
178
  ```python
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "python-pq"
3
- version = "0.5.0"
3
+ version = "0.5.2"
4
4
  description = "Postgres-backed job queue for Python"
5
5
  readme = "README.md"
6
6
  authors = [
@@ -7,7 +7,7 @@ from pq.models import Periodic, Task, TaskStatus
7
7
  from pq.priority import Priority
8
8
  from pq.worker import PostExecuteHook, PreExecuteHook, TaskTimeoutError
9
9
 
10
- __version__ = "0.5.0"
10
+ __version__ = "0.5.2"
11
11
 
12
12
  __all__ = [
13
13
  "PQ",
@@ -236,6 +236,8 @@ class PQ:
236
236
  priority: Priority = Priority.NORMAL,
237
237
  client_id: str | None = None,
238
238
  max_concurrent: int | None = 1,
239
+ key: str = "",
240
+ active: bool = True,
239
241
  **kwargs: Any,
240
242
  ) -> int:
241
243
  """Schedule a periodic task.
@@ -253,6 +255,10 @@ class PQ:
253
255
  max_concurrent: Maximum concurrent executions. Default 1 (no overlap).
254
256
  Set to None for unlimited concurrency. Values > 1 are reserved
255
257
  for future use and raise ValueError.
258
+ key: Discriminator for multiple schedules of the same function.
259
+ Defaults to "" (empty string).
260
+ active: Whether the task is active. Inactive tasks are not executed.
261
+ Defaults to True.
256
262
  **kwargs: Keyword arguments to pass to the handler.
257
263
 
258
264
  Returns:
@@ -305,6 +311,7 @@ class PQ:
305
311
  insert(Periodic)
306
312
  .values(
307
313
  name=name,
314
+ key=key,
308
315
  payload=payload,
309
316
  priority=priority,
310
317
  run_every=run_every,
@@ -312,9 +319,10 @@ class PQ:
312
319
  next_run=next_run,
313
320
  client_id=client_id,
314
321
  max_concurrent=max_concurrent,
322
+ active=active,
315
323
  )
316
324
  .on_conflict_do_update(
317
- index_elements=["name"],
325
+ index_elements=["name", "key"],
318
326
  set_={
319
327
  "payload": payload,
320
328
  "priority": priority,
@@ -322,6 +330,7 @@ class PQ:
322
330
  "cron": cron_expr,
323
331
  "next_run": next_run,
324
332
  "max_concurrent": max_concurrent,
333
+ "active": active,
325
334
  },
326
335
  )
327
336
  .returning(Periodic.id)
@@ -343,18 +352,19 @@ class PQ:
343
352
  result = session.execute(stmt)
344
353
  return result.rowcount > 0
345
354
 
346
- def unschedule(self, task: Callable[..., Any]) -> bool:
355
+ def unschedule(self, task: Callable[..., Any], *, key: str = "") -> bool:
347
356
  """Remove a periodic task.
348
357
 
349
358
  Args:
350
359
  task: The scheduled function to remove.
360
+ key: Discriminator key. Defaults to "" (the default schedule).
351
361
 
352
362
  Returns:
353
363
  True if task was found and deleted, False otherwise.
354
364
  """
355
365
  name = get_function_path(task)
356
366
  with self.session() as session:
357
- stmt = delete(Periodic).where(Periodic.name == name)
367
+ stmt = delete(Periodic).where(Periodic.name == name, Periodic.key == key)
358
368
  result = session.execute(stmt)
359
369
  return result.rowcount > 0
360
370
 
@@ -0,0 +1,38 @@
1
+ """add periodic key
2
+
3
+ Revision ID: b7c8d9e0f1a2
4
+ Revises: a1b2c3d4e5f6
5
+ Create Date: 2026-02-05 18:00:00 Z
6
+
7
+ """
8
+
9
+ from typing import Sequence, Union
10
+
11
+ from alembic import op
12
+ import sqlalchemy as sa
13
+
14
+
15
+ # revision identifiers, used by Alembic.
16
+ revision: str = "b7c8d9e0f1a2"
17
+ down_revision: Union[str, Sequence[str], None] = "a1b2c3d4e5f6"
18
+ branch_labels: Union[str, Sequence[str], None] = None
19
+ depends_on: Union[str, Sequence[str], None] = None
20
+
21
+
22
+ def upgrade() -> None:
23
+ """Add key column to pq_periodic and update unique constraint."""
24
+ op.add_column(
25
+ "pq_periodic",
26
+ sa.Column("key", sa.String(255), nullable=False, server_default=""),
27
+ )
28
+ op.drop_constraint("pq_periodic_name_key", "pq_periodic", type_="unique")
29
+ op.create_unique_constraint(
30
+ "uq_pq_periodic_name_key", "pq_periodic", ["name", "key"]
31
+ )
32
+
33
+
34
+ def downgrade() -> None:
35
+ """Remove key column and restore original unique constraint."""
36
+ op.drop_constraint("uq_pq_periodic_name_key", "pq_periodic", type_="unique")
37
+ op.drop_column("pq_periodic", "key")
38
+ op.create_unique_constraint("pq_periodic_name_key", "pq_periodic", ["name"])
@@ -0,0 +1,32 @@
1
+ """add periodic active
2
+
3
+ Revision ID: c3d4e5f6a7b8
4
+ Revises: b7c8d9e0f1a2
5
+ Create Date: 2026-02-17 12:00:00 Z
6
+
7
+ """
8
+
9
+ from typing import Sequence, Union
10
+
11
+ from alembic import op
12
+ import sqlalchemy as sa
13
+
14
+
15
+ # revision identifiers, used by Alembic.
16
+ revision: str = "c3d4e5f6a7b8"
17
+ down_revision: Union[str, Sequence[str], None] = "b7c8d9e0f1a2"
18
+ branch_labels: Union[str, Sequence[str], None] = None
19
+ depends_on: Union[str, Sequence[str], None] = None
20
+
21
+
22
+ def upgrade() -> None:
23
+ """Add active column to pq_periodic."""
24
+ op.add_column(
25
+ "pq_periodic",
26
+ sa.Column("active", sa.Boolean(), nullable=False, server_default="true"),
27
+ )
28
+
29
+
30
+ def downgrade() -> None:
31
+ """Remove active column from pq_periodic."""
32
+ op.drop_column("pq_periodic", "active")
@@ -6,6 +6,7 @@ from typing import Any
6
6
 
7
7
  from sqlalchemy import (
8
8
  BigInteger,
9
+ Boolean,
9
10
  DateTime,
10
11
  Enum,
11
12
  Identity,
@@ -15,6 +16,7 @@ from sqlalchemy import (
15
16
  SmallInteger,
16
17
  String,
17
18
  Text,
19
+ UniqueConstraint,
18
20
  func,
19
21
  )
20
22
  from sqlalchemy.dialects.postgresql import JSONB
@@ -76,19 +78,22 @@ class Periodic(Base):
76
78
  __tablename__ = "pq_periodic"
77
79
  __table_args__ = (
78
80
  Index("ix_pq_periodic_priority_next_run", "priority", "next_run"),
81
+ UniqueConstraint("name", "key"),
79
82
  )
80
83
 
81
84
  id: Mapped[int] = mapped_column(BigInteger, Identity(), primary_key=True)
82
85
  client_id: Mapped[str | None] = mapped_column(
83
86
  String(255), nullable=True, unique=True, index=True
84
87
  )
85
- name: Mapped[str] = mapped_column(String(255), nullable=False, unique=True)
88
+ name: Mapped[str] = mapped_column(String(255), nullable=False)
89
+ key: Mapped[str] = mapped_column(String(255), nullable=False, server_default="")
86
90
  payload: Mapped[dict[str, Any]] = mapped_column(JSONB, nullable=False, default=dict)
87
91
  priority: Mapped[int] = mapped_column(SmallInteger, nullable=False, default=0)
88
92
  run_every: Mapped[timedelta | None] = mapped_column(Interval, nullable=True)
89
93
  cron: Mapped[str | None] = mapped_column(String(100), nullable=True)
90
94
  next_run: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False)
91
95
  max_concurrent: Mapped[int | None] = mapped_column(SmallInteger, nullable=True)
96
+ active: Mapped[bool] = mapped_column(Boolean, nullable=False, server_default="true")
92
97
  last_run: Mapped[datetime | None] = mapped_column(
93
98
  DateTime(timezone=True), nullable=True
94
99
  )
@@ -560,6 +560,7 @@ def _process_periodic_task(
560
560
  # Claim highest priority due periodic task with FOR UPDATE SKIP LOCKED
561
561
  # Filter out tasks that are locked (max_concurrent=1 and locked_until in future)
562
562
  stmt = select(Periodic).where(
563
+ Periodic.active.is_(True),
563
564
  Periodic.next_run <= func.now(),
564
565
  or_(
565
566
  Periodic.max_concurrent.is_(None),
File without changes
File without changes
File without changes
File without changes