plain.jobs 0.34.0__py3-none-any.whl → 0.35.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of plain.jobs might be problematic. Click here for more details.

plain/jobs/CHANGELOG.md CHANGED
@@ -1,5 +1,32 @@
1
1
  # plain-jobs changelog
2
2
 
3
+ ## [0.35.1](https://github.com/dropseed/plain/releases/plain-jobs@0.35.1) (2025-10-17)
4
+
5
+ ### What's changed
6
+
7
+ - The `run_in_worker()` method now returns `None` when a duplicate job is detected instead of attempting to return the list of in-progress jobs ([72f48d21bc](https://github.com/dropseed/plain/commit/72f48d21bc))
8
+ - Fixed type annotations for `run_in_worker()` to properly indicate it can return `JobRequest | None` ([72f48d21bc](https://github.com/dropseed/plain/commit/72f48d21bc))
9
+ - The `retry_job()` method now properly handles an explicit `delay=0` parameter, so a retry can be intentionally scheduled to run immediately ([72f48d21bc](https://github.com/dropseed/plain/commit/72f48d21bc))
10
+ - Fixed type annotations for `retry_job()` to properly indicate it can return `JobRequest | None` ([72f48d21bc](https://github.com/dropseed/plain/commit/72f48d21bc))
11
+
12
+ ### Upgrade instructions
13
+
14
+ - No changes required
15
+
16
+ ## [0.35.0](https://github.com/dropseed/plain/releases/plain-jobs@0.35.0) (2025-10-17)
17
+
18
+ ### What's changed
19
+
20
+ - The `Job` base class is now an abstract base class requiring implementation of the `run()` method ([e34282bba8](https://github.com/dropseed/plain/commit/e34282bba8))
21
+ - Job worker processes now properly initialize the Plain framework before processing jobs, fixing potential startup issues ([c4551d1b84](https://github.com/dropseed/plain/commit/c4551d1b84))
22
+ - The `plain jobs list` command now displays job descriptions from docstrings in a cleaner format ([4b6881a49e](https://github.com/dropseed/plain/commit/4b6881a49e))
23
+ - Job requests in the admin interface are now ordered by priority, start time, and created time to match worker processing order ([c18f0e3fb6](https://github.com/dropseed/plain/commit/c18f0e3fb6))
24
+ - The `ClearCompleted` chore has been refactored to use the new abstract base class pattern ([c4466d3c60](https://github.com/dropseed/plain/commit/c4466d3c60))
25
+
26
+ ### Upgrade instructions
27
+
28
+ - No changes required
29
+
3
30
  ## [0.34.0](https://github.com/dropseed/plain/releases/plain-jobs@0.34.0) (2025-10-13)
4
31
 
5
32
  ### What's changed
plain/jobs/admin.py CHANGED
@@ -109,6 +109,7 @@ class JobRequestViewset(AdminViewset):
109
109
  title = "Requests"
110
110
  fields = ["id", "job_class", "priority", "created_at", "start_at", "unique_key"]
111
111
  actions = ["Delete"]
112
+ queryset_order = ["priority", "-start_at", "-created_at"]
112
113
 
113
114
  def perform_action(self, action: str, target_ids: list[int]) -> None:
114
115
  if action == "Delete":
plain/jobs/chores.py CHANGED
@@ -1,17 +1,19 @@
1
1
  import datetime
2
2
 
3
- from plain.chores import register_chore
3
+ from plain.chores import Chore, register_chore
4
4
  from plain.runtime import settings
5
5
  from plain.utils import timezone
6
6
 
7
7
  from .models import JobResult
8
8
 
9
9
 
10
- @register_chore("jobs")
11
- def clear_completed() -> str:
10
+ @register_chore
11
+ class ClearCompleted(Chore):
12
12
  """Delete all completed job results in all queues."""
13
- cutoff = timezone.now() - datetime.timedelta(
14
- seconds=settings.JOBS_RESULTS_RETENTION
15
- )
16
- results = JobResult.query.filter(created_at__lt=cutoff).delete()
17
- return f"{results[0]} jobs deleted"
13
+
14
+ def run(self) -> str:
15
+ cutoff = timezone.now() - datetime.timedelta(
16
+ seconds=settings.JOBS_RESULTS_RETENTION
17
+ )
18
+ results = JobResult.query.filter(created_at__lt=cutoff).delete()
19
+ return f"{results[0]} jobs deleted"
plain/jobs/cli.py CHANGED
@@ -195,4 +195,10 @@ def run(job_class_name: str) -> None:
195
195
  def list_jobs() -> None:
196
196
  """List all registered jobs."""
197
197
  for name, job_class in jobs_registry.jobs.items():
198
- click.echo(f"{click.style(name, fg='blue')}: {job_class}")
198
+ click.secho(f"{name}", bold=True, nl=False)
199
+ # Get description from class docstring
200
+ description = job_class.__doc__.strip() if job_class.__doc__ else ""
201
+ if description:
202
+ click.secho(f": {description}", dim=True)
203
+ else:
204
+ click.echo("")
plain/jobs/jobs.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
3
3
  import datetime
4
4
  import inspect
5
5
  import logging
6
+ from abc import ABCMeta, abstractmethod
6
7
  from typing import TYPE_CHECKING, Any
7
8
 
8
9
  from opentelemetry import trace
@@ -33,7 +34,7 @@ logger = logging.getLogger(__name__)
33
34
  tracer = trace.get_tracer("plain.jobs")
34
35
 
35
36
 
36
- class JobType(type):
37
+ class JobType(ABCMeta):
37
38
  """
38
39
  Metaclass allows us to capture the original args/kwargs
39
40
  used to instantiate the job, so we can store them in the database
@@ -48,8 +49,9 @@ class JobType(type):
48
49
 
49
50
 
50
51
  class Job(metaclass=JobType):
52
+ @abstractmethod
51
53
  def run(self) -> None:
52
- raise NotImplementedError
54
+ pass
53
55
 
54
56
  def run_in_worker(
55
57
  self,
@@ -60,7 +62,7 @@ class Job(metaclass=JobType):
60
62
  retries: int | None = None,
61
63
  retry_attempt: int = 0,
62
64
  unique_key: str | None = None,
63
- ) -> JobRequest | list[JobRequest | JobProcess]:
65
+ ) -> JobRequest | None:
64
66
  from .models import JobRequest
65
67
 
66
68
  job_class_name = jobs_registry.get_job_class_name(self.__class__)
@@ -168,8 +170,7 @@ class Job(metaclass=JobType):
168
170
  span.set_attribute(ERROR_TYPE, "IntegrityError")
169
171
  span.set_status(trace.Status(trace.StatusCode.ERROR, "Duplicate job"))
170
172
  logger.warning("Job already in progress: %s", e)
171
- # Try to return the _in_progress list again
172
- return self._in_progress(unique_key)
173
+ return None
173
174
 
174
175
  def _in_progress(self, unique_key: str) -> list[JobRequest | JobProcess]:
175
176
  """Get all JobRequests and JobProcess that are currently in progress, regardless of queue."""
plain/jobs/models.py CHANGED
@@ -412,10 +412,14 @@ class JobResult(models.Model):
412
412
  ],
413
413
  )
414
414
 
415
- def retry_job(self, delay: int | None = None) -> JobRequest:
415
+ def retry_job(self, delay: int | None = None) -> JobRequest | None:
416
416
  retry_attempt = self.retry_attempt + 1
417
417
  job = jobs_registry.load_job(self.job_class, self.parameters)
418
- retry_delay = delay or job.get_retry_delay(retry_attempt)
418
+
419
+ if delay is None:
420
+ retry_delay = job.get_retry_delay(retry_attempt)
421
+ else:
422
+ retry_delay = delay
419
423
 
420
424
  with transaction.atomic():
421
425
  result = job.run_in_worker(
@@ -427,12 +431,9 @@ class JobResult(models.Model):
427
431
  retry_attempt=retry_attempt,
428
432
  # Unique key could be passed also?
429
433
  )
434
+ if result:
435
+ self.retry_job_request_uuid = result.uuid
436
+ self.save(update_fields=["retry_job_request_uuid"])
437
+ return result
430
438
 
431
- # TODO it is actually possible that result is a list
432
- # of pending jobs, which would need to be handled...
433
- # Right now it will throw an exception which could be caught by retry_failed_jobs.
434
-
435
- self.retry_job_request_uuid = result.uuid # type: ignore
436
- self.save(update_fields=["retry_job_request_uuid"])
437
-
438
- return result # type: ignore
439
+ return None
plain/jobs/scheduling.py CHANGED
@@ -216,6 +216,8 @@ class Schedule:
216
216
 
217
217
  @register_job
218
218
  class ScheduledCommand(Job):
219
+ """Run a shell command on a schedule."""
220
+
219
221
  def __init__(self, command: str) -> None:
220
222
  self.command = command
221
223
 
plain/jobs/workers.py CHANGED
@@ -7,7 +7,7 @@ import os
7
7
  import time
8
8
  from concurrent.futures import Future, ProcessPoolExecutor
9
9
  from functools import partial
10
- from typing import Any
10
+ from typing import TYPE_CHECKING, Any
11
11
 
12
12
  from plain import models
13
13
  from plain.models import transaction
@@ -16,12 +16,44 @@ from plain.signals import request_finished, request_started
16
16
  from plain.utils import timezone
17
17
  from plain.utils.module_loading import import_string
18
18
 
19
- from .models import JobProcess, JobRequest, JobResult, JobResultStatuses
20
19
  from .registry import jobs_registry
21
20
 
21
+ if TYPE_CHECKING:
22
+ from .models import JobResult
23
+
24
+ # Models are NOT imported at the top of this file!
25
+ # See comment on _worker_process_initializer() for explanation.
26
+
22
27
  logger = logging.getLogger("plain.jobs")
23
28
 
24
29
 
30
+ def _worker_process_initializer() -> None:
31
+ """Initialize Plain framework in worker process before processing jobs.
32
+
33
+ Why this is needed:
34
+ - We use multiprocessing with 'spawn' context (not 'fork')
35
+ - Spawn creates fresh Python processes, not forked copies
36
+ - When a spawned process starts, it re-imports this module BEFORE the initializer runs
37
+ - If we imported models at the top of this file, model registration would
38
+ happen before plain.runtime.setup(), causing PackageRegistryNotReady errors
39
+
40
+ Solution:
41
+ - This initializer runs plain.runtime.setup() FIRST in each worker process
42
+ - All model imports happen lazily inside functions (after setup completes)
43
+ - This ensures packages registry is ready before any models are accessed
44
+
45
+ Execution order in spawned worker:
46
+ 1. Re-import workers.py (but models NOT imported yet - lazy!)
47
+ 2. Run this initializer → plain.runtime.setup()
48
+ 3. Execute process_job() → NOW it's safe to import models
49
+ """
50
+ from plain.runtime import setup
51
+
52
+ # Each spawned worker process needs to set up Plain
53
+ # (spawn context creates fresh processes, not forks)
54
+ setup()
55
+
56
+
25
57
  class Worker:
26
58
  def __init__(
27
59
  self,
@@ -39,6 +71,7 @@ class Worker:
39
71
  max_workers=max_processes,
40
72
  max_tasks_per_child=max_jobs_per_process,
41
73
  mp_context=multiprocessing.get_context("spawn"),
74
+ initializer=_worker_process_initializer,
42
75
  )
43
76
 
44
77
  self.queues = queues
@@ -56,6 +89,9 @@ class Worker:
56
89
  self._is_shutting_down = False
57
90
 
58
91
  def run(self) -> None:
92
+ # Lazy import - see _worker_process_initializer() comment for why
93
+ from .models import JobRequest
94
+
59
95
  logger.info(
60
96
  "⬣ Starting Plain worker\n Registered jobs: %s\n Queues: %s\n Jobs schedule: %s\n Stats every: %s seconds\n Max processes: %s\n Max jobs per process: %s\n Max pending per process: %s\n PID: %s",
61
97
  "\n ".join(
@@ -211,6 +247,9 @@ class Worker:
211
247
  self._jobs_schedule_checked_at = now
212
248
 
213
249
  def log_stats(self) -> None:
250
+ # Lazy import - see _worker_process_initializer() comment for why
251
+ from .models import JobProcess, JobRequest
252
+
214
253
  try:
215
254
  num_proccesses = len(self.executor._processes)
216
255
  except (AttributeError, TypeError):
@@ -232,12 +271,18 @@ class Worker:
232
271
 
233
272
  def rescue_job_results(self) -> None:
234
273
  """Find any lost or failed jobs on this worker's queues and handle them."""
274
+ # Lazy import - see _worker_process_initializer() comment for why
275
+ from .models import JobProcess, JobResult
276
+
235
277
  # TODO return results and log them if there are any?
236
278
  JobProcess.query.filter(queue__in=self.queues).mark_lost_jobs()
237
279
  JobResult.query.filter(queue__in=self.queues).retry_failed_jobs()
238
280
 
239
281
 
240
282
  def future_finished_callback(job_process_uuid: str, future: Future) -> None:
283
+ # Lazy import - see _worker_process_initializer() comment for why
284
+ from .models import JobProcess, JobResultStatuses
285
+
241
286
  if future.cancelled():
242
287
  logger.warning("Job cancelled job_process_uuid=%s", job_process_uuid)
243
288
  try:
@@ -264,6 +309,9 @@ def future_finished_callback(job_process_uuid: str, future: Future) -> None:
264
309
 
265
310
 
266
311
  def process_job(job_process_uuid: str) -> None:
312
+ # Lazy import - see _worker_process_initializer() comment for why
313
+ from .models import JobProcess
314
+
267
315
  try:
268
316
  worker_pid = os.getpid()
269
317
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: plain.jobs
3
- Version: 0.34.0
3
+ Version: 0.35.1
4
4
  Summary: Process background jobs with a database-driven job queue.
5
5
  Author-email: Dave Gaeddert <dave.gaeddert@dropseed.dev>
6
6
  License-File: LICENSE
@@ -1,18 +1,18 @@
1
- plain/jobs/CHANGELOG.md,sha256=6jaE6mdMVx7jX9UFk5eEqT31LC0CgRVTJHGrwObeES4,9384
1
+ plain/jobs/CHANGELOG.md,sha256=o0DXNqFtf8N7ndAfzEUxyb5Zvl1V-ASquJl3yxDy5NY,11310
2
2
  plain/jobs/README.md,sha256=Xuhz2Q48G9WeGCh5OWGVBlaSea4eKCqWzcTAtZRrS0I,6835
3
3
  plain/jobs/__init__.py,sha256=p2ATql3HyPzPTV34gJQ04caT7tcNQLbBGM6uIoDPbjo,92
4
- plain/jobs/admin.py,sha256=IhB6nkHKHB5CJfwPEoNW4pQKUi_4ewpNGkOCo4XwO0g,6719
5
- plain/jobs/chores.py,sha256=5WdLlCDPppX78yfS4LczIG7UeVR9DAoJsJHTT2Codd4,483
6
- plain/jobs/cli.py,sha256=KnazGup1JumjrSjhoMO2FwgLBATRW70YewkdMcLzsrI,5683
4
+ plain/jobs/admin.py,sha256=t1UEchq1-Eews_wPsVUofaqbzPaYpb-8H1bUlA59JGI,6785
5
+ plain/jobs/chores.py,sha256=oyVU-BfcJxMM3eK2_umn38N2mBsNpcDrZfpeEQju_DA,528
6
+ plain/jobs/cli.py,sha256=PPoT7xjl818BZnmI0yA_UCLEQkzl_Tv1_hiuJW9UE-Q,5911
7
7
  plain/jobs/config.py,sha256=PQsl-LxWsWLnjC98f0mvtdcCOuXvXKDMjrCRf1fq44Y,550
8
8
  plain/jobs/default_settings.py,sha256=r_95ucg_KY1XW1jarZy8VO3p-ylbllKMUrHzOPJiX6U,227
9
- plain/jobs/jobs.py,sha256=IPQ2vlhfLm5gvdZTR52WINDAWRUPN0Mjc_EhKjqYhAk,7843
9
+ plain/jobs/jobs.py,sha256=ZEne6eTVPoWBv-JUAB9GugdIR0pb2I5SDfJlxCrNI4c,7775
10
10
  plain/jobs/middleware.py,sha256=bz8aPBY0RbtLS4kic8mzPOd3EyQFCVRQ2uTCttT3RpE,573
11
- plain/jobs/models.py,sha256=EvO5vHbsTdI0OJIIJRpGEKks9pm_INB33B1q6VeMSUc,16014
11
+ plain/jobs/models.py,sha256=TYOjMT0dSF69uzfY2m13ZPwo6G0WOVJUbgJvsDFxBhI,15889
12
12
  plain/jobs/parameters.py,sha256=t9PwEZgwNCJx3YobsT-jfaVZdfMBS54XJcBrT9Wnsg0,6313
13
13
  plain/jobs/registry.py,sha256=Rwn5Htll10e549vD2Mu0oyoDynyHhE0bGYZ2bq9uzPU,1679
14
- plain/jobs/scheduling.py,sha256=4BQWeRGPYrhNjq9296GCvGw6-1-a3anjFGqc1mdK3fw,7805
15
- plain/jobs/workers.py,sha256=e32UgMch2pugqwLxRWZfH_kq0PtDuxMxHwbAQ0yYMV4,11941
14
+ plain/jobs/scheduling.py,sha256=fqpFnVoIIV-muf82WzuLyioSmiilfZ76KFjXzt8grIk,7851
15
+ plain/jobs/workers.py,sha256=dtr-yxH8FT3nByDbUN8wzIaWfk09enYy6mNr3VmuXiU,13867
16
16
  plain/jobs/migrations/0001_initial.py,sha256=EIgIEMVyTsStyx9dmKM8Jb_hwn694Yo31-74DZkNTqo,9452
17
17
  plain/jobs/migrations/0002_job_span_id_job_trace_id_jobrequest_span_id_and_more.py,sha256=ph5BwwOAwdfjdNh9RItYmX_IA29lO-Dd9GymYzvChXQ,1953
18
18
  plain/jobs/migrations/0003_rename_job_jobprocess_and_more.py,sha256=EdLucHxiH_QshLL2peIcMULRCQyFMPxh476AxCxW5Wk,2615
@@ -21,7 +21,7 @@ plain/jobs/migrations/0005_rename_constraints_and_indexes.py,sha256=PDGpOw6__tVf
21
21
  plain/jobs/migrations/0006_alter_jobprocess_table_alter_jobrequest_table_and_more.py,sha256=FY0_pcw0mL8MkUSatpDXWtA_xQw0kTZBGIyjLcmYeJE,546
22
22
  plain/jobs/migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
23
23
  plain/jobs/templates/admin/plainqueue/jobresult_detail.html,sha256=Ybp1s_dARo_bFDcLEzEfETheP8SzqHHE_NNSKhv_eh8,198
24
- plain_jobs-0.34.0.dist-info/METADATA,sha256=8uXilZ1ZfAqKsipubMNFgbcBY6KD2JLa7eGqavgtZTk,7162
25
- plain_jobs-0.34.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
26
- plain_jobs-0.34.0.dist-info/licenses/LICENSE,sha256=cvKM3OlqHx3ijD6e34zsSUkPvzl-ya3Dd63A6EHL94U,1500
27
- plain_jobs-0.34.0.dist-info/RECORD,,
24
+ plain_jobs-0.35.1.dist-info/METADATA,sha256=SubahgvtL-4vBbSKNAS1wudQZHVH8_gzWjSdn0ewBFg,7162
25
+ plain_jobs-0.35.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
26
+ plain_jobs-0.35.1.dist-info/licenses/LICENSE,sha256=cvKM3OlqHx3ijD6e34zsSUkPvzl-ya3Dd63A6EHL94U,1500
27
+ plain_jobs-0.35.1.dist-info/RECORD,,