plain.jobs 0.33.0.tar.gz → 0.35.0.tar.gz

This diff represents the content of publicly available package versions that have been released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in the public registry.

Potentially problematic release: this version of plain.jobs might be problematic.
Files changed (32)
  1. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/PKG-INFO +6 -4
  2. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/CHANGELOG.md +25 -0
  3. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/README.md +5 -3
  4. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/admin.py +1 -0
  5. plain_jobs-0.35.0/plain/jobs/chores.py +19 -0
  6. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/cli.py +71 -20
  7. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/jobs.py +4 -2
  8. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/scheduling.py +2 -0
  9. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/workers.py +50 -2
  10. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/pyproject.toml +1 -1
  11. plain_jobs-0.33.0/plain/jobs/chores.py +0 -17
  12. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/.gitignore +0 -0
  13. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/LICENSE +0 -0
  14. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/README.md +0 -0
  15. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/__init__.py +0 -0
  16. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/config.py +0 -0
  17. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/default_settings.py +0 -0
  18. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/middleware.py +0 -0
  19. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/migrations/0001_initial.py +0 -0
  20. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/migrations/0002_job_span_id_job_trace_id_jobrequest_span_id_and_more.py +0 -0
  21. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/migrations/0003_rename_job_jobprocess_and_more.py +0 -0
  22. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/migrations/0004_rename_tables_to_plainjobs.py +0 -0
  23. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/migrations/0005_rename_constraints_and_indexes.py +0 -0
  24. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/migrations/0006_alter_jobprocess_table_alter_jobrequest_table_and_more.py +0 -0
  25. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/migrations/__init__.py +0 -0
  26. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/models.py +0 -0
  27. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/parameters.py +0 -0
  28. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/registry.py +0 -0
  29. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/templates/admin/plainqueue/jobresult_detail.html +0 -0
  30. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/tests/app/settings.py +0 -0
  31. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/tests/test_parameters.py +0 -0
  32. {plain_jobs-0.33.0 → plain_jobs-0.35.0}/tests/test_scheduling.py +0 -0
{plain_jobs-0.33.0 → plain_jobs-0.35.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: plain.jobs
-Version: 0.33.0
+Version: 0.35.0
 Summary: Process background jobs with a database-driven job queue.
 Author-email: Dave Gaeddert <dave.gaeddert@dropseed.dev>
 License-File: LICENSE
@@ -66,15 +66,17 @@ plain migrate
 
 ## Local development
 
-In development, you will typically want to run the worker alongside your app. With [`plain.dev`](/plain-dev/plain/dev/README.md) you can do this by adding it to the `[tool.plain.dev.run]` section of your `pyproject.toml` file. Currently, you will need to use something like [watchfiles](https://pypi.org/project/watchfiles/) to add auto-reloading to the worker.
+In development, you will typically want to run the worker alongside your app with auto-reloading enabled. With [`plain.dev`](/plain-dev/plain/dev/README.md) you can do this by adding it to the `[tool.plain.dev.run]` section of your `pyproject.toml` file.
 
 ```toml
 # pyproject.toml
 [tool.plain.dev.run]
-worker = {cmd = "watchfiles --filter python \"plain jobs worker --stats-every 0 --max-processes 2\" ."}
-worker-slow = {cmd = "watchfiles --filter python \"plain jobs worker --queue slow --stats-every 0 --max-processes 2\" ."}
+worker = {cmd = "plain jobs worker --reload --stats-every 0 --max-processes 2"}
+worker-slow = {cmd = "plain jobs worker --reload --queue slow --stats-every 0 --max-processes 2"}
 ```
 
+The `--reload` flag will automatically watch `.py` and `.env*` files for changes and restart the worker when changes are detected.
+
 ## Job parameters
 
 When calling `run_in_worker()`, you can specify several parameters to control job execution:
{plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/CHANGELOG.md

@@ -1,5 +1,30 @@
 # plain-jobs changelog
 
+## [0.35.0](https://github.com/dropseed/plain/releases/plain-jobs@0.35.0) (2025-10-17)
+
+### What's changed
+
+- The `Job` base class is now an abstract base class requiring implementation of the `run()` method ([e34282bba8](https://github.com/dropseed/plain/commit/e34282bba8))
+- Job worker processes now properly initialize the Plain framework before processing jobs, fixing potential startup issues ([c4551d1b84](https://github.com/dropseed/plain/commit/c4551d1b84))
+- The `plain jobs list` command now displays job descriptions from docstrings in a cleaner format ([4b6881a49e](https://github.com/dropseed/plain/commit/4b6881a49e))
+- Job requests in the admin interface are now ordered by priority, start time, and created time to match worker processing order ([c18f0e3fb6](https://github.com/dropseed/plain/commit/c18f0e3fb6))
+- The `ClearCompleted` chore has been refactored to use the new abstract base class pattern ([c4466d3c60](https://github.com/dropseed/plain/commit/c4466d3c60))
+
+### Upgrade instructions
+
+- No changes required
+
+## [0.34.0](https://github.com/dropseed/plain/releases/plain-jobs@0.34.0) (2025-10-13)
+
+### What's changed
+
+- Added `--reload` flag to `plain jobs worker` command for automatic reloading when code changes are detected ([f3db87e9aa](https://github.com/dropseed/plain/commit/f3db87e9aa))
+- Worker reloader now only watches `.py` and `.env*` files, not HTML files ([f2f31c288b](https://github.com/dropseed/plain/commit/f2f31c288b))
+
+### Upgrade instructions
+
+- Custom autoreloaders for development are no longer needed -- use the built-in `--reload` flag instead
+
 ## [0.33.0](https://github.com/dropseed/plain/releases/plain-jobs@0.33.0) (2025-10-10)
 
 ### What's changed
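
Since the `Job` base class is now abstract (see the `plain/jobs/jobs.py` hunk below), a subclass must define `run()` before it can be instantiated. A minimal sketch of what that means for application code, assuming `Job` and `register_job` are importable from `plain.jobs` as in the package's other modules; the job name and body are hypothetical:

```python
from plain.jobs import Job, register_job


@register_job
class SendWelcomeEmail(Job):  # hypothetical example job
    """Send a welcome email to a new user."""  # shown by `plain jobs list`

    def __init__(self, user_id: int) -> None:
        self.user_id = user_id

    def run(self) -> None:
        # Real work goes here. Omitting run() entirely now raises
        # TypeError at instantiation time (ABC enforcement) instead of
        # NotImplementedError when the worker executes the job.
        print(f"sending welcome email to user {self.user_id}")
```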
{plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/README.md

@@ -55,15 +55,17 @@ plain migrate
 
 ## Local development
 
-In development, you will typically want to run the worker alongside your app. With [`plain.dev`](/plain-dev/plain/dev/README.md) you can do this by adding it to the `[tool.plain.dev.run]` section of your `pyproject.toml` file. Currently, you will need to use something like [watchfiles](https://pypi.org/project/watchfiles/) to add auto-reloading to the worker.
+In development, you will typically want to run the worker alongside your app with auto-reloading enabled. With [`plain.dev`](/plain-dev/plain/dev/README.md) you can do this by adding it to the `[tool.plain.dev.run]` section of your `pyproject.toml` file.
 
 ```toml
 # pyproject.toml
 [tool.plain.dev.run]
-worker = {cmd = "watchfiles --filter python \"plain jobs worker --stats-every 0 --max-processes 2\" ."}
-worker-slow = {cmd = "watchfiles --filter python \"plain jobs worker --queue slow --stats-every 0 --max-processes 2\" ."}
+worker = {cmd = "plain jobs worker --reload --stats-every 0 --max-processes 2"}
+worker-slow = {cmd = "plain jobs worker --reload --queue slow --stats-every 0 --max-processes 2"}
 ```
 
+The `--reload` flag will automatically watch `.py` and `.env*` files for changes and restart the worker when changes are detected.
+
 ## Job parameters
 
 When calling `run_in_worker()`, you can specify several parameters to control job execution:
{plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/admin.py

@@ -109,6 +109,7 @@ class JobRequestViewset(AdminViewset):
     title = "Requests"
     fields = ["id", "job_class", "priority", "created_at", "start_at", "unique_key"]
     actions = ["Delete"]
+    queryset_order = ["priority", "-start_at", "-created_at"]
 
     def perform_action(self, action: str, target_ids: list[int]) -> None:
         if action == "Delete":
plain_jobs-0.35.0/plain/jobs/chores.py (new file)

@@ -0,0 +1,19 @@
+import datetime
+
+from plain.chores import Chore, register_chore
+from plain.runtime import settings
+from plain.utils import timezone
+
+from .models import JobResult
+
+
+@register_chore
+class ClearCompleted(Chore):
+    """Delete all completed job results in all queues."""
+
+    def run(self) -> str:
+        cutoff = timezone.now() - datetime.timedelta(
+            seconds=settings.JOBS_RESULTS_RETENTION
+        )
+        results = JobResult.query.filter(created_at__lt=cutoff).delete()
+        return f"{results[0]} jobs deleted"
{plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/cli.py

@@ -62,35 +62,80 @@ def cli() -> None:
     type=int,
     envvar="PLAIN_JOBS_WORKER_STATS_EVERY",
 )
+@click.option(
+    "--reload",
+    is_flag=True,
+    help="Watch files and auto-reload worker on changes",
+)
 def worker(
     queues: tuple[str, ...],
     max_processes: int | None,
     max_jobs_per_process: int | None,
     max_pending_per_process: int,
     stats_every: int,
+    reload: bool,
 ) -> None:
     """Run the job worker."""
     jobs_schedule = load_schedule(settings.JOBS_SCHEDULE)
 
-    worker = Worker(
-        queues=list(queues),
-        jobs_schedule=jobs_schedule,
-        max_processes=max_processes,
-        max_jobs_per_process=max_jobs_per_process,
-        max_pending_per_process=max_pending_per_process,
-        stats_every=stats_every,
-    )
-
-    def _shutdown(signalnum: int, _: Any) -> None:
-        logger.info("Job worker shutdown signal received signalnum=%s", signalnum)
-        worker.shutdown()
-
-    # Allow the worker to be stopped gracefully on SIGTERM
-    signal.signal(signal.SIGTERM, _shutdown)
-    signal.signal(signal.SIGINT, _shutdown)
-
-    # Start processing jobs
-    worker.run()
+    if reload:
+        from plain.internal.reloader import Reloader
+
+        # Track whether we should continue restarting
+        should_restart = {"value": True}
+        current_worker = {"instance": None}
+
+        def file_changed(filename: str) -> None:
+            if current_worker["instance"]:
+                current_worker["instance"].shutdown()
+
+        def signal_shutdown(signalnum: int, _: Any) -> None:
+            should_restart["value"] = False
+            if current_worker["instance"]:
+                current_worker["instance"].shutdown()
+
+        # Allow the worker to be stopped gracefully on SIGTERM/SIGINT
+        signal.signal(signal.SIGTERM, signal_shutdown)
+        signal.signal(signal.SIGINT, signal_shutdown)
+
+        # Start file watcher once, outside the loop
+        reloader = Reloader(callback=file_changed, watch_html=False)
+        reloader.start()
+
+        while should_restart["value"]:
+            worker = Worker(
+                queues=list(queues),
+                jobs_schedule=jobs_schedule,
+                max_processes=max_processes,
+                max_jobs_per_process=max_jobs_per_process,
+                max_pending_per_process=max_pending_per_process,
+                stats_every=stats_every,
+            )
+            current_worker["instance"] = worker
+
+            # Start processing jobs (blocks until shutdown)
+            worker.run()
+
+    else:
+        worker = Worker(
+            queues=list(queues),
+            jobs_schedule=jobs_schedule,
+            max_processes=max_processes,
+            max_jobs_per_process=max_jobs_per_process,
+            max_pending_per_process=max_pending_per_process,
+            stats_every=stats_every,
+        )
+
+        def _shutdown(signalnum: int, _: Any) -> None:
+            logger.info("Job worker shutdown signal received signalnum=%s", signalnum)
+            worker.shutdown()
+
+        # Allow the worker to be stopped gracefully on SIGTERM
+        signal.signal(signal.SIGTERM, _shutdown)
+        signal.signal(signal.SIGINT, _shutdown)
+
+        # Start processing jobs
+        worker.run()
 
 
 @cli.command()
@@ -150,4 +195,10 @@ def run(job_class_name: str) -> None:
 def list_jobs() -> None:
     """List all registered jobs."""
     for name, job_class in jobs_registry.jobs.items():
-        click.echo(f"{click.style(name, fg='blue')}: {job_class}")
+        click.secho(f"{name}", bold=True, nl=False)
+        # Get description from class docstring
+        description = job_class.__doc__.strip() if job_class.__doc__ else ""
+        if description:
+            click.secho(f": {description}", dim=True)
+        else:
+            click.echo("")
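
The reload branch above follows a watch-and-restart pattern: one file watcher runs for the life of the process, a file change shuts down the current `Worker` so the loop can build a fresh one, and a termination signal flips `should_restart` so the same shutdown exits the loop instead. A stdlib-only sketch of that control flow, with stand-in names (`FakeWorker`, `on_file_changed`, `on_signal` are illustrative, not part of plain.jobs):

```python
import threading
import time


class FakeWorker:
    """Stand-in for Worker: run() blocks until shutdown() is called."""

    def __init__(self) -> None:
        self._stop = threading.Event()

    def run(self) -> None:
        self._stop.wait()

    def shutdown(self) -> None:
        self._stop.set()


# Mutable containers mirror the closure trick used in the real CLI code.
should_restart = {"value": True}
current = {"worker": None}


def on_file_changed() -> None:
    # Reload path: stop the current worker but leave should_restart
    # alone, so the outer loop starts a replacement.
    if current["worker"]:
        current["worker"].shutdown()


def on_signal() -> None:
    # Shutdown path: same shutdown, but the loop will not restart.
    should_restart["value"] = False
    if current["worker"]:
        current["worker"].shutdown()


def driver() -> None:
    # Simulate one file change, then a termination signal.
    time.sleep(0.1)
    on_file_changed()
    time.sleep(0.1)
    on_signal()


threading.Thread(target=driver, daemon=True).start()

while should_restart["value"]:
    current["worker"] = FakeWorker()
    current["worker"].run()  # blocks until a callback calls shutdown()
    print("restarting" if should_restart["value"] else "exiting")
```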
{plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/jobs.py

@@ -3,6 +3,7 @@ from __future__ import annotations
 import datetime
 import inspect
 import logging
+from abc import ABCMeta, abstractmethod
 from typing import TYPE_CHECKING, Any
 
 from opentelemetry import trace
@@ -33,7 +34,7 @@ logger = logging.getLogger(__name__)
 tracer = trace.get_tracer("plain.jobs")
 
 
-class JobType(type):
+class JobType(ABCMeta):
     """
     Metaclass allows us to capture the original args/kwargs
     used to instantiate the job, so we can store them in the database
@@ -48,8 +49,9 @@ class JobType(type):
 
 
 class Job(metaclass=JobType):
+    @abstractmethod
     def run(self) -> None:
-        raise NotImplementedError
+        pass
 
     def run_in_worker(
         self,
{plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/scheduling.py

@@ -216,6 +216,8 @@ class Schedule:
 
 @register_job
 class ScheduledCommand(Job):
+    """Run a shell command on a schedule."""
+
     def __init__(self, command: str) -> None:
         self.command = command
 
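
The docstring added here is what the reworked `plain jobs list` command (above) prints as the job's description. The lookup is plain attribute access on the class; a tiny illustration of the same logic outside the CLI:

```python
class ScheduledCommand:
    """Run a shell command on a schedule."""


# Mirrors the description handling in list_jobs() above.
description = ScheduledCommand.__doc__.strip() if ScheduledCommand.__doc__ else ""
print(description)  # -> Run a shell command on a schedule.
```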
{plain_jobs-0.33.0 → plain_jobs-0.35.0}/plain/jobs/workers.py

@@ -7,7 +7,7 @@ import os
 import time
 from concurrent.futures import Future, ProcessPoolExecutor
 from functools import partial
-from typing import Any
+from typing import TYPE_CHECKING, Any
 
 from plain import models
 from plain.models import transaction
@@ -16,12 +16,44 @@ from plain.signals import request_finished, request_started
 from plain.utils import timezone
 from plain.utils.module_loading import import_string
 
-from .models import JobProcess, JobRequest, JobResult, JobResultStatuses
 from .registry import jobs_registry
 
+if TYPE_CHECKING:
+    from .models import JobResult
+
+# Models are NOT imported at the top of this file!
+# See comment on _worker_process_initializer() for explanation.
+
 logger = logging.getLogger("plain.jobs")
 
 
+def _worker_process_initializer() -> None:
+    """Initialize Plain framework in worker process before processing jobs.
+
+    Why this is needed:
+    - We use multiprocessing with 'spawn' context (not 'fork')
+    - Spawn creates fresh Python processes, not forked copies
+    - When a spawned process starts, it re-imports this module BEFORE the initializer runs
+    - If we imported models at the top of this file, model registration would
+      happen before plain.runtime.setup(), causing PackageRegistryNotReady errors
+
+    Solution:
+    - This initializer runs plain.runtime.setup() FIRST in each worker process
+    - All model imports happen lazily inside functions (after setup completes)
+    - This ensures packages registry is ready before any models are accessed
+
+    Execution order in spawned worker:
+    1. Re-import workers.py (but models NOT imported yet - lazy!)
+    2. Run this initializer → plain.runtime.setup()
+    3. Execute process_job() → NOW it's safe to import models
+    """
+    from plain.runtime import setup
+
+    # Each spawned worker process needs to set up Plain
+    # (spawn context creates fresh processes, not forks)
+    setup()
+
+
 class Worker:
     def __init__(
         self,
@@ -39,6 +71,7 @@ class Worker:
             max_workers=max_processes,
             max_tasks_per_child=max_jobs_per_process,
             mp_context=multiprocessing.get_context("spawn"),
+            initializer=_worker_process_initializer,
         )
 
         self.queues = queues
@@ -56,6 +89,9 @@ class Worker:
         self._is_shutting_down = False
 
     def run(self) -> None:
+        # Lazy import - see _worker_process_initializer() comment for why
+        from .models import JobRequest
+
         logger.info(
             "⬣ Starting Plain worker\n Registered jobs: %s\n Queues: %s\n Jobs schedule: %s\n Stats every: %s seconds\n Max processes: %s\n Max jobs per process: %s\n Max pending per process: %s\n PID: %s",
             "\n ".join(
@@ -211,6 +247,9 @@ class Worker:
         self._jobs_schedule_checked_at = now
 
     def log_stats(self) -> None:
+        # Lazy import - see _worker_process_initializer() comment for why
+        from .models import JobProcess, JobRequest
+
         try:
             num_proccesses = len(self.executor._processes)
         except (AttributeError, TypeError):
@@ -232,12 +271,18 @@ class Worker:
 
     def rescue_job_results(self) -> None:
         """Find any lost or failed jobs on this worker's queues and handle them."""
+        # Lazy import - see _worker_process_initializer() comment for why
+        from .models import JobProcess, JobResult
+
         # TODO return results and log them if there are any?
         JobProcess.query.filter(queue__in=self.queues).mark_lost_jobs()
         JobResult.query.filter(queue__in=self.queues).retry_failed_jobs()
 
 
 def future_finished_callback(job_process_uuid: str, future: Future) -> None:
+    # Lazy import - see _worker_process_initializer() comment for why
+    from .models import JobProcess, JobResultStatuses
+
     if future.cancelled():
         logger.warning("Job cancelled job_process_uuid=%s", job_process_uuid)
         try:
@@ -264,6 +309,9 @@ def future_finished_callback(job_process_uuid: str, future: Future) -> None:
 
 
 def process_job(job_process_uuid: str) -> None:
+    # Lazy import - see _worker_process_initializer() comment for why
+    from .models import JobProcess
+
     try:
         worker_pid = os.getpid()
 
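
The initializer pattern described in that docstring is generic `concurrent.futures` machinery: with a spawn context, each worker process re-imports the module and then runs the initializer before executing any task. A self-contained sketch of just that ordering guarantee; the names here are illustrative, not from plain.jobs:

```python
import multiprocessing
import os
from concurrent.futures import ProcessPoolExecutor

# Module-level state, re-created on import in every spawned process.
# Stands in for "framework not set up yet / models not importable yet".
_ready = False


def _initializer() -> None:
    # Runs once per worker process, after the re-import but before any
    # task. This is the slot where plain.jobs calls plain.runtime.setup().
    global _ready
    _ready = True


def task(n: int) -> str:
    # Safe to rely on the per-process setup having happened by now.
    assert _ready, "initializer always runs before the first task"
    return f"task {n} ran in pid {os.getpid()}"


if __name__ == "__main__":
    with ProcessPoolExecutor(
        max_workers=2,
        mp_context=multiprocessing.get_context("spawn"),  # fresh processes, not forks
        initializer=_initializer,
    ) as executor:
        for line in executor.map(task, range(4)):
            print(line)
```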
{plain_jobs-0.33.0 → plain_jobs-0.35.0}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "plain.jobs"
-version = "0.33.0"
+version = "0.35.0"
 description = "Process background jobs with a database-driven job queue."
 authors = [{name = "Dave Gaeddert", email = "dave.gaeddert@dropseed.dev"}]
 readme = "README.md"
plain_jobs-0.33.0/plain/jobs/chores.py (deleted)

@@ -1,17 +0,0 @@
-import datetime
-
-from plain.chores import register_chore
-from plain.runtime import settings
-from plain.utils import timezone
-
-from .models import JobResult
-
-
-@register_chore("jobs")
-def clear_completed() -> str:
-    """Delete all completed job results in all queues."""
-    cutoff = timezone.now() - datetime.timedelta(
-        seconds=settings.JOBS_RESULTS_RETENTION
-    )
-    results = JobResult.query.filter(created_at__lt=cutoff).delete()
-    return f"{results[0]} jobs deleted"