plain.jobs 0.43.2 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
plain/jobs/workers.py ADDED
@@ -0,0 +1,355 @@
1
+ from __future__ import annotations
2
+
3
+ import gc
4
+ import logging
5
+ import multiprocessing
6
+ import os
7
+ import time
8
+ from concurrent.futures import Future, ProcessPoolExecutor
9
+ from functools import partial
10
+ from typing import TYPE_CHECKING, Any
11
+
12
+ from plain import models
13
+ from plain.models import transaction
14
+ from plain.runtime import settings
15
+ from plain.signals import request_finished, request_started
16
+ from plain.utils import timezone
17
+ from plain.utils.module_loading import import_string
18
+
19
+ from .registry import jobs_registry
20
+
21
+ if TYPE_CHECKING:
22
+ from .models import JobResult
23
+
24
+ # Models are NOT imported at the top of this file!
25
+ # See comment on _worker_process_initializer() for explanation.
26
+
27
+ logger = logging.getLogger("plain.jobs")
28
+
29
+
30
+ def _worker_process_initializer() -> None:
31
+ """Initialize Plain framework in worker process before processing jobs.
32
+
33
+ Why this is needed:
34
+ - We use multiprocessing with 'spawn' context (not 'fork')
35
+ - Spawn creates fresh Python processes, not forked copies
36
+ - When a spawned process starts, it re-imports this module BEFORE the initializer runs
37
+ - If we imported models at the top of this file, model registration would
38
+ happen before plain.runtime.setup(), causing PackageRegistryNotReady errors
39
+
40
+ Solution:
41
+ - This initializer runs plain.runtime.setup() FIRST in each worker process
42
+ - All model imports happen lazily inside functions (after setup completes)
43
+ - This ensures packages registry is ready before any models are accessed
44
+
45
+ Execution order in spawned worker:
46
+ 1. Re-import workers.py (but models NOT imported yet - lazy!)
47
+ 2. Run this initializer → plain.runtime.setup()
48
+ 3. Execute process_job() → NOW it's safe to import models
49
+ """
50
+ from plain.runtime import setup
51
+
52
+ # Each spawned worker process needs to set up Plain
53
+ # (spawn context creates fresh processes, not forks)
54
+ setup()
55
+
56
+
57
+ class Worker:
58
+ def __init__(
59
+ self,
60
+ queues: list[str],
61
+ jobs_schedule: list[Any] | None = None,
62
+ max_processes: int | None = None,
63
+ max_jobs_per_process: int | None = None,
64
+ max_pending_per_process: int = 10,
65
+ stats_every: int | None = None,
66
+ ) -> None:
67
+ if jobs_schedule is None:
68
+ jobs_schedule = []
69
+
70
+ self.executor = ProcessPoolExecutor(
71
+ max_workers=max_processes,
72
+ max_tasks_per_child=max_jobs_per_process,
73
+ mp_context=multiprocessing.get_context("spawn"),
74
+ initializer=_worker_process_initializer,
75
+ )
76
+
77
+ self.queues = queues
78
+
79
+ # Filter the jobs schedule to those that are in the same queue as this worker
80
+ self.jobs_schedule = [
81
+ x for x in jobs_schedule if x[0].default_queue() in queues
82
+ ]
83
+
84
+ # How often to log the stats (in seconds)
85
+ self.stats_every = stats_every
86
+
87
+ self.max_processes = self.executor._max_workers # type: ignore[attr-defined]
88
+ self.max_jobs_per_process = max_jobs_per_process
89
+ self.max_pending_per_process = max_pending_per_process
90
+
91
+ self._is_shutting_down = False
92
+
93
+ def run(self) -> None:
94
+ # Lazy import - see _worker_process_initializer() comment for why
95
+ from .models import JobRequest
96
+
97
+ logger.info(
98
+ "⬣ Starting Plain worker\n Registered jobs: %s\n Queues: %s\n Jobs schedule: %s\n Stats every: %s seconds\n Max processes: %s\n Max jobs per process: %s\n Max pending per process: %s\n PID: %s",
99
+ "\n ".join(
100
+ f"{name}: {cls}" for name, cls in jobs_registry.jobs.items()
101
+ ),
102
+ ", ".join(self.queues),
103
+ "\n ".join(str(x) for x in self.jobs_schedule),
104
+ self.stats_every,
105
+ self.max_processes,
106
+ self.max_jobs_per_process,
107
+ self.max_pending_per_process,
108
+ os.getpid(),
109
+ )
110
+
111
+ while not self._is_shutting_down:
112
+ try:
113
+ self.maybe_log_stats()
114
+ self.maybe_check_job_results()
115
+ self.maybe_schedule_jobs()
116
+ except Exception as e:
117
+ # Log the issue, but don't stop the worker
118
+ # (these tasks are ancillary to the main job processing)
119
+ logger.exception(e)
120
+
121
+ if len(self.executor._pending_work_items) >= (
122
+ self.max_processes * self.max_pending_per_process
123
+ ):
124
+ # We don't want to convert too many JobRequests to Jobs,
125
+ # because anything not started yet will be cancelled on deploy etc.
126
+ # It's easier to leave them in the JobRequest db queue as long as possible.
127
+ time.sleep(0.5)
128
+ continue
129
+
130
+ with transaction.atomic():
131
+ job_request = (
132
+ JobRequest.query.select_for_update(skip_locked=True)
133
+ .filter(
134
+ queue__in=self.queues,
135
+ )
136
+ .filter(
137
+ models.Q(start_at__isnull=True)
138
+ | models.Q(start_at__lte=timezone.now())
139
+ )
140
+ .order_by("priority", "-start_at", "-created_at")
141
+ .first()
142
+ )
143
+ if not job_request:
144
+ # Potentially no jobs to process (who knows for how long)
145
+ # but sleep for a second to give the CPU and DB a break
146
+ time.sleep(1)
147
+ continue
148
+
149
+ logger.debug(
150
+ 'Preparing to execute job job_class=%s job_request_uuid=%s job_priority=%s job_source="%s" job_queue="%s"',
151
+ job_request.job_class,
152
+ job_request.uuid,
153
+ job_request.priority,
154
+ job_request.source,
155
+ job_request.queue,
156
+ )
157
+
158
+ job = job_request.convert_to_job_process()
159
+
160
+ job_process_uuid = str(job.uuid) # Make a str copy
161
+
162
+ future = self.executor.submit(process_job, job_process_uuid)
163
+ future.add_done_callback(
164
+ partial(future_finished_callback, job_process_uuid)
165
+ )
166
+
167
+ def shutdown(self) -> None:
168
+ if self._is_shutting_down:
169
+ # Already shutting down somewhere else
170
+ return
171
+
172
+ logger.info("Job worker shutdown started")
173
+ self._is_shutting_down = True
174
+ self.executor.shutdown(wait=True, cancel_futures=True)
175
+ logger.info("Job worker shutdown complete")
176
+
177
+ def maybe_log_stats(self) -> None:
178
+ if not self.stats_every:
179
+ return
180
+
181
+ now = time.time()
182
+
183
+ if not hasattr(self, "_stats_logged_at"):
184
+ self._stats_logged_at = now
185
+
186
+ if now - self._stats_logged_at > self.stats_every:
187
+ self._stats_logged_at = now
188
+ self.log_stats()
189
+
190
+ def maybe_check_job_results(self) -> None:
191
+ now = time.time()
192
+
193
+ if not hasattr(self, "_job_results_checked_at"):
194
+ self._job_results_checked_at = now
195
+
196
+ check_every = 60 # Only need to check once a minute
197
+
198
+ if now - self._job_results_checked_at > check_every:
199
+ self._job_results_checked_at = now
200
+ self.rescue_job_results()
201
+
202
+ def maybe_schedule_jobs(self) -> None:
203
+ if not self.jobs_schedule:
204
+ return
205
+
206
+ now = time.time()
207
+
208
+ if not hasattr(self, "_jobs_schedule_checked_at"):
209
+ self._jobs_schedule_checked_at = now
210
+
211
+ check_every = 60 # Only need to check once every 60 seconds
212
+
213
+ if now - self._jobs_schedule_checked_at > check_every:
214
+ for job, schedule in self.jobs_schedule:
215
+ next_start_at = schedule.next()
216
+
217
+ # Leverage the concurrency_key to group scheduled jobs
218
+ # with the same start time
219
+ schedule_concurrency_key = f"{job.default_concurrency_key()}:scheduled:{int(next_start_at.timestamp())}"
220
+
221
+ # The job's should_enqueue hook can control scheduling behavior
222
+ result = job.run_in_worker(
223
+ delay=next_start_at,
224
+ concurrency_key=schedule_concurrency_key,
225
+ )
226
+ # Result is None if should_enqueue returned False
227
+ if result:
228
+ logger.info(
229
+ 'Scheduling job job_class=%s job_queue="%s" job_start_at="%s" job_schedule="%s" concurrency_key="%s"',
230
+ result.job_class,
231
+ result.queue,
232
+ result.start_at,
233
+ schedule,
234
+ result.concurrency_key,
235
+ )
236
+
237
+ self._jobs_schedule_checked_at = now
238
+
239
+ def log_stats(self) -> None:
240
+ # Lazy import - see _worker_process_initializer() comment for why
241
+ from .models import JobProcess, JobRequest
242
+
243
+ try:
244
+ num_processes = len(self.executor._processes)
245
+ except (AttributeError, TypeError):
246
+ # Depending on shutdown timing and internal behavior, this might not work
247
+ num_processes = 0
248
+
249
+ jobs_requested = JobRequest.query.filter(queue__in=self.queues).count()
250
+ jobs_processing = JobProcess.query.filter(queue__in=self.queues).count()
251
+
252
+ logger.info(
253
+ 'Job worker stats worker_processes=%s worker_queues="%s" jobs_requested=%s jobs_processing=%s worker_max_processes=%s worker_max_jobs_per_process=%s',
254
+ num_processes,
255
+ ",".join(self.queues),
256
+ jobs_requested,
257
+ jobs_processing,
258
+ self.max_processes,
259
+ self.max_jobs_per_process,
260
+ )
261
+
262
+ def rescue_job_results(self) -> None:
263
+ """Find any lost or failed jobs on this worker's queues and handle them."""
264
+ # Lazy import - see _worker_process_initializer() comment for why
265
+ from .models import JobProcess, JobResult
266
+
267
+ # TODO return results and log them if there are any?
268
+ JobProcess.query.filter(queue__in=self.queues).mark_lost_jobs()
269
+ JobResult.query.filter(queue__in=self.queues).retry_failed_jobs()
270
+
271
+
272
+ def future_finished_callback(job_process_uuid: str, future: Future) -> None:
273
+ # Lazy import - see _worker_process_initializer() comment for why
274
+ from .models import JobProcess, JobResultStatuses
275
+
276
+ if future.cancelled():
277
+ logger.warning("Job cancelled job_process_uuid=%s", job_process_uuid)
278
+ try:
279
+ job = JobProcess.query.get(uuid=job_process_uuid)
280
+ job.convert_to_result(status=JobResultStatuses.CANCELLED)
281
+ except JobProcess.DoesNotExist:
282
+ # Job may have already been cleaned up
283
+ pass
284
+ elif exception := future.exception():
285
+ # Process pool may have been killed...
286
+ logger.warning(
287
+ "Job failed job_process_uuid=%s",
288
+ job_process_uuid,
289
+ exc_info=exception,
290
+ )
291
+ try:
292
+ job = JobProcess.query.get(uuid=job_process_uuid)
293
+ job.convert_to_result(status=JobResultStatuses.CANCELLED)
294
+ except JobProcess.DoesNotExist:
295
+ # Job may have already been cleaned up
296
+ pass
297
+ else:
298
+ logger.debug("Job finished job_process_uuid=%s", job_process_uuid)
299
+
300
+
301
+ def process_job(job_process_uuid: str) -> None:
302
+ # Lazy import - see _worker_process_initializer() comment for why
303
+ from .models import JobProcess
304
+
305
+ try:
306
+ worker_pid = os.getpid()
307
+
308
+ request_started.send(sender=None)
309
+
310
+ job_process = JobProcess.query.get(uuid=job_process_uuid)
311
+
312
+ logger.info(
313
+ 'Executing job worker_pid=%s job_class=%s job_request_uuid=%s job_priority=%s job_source="%s" job_queue="%s"',
314
+ worker_pid,
315
+ job_process.job_class,
316
+ job_process.job_request_uuid,
317
+ job_process.priority,
318
+ job_process.source,
319
+ job_process.queue,
320
+ )
321
+
322
+ def middleware_chain(job: JobProcess) -> JobResult:
323
+ return job.run()
324
+
325
+ for middleware_path in reversed(settings.JOBS_MIDDLEWARE):
326
+ middleware_class = import_string(middleware_path)
327
+ middleware_instance = middleware_class(middleware_chain)
328
+ middleware_chain = middleware_instance.process_job
329
+
330
+ job_result = middleware_chain(job_process)
331
+
332
+ duration = job_result.ended_at - job_result.started_at # type: ignore[operator]
333
+ duration = duration.total_seconds()
334
+
335
+ logger.info(
336
+ 'Completed job worker_pid=%s job_class=%s job_process_uuid=%s job_request_uuid=%s job_result_uuid=%s job_priority=%s job_source="%s" job_queue="%s" job_duration=%s',
337
+ worker_pid,
338
+ job_result.job_class,
339
+ job_result.job_process_uuid,
340
+ job_result.job_request_uuid,
341
+ job_result.uuid,
342
+ job_result.priority,
343
+ job_result.source,
344
+ job_result.queue,
345
+ duration,
346
+ )
347
+ except Exception as e:
348
+ # Raising exceptions inside the worker process doesn't
349
+ # seem to be caught/shown anywhere as configured.
350
+ # So we at least log it out here.
351
+ # (A job should catch its own user-code errors, so this is for library errors)
352
+ logger.exception(e)
353
+ finally:
354
+ request_finished.send(sender=None)
355
+ gc.collect()
plain_jobs-0.43.2.dist-info/METADATA ADDED
@@ -0,0 +1,312 @@
1
+ Metadata-Version: 2.4
2
+ Name: plain.jobs
3
+ Version: 0.43.2
4
+ Summary: Process background jobs with a database-driven job queue.
5
+ Author-email: Dave Gaeddert <dave.gaeddert@dropseed.dev>
6
+ License-Expression: BSD-3-Clause
7
+ License-File: LICENSE
8
+ Requires-Python: >=3.13
9
+ Requires-Dist: plain-models<1.0.0
10
+ Requires-Dist: plain<1.0.0
11
+ Description-Content-Type: text/markdown
12
+
13
+ # plain.jobs
14
+
15
+ **Process background jobs with a database-driven job queue.**
16
+
17
+ - [Overview](#overview)
18
+ - [Local development](#local-development)
19
+ - [Job parameters](#job-parameters)
20
+ - [Job methods](#job-methods)
21
+ - [Scheduled jobs](#scheduled-jobs)
22
+ - [Admin interface](#admin-interface)
23
+ - [Job history](#job-history)
24
+ - [Monitoring](#monitoring)
25
+ - [FAQs](#faqs)
26
+ - [Installation](#installation)
27
+
28
+ ## Overview
29
+
30
+ Jobs are defined by subclassing the [`Job`](./jobs.py#Job) base class and implementing, at a minimum, a `run()` method.
31
+
32
+ ```python
33
+ from plain.jobs import Job, register_job
34
+ from plain.email import send_mail
35
+
36
+
37
+ @register_job
38
+ class WelcomeUserJob(Job):
39
+ def __init__(self, user):
40
+ self.user = user
41
+
42
+ def run(self):
43
+ send_mail(
44
+ subject="Welcome!",
45
+ message=f"Hello from Plain, {self.user}",
46
+ from_email="welcome@plainframework.com",
47
+ recipient_list=[self.user.email],
48
+ )
49
+ ```
50
+
51
+ You can then create an instance of the job and call [`run_in_worker()`](./jobs.py#Job.run_in_worker) to enqueue it for a background worker to pick up.
52
+
53
+ ```python
54
+ user = User.query.get(id=1)
55
+ WelcomeUserJob(user).run_in_worker()
56
+ ```
57
+
58
+ Workers are run using the `plain jobs worker` command.
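+
+ For example:
+
+ ```bash
+ # Start a worker for the default queue
+ plain jobs worker
+
+ # Target a specific queue and cap the process count (flags shown later in this README)
+ plain jobs worker --queue slow --max-processes 2
+ ```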
59
+
60
+ Jobs can be defined in any Python file, but it is suggested to use `app/jobs.py` or `app/{pkg}/jobs.py`, since those modules are imported automatically and the [`@register_job`](./registry.py#register_job) decorator will run.
61
+
62
+ Run database migrations after installation:
63
+
64
+ ```bash
65
+ plain migrate
66
+ ```
67
+
68
+ ## Local development
69
+
70
+ In development, you will typically want to run the worker alongside your app with auto-reloading enabled. With [`plain.dev`](/plain-dev/plain/dev/README.md) you can do this by adding it to the `[tool.plain.dev.run]` section of your `pyproject.toml` file.
71
+
72
+ ```toml
73
+ # pyproject.toml
74
+ [tool.plain.dev.run]
75
+ worker = {cmd = "plain jobs worker --reload --stats-every 0 --max-processes 2"}
76
+ worker-slow = {cmd = "plain jobs worker --reload --queue slow --stats-every 0 --max-processes 2"}
77
+ ```
78
+
79
+ The `--reload` flag watches `.py` and `.env*` files and automatically restarts the worker when changes are detected.
80
+
81
+ ## Job parameters
82
+
83
+ When calling `run_in_worker()`, you can specify several parameters to control job execution:
84
+
85
+ ```python
86
+ job.run_in_worker(
87
+ queue="slow", # Target a specific queue (default: "default")
88
+ delay=60, # Delay in seconds (or timedelta/datetime)
89
+ priority=10, # Higher numbers run first (default: 0, use negatives for lower priority)
90
+ retries=3, # Number of retry attempts (default: 0)
91
+ concurrency_key="user-123-welcome", # Identifier for grouping/deduplication
92
+ )
93
+ ```
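+
+ For instance, since `delay` also accepts a `timedelta` or `datetime` (per the comment above), a quick illustrative sketch:
+
+ ```python
+ from datetime import datetime, timedelta
+
+ # Delay by roughly five minutes
+ job.run_in_worker(delay=timedelta(minutes=5))
+
+ # Or aim for a specific start time
+ job.run_in_worker(delay=datetime(2026, 1, 1, 9, 0))
+ ```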
94
+
95
+ For more advanced parameter options, see [`Job.run_in_worker()`](./jobs.py#Job.run_in_worker).
96
+
97
+ ## Job methods
98
+
99
+ The [`Job`](./jobs.py#Job) base class provides several methods you can override to customize behavior:
100
+
101
+ ```python
102
+ class MyJob(Job):
103
+ def run(self):
104
+ # Required: The main job logic
105
+ pass
106
+
107
+ # Defaults (can be overridden in run_in_worker)
108
+ def default_queue(self) -> str:
109
+ return "default"
110
+
111
+ def default_priority(self) -> int:
112
+ # Higher numbers run first: 10 > 5 > 0 > -5 > -10
113
+ return 0
114
+
115
+ def default_retries(self) -> int:
116
+ return 0
117
+
118
+ def default_concurrency_key(self) -> str:
119
+ # Identifier for grouping/deduplication
120
+ return ""
121
+
122
+ # Computed values
123
+ def calculate_retry_delay(self, attempt: int) -> int:
124
+ # Delay in seconds before retry (attempt starts at 1)
125
+ return 0
126
+
127
+ # Hooks
128
+ def should_enqueue(self, concurrency_key: str) -> bool:
129
+ # Called before enqueueing - return False to skip
130
+ # Use for concurrency limits, rate limits, etc.
131
+ return True
132
+ ```
133
+
134
+ ## Scheduled jobs
135
+
136
+ You can schedule jobs to run at specific times using the [`Schedule`](./scheduling.py#Schedule) class:
137
+
138
+ ```python
139
+ from plain.jobs import Job, register_job
140
+ from plain.jobs.scheduling import Schedule
141
+
142
+ @register_job
143
+ class DailyReportJob(Job):
144
+ schedule = Schedule.from_cron("0 9 * * *") # Every day at 9 AM
145
+
146
+ def run(self):
147
+ # Generate daily report
148
+ pass
149
+ ```
150
+
151
+ The `Schedule` class supports standard cron syntax and special strings:
152
+
153
+ - `@yearly` or `@annually` - Run once a year
154
+ - `@monthly` - Run once a month
155
+ - `@weekly` - Run once a week
156
+ - `@daily` or `@midnight` - Run once a day
157
+ - `@hourly` - Run once an hour
158
+
159
+ For custom schedules, see [`Schedule`](./scheduling.py#Schedule).
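+
+ For instance, assuming `Schedule.from_cron()` also accepts the special strings above (a sketch; `CleanupJob` is just an illustrative name):
+
+ ```python
+ from plain.jobs import Job, register_job
+ from plain.jobs.scheduling import Schedule
+
+
+ @register_job
+ class CleanupJob(Job):
+     schedule = Schedule.from_cron("@daily")  # Run once a day
+
+     def run(self):
+         # Clean up stale records, expired sessions, etc.
+         pass
+ ```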
160
+
161
+ ## Admin interface
162
+
163
+ The jobs package includes admin views for monitoring jobs under the "Jobs" section. The admin interface provides:
164
+
165
+ - **Requests**: View pending jobs in the queue
166
+ - **Processes**: Monitor currently running jobs
167
+ - **Results**: Review completed and failed job history
168
+
169
+ Dashboard cards show at-a-glance statistics for successful, errored, lost, and retried jobs.
170
+
171
+ ## Job history
172
+
173
+ Job execution history is stored in the [`JobResult`](./models.py#JobResult) model. This includes:
174
+
175
+ - Job class and parameters
176
+ - Start and end times
177
+ - Success/failure status
178
+ - Error messages and tracebacks for failed jobs
179
+ - Worker information
180
+
181
+ History retention is controlled by the `JOBS_RESULTS_RETENTION` setting (defaults to 7 days):
182
+
183
+ ```python
184
+ # app/settings.py
185
+ JOBS_RESULTS_RETENTION = 60 * 60 * 24 * 30 # 30 days (in seconds)
186
+ ```
187
+
188
+ Job timeout can be configured with `JOBS_TIMEOUT` (defaults to 1 day):
189
+
190
+ ```python
191
+ # app/settings.py
192
+ JOBS_TIMEOUT = 60 * 60 * 24 # 1 day (in seconds)
193
+ ```
194
+
195
+ ## Monitoring
196
+
197
+ Workers report statistics and can be monitored using the `--stats-every` option:
198
+
199
+ ```bash
200
+ # Report stats every 60 seconds
201
+ plain jobs worker --stats-every 60
202
+ ```
203
+
204
+ The worker integrates with OpenTelemetry for distributed tracing. Spans are created for:
205
+
206
+ - Job scheduling (`run_in_worker`)
207
+ - Job execution
208
+ - Job completion/failure
209
+
210
+ Jobs can be linked to the originating trace context, allowing you to track jobs initiated from web requests.
211
+
212
+ ## FAQs
213
+
214
+ #### How do I ensure only one job runs at a time?
215
+
216
+ Set a `concurrency_key` to automatically enforce uniqueness - only one job with the same key can be pending or processing:
217
+
218
+ ```python
219
+ from plain.jobs import Job, register_job
220
+
221
+ @register_job
222
+ class ProcessUserJob(Job):
223
+ def __init__(self, user_id):
224
+ self.user_id = user_id
225
+
226
+ def default_concurrency_key(self):
227
+ return f"user-{self.user_id}"
228
+
229
+ def run(self):
230
+ process_user(self.user_id)
231
+
232
+ # Usage
233
+ ProcessUserJob(123).run_in_worker() # Enqueued
234
+ ProcessUserJob(123).run_in_worker() # Returns None (blocked - job already pending/processing)
235
+ ```
236
+
237
+ Alternatively, pass `concurrency_key` as a parameter to `run_in_worker()` instead of overriding the method.
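+
+ For example, with the `ProcessUserJob` class from above (parameters passed to `run_in_worker()` override the job's defaults):
+
+ ```python
+ ProcessUserJob(123).run_in_worker(concurrency_key="user-123")
+ ```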
238
+
239
+ #### How do I implement custom concurrency limits?
240
+
241
+ Use the `should_enqueue()` hook to implement custom concurrency control:
242
+
243
+ ```python
244
+ class ProcessUserDataJob(Job):
245
+ def __init__(self, user_id):
246
+ self.user_id = user_id
247
+
248
+ def default_concurrency_key(self):
249
+ return f"user-{self.user_id}"
250
+
251
+ def should_enqueue(self, concurrency_key):
252
+ # Only allow 1 job per user at a time
253
+ processing = self.get_processing_jobs(concurrency_key).count()
254
+ pending = self.get_requested_jobs(concurrency_key).count()
255
+ return processing == 0 and pending == 0
256
+ ```
257
+
258
+ For more patterns like rate limiting and global limits, see [`should_enqueue()`](./jobs.py#should_enqueue) in the source code.
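+
+ As a rough sketch of a looser per-key limit, reusing the `get_processing_jobs()`/`get_requested_jobs()` helpers from the example above (`SyncUserDataJob` and the threshold are illustrative):
+
+ ```python
+ from plain.jobs import Job
+
+ class SyncUserDataJob(Job):
+     def __init__(self, user_id):
+         self.user_id = user_id
+
+     def default_concurrency_key(self):
+         return f"user-{self.user_id}"
+
+     def should_enqueue(self, concurrency_key):
+         # Allow up to 3 in-flight jobs per user instead of just 1
+         in_flight = (
+             self.get_processing_jobs(concurrency_key).count()
+             + self.get_requested_jobs(concurrency_key).count()
+         )
+         return in_flight < 3
+ ```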
259
+
260
+ #### How are race conditions prevented?
261
+
262
+ On **PostgreSQL**, plain-jobs uses [advisory locks](https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS) to ensure `should_enqueue()` checks are atomic with job creation. The lock is acquired during the transaction and automatically released when the transaction completes. This eliminates race conditions where multiple threads might simultaneously pass the `should_enqueue()` check.
263
+
264
+ On **SQLite and MySQL**, advisory locks are not available, so a small race condition window exists between checking and creating jobs. For production deployments requiring strict concurrency guarantees, **we recommend PostgreSQL**.
265
+
266
+ For custom locking behavior (Redis, etc.), override [`get_enqueue_lock()`](./locks.py#get_enqueue_lock).
267
+
268
+ #### Can I run multiple workers?
269
+
270
+ Yes, you can run multiple worker processes:
271
+
272
+ ```bash
273
+ plain jobs worker --max-processes 4
274
+ ```
275
+
276
+ Or run workers for specific queues:
277
+
278
+ ```bash
279
+ plain jobs worker --queue slow --max-processes 2
280
+ ```
281
+
282
+ #### How do I handle job failures?
283
+
284
+ Set the number of retries and implement retry delays:
285
+
286
+ ```python
287
+ class MyJob(Job):
288
+ def default_retries(self):
289
+ return 3
290
+
291
+ def calculate_retry_delay(self, attempt):
292
+ # Exponential backoff: 1s, 2s, 4s
293
+ return 2 ** (attempt - 1)
294
+ ```
295
+
296
+ ## Installation
297
+
298
+ Install the `plain.jobs` package from [PyPI](https://pypi.org/project/plain.jobs/):
299
+
300
+ ```bash
301
+ uv add plain.jobs
302
+ ```
303
+
304
+ Add to your `INSTALLED_PACKAGES`:
305
+
306
+ ```python
307
+ # app/settings.py
308
+ INSTALLED_PACKAGES = [
309
+ ...
310
+ "plain.jobs",
311
+ ]
312
+ ```
plain_jobs-0.43.2.dist-info/RECORD ADDED
@@ -0,0 +1,30 @@
1
+ plain/jobs/CHANGELOG.md,sha256=AZRc2EcCW8WpxefXr-WXT2it54-CJ9GSTH05Nib1WLU,22183
2
+ plain/jobs/README.md,sha256=rZLhVm-g4qIBUlQ82eD-6xDWyFGDGlYwXFS0InLTPdE,8917
3
+ plain/jobs/__init__.py,sha256=beHp1U4mfzaCD7H2ZFb4OIQLTZOyWIt3xOLkz1Acd4c,218
4
+ plain/jobs/admin.py,sha256=BQ_f611hGhlNZYQdPRCS1oi8gfmIZQ-kvUSsxNZT4xI,7169
5
+ plain/jobs/chores.py,sha256=oyVU-BfcJxMM3eK2_umn38N2mBsNpcDrZfpeEQju_DA,528
6
+ plain/jobs/cli.py,sha256=ufb5n1ax-IxpmJFZVen-E7WBBEEr_3JFknNzFQd4STI,5882
7
+ plain/jobs/config.py,sha256=PQsl-LxWsWLnjC98f0mvtdcCOuXvXKDMjrCRf1fq44Y,550
8
+ plain/jobs/default_settings.py,sha256=r_95ucg_KY1XW1jarZy8VO3p-ylbllKMUrHzOPJiX6U,227
9
+ plain/jobs/exceptions.py,sha256=KmFyw_VtA6rC2ZaTkYWsxgOyForRSjqe3xNUn2Wk-Sg,1193
10
+ plain/jobs/jobs.py,sha256=iEwFx3_LBO0y1r22vPjrpzsC7c2Oysfc_yy8x1M3pBc,12823
11
+ plain/jobs/locks.py,sha256=bGkYwCAmRYB1Cs7Ov6Fxj_fd5BBsuvJFbuPuD3bJQLM,1409
12
+ plain/jobs/middleware.py,sha256=iQbVPnQz91OpkfNFsDh6-G0RLWW7KxYssUKdk7JEdNE,1228
13
+ plain/jobs/models.py,sha256=IFDmdBzqMjRU8XneFwRuCZx8bdIJ4uTmG2HFl1MmY3g,21086
14
+ plain/jobs/parameters.py,sha256=t9PwEZgwNCJx3YobsT-jfaVZdfMBS54XJcBrT9Wnsg0,6313
15
+ plain/jobs/registry.py,sha256=Rwn5Htll10e549vD2Mu0oyoDynyHhE0bGYZ2bq9uzPU,1679
16
+ plain/jobs/scheduling.py,sha256=b7IRPn_LmTPrINDt7wucv9AqCgcByCAfgeYxISUsHxg,7845
17
+ plain/jobs/workers.py,sha256=SNbKOyR1vdmaUCpe9Uq7bRGH8AHIGFqnL-LMq6gmClQ,13405
18
+ plain/jobs/migrations/0001_initial.py,sha256=EIgIEMVyTsStyx9dmKM8Jb_hwn694Yo31-74DZkNTqo,9452
19
+ plain/jobs/migrations/0002_job_span_id_job_trace_id_jobrequest_span_id_and_more.py,sha256=ph5BwwOAwdfjdNh9RItYmX_IA29lO-Dd9GymYzvChXQ,1953
20
+ plain/jobs/migrations/0003_rename_job_jobprocess_and_more.py,sha256=EdLucHxiH_QshLL2peIcMULRCQyFMPxh476AxCxW5Wk,2615
21
+ plain/jobs/migrations/0004_rename_tables_to_plainjobs.py,sha256=huq-BVFccWFdVYsgyzZDEqAQK3pEIZkaBkm3E5nvhws,1055
22
+ plain/jobs/migrations/0005_rename_constraints_and_indexes.py,sha256=PDGpOw6__tVfn-0BAFv_5OwWt6eBo2QF2kxeTZ92JKg,6408
23
+ plain/jobs/migrations/0006_alter_jobprocess_table_alter_jobrequest_table_and_more.py,sha256=FY0_pcw0mL8MkUSatpDXWtA_xQw0kTZBGIyjLcmYeJE,546
24
+ plain/jobs/migrations/0007_remove_jobrequest_plainjobs_jobrequest_unique_job_class_key_and_more.py,sha256=A5yPC3Gh2-wfE4-6YA_KlB6FKe-PghpKC6KbgKsO1c4,4685
25
+ plain/jobs/migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
26
+ plain/jobs/templates/admin/plainqueue/jobresult_detail.html,sha256=Ybp1s_dARo_bFDcLEzEfETheP8SzqHHE_NNSKhv_eh8,198
27
+ plain_jobs-0.43.2.dist-info/METADATA,sha256=c8enzKAexx8RF_ecKi9uL8zct1F6VUdt0tD43E8FL3c,9277
28
+ plain_jobs-0.43.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
29
+ plain_jobs-0.43.2.dist-info/licenses/LICENSE,sha256=cvKM3OlqHx3ijD6e34zsSUkPvzl-ya3Dd63A6EHL94U,1500
30
+ plain_jobs-0.43.2.dist-info/RECORD,,
plain_jobs-0.43.2.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.28.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any