plain.jobs 0.43.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
plain/jobs/README.md ADDED
@@ -0,0 +1,300 @@
+ # plain.jobs
+
+ **Process background jobs with a database-driven job queue.**
+
+ - [Overview](#overview)
+ - [Local development](#local-development)
+ - [Job parameters](#job-parameters)
+ - [Job methods](#job-methods)
+ - [Scheduled jobs](#scheduled-jobs)
+ - [Admin interface](#admin-interface)
+ - [Job history](#job-history)
+ - [Monitoring](#monitoring)
+ - [FAQs](#faqs)
+ - [Installation](#installation)
+
+ ## Overview
+
+ Jobs are defined by subclassing the [`Job`](./jobs.py#Job) base class and implementing, at a minimum, a `run()` method.
+
+ ```python
+ from plain.jobs import Job, register_job
+ from plain.email import send_mail
+
+
+ @register_job
+ class WelcomeUserJob(Job):
+     def __init__(self, user):
+         self.user = user
+
+     def run(self):
+         send_mail(
+             subject="Welcome!",
+             message=f"Hello from Plain, {self.user}",
+             from_email="welcome@plainframework.com",
+             recipient_list=[self.user.email],
+         )
+ ```
+
+ You can then create an instance of the job and call [`run_in_worker()`](./jobs.py#Job.run_in_worker) to enqueue it for a background worker to pick up.
+
+ ```python
+ user = User.query.get(id=1)
+ WelcomeUserJob(user).run_in_worker()
+ ```
+
+ Workers are run using the `plain jobs worker` command.
+
+ Jobs can be defined in any Python file, but it is recommended to put them in `app/jobs.py` or `app/{pkg}/jobs.py`, since those modules are imported automatically and the [`@register_job`](./registry.py#register_job) decorator will run.
+
+ Run database migrations after installation:
+
+ ```bash
+ plain migrate
+ ```
+
+ ## Local development
+
+ In development, you will typically want to run the worker alongside your app with auto-reloading enabled. With [`plain.dev`](/plain-dev/plain/dev/README.md) you can do this by adding worker commands to the `[tool.plain.dev.run]` section of your `pyproject.toml` file.
+
+ ```toml
+ # pyproject.toml
+ [tool.plain.dev.run]
+ worker = {cmd = "plain jobs worker --reload --stats-every 0 --max-processes 2"}
+ worker-slow = {cmd = "plain jobs worker --reload --queue slow --stats-every 0 --max-processes 2"}
+ ```
+
+ The `--reload` flag automatically watches `.py` and `.env*` files and restarts the worker when changes are detected.
+
+ ## Job parameters
+
+ When calling `run_in_worker()`, you can specify several parameters to control job execution:
+
+ ```python
+ job.run_in_worker(
+     queue="slow",  # Target a specific queue (default: "default")
+     delay=60,  # Delay in seconds (or timedelta/datetime)
+     priority=10,  # Higher numbers run first (default: 0, use negatives for lower priority)
+     retries=3,  # Number of retry attempts (default: 0)
+     concurrency_key="user-123-welcome",  # Identifier for grouping/deduplication
+ )
+ ```
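+
+ Since `delay` accepts a `timedelta` or `datetime` as well as seconds, relative and absolute start times can be passed directly. A short sketch of both forms:
+
+ ```python
+ from datetime import datetime, timedelta
+
+ # Relative delay: run roughly five minutes from now
+ job.run_in_worker(delay=timedelta(minutes=5))
+
+ # Absolute start time: run at (or shortly after) the given datetime
+ job.run_in_worker(delay=datetime(2025, 1, 1, 9, 0))
+ ```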
+
+ For more advanced parameter options, see [`Job.run_in_worker()`](./jobs.py#Job.run_in_worker).
+
+ ## Job methods
+
+ The [`Job`](./jobs.py#Job) base class provides several methods you can override to customize behavior:
+
+ ```python
+ class MyJob(Job):
+     def run(self):
+         # Required: The main job logic
+         pass
+
+     # Defaults (can be overridden in run_in_worker)
+     def default_queue(self) -> str:
+         return "default"
+
+     def default_priority(self) -> int:
+         # Higher numbers run first: 10 > 5 > 0 > -5 > -10
+         return 0
+
+     def default_retries(self) -> int:
+         return 0
+
+     def default_concurrency_key(self) -> str:
+         # Identifier for grouping/deduplication
+         return ""
+
+     # Computed values
+     def calculate_retry_delay(self, attempt: int) -> int:
+         # Delay in seconds before retry (attempt starts at 1)
+         return 0
+
+     # Hooks
+     def should_enqueue(self, concurrency_key: str) -> bool:
+         # Called before enqueueing - return False to skip
+         # Use for concurrency limits, rate limits, etc.
+         return True
+ ```
+
+ ## Scheduled jobs
+
+ You can schedule jobs to run at specific times using the [`Schedule`](./scheduling.py#Schedule) class:
+
+ ```python
+ from plain.jobs import Job, register_job
+ from plain.jobs.scheduling import Schedule
+
+ @register_job
+ class DailyReportJob(Job):
+     schedule = Schedule.from_cron("0 9 * * *")  # Every day at 9 AM
+
+     def run(self):
+         # Generate daily report
+         pass
+ ```
+
+ The `Schedule` class supports standard cron syntax as well as the following special strings (see the example after the list):
+
+ - `@yearly` or `@annually` - Run once a year
+ - `@monthly` - Run once a month
+ - `@weekly` - Run once a week
+ - `@daily` or `@midnight` - Run once a day
+ - `@hourly` - Run once an hour
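+
+ For example, assuming `from_cron()` accepts these special strings as well as five-field cron expressions, a nightly job could be declared like this:
+
+ ```python
+ @register_job
+ class NightlyCleanupJob(Job):
+     schedule = Schedule.from_cron("@daily")  # Equivalent to "0 0 * * *"
+
+     def run(self):
+         # Nightly cleanup logic
+         pass
+ ```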
+
+ For custom schedules, see [`Schedule`](./scheduling.py#Schedule).
+
+ ## Admin interface
+
+ The jobs package includes admin views for monitoring jobs under the "Jobs" section. The admin interface provides:
+
+ - **Requests**: View pending jobs in the queue
+ - **Processes**: Monitor currently running jobs
+ - **Results**: Review completed and failed job history
+
+ Dashboard cards show at-a-glance statistics for successful, errored, lost, and retried jobs.
+
+ ## Job history
+
+ Job execution history is stored in the [`JobResult`](./models.py#JobResult) model. This includes:
+
+ - Job class and parameters
+ - Start and end times
+ - Success/failure status
+ - Error messages and tracebacks for failed jobs
+ - Worker information
+
+ History retention is controlled by the `JOBS_RESULTS_RETENTION` setting (defaults to 7 days):
+
+ ```python
+ # app/settings.py
+ JOBS_RESULTS_RETENTION = 60 * 60 * 24 * 30  # 30 days (in seconds)
+ ```
+
+ The job timeout can be configured with `JOBS_TIMEOUT` (defaults to 1 day):
+
+ ```python
+ # app/settings.py
+ JOBS_TIMEOUT = 60 * 60 * 24  # 1 day (in seconds)
+ ```
+
+ ## Monitoring
+
+ Workers report statistics at an interval controlled by the `--stats-every` option:
+
+ ```bash
+ # Report stats every 60 seconds
+ plain jobs worker --stats-every 60
+ ```
+
+ The worker integrates with OpenTelemetry for distributed tracing. Spans are created for:
+
+ - Job scheduling (`run_in_worker`)
+ - Job execution
+ - Job completion/failure
+
+ Jobs can be linked to the originating trace context, allowing you to track jobs initiated from web requests.
+
+ ## FAQs
+
+ #### How do I ensure only one job runs at a time?
+
+ Set a `concurrency_key` to enforce uniqueness automatically: only one job with a given key can be pending or processing at a time.
+
+ ```python
+ from plain.jobs import Job, register_job
+
+ @register_job
+ class ProcessUserJob(Job):
+     def __init__(self, user_id):
+         self.user_id = user_id
+
+     def default_concurrency_key(self):
+         return f"user-{self.user_id}"
+
+     def run(self):
+         process_user(self.user_id)
+
+ # Usage
+ ProcessUserJob(123).run_in_worker()  # Enqueued
+ ProcessUserJob(123).run_in_worker()  # Returns None (blocked - job already pending/processing)
+ ```
+
+ Alternatively, pass `concurrency_key` as a parameter to `run_in_worker()` instead of overriding the method:
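+
+ ```python
+ ProcessUserJob(123).run_in_worker(concurrency_key="user-123")
+ ```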
+
+ #### How do I implement custom concurrency limits?
+
+ Use the `should_enqueue()` hook to implement custom concurrency control:
+
+ ```python
+ class ProcessUserDataJob(Job):
+     def __init__(self, user_id):
+         self.user_id = user_id
+
+     def default_concurrency_key(self):
+         return f"user-{self.user_id}"
+
+     def should_enqueue(self, concurrency_key):
+         # Only allow 1 job per user at a time
+         processing = self.get_processing_jobs(concurrency_key).count()
+         pending = self.get_requested_jobs(concurrency_key).count()
+         return processing == 0 and pending == 0
+ ```
+
+ For more patterns like rate limiting and global limits, see [`should_enqueue()`](./jobs.py#should_enqueue) in the source code.
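+
+ As a rough sketch of a global limit, reusing the query helpers from the example above (and assuming that overriding `should_enqueue()` replaces the default uniqueness check), a shared concurrency key can cap how many instances of a job are in flight at once:
+
+ ```python
+ class SendNewsletterJob(Job):
+     def default_concurrency_key(self):
+         # One shared key across all instances of this job
+         return "newsletter"
+
+     def should_enqueue(self, concurrency_key):
+         # Hypothetical cap: at most 5 newsletter jobs pending or processing
+         in_flight = (
+             self.get_processing_jobs(concurrency_key).count()
+             + self.get_requested_jobs(concurrency_key).count()
+         )
+         return in_flight < 5
+ ```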
+
+ #### How are race conditions prevented?
+
+ On **PostgreSQL**, plain.jobs uses [advisory locks](https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS) to ensure `should_enqueue()` checks are atomic with job creation. The lock is acquired during the transaction and automatically released when the transaction completes. This eliminates race conditions where multiple threads might simultaneously pass the `should_enqueue()` check.
+
+ On **SQLite and MySQL**, advisory locks are not available, so a small race condition window exists between checking and creating jobs. For production deployments requiring strict concurrency guarantees, **we recommend PostgreSQL**.
+
+ For custom locking behavior (Redis, etc.), override [`get_enqueue_lock()`](./locks.py#get_enqueue_lock).
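+
+ As a minimal sketch of what that might look like with [redis-py](https://pypi.org/project/redis/), assuming `get_enqueue_lock(concurrency_key)` is expected to return a context manager that is held while the enqueue check and job creation run (check `locks.py` for the exact contract):
+
+ ```python
+ import redis
+
+ from plain.jobs import Job
+
+ r = redis.Redis()
+
+
+ class RedisLockedJob(Job):
+     def get_enqueue_lock(self, concurrency_key):
+         # redis-py locks are context managers, so they can be held
+         # for the duration of the enqueue transaction
+         return r.lock(f"enqueue:{concurrency_key}", timeout=10)
+ ```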
+
+ #### Can I run multiple workers?
+
+ Yes, you can run multiple worker processes:
+
+ ```bash
+ plain jobs worker --max-processes 4
+ ```
+
+ Or run workers for specific queues:
+
+ ```bash
+ plain jobs worker --queue slow --max-processes 2
+ ```
+
+ #### How do I handle job failures?
+
+ Set the number of retries and implement retry delays:
+
+ ```python
+ class MyJob(Job):
+     def default_retries(self):
+         return 3
+
+     def calculate_retry_delay(self, attempt):
+         # Exponential backoff: 1s, 2s, 4s
+         return 2 ** (attempt - 1)
+ ```
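+
+ Retries can also be set per call, since these defaults can be overridden through `run_in_worker()`:
+
+ ```python
+ MyJob().run_in_worker(retries=5)
+ ```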
+
+ ## Installation
+
+ Install the `plain.jobs` package from [PyPI](https://pypi.org/project/plain.jobs/):
+
+ ```bash
+ uv add plain.jobs
+ ```
+
+ Add it to your `INSTALLED_PACKAGES`:
+
+ ```python
+ # app/settings.py
+ INSTALLED_PACKAGES = [
+     ...
+     "plain.jobs",
+ ]
+ ```
plain/jobs/__init__.py ADDED
@@ -0,0 +1,6 @@
+ from .exceptions import DeferError, DeferJob
+ from .jobs import Job
+ from .middleware import JobMiddleware
+ from .registry import register_job
+
+ __all__ = ["Job", "DeferJob", "DeferError", "JobMiddleware", "register_job"]
plain/jobs/admin.py ADDED
@@ -0,0 +1,249 @@
+ from __future__ import annotations
+
+ from datetime import timedelta
+
+ from plain import models
+ from plain.admin.cards import Card
+ from plain.admin.views import (
+     AdminModelDetailView,
+     AdminModelListView,
+     AdminViewset,
+     register_viewset,
+ )
+ from plain.http import RedirectResponse
+ from plain.models.expressions import Case, When
+ from plain.runtime import settings
+
+ from .models import JobProcess, JobRequest, JobResult, JobResultQuerySet
+
+
+ def _td_format(td_object: timedelta) -> str:
+     seconds = int(td_object.total_seconds())
+     periods = [
+         ("year", 60 * 60 * 24 * 365),
+         ("month", 60 * 60 * 24 * 30),
+         ("day", 60 * 60 * 24),
+         ("hour", 60 * 60),
+         ("minute", 60),
+         ("second", 1),
+     ]
+
+     strings = []
+     for period_name, period_seconds in periods:
33
+ if seconds > period_seconds:
34
+ period_value, seconds = divmod(seconds, period_seconds)
35
+ has_s = "s" if period_value > 1 else ""
36
+ strings.append(f"{period_value} {period_name}{has_s}")
37
+
38
+ return ", ".join(strings)
39
+
40
+
41
+ class SuccessfulJobsCard(Card):
42
+ title = "Successful"
43
+ text = "View"
44
+
45
+ def get_metric(self) -> int:
46
+ return JobResult.query.successful().count()
47
+
48
+ def get_link(self) -> str:
49
+ return JobResultViewset.ListView.get_view_url() + "?display=Successful"
50
+
51
+
52
+ class ErroredJobsCard(Card):
53
+ title = "Errored"
54
+ text = "View"
55
+
56
+ def get_metric(self) -> int:
57
+ return JobResult.query.errored().count()
58
+
59
+ def get_link(self) -> str:
60
+ return JobResultViewset.ListView.get_view_url() + "?display=Errored"
61
+
62
+
63
+ class LostJobsCard(Card):
64
+ title = "Lost"
65
+ text = "View" # TODO make not required - just an icon?
66
+
67
+ def get_description(self) -> str:
68
+ delta = timedelta(seconds=settings.JOBS_TIMEOUT)
69
+ return f"Jobs are considered lost after {_td_format(delta)}"
70
+
71
+ def get_metric(self) -> int:
72
+ return JobResult.query.lost().count()
73
+
74
+ def get_link(self) -> str:
75
+ return JobResultViewset.ListView.get_view_url() + "?display=Lost"
76
+
77
+
78
+ class RetriedJobsCard(Card):
79
+ title = "Retried"
80
+ text = "View" # TODO make not required - just an icon?
81
+
82
+ def get_metric(self) -> int:
83
+ return JobResult.query.retried().count()
84
+
85
+ def get_link(self) -> str:
86
+ return JobResultViewset.ListView.get_view_url() + "?display=Retried"
87
+
88
+
89
+ class WaitingJobsCard(Card):
90
+ title = "Waiting"
91
+
92
+ def get_metric(self) -> int:
93
+ return JobProcess.query.waiting().count()
94
+
95
+
96
+ class RunningJobsCard(Card):
97
+ title = "Running"
98
+
99
+ def get_metric(self) -> int:
100
+ return JobProcess.query.running().count()
101
+
102
+
103
+ @register_viewset
104
+ class JobRequestViewset(AdminViewset):
105
+ class ListView(AdminModelListView):
106
+ nav_section = "Jobs"
107
+ nav_icon = "inbox"
108
+ model = JobRequest
109
+ title = "Requests"
110
+ description = "Jobs waiting to be picked up by a worker."
111
+ fields = [
112
+ "id",
113
+ "job_class",
114
+ "priority",
115
+ "created_at",
116
+ "start_at",
117
+ "concurrency_key",
118
+ ]
119
+ actions = ["Delete"]
120
+ queryset_order = ["priority", "-start_at", "-created_at"]
121
+
122
+ def perform_action(self, action: str, target_ids: list[int]) -> None:
123
+ if action == "Delete":
124
+ JobRequest.query.filter(id__in=target_ids).delete()
125
+
126
+ class DetailView(AdminModelDetailView):
127
+ model = JobRequest
128
+ title = "Request"
129
+
130
+
131
+ @register_viewset
132
+ class JobProcessViewset(AdminViewset):
133
+ class ListView(AdminModelListView):
134
+ nav_section = "Jobs"
135
+ nav_icon = "gear"
136
+ model = JobProcess
137
+ title = "Processes"
138
+ description = "Jobs currently being processed by a worker."
139
+ fields = [
140
+ "id",
141
+ "job_class",
142
+ "priority",
143
+ "created_at",
144
+ "started_at",
145
+ "concurrency_key",
146
+ ]
147
+ actions = ["Delete"]
148
+ cards = [
149
+ WaitingJobsCard,
150
+ RunningJobsCard,
151
+ ]
152
+
153
+ def perform_action(self, action: str, target_ids: list[int]) -> None:
154
+ if action == "Delete":
155
+ JobProcess.query.filter(id__in=target_ids).delete()
156
+
157
+ class DetailView(AdminModelDetailView):
158
+ model = JobProcess
159
+ title = "Process"
160
+
161
+
162
+ @register_viewset
163
+ class JobResultViewset(AdminViewset):
164
+ class ListView(AdminModelListView):
165
+ nav_section = "Jobs"
166
+ nav_icon = "clipboard-check"
167
+ model = JobResult
168
+ title = "Results"
169
+ description = "Completed jobs with their success/failure status."
170
+ fields = [
171
+ "id",
172
+ "job_class",
173
+ "priority",
174
+ "created_at",
175
+ "status",
176
+ "retried",
177
+ "is_retry",
178
+ ]
179
+ search_fields = [
180
+ "uuid",
181
+ "job_process_uuid",
182
+ "job_request_uuid",
183
+ "job_class",
184
+ ]
185
+ cards = [
186
+ SuccessfulJobsCard,
187
+ ErroredJobsCard,
188
+ LostJobsCard,
189
+ RetriedJobsCard,
190
+ ]
191
+ filters = [
192
+ "Successful",
193
+ "Errored",
194
+ "Cancelled",
195
+ "Lost",
196
+ "Retried",
197
+ ]
198
+ actions = [
199
+ "Retry",
200
+ ]
201
+ allow_global_search = False
202
+
203
+ def get_initial_queryset(self) -> JobResultQuerySet:
204
+ queryset: JobResultQuerySet = super().get_initial_queryset() # type: ignore[assignment]
205
+ queryset = queryset.annotate(
206
+ retried=Case(
207
+ When(retry_job_request_uuid__isnull=False, then=True),
208
+ default=False,
209
+ output_field=models.BooleanField(),
210
+ ),
211
+ is_retry=Case(
212
+ When(retry_attempt__gt=0, then=True),
213
+ default=False,
214
+ output_field=models.BooleanField(),
215
+ ),
216
+ )
217
+ if self.preset == "Successful":
218
+ return queryset.successful()
219
+ if self.preset == "Errored":
220
+ return queryset.errored()
221
+ if self.preset == "Cancelled":
222
+ return queryset.cancelled()
223
+ if self.preset == "Lost":
224
+ return queryset.lost()
225
+ if self.preset == "Retried":
226
+ return queryset.retried()
227
+ return queryset
228
+
229
+ def get_fields(self) -> list[str]:
230
+ fields = super().get_fields()
231
+ if self.preset == "Retried":
232
+ fields.append("retries")
233
+ fields.append("retry_attempt")
234
+ return fields
235
+
236
+ def perform_action(self, action: str, target_ids: list[int]) -> None:
237
+ if action == "Retry":
238
+ for result in JobResult.query.filter(id__in=target_ids):
239
+ result.retry_job(delay=0)
240
+ else:
241
+ raise ValueError("Invalid action")
242
+
243
+ class DetailView(AdminModelDetailView):
244
+ model = JobResult
245
+ title = "Result"
246
+
247
+ def post(self) -> RedirectResponse:
248
+ self.object.retry_job(delay=0)
249
+ return RedirectResponse(".")
plain/jobs/chores.py ADDED
@@ -0,0 +1,19 @@
+ import datetime
+
+ from plain.chores import Chore, register_chore
+ from plain.runtime import settings
+ from plain.utils import timezone
+
+ from .models import JobResult
+
+
+ @register_chore
+ class ClearCompleted(Chore):
+     """Delete completed job results older than the retention cutoff, in all queues."""
+
+     def run(self) -> str:
+         cutoff = timezone.now() - datetime.timedelta(
+             seconds=settings.JOBS_RESULTS_RETENTION
+         )
+         results = JobResult.query.filter(created_at__lt=cutoff).delete()
+         return f"{results[0]} jobs deleted"