plain.jobs 0.33.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of plain.jobs might be problematic. Click here for more details.
- plain/jobs/CHANGELOG.md +186 -0
- plain/jobs/README.md +253 -0
- plain/jobs/__init__.py +4 -0
- plain/jobs/admin.py +238 -0
- plain/jobs/chores.py +17 -0
- plain/jobs/cli.py +153 -0
- plain/jobs/config.py +19 -0
- plain/jobs/default_settings.py +6 -0
- plain/jobs/jobs.py +226 -0
- plain/jobs/middleware.py +20 -0
- plain/jobs/migrations/0001_initial.py +246 -0
- plain/jobs/migrations/0002_job_span_id_job_trace_id_jobrequest_span_id_and_more.py +61 -0
- plain/jobs/migrations/0003_rename_job_jobprocess_and_more.py +80 -0
- plain/jobs/migrations/0004_rename_tables_to_plainjobs.py +33 -0
- plain/jobs/migrations/0005_rename_constraints_and_indexes.py +174 -0
- plain/jobs/migrations/0006_alter_jobprocess_table_alter_jobrequest_table_and_more.py +24 -0
- plain/jobs/migrations/__init__.py +0 -0
- plain/jobs/models.py +438 -0
- plain/jobs/parameters.py +193 -0
- plain/jobs/registry.py +60 -0
- plain/jobs/scheduling.py +251 -0
- plain/jobs/templates/admin/plainqueue/jobresult_detail.html +8 -0
- plain/jobs/workers.py +322 -0
- plain_jobs-0.33.0.dist-info/METADATA +264 -0
- plain_jobs-0.33.0.dist-info/RECORD +27 -0
- plain_jobs-0.33.0.dist-info/WHEEL +4 -0
- plain_jobs-0.33.0.dist-info/licenses/LICENSE +28 -0
plain/jobs/cli.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import datetime
|
|
4
|
+
import logging
|
|
5
|
+
import signal
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
import click
|
|
9
|
+
|
|
10
|
+
from plain.cli import register_cli
|
|
11
|
+
from plain.runtime import settings
|
|
12
|
+
from plain.utils import timezone
|
|
13
|
+
|
|
14
|
+
from .models import JobProcess, JobRequest, JobResult
|
|
15
|
+
from .registry import jobs_registry
|
|
16
|
+
from .scheduling import load_schedule
|
|
17
|
+
from .workers import Worker
|
|
18
|
+
|
|
19
|
+
# Module-level logger for the jobs CLI commands.
logger = logging.getLogger("plain.jobs")
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@register_cli("jobs")
@click.group()
def cli() -> None:
    # Parent command group for all `jobs` subcommands; subcommands attach
    # themselves via @cli.command(). Intentionally no docstring: click would
    # surface it as help text and change the CLI output.
    pass
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
@cli.command()
@click.option(
    "queues",
    "--queue",
    default=["default"],
    multiple=True,
    type=str,
    help="Queue to process",
)
@click.option(
    "--max-processes",
    "max_processes",
    default=None,
    type=int,
    envvar="PLAIN_JOBS_WORKER_MAX_PROCESSES",
)
@click.option(
    "--max-jobs-per-process",
    "max_jobs_per_process",
    default=None,
    type=int,
    envvar="PLAIN_JOBS_WORKER_MAX_JOBS_PER_PROCESS",
)
@click.option(
    "--max-pending-per-process",
    "max_pending_per_process",
    default=10,
    type=int,
    envvar="PLAIN_JOBS_WORKER_MAX_PENDING_PER_PROCESS",
)
@click.option(
    "--stats-every",
    "stats_every",
    default=60,
    type=int,
    envvar="PLAIN_JOBS_WORKER_STATS_EVERY",
)
def worker(
    queues: tuple[str, ...],
    max_processes: int | None,
    max_jobs_per_process: int | None,
    max_pending_per_process: int,
    stats_every: int,
) -> None:
    """Run the job worker."""
    schedule = load_schedule(settings.JOBS_SCHEDULE)

    job_worker = Worker(
        queues=list(queues),
        jobs_schedule=schedule,
        max_processes=max_processes,
        max_jobs_per_process=max_jobs_per_process,
        max_pending_per_process=max_pending_per_process,
        stats_every=stats_every,
    )

    def _handle_shutdown(signalnum: int, _frame: Any) -> None:
        # Ask the worker to stop; it exits once in-flight work is handled.
        logger.info("Job worker shutdown signal received signalnum=%s", signalnum)
        job_worker.shutdown()

    # Stop gracefully on SIGTERM (e.g. container stop) and SIGINT (Ctrl+C).
    for signum in (signal.SIGTERM, signal.SIGINT):
        signal.signal(signum, _handle_shutdown)

    # Start processing jobs (blocks until shutdown).
    job_worker.run()
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
@cli.command()
def clear() -> None:
    """Clear all completed job results in all queues."""
    # Results older than the configured retention window are deleted.
    retention = datetime.timedelta(seconds=settings.JOBS_RESULTS_RETENTION)
    cutoff = timezone.now() - retention
    click.echo(f"Clearing job results created before {cutoff}")
    deleted_count = JobResult.query.filter(created_at__lt=cutoff).delete()[0]
    click.echo(f"Deleted {deleted_count} jobs")
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
@cli.command()
def stats() -> None:
    """Stats across all queues."""
    # Counts of jobs in each lifecycle stage, across every queue.
    pending_count = JobRequest.query.count()
    processing_count = JobProcess.query.count()

    result_query = JobResult.query
    successful_count = result_query.successful().count()
    errored_count = result_query.errored().count()
    lost_count = result_query.lost().count()

    click.secho(f"Pending: {pending_count}", bold=True)
    click.secho(f"Processing: {processing_count}", bold=True)
    click.secho(f"Successful: {successful_count}", bold=True, fg="green")
    click.secho(f"Errored: {errored_count}", bold=True, fg="red")
    click.secho(f"Lost: {lost_count}", bold=True, fg="yellow")
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
@cli.command()
def purge() -> None:
    """Delete all running and pending jobs regardless of queue."""
    # Destructive: require explicit confirmation before deleting anything.
    if not click.confirm(
        "Are you sure you want to clear all running and pending jobs? This will delete all current Jobs and JobRequests"
    ):
        return

    # Delete pending requests first, then in-flight job processes.
    for model, label in ((JobRequest, "job requests"), (JobProcess, "jobs")):
        deleted_count = model.query.all().delete()[0]
        click.echo(f"Deleted {deleted_count} {label}")
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
@cli.command()
@click.argument("job_class_name", type=str)
def run(job_class_name: str) -> None:
    """Run a job class directly (and not using a worker).

    Loads the registered job class by name with no constructor arguments
    and calls its ``run()`` method synchronously in this process.
    """
    job = jobs_registry.load_job(job_class_name, {"args": [], "kwargs": {}})
    click.secho("Loaded job: ", bold=True, nl=False)
    # Fix: use click.echo instead of a bare print() so output goes through
    # click's stream handling, consistent with every other command here.
    click.echo(job)
    job.run()
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
@cli.command("list")
def list_jobs() -> None:
    """List all registered jobs."""
    # One line per registered job: "<name>: <class>" with the name in blue.
    for name, job_class in jobs_registry.jobs.items():
        styled_name = click.style(name, fg="blue")
        click.echo(f"{styled_name}: {job_class}")
|
plain/jobs/config.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
from importlib import import_module
|
|
2
|
+
|
|
3
|
+
from plain.packages import PackageConfig, packages_registry, register_config
|
|
4
|
+
|
|
5
|
+
from .registry import jobs_registry
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
@register_config
class Config(PackageConfig):
    """Package config that discovers and registers job classes at startup."""

    package_label = "plainjobs"

    def ready(self) -> None:
        # Trigger register calls to fire by importing the modules
        packages_registry.autodiscover_modules("jobs", include_app=True)

        # Also need to make sure our internal jobs are registered
        import_module("plain.jobs.scheduling")

        # Signal to the registry that discovery has finished.
        jobs_registry.ready = True
|
plain/jobs/jobs.py
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import datetime
|
|
4
|
+
import inspect
|
|
5
|
+
import logging
|
|
6
|
+
from typing import TYPE_CHECKING, Any
|
|
7
|
+
|
|
8
|
+
from opentelemetry import trace
|
|
9
|
+
from opentelemetry.semconv._incubating.attributes.code_attributes import (
|
|
10
|
+
CODE_FILEPATH,
|
|
11
|
+
CODE_LINENO,
|
|
12
|
+
)
|
|
13
|
+
from opentelemetry.semconv._incubating.attributes.messaging_attributes import (
|
|
14
|
+
MESSAGING_DESTINATION_NAME,
|
|
15
|
+
MESSAGING_MESSAGE_ID,
|
|
16
|
+
MESSAGING_OPERATION_NAME,
|
|
17
|
+
MESSAGING_OPERATION_TYPE,
|
|
18
|
+
MESSAGING_SYSTEM,
|
|
19
|
+
MessagingOperationTypeValues,
|
|
20
|
+
)
|
|
21
|
+
from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
|
|
22
|
+
from opentelemetry.trace import SpanKind, format_span_id, format_trace_id
|
|
23
|
+
|
|
24
|
+
from plain.models import IntegrityError
|
|
25
|
+
from plain.utils import timezone
|
|
26
|
+
|
|
27
|
+
from .registry import JobParameters, jobs_registry
|
|
28
|
+
|
|
29
|
+
if TYPE_CHECKING:
|
|
30
|
+
from .models import JobProcess, JobRequest
|
|
31
|
+
|
|
32
|
+
# Module-level logger for job scheduling/queueing messages.
logger = logging.getLogger(__name__)
# Dedicated OpenTelemetry tracer for spans produced by this package.
tracer = trace.get_tracer("plain.jobs")
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class JobType(type):
    """
    Metaclass that captures the positional and keyword arguments used to
    instantiate a job, so they can be stored in the database when the job
    is scheduled.
    """

    def __call__(self, *args: Any, **kwargs: Any) -> Job:
        job = super().__call__(*args, **kwargs)
        # Stash the constructor arguments on the instance for later
        # serialization by run_in_worker().
        job._init_args = args
        job._init_kwargs = kwargs
        return job
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class Job(metaclass=JobType):
    """Base class for background jobs.

    Subclasses implement ``run()``. Scheduling happens via
    ``run_in_worker()``, which serializes the constructor arguments
    (captured by the ``JobType`` metaclass) into a ``JobRequest`` row.
    """

    def run(self) -> None:
        # Subclasses must implement the actual work here.
        raise NotImplementedError

    def run_in_worker(
        self,
        *,
        queue: str | None = None,
        delay: int | datetime.timedelta | datetime.datetime | None = None,
        priority: int | None = None,
        retries: int | None = None,
        retry_attempt: int = 0,
        unique_key: str | None = None,
    ) -> JobRequest | list[JobRequest | JobProcess]:
        """Queue this job to be executed by a worker.

        Returns the created ``JobRequest``, or — when a duplicate with the
        same ``unique_key`` is already in progress — the list of existing
        in-progress ``JobRequest``/``JobProcess`` rows instead.

        ``delay`` may be seconds (int), a timedelta offset, or an absolute
        datetime; anything else raises ``ValueError``.
        """
        from .models import JobRequest

        job_class_name = jobs_registry.get_job_class_name(self.__class__)

        if queue is None:
            queue = self.get_queue()

        # Producer span so queued jobs show up in tracing with messaging
        # semantic-convention attributes.
        with tracer.start_as_current_span(
            f"run_in_worker {job_class_name}",
            kind=SpanKind.PRODUCER,
            attributes={
                MESSAGING_SYSTEM: "plain.jobs",
                MESSAGING_OPERATION_TYPE: MessagingOperationTypeValues.SEND.value,
                MESSAGING_OPERATION_NAME: "run_in_worker",
                MESSAGING_DESTINATION_NAME: queue,
            },
        ) as span:
            try:
                # Try to automatically annotate the source of the job
                # (the caller's file:line, one frame up the stack).
                caller = inspect.stack()[1]
                source = f"{caller.filename}:{caller.lineno}"
                span.set_attributes(
                    {
                        CODE_FILEPATH: caller.filename,
                        CODE_LINENO: caller.lineno,
                    }
                )
            except (IndexError, AttributeError):
                # Stack introspection can fail (e.g. exotic runtimes);
                # fall back to an empty source rather than failing to queue.
                source = ""

            parameters = JobParameters.to_json(self._init_args, self._init_kwargs)

            if priority is None:
                priority = self.get_priority()

            if retries is None:
                retries = self.get_retries()

            # Normalize the three accepted delay forms into an absolute
            # start_at timestamp (or None for "run as soon as possible").
            if delay is None:
                start_at = None
            elif isinstance(delay, int):
                start_at = timezone.now() + datetime.timedelta(seconds=delay)
            elif isinstance(delay, datetime.timedelta):
                start_at = timezone.now() + delay
            elif isinstance(delay, datetime.datetime):
                start_at = delay
            else:
                raise ValueError(f"Invalid delay: {delay}")

            if unique_key is None:
                unique_key = self.get_unique_key()

            if unique_key:
                # Only need to look at in progress jobs
                # if we also have a unique key.
                # Otherwise it's up to the user to use _in_progress()
                if running := self._in_progress(unique_key):
                    span.set_attribute(ERROR_TYPE, "DuplicateJob")
                    return running

            # Is recording is not enough here... because we also record for summaries!

            # Capture current trace context
            current_span = trace.get_current_span()
            span_context = current_span.get_span_context()

            # Only include trace context if the span is being recorded (sampled)
            # This ensures jobs are only linked to traces that are actually being collected
            if current_span.is_recording() and span_context.is_valid:
                trace_id = f"0x{format_trace_id(span_context.trace_id)}"
                span_id = f"0x{format_span_id(span_context.span_id)}"
            else:
                trace_id = None
                span_id = None

            try:
                job_request = JobRequest(
                    job_class=job_class_name,
                    parameters=parameters,
                    start_at=start_at,
                    source=source,
                    queue=queue,
                    priority=priority,
                    retries=retries,
                    retry_attempt=retry_attempt,
                    unique_key=unique_key,
                    trace_id=trace_id,
                    span_id=span_id,
                )
                job_request.save(
                    clean_and_validate=False
                )  # So IntegrityError is raised on unique instead of potentially confusing ValidationError...

                span.set_attribute(
                    MESSAGING_MESSAGE_ID,
                    str(job_request.uuid),
                )

                # Add job UUID to current span for bidirectional linking
                span.set_attribute("job.uuid", str(job_request.uuid))
                span.set_status(trace.StatusCode.OK)

                return job_request
            except IntegrityError as e:
                # Another producer won the unique-key race; report and
                # return whatever is already in progress.
                span.set_attribute(ERROR_TYPE, "IntegrityError")
                span.set_status(trace.Status(trace.StatusCode.ERROR, "Duplicate job"))
                logger.warning("Job already in progress: %s", e)
                # Try to return the _in_progress list again
                return self._in_progress(unique_key)

    def _in_progress(self, unique_key: str) -> list[JobRequest | JobProcess]:
        """Get all JobRequests and JobProcess that are currently in progress, regardless of queue."""
        from .models import JobProcess, JobRequest

        job_class_name = jobs_registry.get_job_class_name(self.__class__)

        job_requests = JobRequest.query.filter(
            job_class=job_class_name,
            unique_key=unique_key,
        )

        jobs = JobProcess.query.filter(
            job_class=job_class_name,
            unique_key=unique_key,
        )

        return list(job_requests) + list(jobs)

    def get_unique_key(self) -> str:
        """
        A unique key to prevent duplicate jobs from being queued.
        Enabled by returning a non-empty string.

        Note that this is not a "once and only once" guarantee, but rather
        an "at least once" guarantee. Jobs should still be idempotent in case
        multiple instances are queued in a race condition.
        """
        return ""

    def get_queue(self) -> str:
        """Return the name of the queue this job is sent to by default."""
        return "default"

    def get_priority(self) -> int:
        """
        Return the default priority for this job.

        Higher numbers run first: 10 > 5 > 0 > -5 > -10
        - Use positive numbers for high priority jobs
        - Use negative numbers for low priority jobs
        - Default is 0
        """
        return 0

    def get_retries(self) -> int:
        """Return how many times this job may be retried after an error."""
        return 0

    def get_retry_delay(self, attempt: int) -> int:
        """
        Calculate a delay in seconds before the next retry attempt.

        On the first retry, attempt will be 1.
        """
        return 0
|
plain/jobs/middleware.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from collections.abc import Callable
|
|
4
|
+
from typing import TYPE_CHECKING
|
|
5
|
+
|
|
6
|
+
from plain.logs import app_logger
|
|
7
|
+
|
|
8
|
+
if TYPE_CHECKING:
|
|
9
|
+
from .models import JobProcess, JobResult
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class AppLoggerMiddleware:
    """Job middleware that tags app-logger output with job identifiers.

    While the wrapped job runs, every app-logger record carries the
    originating JobRequest UUID and the JobProcess UUID.
    """

    def __init__(self, run_job: Callable[[JobProcess], JobResult]) -> None:
        # The next callable in the middleware chain.
        self.run_job = run_job

    def __call__(self, job: JobProcess) -> JobResult:
        context = {
            "job_request_uuid": str(job.job_request_uuid),
            "job_process_uuid": str(job.uuid),
        }
        with app_logger.include_context(**context):
            return self.run_job(job)
|
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
# Generated by Plain 0.52.2 on 2025-07-08 01:17
|
|
2
|
+
|
|
3
|
+
import uuid
|
|
4
|
+
|
|
5
|
+
from plain import models
|
|
6
|
+
from plain.models import migrations
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Job, JobRequest, and
    # JobResult tables plus their indexes and unique constraints.
    # Do not hand-edit field definitions; they must match the generated state.
    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name="Job",
            fields=[
                ("id", models.PrimaryKeyField()),
                ("uuid", models.UUIDField(default=uuid.uuid4)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("started_at", models.DateTimeField(allow_null=True, required=False)),
                ("job_request_uuid", models.UUIDField()),
                ("job_class", models.CharField(max_length=255)),
                ("parameters", models.JSONField(allow_null=True, required=False)),
                ("priority", models.IntegerField(default=0)),
                ("source", models.TextField(required=False)),
                ("queue", models.CharField(default="default", max_length=255)),
                ("retries", models.IntegerField(default=0)),
                ("retry_attempt", models.IntegerField(default=0)),
                ("unique_key", models.CharField(max_length=255, required=False)),
            ],
            options={
                "ordering": ["-created_at"],
            },
        ),
        migrations.CreateModel(
            name="JobRequest",
            fields=[
                ("id", models.PrimaryKeyField()),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("uuid", models.UUIDField(default=uuid.uuid4)),
                ("job_class", models.CharField(max_length=255)),
                ("parameters", models.JSONField(allow_null=True, required=False)),
                ("priority", models.IntegerField(default=0)),
                ("source", models.TextField(required=False)),
                ("queue", models.CharField(default="default", max_length=255)),
                ("retries", models.IntegerField(default=0)),
                ("retry_attempt", models.IntegerField(default=0)),
                ("unique_key", models.CharField(max_length=255, required=False)),
                ("start_at", models.DateTimeField(allow_null=True, required=False)),
            ],
            options={
                "ordering": ["priority", "-created_at"],
            },
        ),
        migrations.CreateModel(
            name="JobResult",
            fields=[
                ("id", models.PrimaryKeyField()),
                ("uuid", models.UUIDField(default=uuid.uuid4)),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("job_uuid", models.UUIDField()),
                ("started_at", models.DateTimeField(allow_null=True, required=False)),
                ("ended_at", models.DateTimeField(allow_null=True, required=False)),
                ("error", models.TextField(required=False)),
                (
                    "status",
                    models.CharField(
                        choices=[
                            ("SUCCESSFUL", "Successful"),
                            ("ERRORED", "Errored"),
                            ("CANCELLED", "Cancelled"),
                            ("LOST", "Lost"),
                        ],
                        max_length=20,
                    ),
                ),
                ("job_request_uuid", models.UUIDField()),
                ("job_class", models.CharField(max_length=255)),
                ("parameters", models.JSONField(allow_null=True, required=False)),
                ("priority", models.IntegerField(default=0)),
                ("source", models.TextField(required=False)),
                ("queue", models.CharField(default="default", max_length=255)),
                ("retries", models.IntegerField(default=0)),
                ("retry_attempt", models.IntegerField(default=0)),
                ("unique_key", models.CharField(max_length=255, required=False)),
                (
                    "retry_job_request_uuid",
                    models.UUIDField(allow_null=True, required=False),
                ),
            ],
            options={
                "ordering": ["-created_at"],
            },
        ),
        # Indexes and constraints for Job.
        migrations.AddIndex(
            model_name="job",
            index=models.Index(
                fields=["created_at"], name="plainworker_created_a02317_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="job",
            index=models.Index(fields=["queue"], name="plainworker_queue_077806_idx"),
        ),
        migrations.AddIndex(
            model_name="job",
            index=models.Index(
                fields=["unique_key"], name="plainworker_unique__04d87b_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="job",
            index=models.Index(
                fields=["started_at"], name="plainworker_started_143df5_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="job",
            index=models.Index(
                fields=["job_class"], name="plainworker_job_cla_884b46_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="job",
            index=models.Index(
                fields=["job_request_uuid"], name="plainworker_job_req_db2681_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="job",
            index=models.Index(
                fields=["job_class", "unique_key"], name="job_class_unique_key"
            ),
        ),
        migrations.AddConstraint(
            model_name="job",
            constraint=models.UniqueConstraint(
                fields=("uuid",), name="plainworker_job_unique_uuid"
            ),
        ),
        # Indexes and constraints for JobRequest.
        migrations.AddIndex(
            model_name="jobrequest",
            index=models.Index(
                fields=["priority"], name="plainworker_priorit_785e73_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="jobrequest",
            index=models.Index(
                fields=["created_at"], name="plainworker_created_c81fe5_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="jobrequest",
            index=models.Index(fields=["queue"], name="plainworker_queue_2614aa_idx"),
        ),
        migrations.AddIndex(
            model_name="jobrequest",
            index=models.Index(
                fields=["start_at"], name="plainworker_start_a_4d6020_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="jobrequest",
            index=models.Index(
                fields=["unique_key"], name="plainworker_unique__21a534_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="jobrequest",
            index=models.Index(
                fields=["job_class"], name="plainworker_job_cla_3e7dea_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="jobrequest",
            index=models.Index(
                fields=["job_class", "unique_key"], name="job_request_class_unique_key"
            ),
        ),
        # Partial unique constraint: only first-attempt requests with a
        # non-empty unique_key must be unique per job_class.
        migrations.AddConstraint(
            model_name="jobrequest",
            constraint=models.UniqueConstraint(
                condition=models.Q(("retry_attempt", 0), ("unique_key__gt", "")),
                fields=("job_class", "unique_key"),
                name="plainworker_jobrequest_unique_job_class_key",
            ),
        ),
        migrations.AddConstraint(
            model_name="jobrequest",
            constraint=models.UniqueConstraint(
                fields=("uuid",), name="plainworker_jobrequest_unique_uuid"
            ),
        ),
        # Indexes and constraints for JobResult.
        migrations.AddIndex(
            model_name="jobresult",
            index=models.Index(
                fields=["created_at"], name="plainworker_created_6894c5_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="jobresult",
            index=models.Index(
                fields=["job_uuid"], name="plainworker_job_uui_8307d1_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="jobresult",
            index=models.Index(
                fields=["started_at"], name="plainworker_started_9bce76_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="jobresult",
            index=models.Index(
                fields=["ended_at"], name="plainworker_ended_a_63caaf_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="jobresult",
            index=models.Index(fields=["status"], name="plainworker_status_a7ca35_idx"),
        ),
        migrations.AddIndex(
            model_name="jobresult",
            index=models.Index(
                fields=["job_request_uuid"], name="plainworker_job_req_1e1bf2_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="jobresult",
            index=models.Index(
                fields=["job_class"], name="plainworker_job_cla_d138b5_idx"
            ),
        ),
        migrations.AddIndex(
            model_name="jobresult",
            index=models.Index(fields=["queue"], name="plainworker_queue_23d8fe_idx"),
        ),
        migrations.AddConstraint(
            model_name="jobresult",
            constraint=models.UniqueConstraint(
                fields=("uuid",), name="plainworker_jobresult_unique_uuid"
            ),
        ),
    ]
|