dj-queue 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48) hide show
  1. dj_queue/__init__.py +0 -0
  2. dj_queue/admin.py +90 -0
  3. dj_queue/api.py +122 -0
  4. dj_queue/apps.py +6 -0
  5. dj_queue/backend.py +161 -0
  6. dj_queue/config.py +456 -0
  7. dj_queue/contrib/__init__.py +1 -0
  8. dj_queue/contrib/asgi.py +32 -0
  9. dj_queue/contrib/gunicorn.py +25 -0
  10. dj_queue/db.py +68 -0
  11. dj_queue/exceptions.py +26 -0
  12. dj_queue/hooks.py +86 -0
  13. dj_queue/log.py +27 -0
  14. dj_queue/management/__init__.py +1 -0
  15. dj_queue/management/commands/__init__.py +1 -0
  16. dj_queue/management/commands/dj_queue.py +39 -0
  17. dj_queue/management/commands/dj_queue_health.py +32 -0
  18. dj_queue/management/commands/dj_queue_prune.py +22 -0
  19. dj_queue/migrations/0001_initial.py +262 -0
  20. dj_queue/migrations/0002_pause_semaphore.py +52 -0
  21. dj_queue/migrations/0003_recurringtask_recurringexecution.py +73 -0
  22. dj_queue/migrations/__init__.py +0 -0
  23. dj_queue/models/__init__.py +24 -0
  24. dj_queue/models/jobs.py +328 -0
  25. dj_queue/models/recurring.py +51 -0
  26. dj_queue/models/runtime.py +55 -0
  27. dj_queue/operations/__init__.py +1 -0
  28. dj_queue/operations/cleanup.py +37 -0
  29. dj_queue/operations/concurrency.py +176 -0
  30. dj_queue/operations/jobs.py +637 -0
  31. dj_queue/operations/recurring.py +81 -0
  32. dj_queue/routers.py +26 -0
  33. dj_queue/runtime/__init__.py +1 -0
  34. dj_queue/runtime/base.py +198 -0
  35. dj_queue/runtime/dispatcher.py +78 -0
  36. dj_queue/runtime/errors.py +39 -0
  37. dj_queue/runtime/interruptible.py +46 -0
  38. dj_queue/runtime/notify.py +119 -0
  39. dj_queue/runtime/pidfile.py +39 -0
  40. dj_queue/runtime/pool.py +62 -0
  41. dj_queue/runtime/procline.py +11 -0
  42. dj_queue/runtime/scheduler.py +128 -0
  43. dj_queue/runtime/supervisor.py +460 -0
  44. dj_queue/runtime/worker.py +116 -0
  45. dj_queue-0.1.0.dist-info/METADATA +613 -0
  46. dj_queue-0.1.0.dist-info/RECORD +48 -0
  47. dj_queue-0.1.0.dist-info/WHEEL +4 -0
  48. dj_queue-0.1.0.dist-info/licenses/LICENSE +21 -0
dj_queue/__init__.py ADDED
File without changes
dj_queue/admin.py ADDED
@@ -0,0 +1,90 @@
1
+ import json
2
+
3
+ from django.contrib import admin, messages
4
+
5
+ from dj_queue.models import FailedExecution, Job, Pause, Process, RecurringTask, Semaphore
6
+
7
+
8
@admin.register(Job)
class JobAdmin(admin.ModelAdmin):
    """Read-only admin listing of queued jobs and their execution state."""

    list_display = ("id", "task_path", "queue_name", "priority", "status", "created_at")
    # Follow every one-to-one execution relation so each changelist page
    # resolves job state without extra per-row queries.
    list_select_related = (
        "ready_execution", "scheduled_execution", "claimed_execution",
        "blocked_execution", "failed_execution",
    )
    # Jobs are owned by the queue runtime; the admin only inspects them.
    readonly_fields = (
        "task_path", "queue_name", "priority", "payload", "backend_name",
        "scheduled_at", "concurrency_key", "finished_at", "return_value",
        "created_at", "updated_at",
    )
31
+
32
+
33
@admin.register(FailedExecution)
class FailedExecutionAdmin(admin.ModelAdmin):
    """Browse failed executions and re-enqueue them in bulk."""

    list_display = ("job", "exception_class", "message", "created_at")
    list_select_related = ("job",)
    actions = ("retry_jobs",)
    readonly_fields = ("job", "exception_class", "message", "traceback", "created_at")

    @admin.action(description="Retry selected failed jobs")
    def retry_jobs(self, request, queryset):
        # Retry semantics live on the model; the admin just reports the count.
        count = FailedExecution.retry_all(queryset)
        self.message_user(request, f"Retried {count} failed jobs", level=messages.SUCCESS)
44
+
45
+
46
@admin.register(Process)
class ProcessAdmin(admin.ModelAdmin):
    """Inspect queue processes (workers/supervisors) and their heartbeats."""

    list_display = ("name", "kind", "pid", "hostname", "metadata_json", "last_heartbeat_at")
    # Process rows are written by the runtime itself, never edited by hand.
    readonly_fields = (
        "kind", "pid", "hostname", "name", "metadata", "supervisor", "last_heartbeat_at",
    )

    @admin.display(description="metadata")
    def metadata_json(self, obj):
        # Sorted keys keep the rendered column stable across refreshes.
        return json.dumps(obj.metadata, sort_keys=True)
62
+
63
+
64
@admin.register(RecurringTask)
class RecurringTaskAdmin(admin.ModelAdmin):
    """Read-only view of recurring task definitions."""

    list_display = ("key", "task_path", "schedule", "queue_name", "priority", "static")
    readonly_fields = (
        "key", "task_path", "payload", "schedule", "queue_name",
        "priority", "description", "static", "created_at", "updated_at",
    )
79
+
80
+
81
@admin.register(Pause)
class PauseAdmin(admin.ModelAdmin):
    """Read-only view of per-queue pause markers."""

    list_display = ("queue_name", "created_at")
    readonly_fields = ("queue_name", "created_at")
85
+
86
+
87
@admin.register(Semaphore)
class SemaphoreAdmin(admin.ModelAdmin):
    """Read-only view of concurrency semaphores."""

    list_display = ("key", "value", "limit", "expires_at")
    readonly_fields = ("key", "value", "limit", "expires_at", "created_at", "updated_at")
dj_queue/api.py ADDED
@@ -0,0 +1,122 @@
1
+ from functools import partial
2
+
3
+ from django.db import transaction
4
+ from django.utils import timezone
5
+ from django.utils.module_loading import import_string
6
+
7
+ from dj_queue.db import get_database_alias
8
+ from dj_queue.log import log_event
9
+ from dj_queue.models import Pause, ReadyExecution, RecurringTask
10
+
11
+
12
class QueueInfo:
    """Introspect and control a single named queue on one backend alias."""

    def __init__(self, queue_name, *, backend_alias="default"):
        self.queue_name = queue_name
        self.backend_alias = backend_alias

    def _ready_queryset(self):
        # Every read/write is routed through the database mapped to this backend.
        db = get_database_alias(self.backend_alias)
        return ReadyExecution.objects.using(db).filter(queue_name=self.queue_name)

    @property
    def size(self):
        """Number of jobs currently ready to run on this queue."""
        return self._ready_queryset().count()

    @property
    def latency(self):
        """Age in seconds of the oldest ready job, or 0.0 when the queue is empty."""
        oldest_created = (
            self._ready_queryset()
            .order_by("created_at")
            .values_list("created_at", flat=True)
            .first()
        )
        if oldest_created is None:
            return 0.0
        return (timezone.now() - oldest_created).total_seconds()

    @property
    def paused(self):
        """True when a pause marker exists for this queue."""
        db = get_database_alias(self.backend_alias)
        return Pause.objects.using(db).filter(queue_name=self.queue_name).exists()

    def pause(self):
        """Create the pause marker (idempotent) and log the transition."""
        db = get_database_alias(self.backend_alias)
        Pause.objects.using(db).get_or_create(queue_name=self.queue_name)
        log_event("queue.paused", backend_alias=self.backend_alias, queue_name=self.queue_name)

    def resume(self):
        """Remove the pause marker; log only if one was actually deleted."""
        db = get_database_alias(self.backend_alias)
        removed, _ = Pause.objects.using(db).filter(queue_name=self.queue_name).delete()
        if removed:
            log_event("queue.resumed", backend_alias=self.backend_alias, queue_name=self.queue_name)

    def clear(self, *, batch_size=500):
        """Discard every ready job in batches; returns the number discarded."""
        total = 0
        while batch := list(self._ready_queryset().values_list("job_id", flat=True)[:batch_size]):
            total += _discard_ready_jobs(
                job_ids=batch,
                batch_size=batch_size,
                backend_alias=self.backend_alias,
            )
        return total

    @classmethod
    def all(cls, *, backend_alias="default"):
        """Return a QueueInfo for every queue name that has ready jobs."""
        db = get_database_alias(backend_alias)
        names = (
            ReadyExecution.objects.using(db)
            .order_by("queue_name")
            .values_list(
                "queue_name",
                flat=True,
            )
            .distinct()
        )
        return [cls(name, backend_alias=backend_alias) for name in names]
75
+
76
+
77
def _discard_ready_jobs(*, job_ids, batch_size, backend_alias):
    # Resolved lazily by dotted path to avoid a circular import with
    # dj_queue.operations.jobs, which itself imports from this module's package.
    op = import_string("dj_queue.operations.jobs.discard_ready_jobs")
    return op(job_ids=job_ids, batch_size=batch_size, backend_alias=backend_alias)
80
+
81
+
82
def enqueue_on_commit(task, *args, using=None, **kwargs):
    """Enqueue *task* only after the current transaction on *using* commits."""
    callback = partial(task.enqueue, *args, **kwargs)
    transaction.on_commit(callback, using=using)
84
+
85
+
86
def schedule_recurring_task(
    *,
    key,
    task_path,
    schedule,
    args=(),
    kwargs=None,
    queue_name="default",
    priority=0,
    description="",
    backend_alias="default",
):
    """Create or update the non-static recurring task identified by *key*.

    Returns the persisted RecurringTask row.
    """
    db = get_database_alias(backend_alias)
    call_kwargs = dict(kwargs) if kwargs is not None else {}

    defaults = {
        "task_path": task_path,
        "payload": {"args": list(args), "kwargs": call_kwargs},
        "schedule": schedule,
        "queue_name": queue_name,
        "priority": priority,
        "description": description,
        # Tasks scheduled through this API are always dynamic (non-static).
        "static": False,
    }
    task, _created = RecurringTask.objects.using(db).update_or_create(key=key, defaults=defaults)
    return task
115
+
116
+
117
def unschedule_recurring_task(key, *, backend_alias="default"):
    """Delete the non-static recurring task *key*; return how many rows were removed.

    Static (code-declared) tasks are intentionally excluded. The count is
    taken from the single ``delete()`` call instead of a separate ``count()``
    query, which removes a race where rows change between the two queries and
    saves one database round-trip.
    """
    alias = get_database_alias(backend_alias)
    _, per_model = RecurringTask.objects.using(alias).filter(key=key, static=False).delete()
    # delete() may cascade to related rows; report only RecurringTask deletions,
    # matching what the old count()-then-delete() implementation returned.
    return per_model.get(RecurringTask._meta.label, 0)
dj_queue/apps.py ADDED
@@ -0,0 +1,6 @@
1
+ from django.apps import AppConfig
2
+
3
+
4
class DjQueueConfig(AppConfig):
    """Django application configuration for the dj_queue package."""

    verbose_name = "dj queue"
    name = "dj_queue"
dj_queue/backend.py ADDED
@@ -0,0 +1,161 @@
1
+ from asgiref.sync import sync_to_async
2
+ from django.db import close_old_connections
3
+ from django.tasks import TaskResult, TaskResultStatus
4
+ from django.tasks.backends.base import BaseTaskBackend
5
+ from django.tasks.base import TaskError
6
+ from django.tasks.exceptions import TaskResultDoesNotExist
7
+ from django.utils.module_loading import import_string
8
+
9
+ from dj_queue.db import get_database_alias
10
+ from dj_queue.models import Job
11
+ from dj_queue.operations.jobs import enqueue_job_with_dispatch, enqueue_jobs_bulk
12
+
13
+
14
class DjQueueBackend(BaseTaskBackend):
    """django.tasks backend that persists and dispatches jobs through the ORM."""

    supports_async_task = True
    supports_defer = True
    supports_get_result = True
    supports_priority = True

    def enqueue(self, task, args, kwargs):
        """Validate and persist a single task call, returning its TaskResult."""
        self.validate_task(task)
        job, dispatched_as = enqueue_job_with_dispatch(task, args, kwargs, backend_alias=self.alias)
        return _task_result_from_enqueued_job(job, task, dispatched_as)

    async def aenqueue(self, task, args, kwargs):
        # Run the sync path on a worker thread; the wrapper resets stale
        # DB connections once the call finishes.
        call = sync_to_async(_async_backend_call, thread_sensitive=True)
        return await call(self.enqueue, task=task, args=args, kwargs=kwargs)

    def enqueue_all(self, task_calls):
        """Validate each call while collecting it, then persist all in one bulk pass."""
        validated = []
        for task, args, kwargs in task_calls:
            self.validate_task(task)
            validated.append((task, args, kwargs))

        return [
            _task_result_from_enqueued_job(job, task, dispatched_as)
            for job, task, dispatched_as in enqueue_jobs_bulk(validated, backend_alias=self.alias)
        ]

    def get_result(self, result_id):
        """Load the job with primary key *result_id* and map it to a TaskResult.

        Raises TaskResultDoesNotExist when no such job exists.
        """
        db = get_database_alias(self.alias)
        # select_related pulls every execution-state relation in one query.
        queryset = Job.objects.using(db).select_related(
            "ready_execution",
            "scheduled_execution",
            "claimed_execution__process",
            "blocked_execution",
            "failed_execution",
        )
        try:
            job = queryset.get(pk=result_id)
        except Job.DoesNotExist as exc:
            raise TaskResultDoesNotExist(str(result_id)) from exc
        return _task_result_from_job(job)

    async def aget_result(self, result_id):
        call = sync_to_async(_async_backend_call, thread_sensitive=True)
        return await call(self.get_result, result_id=result_id)
69
+
70
+
71
def _async_backend_call(method, /, **call_kwargs):
    """Invoke *method* and always reset aged/broken DB connections afterwards."""
    try:
        return method(**call_kwargs)
    finally:
        # sync_to_async worker threads can hold stale connections; drop them
        # whether the call succeeded or raised.
        close_old_connections()
76
+
77
+
78
def _task_result_from_job(job):
    """Translate a persisted Job row into a django.tasks TaskResult.

    The job's related executions (failed/claimed/...) determine the reported
    status. Branch order matters: failed wins over claimed, which wins over
    finished; a job matching none of them is reported as READY.
    """
    # Rebuild the task object from its dotted path; re-apply the enqueue-time
    # options when the task supports `.using(...)` (duck-typed check).
    task = import_string(job.task_path)
    if hasattr(task, "using"):
        task = task.using(
            priority=job.priority,
            queue_name=job.queue_name,
            run_after=job.scheduled_at,
            backend=job.backend_name,
        )

    # Defaults describe a job that is still waiting to run.
    status = TaskResultStatus.READY
    started_at = None
    finished_at = job.finished_at
    last_attempted_at = None
    errors = []
    worker_ids = []

    if job.failed:
        status = TaskResultStatus.FAILED
        # The failed execution's creation time doubles as finish/attempt time.
        finished_at = job.failed_execution.created_at
        last_attempted_at = job.failed_execution.created_at
        errors = [
            TaskError(
                exception_class_path=job.failed_execution.exception_class,
                traceback=job.failed_execution.traceback,
            )
        ]
    elif job.claimed:
        status = TaskResultStatus.RUNNING
        started_at = job.claimed_execution.created_at
        last_attempted_at = job.claimed_execution.created_at
        # process_id check avoids an extra query / AttributeError when the
        # claiming process row is absent.
        if job.claimed_execution.process_id is not None:
            worker_ids = [job.claimed_execution.process.name]
    elif job.finished:
        status = TaskResultStatus.SUCCESSFUL

    result = TaskResult(
        task=task,
        id=str(job.id),
        status=status,
        enqueued_at=job.created_at,
        started_at=started_at,
        finished_at=finished_at,
        last_attempted_at=last_attempted_at,
        args=job.payload.get("args", []),
        kwargs=job.payload.get("kwargs", {}),
        backend=job.backend_name,
        errors=errors,
        worker_ids=worker_ids,
    )
    if status == TaskResultStatus.SUCCESSFUL:
        # TaskResult is immutable; object.__setattr__ is the sanctioned
        # backdoor for attaching the stored return value.
        object.__setattr__(result, "_return_value", job.return_value)
    return result
131
+
132
+
133
def _task_result_from_enqueued_job(job, task, dispatched_as):
    """Build the TaskResult handed back to the caller immediately after enqueue."""
    # Re-apply enqueue-time options when the task supports `.using(...)`.
    if hasattr(task, "using"):
        task = task.using(
            priority=job.priority,
            queue_name=job.queue_name,
            run_after=job.scheduled_at,
            backend=job.backend_name,
        )

    # A "discarded" dispatch is reported as already successful, carrying the
    # job's finish time; every other dispatch outcome is reported as READY.
    if dispatched_as == "discarded":
        status = TaskResultStatus.SUCCESSFUL
        finished_at = job.finished_at
    else:
        status = TaskResultStatus.READY
        finished_at = None

    payload = job.payload
    result = TaskResult(
        task=task,
        id=str(job.id),
        status=status,
        enqueued_at=job.created_at,
        started_at=None,
        finished_at=finished_at,
        last_attempted_at=None,
        args=payload.get("args", []),
        kwargs=payload.get("kwargs", {}),
        backend=job.backend_name,
        errors=[],
        worker_ids=[],
    )
    if status == TaskResultStatus.SUCCESSFUL:
        # TaskResult is immutable; attach the stored return value via the
        # object.__setattr__ backdoor.
        object.__setattr__(result, "_return_value", job.return_value)
    return result