dj-queue 0.7.0__tar.gz → 0.8.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. {dj_queue-0.7.0 → dj_queue-0.8.0}/PKG-INFO +23 -23
  2. {dj_queue-0.7.0 → dj_queue-0.8.0}/README.md +22 -22
  3. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/admin.py +17 -7
  4. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/api.py +32 -40
  5. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/dashboard.py +6 -7
  6. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/exceptions.py +0 -4
  7. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/migrations/0008_remove_blockedexecution_dj_queue_bl_concurr_1ce730_idx_and_more.py +10 -14
  8. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/models/jobs.py +10 -93
  9. dj_queue-0.8.0/dj_queue/operations/_helpers.py +41 -0
  10. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/operations/concurrency.py +6 -30
  11. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/operations/jobs.py +106 -70
  12. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/operations/recurring.py +12 -14
  13. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/runtime/dispatcher.py +0 -3
  14. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/runtime/worker.py +4 -4
  15. {dj_queue-0.7.0 → dj_queue-0.8.0}/pyproject.toml +1 -1
  16. {dj_queue-0.7.0 → dj_queue-0.8.0}/LICENSE +0 -0
  17. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/__init__.py +0 -0
  18. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/apps.py +0 -0
  19. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/backend.py +0 -0
  20. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/config.py +0 -0
  21. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/contrib/__init__.py +0 -0
  22. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/contrib/asgi.py +0 -0
  23. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/contrib/gunicorn.py +0 -0
  24. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/contrib/prometheus.py +0 -0
  25. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/db.py +0 -0
  26. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/hooks.py +0 -0
  27. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/log.py +0 -0
  28. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/management/__init__.py +0 -0
  29. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/management/commands/__init__.py +0 -0
  30. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/management/commands/dj_queue.py +0 -0
  31. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/management/commands/dj_queue_health.py +0 -0
  32. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/management/commands/dj_queue_prune.py +0 -0
  33. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/migrations/0001_initial.py +0 -0
  34. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/migrations/0002_pause_semaphore.py +0 -0
  35. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/migrations/0003_recurringtask_recurringexecution.py +0 -0
  36. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/migrations/0004_dashboard.py +0 -0
  37. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/migrations/0005_remove_recurringexecution_dj_queue_recurring_executions_task_key_run_at_unique_and_more.py +0 -0
  38. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/migrations/0006_blockedexecution_dj_queue_bl_concurr_2d8393_idx_and_more.py +0 -0
  39. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/migrations/0007_recurringtask_next_run_at.py +0 -0
  40. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/migrations/__init__.py +0 -0
  41. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/models/__init__.py +0 -0
  42. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/models/recurring.py +0 -0
  43. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/models/runtime.py +0 -0
  44. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/observability.py +0 -0
  45. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/operations/__init__.py +0 -0
  46. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/operations/_insert.py +0 -0
  47. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/operations/cleanup.py +0 -0
  48. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/operations/queues.py +0 -0
  49. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/routers.py +0 -0
  50. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/runtime/__init__.py +0 -0
  51. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/runtime/base.py +0 -0
  52. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/runtime/connection_budget.py +0 -0
  53. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/runtime/errors.py +0 -0
  54. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/runtime/interruptible.py +0 -0
  55. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/runtime/notify.py +0 -0
  56. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/runtime/pidfile.py +0 -0
  57. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/runtime/pool.py +0 -0
  58. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/runtime/procline.py +0 -0
  59. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/runtime/scheduler.py +0 -0
  60. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/runtime/supervisor.py +0 -0
  61. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/templates/admin/dj_queue/_dashboard_process_rows.html +0 -0
  62. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/templates/admin/dj_queue/_dashboard_recurring_rows.html +0 -0
  63. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/templates/admin/dj_queue/_dashboard_section_table.html +0 -0
  64. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/templates/admin/dj_queue/_dashboard_semaphore_rows.html +0 -0
  65. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/templates/admin/dj_queue/_paginator.html +0 -0
  66. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/templates/admin/dj_queue/_queue_controls.html +0 -0
  67. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/templates/admin/dj_queue/_sortable_header_cells.html +0 -0
  68. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/templates/admin/dj_queue/change_form.html +0 -0
  69. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/templates/admin/dj_queue/change_list.html +0 -0
  70. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/templates/admin/dj_queue/dashboard.html +0 -0
  71. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/templates/admin/dj_queue/includes/fieldset.html +0 -0
  72. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/templates/admin/dj_queue/queue_jobs.html +0 -0
  73. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/templatetags/__init__.py +0 -0
  74. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/templatetags/dj_queue_admin.py +0 -0
  75. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/urls.py +0 -0
  76. {dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/views.py +0 -0
{dj_queue-0.7.0 → dj_queue-0.8.0}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dj-queue
- Version: 0.7.0
+ Version: 0.8.0
  Summary: Database-backed task queue backend for Django’s Tasks framework.
  License-Expression: MIT
  License-File: LICENSE
@@ -461,7 +461,7 @@ expecting one semaphore namespace per backend alias.
  tables:

  ```python
- from dj_queue.api import QueueInfo
+ from dj_queue.api import QueueInfo, claim_ready_jobs, execute_claimed_job

  orders = QueueInfo("orders")

@@ -472,6 +472,10 @@ print(orders.paused)
  orders.pause()
  orders.resume()
  orders.clear()
+
+ claimed_jobs = claim_ready_jobs(limit=1, queues=["orders"])
+ if claimed_jobs:
+     execute_claimed_job(claimed_jobs[0])
  ```

  Notes:
@@ -480,6 +484,8 @@ Notes:
  - pause rows are scoped per backend alias
  - `clear()` discards ready jobs only
  - pass `backend_alias=` when you want to target a non-default `TASKS` alias
+ - `claim_ready_jobs()` returns `ClaimedJob` objects, so inspect `claimed_job.job` for the persisted row
+ - the low-level claim/execute helpers are exposed on `dj_queue.api` for scripts and examples

  Operational commands:

@@ -506,32 +512,26 @@ queue database, including the exception class, message, and traceback.

  You can retry and discard failed jobs through Django admin, and any raw job
  detail page can enqueue a fresh copy of that stored job. The failed-job actions
- also stay available directly through the operations layer:
+ also stay available through the public API:

  ```python
- from dj_queue.operations.jobs import discard_failed_job, retry_failed_job
+ from dj_queue.api import (
+     discard_blocked_jobs,
+     discard_failed_job,
+     discard_failed_jobs,
+     discard_ready_jobs,
+     discard_scheduled_jobs,
+     retry_failed_job,
+     retry_failed_jobs,
+ )

  retry_failed_job(job_id)
  discard_failed_job(job_id)
- ```
-
- Model helpers are available too:
-
- ```python
- from dj_queue.exceptions import UndiscardableError
- from dj_queue.models import ClaimedExecution, FailedExecution
-
- failed = FailedExecution.objects.get(job_id=job_id)
- failed.retry()
- failed.discard()
-
- FailedExecution.retry_all(FailedExecution.objects.order_by("job_id"))
- FailedExecution.discard_all_in_batches()
-
- try:
-     ClaimedExecution.discard_all_in_batches()
- except UndiscardableError:
-     pass
+ retry_failed_jobs(job_ids=[job_id_a, job_id_b], batch_size=2)
+ discard_ready_jobs(job_ids=[ready_job_id], batch_size=1)
+ discard_failed_jobs(batch_size=500)
+ discard_scheduled_jobs(job_ids=[scheduled_job_id], batch_size=1)
+ discard_blocked_jobs(job_ids=[blocked_job_id], batch_size=1)
  ```

  Failures stay inspectable until you act on them.
{dj_queue-0.7.0 → dj_queue-0.8.0}/README.md

@@ -433,7 +433,7 @@ expecting one semaphore namespace per backend alias.
  tables:

  ```python
- from dj_queue.api import QueueInfo
+ from dj_queue.api import QueueInfo, claim_ready_jobs, execute_claimed_job

  orders = QueueInfo("orders")

@@ -444,6 +444,10 @@ print(orders.paused)
  orders.pause()
  orders.resume()
  orders.clear()
+
+ claimed_jobs = claim_ready_jobs(limit=1, queues=["orders"])
+ if claimed_jobs:
+     execute_claimed_job(claimed_jobs[0])
  ```

  Notes:
@@ -452,6 +456,8 @@ Notes:
  - pause rows are scoped per backend alias
  - `clear()` discards ready jobs only
  - pass `backend_alias=` when you want to target a non-default `TASKS` alias
+ - `claim_ready_jobs()` returns `ClaimedJob` objects, so inspect `claimed_job.job` for the persisted row
+ - the low-level claim/execute helpers are exposed on `dj_queue.api` for scripts and examples

  Operational commands:

@@ -478,32 +484,26 @@ queue database, including the exception class, message, and traceback.

  You can retry and discard failed jobs through Django admin, and any raw job
  detail page can enqueue a fresh copy of that stored job. The failed-job actions
- also stay available directly through the operations layer:
+ also stay available through the public API:

  ```python
- from dj_queue.operations.jobs import discard_failed_job, retry_failed_job
+ from dj_queue.api import (
+     discard_blocked_jobs,
+     discard_failed_job,
+     discard_failed_jobs,
+     discard_ready_jobs,
+     discard_scheduled_jobs,
+     retry_failed_job,
+     retry_failed_jobs,
+ )

  retry_failed_job(job_id)
  discard_failed_job(job_id)
- ```
-
- Model helpers are available too:
-
- ```python
- from dj_queue.exceptions import UndiscardableError
- from dj_queue.models import ClaimedExecution, FailedExecution
-
- failed = FailedExecution.objects.get(job_id=job_id)
- failed.retry()
- failed.discard()
-
- FailedExecution.retry_all(FailedExecution.objects.order_by("job_id"))
- FailedExecution.discard_all_in_batches()
-
- try:
-     ClaimedExecution.discard_all_in_batches()
- except UndiscardableError:
-     pass
+ retry_failed_jobs(job_ids=[job_id_a, job_id_b], batch_size=2)
+ discard_ready_jobs(job_ids=[ready_job_id], batch_size=1)
+ discard_failed_jobs(batch_size=500)
+ discard_scheduled_jobs(job_ids=[scheduled_job_id], batch_size=1)
+ discard_blocked_jobs(job_ids=[blocked_job_id], batch_size=1)
  ```

  Failures stay inspectable until you act on them.
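The claim/execute pair shown in the README composes into a small polling worker. A minimal sketch, assuming only the calls documented above (`claim_ready_jobs(limit=..., queues=...)` and `execute_claimed_job(...)`); the loop shape, batch size, and sleep interval are illustrative choices, not package defaults:

```python
import time

from dj_queue.api import claim_ready_jobs, execute_claimed_job


def drain(queues, *, batch=10, idle_sleep=1.0):
    """Claim and run ready jobs forever; the parameters here are hypothetical."""
    while True:
        claimed = claim_ready_jobs(limit=batch, queues=queues)
        if not claimed:
            time.sleep(idle_sleep)  # nothing ready, back off briefly
            continue
        for claimed_job in claimed:
            execute_claimed_job(claimed_job)  # runs one persisted job


# drain(["orders"])  # blocks forever; meant for scripts and experiments
```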
{dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/admin.py

@@ -28,7 +28,13 @@ from dj_queue.models import (
      RecurringTask,
      Semaphore,
  )
- from dj_queue.operations.jobs import dispatch_scheduled_job_now, enqueue_job_again
+ from dj_queue.operations.jobs import (
+     discard_failed_job,
+     dispatch_scheduled_job_now,
+     enqueue_job_again,
+     retry_failed_job,
+     retry_failed_jobs,
+ )


  class DjQueueFirstAdminSite(admin.AdminSite):
@@ -648,12 +654,12 @@ class JobAdmin(HiddenSidebarAdminMixin, admin.ModelAdmin):
              return self._current_object_redirect(obj, backend_alias=obj.backend_alias)

          if action == "retry":
-             obj.failed_execution.retry()
+             retry_failed_job(obj.failed_execution.job_id, backend_alias=obj.backend_alias)
              self.message_user(request, "Retried failed job", level=messages.SUCCESS)
              return self._current_object_redirect(obj, backend_alias=obj.backend_alias)

          if action == "discard":
-             obj.failed_execution.discard()
+             discard_failed_job(obj.failed_execution.job_id, backend_alias=obj.backend_alias)
              self.message_user(request, "Discarded failed job", level=messages.SUCCESS)
              return HttpResponseRedirect(self._changelist_url(backend_alias=obj.backend_alias))

@@ -676,14 +682,18 @@ class FailedExecutionAdmin(HiddenSidebarAdminMixin, admin.ModelAdmin):

      @admin.action(description="Retry selected failed jobs")
      def retry_jobs(self, request, queryset):
-         retried = FailedExecution.retry_all(queryset)
+         retried = retry_failed_jobs(
+             job_ids=list(queryset.values_list("job_id", flat=True)),
+             batch_size=queryset.count() or 1,
+             backend_alias=self._backend_alias(request),
+         )
          self.message_user(request, f"Retried {retried} failed jobs", level=messages.SUCCESS)

      @admin.action(description="Discard selected failed jobs")
      def discard_jobs(self, request, queryset):
          discarded = 0
          for execution in queryset.select_related("job"):
-             discarded += execution.discard()
+             discarded += discard_failed_job(execution.job_id, backend_alias=execution.job.backend_alias)
          self.message_user(request, f"Discarded {discarded} failed jobs", level=messages.SUCCESS)

      @admin.display(description="created at", ordering="created_at")
@@ -707,13 +717,13 @@ class FailedExecutionAdmin(HiddenSidebarAdminMixin, admin.ModelAdmin):

          if action == "retry":
              job_id = obj.job_id
-             obj.retry()
+             retry_failed_job(job_id, backend_alias=backend_alias)
              self.message_user(request, "Retried failed job", level=messages.SUCCESS)
              url = reverse("admin:dj_queue_job_change", args=[job_id])
              return HttpResponseRedirect(f"{url}?{urlencode({'backend': backend_alias})}")

          if action == "discard":
-             obj.discard()
+             discard_failed_job(obj.job_id, backend_alias=backend_alias)
              self.message_user(request, "Discarded failed job", level=messages.SUCCESS)
              return HttpResponseRedirect(self._changelist_url(backend_alias=backend_alias))

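For readers skimming the `retry_jobs`/`discard_jobs` changes above: `@admin.action` registers a bulk changelist action that receives the selected queryset, which is why both methods take `(self, request, queryset)`. A generic sketch of that Django pattern, with a hypothetical model field:

```python
from django.contrib import admin, messages


class ExampleAdmin(admin.ModelAdmin):
    actions = ["mark_reviewed"]

    @admin.action(description="Mark selected rows as reviewed")
    def mark_reviewed(self, request, queryset):
        updated = queryset.update(reviewed=True)  # "reviewed" is a hypothetical field
        self.message_user(request, f"Marked {updated} rows", level=messages.SUCCESS)
```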
{dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/api.py

@@ -3,10 +3,40 @@ from functools import partial
  from django.db.models.functions import Coalesce
  from django.db import transaction
  from django.utils import timezone
- from django.utils.module_loading import import_string

  from dj_queue.db import get_database_alias
  from dj_queue.models import Pause, ReadyExecution
+ from dj_queue.operations.jobs import (
+     ClaimedJob,
+     claim_ready_jobs,
+     discard_blocked_jobs,
+     discard_failed_job,
+     discard_failed_jobs,
+     discard_ready_jobs,
+     discard_scheduled_jobs,
+     execute_claimed_job,
+     retry_failed_job,
+     retry_failed_jobs,
+ )
+ from dj_queue.operations.queues import pause_queue, resume_queue
+ from dj_queue.operations.recurring import schedule_recurring_task, unschedule_recurring_task
+
+ __all__ = [
+     "ClaimedJob",
+     "QueueInfo",
+     "claim_ready_jobs",
+     "discard_blocked_jobs",
+     "discard_failed_job",
+     "discard_failed_jobs",
+     "discard_ready_jobs",
+     "discard_scheduled_jobs",
+     "enqueue_on_commit",
+     "execute_claimed_job",
+     "retry_failed_job",
+     "retry_failed_jobs",
+     "schedule_recurring_task",
+     "unschedule_recurring_task",
+ ]


  class QueueInfo:
@@ -44,11 +74,9 @@ class QueueInfo:
          )

      def pause(self):
-         pause_queue = import_string("dj_queue.operations.queues.pause_queue")
          pause_queue(self.queue_name, backend_alias=self.backend_alias)

      def resume(self):
-         resume_queue = import_string("dj_queue.operations.queues.resume_queue")
          resume_queue(self.queue_name, backend_alias=self.backend_alias)

      def clear(self, *, batch_size=500):
@@ -57,7 +85,7 @@ class QueueInfo:
              job_ids = list(self._ready_queryset().values_list("job_id", flat=True)[:batch_size])
              if not job_ids:
                  return deleted
-             deleted += _discard_ready_jobs(
+             deleted += discard_ready_jobs(
                  job_ids=job_ids,
                  batch_size=batch_size,
                  backend_alias=self.backend_alias,
@@ -86,41 +114,5 @@ class QueueInfo:
          )


- def _discard_ready_jobs(*, job_ids, batch_size, backend_alias):
-     discard_ready_jobs = import_string("dj_queue.operations.jobs.discard_ready_jobs")
-     return discard_ready_jobs(job_ids=job_ids, batch_size=batch_size, backend_alias=backend_alias)
-
-
  def enqueue_on_commit(task, *args, using=None, **kwargs):
      transaction.on_commit(partial(task.enqueue, *args, **kwargs), using=using)
-
-
- def schedule_recurring_task(
-     *,
-     key,
-     task_path,
-     schedule,
-     args=(),
-     kwargs=None,
-     queue_name="default",
-     priority=0,
-     description="",
-     backend_alias="default",
- ):
-     operation = import_string("dj_queue.operations.recurring.schedule_recurring_task")
-     return operation(
-         key=key,
-         task_path=task_path,
-         schedule=schedule,
-         args=args,
-         kwargs=kwargs,
-         queue_name=queue_name,
-         priority=priority,
-         description=description,
-         backend_alias=backend_alias,
-     )
-
-
- def unschedule_recurring_task(key, *, backend_alias="default"):
-     operation = import_string("dj_queue.operations.recurring.unschedule_recurring_task")
-     return operation(key, backend_alias=backend_alias)
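`enqueue_on_commit` survives the cleanup unchanged; it defers `task.enqueue(*args, **kwargs)` until the surrounding transaction commits, and the registered partial is dropped on rollback. A usage sketch, assuming a hypothetical `send_receipt` task and order form:

```python
from django.db import transaction

from dj_queue.api import enqueue_on_commit
from myapp.tasks import send_receipt  # hypothetical task


def place_order(order_form):
    with transaction.atomic():
        order = order_form.save()
        # Fires only if this atomic block commits; on rollback the
        # partial registered via transaction.on_commit is discarded.
        enqueue_on_commit(send_receipt, order.pk)
    return order
```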
{dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/dashboard.py

@@ -13,13 +13,14 @@ from dj_queue import observability
  from dj_queue.api import QueueInfo
  from dj_queue.config import load_backend_config
  from dj_queue.db import database_capabilities, get_database_alias
- from dj_queue.models import FailedExecution, Job
+ from dj_queue.models import Job
  from dj_queue.operations.jobs import (
      discard_blocked_jobs,
      discard_failed_jobs,
      discard_ready_jobs,
      discard_scheduled_jobs,
      enqueue_job_again,
+     retry_failed_jobs,
  )

  QUEUE_STATE_CONFIG = {
@@ -521,13 +522,11 @@ def apply_job_action(*, backend_alias, queue_name, state, action, job_ids):
          return f"discarded {deleted} blocked jobs from {queue_name}"

      if state == "failed" and action == "retry":
-         alias = get_database_alias(backend_alias)
-         queryset = FailedExecution.objects.using(alias).filter(
-             job_id__in=job_ids,
-             job__backend_alias=backend_alias,
-             job__queue_name=queue_name,
+         retried = retry_failed_jobs(
+             job_ids=job_ids,
+             batch_size=max(len(job_ids), 1),
+             backend_alias=backend_alias,
          )
-         retried = FailedExecution.retry_all(queryset.select_related("job"))
          return f"retried {retried} failed jobs from {queue_name}"

      if state == "failed" and action == "discard":
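Note the `batch_size=max(len(job_ids), 1)` guard above: it keeps the batch size positive even when `job_ids` is empty. The keyword-only signature from the hunk header makes calls read like this sketch (queue name and job IDs are placeholders):

```python
from dj_queue.dashboard import apply_job_action

message = apply_job_action(
    backend_alias="default",
    queue_name="orders",  # placeholder queue
    state="failed",
    action="retry",
    job_ids=[42, 43],  # placeholder job IDs
)
print(message)  # e.g. "retried 2 failed jobs from orders"
```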
{dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/exceptions.py

@@ -6,10 +6,6 @@ class EnqueueError(DjQueueError):
      pass


- class UndiscardableError(DjQueueError):
-     pass
-
-
  class AlreadyRecorded(DjQueueError):
      pass

{dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/migrations/0008_remove_blockedexecution_dj_queue_bl_concurr_1ce730_idx_and_more.py

@@ -98,68 +98,64 @@ class Migration(migrations.Migration):
              model_name="blockedexecution",
              index=models.Index(
                  fields=["backend_alias", "concurrency_key", "priority", "id"],
-                 name="dj_queue_bl_backend_conc_idx",
+                 name="djq_bl_b_conc_idx",
              ),
          ),
          migrations.AddIndex(
              model_name="blockedexecution",
              index=models.Index(
                  fields=["backend_alias", "expires_at", "concurrency_key"],
-                 name="dj_queue_bl_backend_exp_conc_idx",
+                 name="djq_bl_b_exp_conc_idx",
              ),
          ),
          migrations.AddIndex(
              model_name="blockedexecution",
              index=models.Index(
                  fields=["backend_alias", "concurrency_key", "-priority", "id"],
-                 name="dj_queue_bl_backend_conc_desc_idx",
+                 name="djq_bl_b_conc_d_idx",
              ),
          ),
          migrations.AddIndex(
              model_name="blockedexecution",
              index=models.Index(
                  fields=["backend_alias", "expires_at", "-priority", "id"],
-                 name="dj_queue_bl_backend_exp_desc_idx",
+                 name="djq_bl_b_exp_d_idx",
              ),
          ),
          migrations.AddIndex(
              model_name="readyexecution",
-             index=models.Index(
-                 fields=["backend_alias", "priority", "id"], name="dj_queue_re_backend_prio_idx"
-             ),
+             index=models.Index(fields=["backend_alias", "priority", "id"], name="djq_re_b_prio_idx"),
          ),
          migrations.AddIndex(
              model_name="readyexecution",
              index=models.Index(
                  fields=["backend_alias", "queue_name", "priority", "id"],
-                 name="dj_queue_re_backend_queue_idx",
+                 name="djq_re_b_queue_idx",
              ),
          ),
          migrations.AddIndex(
              model_name="readyexecution",
-             index=models.Index(
-                 fields=["backend_alias", "-priority", "id"], name="dj_queue_re_backend_prio_desc_idx"
-             ),
+             index=models.Index(fields=["backend_alias", "-priority", "id"], name="djq_re_b_prio_d_idx"),
          ),
          migrations.AddIndex(
              model_name="readyexecution",
              index=models.Index(
                  fields=["backend_alias", "queue_name", "-priority", "id"],
-                 name="dj_queue_re_backend_queue_desc_idx",
+                 name="djq_re_b_queue_d_idx",
              ),
          ),
          migrations.AddIndex(
              model_name="scheduledexecution",
              index=models.Index(
                  fields=["backend_alias", "scheduled_at", "priority", "id"],
-                 name="dj_queue_se_backend_due_idx",
+                 name="djq_se_b_due_idx",
              ),
          ),
          migrations.AddIndex(
              model_name="scheduledexecution",
              index=models.Index(
                  fields=["backend_alias", "scheduled_at", "-priority", "id"],
-                 name="dj_queue_se_backend_due_desc_idx",
+                 name="djq_se_b_due_d_idx",
              ),
          ),
      ]
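Several of the replaced index names were over Django's 30-character cap on explicit `models.Index` names (`dj_queue_re_backend_queue_desc_idx` is 34 characters), which is the likely reason the whole family moved to the terse `djq_*` scheme. A quick check that the replacements fit:

```python
# Every replacement name in migration 0008 fits Django's 30-character
# limit on models.Index names (Index.max_name_length).
new_names = [
    "djq_bl_b_conc_idx", "djq_bl_b_exp_conc_idx", "djq_bl_b_conc_d_idx",
    "djq_bl_b_exp_d_idx", "djq_re_b_prio_idx", "djq_re_b_queue_idx",
    "djq_re_b_prio_d_idx", "djq_re_b_queue_d_idx", "djq_se_b_due_idx",
    "djq_se_b_due_d_idx",
]
assert all(len(name) <= 30 for name in new_names)
```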
{dj_queue-0.7.0 → dj_queue-0.8.0}/dj_queue/models/jobs.py

@@ -3,10 +3,6 @@ import uuid
  from django.core.exceptions import ObjectDoesNotExist
  from django.db import models
  from django.db.models import Q
- from django.utils.module_loading import import_string
-
- from dj_queue.db import get_database_alias
- from dj_queue.exceptions import UndiscardableError

  JOB_STATUS_RELATIONS = (
      ("ready", "ready_execution"),
@@ -125,33 +121,21 @@ class ReadyExecution(models.Model):
      class Meta:
          db_table = "dj_queue_ready_executions"
          indexes = [
-             models.Index(
-                 fields=["backend_alias", "priority", "id"], name="dj_queue_re_backend_prio_idx"
-             ),
+             models.Index(fields=["backend_alias", "priority", "id"], name="djq_re_b_prio_idx"),
              models.Index(
                  fields=["backend_alias", "queue_name", "priority", "id"],
-                 name="dj_queue_re_backend_queue_idx",
+                 name="djq_re_b_queue_idx",
              ),
              models.Index(
                  fields=["backend_alias", "-priority", "id"],
-                 name="dj_queue_re_backend_prio_desc_idx",
+                 name="djq_re_b_prio_d_idx",
              ),
              models.Index(
                  fields=["backend_alias", "queue_name", "-priority", "id"],
-                 name="dj_queue_re_backend_queue_desc_idx",
+                 name="djq_re_b_queue_d_idx",
              ),
          ]

-     @classmethod
-     def discard_all_in_batches(cls, *, batch_size=500, backend_alias="default"):
-         operation = import_string("dj_queue.operations.jobs.discard_ready_jobs")
-         return _discard_jobs_for_state(
-             cls,
-             operation,
-             batch_size=batch_size,
-             backend_alias=backend_alias,
-         )
-

  class ScheduledExecution(models.Model):
      job = models.OneToOneField(
@@ -170,11 +154,11 @@ class ScheduledExecution(models.Model):
          indexes = [
              models.Index(
                  fields=["backend_alias", "scheduled_at", "priority", "id"],
-                 name="dj_queue_se_backend_due_idx",
+                 name="djq_se_b_due_idx",
              ),
              models.Index(
                  fields=["backend_alias", "scheduled_at", "-priority", "id"],
-                 name="dj_queue_se_backend_due_desc_idx",
+                 name="djq_se_b_due_d_idx",
              ),
          ]

@@ -198,10 +182,6 @@ class ClaimedExecution(models.Model):
          db_table = "dj_queue_claimed_executions"
          indexes = [models.Index(fields=["process", "job"])]

-     @classmethod
-     def discard_all_in_batches(cls, **_kwargs):
-         raise UndiscardableError("cannot discard in-progress jobs")
-

  class BlockedExecution(models.Model):
      job = models.OneToOneField(
@@ -221,32 +201,22 @@ class BlockedExecution(models.Model):
          indexes = [
              models.Index(
                  fields=["backend_alias", "concurrency_key", "priority", "id"],
-                 name="dj_queue_bl_backend_conc_idx",
+                 name="djq_bl_b_conc_idx",
              ),
              models.Index(
                  fields=["backend_alias", "expires_at", "concurrency_key"],
-                 name="dj_queue_bl_backend_exp_conc_idx",
+                 name="djq_bl_b_exp_conc_idx",
              ),
              models.Index(
                  fields=["backend_alias", "concurrency_key", "-priority", "id"],
-                 name="dj_queue_bl_backend_conc_desc_idx",
+                 name="djq_bl_b_conc_d_idx",
              ),
              models.Index(
                  fields=["backend_alias", "expires_at", "-priority", "id"],
-                 name="dj_queue_bl_backend_exp_desc_idx",
+                 name="djq_bl_b_exp_d_idx",
              ),
          ]

-     @classmethod
-     def discard_all_in_batches(cls, *, batch_size=500, backend_alias="default"):
-         operation = import_string("dj_queue.operations.jobs.discard_blocked_jobs")
-         return _discard_jobs_for_state(
-             cls,
-             operation,
-             batch_size=batch_size,
-             backend_alias=backend_alias,
-         )
-

  class FailedExecution(models.Model):
      job = models.OneToOneField(
@@ -262,56 +232,3 @@ class FailedExecution(models.Model):
      class Meta:
          db_table = "dj_queue_failed_executions"
          indexes = [models.Index(fields=["created_at", "job"])]
-
-     def retry(self):
-         return _retry_failed_job(self.job_id, backend_alias=self.job.backend_alias)
-
-     def discard(self):
-         return _discard_failed_job(self.job_id, backend_alias=self.job.backend_alias)
-
-     @classmethod
-     def retry_all(cls, queryset):
-         retried = 0
-         for execution in queryset.select_related("job"):
-             execution.retry()
-             retried += 1
-         return retried
-
-     @classmethod
-     def discard_all_in_batches(cls, *, batch_size=500, backend_alias="default"):
-         operation = import_string("dj_queue.operations.jobs.discard_failed_jobs")
-         return _discard_jobs_for_state(
-             cls,
-             operation,
-             batch_size=batch_size,
-             backend_alias=backend_alias,
-         )
-
-
- def _retry_failed_job(job_id, *, backend_alias):
-     operation = import_string("dj_queue.operations.jobs.retry_failed_job")
-     return operation(job_id, backend_alias=backend_alias)
-
-
- def _discard_failed_job(job_id, *, backend_alias):
-     operation = import_string("dj_queue.operations.jobs.discard_failed_job")
-     return operation(job_id, backend_alias=backend_alias)
-
-
- def _discard_jobs_for_state(model, operation, *, batch_size, backend_alias):
-     alias = get_database_alias(backend_alias)
-     deleted = 0
-     while True:
-         filter_kwargs = (
-             {"backend_alias": backend_alias}
-             if any(field.name == "backend_alias" for field in model._meta.fields)
-             else {"job__backend_alias": backend_alias}
-         )
-         job_ids = list(
-             model.objects.using(alias)
-             .filter(**filter_kwargs)
-             .values_list("job_id", flat=True)[:batch_size]
-         )
-         if not job_ids:
-             return deleted
-         deleted += operation(job_ids=job_ids, batch_size=batch_size, backend_alias=backend_alias)
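With the model helpers gone, 0.7.0 call sites need the operations-layer equivalents; the admin and dashboard hunks above show the package's own migration, and the mapping below sketches it for external code (`job_id`/`job_ids` are hypothetical placeholders). `UndiscardableError` disappears with them: claimed (in-progress) jobs simply have no discard entry point in the new API.

```python
from dj_queue.api import (
    discard_failed_job,
    discard_failed_jobs,
    retry_failed_job,
    retry_failed_jobs,
)

job_id, job_ids = 1, [1, 2]  # hypothetical primary keys

retry_failed_job(job_id, backend_alias="default")    # was failed.retry()
discard_failed_job(job_id, backend_alias="default")  # was failed.discard()
retry_failed_jobs(job_ids=job_ids, batch_size=len(job_ids))  # was FailedExecution.retry_all(...)
discard_failed_jobs(batch_size=500)  # was FailedExecution.discard_all_in_batches()
```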
dj_queue-0.8.0/dj_queue/operations/_helpers.py (new file)

@@ -0,0 +1,41 @@
+ import json
+
+ from dj_queue.db import database_capabilities
+ from dj_queue.exceptions import EnqueueError
+ from dj_queue.models import Pause
+
+
+ def _normalize_payload(args, kwargs):
+     try:
+         return json.loads(json.dumps({"args": list(args), "kwargs": dict(kwargs)}))
+     except (TypeError, ValueError) as exc:
+         raise EnqueueError("payload must be JSON round-trippable") from exc
+
+
+ def _task_option(task, name, default=None):
+     if hasattr(task, name):
+         return getattr(task, name)
+     return getattr(task.func, name, default)
+
+
+ def _lock_active_pauses(alias, backend_alias, queue_names=None):
+     queryset = Pause.objects.using(alias).select_for_update().filter(backend_alias=backend_alias)
+     if queue_names is not None:
+         active_queue_names = tuple(queue_name for queue_name in queue_names if queue_name)
+         if not active_queue_names:
+             return set()
+         queryset = queryset.filter(queue_name__in=active_queue_names)
+     return set(queryset.values_list("queue_name", flat=True))
+
+
+ def _consume_selected_rows(alias, model, rows):
+     if not database_capabilities(alias).uses_serialized_writes:
+         model.objects.using(alias).filter(pk__in=[row.pk for row in rows]).delete()
+         return rows
+
+     consumed_rows = []
+     for row in rows:
+         deleted, _ = model.objects.using(alias).filter(pk=row.pk).delete()
+         if deleted:
+             consumed_rows.append(row)
+     return consumed_rows
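`_normalize_payload` is the interesting helper here: the `json.dumps`/`json.loads` round trip both rejects anything JSON cannot represent and coerces tuples into plain lists, so stored payloads deserialize to exactly what a worker will later see. A standalone demonstration of the same pattern (deliberately not importing the private module):

```python
import datetime
import json


def normalize(args, kwargs):
    # Same round trip as _normalize_payload: non-JSON values raise TypeError.
    return json.loads(json.dumps({"args": list(args), "kwargs": dict(kwargs)}))


print(normalize((1, "a"), {"retry": True}))
# -> {'args': [1, 'a'], 'kwargs': {'retry': True}}

try:
    normalize((datetime.datetime.now(),), {})
except TypeError as exc:
    print("rejected:", exc)  # datetimes are not JSON-serializable
```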