dj-queue 0.8.0__tar.gz → 0.8.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. {dj_queue-0.8.0 → dj_queue-0.8.1}/PKG-INFO +1 -1
  2. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/operations/concurrency.py +57 -4
  3. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/operations/jobs.py +55 -83
  4. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/runtime/base.py +2 -1
  5. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/runtime/supervisor.py +12 -1
  6. {dj_queue-0.8.0 → dj_queue-0.8.1}/pyproject.toml +1 -1
  7. {dj_queue-0.8.0 → dj_queue-0.8.1}/LICENSE +0 -0
  8. {dj_queue-0.8.0 → dj_queue-0.8.1}/README.md +0 -0
  9. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/__init__.py +0 -0
  10. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/admin.py +0 -0
  11. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/api.py +0 -0
  12. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/apps.py +0 -0
  13. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/backend.py +0 -0
  14. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/config.py +0 -0
  15. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/contrib/__init__.py +0 -0
  16. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/contrib/asgi.py +0 -0
  17. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/contrib/gunicorn.py +0 -0
  18. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/contrib/prometheus.py +0 -0
  19. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/dashboard.py +0 -0
  20. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/db.py +0 -0
  21. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/exceptions.py +0 -0
  22. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/hooks.py +0 -0
  23. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/log.py +0 -0
  24. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/management/__init__.py +0 -0
  25. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/management/commands/__init__.py +0 -0
  26. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/management/commands/dj_queue.py +0 -0
  27. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/management/commands/dj_queue_health.py +0 -0
  28. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/management/commands/dj_queue_prune.py +0 -0
  29. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/migrations/0001_initial.py +0 -0
  30. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/migrations/0002_pause_semaphore.py +0 -0
  31. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/migrations/0003_recurringtask_recurringexecution.py +0 -0
  32. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/migrations/0004_dashboard.py +0 -0
  33. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/migrations/0005_remove_recurringexecution_dj_queue_recurring_executions_task_key_run_at_unique_and_more.py +0 -0
  34. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/migrations/0006_blockedexecution_dj_queue_bl_concurr_2d8393_idx_and_more.py +0 -0
  35. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/migrations/0007_recurringtask_next_run_at.py +0 -0
  36. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/migrations/0008_remove_blockedexecution_dj_queue_bl_concurr_1ce730_idx_and_more.py +0 -0
  37. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/migrations/__init__.py +0 -0
  38. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/models/__init__.py +0 -0
  39. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/models/jobs.py +0 -0
  40. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/models/recurring.py +0 -0
  41. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/models/runtime.py +0 -0
  42. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/observability.py +0 -0
  43. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/operations/__init__.py +0 -0
  44. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/operations/_helpers.py +0 -0
  45. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/operations/_insert.py +0 -0
  46. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/operations/cleanup.py +0 -0
  47. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/operations/queues.py +0 -0
  48. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/operations/recurring.py +0 -0
  49. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/routers.py +0 -0
  50. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/runtime/__init__.py +0 -0
  51. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/runtime/connection_budget.py +0 -0
  52. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/runtime/dispatcher.py +0 -0
  53. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/runtime/errors.py +0 -0
  54. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/runtime/interruptible.py +0 -0
  55. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/runtime/notify.py +0 -0
  56. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/runtime/pidfile.py +0 -0
  57. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/runtime/pool.py +0 -0
  58. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/runtime/procline.py +0 -0
  59. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/runtime/scheduler.py +0 -0
  60. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/runtime/worker.py +0 -0
  61. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/templates/admin/dj_queue/_dashboard_process_rows.html +0 -0
  62. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/templates/admin/dj_queue/_dashboard_recurring_rows.html +0 -0
  63. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/templates/admin/dj_queue/_dashboard_section_table.html +0 -0
  64. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/templates/admin/dj_queue/_dashboard_semaphore_rows.html +0 -0
  65. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/templates/admin/dj_queue/_paginator.html +0 -0
  66. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/templates/admin/dj_queue/_queue_controls.html +0 -0
  67. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/templates/admin/dj_queue/_sortable_header_cells.html +0 -0
  68. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/templates/admin/dj_queue/change_form.html +0 -0
  69. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/templates/admin/dj_queue/change_list.html +0 -0
  70. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/templates/admin/dj_queue/dashboard.html +0 -0
  71. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/templates/admin/dj_queue/includes/fieldset.html +0 -0
  72. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/templates/admin/dj_queue/queue_jobs.html +0 -0
  73. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/templatetags/__init__.py +0 -0
  74. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/templatetags/dj_queue_admin.py +0 -0
  75. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/urls.py +0 -0
  76. {dj_queue-0.8.0 → dj_queue-0.8.1}/dj_queue/views.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: dj-queue
3
- Version: 0.8.0
3
+ Version: 0.8.1
4
4
  Summary: Database-backed task queue backend for Django’s Tasks framework.
5
5
  License-Expression: MIT
6
6
  License-File: LICENSE
@@ -1,6 +1,6 @@
1
1
  from datetime import timedelta
2
2
 
3
- from django.db import transaction
3
+ from django.db import connections, transaction
4
4
  from django.db.models import F
5
5
  from django.utils import timezone
6
6
  from django.utils.module_loading import import_string
@@ -29,6 +29,16 @@ def semaphore_acquire(
29
29
  alias = get_database_alias(backend_alias)
30
30
  now = timezone.now()
31
31
  expires_at = now + timedelta(seconds=duration_seconds)
32
+ backend_family = database_capabilities(alias).backend_family
33
+
34
+ if backend_family in {"mysql", "mariadb"}:
35
+ return _mysql_family_semaphore_acquire(
36
+ alias,
37
+ key,
38
+ limit=limit,
39
+ expires_at=expires_at,
40
+ now=now,
41
+ )
32
42
 
33
43
  with transaction.atomic(using=alias):
34
44
  if create_ignore_conflicts(
@@ -41,7 +51,6 @@ def semaphore_acquire(
41
51
  ):
42
52
  return True
43
53
 
44
- # mysql-family backends can deadlock if a skipped insert and row lock happen in one tx
45
54
  with transaction.atomic(using=alias):
46
55
  updated = (
47
56
  Semaphore.objects.using(alias)
@@ -55,6 +64,43 @@ def semaphore_acquire(
55
64
  return updated > 0
56
65
 
57
66
 
67
+ def _mysql_family_semaphore_acquire(alias, key, *, limit, expires_at, now):
68
+ connection = connections[alias]
69
+ table = connection.ops.quote_name(Semaphore._meta.db_table)
70
+ key_column = connection.ops.quote_name("key")
71
+ value_column = connection.ops.quote_name("value")
72
+ limit_column = connection.ops.quote_name("limit")
73
+ expires_at_column = connection.ops.quote_name("expires_at")
74
+ created_at_column = connection.ops.quote_name("created_at")
75
+ updated_at_column = connection.ops.quote_name("updated_at")
76
+
77
+ # one upsert avoids mysql-family deadlocks from mixing ignored inserts and follow-up updates
78
+ with connection.cursor() as cursor:
79
+ cursor.execute(
80
+ f"""
81
+ INSERT INTO {table} (
82
+ {key_column},
83
+ {value_column},
84
+ {limit_column},
85
+ {expires_at_column},
86
+ {created_at_column},
87
+ {updated_at_column}
88
+ )
89
+ VALUES (%s, %s, %s, %s, %s, %s)
90
+ ON DUPLICATE KEY UPDATE
91
+ {expires_at_column} = IF({value_column} > 0, %s, {expires_at_column}),
92
+ {updated_at_column} = IF({value_column} > 0, %s, {updated_at_column}),
93
+ {value_column} = IF(
94
+ {value_column} > 0,
95
+ LAST_INSERT_ID({value_column}) - 1,
96
+ LAST_INSERT_ID(0) + {value_column}
97
+ )
98
+ """,
99
+ [key, limit - 1, limit, expires_at, now, now, expires_at, now],
100
+ )
101
+ return cursor.lastrowid != 0
102
+
103
+
58
104
  def semaphore_release(key, *, duration_seconds, backend_alias="default"):
59
105
  alias = get_database_alias(backend_alias)
60
106
  expires_at = timezone.now() + timedelta(seconds=duration_seconds)
@@ -144,16 +190,23 @@ def unblock_next_blocked_job(
144
190
 
145
191
  def cleanup_expired_semaphores(*, backend_alias="default"):
146
192
  alias = get_database_alias(backend_alias)
147
- active_concurrency_keys = (
193
+ claimed_concurrency_keys = (
148
194
  ClaimedExecution.objects.using(alias)
149
195
  .exclude(job__concurrency_key__isnull=True)
150
196
  .exclude(job__concurrency_key="")
151
197
  .values_list("job__concurrency_key", flat=True)
152
198
  )
199
+ ready_concurrency_keys = (
200
+ ReadyExecution.objects.using(alias)
201
+ .exclude(job__concurrency_key__isnull=True)
202
+ .exclude(job__concurrency_key="")
203
+ .values_list("job__concurrency_key", flat=True)
204
+ )
153
205
  queryset = (
154
206
  Semaphore.objects.using(alias)
155
207
  .filter(expires_at__lte=timezone.now())
156
- .exclude(key__in=active_concurrency_keys)
208
+ .exclude(key__in=claimed_concurrency_keys)
209
+ .exclude(key__in=ready_concurrency_keys)
157
210
  )
158
211
  deleted = queryset.count()
159
212
  if not deleted:
@@ -595,113 +595,85 @@ def enqueue_job_again(job_id, *, backend_alias="default", run_after=_KEEP_RUN_AF
595
595
  return job
596
596
 
597
597
 
598
- def discard_failed_jobs(*, job_ids=None, batch_size=500, backend_alias="default"):
598
+ def _discard_state_jobs(
599
+ model,
600
+ reason,
601
+ *,
602
+ job_ids=None,
603
+ batch_size=500,
604
+ backend_alias="default",
605
+ release_concurrency=False,
606
+ ):
599
607
  alias = get_database_alias(backend_alias)
600
608
  config = load_backend_config(backend_alias)
601
609
 
602
610
  with transaction.atomic(using=alias):
603
- queryset = (
604
- FailedExecution.objects.using(alias).filter(job__backend_alias=backend_alias).order_by("id")
605
- )
611
+ if model is FailedExecution:
612
+ queryset = model.objects.using(alias).filter(job__backend_alias=backend_alias).order_by("id")
613
+ else:
614
+ queryset = model.objects.using(alias).filter(backend_alias=backend_alias).order_by("id")
615
+
606
616
  if job_ids is not None:
607
617
  queryset = queryset.filter(job_id__in=job_ids)
608
- failed_rows = list(
609
- locked_queryset(queryset, use_skip_locked=config.use_skip_locked)[:batch_size]
610
- )
611
- if not failed_rows:
618
+ rows = list(locked_queryset(queryset, use_skip_locked=config.use_skip_locked)[:batch_size])
619
+ if not rows:
612
620
  return 0
613
621
 
614
- job_ids = [row.job_id for row in failed_rows]
615
- jobs_by_id = {job.id: job for job in Job.objects.using(alias).filter(pk__in=job_ids)}
616
- jobs = [jobs_by_id[job_id] for job_id in job_ids]
617
- Job.objects.using(alias).filter(pk__in=[row.job_id for row in failed_rows]).delete()
622
+ row_job_ids = [row.job_id for row in rows]
623
+ jobs_by_id = {job.id: job for job in Job.objects.using(alias).filter(pk__in=row_job_ids)}
624
+ jobs = [jobs_by_id[job_id] for job_id in row_job_ids]
625
+ Job.objects.using(alias).filter(pk__in=row_job_ids).delete()
618
626
 
619
627
  for job in jobs:
620
- log_event("job.discarded", job_id=str(job.id), reason="failed")
628
+ if release_concurrency:
629
+ _release_concurrency_slot(job)
630
+ log_event("job.discarded", job_id=str(job.id), reason=reason)
621
631
  return len(jobs)
622
632
 
623
633
 
634
+ def discard_failed_jobs(*, job_ids=None, batch_size=500, backend_alias="default"):
635
+ return _discard_state_jobs(
636
+ FailedExecution,
637
+ "failed",
638
+ job_ids=job_ids,
639
+ batch_size=batch_size,
640
+ backend_alias=backend_alias,
641
+ )
642
+
643
+
624
644
  def discard_failed_job(job_id, *, backend_alias="default"):
625
645
  return discard_failed_jobs(job_ids=[job_id], batch_size=1, backend_alias=backend_alias)
626
646
 
627
647
 
628
648
  def discard_ready_jobs(*, job_ids=None, batch_size=500, backend_alias="default"):
629
- alias = get_database_alias(backend_alias)
630
- config = load_backend_config(backend_alias)
631
-
632
- with transaction.atomic(using=alias):
633
- queryset = (
634
- ReadyExecution.objects.using(alias).filter(backend_alias=backend_alias).order_by("id")
635
- )
636
- if job_ids is not None:
637
- queryset = queryset.filter(job_id__in=job_ids)
638
- ready_rows = list(
639
- locked_queryset(queryset, use_skip_locked=config.use_skip_locked)[:batch_size]
640
- )
641
- if not ready_rows:
642
- return 0
643
-
644
- job_ids = [row.job_id for row in ready_rows]
645
- jobs_by_id = {job.id: job for job in Job.objects.using(alias).filter(pk__in=job_ids)}
646
- jobs = [jobs_by_id[job_id] for job_id in job_ids]
647
- Job.objects.using(alias).filter(pk__in=[row.job_id for row in ready_rows]).delete()
648
-
649
- for job in jobs:
650
- _release_concurrency_slot(job)
651
- log_event("job.discarded", job_id=str(job.id), reason="ready")
652
- return len(jobs)
649
+ return _discard_state_jobs(
650
+ ReadyExecution,
651
+ "ready",
652
+ job_ids=job_ids,
653
+ batch_size=batch_size,
654
+ backend_alias=backend_alias,
655
+ release_concurrency=True,
656
+ )
653
657
 
654
658
 
655
659
  def discard_scheduled_jobs(*, job_ids=None, batch_size=500, backend_alias="default"):
656
- alias = get_database_alias(backend_alias)
657
- config = load_backend_config(backend_alias)
658
-
659
- with transaction.atomic(using=alias):
660
- queryset = (
661
- ScheduledExecution.objects.using(alias).filter(backend_alias=backend_alias).order_by("id")
662
- )
663
- if job_ids is not None:
664
- queryset = queryset.filter(job_id__in=job_ids)
665
- scheduled_rows = list(
666
- locked_queryset(queryset, use_skip_locked=config.use_skip_locked)[:batch_size]
667
- )
668
- if not scheduled_rows:
669
- return 0
670
-
671
- job_ids = [row.job_id for row in scheduled_rows]
672
- jobs_by_id = {job.id: job for job in Job.objects.using(alias).filter(pk__in=job_ids)}
673
- jobs = [jobs_by_id[job_id] for job_id in job_ids]
674
- Job.objects.using(alias).filter(pk__in=[row.job_id for row in scheduled_rows]).delete()
675
-
676
- for job in jobs:
677
- log_event("job.discarded", job_id=str(job.id), reason="scheduled")
678
- return len(jobs)
660
+ return _discard_state_jobs(
661
+ ScheduledExecution,
662
+ "scheduled",
663
+ job_ids=job_ids,
664
+ batch_size=batch_size,
665
+ backend_alias=backend_alias,
666
+ )
679
667
 
680
668
 
681
669
  def discard_blocked_jobs(*, job_ids=None, batch_size=500, backend_alias="default"):
682
- alias = get_database_alias(backend_alias)
683
- config = load_backend_config(backend_alias)
684
-
685
- with transaction.atomic(using=alias):
686
- queryset = (
687
- BlockedExecution.objects.using(alias).filter(backend_alias=backend_alias).order_by("id")
688
- )
689
- if job_ids is not None:
690
- queryset = queryset.filter(job_id__in=job_ids)
691
- blocked_rows = list(
692
- locked_queryset(queryset, use_skip_locked=config.use_skip_locked)[:batch_size]
693
- )
694
- if not blocked_rows:
695
- return 0
696
-
697
- job_ids = [row.job_id for row in blocked_rows]
698
- jobs_by_id = {job.id: job for job in Job.objects.using(alias).filter(pk__in=job_ids)}
699
- jobs = [jobs_by_id[job_id] for job_id in job_ids]
700
- Job.objects.using(alias).filter(pk__in=[row.job_id for row in blocked_rows]).delete()
701
-
702
- for job in jobs:
703
- log_event("job.discarded", job_id=str(job.id), reason="blocked")
704
- return len(jobs)
670
+ return _discard_state_jobs(
671
+ BlockedExecution,
672
+ "blocked",
673
+ job_ids=job_ids,
674
+ batch_size=batch_size,
675
+ backend_alias=backend_alias,
676
+ )
705
677
 
706
678
 
707
679
  def _dispatch_existing_job(job):
@@ -73,6 +73,7 @@ class BaseRunner:
73
73
  self.hostname = hostname
74
74
  self.sleeper = sleeper or InterruptibleSleeper()
75
75
  self.supervisor = supervisor
76
+ self.supervisor_id = getattr(supervisor, "pk", supervisor)
76
77
  self.process = None
77
78
  self._stop_event = threading.Event()
78
79
  self._heartbeat_stop_event = threading.Event()
@@ -195,7 +196,7 @@ class BaseRunner:
195
196
  hostname=self.hostname,
196
197
  name=self.name,
197
198
  metadata=self.runtime_metadata(),
198
- supervisor=self.supervisor,
199
+ supervisor_id=self.supervisor_id,
199
200
  last_heartbeat_at=timezone.now(),
200
201
  ),
201
202
  alias=alias,
@@ -278,9 +278,11 @@ class AsyncSupervisor(Supervisor):
278
278
 
279
279
  # runner crashed — fail its claimed jobs, then stop and replace
280
280
  self._fail_crashed_runner_jobs(runner)
281
- runner.stop()
281
+ drained = runner.stop()
282
282
  if self._stop_event.is_set():
283
283
  return
284
+ if drained is False and not self._wait_for_runner_stop(runner):
285
+ return
284
286
  log_event(
285
287
  "process.replaced",
286
288
  backend_alias=self.backend_alias,
@@ -297,6 +299,11 @@ class AsyncSupervisor(Supervisor):
297
299
  if not self._stop_event.is_set():
298
300
  runner.stop()
299
301
 
302
+ def _wait_for_runner_stop(self, runner):
303
+ while runner.process is not None and not self._stop_event.is_set():
304
+ time.sleep(0.01)
305
+ return runner.process is None
306
+
300
307
  def _replace_runner(self, current, replacement):
301
308
  try:
302
309
  index = self.runners.index(current)
@@ -552,6 +559,7 @@ class ForkSupervisor(Supervisor):
552
559
 
553
560
  def _build_runner_specs(self):
554
561
  specs = []
562
+ supervisor_id = self.process.pk if self.process else None
555
563
 
556
564
  for index, worker_config in enumerate(self.config.workers, start=1):
557
565
  for process_index in range(worker_config.processes):
@@ -565,6 +573,7 @@ class ForkSupervisor(Supervisor):
565
573
  "backend_alias": self.backend_alias,
566
574
  "name": f"worker-{suffix}",
567
575
  "hostname": self.hostname,
576
+ "supervisor": supervisor_id,
568
577
  },
569
578
  }
570
579
  )
@@ -579,6 +588,7 @@ class ForkSupervisor(Supervisor):
579
588
  "backend_alias": self.backend_alias,
580
589
  "name": f"dispatcher-{index}",
581
590
  "hostname": self.hostname,
591
+ "supervisor": supervisor_id,
582
592
  },
583
593
  }
584
594
  )
@@ -593,6 +603,7 @@ class ForkSupervisor(Supervisor):
593
603
  "backend_alias": self.backend_alias,
594
604
  "name": "scheduler-1",
595
605
  "hostname": self.hostname,
606
+ "supervisor": supervisor_id,
596
607
  },
597
608
  }
598
609
  )
@@ -4,7 +4,7 @@ build-backend = "uv_build"
4
4
 
5
5
  [project]
6
6
  name = "dj-queue"
7
- version = "0.8.0"
7
+ version = "0.8.1"
8
8
  description = "Database-backed task queue backend for Django’s Tasks framework."
9
9
  readme = "README.md"
10
10
  license = "MIT"
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes