dj-queue 0.6.1.tar.gz → 0.6.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. {dj_queue-0.6.1 → dj_queue-0.6.3}/PKG-INFO +1 -1
  2. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/admin.py +44 -1
  3. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/config.py +25 -8
  4. dj_queue-0.6.3/dj_queue/contrib/gunicorn.py +45 -0
  5. dj_queue-0.6.3/dj_queue/operations/_insert.py +24 -0
  6. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/operations/concurrency.py +28 -30
  7. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/operations/jobs.py +39 -2
  8. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/operations/recurring.py +10 -1
  9. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/runtime/base.py +14 -1
  10. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/runtime/scheduler.py +2 -1
  11. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/runtime/supervisor.py +18 -1
  12. {dj_queue-0.6.1 → dj_queue-0.6.3}/pyproject.toml +1 -1
  13. dj_queue-0.6.1/dj_queue/contrib/gunicorn.py +0 -25
  14. {dj_queue-0.6.1 → dj_queue-0.6.3}/LICENSE +0 -0
  15. {dj_queue-0.6.1 → dj_queue-0.6.3}/README.md +0 -0
  16. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/__init__.py +0 -0
  17. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/api.py +0 -0
  18. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/apps.py +0 -0
  19. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/backend.py +0 -0
  20. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/contrib/__init__.py +0 -0
  21. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/contrib/asgi.py +0 -0
  22. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/contrib/prometheus.py +0 -0
  23. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/dashboard.py +0 -0
  24. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/db.py +0 -0
  25. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/exceptions.py +0 -0
  26. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/hooks.py +0 -0
  27. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/log.py +0 -0
  28. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/management/__init__.py +0 -0
  29. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/management/commands/__init__.py +0 -0
  30. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/management/commands/dj_queue.py +0 -0
  31. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/management/commands/dj_queue_health.py +0 -0
  32. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/management/commands/dj_queue_prune.py +0 -0
  33. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/migrations/0001_initial.py +0 -0
  34. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/migrations/0002_pause_semaphore.py +0 -0
  35. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/migrations/0003_recurringtask_recurringexecution.py +0 -0
  36. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/migrations/0004_dashboard.py +0 -0
  37. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/migrations/0005_remove_recurringexecution_dj_queue_recurring_executions_task_key_run_at_unique_and_more.py +0 -0
  38. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/migrations/__init__.py +0 -0
  39. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/models/__init__.py +0 -0
  40. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/models/jobs.py +0 -0
  41. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/models/recurring.py +0 -0
  42. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/models/runtime.py +0 -0
  43. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/observability.py +0 -0
  44. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/operations/__init__.py +0 -0
  45. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/operations/cleanup.py +0 -0
  46. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/routers.py +0 -0
  47. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/runtime/__init__.py +0 -0
  48. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/runtime/dispatcher.py +0 -0
  49. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/runtime/errors.py +0 -0
  50. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/runtime/interruptible.py +0 -0
  51. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/runtime/notify.py +0 -0
  52. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/runtime/pidfile.py +0 -0
  53. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/runtime/pool.py +0 -0
  54. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/runtime/procline.py +0 -0
  55. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/runtime/worker.py +0 -0
  56. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/templates/admin/dj_queue/_dashboard_process_rows.html +0 -0
  57. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/templates/admin/dj_queue/_dashboard_recurring_rows.html +0 -0
  58. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/templates/admin/dj_queue/_dashboard_section_table.html +0 -0
  59. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/templates/admin/dj_queue/_dashboard_semaphore_rows.html +0 -0
  60. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/templates/admin/dj_queue/_paginator.html +0 -0
  61. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/templates/admin/dj_queue/_queue_controls.html +0 -0
  62. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/templates/admin/dj_queue/_sortable_header_cells.html +0 -0
  63. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/templates/admin/dj_queue/change_form.html +0 -0
  64. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/templates/admin/dj_queue/change_list.html +0 -0
  65. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/templates/admin/dj_queue/dashboard.html +0 -0
  66. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/templates/admin/dj_queue/includes/fieldset.html +0 -0
  67. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/templates/admin/dj_queue/queue_jobs.html +0 -0
  68. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/templatetags/__init__.py +0 -0
  69. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/templatetags/dj_queue_admin.py +0 -0
  70. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/urls.py +0 -0
  71. {dj_queue-0.6.1 → dj_queue-0.6.3}/dj_queue/views.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: dj-queue
3
- Version: 0.6.1
3
+ Version: 0.6.3
4
4
  Summary: Database-backed task queue backend for Django’s Tasks framework.
5
5
  License-Expression: MIT
6
6
  License-File: LICENSE
@@ -28,7 +28,7 @@ from dj_queue.models import (
28
28
  RecurringTask,
29
29
  Semaphore,
30
30
  )
31
- from dj_queue.operations.jobs import enqueue_job_again
31
+ from dj_queue.operations.jobs import dispatch_scheduled_job_now, enqueue_job_again
32
32
 
33
33
 
34
34
  class DjQueueFirstAdminSite(admin.AdminSite):
@@ -563,6 +563,16 @@ class JobAdmin(HiddenSidebarAdminMixin, admin.ModelAdmin):
563
563
  def get_change_actions(self, request, obj):
564
564
  if obj is None:
565
565
  return ()
566
+ if obj.status == "scheduled":
567
+ return (
568
+ {"name": "run_now", "label": "Run now", "css_class": "djq-object-action-retry"},
569
+ {
570
+ "name": "enqueue_copy_now",
571
+ "label": "Enqueue copy now",
572
+ "css_class": "djq-object-action-retry",
573
+ },
574
+ )
575
+
566
576
  actions = [{"name": "enqueue", "label": "Enqueue job", "css_class": "djq-object-action-retry"}]
567
577
  if obj.status == "failed":
568
578
  actions.extend(
@@ -582,6 +592,39 @@ class JobAdmin(HiddenSidebarAdminMixin, admin.ModelAdmin):
582
592
  return tuple(actions)
583
593
 
584
594
  def handle_change_action(self, request, obj, action):
595
+ if action == "run_now":
596
+ try:
597
+ _job, dispatched_as = dispatch_scheduled_job_now(obj.pk, backend_alias=obj.backend_alias)
598
+ except (EnqueueError, ImportError, AttributeError) as exc:
599
+ self.message_user(request, f"Could not dispatch job now: {exc}", level=messages.ERROR)
600
+ return self._current_object_redirect(obj, backend_alias=obj.backend_alias)
601
+
602
+ message = "Dispatched scheduled job for immediate execution"
603
+ if dispatched_as == "blocked":
604
+ message = "Dispatched scheduled job immediately and it is now blocked"
605
+ if dispatched_as == "discarded":
606
+ message = "Dispatched scheduled job immediately and it was discarded"
607
+ self.message_user(request, message, level=messages.SUCCESS)
608
+ return self._current_object_redirect(obj, backend_alias=obj.backend_alias)
609
+
610
+ if action == "enqueue_copy_now":
611
+ try:
612
+ new_job = enqueue_job_again(obj.pk, backend_alias=obj.backend_alias, run_after=None)
613
+ except (EnqueueError, ImportError, AttributeError) as exc:
614
+ self.message_user(request, f"Could not enqueue job: {exc}", level=messages.ERROR)
615
+ return self._current_object_redirect(obj, backend_alias=obj.backend_alias)
616
+
617
+ self.message_user(
618
+ request,
619
+ format_html(
620
+ 'Enqueued immediate copy <a href="{}">{}</a>.',
621
+ self._change_url(object_id=new_job.pk, backend_alias=new_job.backend_alias),
622
+ new_job.pk,
623
+ ),
624
+ level=messages.SUCCESS,
625
+ )
626
+ return self._current_object_redirect(obj, backend_alias=obj.backend_alias)
627
+
585
628
  if action == "enqueue":
586
629
  try:
587
630
  new_job = enqueue_job_again(obj.pk, backend_alias=obj.backend_alias)
@@ -1,4 +1,5 @@
1
1
  import json
2
+ import math
2
3
  import os
3
4
  import warnings
4
5
  from collections.abc import Mapping, Sequence
@@ -393,7 +394,7 @@ def _build_worker_configs(raw_workers: Any, mode: str) -> tuple[WorkerConfig, ..
393
394
  raw_workers = [raw_workers]
394
395
 
395
396
  workers: list[WorkerConfig] = []
396
- for raw_worker in raw_workers or []:
397
+ for index, raw_worker in enumerate(raw_workers or []):
397
398
  if not isinstance(raw_worker, Mapping):
398
399
  raise ImproperlyConfigured("worker entries must be mappings")
399
400
 
@@ -401,8 +402,9 @@ def _build_worker_configs(raw_workers: Any, mode: str) -> tuple[WorkerConfig, ..
401
402
  queues=_as_queue_selectors(raw_worker.get("queues", DEFAULT_WORKER["queues"])),
402
403
  threads=int(raw_worker.get("threads", DEFAULT_WORKER["threads"])),
403
404
  processes=int(raw_worker.get("processes", DEFAULT_WORKER["processes"])),
404
- polling_interval=float(
405
- raw_worker.get("polling_interval", DEFAULT_WORKER["polling_interval"])
405
+ polling_interval=_positive_float(
406
+ raw_worker.get("polling_interval", DEFAULT_WORKER["polling_interval"]),
407
+ f"workers[{index}].polling_interval",
406
408
  ),
407
409
  )
408
410
 
@@ -423,15 +425,16 @@ def _build_dispatcher_configs(raw_dispatchers: Any) -> tuple[DispatcherConfig, .
423
425
  raw_dispatchers = [raw_dispatchers]
424
426
 
425
427
  dispatchers: list[DispatcherConfig] = []
426
- for raw_dispatcher in raw_dispatchers or []:
428
+ for index, raw_dispatcher in enumerate(raw_dispatchers or []):
427
429
  if not isinstance(raw_dispatcher, Mapping):
428
430
  raise ImproperlyConfigured("dispatcher entries must be mappings")
429
431
 
430
432
  dispatchers.append(
431
433
  DispatcherConfig(
432
434
  batch_size=int(raw_dispatcher.get("batch_size", DEFAULT_DISPATCHER["batch_size"])),
433
- polling_interval=float(
434
- raw_dispatcher.get("polling_interval", DEFAULT_DISPATCHER["polling_interval"])
435
+ polling_interval=_positive_float(
436
+ raw_dispatcher.get("polling_interval", DEFAULT_DISPATCHER["polling_interval"]),
437
+ f"dispatchers[{index}].polling_interval",
435
438
  ),
436
439
  concurrency_maintenance=bool(
437
440
  raw_dispatcher.get(
@@ -463,8 +466,9 @@ def _build_scheduler_config(raw_scheduler: Any) -> SchedulerConfig:
463
466
  DEFAULT_SCHEDULER["dynamic_tasks_enabled"],
464
467
  )
465
468
  ),
466
- polling_interval=float(
467
- raw_scheduler.get("polling_interval", DEFAULT_SCHEDULER["polling_interval"])
469
+ polling_interval=_positive_float(
470
+ raw_scheduler.get("polling_interval", DEFAULT_SCHEDULER["polling_interval"]),
471
+ "scheduler.polling_interval",
468
472
  ),
469
473
  )
470
474
 
@@ -539,5 +543,18 @@ def _optional_int(value: Any) -> int | None:
539
543
  return int(value)
540
544
 
541
545
 
546
+ def _positive_float(value: Any, setting_name: str) -> float:
547
+ try:
548
+ number = float(value)
549
+ except (TypeError, ValueError) as exc:
550
+ raise ImproperlyConfigured(
551
+ f"dj_queue {setting_name} must be a positive number, got {value!r}"
552
+ ) from exc
553
+
554
+ if not math.isfinite(number) or number <= 0:
555
+ raise ImproperlyConfigured(f"dj_queue {setting_name} must be a positive number, got {value!r}")
556
+ return number
557
+
558
+
542
559
  def _cache_key(value: Any) -> str:
543
560
  return json.dumps(value, sort_keys=True, separators=(",", ":"), default=str)
@@ -0,0 +1,45 @@
1
+ import threading
2
+
3
+ from dj_queue.runtime.supervisor import AsyncSupervisor
4
+
5
+
6
+ def build_supervisor(backend_alias="default"):
7
+ return AsyncSupervisor.from_backend_config(backend_alias=backend_alias, standalone=False)
8
+
9
+
10
+ def post_fork(_server, worker):
11
+ if worker.age != 1:
12
+ return None
13
+
14
+ supervisor = build_supervisor()
15
+ worker._dj_queue_supervisor = supervisor
16
+ worker._dj_queue_supervisor_poll_stop = threading.Event()
17
+ supervisor.start()
18
+
19
+ def poll_supervisor():
20
+ stop_event = worker._dj_queue_supervisor_poll_stop
21
+ while stop_event.wait(supervisor.polling_interval) is False:
22
+ supervisor.poll_once()
23
+
24
+ poll_thread = threading.Thread(target=poll_supervisor, daemon=True)
25
+ worker._dj_queue_supervisor_poll_thread = poll_thread
26
+ poll_thread.start()
27
+ return supervisor
28
+
29
+
30
+ def worker_exit(_server, worker):
31
+ supervisor = getattr(worker, "_dj_queue_supervisor", None)
32
+ stop_event = getattr(worker, "_dj_queue_supervisor_poll_stop", None)
33
+ poll_thread = getattr(worker, "_dj_queue_supervisor_poll_thread", None)
34
+ if stop_event is not None:
35
+ stop_event.set()
36
+ if poll_thread is not None:
37
+ poll_thread.join(timeout=1)
38
+ worker._dj_queue_supervisor_poll_thread = None
39
+ worker._dj_queue_supervisor_poll_stop = None
40
+ if supervisor is None:
41
+ return None
42
+
43
+ supervisor.stop()
44
+ worker._dj_queue_supervisor = None
45
+ return None
@@ -0,0 +1,24 @@
1
+ from django.db import connections
2
+ from django.db.models import AutoField
3
+ from django.db.models.constants import OnConflict
4
+ from django.db.models.sql import InsertQuery
5
+
6
+
7
+ def create_ignore_conflicts(model, /, *, using, **fields):
8
+ obj = model(**fields)
9
+ queryset = model.objects.using(using).all()
10
+ _objs_with_pk, objs_without_pk = queryset._prepare_for_bulk_create([obj])
11
+ insert_fields = [field for field in model._meta.concrete_fields if not field.generated]
12
+ if objs_without_pk:
13
+ insert_fields = [field for field in insert_fields if not isinstance(field, AutoField)]
14
+ query = InsertQuery(model, on_conflict=OnConflict.IGNORE)
15
+ query.insert_values(insert_fields, [obj], raw=False)
16
+ compiler = query.get_compiler(using=using)
17
+ rowcount = 0
18
+
19
+ with connections[using].cursor() as cursor:
20
+ for sql, params in compiler.as_sql():
21
+ cursor.execute(sql, params)
22
+ rowcount += cursor.rowcount
23
+
24
+ return rowcount > 0
@@ -1,6 +1,7 @@
1
1
  from datetime import timedelta
2
2
 
3
- from django.db import IntegrityError, transaction
3
+ from django.db import transaction
4
+ from django.db.models import F
4
5
  from django.utils import timezone
5
6
  from django.utils.module_loading import import_string
6
7
 
@@ -8,6 +9,7 @@ from dj_queue.config import load_backend_config
8
9
  from dj_queue.db import get_database_alias, locked_queryset
9
10
  from dj_queue.log import log_event
10
11
  from dj_queue.models import BlockedExecution, Job, Pause, ReadyExecution, Semaphore
12
+ from dj_queue.operations._insert import create_ignore_conflicts
11
13
  from dj_queue.runtime import notify as runtime_notify
12
14
 
13
15
 
@@ -19,36 +21,32 @@ def semaphore_acquire(
19
21
  backend_alias="default",
20
22
  ):
21
23
  alias = get_database_alias(backend_alias)
22
- expires_at = timezone.now() + timedelta(seconds=duration_seconds)
24
+ now = timezone.now()
25
+ expires_at = now + timedelta(seconds=duration_seconds)
26
+
27
+ with transaction.atomic(using=alias):
28
+ if create_ignore_conflicts(
29
+ Semaphore,
30
+ using=alias,
31
+ key=key,
32
+ value=limit - 1,
33
+ limit=limit,
34
+ expires_at=expires_at,
35
+ ):
36
+ return True
23
37
 
24
- for attempt in range(2):
25
- try:
26
- with transaction.atomic(using=alias):
27
- semaphore = Semaphore.objects.using(alias).select_for_update().filter(key=key).first()
28
- if semaphore is None:
29
- Semaphore.objects.using(alias).create(
30
- key=key,
31
- value=limit - 1,
32
- limit=limit,
33
- expires_at=expires_at,
34
- )
35
- return True
36
-
37
- if semaphore.value <= 0:
38
- return False
39
-
40
- semaphore.value -= 1
41
- semaphore.expires_at = expires_at
42
- semaphore.save(using=alias, update_fields=["value", "expires_at", "updated_at"])
43
- return True
44
- except IntegrityError:
45
- # two workers can both miss the row, then race to create the unique key
46
- # retry once so the loser can load the row created by the winner
47
- if attempt == 0:
48
- continue
49
- continue
50
-
51
- return False
38
+ # mysql-family backends can deadlock if a skipped insert and row lock happen in one tx
39
+ with transaction.atomic(using=alias):
40
+ updated = (
41
+ Semaphore.objects.using(alias)
42
+ .filter(key=key, value__gt=0)
43
+ .update(
44
+ value=F("value") - 1,
45
+ expires_at=expires_at,
46
+ updated_at=now,
47
+ )
48
+ )
49
+ return updated > 0
52
50
 
53
51
 
54
52
  def semaphore_release(key, *, duration_seconds, backend_alias="default"):
@@ -351,6 +351,39 @@ def promote_scheduled_jobs(*, batch_size, backend_alias="default", use_skip_lock
351
351
  return jobs
352
352
 
353
353
 
354
+ def dispatch_scheduled_job_now(job_id, *, backend_alias="default"):
355
+ alias = get_database_alias(backend_alias)
356
+ config = load_backend_config(backend_alias)
357
+
358
+ with transaction.atomic(using=alias):
359
+ scheduled = locked_queryset(
360
+ ScheduledExecution.objects.using(alias)
361
+ .select_related("job")
362
+ .filter(job_id=job_id, job__backend_alias=backend_alias),
363
+ use_skip_locked=config.use_skip_locked,
364
+ ).first()
365
+ if scheduled is None:
366
+ raise EnqueueError("job is not scheduled")
367
+
368
+ job = scheduled.job
369
+ scheduled.delete(using=alias)
370
+ job.scheduled_at = None
371
+ job.save(using=alias, update_fields=["scheduled_at", "updated_at"])
372
+ dispatched_as = _dispatch_existing_job(job)
373
+
374
+ if dispatched_as == "ready":
375
+ runtime_notify.notify_ready_queues((job.queue_name,), backend_alias=backend_alias)
376
+
377
+ log_event(
378
+ "job.dispatched_now",
379
+ job_id=str(job.id),
380
+ queue_name=job.queue_name,
381
+ priority=job.priority,
382
+ dispatched_as=dispatched_as,
383
+ )
384
+ return job, dispatched_as
385
+
386
+
354
387
  def retry_failed_job(job_id, *, backend_alias="default"):
355
388
  alias = get_database_alias(backend_alias)
356
389
 
@@ -370,15 +403,19 @@ def retry_failed_job(job_id, *, backend_alias="default"):
370
403
  return job
371
404
 
372
405
 
373
- def enqueue_job_again(job_id, *, backend_alias="default"):
406
+ _KEEP_RUN_AFTER = object()
407
+
408
+
409
+ def enqueue_job_again(job_id, *, backend_alias="default", run_after=_KEEP_RUN_AFTER):
374
410
  alias = get_database_alias(backend_alias)
375
411
  source_job = Job.objects.using(alias).get(pk=job_id)
376
412
  task = import_string(source_job.task_path)
413
+ source_run_after = source_job.scheduled_at if run_after is _KEEP_RUN_AFTER else run_after
377
414
  if hasattr(task, "using"):
378
415
  task = task.using(
379
416
  priority=source_job.priority,
380
417
  queue_name=source_job.queue_name,
381
- run_after=source_job.scheduled_at,
418
+ run_after=source_run_after,
382
419
  backend=source_job.backend_alias,
383
420
  )
384
421
  args = list(source_job.payload.get("args", []))
@@ -3,6 +3,7 @@ from django.utils.module_loading import import_string
3
3
 
4
4
  from dj_queue.db import get_database_alias
5
5
  from dj_queue.models import RecurringExecution, RecurringTask
6
+ from dj_queue.operations._insert import create_ignore_conflicts
6
7
  from dj_queue.operations.jobs import enqueue_job
7
8
 
8
9
 
@@ -62,7 +63,9 @@ def fire_recurring_task(recurring_task, run_at, *, backend_alias="default"):
62
63
  alias = get_database_alias(backend_alias)
63
64
 
64
65
  with transaction.atomic(using=alias):
65
- execution, created = RecurringExecution.objects.using(alias).get_or_create(
66
+ created = create_ignore_conflicts(
67
+ RecurringExecution,
68
+ using=alias,
66
69
  backend_alias=backend_alias,
67
70
  task_key=recurring_task.key,
68
71
  run_at=run_at,
@@ -72,6 +75,12 @@ def fire_recurring_task(recurring_task, run_at, *, backend_alias="default"):
72
75
  # has not happened yet, so duplicate scheduler ticks never enqueue twice
73
76
  return None
74
77
 
78
+ execution = RecurringExecution.objects.using(alias).get(
79
+ backend_alias=backend_alias,
80
+ task_key=recurring_task.key,
81
+ run_at=run_at,
82
+ )
83
+
75
84
  task = import_string(recurring_task.task_path).using(
76
85
  queue_name=recurring_task.queue_name,
77
86
  priority=recurring_task.priority,
@@ -1,4 +1,5 @@
1
1
  from contextlib import contextmanager, nullcontext
2
+ import math
2
3
  import threading
3
4
  import time
4
5
 
@@ -24,6 +25,7 @@ def app_executor():
24
25
 
25
26
 
26
27
  _sqlite_process_write_lock = threading.Lock()
28
+ _safe_polling_interval = 1.0
27
29
 
28
30
 
29
31
  def _process_write_context(alias):
@@ -82,7 +84,18 @@ class BaseRunner:
82
84
 
83
85
  @property
84
86
  def polling_interval(self):
85
- return getattr(self.config, "polling_interval", 0)
87
+ return self._normalized_polling_interval(getattr(self.config, "polling_interval", None))
88
+
89
+ @staticmethod
90
+ def _normalized_polling_interval(value):
91
+ try:
92
+ polling_interval = float(value)
93
+ except (TypeError, ValueError):
94
+ return _safe_polling_interval
95
+
96
+ if not math.isfinite(polling_interval) or polling_interval <= 0:
97
+ return _safe_polling_interval
98
+ return polling_interval
86
99
 
87
100
  def start(self):
88
101
  if self.process is None:
@@ -23,7 +23,8 @@ class Scheduler(BaseRunner):
23
23
 
24
24
  @property
25
25
  def polling_interval(self):
26
- return self.config.scheduler.polling_interval
26
+ scheduler = getattr(self.config, "scheduler", None)
27
+ return self._normalized_polling_interval(getattr(scheduler, "polling_interval", None))
27
28
 
28
29
  def __init__(
29
30
  self,
@@ -2,6 +2,7 @@ import os
2
2
  import signal
3
3
  import socket
4
4
  import threading
5
+ import time
5
6
 
6
7
  from django.utils import timezone
7
8
  from datetime import timedelta
@@ -48,6 +49,7 @@ class Supervisor(BaseRunner):
48
49
  )
49
50
  self.standalone = standalone
50
51
  self.pidfile = None
52
+ self._last_housekeeping_at = None
51
53
 
52
54
  @classmethod
53
55
  def from_backend_config(
@@ -84,7 +86,10 @@ class Supervisor(BaseRunner):
84
86
  return process
85
87
 
86
88
  def poll_once(self):
87
- pruned_processes = self.prune_stale_process_rows()
89
+ pruned_processes = []
90
+ if self._housekeeping_due():
91
+ pruned_processes = self.prune_stale_process_rows()
92
+ self._last_housekeeping_at = time.monotonic()
88
93
  for process in pruned_processes:
89
94
  log_event(
90
95
  "process.pruned",
@@ -94,6 +99,18 @@ class Supervisor(BaseRunner):
94
99
  )
95
100
  return pruned_processes
96
101
 
102
+ @property
103
+ def housekeeping_interval(self):
104
+ heartbeat_interval = self.config.process_heartbeat_interval
105
+ if heartbeat_interval > 0:
106
+ return max(min(self.config.process_alive_threshold, max(heartbeat_interval, 1)), 1)
107
+ return max(min(self.config.process_alive_threshold, 60), 1)
108
+
109
+ def _housekeeping_due(self):
110
+ if self._last_housekeeping_at is None:
111
+ return True
112
+ return (time.monotonic() - self._last_housekeeping_at) >= self.housekeeping_interval
113
+
97
114
  def process_metadata(self):
98
115
  return {
99
116
  "mode": self.config.mode,
@@ -4,7 +4,7 @@ build-backend = "uv_build"
4
4
 
5
5
  [project]
6
6
  name = "dj-queue"
7
- version = "0.6.1"
7
+ version = "0.6.3"
8
8
  description = "Database-backed task queue backend for Django’s Tasks framework."
9
9
  readme = "README.md"
10
10
  license = "MIT"
@@ -1,25 +0,0 @@
1
- from dj_queue.runtime.supervisor import AsyncSupervisor
2
-
3
-
4
- def build_supervisor(backend_alias="default"):
5
- return AsyncSupervisor.from_backend_config(backend_alias=backend_alias, standalone=False)
6
-
7
-
8
- def post_fork(_server, worker):
9
- if worker.age != 1:
10
- return None
11
-
12
- supervisor = build_supervisor()
13
- worker._dj_queue_supervisor = supervisor
14
- supervisor.start()
15
- return supervisor
16
-
17
-
18
- def worker_exit(_server, worker):
19
- supervisor = getattr(worker, "_dj_queue_supervisor", None)
20
- if supervisor is None:
21
- return None
22
-
23
- supervisor.stop()
24
- worker._dj_queue_supervisor = None
25
- return None
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes