zenpulse-scheduler 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zenpulse_scheduler/__init__.py +0 -0
- zenpulse_scheduler/admin.py +49 -0
- zenpulse_scheduler/apps.py +5 -0
- zenpulse_scheduler/engine.py +87 -0
- zenpulse_scheduler/listeners.py +82 -0
- zenpulse_scheduler/locks.py +107 -0
- zenpulse_scheduler/management/commands/run_zenpulse_scheduler.py +27 -0
- zenpulse_scheduler/models.py +86 -0
- zenpulse_scheduler/registry.py +34 -0
- zenpulse_scheduler/sync.py +76 -0
- zenpulse_scheduler/triggers.py +42 -0
- zenpulse_scheduler-0.1.0.dist-info/METADATA +10 -0
- zenpulse_scheduler-0.1.0.dist-info/RECORD +15 -0
- zenpulse_scheduler-0.1.0.dist-info/WHEEL +5 -0
- zenpulse_scheduler-0.1.0.dist-info/top_level.txt +1 -0
|
File without changes
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
from django.contrib import admin
|
|
2
|
+
from .models import ScheduleConfig, JobExecutionLog
|
|
3
|
+
from .registry import JobRegistry
|
|
4
|
+
|
|
5
|
+
@admin.register(ScheduleConfig)
class ScheduleConfigAdmin(admin.ModelAdmin):
    """Admin for editing job schedules (trigger, cadence, logging policy).

    job_key is a plain text input; it must match a key registered via
    the job registry for the sync loop to pick the job up.
    """

    list_display = (
        'job_key', 'enabled', 'trigger_type', 'schedule_display',
        'log_policy', 'updated_at'
    )
    list_filter = ('enabled', 'trigger_type', 'log_policy')
    search_fields = ('job_key',)

    # NOTE(review): a previous formfield_for_choice_field override was removed —
    # it only contained comments and `pass` before delegating to super(), and
    # job_key is a CharField without choices, so the hook never applied to it.

    def schedule_display(self, obj):
        """Human-readable schedule summary for the changelist."""
        if obj.trigger_type == 'interval':
            return f"Every {obj.interval_value} {obj.interval_unit}"
        else:
            # Standard 5-field cron order: minute hour day month day-of-week.
            return f"{obj.cron_minute} {obj.cron_hour} {obj.cron_day} {obj.cron_month} {obj.cron_day_of_week}"

    schedule_display.short_description = "Schedule"
|
|
32
|
+
|
|
33
|
+
@admin.register(JobExecutionLog)
class JobExecutionLogAdmin(admin.ModelAdmin):
    """Read-only admin view over the job execution history."""

    list_display = ('job_key', 'status', 'run_time_display', 'duration_ms')
    list_filter = ('job_key', 'status', 'run_time')
    readonly_fields = ('run_time', 'traceback', 'exception_message', 'hostname', 'pid')

    def run_time_display(self, obj):
        """Render run_time without sub-second precision for the changelist."""
        return obj.run_time.strftime("%Y-%m-%d %H:%M:%S")

    run_time_display.admin_order_field = 'run_time'
    run_time_display.short_description = 'Run Time'

    def has_add_permission(self, request):
        # Log rows are created by the scheduler listener only, never via admin.
        return False

    def has_change_permission(self, request, obj=None):
        # Execution records are immutable audit data.
        return False
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import signal
|
|
3
|
+
import sys
|
|
4
|
+
import logging
|
|
5
|
+
from apscheduler.schedulers.background import BackgroundScheduler
|
|
6
|
+
from apscheduler.jobstores.memory import MemoryJobStore
|
|
7
|
+
from apscheduler.executors.pool import ThreadPoolExecutor
|
|
8
|
+
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
|
|
9
|
+
|
|
10
|
+
from django.conf import settings
|
|
11
|
+
from .sync import sync_jobs
|
|
12
|
+
from .listeners import handle_job_execution
|
|
13
|
+
from .locks import get_best_lock
|
|
14
|
+
|
|
15
|
+
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
class ZenPulseEngine:
    """Blocking scheduler engine: runs APScheduler in the background and
    periodically reconciles it against ScheduleConfig rows in the DB.

    Lifecycle: optional single-instance lock -> signal handlers ->
    event listener -> scheduler start -> sync loop until shutdown().
    """

    def __init__(self, sync_interval=10, use_lock=False):
        # Seconds between DB reconciliation passes.
        self.sync_interval = sync_interval
        self.use_lock = use_lock
        # Lock is chosen up front (DB advisory or PID file) but acquired in start().
        self.lock = get_best_lock() if use_lock else None
        self.running = False

        self.scheduler = BackgroundScheduler(
            jobstores={'default': MemoryJobStore()},
            executors={'default': ThreadPoolExecutor(20)},
            job_defaults={
                'coalesce': True,
                'max_instances': 1
            },
            timezone=getattr(settings, 'TIME_ZONE', 'UTC')
        )

    def start(self):
        """Run the engine; blocks until shutdown (signal or crash).

        Returns early without starting anything if the single-instance
        lock cannot be acquired.
        """
        # 1. Acquire Lock
        if self.use_lock:
            logger.info("Acquiring lock...")
            if not self.lock.acquire():
                logger.error("Could not acquire lock. Another instance is likely running. Exiting.")
                return
            logger.info("Lock acquired.")

        # 2. Setup Signal Handlers
        # NOTE: signal.signal only works in the main thread; the command
        # entry point runs start() there.
        signal.signal(signal.SIGINT, self.shutdown)
        signal.signal(signal.SIGTERM, self.shutdown)

        # 3. Setup Listeners (persists execution logs per job log_policy)
        self.scheduler.add_listener(handle_job_execution, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

        # 4. Start Scheduler
        self.scheduler.start()
        self.running = True
        logger.info("ZenPulse Scheduler started.")

        # Cache for simple change detection: {job_key: (enabled, updated_at_ts)}
        self.last_synced_data = {}

        # 5. Main Sync Loop
        try:
            while self.running:
                try:
                    sync_jobs(self.scheduler, self.last_synced_data)
                except Exception as e:
                    # Keep the loop alive through transient DB/scheduler errors.
                    logger.error(f"Error in sync loop: {e}")

                # Sleep in small chunks to handle shutdown signals faster
                for _ in range(self.sync_interval):
                    if not self.running:
                        break
                    time.sleep(1)
        except Exception as e:
            logger.critical(f"Engine crashed: {e}")
        finally:
            # Idempotent: shutdown() no-ops if a signal already stopped us.
            self.shutdown()

    def shutdown(self, signum=None, frame=None):
        """Stop the scheduler and release the lock; safe to call twice.

        Signature matches the signal-handler contract (signum, frame).
        """
        if not self.running: return # Already stopped

        logger.info("Shutting down ZenPulse Scheduler...")
        self.running = False
        self.scheduler.shutdown()

        if self.use_lock:
            self.lock.release()
            logger.info("Lock released.")

        logger.info("Shutdown complete.")
|
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import traceback
|
|
3
|
+
import socket
|
|
4
|
+
import os
|
|
5
|
+
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
|
|
6
|
+
from django.utils import timezone
|
|
7
|
+
from .models import ScheduleConfig, JobExecutionLog
|
|
8
|
+
|
|
9
|
+
logger = logging.getLogger(__name__)
|
|
10
|
+
|
|
11
|
+
def get_config_log_policy(job_id):
    """
    Return the log_policy ('none' | 'failures' | 'all') configured for job_id.

    job_id is the APScheduler job id, which equals ScheduleConfig.job_key.
    Falls back to 'none' (no logging) when the row is missing or the DB
    lookup fails, so a listener error can never break job execution.
    Fetches fresh from the DB on every call so policy edits apply immediately.
    """
    try:
        # job_id in APScheduler will be the job_key from our model
        config = ScheduleConfig.objects.filter(job_key=job_id).first()
        if config:
            return config.log_policy
    except Exception as e:
        # Best-effort: don't let a broken DB connection crash the listener,
        # but don't swallow the error completely either (was a silent pass).
        logger.debug("Could not load log policy for job %s: %s", job_id, e)
    return 'none'  # Default to none if not found
|
|
25
|
+
|
|
26
|
+
def handle_job_execution(event):
    """
    APScheduler listener for EVENT_JOB_EXECUTED / EVENT_JOB_ERROR.

    Persists a JobExecutionLog row according to the job's log_policy:
    'none' records nothing, 'failures' records only errored runs, and
    'all' records every run. Persistence failures are logged, never raised.
    """
    job_id = event.job_id
    policy = get_config_log_policy(job_id)

    if policy == 'none':
        return

    is_error = event.exception is not None

    # 'failures' policy: skip successful runs.
    if policy == 'failures' and not is_error:
        return

    status = 'fail' if is_error else 'success'

    # APScheduler's JobExecutionEvent does not expose wall-clock duration;
    # precise timing would require wrapping the job callable.
    duration = 0.0  # Placeholder, or implement wrapper logic for precise timing later.

    exception_type = None
    exception_message = None
    tb = None

    if is_error:
        exception_type = type(event.exception).__name__
        exception_message = str(event.exception)
        # In APScheduler 3.x event.traceback is already a formatted string;
        # tolerate a real traceback object (or anything else) defensively.
        # (Previously this called traceback.format_tb() on the string inside
        # a bare `except:` which also swallowed SystemExit/KeyboardInterrupt.)
        if isinstance(event.traceback, str):
            tb = event.traceback
        else:
            try:
                tb = "".join(traceback.format_tb(event.traceback))
            except Exception:
                tb = str(event.traceback)

    try:
        JobExecutionLog.objects.create(
            job_key=job_id,
            status=status,
            duration_ms=duration,  # Update if we implement wrapper timing
            exception_type=exception_type,
            exception_message=exception_message,
            traceback=tb,
            hostname=socket.gethostname(),
            pid=os.getpid()
        )
    except Exception as e:
        logger.error(f"Failed to write execution log for job {job_id}: {e}")
|
|
82
|
+
|
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
import sys
|
|
2
|
+
import atexit
|
|
3
|
+
import os
|
|
4
|
+
import logging
|
|
5
|
+
from django.db import connection
|
|
6
|
+
|
|
7
|
+
logger = logging.getLogger(__name__)
|
|
8
|
+
|
|
9
|
+
class BaseLock:
    """Abstract interface for the engine's single-instance locks."""

    def acquire(self):
        """Attempt to take the lock; implementations return True on success."""
        raise NotImplementedError

    def release(self):
        """Give the lock back."""
        raise NotImplementedError
|
|
15
|
+
|
|
16
|
+
class PIDFileLock(BaseLock):
    """
    Single-instance lock backed by a PID file in the temp directory.

    A stale lock file — owned by a dead process, or with unreadable/corrupt
    contents — is taken over instead of blocking acquisition forever.
    """

    def __init__(self, key="zenpulse_scheduler"):
        # /tmp on POSIX, %TEMP% on Windows.
        self.lockfile = f"/tmp/{key}.lock" if sys.platform != "win32" else f"{os.getenv('TEMP')}/{key}.lock"
        self._f = None

    def acquire(self):
        """Return True if the lock was taken, False if a live process holds it."""
        try:
            if os.path.exists(self.lockfile):
                holder_alive = False
                pid = None
                try:
                    with open(self.lockfile, 'r') as f:
                        pid = int(f.read().strip())
                    # Signal 0 probes for process existence without touching it.
                    os.kill(pid, 0)
                    holder_alive = True
                except (ValueError, OSError):
                    # ValueError: empty/corrupt lock file (previously this fell
                    # through to the outer handler and blocked acquisition
                    # forever). OSError: holder process is dead.
                    # Either way, safe to take over.
                    pass

                if holder_alive:
                    logger.warning(f"Lock file exists and process {pid} is running.")
                    return False

            self._f = open(self.lockfile, 'w')
            self._f.write(str(os.getpid()))
            self._f.flush()
            # Best-effort cleanup even on unclean interpreter exit.
            atexit.register(self.release)
            return True
        except Exception as e:
            logger.error(f"Failed to acquire PID lock: {e}")
            return False

    def release(self):
        """Close and delete the lock file; never raises (atexit-safe)."""
        try:
            if self._f:
                self._f.close()
                os.remove(self.lockfile)
                self._f = None
        except Exception:
            pass
|
|
55
|
+
|
|
56
|
+
class DatabaseAdvisoryLock(BaseLock):
    """
    Single-instance lock using DB-native advisory locks.

    Supports PostgreSQL (pg_try_advisory_lock) and MySQL (GET_LOCK).
    Any other vendor (e.g. sqlite) falls back to a PIDFileLock; the
    fallback instance is retained so release() releases the right lock.
    """

    LOCK_ID = 808080  # arbitrary integer for advisory lock

    def __init__(self):
        self.acquired = False
        self._fallback = None  # PIDFileLock used when the vendor is unsupported

    def acquire(self):
        """Try to take the advisory lock without blocking; True on success."""
        if connection.vendor == 'postgresql':
            with connection.cursor() as cursor:
                cursor.execute("SELECT pg_try_advisory_lock(%s)", [self.LOCK_ID])
                row = cursor.fetchone()
                if row and row[0]:
                    self.acquired = True
                    return True
                return False
        elif connection.vendor == 'mysql':
            with connection.cursor() as cursor:
                # GET_LOCK returns 1 if success, 0 if timeout, NULL on error
                cursor.execute("SELECT GET_LOCK(%s, 0)", [str(self.LOCK_ID)])
                row = cursor.fetchone()
                if row and row[0] == 1:
                    self.acquired = True
                    return True
                return False
        else:
            logger.warning("DatabaseAdvisoryLock not supported for this vendor. Falling back to PID lock.")
            # Keep the fallback so release() can actually release it
            # (previously the PIDFileLock was created and discarded here,
            # so its lock file could never be released explicitly).
            self._fallback = PIDFileLock()
            return self._fallback.acquire()

    def release(self):
        """Release whichever lock acquire() obtained; no-op if not held."""
        if self._fallback is not None:
            self._fallback.release()
            self._fallback = None
            return

        if not self.acquired:
            return

        if connection.vendor == 'postgresql':
            with connection.cursor() as cursor:
                cursor.execute("SELECT pg_advisory_unlock(%s)", [self.LOCK_ID])
        elif connection.vendor == 'mysql':
            with connection.cursor() as cursor:
                cursor.execute("SELECT RELEASE_LOCK(%s)", [str(self.LOCK_ID)])

        self.acquired = False
|
|
100
|
+
|
|
101
|
+
def get_best_lock():
    """
    Pick the strongest available single-instance lock.

    DB advisory locks are preferred when the backend supports them
    (PostgreSQL/MySQL); any other vendor (e.g. sqlite) gets a PID file lock.
    """
    advisory_capable = ('postgresql', 'mysql')
    if connection.vendor not in advisory_capable:
        return PIDFileLock()
    return DatabaseAdvisoryLock()
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
from django.core.management.base import BaseCommand
|
|
2
|
+
from zenpulse_scheduler.engine import ZenPulseEngine
|
|
3
|
+
|
|
4
|
+
class Command(BaseCommand):
    """Management command entry point: runs the blocking scheduler engine."""

    help = 'Runs the ZenPulse Scheduler'

    def add_arguments(self, parser):
        """Register CLI flags for sync cadence and single-instance locking."""
        parser.add_argument(
            '--sync-every',
            type=int,
            default=10,
            help='Seconds between DB config syncs (default: 10)'
        )
        parser.add_argument(
            '--lock',
            action='store_true',
            help='Enable single-instance locking (DB or File)'
        )

    def handle(self, *args, **options):
        """Build the engine from CLI options and block until it shuts down."""
        interval = options['sync_every']
        locking = options['lock']

        banner = f"Starting ZenPulse Scheduler (Sync: {interval}s, Lock: {locking})..."
        self.stdout.write(self.style.SUCCESS(banner))

        # start() blocks until SIGINT/SIGTERM or a crash.
        ZenPulseEngine(sync_interval=interval, use_lock=locking).start()
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
from django.db import models
|
|
2
|
+
from django.utils.translation import gettext_lazy as _
|
|
3
|
+
|
|
4
|
+
class ScheduleConfig(models.Model):
    """
    Editable schedule for one registered job, identified by job_key.

    trigger_type selects which field group applies: the interval_* fields
    for 'interval', or the cron_* fields for 'cron'.
    """

    TRIGGER_CHOICES = (
        ('interval', 'Interval'),
        ('cron', 'Cron'),
    )

    LOG_POLICY_CHOICES = (
        ('none', 'None'),
        ('failures', 'Failures Only'),
        ('all', 'All Executions'),
    )

    INTERVAL_UNIT_CHOICES = (
        ('seconds', 'Seconds'),
        ('minutes', 'Minutes'),
        ('hours', 'Hours'),
        ('days', 'Days'),
        ('weeks', 'Weeks'),
    )

    # Must match a key registered in the job registry; also used as the
    # APScheduler job id.
    job_key = models.CharField(
        max_length=255,
        unique=True,
        help_text="Unique key identifier for the job (must match the registered job name)."
    )
    enabled = models.BooleanField(default=True)
    trigger_type = models.CharField(max_length=20, choices=TRIGGER_CHOICES, default='interval')

    # Interval Fields
    interval_value = models.IntegerField(null=True, blank=True, help_text="Value for interval trigger.")
    interval_unit = models.CharField(
        max_length=20,
        choices=INTERVAL_UNIT_CHOICES,
        default='minutes',
        null=True, blank=True
    )

    # Cron Fields
    cron_minute = models.CharField(max_length=100, default='*', help_text="Cron minute (0-59 or *)")
    cron_hour = models.CharField(max_length=100, default='*', help_text="Cron hour (0-23 or *)")
    cron_day = models.CharField(max_length=100, default='*', help_text="Cron day of month (1-31 or *)")
    cron_month = models.CharField(max_length=100, default='*', help_text="Cron month (1-12 or *)")
    cron_day_of_week = models.CharField(max_length=100, default='*', help_text="Cron day of week (0-6 or mon,tue... or *)")

    # Options (forwarded to APScheduler's add_job by the sync loop)
    max_instances = models.IntegerField(default=1, help_text="Maximum number of concurrently running instances allowed.")
    coalesce = models.BooleanField(default=True, help_text="Combine missed runs into one.")
    misfire_grace_time = models.IntegerField(default=60, help_text="Seconds after the designated run time that the job is still allowed to run.")

    # How executions of this job are persisted to JobExecutionLog.
    log_policy = models.CharField(max_length=20, choices=LOG_POLICY_CHOICES, default='failures')

    # auto_now: bumped on every save; the sync loop uses this timestamp
    # (together with `enabled`) for change detection.
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return f"{self.job_key} ({self.trigger_type})"
|
|
59
|
+
|
|
60
|
+
class JobExecutionLog(models.Model):
    """One record per logged job run (success or failure), written by the
    execution listener according to the job's log_policy."""

    STATUS_CHOICES = (
        ('success', 'Success'),
        ('fail', 'Fail'),
    )

    job_key = models.CharField(max_length=255, db_index=True)
    # auto_now_add: stamped when the listener creates the row.
    run_time = models.DateTimeField(auto_now_add=True)
    status = models.CharField(max_length=10, choices=STATUS_CHOICES)
    duration_ms = models.FloatField(help_text="Execution duration in milliseconds")

    # Populated only for failed runs.
    exception_type = models.CharField(max_length=255, null=True, blank=True)
    exception_message = models.TextField(null=True, blank=True)
    traceback = models.TextField(null=True, blank=True)

    # Where the run happened, useful for multi-host deployments.
    hostname = models.CharField(max_length=255, null=True, blank=True)
    pid = models.IntegerField(null=True, blank=True)

    class Meta:
        ordering = ['-run_time']
        indexes = [
            models.Index(fields=['job_key', 'status']),
            models.Index(fields=['run_time']),
        ]

    def __str__(self):
        return f"{self.job_key} - {self.status} at {self.run_time}"
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
|
|
3
|
+
logger = logging.getLogger(__name__)
|
|
4
|
+
|
|
5
|
+
class JobRegistry:
    """Process-wide mapping of job keys to their callable implementations.

    Jobs register themselves at import time (via the zenpulse_job
    decorator); the sync loop looks them up by key when scheduling.
    """

    _registry = {}

    @classmethod
    def register(cls, name):
        """Return a decorator that stores the wrapped callable under `name`."""
        def _store(func):
            already_known = name in cls._registry
            if already_known:
                logger.warning(f"Job with key '{name}' already registered. Overwriting.")
            cls._registry[name] = func
            return func
        return _store

    @classmethod
    def get_job(cls, name):
        """Look up a registered callable by key; None when unknown."""
        return cls._registry.get(name)

    @classmethod
    def get_all_jobs(cls):
        """Expose the full key -> callable mapping."""
        return cls._registry
|
|
24
|
+
|
|
25
|
+
# Public entry point wrapping JobRegistry.register
def zenpulse_job(name):
    """
    Decorator to register a function as a ZenPulse job.

    `name` must match a ScheduleConfig.job_key row for the job to be
    picked up by the scheduler's sync loop.

    Usage:
        @zenpulse_job('my_unique_job_key')
        def my_job_function():
            pass
    """
    return JobRegistry.register(name)
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
from .models import ScheduleConfig
|
|
3
|
+
from .registry import JobRegistry
|
|
4
|
+
from .triggers import build_trigger
|
|
5
|
+
|
|
6
|
+
logger = logging.getLogger(__name__)
|
|
7
|
+
|
|
8
|
+
def sync_jobs(scheduler, last_synced_data):
    """
    Reconcile DB ScheduleConfig rows with the in-memory APScheduler.

    Args:
        scheduler: running APScheduler instance to mutate.
        last_synced_data: change-detection cache, {job_key: (enabled, updated_at_ts)}.
            Mutated in place. A job's entry is only refreshed once its state
            has actually been applied, so a transient add_job failure is
            retried on the next sync instead of being silently dropped
            (previously the cache was updated up front, masking failures).
    """
    logger.debug("Starting sync_jobs...")
    configs = ScheduleConfig.objects.all()

    # Track which jobs we've seen in the DB to handle removals.
    active_db_jobs = set()

    for config in configs:
        job_key = config.job_key
        active_db_jobs.add(job_key)

        current_state = (config.enabled, config.updated_at.timestamp())
        if last_synced_data.get(job_key) == current_state:
            continue  # unchanged since last sync

        # 1. Check if job is in registry
        func = JobRegistry.get_job(job_key)
        if not func:
            logger.warning(f"Job '{job_key}' found in config but NOT in registry. Skipping.")
            # Cache anyway so we warn once per config change, not every sync.
            last_synced_data[job_key] = current_state
            continue

        # 2. Check if job exists in scheduler
        existing_job = scheduler.get_job(job_key)

        # 3. Handle Enabled/Disabled
        if not config.enabled:
            if existing_job:
                logger.info(f"Removing disabled job: {job_key}")
                scheduler.remove_job(job_key)
            last_synced_data[job_key] = current_state
            continue

        # 4. Handle Active Jobs
        trigger = build_trigger(config)

        kwargs = {
            'id': job_key,
            'name': job_key,
            'func': func,
            'trigger': trigger,
            'replace_existing': True,
            'coalesce': config.coalesce,
            'max_instances': config.max_instances,
            'misfire_grace_time': config.misfire_grace_time,
        }

        logger.info(f"Syncing job: {job_key}")
        try:
            scheduler.add_job(**kwargs)
            # Only mark as synced on success so failures retry next cycle.
            last_synced_data[job_key] = current_state
        except Exception as e:
            logger.error(f"Failed to add/update job {job_key}: {e}")

    # 5. Remove scheduler jobs whose config rows were deleted.
    # MemoryJobStore only contains jobs we added, so this is safe.
    for job in scheduler.get_jobs():
        if job.id not in active_db_jobs:
            logger.info(f"Job {job.id} not in DB config. Removing.")
            scheduler.remove_job(job.id)

    # Prune cache entries for deleted configs so re-created rows resync cleanly.
    for key in list(last_synced_data):
        if key not in active_db_jobs:
            del last_synced_data[key]
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
from apscheduler.triggers.interval import IntervalTrigger
|
|
2
|
+
from apscheduler.triggers.cron import CronTrigger
|
|
3
|
+
|
|
4
|
+
def build_trigger(config):
    """
    Build an APScheduler trigger from a ScheduleConfig instance.

    Returns an IntervalTrigger for trigger_type 'interval', a CronTrigger
    for 'cron', and None for any other value — callers must handle None.
    """
    if config.trigger_type == 'interval':
        # Default to minutes / 1 when values are missing.
        unit = config.interval_unit or 'minutes'
        value = config.interval_value or 1

        # IntervalTrigger keyword names match our unit choices directly;
        # fall back to minutes for any unexpected unit string.
        if unit not in ('seconds', 'minutes', 'hours', 'days', 'weeks'):
            unit = 'minutes'
        return IntervalTrigger(**{unit: value})

    if config.trigger_type == 'cron':
        return CronTrigger(
            minute=config.cron_minute,
            hour=config.cron_hour,
            day=config.cron_day,
            month=config.cron_month,
            day_of_week=config.cron_day_of_week,
            # ScheduleConfig has no timezone field today; tolerate one being
            # added later, otherwise default to the scheduler's timezone.
            timezone=getattr(config, 'timezone', None) or None
        )

    return None
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: zenpulse_scheduler
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: A DB-driven APScheduler for Django
|
|
5
|
+
Classifier: Framework :: Django
|
|
6
|
+
Classifier: Programming Language :: Python :: 3
|
|
7
|
+
Requires-Python: >=3.8
|
|
8
|
+
Description-Content-Type: text/markdown
|
|
9
|
+
Requires-Dist: django>=3.2
|
|
10
|
+
Requires-Dist: apscheduler>=3.6.3
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
zenpulse_scheduler/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
2
|
+
zenpulse_scheduler/admin.py,sha256=gyhR_w42OMy-HWP-Ihc7d4xL7-PGZJ4pELndJ83xDiA,2158
|
|
3
|
+
zenpulse_scheduler/apps.py,sha256=-El1L4XXmOT6da9nsavAKkodFodN4H8XHnnc2c9Ync0,166
|
|
4
|
+
zenpulse_scheduler/engine.py,sha256=JBXEg-lCcAwpDLiVMKXWIL5FPP4Seylk1tqzTS5XPSk,2952
|
|
5
|
+
zenpulse_scheduler/listeners.py,sha256=zk6L5jxOKSbQSKkBsqmyfOdbyt_IxL7_N-ayC-sbFIU,2669
|
|
6
|
+
zenpulse_scheduler/locks.py,sha256=iHSUjGrTmZz8G62AWusJna9fy_N-uHMMH7A90vp6Sns,3681
|
|
7
|
+
zenpulse_scheduler/models.py,sha256=c9Vs7ZcQznO2gTxGvEOvDKFRq6NR4DCPWJmk2glJupo,3307
|
|
8
|
+
zenpulse_scheduler/registry.py,sha256=TSvIFq4VgmtXjR77r5LbUh23J5uIHnHoWjyvzi-pQlw,800
|
|
9
|
+
zenpulse_scheduler/sync.py,sha256=KbqbFDuTLmct2s3XWHHKK8zIWGzFgw44xt7ZKqViXKM,2702
|
|
10
|
+
zenpulse_scheduler/triggers.py,sha256=56ucYsxs5UA69802MC8nZAAgR1vpIaqx37RNd6UZRLk,1430
|
|
11
|
+
zenpulse_scheduler/management/commands/run_zenpulse_scheduler.py,sha256=szJj2CV5avKxWi0DIxHiME-rOfqNuKUqwveSA7fS_yQ,915
|
|
12
|
+
zenpulse_scheduler-0.1.0.dist-info/METADATA,sha256=xYvEgIcMP6OiIzhrGJ6XxDdRApbBUkhZGdWtg_biWxw,310
|
|
13
|
+
zenpulse_scheduler-0.1.0.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
|
|
14
|
+
zenpulse_scheduler-0.1.0.dist-info/top_level.txt,sha256=6aLspn3Yg8Me-MY6p0DK4NCmWfmLy2j5jxnqOyM0XNo,19
|
|
15
|
+
zenpulse_scheduler-0.1.0.dist-info/RECORD,,
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
zenpulse_scheduler
|