whenly 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- whenly/__init__.py +6 -0
- whenly/cli.py +95 -0
- whenly/models.py +70 -0
- whenly/runner.py +113 -0
- whenly/scheduler.py +360 -0
- whenly/store.py +247 -0
- whenly-0.1.0.dist-info/METADATA +108 -0
- whenly-0.1.0.dist-info/RECORD +11 -0
- whenly-0.1.0.dist-info/WHEEL +5 -0
- whenly-0.1.0.dist-info/entry_points.txt +2 -0
- whenly-0.1.0.dist-info/top_level.txt +1 -0
whenly/__init__.py
ADDED
whenly/cli.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
"""CLI for whenly."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import argparse
|
|
6
|
+
import sys
|
|
7
|
+
from datetime import datetime, timezone
|
|
8
|
+
|
|
9
|
+
from .models import JobStatus
|
|
10
|
+
from .store import Store
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def main(argv: list[str] | None = None) -> None:
    """Entry point for the ``whenly`` command-line interface.

    Parses arguments, opens the job store, and dispatches to the chosen
    sub-command (``list``, ``history``, ``run``). The store is now always
    closed via try/finally — the previous code leaked the SQLite connection
    on the early ``return`` paths and on every ``sys.exit`` inside the
    ``run`` branch.

    Args:
        argv: argument list for testing; ``None`` means ``sys.argv[1:]``.
    """
    parser = argparse.ArgumentParser(prog="whenly", description="Lightweight job scheduler")
    parser.add_argument("--db", default="whenly.db", help="Database path")
    sub = parser.add_subparsers(dest="command")

    sub.add_parser("list", help="List all jobs and next run time")
    sub.add_parser("history", help="Show job run history").add_argument("name")

    run_parser = sub.add_parser("run", help="Manually trigger a job")
    run_parser.add_argument("name")

    args = parser.parse_args(argv)
    if not args.command:
        parser.print_help()
        sys.exit(1)

    store = Store(args.db)
    try:
        if args.command == "list":
            _cmd_list(store)
        elif args.command == "history":
            _cmd_history(store, args.name)
        elif args.command == "run":
            _cmd_run(store, args.name)
    finally:
        # Always release the SQLite connection, even on sys.exit or errors.
        store.close()


def _cmd_list(store: Store) -> None:
    """Print one line per registered job: enabled flag, schedule, next run."""
    jobs = store.list_jobs()
    if not jobs:
        print("No jobs registered.")
        return
    for j in jobs:
        next_run = j.next_run_at.strftime("%Y-%m-%d %H:%M:%S") if j.next_run_at else "—"
        status = "✓" if j.enabled else "✗"
        sched = _fmt_schedule(j)
        print(f" {status} {j.name:<30} [{sched:<12}] next: {next_run}")


def _cmd_history(store: Store, name: str) -> None:
    """Print the most recent runs (up to 20) for the named job."""
    runs = store.get_runs(name, limit=20)
    if not runs:
        print(f"No runs found for '{name}'.")
        return
    for r in runs:
        ts = r.started_at.strftime("%Y-%m-%d %H:%M:%S") if r.started_at else "?"
        dur = f"{r.duration_seconds:.1f}s" if r.duration_seconds else "?"
        status_icon = {
            JobStatus.SUCCESS: "✓",
            JobStatus.FAILED: "✗",
            JobStatus.TIMEOUT: "⏱",
            JobStatus.RUNNING: "⟳",
        }.get(r.status, "?")
        # Show only the last (most informative) line of a stored traceback.
        err = f" — {r.error_message.splitlines()[-1][:60]}" if r.error_message else ""
        print(f" {status_icon} {ts} {dur:>8} {r.status.value:<8}{err}")


def _cmd_run(store: Store, name: str) -> None:
    """Import the job's function by dotted path and invoke it synchronously."""
    job = store.get_job(name)
    if not job:
        print(f"Job '{name}' not found.")
        sys.exit(1)
    try:
        parts = job.func_path.rsplit(".", 1)
        if len(parts) == 2:
            import importlib

            mod = importlib.import_module(parts[0])
            func = getattr(mod, parts[1])
            print(f"Running {name}...")
            func()
            print("Done.")
        else:
            print(f"Cannot resolve function: {job.func_path}")
            sys.exit(1)
    except Exception as e:
        print(f"Error: {e}")
        sys.exit(1)
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def _fmt_schedule(job) -> str:
|
|
85
|
+
if job.schedule_type.value == "interval":
|
|
86
|
+
return f"every {job.interval_seconds}s"
|
|
87
|
+
elif job.schedule_type.value == "cron":
|
|
88
|
+
return job.cron_expr or "?"
|
|
89
|
+
elif job.schedule_type.value == "once":
|
|
90
|
+
return f"once at {job.scheduled_for or '?'}"
|
|
91
|
+
return job.schedule_type.value
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
# Allow running the CLI directly (e.g. `python -m whenly.cli`).
if __name__ == "__main__":
    main()
|
whenly/models.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
"""Data models for whenly."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import uuid
|
|
6
|
+
from dataclasses import dataclass, field
|
|
7
|
+
from datetime import datetime
|
|
8
|
+
from enum import Enum
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _utcnow() -> datetime:
|
|
13
|
+
return datetime.utcnow()
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _uuid() -> str:
|
|
17
|
+
return uuid.uuid4().hex
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class ScheduleType(str, Enum):
    """How a job's next run is derived: fixed interval, cron expression, or one-shot."""

    INTERVAL = "interval"
    CRON = "cron"
    ONCE = "once"


class MissedPolicy(str, Enum):
    """What to do about occurrences that were missed (handled in Scheduler._tick)."""

    RUN_ONCE = "run_once"  # a single catch-up execution
    RUN_ALL = "run_all"    # replay every missed occurrence
    SKIP = "skip"          # ignore missed occurrences entirely


class JobStatus(str, Enum):
    """Lifecycle state of a single run, stored as TEXT in the job_runs table."""

    SUCCESS = "success"
    FAILED = "failed"
    TIMEOUT = "timeout"
    RUNNING = "running"
    PENDING = "pending"
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
@dataclass
class Job:
    """A registered schedulable unit of work (persisted in the ``jobs`` table)."""

    name: str                    # unique human-readable identifier (UNIQUE in DB)
    func_path: str               # dotted import path, e.g. "pkg.mod.func"
    schedule_type: ScheduleType
    interval_seconds: int | float | None = None  # INTERVAL jobs only
    cron_expr: str | None = None                 # CRON jobs only
    scheduled_for: datetime | None = None        # ONCE jobs only
    missed_policy: MissedPolicy = MissedPolicy.RUN_ONCE
    max_concurrent: int = 1                      # cap on simultaneous runs
    timeout_seconds: int | None = None
    enabled: bool = True
    id: str = field(default_factory=_uuid)
    next_run_at: datetime | None = None          # naive UTC, like all datetimes here
    last_run_at: datetime | None = None
    created_at: datetime = field(default_factory=_utcnow)
    updated_at: datetime = field(default_factory=_utcnow)
    # Runtime only — not persisted
    func: Any = field(default=None, repr=False, compare=False)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
@dataclass
class JobRun:
    """One execution attempt of a Job (persisted in the ``job_runs`` table)."""

    job_id: str                            # foreign key to Job.id
    status: JobStatus = JobStatus.PENDING
    started_at: datetime | None = None
    finished_at: datetime | None = None
    duration_seconds: float | None = None
    error_message: str | None = None       # full traceback text on failure
    scheduled_for: datetime | None = None  # the tick this run was due at, if any
    id: str = field(default_factory=_uuid)
|
whenly/runner.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
"""Job execution engine."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
import threading
|
|
7
|
+
import traceback
|
|
8
|
+
from concurrent.futures import Future, ThreadPoolExecutor
|
|
9
|
+
from datetime import datetime
|
|
10
|
+
from typing import Any, Callable
|
|
11
|
+
|
|
12
|
+
from .models import Job, JobRun, JobStatus
|
|
13
|
+
from .store import Store
|
|
14
|
+
|
|
15
|
+
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class JobRunner:
    """Executes jobs in a thread pool.

    Each accepted job gets a persisted JobRun row (status RUNNING) before
    being handed to the pool; the row is updated with the outcome when the
    worker finishes. The pool is created lazily so an idle runner owns no
    threads.
    """

    def __init__(self, store: Store, max_workers: int = 10) -> None:
        self._store = store
        self._max_workers = max_workers
        self._executor: ThreadPoolExecutor | None = None
        # run.id -> in-flight future; guarded by _lock.
        self._futures: dict[str, Future[None]] = {}
        self._lock = threading.Lock()

    def _ensure_executor(self) -> ThreadPoolExecutor:
        """Create the thread pool on first use and cache it."""
        if self._executor is None:
            self._executor = ThreadPoolExecutor(max_workers=self._max_workers)
        return self._executor

    def submit(self, job: Job, scheduled_for: datetime | None = None) -> bool:
        """Submit a job for execution. Returns True if submitted.

        Rejects disabled jobs and jobs already at their ``max_concurrent``
        limit (counted from persisted RUNNING rows).
        """
        if not job.enabled:
            return False
        running = self._store.get_running_count(job.id)
        if running >= job.max_concurrent:
            logger.debug("Job %s at max concurrent (%d), skipping", job.name, job.max_concurrent)
            return False

        # Persist the RUNNING row before dispatch so concurrency counting
        # sees it immediately.
        run = JobRun(
            job_id=job.id,
            status=JobStatus.RUNNING,
            started_at=datetime.utcnow(),
            scheduled_for=scheduled_for,
        )
        self._store.save_run(run)

        executor = self._ensure_executor()
        with self._lock:
            self._futures[run.id] = executor.submit(self._execute, job, run)
        return True

    def _execute(self, job: Job, run: JobRun) -> None:
        """Worker body: resolve the callable, run it, record the outcome."""
        func: Callable[..., Any] = job.func
        if func is None:
            func = _resolve_func(job.func_path)
        if func is None:
            run.status = JobStatus.FAILED
            run.error_message = f"Cannot resolve function: {job.func_path}"
            self._store.update_run(run)
            with self._lock:
                self._futures.pop(run.id, None)
            return

        try:
            # One-off jobs created via Scheduler.later carry call args on
            # private attributes.
            args = getattr(job, '_args', ()) or ()
            kwargs = getattr(job, '_kwargs', {}) or {}
            result = func(*args, **kwargs)
            # Check if result is a future we should wait on
            if isinstance(result, Future):
                result.result(timeout=job.timeout_seconds)
        except Exception as exc:
            # BUGFIX: Future.result raises concurrent.futures.TimeoutError,
            # which is only an alias of builtin TimeoutError on Python >= 3.11.
            # Checking only the builtin misclassified timeouts as FAILED on
            # older interpreters.
            from concurrent.futures import TimeoutError as _FuturesTimeout

            if isinstance(exc, (TimeoutError, _FuturesTimeout)):
                run.status = JobStatus.TIMEOUT
            else:
                run.status = JobStatus.FAILED
            run.error_message = traceback.format_exc()
            logger.error("Job %s failed: %s", job.name, exc)
        finally:
            run.finished_at = datetime.utcnow()
            run.duration_seconds = (run.finished_at - (run.started_at or run.finished_at)).total_seconds()
            if run.status == JobStatus.RUNNING:
                run.status = JobStatus.SUCCESS
            self._store.update_run(run)
            self._store.update_last_run(job.id, run.finished_at)

            with self._lock:
                self._futures.pop(run.id, None)

    def shutdown(self, wait: bool = True) -> None:
        """Shut the pool down (idempotent); optionally wait for in-flight jobs."""
        with self._lock:
            executor = self._executor
            self._executor = None
        if executor is not None:
            executor.shutdown(wait=wait)
        with self._lock:
            self._futures.clear()
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def _resolve_func(func_path: str) -> Callable[..., Any] | None:
|
|
103
|
+
"""Import a function from its dotted path (e.g. 'mymodule.jobs.sync_data')."""
|
|
104
|
+
try:
|
|
105
|
+
parts = func_path.rsplit(".", 1)
|
|
106
|
+
if len(parts) == 2:
|
|
107
|
+
mod_path, func_name = parts
|
|
108
|
+
import importlib
|
|
109
|
+
mod = importlib.import_module(mod_path)
|
|
110
|
+
return getattr(mod, func_name, None)
|
|
111
|
+
except Exception:
|
|
112
|
+
logger.debug("Cannot resolve %s", func_path, exc_info=True)
|
|
113
|
+
return None
|
whenly/scheduler.py
ADDED
|
@@ -0,0 +1,360 @@
|
|
|
1
|
+
"""Main Scheduler class for whenly."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
import threading
|
|
7
|
+
import time
|
|
8
|
+
from datetime import datetime, timedelta
|
|
9
|
+
from functools import wraps
|
|
10
|
+
from typing import Any, Callable
|
|
11
|
+
|
|
12
|
+
from croniter import croniter
|
|
13
|
+
|
|
14
|
+
from .models import Job, JobRun, MissedPolicy, ScheduleType
|
|
15
|
+
from .runner import JobRunner
|
|
16
|
+
from .store import Store
|
|
17
|
+
|
|
18
|
+
logger = logging.getLogger(__name__)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class Scheduler:
    """Lightweight persistent job scheduler.

    Jobs are registered with the ``every``/``cron`` decorators or the
    ``add``/``later`` methods, persisted via ``Store``, and executed on a
    ``JobRunner`` thread pool. ``start()`` polls for due jobs from a daemon
    thread; ``run()`` does the same in the foreground.

    All timestamps are naive UTC (``datetime.utcnow``), matching the store's
    ISO-string persistence.
    """

    def __init__(
        self,
        db_path: str = "whenly.db",
        tick_interval: float = 1.0,
        max_workers: int = 10,
    ) -> None:
        self._store = Store(db_path)
        self._runner = JobRunner(self._store, max_workers=max_workers)
        self._tick_interval = tick_interval
        self._thread: threading.Thread | None = None
        self._running = False
        self._stopped = False
        self._lock = threading.Lock()
        # name -> live callable, so persisted jobs can run without re-import.
        self._funcs: dict[str, Callable[..., Any]] = {}

    def __enter__(self) -> Scheduler:
        return self

    def __exit__(self, *args: Any) -> None:
        self.close()

    def close(self) -> None:
        """Stop the scheduler and close the store safely."""
        if self._stopped:
            return
        self.stop()
        with self._lock:
            self._stopped = True
            self._store.close()

    # -- decorators --

    def every(
        self,
        amount: int = 1,
        unit: str = "seconds",
        *,
        name: str | None = None,
        missed: str | MissedPolicy = MissedPolicy.RUN_ONCE,
        timeout: int | None = None,
        max_concurrent: int = 1,
    ) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
        """Decorator to register an interval job.

        Usage:
            @s.every(5, "minutes")
            def my_job(): ...
        """
        seconds = _parse_interval(amount, unit)
        policy = MissedPolicy(missed) if isinstance(missed, str) else missed

        def decorator(fn: Callable[..., Any]) -> Callable[..., Any]:
            job_name = name or fn.__name__
            with self._lock:
                self._funcs[job_name] = fn
            # next_run_at = now: the first run fires on the next tick.
            job = Job(
                name=job_name,
                func_path=_func_path(fn),
                schedule_type=ScheduleType.INTERVAL,
                interval_seconds=seconds,
                missed_policy=policy,
                timeout_seconds=timeout,
                max_concurrent=max_concurrent,
                next_run_at=datetime.utcnow(),
                func=fn,
            )
            self._store.save_job(job)
            return fn
        return decorator

    def cron(
        self,
        expr: str,
        *,
        name: str | None = None,
        missed: str | MissedPolicy = MissedPolicy.SKIP,
        timeout: int | None = None,
        max_concurrent: int = 1,
    ) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
        """Decorator to register a cron job."""
        policy = MissedPolicy(missed) if isinstance(missed, str) else missed

        def decorator(fn: Callable[..., Any]) -> Callable[..., Any]:
            job_name = name or fn.__name__
            with self._lock:
                self._funcs[job_name] = fn
            # First occurrence strictly after "now" per croniter.get_next.
            next_run = croniter(expr, datetime.utcnow()).get_next(datetime)
            job = Job(
                name=job_name,
                func_path=_func_path(fn),
                schedule_type=ScheduleType.CRON,
                cron_expr=expr,
                missed_policy=policy,
                timeout_seconds=timeout,
                max_concurrent=max_concurrent,
                next_run_at=next_run,
                func=fn,
            )
            self._store.save_job(job)
            return fn
        return decorator

    # -- programmatic API --

    def add(
        self,
        func: Callable[..., Any],
        *,
        every: str | None = None,
        cron: str | None = None,
        name: str | None = None,
        missed: str | MissedPolicy = MissedPolicy.RUN_ONCE,
        timeout: int | None = None,
        max_concurrent: int = 1,
    ) -> Job:
        """Add a job programmatically.

        Exactly one of ``every`` (e.g. "5m", "30s") or ``cron`` must be
        given; raises ValueError otherwise. Returns the persisted Job.
        """
        policy = MissedPolicy(missed) if isinstance(missed, str) else missed
        job_name = name or func.__name__
        with self._lock:
            self._funcs[job_name] = func

        if cron:
            next_run = croniter(cron, datetime.utcnow()).get_next(datetime)
            job = Job(
                name=job_name, func_path=_func_path(func),
                schedule_type=ScheduleType.CRON, cron_expr=cron,
                missed_policy=policy, timeout_seconds=timeout,
                max_concurrent=max_concurrent, next_run_at=next_run, func=func,
            )
        elif every:
            seconds = _parse_interval_str(every)
            job = Job(
                name=job_name, func_path=_func_path(func),
                schedule_type=ScheduleType.INTERVAL, interval_seconds=seconds,
                missed_policy=policy, timeout_seconds=timeout,
                max_concurrent=max_concurrent, next_run_at=datetime.utcnow(), func=func,
            )
        else:
            raise ValueError("Must specify either 'every' or 'cron'")

        return self._store.save_job(job)

    def later(
        self,
        amount: int = 1,
        unit: str = "seconds",
        func: Callable[..., Any] | None = None,
        *,
        name: str | None = None,
        args: tuple = (),
        kwargs: dict | None = None,
    ) -> Job | Callable[[Callable[..., Any]], Job]:
        """Schedule a one-off job to run after a delay.

        Can be used as a method or decorator:
            s.later(30, "seconds", send_email)
            @s.later(5, "minutes")
            def cleanup(): ...

        NOTE: in decorator form the decorated name is rebound to the Job,
        not the function.
        """
        seconds = _parse_interval(amount, unit)
        _kwargs = kwargs or {}

        if func is not None:
            return self._create_oneoff(func, seconds, name or func.__name__, args, _kwargs)

        def decorator(fn: Callable[..., Any]) -> Job:
            return self._create_oneoff(fn, seconds, name or fn.__name__, args, _kwargs)

        return decorator  # type: ignore[return-value]

    def _create_oneoff(
        self, func: Callable[..., Any], delay_seconds: int, name: str,
        args: tuple = (), kwargs: dict | None = None,
    ) -> Job:
        """Persist a ONCE job due ``delay_seconds`` from now, carrying call args."""
        job_name = name
        _kwargs = kwargs or {}
        with self._lock:
            self._funcs[job_name] = func
        scheduled = datetime.utcnow() + timedelta(seconds=delay_seconds)
        job = Job(
            name=job_name, func_path=_func_path(func),
            schedule_type=ScheduleType.ONCE, scheduled_for=scheduled,
            next_run_at=scheduled, func=func,
        )
        # Call arguments ride on private attributes; JobRunner._execute reads
        # them via getattr. They are not persisted.
        job._args = args  # type: ignore[attr-defined]
        job._kwargs = _kwargs  # type: ignore[attr-defined]
        return self._store.save_job(job)

    def run_now(self, name: str) -> bool:
        """Manually trigger a job by name. Returns False if unknown."""
        job = self._store.get_job(name)
        if not job:
            return False
        # Attach runtime func if available
        with self._lock:
            job.func = self._funcs.get(job.name)
        self._runner.submit(job)
        return True

    def disable(self, name: str) -> bool:
        """Disable a job by name; disabled jobs are skipped by the tick loop."""
        job = self._store.get_job(name)
        if not job:
            return False
        self._store.set_enabled(job.id, False)
        return True

    def enable(self, name: str) -> bool:
        """Re-enable a previously disabled job by name."""
        job = self._store.get_job(name)
        if not job:
            return False
        self._store.set_enabled(job.id, True)
        return True

    # -- scheduler loop --

    def start(self) -> None:
        """Start the scheduler in a background thread."""
        with self._lock:
            if self._running:
                return
            self._running = True
            self._thread = threading.Thread(target=self._loop, daemon=True)
            self._thread.start()
        logger.info("Scheduler started (background)")

    def stop(self) -> None:
        """Stop the scheduler (idempotent)."""
        with self._lock:
            was_running = self._running
            self._running = False
            thread = self._thread
            self._thread = None
        # Join outside the lock: the loop thread never takes _lock while
        # sleeping, but ticks do, so holding it here could deadlock.
        if was_running and thread:
            thread.join(timeout=5)
        self._runner.shutdown(wait=True)
        logger.info("Scheduler stopped")

    def run(self) -> None:
        """Run the scheduler blocking (foreground)."""
        with self._lock:
            if self._running:
                return
            self._running = True
        try:
            self._loop()
        finally:
            self._running = False
            self._runner.shutdown(wait=True)

    def _loop(self) -> None:
        # _running is read without the lock: a plain bool flip is enough to
        # end the loop within one tick.
        while self._running:
            self._tick()
            time.sleep(self._tick_interval)

    def _tick(self) -> None:
        """One poll cycle: submit due jobs and advance their next-run times."""
        now = datetime.utcnow()
        due_jobs = self._store.get_due_jobs(now)

        for job in due_jobs:
            # Attach runtime func if available
            with self._lock:
                job.func = self._funcs.get(job.name)
            submitted = self._runner.submit(job, scheduled_for=job.next_run_at)

            if submitted:
                # Handle missed runs and schedule next
                if job.schedule_type == ScheduleType.ONCE:
                    # One-off: clear next_run to prevent re-scheduling
                    self._store.update_next_run(job.id, now + timedelta(days=365 * 100))
                    self._store.set_enabled(job.id, False)
                    continue

                # Compute next run FIRST to avoid duplicates
                next_run = self._compute_next(job, now)
                self._store.update_next_run(job.id, next_run)

                # Handle missed runs
                if job.missed_policy == MissedPolicy.RUN_ALL:
                    missed = self._store.get_missed_count(job.id, now)
                    # "- 1": one occurrence was already submitted above.
                    for i in range(missed - 1):
                        # Shallow field-wise copy so each replay is a distinct
                        # Job object sharing the same id.
                        missed_job = Job(
                            **{k: getattr(job, k) for k in job.__dataclass_fields__ if k != "func"},
                            func=job.func,
                        )
                        self._runner.submit(missed_job)

    def _compute_next(self, job: Job, after: datetime) -> datetime:
        """Next due time strictly after ``after`` (60s fallback for odd rows)."""
        if job.schedule_type == ScheduleType.CRON and job.cron_expr:
            return croniter(job.cron_expr, after).get_next(datetime)
        elif job.schedule_type == ScheduleType.INTERVAL and job.interval_seconds:
            return after + timedelta(seconds=job.interval_seconds)
        return after + timedelta(seconds=60)

    # -- accessors --

    @property
    def store(self) -> Store:
        return self._store

    @property
    def jobs(self) -> list[Job]:
        return self._store.list_jobs()

    def get_history(self, name: str, limit: int = 20) -> list[JobRun]:
        """Most recent runs for the named job, newest first."""
        return self._store.get_runs(name, limit)
|
|
331
|
+
|
|
332
|
+
|
|
333
|
+
# -- helpers --
|
|
334
|
+
|
|
335
|
+
def _parse_interval(amount: int, unit: str) -> int:
|
|
336
|
+
"""Parse interval amount + unit to seconds."""
|
|
337
|
+
unit = unit.lower().rstrip("s")
|
|
338
|
+
multipliers = {
|
|
339
|
+
"second": 1, "minute": 60, "hour": 3600, "day": 86400,
|
|
340
|
+
}
|
|
341
|
+
if unit not in multipliers:
|
|
342
|
+
raise ValueError(f"Unknown unit: {unit}. Use seconds/minutes/hours/days")
|
|
343
|
+
return amount * multipliers[unit]
|
|
344
|
+
|
|
345
|
+
|
|
346
|
+
def _parse_interval_str(s: str) -> int | float:
|
|
347
|
+
"""Parse human interval like '5m', '2h', '30s', '1d'."""
|
|
348
|
+
s = s.strip().lower()
|
|
349
|
+
units = {"ms": 0.001, "s": 1, "m": 60, "h": 3600, "d": 86400}
|
|
350
|
+
for suffix, mult in units.items():
|
|
351
|
+
if s.endswith(suffix):
|
|
352
|
+
try:
|
|
353
|
+
return int(s[:-len(suffix)]) * mult
|
|
354
|
+
except ValueError:
|
|
355
|
+
raise ValueError(f"Invalid interval: {s}")
|
|
356
|
+
raise ValueError(f"Invalid interval: {s}. Use format like '5m', '2h', '30s', '1d'")
|
|
357
|
+
|
|
358
|
+
|
|
359
|
+
def _func_path(fn: Callable[..., Any]) -> str:
|
|
360
|
+
return f"{fn.__module__}.{fn.__qualname__}"
|
whenly/store.py
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
1
|
+
"""SQLite + in-memory store for whenly."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
import sqlite3
|
|
7
|
+
from datetime import datetime
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from .models import Job, JobRun, JobStatus, MissedPolicy, ScheduleType
|
|
11
|
+
|
|
12
|
+
logger = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
_SCHEMA = """
|
|
15
|
+
CREATE TABLE IF NOT EXISTS jobs (
|
|
16
|
+
id TEXT PRIMARY KEY,
|
|
17
|
+
name TEXT UNIQUE NOT NULL,
|
|
18
|
+
func_path TEXT NOT NULL,
|
|
19
|
+
schedule_type TEXT NOT NULL,
|
|
20
|
+
interval_seconds INTEGER,
|
|
21
|
+
cron_expr TEXT,
|
|
22
|
+
scheduled_for TEXT,
|
|
23
|
+
missed_policy TEXT DEFAULT 'run_once',
|
|
24
|
+
max_concurrent INTEGER DEFAULT 1,
|
|
25
|
+
timeout_seconds INTEGER,
|
|
26
|
+
enabled INTEGER DEFAULT 1,
|
|
27
|
+
next_run_at TEXT,
|
|
28
|
+
last_run_at TEXT,
|
|
29
|
+
created_at TEXT NOT NULL,
|
|
30
|
+
updated_at TEXT NOT NULL
|
|
31
|
+
);
|
|
32
|
+
|
|
33
|
+
CREATE TABLE IF NOT EXISTS job_runs (
|
|
34
|
+
id TEXT PRIMARY KEY,
|
|
35
|
+
job_id TEXT NOT NULL REFERENCES jobs(id),
|
|
36
|
+
status TEXT NOT NULL,
|
|
37
|
+
started_at TEXT,
|
|
38
|
+
finished_at TEXT,
|
|
39
|
+
duration_seconds REAL,
|
|
40
|
+
error_message TEXT,
|
|
41
|
+
scheduled_for TEXT
|
|
42
|
+
);
|
|
43
|
+
"""
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def _dt(s: str | None) -> datetime | None:
|
|
47
|
+
return datetime.fromisoformat(s) if s else None
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def _dts(d: datetime | None) -> str | None:
|
|
51
|
+
return d.isoformat() if d else None
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class Store:
|
|
55
|
+
"""Persistent store backend (SQLite or :memory:)."""
|
|
56
|
+
|
|
57
|
+
def __init__(self, db_path: str = "whenly.db") -> None:
|
|
58
|
+
self._db_path = db_path
|
|
59
|
+
self._conn: sqlite3.Connection | None = None
|
|
60
|
+
self._connect()
|
|
61
|
+
self._conn.execute("PRAGMA journal_mode=WAL") # type: ignore[union-attr]
|
|
62
|
+
self._conn.executescript(_SCHEMA) # type: ignore[union-attr]
|
|
63
|
+
|
|
64
|
+
# -- connection management --
|
|
65
|
+
|
|
66
|
+
def _connect(self) -> None:
|
|
67
|
+
detect = sqlite3.PARSE_DECLTYPES
|
|
68
|
+
self._conn = sqlite3.connect(self._db_path, detect_types=detect, check_same_thread=False, isolation_level=None)
|
|
69
|
+
self._conn.execute("PRAGMA foreign_keys = ON")
|
|
70
|
+
self._conn.row_factory = sqlite3.Row # type: ignore[union-attr]
|
|
71
|
+
|
|
72
|
+
@property
|
|
73
|
+
def conn(self) -> sqlite3.Connection:
|
|
74
|
+
assert self._conn is not None
|
|
75
|
+
return self._conn
|
|
76
|
+
|
|
77
|
+
def close(self) -> None:
|
|
78
|
+
if self._conn:
|
|
79
|
+
self._conn.close()
|
|
80
|
+
self._conn = None
|
|
81
|
+
|
|
82
|
+
# -- jobs CRUD --
|
|
83
|
+
|
|
84
|
+
    def save_job(self, job: Job) -> Job:
        """Insert the job, or upsert by unique ``name``.

        On a name conflict the existing row keeps its id and is updated in
        place, so the job is re-read and the stored row returned — callers
        should use the returned Job rather than the argument.
        """
        row = self.conn.execute(
            """INSERT INTO jobs (id, name, func_path, schedule_type, interval_seconds,
            cron_expr, scheduled_for, missed_policy, max_concurrent, timeout_seconds,
            enabled, next_run_at, last_run_at, created_at, updated_at)
            VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
            ON CONFLICT(name) DO UPDATE SET
            func_path=excluded.func_path, schedule_type=excluded.schedule_type,
            interval_seconds=excluded.interval_seconds, cron_expr=excluded.cron_expr,
            scheduled_for=excluded.scheduled_for, missed_policy=excluded.missed_policy,
            max_concurrent=excluded.max_concurrent, timeout_seconds=excluded.timeout_seconds,
            enabled=excluded.enabled, next_run_at=excluded.next_run_at,
            last_run_at=excluded.last_run_at, updated_at=excluded.updated_at""",
            (
                job.id, job.name, job.func_path, job.schedule_type.value,
                job.interval_seconds, job.cron_expr, _dts(job.scheduled_for),
                job.missed_policy.value, job.max_concurrent, job.timeout_seconds,
                int(job.enabled), _dts(job.next_run_at), _dts(job.last_run_at),
                job.created_at.isoformat(), job.updated_at.isoformat(),
            ),
        )
        self.conn.commit()
        result = self.get_job(job.name)
        return result if result else job

    def get_job(self, name: str) -> Job | None:
        """Look a job up by its unique name; None if absent."""
        row = self.conn.execute("SELECT * FROM jobs WHERE name=?", (name,)).fetchone()
        return self._row_to_job(row) if row else None

    def get_job_by_id(self, job_id: str) -> Job | None:
        """Look a job up by primary key; None if absent."""
        row = self.conn.execute("SELECT * FROM jobs WHERE id=?", (job_id,)).fetchone()
        return self._row_to_job(row) if row else None

    def list_jobs(self) -> list[Job]:
        """Return all jobs, oldest first."""
        rows = self.conn.execute("SELECT * FROM jobs ORDER BY created_at").fetchall()
        return [self._row_to_job(r) for r in rows]

    def delete_job(self, name: str) -> bool:
        """Delete a job and its run history; True if a job row was removed."""
        # Delete associated runs first to satisfy FK constraint
        job = self.get_job(name)
        if job:
            self.conn.execute("DELETE FROM job_runs WHERE job_id=?", (job.id,))
        cur = self.conn.execute("DELETE FROM jobs WHERE name=?", (name,))
        self.conn.commit()
        return cur.rowcount > 0
|
|
129
|
+
|
|
130
|
+
    def update_next_run(self, job_id: str, next_run: datetime) -> None:
        """Persist a job's next due time (and bump updated_at)."""
        self.conn.execute(
            "UPDATE jobs SET next_run_at=?, updated_at=? WHERE id=?",
            (next_run.isoformat(), datetime.utcnow().isoformat(), job_id),
        )
        self.conn.commit()

    def update_last_run(self, job_id: str, last_run: datetime) -> None:
        """Persist the time a job last finished (and bump updated_at)."""
        self.conn.execute(
            "UPDATE jobs SET last_run_at=?, updated_at=? WHERE id=?",
            (last_run.isoformat(), datetime.utcnow().isoformat(), job_id),
        )
        self.conn.commit()

    def set_enabled(self, job_id: str, enabled: bool) -> None:
        """Enable/disable a job; disabled jobs are excluded from get_due_jobs."""
        self.conn.execute(
            "UPDATE jobs SET enabled=?, updated_at=? WHERE id=?",
            (int(enabled), datetime.utcnow().isoformat(), job_id),
        )
        self.conn.commit()
|
|
150
|
+
|
|
151
|
+
# -- due jobs + missed runs --
|
|
152
|
+
|
|
153
|
+
    def get_due_jobs(self, now: datetime) -> list[Job]:
        """Enabled jobs whose next_run_at is at or before ``now``, soonest first.

        The comparison is on ISO-8601 TEXT, which sorts chronologically for
        the naive-UTC timestamps this package writes.
        """
        rows = self.conn.execute(
            """SELECT * FROM jobs
            WHERE enabled=1 AND next_run_at IS NOT NULL AND next_run_at <= ?
            ORDER BY next_run_at""",
            (now.isoformat(),),
        ).fetchall()
        return [self._row_to_job(r) for r in rows]

    def get_running_count(self, job_id: str) -> int:
        """Number of persisted runs currently in status 'running' for the job."""
        row = self.conn.execute(
            "SELECT COUNT(*) FROM job_runs WHERE job_id=? AND status='running'", (job_id,)
        ).fetchone()
        return row[0]

    def get_missed_count(self, job_id: str, now: datetime) -> int:
        """Count how many scheduled runs were missed between last_run_at and now."""
        job = self.get_job_by_id(job_id)
        if not job:
            return 0
        if job.missed_policy == MissedPolicy.SKIP:
            return 0
        # For simplicity: interval jobs can calculate count; cron/once just 0 or 1
        if job.schedule_type == ScheduleType.INTERVAL and job.interval_seconds and job.last_run_at:
            elapsed = (now - job.last_run_at).total_seconds()
            count = int(elapsed // job.interval_seconds)
            # NOTE(review): when next_run_at is set, one occurrence is assumed
            # already accounted for by the scheduler tick — confirm against
            # Scheduler._tick, which also subtracts one before replaying.
            return max(0, count - 1) if job.next_run_at else max(0, count)
        return 0
|
|
181
|
+
|
|
182
|
+
# -- job runs --
|
|
183
|
+
|
|
184
|
+
def save_run(self, run: JobRun) -> JobRun:
    """Insert a new run record and hand the same JobRun back to the caller."""
    row_values = (
        run.id,
        run.job_id,
        run.status.value,
        _dts(run.started_at),
        _dts(run.finished_at),
        run.duration_seconds,
        run.error_message,
        _dts(run.scheduled_for),
    )
    self.conn.execute(
        """INSERT INTO job_runs (id, job_id, status, started_at, finished_at,
        duration_seconds, error_message, scheduled_for)
        VALUES (?,?,?,?,?,?,?,?)""",
        row_values,
    )
    self.conn.commit()
    return run
|
|
197
|
+
|
|
198
|
+
def update_run(self, run: JobRun) -> None:
    """Persist the mutable fields of an existing run record by id."""
    new_fields = (
        run.status.value,
        _dts(run.started_at),
        _dts(run.finished_at),
        run.duration_seconds,
        run.error_message,
        run.id,
    )
    self.conn.execute(
        """UPDATE job_runs SET status=?, started_at=?, finished_at=?,
        duration_seconds=?, error_message=? WHERE id=?""",
        new_fields,
    )
    self.conn.commit()
|
|
206
|
+
|
|
207
|
+
def get_runs(self, job_name: str, limit: int = 20) -> list[JobRun]:
    """Return the most recent runs (newest first) for the job named *job_name*."""
    cursor = self.conn.execute(
        """SELECT jr.* FROM job_runs jr
        JOIN jobs j ON jr.job_id = j.id WHERE j.name=?
        ORDER BY jr.started_at DESC LIMIT ?""",
        (job_name, limit),
    )
    history = []
    for record in cursor.fetchall():
        history.append(self._row_to_run(record))
    return history
|
|
215
|
+
|
|
216
|
+
# -- internals --
|
|
217
|
+
|
|
218
|
+
def _row_to_job(self, row: sqlite3.Row) -> Job:  # type: ignore[type-arg]
    """Hydrate a Job model from a jobs-table row, parsing timestamps and enums."""
    fields = {
        "id": row["id"],
        "name": row["name"],
        "func_path": row["func_path"],
        "schedule_type": ScheduleType(row["schedule_type"]),
        "interval_seconds": row["interval_seconds"],
        "cron_expr": row["cron_expr"],
        "scheduled_for": _dt(row["scheduled_for"]),
        "missed_policy": MissedPolicy(row["missed_policy"]),
        "max_concurrent": row["max_concurrent"],
        "timeout_seconds": row["timeout_seconds"],
        "enabled": bool(row["enabled"]),
        "next_run_at": _dt(row["next_run_at"]),
        "last_run_at": _dt(row["last_run_at"]),
        # created_at/updated_at are always stored, so parse them directly.
        "created_at": datetime.fromisoformat(row["created_at"]),
        "updated_at": datetime.fromisoformat(row["updated_at"]),
    }
    return Job(**fields)
|
|
236
|
+
|
|
237
|
+
def _row_to_run(self, row: sqlite3.Row) -> JobRun:  # type: ignore[type-arg]
    """Hydrate a JobRun model from a job_runs-table row."""
    fields = {
        "id": row["id"],
        "job_id": row["job_id"],
        "status": JobStatus(row["status"]),
        # Timestamps may be NULL, so go through the nullable parser.
        "started_at": _dt(row["started_at"]),
        "finished_at": _dt(row["finished_at"]),
        "duration_seconds": row["duration_seconds"],
        "error_message": row["error_message"],
        "scheduled_for": _dt(row["scheduled_for"]),
    }
    return JobRun(**fields)
|
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: whenly
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Lightweight persistent job scheduler for Python
|
|
5
|
+
Author: Teja
|
|
6
|
+
License: MIT
|
|
7
|
+
Keywords: scheduler,cron,jobs,tasks,periodic
|
|
8
|
+
Classifier: Development Status :: 3 - Alpha
|
|
9
|
+
Classifier: Intended Audience :: Developers
|
|
10
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
11
|
+
Classifier: Programming Language :: Python :: 3
|
|
12
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
13
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
16
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
17
|
+
Classifier: Operating System :: OS Independent
|
|
18
|
+
Requires-Python: >=3.10
|
|
19
|
+
Description-Content-Type: text/markdown
|
|
20
|
+
Requires-Dist: croniter>=1.3.0
|
|
21
|
+
Provides-Extra: dev
|
|
22
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
23
|
+
Requires-Dist: pytest-cov; extra == "dev"
|
|
24
|
+
Requires-Dist: ruff; extra == "dev"
|
|
25
|
+
|
|
26
|
+
# whenly
|
|
27
|
+
|
|
28
|
+
Lightweight persistent job scheduler for Python — SQLite-backed, minimal dependencies.
|
|
29
|
+
|
|
30
|
+
```python
|
|
31
|
+
from whenly import Scheduler
|
|
32
|
+
|
|
33
|
+
s = Scheduler()
|
|
34
|
+
|
|
35
|
+
@s.every(5, "minutes")
|
|
36
|
+
def sync_data():
|
|
37
|
+
...
|
|
38
|
+
|
|
39
|
+
@s.cron("0 9 * * MON")
|
|
40
|
+
def weekly_report():
|
|
41
|
+
...
|
|
42
|
+
|
|
43
|
+
s.start() # background thread
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
## Features
|
|
47
|
+
|
|
48
|
+
- **SQLite persistence** — jobs survive restarts
|
|
49
|
+
- **Cron expressions** via croniter
|
|
50
|
+
- **Interval scheduling** — every N seconds/minutes/hours
|
|
51
|
+
- **One-off delayed jobs** — run once after a delay
|
|
52
|
+
- **Decorator or programmatic API**
|
|
53
|
+
- **Thread-safe** background runner
|
|
54
|
+
- **CLI** for basic management
|
|
55
|
+
- **Minimal dependencies** — only croniter, for cron support
|
|
56
|
+
|
|
57
|
+
## Install
|
|
58
|
+
|
|
59
|
+
```bash
|
|
60
|
+
pip install whenly
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
## Quick Start
|
|
64
|
+
|
|
65
|
+
```python
|
|
66
|
+
from whenly import Scheduler
|
|
67
|
+
|
|
68
|
+
s = Scheduler()
|
|
69
|
+
|
|
70
|
+
# Interval jobs
|
|
71
|
+
@s.every(30, "seconds")
|
|
72
|
+
def poll_api():
|
|
73
|
+
print("Polling...")
|
|
74
|
+
|
|
75
|
+
# Cron jobs
|
|
76
|
+
@s.cron("0 */2 * * *")
|
|
77
|
+
def cleanup():
|
|
78
|
+
print("Running cleanup...")
|
|
79
|
+
|
|
80
|
+
# One-off delayed job
|
|
81
|
+
s.later(10, "minutes", send_notification, msg="Hello")
|
|
82
|
+
|
|
83
|
+
# Start the scheduler (non-blocking)
|
|
84
|
+
s.start()
|
|
85
|
+
|
|
86
|
+
# Or run blocking
|
|
87
|
+
# s.run()
|
|
88
|
+
```
|
|
89
|
+
|
|
90
|
+
### Programmatic API
|
|
91
|
+
|
|
92
|
+
```python
|
|
93
|
+
s.add_job("poll", interval=60, unit="seconds", fn=poll_api)
|
|
94
|
+
s.add_job("cleanup", cron="0 3 * * *", fn=cleanup)
|
|
95
|
+
s.remove_job("poll")
|
|
96
|
+
s.list_jobs()
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
## CLI
|
|
100
|
+
|
|
101
|
+
```bash
|
|
102
|
+
whenly list
|
|
103
|
+
whenly run <job-name>
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
## License
|
|
107
|
+
|
|
108
|
+
MIT
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
whenly/__init__.py,sha256=6AZ1-oRNeMqTp8_sIqfa9dR7IndKomNR75cjJ9UyECI,136
|
|
2
|
+
whenly/cli.py,sha256=-wqkzChaSp6UNITX6rf2dYZacfJD75lyA0w0SEj59gY,3206
|
|
3
|
+
whenly/models.py,sha256=agi6ZlBMWZf7W2OMgOo_m0_q238p73HyjNS6B0pgcSc,1674
|
|
4
|
+
whenly/runner.py,sha256=FbQHhvJUSsKk7ED0IIQLuRiZYAbeuuUazysAJO_68as,4029
|
|
5
|
+
whenly/scheduler.py,sha256=SM8zYQBqd7Hl2HX4SNHgdORyt6xJzZ-QCDRVgf81wHw,12061
|
|
6
|
+
whenly/store.py,sha256=z6n6j0xPXa9JUgEsjLh0rDsVGot4Oo6J2NKeMqj9M_o,9445
|
|
7
|
+
whenly-0.1.0.dist-info/METADATA,sha256=69_yeSALdStQs2gbEpn5S6UhUls0O0AsX-jmGcypIwI,2284
|
|
8
|
+
whenly-0.1.0.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
|
|
9
|
+
whenly-0.1.0.dist-info/entry_points.txt,sha256=qDxS0nyAE5kvfmmBFmjUCLnGM0wqj5yC8oguxCNFmFQ,43
|
|
10
|
+
whenly-0.1.0.dist-info/top_level.txt,sha256=ssN8iFQSwCeBny9J2bjhTHoHW70oDwkzYnSYzJeX3bA,7
|
|
11
|
+
whenly-0.1.0.dist-info/RECORD,,
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
whenly
|