FlowerPower 0.9.13.1__py3-none-any.whl → 1.0.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- flowerpower/__init__.py +17 -2
- flowerpower/cfg/__init__.py +201 -149
- flowerpower/cfg/base.py +122 -24
- flowerpower/cfg/pipeline/__init__.py +254 -0
- flowerpower/cfg/pipeline/adapter.py +66 -0
- flowerpower/cfg/pipeline/run.py +40 -11
- flowerpower/cfg/pipeline/schedule.py +69 -79
- flowerpower/cfg/project/__init__.py +149 -0
- flowerpower/cfg/project/adapter.py +57 -0
- flowerpower/cfg/project/job_queue.py +165 -0
- flowerpower/cli/__init__.py +92 -37
- flowerpower/cli/job_queue.py +878 -0
- flowerpower/cli/mqtt.py +32 -1
- flowerpower/cli/pipeline.py +559 -406
- flowerpower/cli/utils.py +29 -18
- flowerpower/flowerpower.py +12 -8
- flowerpower/fs/__init__.py +20 -2
- flowerpower/fs/base.py +350 -26
- flowerpower/fs/ext.py +797 -216
- flowerpower/fs/storage_options.py +1097 -55
- flowerpower/io/base.py +13 -18
- flowerpower/io/loader/__init__.py +28 -0
- flowerpower/io/loader/deltatable.py +7 -10
- flowerpower/io/metadata.py +1 -0
- flowerpower/io/saver/__init__.py +28 -0
- flowerpower/io/saver/deltatable.py +4 -3
- flowerpower/job_queue/__init__.py +252 -0
- flowerpower/job_queue/apscheduler/__init__.py +11 -0
- flowerpower/job_queue/apscheduler/_setup/datastore.py +110 -0
- flowerpower/job_queue/apscheduler/_setup/eventbroker.py +93 -0
- flowerpower/job_queue/apscheduler/manager.py +1063 -0
- flowerpower/job_queue/apscheduler/setup.py +524 -0
- flowerpower/job_queue/apscheduler/trigger.py +169 -0
- flowerpower/job_queue/apscheduler/utils.py +309 -0
- flowerpower/job_queue/base.py +382 -0
- flowerpower/job_queue/rq/__init__.py +10 -0
- flowerpower/job_queue/rq/_trigger.py +37 -0
- flowerpower/job_queue/rq/concurrent_workers/gevent_worker.py +226 -0
- flowerpower/job_queue/rq/concurrent_workers/thread_worker.py +231 -0
- flowerpower/job_queue/rq/manager.py +1449 -0
- flowerpower/job_queue/rq/setup.py +150 -0
- flowerpower/job_queue/rq/utils.py +69 -0
- flowerpower/pipeline/__init__.py +5 -0
- flowerpower/pipeline/base.py +118 -0
- flowerpower/pipeline/io.py +407 -0
- flowerpower/pipeline/job_queue.py +505 -0
- flowerpower/pipeline/manager.py +1586 -0
- flowerpower/pipeline/registry.py +560 -0
- flowerpower/pipeline/runner.py +560 -0
- flowerpower/pipeline/visualizer.py +142 -0
- flowerpower/plugins/mqtt/__init__.py +12 -0
- flowerpower/plugins/mqtt/cfg.py +16 -0
- flowerpower/plugins/mqtt/manager.py +789 -0
- flowerpower/settings.py +110 -0
- flowerpower/utils/logging.py +21 -0
- flowerpower/utils/misc.py +57 -9
- flowerpower/utils/sql.py +122 -24
- flowerpower/utils/templates.py +2 -142
- flowerpower-1.0.0b1.dist-info/METADATA +324 -0
- flowerpower-1.0.0b1.dist-info/RECORD +94 -0
- flowerpower/_web/__init__.py +0 -61
- flowerpower/_web/routes/config.py +0 -103
- flowerpower/_web/routes/pipelines.py +0 -173
- flowerpower/_web/routes/scheduler.py +0 -136
- flowerpower/cfg/pipeline/tracker.py +0 -14
- flowerpower/cfg/project/open_telemetry.py +0 -8
- flowerpower/cfg/project/tracker.py +0 -11
- flowerpower/cfg/project/worker.py +0 -19
- flowerpower/cli/scheduler.py +0 -309
- flowerpower/cli/web.py +0 -44
- flowerpower/event_handler.py +0 -23
- flowerpower/mqtt.py +0 -609
- flowerpower/pipeline.py +0 -2499
- flowerpower/scheduler.py +0 -680
- flowerpower/tui.py +0 -79
- flowerpower/utils/datastore.py +0 -186
- flowerpower/utils/eventbroker.py +0 -127
- flowerpower/utils/executor.py +0 -58
- flowerpower/utils/trigger.py +0 -140
- flowerpower-0.9.13.1.dist-info/METADATA +0 -586
- flowerpower-0.9.13.1.dist-info/RECORD +0 -76
- /flowerpower/{cfg/pipeline/params.py → cli/worker.py} +0 -0
- {flowerpower-0.9.13.1.dist-info → flowerpower-1.0.0b1.dist-info}/WHEEL +0 -0
- {flowerpower-0.9.13.1.dist-info → flowerpower-1.0.0b1.dist-info}/entry_points.txt +0 -0
- {flowerpower-0.9.13.1.dist-info → flowerpower-1.0.0b1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1063 @@
|
|
1
|
+
"""
|
2
|
+
APScheduler implementation for FlowerPower scheduler.
|
3
|
+
|
4
|
+
This module implements the scheduler interfaces using APScheduler as the backend.
|
5
|
+
"""
|
6
|
+
|
7
|
+
import datetime as dt
|
8
|
+
import importlib.util
|
9
|
+
from typing import Any, Callable
|
10
|
+
from uuid import UUID
|
11
|
+
import duration_parser
|
12
|
+
from fsspec.spec import AbstractFileSystem
|
13
|
+
from loguru import logger
|
14
|
+
|
15
|
+
# Check if APScheduler is available
|
16
|
+
if not importlib.util.find_spec("apscheduler"):
|
17
|
+
raise ImportError(
|
18
|
+
"APScheduler is not installed. Please install it using `pip install "
|
19
|
+
"'apscheduler>4.0.0a1'`, 'conda install apscheduler4' or `pip install flowerpower[apscheduler]`"
|
20
|
+
)
|
21
|
+
|
22
|
+
from apscheduler import Job, Scheduler
|
23
|
+
from apscheduler.executors.async_ import AsyncJobExecutor
|
24
|
+
from apscheduler.executors.subprocess import ProcessPoolJobExecutor
|
25
|
+
from apscheduler.executors.thread import ThreadPoolJobExecutor
|
26
|
+
|
27
|
+
from ...utils.logging import setup_logging
|
28
|
+
from ..base import BaseJobQueueManager
|
29
|
+
from .setup import APSBackend, APSDataStore, APSEventBroker
|
30
|
+
from .trigger import APSTrigger
|
31
|
+
from .utils import display_jobs, display_schedules
|
32
|
+
|
33
|
+
setup_logging()
|
34
|
+
|
35
|
+
# Patch pickle if needed
|
36
|
+
try:
|
37
|
+
from ...utils.monkey import patch_pickle
|
38
|
+
|
39
|
+
patch_pickle()
|
40
|
+
except Exception as e:
|
41
|
+
logger.warning(f"Failed to patch pickle: {e}")
|
42
|
+
|
43
|
+
|
44
|
+
class APSManager(BaseJobQueueManager):
|
45
|
+
"""Implementation of BaseScheduler using APScheduler.
|
46
|
+
|
47
|
+
This worker class uses APScheduler 4.0+ as the backend to schedule and manage jobs.
|
48
|
+
It supports different job executors including async, thread pool, and process pool.
|
49
|
+
|
50
|
+
Typical usage:
|
51
|
+
```python
|
52
|
+
worker = APSManager(name="my_scheduler")
|
53
|
+
worker.start_worker(background=True)
|
54
|
+
|
55
|
+
# Add a job
|
56
|
+
def my_job(x: int) -> int:
|
57
|
+
return x * 2
|
58
|
+
|
59
|
+
job_id = worker.add_job(my_job, func_args=(10,))
|
60
|
+
```
|
61
|
+
"""
|
62
|
+
|
63
|
+
def __init__(
|
64
|
+
self,
|
65
|
+
name: str | None = "flowerpower_apscheduler",
|
66
|
+
base_dir: str | None = None,
|
67
|
+
backend: APSBackend | dict | None = None,
|
68
|
+
storage_options: dict[str, Any] = None,
|
69
|
+
fs: AbstractFileSystem | None = None,
|
70
|
+
log_level: str | None = None,
|
71
|
+
):
|
72
|
+
"""Initialize the APScheduler backend.
|
73
|
+
|
74
|
+
Args:
|
75
|
+
name: Name of the scheduler instance. Used for identification in logs and data stores.
|
76
|
+
base_dir: Base directory for the FlowerPower project. Used for finding configuration files.
|
77
|
+
backend: APSBackend instance with data store and event broker configurations,
|
78
|
+
or a dictionary with configuration parameters.
|
79
|
+
storage_options: Options for configuring file system storage access.
|
80
|
+
Example: {"mode": "async", "root": "/tmp"}
|
81
|
+
fs: Custom filesystem implementation for storage operations.
|
82
|
+
log_level: Logging level to use for this worker instance.
|
83
|
+
Example: "DEBUG", "INFO", "WARNING", etc.
|
84
|
+
|
85
|
+
Raises:
|
86
|
+
RuntimeError: If backend setup fails due to missing or invalid configurations.
|
87
|
+
ImportError: If required dependencies are not installed.
|
88
|
+
|
89
|
+
Example:
|
90
|
+
```python
|
91
|
+
# Basic initialization
|
92
|
+
worker = APSManager(name="my_scheduler")
|
93
|
+
|
94
|
+
# With custom backend and logging
|
95
|
+
|
96
|
+
# Create a custom backend configuration using dictionaries for data store and event broker
|
97
|
+
backend_config = {
|
98
|
+
"data_store": {"type": "postgresql", "uri": "postgresql+asyncpg://user:pass@localhost/db"},
|
99
|
+
"event_broker": {"type": "redis", "uri": "redis://localhost:6379/0"}
|
100
|
+
}
|
101
|
+
|
102
|
+
# Create a custom backend configuration using APSBackend, APSDataStore, and APSEventBroker classes
|
103
|
+
from flowerpower.worker.aps import APSBackend, APSDataStore, APSEventBroker
|
104
|
+
data_store = APSDataStore(
|
105
|
+
type="postgresql",
|
106
|
+
uri="postgresql+asyncpg://user:pass@localhost/db"
|
107
|
+
)
|
108
|
+
event_broker = APSEventBroker(
|
109
|
+
from_ds_sqla=True
|
110
|
+
)
|
111
|
+
backend_config = APSBackend(
|
112
|
+
data_store=data_store,
|
113
|
+
event_broker=event_broker
|
114
|
+
)
|
115
|
+
|
116
|
+
worker = APSManager(
|
117
|
+
name="custom_scheduler",
|
118
|
+
backend=backend_config,
|
119
|
+
log_level="DEBUG"
|
120
|
+
)
|
121
|
+
```
|
122
|
+
"""
|
123
|
+
if log_level:
|
124
|
+
setup_logging(level=log_level)
|
125
|
+
|
126
|
+
super().__init__(
|
127
|
+
type="apscheduler",
|
128
|
+
name=name,
|
129
|
+
base_dir=base_dir,
|
130
|
+
fs=fs,
|
131
|
+
backend=backend,
|
132
|
+
storage_options=storage_options,
|
133
|
+
)
|
134
|
+
|
135
|
+
if not isinstance(backend, APSBackend):
|
136
|
+
self._setup_backend()
|
137
|
+
|
138
|
+
# Set up job executors
|
139
|
+
self._job_executors = {
|
140
|
+
"async": AsyncJobExecutor(),
|
141
|
+
"threadpool": ThreadPoolJobExecutor(),
|
142
|
+
"processpool": ProcessPoolJobExecutor(),
|
143
|
+
}
|
144
|
+
self._worker = Scheduler(
|
145
|
+
job_executors=self._job_executors,
|
146
|
+
event_broker=self._backend.event_broker.client,
|
147
|
+
data_store=self._backend.data_store.client,
|
148
|
+
identity=self.name,
|
149
|
+
logger=logger,
|
150
|
+
cleanup_interval=self.cfg.backend.cleanup_interval,
|
151
|
+
max_concurrent_jobs=self.cfg.backend.max_concurrent_jobs,
|
152
|
+
default_job_executor=self.cfg.backend.default_job_executor,
|
153
|
+
)
|
154
|
+
|
155
|
+
def _setup_backend(self) -> None:
|
156
|
+
"""
|
157
|
+
Set up the data store and SQLAlchemy engine for the scheduler.
|
158
|
+
|
159
|
+
This method initializes the data store and SQLAlchemy engine using configuration
|
160
|
+
values. It validates configuration, handles errors, and logs the setup process.
|
161
|
+
|
162
|
+
Raises:
|
163
|
+
RuntimeError: If the data store setup fails due to misconfiguration or connection errors.
|
164
|
+
"""
|
165
|
+
if isinstance(self._backend, dict):
|
166
|
+
if "data_store" in self._backend:
|
167
|
+
data_store = APSDataStore(**self._backend["data_store"])
|
168
|
+
if "event_broker" in self._backend:
|
169
|
+
if self._backend["event_broker"].get("from_ds_sqla", False):
|
170
|
+
event_broker = APSEventBroker.from_ds_sqla(
|
171
|
+
sqla_engine=data_store.sqla_engine
|
172
|
+
)
|
173
|
+
else:
|
174
|
+
event_broker = APSEventBroker(**self._backend["event_broker"])
|
175
|
+
self._backend = APSBackend(data_store=data_store, event_broker=event_broker)
|
176
|
+
else:
|
177
|
+
data_store = APSDataStore(**self.cfg.backend.data_store.to_dict())
|
178
|
+
|
179
|
+
if self.cfg.backend.event_broker.to_dict().get("from_ds_sqla", False):
|
180
|
+
event_broker = APSEventBroker.from_ds_sqla(
|
181
|
+
sqla_engine=data_store.sqla_engine
|
182
|
+
)
|
183
|
+
else:
|
184
|
+
event_broker = APSEventBroker(**{
|
185
|
+
k: v
|
186
|
+
for k, v in self.cfg.backend.event_broker.to_dict().items()
|
187
|
+
if k != "from_ds_sqla"
|
188
|
+
})
|
189
|
+
self._backend = APSBackend(data_store=data_store, event_broker=event_broker)
|
190
|
+
|
191
|
+
logger.info(
|
192
|
+
f"Data store and event broker set up successfully: data store type"
|
193
|
+
f" '{data_store.type}', event broker type '{event_broker.type}'"
|
194
|
+
)
|
195
|
+
|
196
|
+
def start_worker(
|
197
|
+
self, background: bool = False, num_workers: int | None = None
|
198
|
+
) -> None:
|
199
|
+
"""Start the APScheduler worker process.
|
200
|
+
|
201
|
+
This method initializes and starts the worker process that executes scheduled jobs.
|
202
|
+
The worker can be started in foreground (blocking) or background mode.
|
203
|
+
|
204
|
+
Args:
|
205
|
+
background: If True, runs the worker in a non-blocking background mode.
|
206
|
+
If False, runs in the current process and blocks until stopped.
|
207
|
+
num_workers: Number of worker processes for the executor pools.
|
208
|
+
If None, uses the value from config or defaults to CPU count.
|
209
|
+
|
210
|
+
Raises:
|
211
|
+
RuntimeError: If worker fails to start or if multiprocessing setup fails.
|
212
|
+
|
213
|
+
Example:
|
214
|
+
```python
|
215
|
+
# Start worker in background with 4 processes
|
216
|
+
worker.start_worker(background=True, num_workers=4)
|
217
|
+
|
218
|
+
# Start worker in foreground (blocking)
|
219
|
+
worker.start_worker(background=False)
|
220
|
+
|
221
|
+
# Use as a context manager
|
222
|
+
with worker.start_worker(background=False):
|
223
|
+
# Do some work
|
224
|
+
pass
|
225
|
+
```
|
226
|
+
"""
|
227
|
+
import multiprocessing
|
228
|
+
|
229
|
+
# Allow configuration override for pool sizes
|
230
|
+
if num_workers is None:
|
231
|
+
num_workers = getattr(self.cfg.backend, "num_workers", None)
|
232
|
+
if num_workers is None:
|
233
|
+
num_workers = multiprocessing.cpu_count()
|
234
|
+
|
235
|
+
# Adjust thread and process pool executor sizes
|
236
|
+
if "processpool" in self._job_executors:
|
237
|
+
self._job_executors["processpool"].max_workers = num_workers
|
238
|
+
if "threadpool" in self._job_executors:
|
239
|
+
threadpool_size = getattr(
|
240
|
+
self.cfg.backend, "threadpool_size", num_workers * 5
|
241
|
+
)
|
242
|
+
self._job_executors["threadpool"].max_workers = threadpool_size
|
243
|
+
|
244
|
+
logger.info(f"Configured worker pool with {num_workers} workers.")
|
245
|
+
|
246
|
+
if background:
|
247
|
+
logger.info("Starting APScheduler worker in background mode.")
|
248
|
+
self._worker.start_in_background()
|
249
|
+
else:
|
250
|
+
logger.info("Starting APScheduler worker in foreground mode.")
|
251
|
+
self._worker.run_until_stopped()
|
252
|
+
|
253
|
+
    def stop_worker(self) -> None:
        """Stop the APScheduler worker process and release its resources.

        Should be called before program exit to ensure proper cleanup.

        Raises:
            RuntimeError: If the worker fails to stop cleanly.
        """
        logger.info("Stopping APScheduler worker.")
        self._worker.stop()
        # NOTE(review): reaches into the Scheduler's private exit stack to
        # force-close held resources — relies on an APScheduler
        # implementation detail; confirm it survives library upgrades.
        self._worker._exit_stack.close()
|
275
|
+
def start_worker_pool(
|
276
|
+
self,
|
277
|
+
background: bool = False,
|
278
|
+
num_workers: int | None = None,
|
279
|
+
) -> None:
|
280
|
+
"""
|
281
|
+
Start a pool of worker processes to handle jobs in parallel.
|
282
|
+
|
283
|
+
APScheduler 4.0 already handles concurrency internally through its executors,
|
284
|
+
so this method simply starts a single worker with the appropriate configuration.
|
285
|
+
|
286
|
+
Args:
|
287
|
+
num_workers: Number of worker processes (affects executor pool sizes)
|
288
|
+
background: Whether to run in background
|
289
|
+
"""
|
290
|
+
|
291
|
+
# Start a single worker which will use the configured executors
|
292
|
+
self.start_worker(background=background, num_workers=num_workers)
|
293
|
+
|
294
|
+
def stop_worker_pool(self) -> None:
|
295
|
+
"""
|
296
|
+
Stop the worker pool.
|
297
|
+
|
298
|
+
Since APScheduler manages concurrency internally, this just stops the worker.
|
299
|
+
"""
|
300
|
+
|
301
|
+
logger.info("Stopping APScheduler worker pool.")
|
302
|
+
self.stop_worker()
|
303
|
+
|
304
|
+
## Jobs
|
305
|
+
|
306
|
+
def add_job(
|
307
|
+
self,
|
308
|
+
func: Callable,
|
309
|
+
func_args: tuple | None = None,
|
310
|
+
func_kwargs: dict[str, Any] | None = None,
|
311
|
+
result_ttl: float | dt.timedelta = 0,
|
312
|
+
run_at: dt.datetime | None = None,
|
313
|
+
run_in: int | float | None = None,
|
314
|
+
job_executor: str | None = None,
|
315
|
+
) -> str:
|
316
|
+
"""Add a job for immediate or scheduled execution.
|
317
|
+
|
318
|
+
This method adds a job to the scheduler. The job can be executed immediately
|
319
|
+
or scheduled for later execution using run_at or run_in parameters.
|
320
|
+
|
321
|
+
Args:
|
322
|
+
func: Function to execute. Must be importable from the worker process.
|
323
|
+
func_args: Positional arguments to pass to the function.
|
324
|
+
func_kwargs: Keyword arguments to pass to the function.
|
325
|
+
result_ttl: Time to live for the job result, as seconds or timedelta.
|
326
|
+
After this time, the result may be removed from storage.
|
327
|
+
run_at: Schedule the job to run at a specific datetime.
|
328
|
+
Takes precedence over run_in if both are specified.
|
329
|
+
run_in: Schedule the job to run after a delay (in seconds).
|
330
|
+
Only used if run_at is not specified.
|
331
|
+
job_executor: Name of the executor to run the job ("async", "threadpool",
|
332
|
+
or "processpool"). If None, uses the default from config.
|
333
|
+
|
334
|
+
Returns:
|
335
|
+
str: Unique identifier for the job.
|
336
|
+
|
337
|
+
Raises:
|
338
|
+
ValueError: If the function is not serializable or arguments are invalid.
|
339
|
+
RuntimeError: If the job cannot be added to the scheduler.
|
340
|
+
|
341
|
+
Note:
|
342
|
+
When using run_at or run_in, the job results will not be stored in the data store.
|
343
|
+
|
344
|
+
Example:
|
345
|
+
```python
|
346
|
+
# Add immediate job
|
347
|
+
def my_task(x: int, y: int) -> int:
|
348
|
+
return x + y
|
349
|
+
|
350
|
+
job_id = worker.add_job(
|
351
|
+
my_task,
|
352
|
+
func_args=(1, 2),
|
353
|
+
result_ttl=3600 # Keep result for 1 hour
|
354
|
+
)
|
355
|
+
|
356
|
+
# Schedule job for later
|
357
|
+
tomorrow = dt.datetime.now() + dt.timedelta(days=1)
|
358
|
+
job_id = worker.add_job(
|
359
|
+
my_task,
|
360
|
+
func_kwargs={"x": 1, "y": 2},
|
361
|
+
run_at=tomorrow
|
362
|
+
)
|
363
|
+
|
364
|
+
# Run after delay
|
365
|
+
job_id = worker.add_job(
|
366
|
+
my_task,
|
367
|
+
func_args=(1, 2),
|
368
|
+
run_in=3600 # Run in 1 hour
|
369
|
+
)
|
370
|
+
```
|
371
|
+
"""
|
372
|
+
job_executor = job_executor or self.cfg.backend.default_job_executor
|
373
|
+
|
374
|
+
# Convert result_expiration_time to datetime.timedelta if it's not already
|
375
|
+
if isinstance(result_ttl, (int, float)):
|
376
|
+
result_ttl = dt.timedelta(seconds=result_ttl)
|
377
|
+
|
378
|
+
run_at = dt.datetime.fromisoformat(run_at) if isinstance(run_at, str) else run_at
|
379
|
+
run_in = duration_parser.parse(run_in) if isinstance(run_in, str) else run_in
|
380
|
+
|
381
|
+
if run_in:
|
382
|
+
run_at = dt.datetime.now() + dt.timedelta(seconds=run_in)
|
383
|
+
|
384
|
+
if run_at:
|
385
|
+
job_id = self.add_schedule(
|
386
|
+
func,
|
387
|
+
func_args=func_args,
|
388
|
+
func_kwargs=func_kwargs,
|
389
|
+
date=run_at,
|
390
|
+
job_executor=job_executor,
|
391
|
+
)
|
392
|
+
else:
|
393
|
+
job_id = self._worker.add_job(
|
394
|
+
func,
|
395
|
+
args=func_args or (),
|
396
|
+
kwargs=func_kwargs or {},
|
397
|
+
job_executor=job_executor,
|
398
|
+
result_expiration_time=result_ttl,
|
399
|
+
)
|
400
|
+
|
401
|
+
return str(job_id)
|
402
|
+
|
403
|
+
def run_job(
|
404
|
+
self,
|
405
|
+
func: Callable,
|
406
|
+
func_args: tuple | None = None,
|
407
|
+
func_kwargs: dict[str, Any] | None = None,
|
408
|
+
job_executor: str | None = None,
|
409
|
+
) -> Any:
|
410
|
+
"""Run a job immediately and wait for its result.
|
411
|
+
|
412
|
+
This method executes the job synchronously and returns its result.
|
413
|
+
|
414
|
+
Args:
|
415
|
+
func: Function to execute. Must be importable from the worker process.
|
416
|
+
func_args: Positional arguments to pass to the function.
|
417
|
+
func_kwargs: Keyword arguments to pass to the function.
|
418
|
+
job_executor: Name of the executor to run the job ("async", "threadpool",
|
419
|
+
or "processpool"). If None, uses the default from config.
|
420
|
+
|
421
|
+
Returns:
|
422
|
+
Any: The result returned by the executed function.
|
423
|
+
|
424
|
+
Raises:
|
425
|
+
Exception: Any exception raised by the executed function.
|
426
|
+
|
427
|
+
Example:
|
428
|
+
```python
|
429
|
+
def add(x: int, y: int) -> int:
|
430
|
+
return x + y
|
431
|
+
|
432
|
+
result = worker.run_job(add, func_args=(1, 2))
|
433
|
+
assert result == 3
|
434
|
+
```
|
435
|
+
"""
|
436
|
+
job_executor = job_executor or self.cfg.backend.default_job_executor
|
437
|
+
|
438
|
+
return self._worker.run_job(
|
439
|
+
func,
|
440
|
+
args=func_args or (),
|
441
|
+
kwargs=func_kwargs or {},
|
442
|
+
)
|
443
|
+
|
444
|
+
def get_jobs(self) -> list[Job]:
|
445
|
+
"""Get all jobs from the scheduler.
|
446
|
+
|
447
|
+
Returns:
|
448
|
+
list[Job]: List of all jobs in the scheduler, including pending,
|
449
|
+
running, and completed jobs.
|
450
|
+
|
451
|
+
Example:
|
452
|
+
```python
|
453
|
+
jobs = worker.get_jobs()
|
454
|
+
for job in jobs:
|
455
|
+
print(f"Job {job.id}: {job.status}")
|
456
|
+
```
|
457
|
+
"""
|
458
|
+
return self._worker.get_jobs()
|
459
|
+
|
460
|
+
def get_job(self, job_id: str | UUID) -> Job | None:
|
461
|
+
"""Get a specific job by its ID.
|
462
|
+
|
463
|
+
Args:
|
464
|
+
job_id: Unique identifier of the job, as string or UUID.
|
465
|
+
|
466
|
+
Returns:
|
467
|
+
Job | None: The job object if found, None otherwise.
|
468
|
+
|
469
|
+
Example:
|
470
|
+
```python
|
471
|
+
# Get job using string ID
|
472
|
+
job = worker.get_job("550e8400-e29b-41d4-a716-446655440000")
|
473
|
+
|
474
|
+
# Get job using UUID
|
475
|
+
from uuid import UUID
|
476
|
+
job = worker.get_job(UUID("550e8400-e29b-41d4-a716-446655440000"))
|
477
|
+
```
|
478
|
+
"""
|
479
|
+
jobs = self._worker.get_jobs()
|
480
|
+
if isinstance(job_id, str):
|
481
|
+
job_id = UUID(job_id)
|
482
|
+
|
483
|
+
for job in jobs:
|
484
|
+
if job.id == job_id:
|
485
|
+
return job
|
486
|
+
return None
|
487
|
+
|
488
|
+
def get_job_result(self, job_id: str | UUID, wait: bool = True) -> Any:
|
489
|
+
"""Get the result of a specific job.
|
490
|
+
|
491
|
+
Args:
|
492
|
+
job_id: Unique identifier of the job, as string or UUID.
|
493
|
+
wait: If True, waits for the job to complete before returning.
|
494
|
+
If False, returns None if the job is not finished.
|
495
|
+
|
496
|
+
Returns:
|
497
|
+
Any: The result of the job if available, None if the job is not
|
498
|
+
finished and wait=False.
|
499
|
+
|
500
|
+
Raises:
|
501
|
+
ValueError: If the job ID is invalid.
|
502
|
+
TimeoutError: If the job takes too long to complete (when waiting).
|
503
|
+
|
504
|
+
Example:
|
505
|
+
```python
|
506
|
+
# Wait for result
|
507
|
+
result = worker.get_job_result("550e8400-e29b-41d4-a716-446655440000")
|
508
|
+
|
509
|
+
# Check result without waiting
|
510
|
+
result = worker.get_job_result(
|
511
|
+
"550e8400-e29b-41d4-a716-446655440000",
|
512
|
+
wait=False
|
513
|
+
)
|
514
|
+
if result is None:
|
515
|
+
print("Job still running")
|
516
|
+
```
|
517
|
+
"""
|
518
|
+
if isinstance(job_id, str):
|
519
|
+
job_id = UUID(job_id)
|
520
|
+
return self._worker.get_job_result(job_id, wait=wait)
|
521
|
+
|
522
|
+
def cancel_job(self, job_id: str | UUID) -> bool:
|
523
|
+
"""Cancel a running or pending job.
|
524
|
+
|
525
|
+
Note:
|
526
|
+
Not currently implemented for APScheduler backend. Jobs must be removed
|
527
|
+
manually from the data store.
|
528
|
+
|
529
|
+
Args:
|
530
|
+
job_id: Unique identifier of the job to cancel, as string or UUID.
|
531
|
+
|
532
|
+
Returns:
|
533
|
+
bool: Always returns False as this operation is not implemented.
|
534
|
+
|
535
|
+
Example:
|
536
|
+
```python
|
537
|
+
# This operation is not supported
|
538
|
+
success = worker.cancel_job("job-123")
|
539
|
+
assert not success
|
540
|
+
```
|
541
|
+
"""
|
542
|
+
logger.info(
|
543
|
+
"Not implemented for apscheduler yet. You have to remove the job manually from the data_store."
|
544
|
+
)
|
545
|
+
return False
|
546
|
+
|
547
|
+
def delete_job(self, job_id: str | UUID) -> bool:
|
548
|
+
"""
|
549
|
+
Delete a job and its results from storage.
|
550
|
+
|
551
|
+
Note:
|
552
|
+
Not currently implemented for APScheduler backend. Jobs must be removed
|
553
|
+
manually from the data store.
|
554
|
+
|
555
|
+
Args:
|
556
|
+
job_id: Unique identifier of the job to delete, as string or UUID.
|
557
|
+
|
558
|
+
Returns:
|
559
|
+
bool: Always returns False as this operation is not implemented.
|
560
|
+
|
561
|
+
Example:
|
562
|
+
```python
|
563
|
+
# This operation is not supported
|
564
|
+
success = worker.delete_job("job-123")
|
565
|
+
assert not success
|
566
|
+
```
|
567
|
+
"""
|
568
|
+
logger.info(
|
569
|
+
"Not implemented for apscheduler yet. You have to remove the job manually from the data_store."
|
570
|
+
)
|
571
|
+
return False
|
572
|
+
|
573
|
+
def cancel_all_jobs(self) -> None:
|
574
|
+
"""Cancel all running and pending jobs.
|
575
|
+
|
576
|
+
Note:
|
577
|
+
Not currently implemented for APScheduler backend. Jobs must be removed
|
578
|
+
manually from the data store.
|
579
|
+
|
580
|
+
Example:
|
581
|
+
```python
|
582
|
+
# This operation is not supported
|
583
|
+
worker.cancel_all_jobs() # No effect
|
584
|
+
```
|
585
|
+
"""
|
586
|
+
logger.info(
|
587
|
+
"Not implemented for apscheduler yet. You have to remove the jobs manually from the data_store."
|
588
|
+
)
|
589
|
+
return None
|
590
|
+
|
591
|
+
def delete_all_jobs(self) -> None:
|
592
|
+
"""
|
593
|
+
Delete all jobs and their results from storage.
|
594
|
+
|
595
|
+
Note:
|
596
|
+
Not currently implemented for APScheduler backend. Jobs must be removed
|
597
|
+
manually from the data store.
|
598
|
+
|
599
|
+
Example:
|
600
|
+
```python
|
601
|
+
# This operation is not supported
|
602
|
+
worker.delete_all_jobs() # No effect
|
603
|
+
```
|
604
|
+
"""
|
605
|
+
logger.info(
|
606
|
+
"Not implemented for apscheduler yet. You have to remove the jobs manually from the data_store."
|
607
|
+
)
|
608
|
+
return None
|
609
|
+
|
610
|
+
@property
|
611
|
+
def jobs(self) -> list[Job]:
|
612
|
+
"""Get all jobs from the scheduler.
|
613
|
+
|
614
|
+
Returns:
|
615
|
+
list[Job]: List of all job objects in the scheduler.
|
616
|
+
|
617
|
+
Example:
|
618
|
+
```python
|
619
|
+
all_jobs = worker.jobs
|
620
|
+
print(f"Total jobs: {len(all_jobs)}")
|
621
|
+
for job in all_jobs:
|
622
|
+
print(f"Job {job.id}: {job.status}")
|
623
|
+
```
|
624
|
+
"""
|
625
|
+
return self._worker.get_jobs()
|
626
|
+
|
627
|
+
@property
|
628
|
+
def job_ids(self) -> list[str]:
|
629
|
+
"""Get all job IDs from the scheduler.
|
630
|
+
|
631
|
+
Returns:
|
632
|
+
list[str]: List of unique identifiers for all jobs.
|
633
|
+
|
634
|
+
Example:
|
635
|
+
```python
|
636
|
+
ids = worker.job_ids
|
637
|
+
print(f"Job IDs: {', '.join(ids)}")
|
638
|
+
```
|
639
|
+
"""
|
640
|
+
return [str(job.id) for job in self._worker.get_jobs()]
|
641
|
+
|
642
|
+
## Schedules
|
643
|
+
def add_schedule(
|
644
|
+
self,
|
645
|
+
func: Callable,
|
646
|
+
func_args: tuple | None = None,
|
647
|
+
func_kwargs: dict[str, Any] | None = None,
|
648
|
+
cron: str | dict[str, str | int] | None = None,
|
649
|
+
interval: int | str | dict[str, str | int] | None = None,
|
650
|
+
date: dt.datetime | None = None,
|
651
|
+
schedule_id: str | None = None,
|
652
|
+
job_executor: str | None = None,
|
653
|
+
**schedule_kwargs,
|
654
|
+
) -> str:
|
655
|
+
"""Schedule a job for repeated or one-time execution.
|
656
|
+
|
657
|
+
This method adds a scheduled job to the scheduler. The schedule can be defined
|
658
|
+
using cron expressions, intervals, or specific dates.
|
659
|
+
|
660
|
+
Args:
|
661
|
+
func: Function to execute. Must be importable from the worker process.
|
662
|
+
func_args: Positional arguments to pass to the function.
|
663
|
+
func_kwargs: Keyword arguments to pass to the function.
|
664
|
+
cron: Cron expression for scheduling. Can be a string (e.g. "* * * * *")
|
665
|
+
or a dict with cron parameters. Only one of cron, interval, or date
|
666
|
+
should be specified.
|
667
|
+
interval: Interval for recurring execution in seconds, or a dict with
|
668
|
+
interval parameters. Only one of cron, interval, or date should
|
669
|
+
be specified.
|
670
|
+
date: Specific datetime for one-time execution. Only one of cron,
|
671
|
+
interval, or date should be specified.
|
672
|
+
schedule_id: Optional unique identifier for the schedule.
|
673
|
+
If None, a UUID will be generated.
|
674
|
+
job_executor: Name of the executor to run the job ("async", "threadpool",
|
675
|
+
or "processpool"). If None, uses the default from config.
|
676
|
+
**schedule_kwargs: Additional scheduling parameters:
|
677
|
+
- coalesce: CoalescePolicy = CoalescePolicy.latest
|
678
|
+
- misfire_grace_time: float | timedelta | None = None
|
679
|
+
- max_jitter: float | timedelta | None = None
|
680
|
+
- max_running_jobs: int | None = None
|
681
|
+
- conflict_policy: ConflictPolicy = ConflictPolicy.do_nothing
|
682
|
+
- paused: bool = False
|
683
|
+
|
684
|
+
Returns:
|
685
|
+
str: Unique identifier for the schedule.
|
686
|
+
|
687
|
+
Raises:
|
688
|
+
ValueError: If no trigger type is specified or if multiple triggers
|
689
|
+
are specified.
|
690
|
+
RuntimeError: If the schedule cannot be added to the scheduler.
|
691
|
+
|
692
|
+
Example:
|
693
|
+
```python
|
694
|
+
def my_task(msg: str) -> None:
|
695
|
+
print(f"Running task: {msg}")
|
696
|
+
|
697
|
+
# Using cron expression (run every minute)
|
698
|
+
schedule_id = worker.add_schedule(
|
699
|
+
my_task,
|
700
|
+
func_kwargs={"msg": "Cron job"},
|
701
|
+
cron="* * * * *"
|
702
|
+
)
|
703
|
+
|
704
|
+
# Using cron dict
|
705
|
+
schedule_id = worker.add_schedule(
|
706
|
+
my_task,
|
707
|
+
func_kwargs={"msg": "Cron job"},
|
708
|
+
cron={
|
709
|
+
"minute": "*/15", # Every 15 minutes
|
710
|
+
"hour": "9-17" # During business hours
|
711
|
+
}
|
712
|
+
)
|
713
|
+
|
714
|
+
# Using interval (every 5 minutes)
|
715
|
+
schedule_id = worker.add_schedule(
|
716
|
+
my_task,
|
717
|
+
func_kwargs={"msg": "Interval job"},
|
718
|
+
interval=300 # 5 minutes in seconds
|
719
|
+
)
|
720
|
+
|
721
|
+
# Using interval dict
|
722
|
+
schedule_id = worker.add_schedule(
|
723
|
+
my_task,
|
724
|
+
func_kwargs={"msg": "Interval job"},
|
725
|
+
interval={
|
726
|
+
"hours": 1,
|
727
|
+
"minutes": 30
|
728
|
+
}
|
729
|
+
)
|
730
|
+
|
731
|
+
# One-time future execution
|
732
|
+
import datetime as dt
|
733
|
+
future_date = dt.datetime.now() + dt.timedelta(days=1)
|
734
|
+
schedule_id = worker.add_schedule(
|
735
|
+
my_task,
|
736
|
+
func_kwargs={"msg": "One-time job"},
|
737
|
+
date=future_date
|
738
|
+
)
|
739
|
+
|
740
|
+
# With additional options
|
741
|
+
from apscheduler import CoalescePolicy
|
742
|
+
schedule_id = worker.add_schedule(
|
743
|
+
my_task,
|
744
|
+
func_kwargs={"msg": "Advanced job"},
|
745
|
+
interval=300,
|
746
|
+
coalesce=CoalescePolicy.latest,
|
747
|
+
max_jitter=dt.timedelta(seconds=30)
|
748
|
+
)
|
749
|
+
```
|
750
|
+
"""
|
751
|
+
job_executor = job_executor or self.cfg.backend.default_job_executor
|
752
|
+
|
753
|
+
if cron:
|
754
|
+
trigger_instance = APSTrigger("cron")
|
755
|
+
if isinstance(cron, str):
|
756
|
+
cron = {"crontab": cron}
|
757
|
+
trigger = trigger_instance.get_trigger_instance(**cron)
|
758
|
+
elif interval:
|
759
|
+
trigger_instance = APSTrigger("interval")
|
760
|
+
if isinstance(interval, str | int):
|
761
|
+
interval = {"seconds": int(interval)}
|
762
|
+
trigger = trigger_instance.get_trigger_instance(**interval)
|
763
|
+
|
764
|
+
if date:
|
765
|
+
trigger_instance = APSTrigger("date")
|
766
|
+
trigger = trigger_instance.get_trigger_instance(run_time=date)
|
767
|
+
|
768
|
+
schedule_id = self._worker.add_schedule(
|
769
|
+
func,
|
770
|
+
trigger=trigger,
|
771
|
+
id=schedule_id,
|
772
|
+
args=func_args or (),
|
773
|
+
kwargs=func_kwargs or {},
|
774
|
+
job_executor=job_executor,
|
775
|
+
**schedule_kwargs,
|
776
|
+
)
|
777
|
+
|
778
|
+
return schedule_id
|
779
|
+
|
780
|
+
def get_schedules(self, as_dict: bool = False) -> list[Any]:
    """Get all schedules from the scheduler.

    Args:
        as_dict: If True, returns schedules as dictionaries instead of
            Schedule objects.

    Returns:
        list[Any]: List of all schedules, either as Schedule objects or
            dictionaries depending on as_dict parameter.

    Example:
        ```python
        # Get schedule objects
        schedules = worker.get_schedules()
        for schedule in schedules:
            print(f"Schedule {schedule.id}: Next run at {schedule.next_run_time}")

        # Get as dictionaries
        schedules = worker.get_schedules(as_dict=True)
        for schedule in schedules:
            print(f"Schedule {schedule['id']}: {schedule['trigger']}")
        ```
    """
    schedules = self._worker.get_schedules()
    if as_dict:
        # Bug fix: as_dict was previously accepted but ignored.
        # Best-effort conversion: prefer an explicit to_dict() if the
        # schedule type provides one, otherwise fall back to the
        # instance __dict__.  NOTE(review): confirm the Schedule type's
        # preferred serialization API.
        return [
            sched.to_dict() if hasattr(sched, "to_dict") else dict(vars(sched))
            for sched in schedules
        ]
    return schedules
|
805
|
+
|
806
|
+
def get_schedule(self, schedule_id: str) -> Any:
    """Look up a single schedule by its unique identifier.

    Args:
        schedule_id: Unique identifier of the schedule.

    Returns:
        Any: The schedule object when it exists, otherwise None (an
            error is logged in that case).

    Example:
        ```python
        schedule = worker.get_schedule("my-daily-job")
        if schedule:
            print(f"Next run at: {schedule.next_run_time}")
        else:
            print("Schedule not found")
        ```
    """
    # Guard clause: unknown ids are reported and mapped to None.
    if schedule_id not in self.schedule_ids:
        logger.error(f"Schedule {schedule_id} not found.")
        return None
    return self._worker.get_schedule(schedule_id)
|
829
|
+
|
830
|
+
def cancel_schedule(self, schedule_id: str) -> bool:
    """Cancel a schedule.

    This method removes the schedule from the scheduler. This is equivalent
    to delete_schedule and stops any future executions of the schedule.

    Args:
        schedule_id: Unique identifier of the schedule to cancel.

    Returns:
        bool: True if the schedule was successfully canceled,
            False if the schedule was not found.

    Example:
        ```python
        if worker.cancel_schedule("my-daily-job"):
            print("Schedule canceled successfully")
        else:
            print("Schedule not found")
        ```
    """
    if schedule_id not in self.schedule_ids:
        logger.error(f"Schedule {schedule_id} not found.")
        return False
    self._worker.remove_schedule(schedule_id)
    logger.info(f"Schedule {schedule_id} canceled.")
    # Bug fix: the success path previously fell through and returned None,
    # contradicting the bool annotation and the documented contract.
    return True
|
856
|
+
|
857
|
+
def delete_schedule(self, schedule_id: str) -> bool:
    """Remove a schedule.

    This method removes the schedule from the scheduler. This is equivalent
    to cancel_schedule and stops any future executions of the schedule.

    Args:
        schedule_id: Unique identifier of the schedule to remove.

    Returns:
        bool: True if the schedule was successfully removed,
            False if the schedule was not found.

    Raises:
        RuntimeError: If removal fails due to data store errors.

    Example:
        ```python
        try:
            if worker.delete_schedule("my-daily-job"):
                print("Schedule deleted successfully")
            else:
                print("Schedule not found")
        except RuntimeError as e:
            print(f"Failed to delete schedule: {e}")
        ```
    """
    # Bug fix: the result of cancel_schedule was previously discarded, so
    # this method always returned None despite its bool annotation.
    return self.cancel_schedule(schedule_id)
|
885
|
+
|
886
|
+
def cancel_all_schedules(self) -> None:
    """Cancel every schedule registered with the scheduler.

    All future executions are stopped; this operation cannot be undone.

    Example:
        ```python
        # Cancel all schedules
        worker.cancel_all_schedules()
        assert len(worker.schedules) == 0
        ```
    """
    # Snapshot of ids taken once; each id is canceled individually.
    for schedule_id in self.schedule_ids:
        self.cancel_schedule(schedule_id)
    logger.info("All schedules canceled.")
    return None
|
903
|
+
|
904
|
+
def delete_all_schedules(self) -> None:
|
905
|
+
"""
|
906
|
+
Delete all schedules from the scheduler.
|
907
|
+
|
908
|
+
This method removes all schedules from the scheduler, stopping all future
|
909
|
+
executions. This operation cannot be undone.
|
910
|
+
|
911
|
+
Example:
|
912
|
+
```python
|
913
|
+
# Delete all schedules
|
914
|
+
worker.delete_all_schedules()
|
915
|
+
assert len(worker.schedules) == 0
|
916
|
+
```
|
917
|
+
"""
|
918
|
+
for sched in self.schedule_ids:
|
919
|
+
self.delete_schedule(sched)
|
920
|
+
logger.info("All schedules deleted.")
|
921
|
+
return None
|
922
|
+
|
923
|
+
@property
def schedules(self) -> list[Any]:
    """All schedule objects currently known to the scheduler.

    Returns:
        list[Any]: Every schedule object in the scheduler.

    Example:
        ```python
        schedules = worker.schedules
        print(f"Total schedules: {len(schedules)}")
        ```
    """
    # Thin delegation to the underlying worker.
    current = self._worker.get_schedules()
    return current
|
937
|
+
|
938
|
+
@property
def schedule_ids(self) -> list[str]:
    """Identifiers of every schedule known to the scheduler.

    Returns:
        list[str]: Unique identifiers for all schedules.

    Example:
        ```python
        ids = worker.schedule_ids
        print(f"Schedule IDs: {', '.join(ids)}")
        ```
    """
    # Ids are coerced to str to guarantee a homogeneous list.
    all_schedules = self._worker.get_schedules()
    return [str(item.id) for item in all_schedules]
|
952
|
+
|
953
|
+
def pause_schedule(self, schedule_id: str) -> bool:
    """Temporarily suspend a schedule without removing it.

    A paused schedule can be reactivated later via resume_schedule.

    Args:
        schedule_id: Unique identifier of the schedule to pause.

    Returns:
        bool: True on success, False when no such schedule exists.

    Example:
        ```python
        # Pause a schedule temporarily
        if worker.pause_schedule("daily-backup"):
            print("Schedule paused")
        ```
    """
    if schedule_id in self.schedule_ids:
        self._worker.pause_schedule(schedule_id)
        logger.info(f"Schedule {schedule_id} paused.")
        return True
    logger.error(f"Schedule {schedule_id} not found.")
    return False
|
979
|
+
|
980
|
+
def resume_schedule(self, schedule_id: str) -> bool:
    """Reactivate a previously paused schedule.

    Args:
        schedule_id: Unique identifier of the schedule to resume.

    Returns:
        bool: True on success, False when no such schedule exists.

    Example:
        ```python
        # Resume a paused schedule
        if worker.resume_schedule("daily-backup"):
            print("Schedule resumed")
        ```
    """
    if schedule_id in self.schedule_ids:
        self._worker.unpause_schedule(schedule_id)
        logger.info(f"Schedule {schedule_id} resumed.")
        return True
    logger.error(f"Schedule {schedule_id} not found.")
    return False
|
1003
|
+
|
1004
|
+
def pause_all_schedules(self) -> None:
    """Pause every schedule without removing any of them.

    All schedules can be reactivated with resume_all_schedules.

    Example:
        ```python
        # Pause all schedules temporarily
        worker.pause_all_schedules()
        ```
    """
    # Pause each id from a one-time snapshot of the current schedules.
    for schedule_id in self.schedule_ids:
        self.pause_schedule(schedule_id)
    logger.info("All schedules paused.")
    return None
|
1020
|
+
|
1021
|
+
def resume_all_schedules(self) -> None:
    """Reactivate every paused schedule in the scheduler.

    Example:
        ```python
        # Resume all paused schedules
        worker.resume_all_schedules()
        ```
    """
    # Resume each id from a one-time snapshot of the current schedules.
    for schedule_id in self.schedule_ids:
        self.resume_schedule(schedule_id)
    logger.info("All schedules resumed.")
    return None
|
1036
|
+
|
1037
|
+
def show_schedules(self) -> None:
    """Print a formatted, human-readable overview of all schedules.

    Delegates rendering to display_schedules, which formats status,
    next run time, and related details for each schedule.

    Example:
        ```python
        # Show all schedules in a readable format
        worker.show_schedules()
        ```
    """
    current = self._worker.get_schedules()
    display_schedules(current)
|
1050
|
+
|
1051
|
+
def show_jobs(self) -> None:
    """Print a formatted, human-readable overview of all jobs.

    Delegates rendering to display_jobs, which formats status, result,
    and related details for each job.

    Example:
        ```python
        # Show all jobs in a readable format
        worker.show_jobs()
        ```
    """
    current = self._worker.get_jobs()
    display_jobs(current)
|