avtomatika 1.0b6-py3-none-any.whl → 1.0b7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- avtomatika/api/handlers.py +549 -0
- avtomatika/api/routes.py +118 -0
- avtomatika/app_keys.py +32 -0
- avtomatika/blueprint.py +125 -54
- avtomatika/context.py +2 -2
- avtomatika/data_types.py +3 -2
- avtomatika/dispatcher.py +1 -1
- avtomatika/engine.py +52 -601
- avtomatika/executor.py +21 -16
- avtomatika/scheduler.py +8 -8
- avtomatika/storage/memory.py +12 -7
- avtomatika/utils/__init__.py +0 -0
- avtomatika/utils/webhook_sender.py +54 -0
- avtomatika/watcher.py +1 -3
- {avtomatika-1.0b6.dist-info → avtomatika-1.0b7.dist-info}/METADATA +43 -3
- {avtomatika-1.0b6.dist-info → avtomatika-1.0b7.dist-info}/RECORD +19 -14
- {avtomatika-1.0b6.dist-info → avtomatika-1.0b7.dist-info}/WHEEL +0 -0
- {avtomatika-1.0b6.dist-info → avtomatika-1.0b7.dist-info}/licenses/LICENSE +0 -0
- {avtomatika-1.0b6.dist-info → avtomatika-1.0b7.dist-info}/top_level.txt +0 -0
avtomatika/executor.py
CHANGED
@@ -1,5 +1,4 @@
 from asyncio import CancelledError, Task, create_task, sleep
-from inspect import signature
 from logging import getLogger
 from time import monotonic
 from types import SimpleNamespace

@@ -167,17 +166,17 @@ class JobExecutor:
         handler = blueprint.find_handler(context.current_state, context)

         # Build arguments for the handler dynamically.
-
-        params_to_inject = {}
+        param_names = blueprint.get_handler_params(handler)
+        params_to_inject: dict[str, Any] = {}

-        if "context" in
+        if "context" in param_names:
             params_to_inject["context"] = context
-        if "actions" in
+        if "actions" in param_names:
             params_to_inject["actions"] = action_factory
         else:
             # New injection logic with prioritized lookup.
             context_as_dict = context._asdict()
-            for param_name in
+            for param_name in param_names:
                 # Look in JobContext fields first.
                 if param_name in context_as_dict:
                     params_to_inject[param_name] = context_as_dict[param_name]

@@ -232,7 +231,7 @@ class JobExecutor:
         job_state: dict[str, Any],
         next_state: str,
         duration_ms: int,
-    ):
+    ) -> None:
         job_id = job_state["id"]
         previous_state = job_state["current_state"]
         logger.info(f"Job {job_id} transitioning from {previous_state} to {next_state}")

@@ -260,13 +259,19 @@ class JobExecutor:
         else:
             logger.info(f"Job {job_id} reached terminal state {next_state}")
             await self._check_and_resume_parent(job_state)
+            # Send webhook for finished/failed jobs
+            event_type = "job_finished" if next_state == "finished" else "job_failed"
+            # Since _check_and_resume_parent is for sub-jobs, we only send webhook if it's a top-level job
+            # or if the user explicitly requested it for sub-jobs (by providing webhook_url).
+            # The current logic stores webhook_url in job_state, so we just check it.
+            await self.engine.send_job_webhook(job_state, event_type)

     async def _handle_dispatch(
         self,
         job_state: dict[str, Any],
         task_info: dict[str, Any],
         duration_ms: int,
-    ):
+    ) -> None:
         job_id = job_state["id"]
         current_state = job_state["current_state"]

@@ -302,7 +307,6 @@ class JobExecutor:
         await self.storage.save_job_state(job_id, job_state)
         await self.storage.add_job_to_watch(job_id, timeout_at)

-        # Now, dispatch the task
         await self.dispatcher.dispatch(job_state, task_info)

     async def _handle_run_blueprint(

@@ -310,7 +314,7 @@
         parent_job_state: dict[str, Any],
         sub_blueprint_info: dict[str, Any],
         duration_ms: int,
-    ):
+    ) -> None:
         parent_job_id = parent_job_state["id"]
         child_job_id = str(uuid4())

@@ -350,7 +354,7 @@
         job_state: dict[str, Any],
         parallel_info: dict[str, Any],
         duration_ms: int,
-    ):
+    ) -> None:
         job_id = job_state["id"]
         tasks_to_dispatch = parallel_info["tasks"]
         aggregate_into = parallel_info["aggregate_into"]

@@ -398,7 +402,7 @@
         job_state: dict[str, Any],
         error: Exception,
         duration_ms: int,
-    ):
+    ) -> None:
         """Handles failures that occur *during the execution of a handler*.

         This is different from a task failure reported by a worker. This logic

@@ -447,13 +451,14 @@
             await self.storage.quarantine_job(job_id)
             # If this quarantined job was a sub-job, we must now resume its parent.
             await self._check_and_resume_parent(job_state)
+            await self.engine.send_job_webhook(job_state, "job_quarantined")
             from . import metrics

             metrics.jobs_failed_total.inc(
                 {metrics.LABEL_BLUEPRINT: job_state.get("blueprint_name", "unknown")},
             )

-    async def _check_and_resume_parent(self, child_job_state: dict[str, Any]):
+    async def _check_and_resume_parent(self, child_job_state: dict[str, Any]) -> None:
         """Checks if a completed job was a sub-job. If so, it resumes the parent
         job, passing the success/failure outcome of the child.
         """

@@ -493,7 +498,7 @@
         await self.storage.enqueue_job(parent_job_id)

     @staticmethod
-    def _handle_task_completion(task: Task):
+    def _handle_task_completion(task: Task) -> None:
         """Callback to handle completion of a job processing task."""
         try:
             # This will re-raise any exception caught in the task

@@ -505,7 +510,7 @@
             # Log any other exceptions that occurred in the task.
             logger.exception("Unhandled exception in job processing task")

-    async def run(self):
+    async def run(self) -> None:
         import asyncio

         logger.info("JobExecutor started.")

@@ -536,5 +541,5 @@ class JobExecutor:
         await sleep(1)
         logger.info("JobExecutor stopped.")

-    def stop(self):
+    def stop(self) -> None:
         self._running = False
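Note on the `@@ -167` hunk: the executor no longer inspects handler signatures inline; it asks the blueprint for a handler's parameter names via `get_handler_params` and injects arguments by name, with `JobContext` fields looked up first. A minimal sketch of that pattern, assuming `get_handler_params` is a cached wrapper around `inspect.signature` (the helper names here are stand-ins, not avtomatika's API):

```python
# Sketch of name-based argument injection, mirroring the hunk above.
# get_handler_params/build_kwargs are illustrative stand-ins.
from inspect import signature
from typing import Any, Callable

def get_handler_params(handler: Callable[..., Any]) -> list[str]:
    # A blueprint would cache this per handler instead of re-inspecting.
    return list(signature(handler).parameters)

def build_kwargs(
    handler: Callable[..., Any],
    context: Any,
    context_fields: dict[str, Any],
    action_factory: Any,
) -> dict[str, Any]:
    param_names = get_handler_params(handler)
    params_to_inject: dict[str, Any] = {}
    if "context" in param_names:
        params_to_inject["context"] = context
    if "actions" in param_names:
        params_to_inject["actions"] = action_factory
    else:
        # Prioritized lookup: JobContext fields first.
        for name in param_names:
            if name in context_fields:
                params_to_inject.setdefault(name, context_fields[name])
    return params_to_inject

async def handler(context, job_id: str): ...  # example handler

print(build_kwargs(handler, {"state": "run"}, {"job_id": "j-1"}, None))
# {'context': {'state': 'run'}, 'job_id': 'j-1'}
```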
avtomatika/scheduler.py
CHANGED
@@ -21,7 +21,7 @@ class Scheduler:
         self.schedules: list[ScheduledJobConfig] = []
         self.timezone = ZoneInfo(self.config.TZ)

-    def load_config(self):
+    def load_config(self) -> None:
         if not self.config.SCHEDULES_CONFIG_PATH:
             logger.info("No SCHEDULES_CONFIG_PATH set. Scheduler will not run any jobs.")
             return

@@ -32,7 +32,7 @@ class Scheduler:
         except Exception as e:
             logger.error(f"Failed to load schedules config: {e}")

-    async def run(self):
+    async def run(self) -> None:
         self.load_config()
         if not self.schedules:
             logger.info("No schedules found. Scheduler loop will not start.")

@@ -59,22 +59,22 @@ class Scheduler:

         logger.info("Scheduler stopped.")

-    def stop(self):
+    def stop(self) -> None:
         self._running = False

-    async def _process_job(self, job: ScheduledJobConfig, now_tz: datetime):
+    async def _process_job(self, job: ScheduledJobConfig, now_tz: datetime) -> None:
         if job.interval_seconds:
             await self._process_interval_job(job, now_tz)
         else:
             await self._process_calendar_job(job, now_tz)

-    async def _process_interval_job(self, job: ScheduledJobConfig, now_tz: datetime):
+    async def _process_interval_job(self, job: ScheduledJobConfig, now_tz: datetime) -> None:
         last_run_key = f"scheduler:last_run:{job.name}"
         last_run_ts = await self.storage.get_str(last_run_key)

         now_ts = now_tz.timestamp()

-        if last_run_ts and now_ts - float(last_run_ts) < job.interval_seconds:
+        if last_run_ts and job.interval_seconds is not None and now_ts - float(last_run_ts) < job.interval_seconds:
             return

         lock_key = f"scheduler:lock:interval:{job.name}"

@@ -85,7 +85,7 @@ class Scheduler:
         except Exception as e:
             logger.error(f"Failed to trigger interval job {job.name}: {e}")

-    async def _process_calendar_job(self, job: ScheduledJobConfig, now_tz: datetime):
+    async def _process_calendar_job(self, job: ScheduledJobConfig, now_tz: datetime) -> None:
         target_time_str = job.daily_at or job.time
         if not target_time_str:
             return

@@ -110,7 +110,7 @@ class Scheduler:
         logger.info(f"Triggering scheduled job {job.name}")
         await self._trigger_job(job)

-    async def _trigger_job(self, job: ScheduledJobConfig):
+    async def _trigger_job(self, job: ScheduledJobConfig) -> None:
         try:
             await self.engine.create_background_job(
                 blueprint_name=job.blueprint, initial_data=job.input_data, source=f"scheduler:{job.name}"
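Note on the `@@ -59` hunk: the added `job.interval_seconds is not None` guard makes the elapsed-time comparison type-safe for jobs that are calendar-based rather than interval-based. A toy version of the guard (the config and storage shapes are stand-ins; only the comparison mirrors the diff):

```python
# Toy version of the interval guard; only the comparison logic mirrors the diff.
from time import time

def skip_run(last_run_ts: str | None, interval_seconds: int | None, now_ts: float) -> bool:
    # Skip when a previous run exists, an interval is configured,
    # and that interval has not yet elapsed.
    return bool(
        last_run_ts
        and interval_seconds is not None
        and now_ts - float(last_run_ts) < interval_seconds
    )

now = time()
print(skip_run(None, 60, now))             # False: never ran before
print(skip_run(str(now - 10), 60, now))    # True: ran 10s ago, 60s interval
print(skip_run(str(now - 10), None, now))  # False: no interval configured
```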
avtomatika/storage/memory.py
CHANGED
@@ -33,7 +33,7 @@ class MemoryStorage(StorageBackend):
         async with self._lock:
             return self._jobs.get(job_id)

-    async def _clean_expired(self):
+    async def _clean_expired(self) -> None:
         """Helper to remove expired keys."""
         now = monotonic()

@@ -47,7 +47,7 @@ class MemoryStorage(StorageBackend):
             self._worker_ttls.pop(k, None)
             self._workers.pop(k, None)

-    async def save_job_state(self, job_id: str, state: dict[str, Any]):
+    async def save_job_state(self, job_id: str, state: dict[str, Any]) -> None:
         async with self._lock:
             self._jobs[job_id] = state

@@ -102,8 +102,13 @@ class MemoryStorage(StorageBackend):
         queue = self._worker_task_queues[worker_id]

         try:
-
-
+            # Type ignore because PriorityQueue.get() return type is generic
+            item = await wait_for(queue.get(), timeout=timeout)  # type: ignore
+            _, task_payload = item
+            # Explicit cast for mypy
+            if isinstance(task_payload, dict):
+                return task_payload
+            return None  # Should not happen if data integrity is kept
         except AsyncTimeoutError:
             return None

@@ -141,7 +146,7 @@ class MemoryStorage(StorageBackend):
     async def get_available_workers(self) -> list[dict[str, Any]]:
         async with self._lock:
             now = monotonic()
-            active_workers = []
+            active_workers: list[dict[str, Any]] = []
             active_workers.extend(
                 worker_info
                 for worker_id, worker_info in self._workers.items()

@@ -202,7 +207,7 @@ class MemoryStorage(StorageBackend):

             self._generic_keys[key] += 1
             self._generic_key_ttls[key] = now + ttl
-            return self._generic_keys[key]
+            return int(self._generic_keys[key])

     async def save_client_config(self, token: str, config: dict[str, Any]) -> None:
         async with self._lock:

@@ -223,7 +228,7 @@ class MemoryStorage(StorageBackend):
             return True
         return False

-    async def flush_all(self):
+    async def flush_all(self) -> None:
         """
         Resets all in-memory storage containers to their initial empty state.
         This is a destructive operation intended for use in tests to ensure
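Note on the `@@ -102` hunk: the rewritten body pops a `(priority, payload)` tuple from what the surrounding code suggests is an `asyncio.PriorityQueue`, guarded by a timeout. A standalone sketch of that pattern (the queue layout is assumed from the diff):

```python
# Standalone sketch of the timeout-guarded priority-queue pop shown above.
import asyncio
from asyncio import PriorityQueue, wait_for
from typing import Any

async def pop_task(queue: PriorityQueue, timeout: float) -> dict[str, Any] | None:
    try:
        _, task_payload = await wait_for(queue.get(), timeout=timeout)
        return task_payload if isinstance(task_payload, dict) else None
    except TimeoutError:  # asyncio.TimeoutError aliases this on Python 3.11+
        return None

async def demo() -> None:
    q: PriorityQueue = PriorityQueue()
    await q.put((1, {"id": "job-1"}))
    print(await pop_task(q, timeout=0.1))  # {'id': 'job-1'}
    print(await pop_task(q, timeout=0.1))  # None: queue empty, timed out

asyncio.run(demo())
```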
avtomatika/utils/__init__.py
File without changes

avtomatika/utils/webhook_sender.py
ADDED

@@ -0,0 +1,54 @@
+from asyncio import sleep
+from dataclasses import asdict, dataclass
+from logging import getLogger
+from typing import Any
+
+from aiohttp import ClientSession, ClientTimeout
+
+logger = getLogger(__name__)
+
+
+@dataclass
+class WebhookPayload:
+    event: str  # "job_finished", "job_failed", "job_quarantined"
+    job_id: str
+    status: str
+    result: dict[str, Any] | None = None
+    error: str | None = None
+
+
+class WebhookSender:
+    def __init__(self, session: ClientSession):
+        self.session = session
+        self.timeout = ClientTimeout(total=10)
+        self.max_retries = 3
+
+    async def send(self, url: str, payload: WebhookPayload) -> bool:
+        """
+        Sends a webhook payload to the specified URL with retries.
+        Returns True if successful, False otherwise.
+        """
+        data = asdict(payload)
+        for attempt in range(1, self.max_retries + 1):
+            try:
+                async with self.session.post(url, json=data, timeout=self.timeout) as response:
+                    if 200 <= response.status < 300:
+                        logger.info(f"Webhook sent successfully to {url} for job {payload.job_id}")
+                        return True
+                    else:
+                        logger.warning(
+                            f"Webhook failed for job {payload.job_id} to {url}. "
+                            f"Status: {response.status}. Attempt {attempt}/{self.max_retries}"
+                        )
+            except Exception as e:
+                logger.warning(
+                    f"Error sending webhook for job {payload.job_id} to {url}: {e}. "
+                    f"Attempt {attempt}/{self.max_retries}"
+                )
+
+            # Exponential backoff
+            if attempt < self.max_retries:
+                await sleep(2**attempt)
+
+        logger.error(f"Failed to send webhook for job {payload.job_id} to {url} after {self.max_retries} attempts.")
+        return False
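A minimal usage sketch of the new `WebhookSender` (the session lifecycle and target URL are illustrative; the class and dataclass come from the diff above):

```python
# Illustrative driver for WebhookSender; only the imports mirror the new module.
import asyncio
from aiohttp import ClientSession
from avtomatika.utils.webhook_sender import WebhookPayload, WebhookSender

async def main() -> None:
    async with ClientSession() as session:
        sender = WebhookSender(session)
        payload = WebhookPayload(
            event="job_finished",
            job_id="123e4567-e89b-12d3-a456-426614174000",
            status="finished",
            result={"output_path": "/videos/result.mp4"},
        )
        # send() retries up to 3 times with exponential backoff (2s, then 4s)
        # and returns False if every attempt fails.
        delivered = await sender.send("https://example.com/hook", payload)
        print("delivered:", delivered)

asyncio.run(main())
```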
avtomatika/watcher.py
CHANGED
@@ -29,9 +29,7 @@ class Watcher:
             await sleep(self.watch_interval_seconds)

             # Attempt to acquire distributed lock
-            # We set TTL slightly longer than the expected execution time
-            # but shorter than the interval if possible.
-            # Actually, a fixed TTL like 60s is fine as long as we release it.
+            # We set TTL slightly longer than the expected execution time (60s)
             if not await self.storage.acquire_lock("global_watcher_lock", self._instance_id, 60):
                 logger.debug("Watcher lock held by another instance. Skipping check.")
                 continue
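The surviving comment describes the usual acquire-or-skip pattern for running a singleton check loop across multiple instances. A sketch of that pattern, with the storage protocol assumed (`acquire_lock` returning `False` when another instance holds the lock, and a matching `release_lock`, which the removed comment implies exists):

```python
# Sketch of an acquire-or-skip watcher loop; the storage lock API is assumed.
import asyncio
from uuid import uuid4

async def watcher_loop(storage, watch_interval_seconds: float = 5.0) -> None:
    instance_id = str(uuid4())
    while True:
        await asyncio.sleep(watch_interval_seconds)
        # The 60s TTL outlives the expected check duration, so a crashed
        # holder cannot block other instances forever.
        if not await storage.acquire_lock("global_watcher_lock", instance_id, 60):
            continue  # another instance performs the check this round
        try:
            ...  # scan watched jobs for timeouts
        finally:
            await storage.release_lock("global_watcher_lock", instance_id)
```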
{avtomatika-1.0b6.dist-info → avtomatika-1.0b7.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: avtomatika
-Version: 1.0b6
+Version: 1.0b7
 Summary: A state-machine based orchestrator for long-running AI and other jobs.
 Project-URL: Homepage, https://github.com/avtomatika-ai/avtomatika
 Project-URL: Bug Tracker, https://github.com/avtomatika-ai/avtomatika/issues

@@ -12,7 +12,6 @@ Requires-Python: >=3.11
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: aiohttp~=3.12
-Requires-Dist: aiocache~=0.12
 Requires-Dist: python-json-logger~=4.0
 Requires-Dist: graphviz~=0.21
 Requires-Dist: zstandard~=0.24

@@ -61,6 +60,7 @@ This document serves as a comprehensive guide for developers looking to build pi
 - [Parallel Execution and Aggregation (Fan-out/Fan-in)](#parallel-execution-and-aggregation-fan-outfan-in)
 - [Dependency Injection (DataStore)](#dependency-injection-datastore)
 - [Native Scheduler](#native-scheduler)
+- [Webhook Notifications](#webhook-notifications)
 - [Production Configuration](#production-configuration)
 - [Fault Tolerance](#fault-tolerance)
 - [Storage Backend](#storage-backend)

@@ -157,7 +157,13 @@ async def end_handler(context):
 engine = OrchestratorEngine(storage, config)
 engine.register_blueprint(my_blueprint)

-# 4. Define the main entrypoint to run the server
+# 4. Accessing Components (Optional)
+# You can access the internal aiohttp app and core components using AppKeys
+# from avtomatika.app_keys import ENGINE_KEY, DISPATCHER_KEY
+# app = engine.app
+# dispatcher = app[DISPATCHER_KEY]
+
+# 5. Define the main entrypoint to run the server
 async def main():
     await engine.start()

@@ -355,6 +361,40 @@ blueprint = "backup_flow"
 daily_at = "02:00"
 ```

+### 6. Webhook Notifications
+
+The orchestrator can send asynchronous notifications to an external system when a job completes, fails, or is quarantined. This eliminates the need for clients to constantly poll the API for status updates.
+
+* **Usage:** Pass a `webhook_url` in the request body when creating a job.
+* **Events:**
+  * `job_finished`: The job reached a final success state.
+  * `job_failed`: The job failed (e.g., due to an error or invalid input).
+  * `job_quarantined`: The job was moved to quarantine after repeated failures.
+
+**Example Request:**
+```json
+POST /api/v1/jobs/my_flow
+{
+  "initial_data": {
+    "video_url": "..."
+  },
+  "webhook_url": "https://my-app.com/webhooks/avtomatika"
+}
+```
+
+**Example Webhook Payload:**
+```json
+{
+  "event": "job_finished",
+  "job_id": "123e4567-e89b-12d3-a456-426614174000",
+  "status": "finished",
+  "result": {
+    "output_path": "/videos/result.mp4"
+  },
+  "error": null
+}
+```
+
 ## Production Configuration

 The orchestrator's behavior can be configured through environment variables. Additionally, any configuration parameter loaded from environment variables can be programmatically overridden in your application code after the `Config` object has been initialized. This provides flexibility for different deployment and testing scenarios.
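To exercise the webhook flow documented in the README hunk above, a minimal receiver could look like this (the route path is arbitrary; only the payload keys come from the README example):

```python
# Minimal aiohttp endpoint for receiving the documented webhook payloads.
from aiohttp import web

async def handle_webhook(request: web.Request) -> web.Response:
    payload = await request.json()
    # Documented keys: event, job_id, status, result, error.
    print(f"{payload['event']}: job {payload['job_id']} is {payload['status']}")
    return web.Response(status=204)

app = web.Application()
app.router.add_post("/webhooks/avtomatika", handle_webhook)

if __name__ == "__main__":
    web.run_app(app, port=8080)
```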
{avtomatika-1.0b6.dist-info → avtomatika-1.0b7.dist-info}/RECORD
CHANGED

@@ -1,16 +1,17 @@
 avtomatika/__init__.py,sha256=D5r3L-H06uxsY_wgfh7u9YR29QvZMer1BlvzjW9Umfo,701
 avtomatika/api.html,sha256=RLx-D1uFCSAXIf_2WgFlSTWrWPcmonNYM-9oNanKXBg,32835
-avtomatika/
+avtomatika/app_keys.py,sha256=hwg5IBYUsCe-tCru61z3a4-lTw98JL41SXWCmIM_YHc,1161
+avtomatika/blueprint.py,sha256=AJ8hZkzvqkdu9aPmlJeFzuSj57L6Xm41KS9kBMGB4Vg,11827
 avtomatika/client_config_loader.py,sha256=zVVHZlxSqZUaNpZ4zoU0T1CFYXdxy-3vKSmPcaFuHSY,2772
 avtomatika/compression.py,sha256=bhA1kw4YrCR3I3kdquZSY0fAzCrRrjtz55uepzLUDKI,2498
 avtomatika/config.py,sha256=Tc-vpaQS11i_JTa1pQjGuQD3R5Kj9sIf6gGJGjItBBo,2487
 avtomatika/constants.py,sha256=WL58Nh-EY6baM9Ur_tR9merwPRGb41_klUG2V-yUUaA,963
-avtomatika/context.py,sha256=
-avtomatika/data_types.py,sha256=
+avtomatika/context.py,sha256=T6Ux4Fb1DwWRGTpMNeukM51MQDQbGk2HS6Cwpc0dc1s,4248
+avtomatika/data_types.py,sha256=odbkraTYhOo1j3ETIX8NJOy8yIwTdQ_i3juATsjroas,1424
 avtomatika/datastore.py,sha256=gJjhZ5kxjF8pmbbPQb_qu3HPUpfy2c6T75KZ-smb_zg,545
-avtomatika/dispatcher.py,sha256=
-avtomatika/engine.py,sha256=
-avtomatika/executor.py,sha256
+avtomatika/dispatcher.py,sha256=RrUPvDnCEwAdqxrGpUUcvQZOxG8Ir1E3m9yJjkQeEig,9635
+avtomatika/engine.py,sha256=LZBN4Z50cEITHcrzoYqPS-Rjw4WdPSKRuq-iaBipEbE,15475
+avtomatika/executor.py,sha256=iR9NqS9IffoW692Cq5PKM0P5lmIikmQ65vx8b-D9IZQ,23098
 avtomatika/health_checker.py,sha256=WXwvRJ-3cZC2Udc_ogsyIQp7VzcvJjq_IaqzkTdE0TE,1265
 avtomatika/logging_config.py,sha256=Zb6f9Nri9WVWhlpuBg6Lpi5SWRLGIUmS8Dc3xD1Gg0g,2993
 avtomatika/metrics.py,sha256=7XDhr_xMJ9JpElpZmBG7R0ml7AMdAp9UYp_W-i7tyLg,1858

@@ -18,23 +19,27 @@ avtomatika/py.typed,sha256=CT_L7gw2MLcQY-X0vs-xB5Vr0wzvGo7GuQYPI_qwJE8,65
 avtomatika/quota.py,sha256=DNcaL6k0J1REeP8sVqbY9FprY_3BSr2SxM2Vf4mEqdw,1612
 avtomatika/ratelimit.py,sha256=hFGW5oN9G6_W_jnHmopXW8bRjjzlvanY19MLghsNLE8,1306
 avtomatika/reputation.py,sha256=IHcaIAILWZftPPmXj5En28OSDNK7U8ivQ-w30zIF8fk,3748
-avtomatika/scheduler.py,sha256=
+avtomatika/scheduler.py,sha256=F5Kv5Rx34nDd0mE5jxjwpjRg8duDZBEr91N5Y6CNR24,4231
 avtomatika/scheduler_config_loader.py,sha256=F6mLM8yPRgG4bMHV_WnXX7UOrXD8fCXJT30bbEuQ2mk,1311
 avtomatika/security.py,sha256=kkU68YmLWq1ClMUdEW98pS9WsEwHinHoZcdMoPm63Uk,4417
 avtomatika/telemetry.py,sha256=ZBt1_xJ36PzDSz-zdCXeNp58NiezUgbqvMctTG25PT0,2352
-avtomatika/watcher.py,sha256=
+avtomatika/watcher.py,sha256=WAYTjhVmXyqWZfWfQm-iFDVZFBYpNRfwgGDxa_LCnAI,3354
 avtomatika/worker_config_loader.py,sha256=n0j8gfuJDacWONr8744RsHTCWpc_1ZTRMC-rJZh6P6A,2249
 avtomatika/ws_manager.py,sha256=pi5xe0ivsCjRZw08ri5N-gAChMH2I2YPLpl3E2tP89k,3057
+avtomatika/api/handlers.py,sha256=MO6QkbT001jj4latUXGT0hGOmQf_6TkRkmwx19OcXeQ,22176
+avtomatika/api/routes.py,sha256=vSwj2jJlmftZrjrctt-mNYLF23CfCtlUfaMoZzNOqCk,4895
 avtomatika/history/base.py,sha256=Gfw0Gb4Mt9wQrMlYLugZwey_6-cDej5OUctiMTCWg7Q,1668
 avtomatika/history/noop.py,sha256=ETVtPiTfkaMpzhGD8c0_4Iu6pWD89dnPrrRrSIjmc8s,970
 avtomatika/history/postgres.py,sha256=vtW4LMW7Vli5MjcGYY3ez667-C8Cq3I7kIHrcEgSYps,9409
 avtomatika/history/sqlite.py,sha256=Blc9ckvzoDaMRStXyfJOzMAdU_t2JcwtQtVdPgnr6s0,9131
 avtomatika/storage/__init__.py,sha256=mGRj_40dWZ7R7uYbqC6gCsUWCKHAbZz4ZVIhYg5dT_E,262
 avtomatika/storage/base.py,sha256=hW7XFhe6CQDP69q5NPSkUzEInIFxDR1-AyRPZNPEDEc,11424
-avtomatika/storage/memory.py,sha256=
+avtomatika/storage/memory.py,sha256=BYox_3v1qRqfJyyjkjxXj5OmyGBu0EZqz5BWKZy6YsU,12561
 avtomatika/storage/redis.py,sha256=opOhqBL_uCsNXcMD_W_tJU-8wzDUSjBJWEsXrwP2_YM,21035
-avtomatika
-avtomatika
-avtomatika-1.
-avtomatika-1.
-avtomatika-1.
+avtomatika/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+avtomatika/utils/webhook_sender.py,sha256=sbX6HEED_TvH3MBxFlVrAqP-n68y0q5quYty5b8Z4D8,1941
+avtomatika-1.0b7.dist-info/licenses/LICENSE,sha256=tqCjw9Y1vbU-hLcWi__7wQstLbt2T1XWPdbQYqCxuWY,1072
+avtomatika-1.0b7.dist-info/METADATA,sha256=FVUEaG5_NWobxU5_FDG2l5RmdhvVN5t1quZXQ0GsSpw,24215
+avtomatika-1.0b7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+avtomatika-1.0b7.dist-info/top_level.txt,sha256=gLDWhA_wxHj0I6fG5X8vw9fE0HSN4hTE2dEJzeVS2x8,11
+avtomatika-1.0b7.dist-info/RECORD,,
{avtomatika-1.0b6.dist-info → avtomatika-1.0b7.dist-info}/WHEEL
File without changes

{avtomatika-1.0b6.dist-info → avtomatika-1.0b7.dist-info}/licenses/LICENSE
File without changes

{avtomatika-1.0b6.dist-info → avtomatika-1.0b7.dist-info}/top_level.txt
File without changes