avtomatika 1.0b4__py3-none-any.whl → 1.0b6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
avtomatika/__init__.py CHANGED
@@ -4,7 +4,7 @@
4
4
  This module exposes the primary classes for building and running state-driven automations.
5
5
  """
6
6
 
7
- import contextlib
7
+ from contextlib import suppress
8
8
  from importlib.metadata import version
9
9
 
10
10
  __version__ = version("avtomatika")
@@ -23,7 +23,7 @@ __all__ = [
23
23
  "StorageBackend",
24
24
  ]
25
25
 
26
- with contextlib.suppress(ImportError):
26
+ with suppress(ImportError):
27
27
  from .storage.redis import RedisStorage # noqa: F401
28
28
 
29
29
  __all__.append("RedisStorage")
avtomatika/blueprint.py CHANGED
@@ -1,6 +1,6 @@
1
1
  from operator import eq, ge, gt, le, lt, ne
2
2
  from re import compile as re_compile
3
- from typing import Any, Callable, Dict, NamedTuple, Optional
3
+ from typing import Any, Callable, NamedTuple
4
4
 
5
5
  from .datastore import AsyncDictStore
6
6
 
@@ -99,8 +99,6 @@ class HandlerDecorator:
99
99
 
100
100
  def when(self, condition_str: str) -> Callable:
101
101
  def decorator(func: Callable) -> Callable:
102
- # We still register the base handler to ensure the state is known,
103
- # but we can make it a no-op if only conditional handlers exist for a state.
104
102
  if self._state not in self._blueprint.handlers:
105
103
  self._blueprint.handlers[self._state] = lambda: None # Placeholder
106
104
 
@@ -115,8 +113,8 @@ class StateMachineBlueprint:
115
113
  def __init__(
116
114
  self,
117
115
  name: str,
118
- api_endpoint: Optional[str] = None,
119
- api_version: Optional[str] = None,
116
+ api_endpoint: str | None = None,
117
+ api_version: str | None = None,
120
118
  data_stores: Any = None,
121
119
  ):
122
120
  """Initializes a new blueprint.
@@ -132,14 +130,14 @@ class StateMachineBlueprint:
132
130
  self.name = name
133
131
  self.api_endpoint = api_endpoint
134
132
  self.api_version = api_version
135
- self.data_stores: Dict[str, AsyncDictStore] = data_stores if data_stores is not None else {}
136
- self.handlers: Dict[str, Callable] = {}
137
- self.aggregator_handlers: Dict[str, Callable] = {}
133
+ self.data_stores: dict[str, AsyncDictStore] = data_stores if data_stores is not None else {}
134
+ self.handlers: dict[str, Callable] = {}
135
+ self.aggregator_handlers: dict[str, Callable] = {}
138
136
  self.conditional_handlers: list[ConditionalHandler] = []
139
- self.start_state: Optional[str] = None
137
+ self.start_state: str | None = None
140
138
  self.end_states: set[str] = set()
141
139
 
142
- def add_data_store(self, name: str, initial_data: Dict[str, Any]):
140
+ def add_data_store(self, name: str, initial_data: dict[str, Any]):
143
141
  """Adds a named data store to the blueprint."""
144
142
  if name in self.data_stores:
145
143
  raise ValueError(f"Data store with name '{name}' already exists.")
@@ -174,7 +172,7 @@ class StateMachineBlueprint:
174
172
  f"No suitable handler found for state '{state}' in blueprint '{self.name}' for the given context.",
175
173
  )
176
174
 
177
- def render_graph(self, output_filename: Optional[str] = None, output_format: str = "png"):
175
+ def render_graph(self, output_filename: str | None = None, output_format: str = "png"):
178
176
  import ast
179
177
  import inspect
180
178
  import logging
avtomatika/config.py CHANGED
@@ -1,4 +1,5 @@
1
1
  from os import getenv
2
+ from socket import gethostname
2
3
 
3
4
 
4
5
  class Config:
@@ -7,6 +8,9 @@ class Config:
7
8
  """
8
9
 
9
10
  def __init__(self):
11
+ # Instance identity
12
+ self.INSTANCE_ID: str = getenv("INSTANCE_ID", gethostname())
13
+
10
14
  # Redis settings
11
15
  self.REDIS_HOST: str = getenv("REDIS_HOST", "")
12
16
  self.REDIS_PORT: int = int(getenv("REDIS_PORT", 6379))
@@ -45,6 +49,9 @@ class Config:
45
49
  self.WATCHER_INTERVAL_SECONDS: int = int(
46
50
  getenv("WATCHER_INTERVAL_SECONDS", 20),
47
51
  )
52
+ self.EXECUTOR_MAX_CONCURRENT_JOBS: int = int(
53
+ getenv("EXECUTOR_MAX_CONCURRENT_JOBS", 100),
54
+ )
48
55
 
49
56
  # History storage settings
50
57
  self.HISTORY_DATABASE_URI: str = getenv("HISTORY_DATABASE_URI", "")
@@ -55,3 +62,7 @@ class Config:
55
62
  # External config files
56
63
  self.WORKERS_CONFIG_PATH: str = getenv("WORKERS_CONFIG_PATH", "")
57
64
  self.CLIENTS_CONFIG_PATH: str = getenv("CLIENTS_CONFIG_PATH", "")
65
+ self.SCHEDULES_CONFIG_PATH: str = getenv("SCHEDULES_CONFIG_PATH", "")
66
+
67
+ # Timezone settings
68
+ self.TZ: str = getenv("TZ", "UTC")
@@ -0,0 +1,30 @@
1
+ """
2
+ Centralized constants for the Avtomatika protocol.
3
+ Use these constants instead of hardcoded strings to ensure consistency.
4
+ """
5
+
6
+ # --- Auth Headers ---
7
+ AUTH_HEADER_CLIENT = "X-Avtomatika-Token"
8
+ AUTH_HEADER_WORKER = "X-Worker-Token"
9
+
10
+ # --- Error Codes ---
11
+ # Error codes returned by workers in the result payload
12
+ ERROR_CODE_TRANSIENT = "TRANSIENT_ERROR"
13
+ ERROR_CODE_PERMANENT = "PERMANENT_ERROR"
14
+ ERROR_CODE_INVALID_INPUT = "INVALID_INPUT_ERROR"
15
+
16
+ # --- Task Statuses ---
17
+ # Standard statuses for task results
18
+ TASK_STATUS_SUCCESS = "success"
19
+ TASK_STATUS_FAILURE = "failure"
20
+ TASK_STATUS_CANCELLED = "cancelled"
21
+
22
+ # --- Job Statuses ---
23
+ JOB_STATUS_PENDING = "pending"
24
+ JOB_STATUS_WAITING_FOR_WORKER = "waiting_for_worker"
25
+ JOB_STATUS_RUNNING = "running"
26
+ JOB_STATUS_FAILED = "failed"
27
+ JOB_STATUS_QUARANTINED = "quarantined"
28
+ JOB_STATUS_CANCELLED = "cancelled"
29
+ JOB_STATUS_WAITING_FOR_HUMAN = "waiting_for_human"
30
+ JOB_STATUS_WAITING_FOR_PARALLEL = "waiting_for_parallel_tasks"
avtomatika/context.py CHANGED
@@ -1,4 +1,4 @@
1
- from typing import Any, Dict, List, Optional
1
+ from typing import Any
2
2
 
3
3
 
4
4
  class ActionFactory:
@@ -6,10 +6,10 @@ class ActionFactory:
6
6
 
7
7
  def __init__(self, job_id: str):
8
8
  self._job_id = job_id
9
- self._next_state_val: Optional[str] = None
10
- self._task_to_dispatch_val: Optional[Dict[str, Any]] = None
11
- self._sub_blueprint_to_run_val: Optional[Dict[str, Any]] = None
12
- self._parallel_tasks_to_dispatch_val: Optional[Dict[str, Any]] = None
9
+ self._next_state_val: str | None = None
10
+ self._task_to_dispatch_val: dict[str, Any] | None = None
11
+ self._sub_blueprint_to_run_val: dict[str, Any] | None = None
12
+ self._parallel_tasks_to_dispatch_val: dict[str, Any] | None = None
13
13
 
14
14
  def _check_for_existing_action(self):
15
15
  """
@@ -30,22 +30,22 @@ class ActionFactory:
30
30
  )
31
31
 
32
32
  @property
33
- def next_state(self) -> Optional[str]:
33
+ def next_state(self) -> str | None:
34
34
  return self._next_state_val
35
35
 
36
36
  @property
37
- def task_to_dispatch(self) -> Optional[Dict[str, Any]]:
37
+ def task_to_dispatch(self) -> dict[str, Any] | None:
38
38
  return self._task_to_dispatch_val
39
39
 
40
40
  @property
41
- def sub_blueprint_to_run(self) -> Optional[Dict[str, Any]]:
41
+ def sub_blueprint_to_run(self) -> dict[str, Any] | None:
42
42
  return self._sub_blueprint_to_run_val
43
43
 
44
44
  @property
45
- def parallel_tasks_to_dispatch(self) -> Optional[Dict[str, Any]]:
45
+ def parallel_tasks_to_dispatch(self) -> dict[str, Any] | None:
46
46
  return self._parallel_tasks_to_dispatch_val
47
47
 
48
- def dispatch_parallel(self, tasks: List[Dict[str, Any]], aggregate_into: str) -> None:
48
 + def dispatch_parallel(self, tasks: list[dict[str, Any]], aggregate_into: str) -> None:
49
49
  """
50
50
  Dispatches multiple tasks for parallel execution.
51
51
  """
@@ -65,12 +65,12 @@ class ActionFactory:
65
65
  def dispatch_task(
66
66
  self,
67
67
  task_type: str,
68
- params: Dict[str, Any],
69
- transitions: Dict[str, str],
68
+ params: dict[str, Any],
69
+ transitions: dict[str, str],
70
70
  dispatch_strategy: str = "default",
71
- resource_requirements: Optional[Dict[str, Any]] = None,
72
- timeout_seconds: Optional[int] = None,
73
- max_cost: Optional[float] = None,
71
+ resource_requirements: dict[str, Any] | None = None,
72
+ timeout_seconds: int | None = None,
73
+ max_cost: float | None = None,
74
74
  priority: float = 0.0,
75
75
  ) -> None:
76
76
  """Dispatches a task to a worker for execution."""
@@ -91,7 +91,7 @@ class ActionFactory:
91
91
  self,
92
92
  integration: str,
93
93
  message: str,
94
- transitions: Dict[str, str],
94
+ transitions: dict[str, str],
95
95
  ) -> None:
96
96
  """Pauses the pipeline until an external signal (human approval) is received."""
97
97
  self._check_for_existing_action()
@@ -106,8 +106,8 @@ class ActionFactory:
106
106
  def run_blueprint(
107
107
  self,
108
108
  blueprint_name: str,
109
- initial_data: Dict[str, Any],
110
- transitions: Dict[str, str],
109
+ initial_data: dict[str, Any],
110
+ transitions: dict[str, str],
111
111
  ) -> None:
112
112
  """Runs a child blueprint and waits for its result."""
113
113
  self._check_for_existing_action()
avtomatika/data_types.py CHANGED
@@ -1,4 +1,4 @@
1
- from typing import TYPE_CHECKING, Any, Dict, NamedTuple, Optional
1
+ from typing import TYPE_CHECKING, Any, NamedTuple
2
2
 
3
3
  if TYPE_CHECKING:
4
4
  from .context import ActionFactory
@@ -9,8 +9,7 @@ class ClientConfig(NamedTuple):
9
9
 
10
10
  token: str
11
11
  plan: str
12
- # Use Dict to support any custom fields
13
- params: Dict[str, Any]
12
+ params: dict[str, Any]
14
13
 
15
14
 
16
15
  class JobContext(NamedTuple):
@@ -18,13 +17,13 @@ class JobContext(NamedTuple):
18
17
 
19
18
  job_id: str
20
19
  current_state: str
21
- initial_data: Dict[str, Any]
22
- state_history: Dict[str, Any]
20
+ initial_data: dict[str, Any]
21
+ state_history: dict[str, Any]
23
22
  client: ClientConfig
24
23
  actions: "ActionFactory"
25
24
  data_stores: Any = None
26
- tracing_context: Dict[str, Any] = {}
27
- aggregation_results: Optional[Dict[str, Any]] = None
25
+ tracing_context: dict[str, Any] = {}
26
+ aggregation_results: dict[str, Any] | None = None
28
27
 
29
28
 
30
29
  class GPUInfo(NamedTuple):
avtomatika/datastore.py CHANGED
@@ -1,4 +1,4 @@
1
- from typing import Any, Dict
1
+ from typing import Any
2
2
 
3
3
 
4
4
  class AsyncDictStore:
@@ -6,7 +6,7 @@ class AsyncDictStore:
6
6
  Simulates the behavior of a persistent store for use in blueprints.
7
7
  """
8
8
 
9
- def __init__(self, initial_data: Dict[str, Any]):
9
+ def __init__(self, initial_data: dict[str, Any]):
10
10
  self._data = initial_data.copy()
11
11
 
12
12
  async def get(self, key: str) -> Any:
avtomatika/dispatcher.py CHANGED
@@ -1,7 +1,7 @@
1
1
  from collections import defaultdict
2
2
  from logging import getLogger
3
3
  from random import choice
4
- from typing import Any, Dict, List
4
+ from typing import Any
5
5
  from uuid import uuid4
6
6
 
7
7
  try:
@@ -26,12 +26,12 @@ class Dispatcher:
26
26
  def __init__(self, storage: StorageBackend, config: Config):
27
27
  self.storage = storage
28
28
  self.config = config
29
- self._round_robin_indices: Dict[str, int] = defaultdict(int)
29
+ self._round_robin_indices: dict[str, int] = defaultdict(int)
30
30
 
31
31
  @staticmethod
32
32
  def _is_worker_compliant(
33
- worker: Dict[str, Any],
34
- requirements: Dict[str, Any],
33
+ worker: dict[str, Any],
34
+ requirements: dict[str, Any],
35
35
  ) -> bool:
36
36
  """Checks if a worker meets the specified resource requirements."""
37
37
  if required_gpu := requirements.get("gpu_info"):
@@ -58,9 +58,9 @@ class Dispatcher:
58
58
 
59
59
  @staticmethod
60
60
  def _select_default(
61
- workers: List[Dict[str, Any]],
61
+ workers: list[dict[str, Any]],
62
62
  task_type: str,
63
- ) -> Dict[str, Any]:
63
+ ) -> dict[str, Any]:
64
64
  """Default strategy: first selects "warm" workers (those that have the
65
65
  task in their cache), and then selects the cheapest among them.
66
66
 
@@ -80,9 +80,9 @@ class Dispatcher:
80
80
 
81
81
  def _select_round_robin(
82
82
  self,
83
- workers: List[Dict[str, Any]],
83
+ workers: list[dict[str, Any]],
84
84
  task_type: str,
85
- ) -> Dict[str, Any]:
85
+ ) -> dict[str, Any]:
86
86
  """ "Round Robin" strategy: distributes tasks sequentially among all
87
87
  available workers.
88
88
  """
@@ -93,9 +93,9 @@ class Dispatcher:
93
93
 
94
94
  @staticmethod
95
95
  def _select_least_connections(
96
- workers: List[Dict[str, Any]],
96
+ workers: list[dict[str, Any]],
97
97
  task_type: str,
98
- ) -> Dict[str, Any]:
98
+ ) -> dict[str, Any]:
99
99
  """ "Least Connections" strategy: selects the worker with the fewest
100
100
  active tasks (based on the `load` field).
101
101
  """
@@ -103,14 +103,14 @@ class Dispatcher:
103
103
 
104
104
  @staticmethod
105
105
  def _select_cheapest(
106
- workers: List[Dict[str, Any]],
106
+ workers: list[dict[str, Any]],
107
107
  task_type: str,
108
- ) -> Dict[str, Any]:
108
+ ) -> dict[str, Any]:
109
109
  """Selects the cheapest worker based on 'cost_per_second'."""
110
110
  return min(workers, key=lambda w: w.get("cost_per_second", float("inf")))
111
111
 
112
112
  @staticmethod
113
- def _get_best_value_score(worker: Dict[str, Any]) -> float:
113
+ def _get_best_value_score(worker: dict[str, Any]) -> float:
114
114
  """Calculates a "score" for a worker using the formula cost / reputation.
115
115
  The lower the score, the better.
116
116
  """
@@ -122,13 +122,13 @@ class Dispatcher:
122
122
 
123
123
  def _select_best_value(
124
124
  self,
125
- workers: List[Dict[str, Any]],
125
+ workers: list[dict[str, Any]],
126
126
  task_type: str,
127
- ) -> Dict[str, Any]:
127
+ ) -> dict[str, Any]:
128
128
  """Selects the worker with the best price-quality (reputation) ratio."""
129
129
  return min(workers, key=self._get_best_value_score)
130
130
 
131
- async def dispatch(self, job_state: Dict[str, Any], task_info: Dict[str, Any]):
131
+ async def dispatch(self, job_state: dict[str, Any], task_info: dict[str, Any]):
132
132
  job_id = job_state["id"]
133
133
  task_type = task_info.get("type")
134
134
  if not task_type:
@@ -142,7 +142,6 @@ class Dispatcher:
142
142
  if not all_workers:
143
143
  raise RuntimeError("No available workers")
144
144
 
145
- # 1. Filter by 'idle' status
146
145
  # A worker is considered available if its status is 'idle' or not specified (for backward compatibility)
147
146
  logger.debug(f"All available workers: {[w['worker_id'] for w in all_workers]}")
148
147
  idle_workers = [w for w in all_workers if w.get("status", "idle") == "idle"]
@@ -157,13 +156,13 @@ class Dispatcher:
157
156
  )
158
157
  raise RuntimeError("No idle workers (all are 'busy')")
159
158
 
160
- # 2. Filter by task type
159
+ # Filter by task type
161
160
  capable_workers = [w for w in idle_workers if task_type in w.get("supported_tasks", [])]
162
161
  logger.debug(f"Capable workers for task '{task_type}': {[w['worker_id'] for w in capable_workers]}")
163
162
  if not capable_workers:
164
163
  raise RuntimeError(f"No suitable workers for task type '{task_type}'")
165
164
 
166
- # 3. Filter by resource requirements
165
+ # Filter by resource requirements
167
166
  if resource_requirements:
168
167
  compliant_workers = [w for w in capable_workers if self._is_worker_compliant(w, resource_requirements)]
169
168
  logger.debug(
@@ -176,7 +175,7 @@ class Dispatcher:
176
175
  )
177
176
  capable_workers = compliant_workers
178
177
 
179
- # 4. Filter by maximum cost
178
+ # Filter by maximum cost
180
179
  max_cost = task_info.get("max_cost")
181
180
  if max_cost is not None:
182
181
  cost_compliant_workers = [w for w in capable_workers if w.get("cost_per_second", float("inf")) <= max_cost]
@@ -189,7 +188,7 @@ class Dispatcher:
189
188
  )
190
189
  capable_workers = cost_compliant_workers
191
190
 
192
- # 5. Select worker according to strategy
191
+ # Select worker according to strategy
193
192
  if dispatch_strategy == "round_robin":
194
193
  selected_worker = self._select_round_robin(capable_workers, task_type)
195
194
  elif dispatch_strategy == "least_connections":