avtomatika 1.0b7-py3-none-any.whl → 1.0b8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
avtomatika/app_keys.py CHANGED
@@ -30,3 +30,4 @@ WATCHER_TASK_KEY = AppKey("watcher_task", Task)
 REPUTATION_CALCULATOR_TASK_KEY = AppKey("reputation_calculator_task", Task)
 HEALTH_CHECKER_TASK_KEY = AppKey("health_checker_task", Task)
 SCHEDULER_TASK_KEY = AppKey("scheduler_task", Task)
+S3_SERVICE_KEY = AppKey("s3_service", "S3Service")
avtomatika/config.py CHANGED
@@ -39,6 +39,7 @@ class Config:

         # Worker settings
         self.WORKER_TIMEOUT_SECONDS: int = int(getenv("WORKER_TIMEOUT_SECONDS", 300))
+        self.TASK_FILES_DIR: str = getenv("TASK_FILES_DIR", "/tmp/avtomatika-payloads")
         self.WORKER_POLL_TIMEOUT_SECONDS: int = int(
             getenv("WORKER_POLL_TIMEOUT_SECONDS", 30),
         )
@@ -52,10 +53,19 @@ class Config:
         self.EXECUTOR_MAX_CONCURRENT_JOBS: int = int(
             getenv("EXECUTOR_MAX_CONCURRENT_JOBS", 100),
         )
+        self.REDIS_STREAM_BLOCK_MS: int = int(getenv("REDIS_STREAM_BLOCK_MS", 5000))

         # History storage settings
         self.HISTORY_DATABASE_URI: str = getenv("HISTORY_DATABASE_URI", "")

+        # S3 settings
+        self.S3_ENDPOINT_URL: str = getenv("S3_ENDPOINT_URL", "")
+        self.S3_ACCESS_KEY: str = getenv("S3_ACCESS_KEY", "")
+        self.S3_SECRET_KEY: str = getenv("S3_SECRET_KEY", "")
+        self.S3_REGION: str = getenv("S3_REGION", "us-east-1")
+        self.S3_DEFAULT_BUCKET: str = getenv("S3_DEFAULT_BUCKET", "avtomatika-payloads")
+        self.S3_MAX_CONCURRENCY: int = int(getenv("S3_MAX_CONCURRENCY", 100))
+
         # Rate limiting settings
         self.RATE_LIMITING_ENABLED: bool = getenv("RATE_LIMITING_ENABLED", "true").lower() == "true"
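For illustration, the new S3 settings can be driven entirely from the environment. A minimal sketch (the endpoint and credential values are placeholders; the only assumption beyond the diff is that Config is instantiated after the variables are set):

import os

os.environ.update({
    "S3_ENDPOINT_URL": "http://localhost:9000",  # e.g. a local MinIO, placeholder value
    "S3_ACCESS_KEY": "minioadmin",
    "S3_SECRET_KEY": "minioadmin",
    "S3_MAX_CONCURRENCY": "50",
})

from avtomatika.config import Config

config = Config()
assert config.S3_REGION == "us-east-1"     # default applies when unset
assert config.S3_MAX_CONCURRENCY == 50     # env string parsed to int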
avtomatika/data_types.py CHANGED
@@ -21,10 +21,11 @@ class JobContext(NamedTuple):
     state_history: dict[str, Any]
     client: ClientConfig
     actions: "ActionFactory"
-    data_stores: dict[str, Any] | None = None
+    data_stores: Any | None = None
     tracing_context: dict[str, Any] | None = None
     aggregation_results: dict[str, Any] | None = None
     webhook_url: str | None = None
+    task_files: Any | None = None


 class GPUInfo(NamedTuple):
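The loosened data_stores type lines up with how the executor now builds the field: as a SimpleNamespace rather than a dict (see the executor.py hunk below). A minimal sketch of the resulting access pattern, with made-up store names:

from types import SimpleNamespace

# Hypothetical blueprint data stores; attribute access replaces dict lookups.
data_stores = SimpleNamespace(results_db="postgres://...", cache="redis://...")
print(data_stores.cache)  # "redis://..."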
avtomatika/dispatcher.py CHANGED
@@ -137,32 +137,17 @@ class Dispatcher:
         dispatch_strategy = task_info.get("dispatch_strategy", "default")
         resource_requirements = task_info.get("resource_requirements")

-        all_workers = await self.storage.get_available_workers()
-        logger.info(f"Found {len(all_workers)} available workers")
-        if not all_workers:
-            raise RuntimeError("No available workers")
-
-        # A worker is considered available if its status is 'idle' or not specified (for backward compatibility)
-        logger.debug(f"All available workers: {[w['worker_id'] for w in all_workers]}")
-        idle_workers = [w for w in all_workers if w.get("status", "idle") == "idle"]
-        logger.debug(f"Idle workers: {[w['worker_id'] for w in idle_workers]}")
-        if not idle_workers:
-            if busy_mo_workers := [
-                w for w in all_workers if w.get("status") == "busy" and "multi_orchestrator_info" in w
-            ]:
-                logger.warning(
-                    f"No idle workers. Found {len(busy_mo_workers)} busy workers "
-                    f"in multi-orchestrator mode. They are likely performing tasks for other Orchestrators.",
-                )
-            raise RuntimeError("No idle workers (all are 'busy')")
+        candidate_ids = await self.storage.find_workers_for_task(task_type)
+        if not candidate_ids:
+            logger.warning(f"No idle workers found for task '{task_type}'")
+            raise RuntimeError(f"No suitable workers for task type '{task_type}'")
+
+        capable_workers = await self.storage.get_workers(candidate_ids)
+        logger.debug(f"Found {len(capable_workers)} capable workers for task '{task_type}'")

-        # Filter by task type
-        capable_workers = [w for w in idle_workers if task_type in w.get("supported_tasks", [])]
-        logger.debug(f"Capable workers for task '{task_type}': {[w['worker_id'] for w in capable_workers]}")
         if not capable_workers:
-            raise RuntimeError(f"No suitable workers for task type '{task_type}'")
+            raise RuntimeError(f"No suitable workers for task type '{task_type}' (data missing)")

-        # Filter by resource requirements
         if resource_requirements:
             compliant_workers = [w for w in capable_workers if self._is_worker_compliant(w, resource_requirements)]
             logger.debug(
@@ -175,7 +160,6 @@ class Dispatcher:
             )
             capable_workers = compliant_workers

-        # Filter by maximum cost
         max_cost = task_info.get("max_cost")
         if max_cost is not None:
             cost_compliant_workers = [w for w in capable_workers if w.get("cost_per_second", float("inf")) <= max_cost]
@@ -188,7 +172,6 @@ class Dispatcher:
             )
             capable_workers = cost_compliant_workers

-        # Select worker according to strategy
         if dispatch_strategy == "round_robin":
             selected_worker = self._select_round_robin(capable_workers, task_type)
         elif dispatch_strategy == "least_connections":
@@ -205,7 +188,6 @@ class Dispatcher:
             f"Dispatching task '{task_type}' to worker {worker_id} (strategy: {dispatch_strategy})",
         )

-        # --- Task creation and enqueuing ---
         task_id = task_info.get("task_id") or str(uuid4())
         payload = {
             "job_id": job_id,
avtomatika/engine.py CHANGED
@@ -19,6 +19,7 @@ from .app_keys import (
     HTTP_SESSION_KEY,
     REPUTATION_CALCULATOR_KEY,
     REPUTATION_CALCULATOR_TASK_KEY,
+    S3_SERVICE_KEY,
     SCHEDULER_KEY,
     SCHEDULER_TASK_KEY,
     WATCHER_KEY,
@@ -37,6 +38,7 @@ from .history.base import HistoryStorageBase
 from .history.noop import NoOpHistoryStorage
 from .logging_config import setup_logging
 from .reputation import ReputationCalculator
+from .s3 import S3Service
 from .scheduler import Scheduler
 from .storage.base import StorageBackend
 from .telemetry import setup_telemetry
@@ -141,6 +143,11 @@ class OrchestratorEngine:
             self.history_storage = NoOpHistoryStorage()

     async def on_startup(self, app: web.Application) -> None:
+        # 1. Fail Fast: Check Storage Connection
+        if not await self.storage.ping():
+            logger.critical("Failed to connect to Storage Backend (Redis). Exiting.")
+            raise RuntimeError("Storage Backend is unavailable.")
+
         try:
             from opentelemetry.instrumentation.aiohttp_client import (
                 AioHttpClientInstrumentor,
@@ -152,6 +159,8 @@ class OrchestratorEngine:
                 "opentelemetry-instrumentation-aiohttp-client not found. AIOHTTP client instrumentation is disabled."
             )
         await self._setup_history_storage()
+        # Start history background worker
+        await self.history_storage.start()

         # Load client configs if the path is provided
         if self.config.CLIENTS_CONFIG_PATH:
@@ -188,6 +197,7 @@ class OrchestratorEngine:

         app[HTTP_SESSION_KEY] = ClientSession()
         self.webhook_sender = WebhookSender(app[HTTP_SESSION_KEY])
+        self.webhook_sender.start()
         self.dispatcher = Dispatcher(self.storage, self.config)
         app[DISPATCHER_KEY] = self.dispatcher
         app[EXECUTOR_KEY] = JobExecutor(self, self.history_storage)
@@ -196,6 +206,7 @@ class OrchestratorEngine:
         app[HEALTH_CHECKER_KEY] = HealthChecker(self)
         app[SCHEDULER_KEY] = Scheduler(self)
         app[WS_MANAGER_KEY] = self.ws_manager
+        app[S3_SERVICE_KEY] = S3Service(self.config, self.history_storage)

         app[EXECUTOR_TASK_KEY] = create_task(app[EXECUTOR_KEY].run())
         app[WATCHER_TASK_KEY] = create_task(app[WATCHER_KEY].run())
@@ -220,6 +231,13 @@ class OrchestratorEngine:
         logger.info("Closing WebSocket connections...")
         await self.ws_manager.close_all()

+        logger.info("Stopping WebhookSender...")
+        await self.webhook_sender.stop()
+
+        if S3_SERVICE_KEY in app:
+            logger.info("Closing S3 Service...")
+            await app[S3_SERVICE_KEY].close()
+
         logger.info("Cancelling background tasks...")
         app[HEALTH_CHECKER_TASK_KEY].cancel()
         app[WATCHER_TASK_KEY].cancel()
@@ -352,7 +370,7 @@ class OrchestratorEngine:
         )

         # Run in background to not block the main flow
-        create_task(self.webhook_sender.send(webhook_url, payload))
+        await self.webhook_sender.send(webhook_url, payload)

     def run(self) -> None:
         self.setup()
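The engine now drives an explicit lifecycle on WebhookSender: start() at startup, await send(...) to hand off a payload, await stop() at shutdown. That shape implies send() enqueues rather than delivers. The sender's internals are not part of this diff, so this is only a plausible sketch of the queue-backed pattern:

import asyncio
from aiohttp import ClientSession

class WebhookSenderSketch:
    """Illustrative only; the real WebhookSender ships in the package."""

    def __init__(self, session: ClientSession):
        self._session = session
        self._queue: asyncio.Queue[tuple[str, dict]] = asyncio.Queue()
        self._task: asyncio.Task | None = None

    def start(self) -> None:
        # Called once at startup; delivery happens in this background task.
        self._task = asyncio.create_task(self._worker())

    async def send(self, url: str, payload: dict) -> None:
        # Cheap to await from the request path: it only enqueues.
        await self._queue.put((url, payload))

    async def _worker(self) -> None:
        while True:
            url, payload = await self._queue.get()
            try:
                async with self._session.post(url, json=payload):
                    pass
            except Exception:
                pass  # the real sender would log and possibly retry
            finally:
                self._queue.task_done()

    async def stop(self) -> None:
        # Drain pending webhooks, then cancel the worker.
        if self._task:
            await self._queue.join()
            self._task.cancel()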
avtomatika/executor.py CHANGED
@@ -47,6 +47,7 @@ except ImportError:
     inject = NoOpPropagate().inject
     TraceContextTextMapPropagator = NoOpTraceContextTextMapPropagator()  # Instantiate the class

+from .app_keys import S3_SERVICE_KEY
 from .context import ActionFactory
 from .data_types import ClientConfig, JobContext
 from .history.base import HistoryStorageBase
@@ -74,7 +75,7 @@ class JobExecutor:
         self._running = False
         self._processing_messages: set[str] = set()

-    async def _process_job(self, job_id: str, message_id: str):
+    async def _process_job(self, job_id: str, message_id: str) -> None:
         """The core logic for processing a single job dequeued from storage."""
         if message_id in self._processing_messages:
             return
@@ -143,6 +144,11 @@ class JobExecutor:
             plan=client_config_dict.get("plan", "unknown"),
             params=client_config_dict.get("params", {}),
         )
+
+        # Get TaskFiles if S3 service is available
+        s3_service = self.engine.app.get(S3_SERVICE_KEY)
+        task_files = s3_service.get_task_files(job_id) if s3_service else None
+
         context = JobContext(
             job_id=job_id,
             current_state=job_state["current_state"],
@@ -153,6 +159,7 @@ class JobExecutor:
             data_stores=SimpleNamespace(**blueprint.data_stores),
             tracing_context=tracing_context,
             aggregation_results=job_state.get("aggregation_results"),
+            task_files=task_files,
         )

         try:
@@ -173,12 +180,17 @@ class JobExecutor:
                 params_to_inject["context"] = context
             if "actions" in param_names:
                 params_to_inject["actions"] = action_factory
+            if "task_files" in param_names:
+                params_to_inject["task_files"] = task_files
         else:
             # New injection logic with prioritized lookup.
             context_as_dict = context._asdict()
             for param_name in param_names:
+                # Direct injection of task_files
+                if param_name == "task_files":
+                    params_to_inject[param_name] = task_files
                 # Look in JobContext fields first.
-                if param_name in context_as_dict:
+                elif param_name in context_as_dict:
                     params_to_inject[param_name] = context_as_dict[param_name]
                 # Then look in state_history (data from previous steps/workers).
                 elif param_name in context.state_history:
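Under these rules, a blueprint state handler receives the job's TaskFiles object simply by naming a parameter task_files (it is None when no S3 service is registered). A hypothetical handler; the registration style and body are illustrative, and cleanup() is the only TaskFiles method visible in this diff:

async def generate_report(context, task_files):
    # "context" and "task_files" are injected by name, per the lookup above.
    if task_files is None:
        # S3 is not configured for this deployment; fall back or fail.
        return {"status": "failed", "reason": "S3 service unavailable"}
    # ... work with the job's S3-backed files here ...
    return {"status": "success"}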
@@ -258,6 +270,15 @@ class JobExecutor:
             await self.storage.enqueue_job(job_id)
         else:
             logger.info(f"Job {job_id} reached terminal state {next_state}")
+
+            # Clean up S3 files if service is available
+            s3_service = self.engine.app.get(S3_SERVICE_KEY)
+            if s3_service:
+                task_files = s3_service.get_task_files(job_id)
+                if task_files:
+                    # Run cleanup in background to not block response
+                    create_task(task_files.cleanup())
+
             await self._check_and_resume_parent(job_state)
             # Send webhook for finished/failed jobs
             event_type = "job_finished" if next_state == "finished" else "job_failed"
@@ -522,7 +543,10 @@ class JobExecutor:
                 # Wait for an available slot before fetching a new job
                 await semaphore.acquire()

-                result = await self.storage.dequeue_job()
+                # Block for a configured time waiting for a job
+                block_time = self.engine.config.REDIS_STREAM_BLOCK_MS
+                result = await self.storage.dequeue_job(block=block_time if block_time > 0 else None)
+
                 if result:
                     job_id, message_id = result
                     task = create_task(self._process_job(job_id, message_id))
@@ -530,14 +554,18 @@ class JobExecutor:
                     # Release the semaphore slot when the task is done
                     task.add_done_callback(lambda _: semaphore.release())
                 else:
-                    # No job found, release the slot and wait a bit
+                    # Timeout reached, release slot and loop again
                     semaphore.release()
-                    # Prevent busy loop if storage returns None immediately
-                    await sleep(0.1)
+                    # Prevent busy loop if blocking is disabled (e.g. in tests) or failed
+                    if block_time <= 0:
+                        await sleep(0.1)
+
             except CancelledError:
                 break
             except Exception:
                 logger.exception("Error in JobExecutor main loop.")
+                # If an error occurred (e.g. Redis connection lost), sleep briefly to avoid log spam
+                semaphore.release()
                 await sleep(1)
         logger.info("JobExecutor stopped.")
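The new dequeue_job(block=...) parameter together with REDIS_STREAM_BLOCK_MS strongly suggests a Redis Streams consumer group underneath: the loop now parks in XREADGROUP ... BLOCK <ms> instead of polling every 100 ms. A plausible sketch of that backend call, with stream, group, consumer, and field names invented for illustration:

from redis.asyncio import Redis

async def dequeue_job_sketch(redis: Redis, block: int | None) -> tuple[str, str] | None:
    resp = await redis.xreadgroup(
        groupname="executors",      # assumed consumer group
        consumername="executor-1",  # assumed consumer name
        streams={"jobs": ">"},      # ">" = messages never delivered to this group
        count=1,
        block=block,                # milliseconds; None omits BLOCK entirely
    )
    if not resp:
        return None  # timed out with no message
    _stream, messages = resp[0]
    message_id, fields = messages[0]
    return fields[b"job_id"].decode(), message_id.decode()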
@@ -20,19 +20,37 @@ logger = getLogger(__name__)


 class HealthChecker:
-    def __init__(self, engine: "OrchestratorEngine"):
+    def __init__(self, engine: "OrchestratorEngine", interval_seconds: int = 600):
+        self.engine = engine
+        self.storage = engine.storage
+        self.interval_seconds = interval_seconds
         self._running = False
+        from uuid import uuid4
+
+        self._instance_id = str(uuid4())

     async def run(self):
-        logger.info("HealthChecker is now passive and will not perform active checks.")
+        logger.info(f"HealthChecker started (Active Index Cleanup, Instance ID: {self._instance_id}).")
         self._running = True
         while self._running:
             try:
-                # Sleep for a long time, as this checker is passive.
-                # The loop exists to allow for a clean shutdown.
-                await sleep(3600)
+                # Use distributed lock to ensure only one instance cleans up
+                if await self.storage.acquire_lock(
+                    "global_health_check_lock", self._instance_id, self.interval_seconds - 5
+                ):
+                    try:
+                        await self.storage.cleanup_expired_workers()
+                    finally:
+                        # We don't release the lock immediately to prevent other instances from
+                        # running the same task if the interval is small.
+                        pass
+
+                await sleep(self.interval_seconds)
             except CancelledError:
                 break
+            except Exception:
+                logger.exception("Error in HealthChecker main loop.")
+                await sleep(60)
         logger.info("HealthChecker stopped.")

     def stop(self):
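acquire_lock is only called here, not defined, so the following is a hypothetical sketch of the usual Redis recipe it likely wraps: SET NX with a TTL, so the lock expires on its own rather than being released early, matching the comment above. The key prefix is invented:

from redis.asyncio import Redis

async def acquire_lock(redis: Redis, name: str, owner_id: str, ttl_seconds: int) -> bool:
    # SET key value NX EX ttl succeeds only if the key does not exist yet,
    # so exactly one instance wins per TTL window.
    return bool(await redis.set(f"lock:{name}", owner_id, nx=True, ex=ttl_seconds))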
avtomatika/history/base.py CHANGED
@@ -1,25 +1,79 @@
+import asyncio
+import contextlib
 from abc import ABC, abstractmethod
+from logging import getLogger
 from typing import Any

+logger = getLogger(__name__)
+

 class HistoryStorageBase(ABC):
     """Abstract base class for a history store.
-    Defines the interface for logging job and worker events.
+    Implements buffered asynchronous logging to avoid blocking the main loop.
     """

+    def __init__(self):
+        self._queue: asyncio.Queue[tuple[str, dict[str, Any]]] = asyncio.Queue(maxsize=5000)
+        self._worker_task: asyncio.Task | None = None
+
+    async def start(self) -> None:
+        """Starts the background worker for writing logs."""
+        if not self._worker_task:
+            self._worker_task = asyncio.create_task(self._worker())
+            logger.info("HistoryStorage background worker started.")
+
+    async def close(self) -> None:
+        """Stops the background worker and closes resources."""
+        if self._worker_task:
+            self._worker_task.cancel()
+            with contextlib.suppress(asyncio.CancelledError):
+                await self._worker_task
+            self._worker_task = None
+            logger.info("HistoryStorage background worker stopped.")
+
     @abstractmethod
-    async def initialize(self):
+    async def initialize(self) -> None:
         """Performs initialization, e.g., creating tables in the DB."""
         raise NotImplementedError

+    async def log_job_event(self, event_data: dict[str, Any]) -> None:
+        """Queues a job event for logging."""
+        try:
+            self._queue.put_nowait(("job", event_data))
+        except asyncio.QueueFull:
+            logger.warning("History queue full! Dropping job event.")
+
+    async def log_worker_event(self, event_data: dict[str, Any]) -> None:
+        """Queues a worker event for logging."""
+        try:
+            self._queue.put_nowait(("worker", event_data))
+        except asyncio.QueueFull:
+            logger.warning("History queue full! Dropping worker event.")
+
+    async def _worker(self) -> None:
+        while True:
+            try:
+                kind, data = await self._queue.get()
+                try:
+                    if kind == "job":
+                        await self._persist_job_event(data)
+                    elif kind == "worker":
+                        await self._persist_worker_event(data)
+                except Exception as e:
+                    logger.error(f"Error persisting history event: {e}")
+                finally:
+                    self._queue.task_done()
+            except asyncio.CancelledError:
+                break
+
     @abstractmethod
-    async def log_job_event(self, event_data: dict[str, Any]):
-        """Logs an event related to the job lifecycle."""
+    async def _persist_job_event(self, event_data: dict[str, Any]) -> None:
+        """Actual implementation of writing a job event to storage."""
         raise NotImplementedError

     @abstractmethod
-    async def log_worker_event(self, event_data: dict[str, Any]):
-        """Logs an event related to the worker lifecycle."""
+    async def _persist_worker_event(self, event_data: dict[str, Any]) -> None:
+        """Actual implementation of writing a worker event to storage."""
        raise NotImplementedError

     @abstractmethod
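With the buffering moved into the base class, a concrete backend now only overrides the _persist_* hooks (plus the query methods the ABC also declares, elided here). A minimal in-memory backend as an illustration; it is not part of the package:

from typing import Any

from avtomatika.history.base import HistoryStorageBase

class InMemoryHistoryStorage(HistoryStorageBase):
    def __init__(self):
        super().__init__()  # required: sets up the queue and worker slot
        self.job_events: list[dict[str, Any]] = []
        self.worker_events: list[dict[str, Any]] = []

    async def initialize(self) -> None:
        pass

    async def _persist_job_event(self, event_data: dict[str, Any]) -> None:
        self.job_events.append(event_data)

    async def _persist_worker_event(self, event_data: dict[str, Any]) -> None:
        self.worker_events.append(event_data)

    # get_job_history / get_jobs / get_worker_history omitted for brevity.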
avtomatika/history/noop.py CHANGED
@@ -8,20 +8,31 @@ class NoOpHistoryStorage(HistoryStorageBase):
     Used when history storage is not configured.
     """

-    async def initialize(self):
-        # Do nothing
+    def __init__(self):
+        super().__init__()
+
+    async def start(self) -> None:
+        pass
+
+    async def close(self) -> None:
+        pass
+
+    async def initialize(self) -> None:
+        pass
+
+    async def log_job_event(self, event_data: dict[str, Any]) -> None:
+        pass
+
+    async def log_worker_event(self, event_data: dict[str, Any]) -> None:
         pass

-    async def log_job_event(self, event_data: dict[str, Any]):
-        # Do nothing
+    async def _persist_job_event(self, event_data: dict[str, Any]) -> None:
         pass

-    async def log_worker_event(self, event_data: dict[str, Any]):
-        # Do nothing
+    async def _persist_worker_event(self, event_data: dict[str, Any]) -> None:
         pass

     async def get_job_history(self, job_id: str) -> list[dict[str, Any]]:
-        # Always return an empty list
         return []

     async def get_jobs(self, limit: int = 100, offset: int = 0) -> list[dict[str, Any]]:
@@ -46,19 +46,20 @@ class PostgresHistoryStorage(HistoryStorageBase, ABC):
     """Implementation of the history store based on asyncpg for PostgreSQL."""

     def __init__(self, dsn: str, tz_name: str = "UTC"):
+        super().__init__()
         self._dsn = dsn
         self._pool: Pool | None = None
         self.tz_name = tz_name
         self.tz = ZoneInfo(tz_name)

-    async def _setup_connection(self, conn: Connection):
+    async def _setup_connection(self, conn: Connection) -> None:
         """Configures the connection session with the correct timezone."""
         try:
             await conn.execute(f"SET TIME ZONE '{self.tz_name}'")
         except PostgresError as e:
             logger.error(f"Failed to set timezone '{self.tz_name}' for PG connection: {e}")

-    async def initialize(self):
+    async def initialize(self) -> None:
         """Initializes the connection pool to PostgreSQL and creates tables."""
         try:
             # We use init parameter to configure each new connection in the pool
@@ -75,13 +76,14 @@ class PostgresHistoryStorage(HistoryStorageBase, ABC):
             logger.error(f"Failed to initialize PostgreSQL history storage: {e}")
             raise

-    async def close(self):
-        """Closes the connection pool."""
+    async def close(self) -> None:
+        """Closes the connection pool and background worker."""
+        await super().close()
         if self._pool:
             await self._pool.close()
             logger.info("PostgreSQL history storage connection pool closed.")

-    async def log_job_event(self, event_data: dict[str, Any]):
+    async def _persist_job_event(self, event_data: dict[str, Any]) -> None:
         """Logs a job lifecycle event to PostgreSQL."""
         if not self._pool:
             raise RuntimeError("History storage is not initialized.")
@@ -117,7 +119,7 @@ class PostgresHistoryStorage(HistoryStorageBase, ABC):
         except PostgresError as e:
             logger.error(f"Failed to log job event to PostgreSQL: {e}")

-    async def log_worker_event(self, event_data: dict[str, Any]):
+    async def _persist_worker_event(self, event_data: dict[str, Any]) -> None:
         """Logs a worker lifecycle event to PostgreSQL."""
         if not self._pool:
             raise RuntimeError("History storage is not initialized.")
@@ -49,11 +49,12 @@ class SQLiteHistoryStorage(HistoryStorageBase):
     """

     def __init__(self, db_path: str, tz_name: str = "UTC"):
+        super().__init__()
         self._db_path = db_path
         self._conn: Connection | None = None
         self.tz = ZoneInfo(tz_name)

-    async def initialize(self):
+    async def initialize(self) -> None:
         """Initializes the database connection and creates tables if they don't exist."""
         try:
             self._conn = await connect(self._db_path)
@@ -68,8 +69,9 @@ class SQLiteHistoryStorage(HistoryStorageBase):
             logger.error(f"Failed to initialize SQLite history storage: {e}")
             raise

-    async def close(self):
-        """Closes the database connection."""
+    async def close(self) -> None:
+        """Closes the database connection and background worker."""
+        await super().close()
         if self._conn:
             await self._conn.close()
             logger.info("SQLite history storage connection closed.")
@@ -91,7 +93,7 @@ class SQLiteHistoryStorage(HistoryStorageBase):

         return item

-    async def log_job_event(self, event_data: dict[str, Any]):
+    async def _persist_job_event(self, event_data: dict[str, Any]) -> None:
         """Logs a job lifecycle event to the job_history table."""
         if not self._conn:
             raise RuntimeError("History storage is not initialized.")
@@ -128,7 +130,7 @@ class SQLiteHistoryStorage(HistoryStorageBase):
         except Error as e:
             logger.error(f"Failed to log job event: {e}")

-    async def log_worker_event(self, event_data: dict[str, Any]):
+    async def _persist_worker_event(self, event_data: dict[str, Any]) -> None:
         """Logs a worker lifecycle event to the worker_history table."""
         if not self._conn:
             raise RuntimeError("History storage is not initialized.")
avtomatika/metrics.py CHANGED
@@ -12,7 +12,7 @@ task_queue_length: Gauge
 active_workers: Gauge


-def init_metrics():
+def init_metrics() -> None:
     """
     Initializes Prometheus metrics.
     Uses a registry check for idempotency, which is important for tests.
avtomatika/reputation.py CHANGED
@@ -52,48 +52,54 @@ class ReputationCalculator:
     async def calculate_all_reputations(self):
         """Calculates and updates the reputation for all active workers."""
         logger.info("Starting reputation calculation for all workers...")
-        workers = await self.storage.get_available_workers()
-        if not workers:
+
+        # Get only IDs of active workers to avoid O(N) scan of all data
+        worker_ids = await self.storage.get_active_worker_ids()
+
+        if not worker_ids:
             logger.info("No active workers found for reputation calculation.")
             return

-        for worker in workers:
-            worker_id = worker.get("worker_id")
-            if not worker_id:
-                continue
-
-            history = await self.history_storage.get_worker_history(
-                worker_id,
-                since_days=REPUTATION_HISTORY_DAYS,
-            )
-
-            # Count only task completion events
-            task_finished_events = [event for event in history if event.get("event_type") == "task_finished"]
-
-            if not task_finished_events:
-                # If there is no history, the reputation does not change (remains 1.0 by default)
-                continue
-
-            successful_tasks = 0
-            for event in task_finished_events:
-                # Extract the result from the snapshot
-                snapshot = event.get("context_snapshot", {})
-                result = snapshot.get("result", {})
-                if result.get("status") == "success":
-                    successful_tasks += 1
-
-            total_tasks = len(task_finished_events)
-            new_reputation = successful_tasks / total_tasks if total_tasks > 0 else 1.0
-
-            # Round for cleanliness
-            new_reputation = round(new_reputation, 4)
-
-            logger.info(
-                f"Updating reputation for worker {worker_id}: {worker.get('reputation')} -> {new_reputation}",
-            )
-            await self.storage.update_worker_data(
-                worker_id,
-                {"reputation": new_reputation},
-            )
+        logger.info(f"Recalculating reputation for {len(worker_ids)} workers.")
+
+        for worker_id in worker_ids:
+            if not self._running:
+                break
+
+            try:
+                history = await self.history_storage.get_worker_history(
+                    worker_id,
+                    since_days=REPUTATION_HISTORY_DAYS,
+                )
+
+                # Count only task completion events
+                task_finished_events = [event for event in history if event.get("event_type") == "task_finished"]
+
+                if not task_finished_events:
+                    # If there is no history, skip to next worker
+                    continue
+
+                successful_tasks = 0
+                for event in task_finished_events:
+                    # Extract the result from the snapshot
+                    snapshot = event.get("context_snapshot", {})
+                    result = snapshot.get("result", {})
+                    if result.get("status") == "success":
+                        successful_tasks += 1
+
+                total_tasks = len(task_finished_events)
+                new_reputation = successful_tasks / total_tasks if total_tasks > 0 else 1.0
+                new_reputation = round(new_reputation, 4)
+
+                await self.storage.update_worker_data(
+                    worker_id,
+                    {"reputation": new_reputation},
+                )
+
+                # Throttling: Small sleep to prevent DB spikes
+                await sleep(0.1)
+
+            except Exception as e:
+                logger.error(f"Failed to calculate reputation for worker {worker_id}: {e}")

         logger.info("Reputation calculation finished.")
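The scoring rule itself is unchanged by this diff; as a quick worked example:

# A worker with 47 successful outcomes among 50 "task_finished" events
# in the history window ends up at 0.94; a worker with no finished tasks
# is skipped and keeps its current reputation.
successful_tasks, total_tasks = 47, 50
new_reputation = round(successful_tasks / total_tasks, 4)  # 0.94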