avtomatika 1.0b8-py3-none-any.whl → 1.0b9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
avtomatika/security.py CHANGED
@@ -10,6 +10,62 @@ from .storage.base import StorageBackend
 Handler = Callable[[web.Request], Awaitable[web.Response]]
 
 
+async def verify_worker_auth(
+    storage: StorageBackend,
+    config: Config,
+    token: str | None,
+    cert_identity: str | None,
+    worker_id_hint: str | None,
+) -> str:
+    """
+    Verifies worker authentication using token or mTLS.
+    Returns authenticated worker_id.
+    Raises ValueError (400), PermissionError (401/403) on failure.
+    """
+    # mTLS Check
+    if cert_identity:
+        if worker_id_hint and cert_identity != worker_id_hint:
+            raise PermissionError(
+                f"Unauthorized: Certificate CN '{cert_identity}' does not match worker_id '{worker_id_hint}'"
+            )
+        return cert_identity
+
+    # Token Check
+    if not token:
+        raise PermissionError(f"Missing {AUTH_HEADER_WORKER} header or client certificate")
+
+    hashed_provided_token = sha256(token.encode()).hexdigest()
+
+    # STS Access Token
+    token_worker_id = await storage.verify_worker_access_token(hashed_provided_token)
+    if token_worker_id:
+        if worker_id_hint and token_worker_id != worker_id_hint:
+            raise PermissionError(
+                f"Unauthorized: Access Token belongs to '{token_worker_id}', but request is for '{worker_id_hint}'"
+            )
+        return token_worker_id
+
+    # Individual/Global Token
+    if not worker_id_hint:
+        if config.GLOBAL_WORKER_TOKEN and token == config.GLOBAL_WORKER_TOKEN:
+            return "unknown_authenticated_by_global_token"
+
+        raise PermissionError("Unauthorized: Invalid token or missing worker_id hint")
+
+    # Individual Token for specific worker
+    expected_token_hash = await storage.get_worker_token(worker_id_hint)
+    if expected_token_hash:
+        if hashed_provided_token == expected_token_hash:
+            return worker_id_hint
+        raise PermissionError("Unauthorized: Invalid individual worker token")
+
+    # Global Token Fallback
+    if config.GLOBAL_WORKER_TOKEN and token == config.GLOBAL_WORKER_TOKEN:
+        return worker_id_hint
+
+    raise PermissionError("Unauthorized: No valid token found")
+
+
 def client_auth_middleware_factory(
     storage: StorageBackend,
 ) -> Any:
@@ -38,77 +94,3 @@ def client_auth_middleware_factory(
         return await handler(request)
 
     return middleware
-
-
-def worker_auth_middleware_factory(
-    storage: StorageBackend,
-    config: Config,
-) -> Any:
-    """
-    Middleware factory for worker authentication.
-    It supports both individual tokens and a global fallback token for backward compatibility.
-    It also attaches the authenticated worker_id to the request.
-    """
-
-    @web.middleware
-    async def middleware(request: web.Request, handler: Handler) -> web.Response:
-        provided_token = request.headers.get(AUTH_HEADER_WORKER)
-        if not provided_token:
-            return web.json_response(
-                {"error": f"Missing {AUTH_HEADER_WORKER} header"},
-                status=401,
-            )
-
-        worker_id = request.match_info.get("worker_id")
-        data = None
-
-        # For specific endpoints, worker_id is in the body.
-        # We need to read the body here, which can be tricky as it's a stream.
-        # We clone the request to allow the handler to read the body again.
-        if not worker_id and (request.path.endswith("/register") or request.path.endswith("/tasks/result")):
-            try:
-                cloned_request = request.clone()
-                data = await cloned_request.json()
-                worker_id = data.get("worker_id")
-                # Attach the parsed data to the request so the handler doesn't need to re-parse
-                if request.path.endswith("/register"):
-                    request["worker_registration_data"] = data
-            except Exception:
-                return web.json_response({"error": "Invalid JSON body"}, status=400)
-
-        # If no worker_id could be determined from path or body, we can only validate against the global token.
-        if not worker_id:
-            if provided_token == config.GLOBAL_WORKER_TOKEN:
-                # We don't know the worker_id, so we can't attach it.
-                return await handler(request)
-            else:
-                return web.json_response(
-                    {"error": "Unauthorized: Invalid token or missing worker_id"},
-                    status=401,
-                )
-
-        # --- Individual Token Check ---
-        expected_token_hash = await storage.get_worker_token(worker_id)
-        if expected_token_hash:
-            hashed_provided_token = sha256(provided_token.encode()).hexdigest()
-            if hashed_provided_token == expected_token_hash:
-                request["worker_id"] = worker_id  # Attach authenticated worker_id
-                return await handler(request)
-            else:
-                # If an individual token exists, we do not fall back to the global token.
-                return web.json_response(
-                    {"error": "Unauthorized: Invalid individual worker token"},
-                    status=401,
-                )
-
-        # --- Global Token Fallback ---
-        if config.GLOBAL_WORKER_TOKEN and provided_token == config.GLOBAL_WORKER_TOKEN:
-            request["worker_id"] = worker_id  # Attach authenticated worker_id
-            return await handler(request)
-
-        return web.json_response(
-            {"error": "Unauthorized: No valid token found"},
-            status=401,
-        )
-
-    return middleware
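
For context, the standalone verify_worker_auth above takes over the checks of the removed worker_auth_middleware_factory and leaves HTTP concerns to its callers. Below is a minimal sketch of how a caller might wire it back into an aiohttp middleware; the wrapper name, the request key used for the mTLS identity, and the 400/401 mapping are editorial assumptions, not part of the package:

# Illustrative sketch only. verify_worker_auth comes from avtomatika/security.py (shown above);
# AUTH_HEADER_WORKER is assumed to be reachable through the same module's namespace.
from aiohttp import web

from avtomatika.security import AUTH_HEADER_WORKER, verify_worker_auth


def worker_auth_middleware(storage, config):
    @web.middleware
    async def middleware(request: web.Request, handler):
        try:
            worker_id = await verify_worker_auth(
                storage=storage,
                config=config,
                token=request.headers.get(AUTH_HEADER_WORKER),
                cert_identity=request.get("cert_identity"),  # hypothetical key set by a TLS layer
                worker_id_hint=request.match_info.get("worker_id"),
            )
        except ValueError as exc:
            return web.json_response({"error": str(exc)}, status=400)
        except PermissionError as exc:
            return web.json_response({"error": str(exc)}, status=401)
        request["worker_id"] = worker_id  # downstream handlers see the authenticated identity
        return await handler(request)

    return middleware
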
File without changes
@@ -0,0 +1,266 @@
+from hashlib import sha256
+from logging import getLogger
+from secrets import token_urlsafe
+from time import monotonic
+from typing import Any, Optional
+
+from rxon.models import TokenResponse
+from rxon.validators import validate_identifier
+
+from ..app_keys import S3_SERVICE_KEY
+from ..config import Config
+from ..constants import (
+    ERROR_CODE_INTEGRITY_MISMATCH,
+    ERROR_CODE_INVALID_INPUT,
+    ERROR_CODE_PERMANENT,
+    ERROR_CODE_TRANSIENT,
+    JOB_STATUS_CANCELLED,
+    JOB_STATUS_FAILED,
+    JOB_STATUS_QUARANTINED,
+    JOB_STATUS_RUNNING,
+    JOB_STATUS_WAITING_FOR_PARALLEL,
+    TASK_STATUS_CANCELLED,
+    TASK_STATUS_FAILURE,
+    TASK_STATUS_SUCCESS,
+)
+from ..history.base import HistoryStorageBase
+from ..storage.base import StorageBackend
+
+logger = getLogger(__name__)
+
+
+class WorkerService:
+    def __init__(
+        self,
+        storage: StorageBackend,
+        history_storage: HistoryStorageBase,
+        config: Config,
+        engine: Any,
+    ):
+        self.storage = storage
+        self.history_storage = history_storage
+        self.config = config
+        self.engine = engine
+
+    async def register_worker(self, worker_data: dict[str, Any]) -> None:
+        """
+        Registers a new worker.
+        :param worker_data: Raw dictionary from request (to be validated/converted to Model later)
+        """
+        worker_id = worker_data.get("worker_id")
+        if not worker_id:
+            raise ValueError("Missing required field: worker_id")
+
+        validate_identifier(worker_id, "worker_id")
+
+        # S3 Consistency Check
+        s3_service = self.engine.app.get(S3_SERVICE_KEY)
+        if s3_service:
+            orchestrator_s3_hash = s3_service.get_config_hash()
+            worker_capabilities = worker_data.get("capabilities", {})
+            worker_s3_hash = worker_capabilities.get("s3_config_hash")
+
+            if orchestrator_s3_hash and worker_s3_hash and orchestrator_s3_hash != worker_s3_hash:
+                logger.warning(
+                    f"Worker '{worker_id}' has a different S3 configuration hash! "
+                    f"Orchestrator: {orchestrator_s3_hash}, Worker: {worker_s3_hash}. "
+                    "This may lead to 'split-brain' storage issues."
+                )
+
+        ttl = self.config.WORKER_HEALTH_CHECK_INTERVAL_SECONDS * 2
+        await self.storage.register_worker(worker_id, worker_data, ttl)
+
+        logger.info(f"Worker '{worker_id}' registered with info: {worker_data}")
+
+        await self.history_storage.log_worker_event(
+            {
+                "worker_id": worker_id,
+                "event_type": "registered",
+                "worker_info_snapshot": worker_data,
+            }
+        )
+
+    async def get_next_task(self, worker_id: str) -> Optional[dict[str, Any]]:
+        """
+        Retrieves the next task for a worker using long-polling configuration.
+        """
+        logger.debug(f"Worker {worker_id} is requesting a new task.")
+        return await self.storage.dequeue_task_for_worker(worker_id, self.config.WORKER_POLL_TIMEOUT_SECONDS)
+
+    async def process_task_result(self, result_payload: dict[str, Any], authenticated_worker_id: str) -> str:
+        """
+        Processes a task result submitted by a worker.
+        Returns a status string constant.
+        """
+        payload_worker_id = result_payload.get("worker_id")
+
+        if payload_worker_id and payload_worker_id != authenticated_worker_id:
+            raise PermissionError(
+                f"Forbidden: Authenticated worker '{authenticated_worker_id}' "
+                f"cannot submit results for another worker '{payload_worker_id}'."
+            )
+
+        job_id = result_payload.get("job_id")
+        task_id = result_payload.get("task_id")
+        result_data = result_payload.get("result", {})
+
+        if not job_id or not task_id:
+            raise ValueError("job_id and task_id are required")
+
+        job_state = await self.storage.get_job_state(job_id)
+        if not job_state:
+            raise LookupError("Job not found")
+
+        if job_state.get("status") == JOB_STATUS_WAITING_FOR_PARALLEL:
+            await self.storage.remove_job_from_watch(f"{job_id}:{task_id}")
+            job_state.setdefault("aggregation_results", {})[task_id] = result_data
+
+            branches = job_state.setdefault("active_branches", [])
+            if task_id in branches:
+                branches.remove(task_id)
+
+            if not branches:
+                logger.info(f"All parallel branches for job {job_id} have completed.")
+                job_state["status"] = JOB_STATUS_RUNNING
+                job_state["current_state"] = job_state["aggregation_target"]
+                await self.storage.save_job_state(job_id, job_state)
+                await self.storage.enqueue_job(job_id)
+            else:
+                logger.info(
+                    f"Branch {task_id} for job {job_id} completed. Waiting for {len(branches)} more.",
+                )
+                await self.storage.save_job_state(job_id, job_state)
+
+            return "parallel_branch_result_accepted"
+
+        await self.storage.remove_job_from_watch(job_id)
+
+        now = monotonic()
+        dispatched_at = job_state.get("task_dispatched_at", now)
+        duration_ms = int((now - dispatched_at) * 1000)
+
+        await self.history_storage.log_job_event(
+            {
+                "job_id": job_id,
+                "state": job_state.get("current_state"),
+                "event_type": "task_finished",
+                "duration_ms": duration_ms,
+                "worker_id": authenticated_worker_id,
+                "context_snapshot": {**job_state, "result": result_data},
+            },
+        )
+
+        result_status = result_data.get("status", TASK_STATUS_SUCCESS)  # Default to success? Constant?
+
+        if result_status == TASK_STATUS_FAILURE:
+            return await self._handle_task_failure(job_state, task_id, result_data)
+
+        if result_status == TASK_STATUS_CANCELLED:
+            logger.info(f"Task {task_id} for job {job_id} was cancelled by worker.")
+            job_state["status"] = JOB_STATUS_CANCELLED
+            await self.storage.save_job_state(job_id, job_state)
+
+            transitions = job_state.get("current_task_transitions", {})
+            if next_state := transitions.get("cancelled"):
+                job_state["current_state"] = next_state
+                job_state["status"] = JOB_STATUS_RUNNING
+                await self.storage.save_job_state(job_id, job_state)
+                await self.storage.enqueue_job(job_id)
+            return "result_accepted_cancelled"
+
+        transitions = job_state.get("current_task_transitions", {})
+        result_status = result_data.get("status", TASK_STATUS_SUCCESS)
+        next_state = transitions.get(result_status)
+
+        if next_state:
+            logger.info(f"Job {job_id} transitioning based on worker status '{result_status}' to state '{next_state}'")
+
+            worker_data_content = result_data.get("data")
+            if worker_data_content and isinstance(worker_data_content, dict):
+                if "state_history" not in job_state:
+                    job_state["state_history"] = {}
+                job_state["state_history"].update(worker_data_content)
+
+            data_metadata = result_payload.get("data_metadata")
+            if data_metadata:
+                if "data_metadata" not in job_state:
+                    job_state["data_metadata"] = {}
+                job_state["data_metadata"].update(data_metadata)
+                logger.debug(f"Stored data metadata for job {job_id}: {list(data_metadata.keys())}")
+
+            job_state["current_state"] = next_state
+            job_state["status"] = JOB_STATUS_RUNNING
+            await self.storage.save_job_state(job_id, job_state)
+            await self.storage.enqueue_job(job_id)
+            return "result_accepted_success"
+        else:
+            logger.error(f"Job {job_id} failed. Worker returned unhandled status '{result_status}'.")
+            job_state["status"] = JOB_STATUS_FAILED
+            job_state["error_message"] = f"Worker returned unhandled status: {result_status}"
+            await self.storage.save_job_state(job_id, job_state)
+            return "result_accepted_failure"
+
+    async def _handle_task_failure(self, job_state: dict, task_id: str, result_data: dict) -> str:
+        error_details = result_data.get("error", {})
+        error_type = ERROR_CODE_TRANSIENT
+        error_message = "No error details provided."
+
+        if isinstance(error_details, dict):
+            error_type = error_details.get("code", ERROR_CODE_TRANSIENT)
+            error_message = error_details.get("message", "No error message provided.")
+        elif isinstance(error_details, str):
+            error_message = error_details
+
+        job_id = job_state["id"]
+        logger.warning(f"Task {task_id} for job {job_id} failed with error type '{error_type}'.")
+
+        if error_type == ERROR_CODE_PERMANENT:
+            job_state["status"] = JOB_STATUS_QUARANTINED
+            job_state["error_message"] = f"Task failed with permanent error: {error_message}"
+            await self.storage.save_job_state(job_id, job_state)
+            await self.storage.quarantine_job(job_id)
+        elif error_type == ERROR_CODE_INVALID_INPUT:
+            job_state["status"] = JOB_STATUS_FAILED
+            job_state["error_message"] = f"Task failed due to invalid input: {error_message}"
+            await self.storage.save_job_state(job_id, job_state)
+        elif error_type == ERROR_CODE_INTEGRITY_MISMATCH:
+            job_state["status"] = JOB_STATUS_FAILED
+            job_state["error_message"] = f"Task failed due to data integrity mismatch: {error_message}"
+            await self.storage.save_job_state(job_id, job_state)
+            logger.critical(f"Data integrity mismatch detected for job {job_id}: {error_message}")
+        else:
+            await self.engine.handle_task_failure(job_state, task_id, error_message)
+
+        return "result_accepted_failure"
+
+    async def issue_access_token(self, worker_id: str) -> TokenResponse:
+        """Generates and stores a temporary access token."""
+        raw_token = token_urlsafe(32)
+        token_hash = sha256(raw_token.encode()).hexdigest()
+        ttl = 3600
+
+        await self.storage.save_worker_access_token(worker_id, token_hash, ttl)
+        logger.info(f"Issued temporary access token for worker {worker_id}")
+
+        return TokenResponse(access_token=raw_token, expires_in=ttl, worker_id=worker_id)
+
+    async def update_worker_heartbeat(
+        self, worker_id: str, update_data: Optional[dict[str, Any]]
+    ) -> Optional[dict[str, Any]]:
+        """Updates worker TTL and status."""
+        ttl = self.config.WORKER_HEALTH_CHECK_INTERVAL_SECONDS * 2
+
+        if update_data:
+            updated_worker = await self.storage.update_worker_status(worker_id, update_data, ttl)
+            if updated_worker:
+                await self.history_storage.log_worker_event(
+                    {
+                        "worker_id": worker_id,
+                        "event_type": "status_update",
+                        "worker_info_snapshot": updated_worker,
+                    },
+                )
+            return updated_worker
+        else:
+            refreshed = await self.storage.refresh_worker_ttl(worker_id, ttl)
+            return {"status": "ttl_refreshed"} if refreshed else None
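
WorkerService.process_task_result reports outcomes by raising ValueError, LookupError, and PermissionError rather than building HTTP responses, so the transport layer decides the status codes. A hedged sketch of an aiohttp handler doing that translation; the handler name, the app key, and the exact status mapping are assumptions:

from aiohttp import web


async def handle_task_result(request: web.Request) -> web.Response:
    service = request.app["worker_service"]  # hypothetical app key holding a WorkerService
    authenticated_worker_id = request["worker_id"]  # assumed to be set by the worker auth layer
    payload = await request.json()
    try:
        status = await service.process_task_result(payload, authenticated_worker_id)
    except ValueError as exc:       # missing job_id / task_id
        return web.json_response({"error": str(exc)}, status=400)
    except PermissionError as exc:  # worker_id mismatch
        return web.json_response({"error": str(exc)}, status=403)
    except LookupError as exc:      # unknown job
        return web.json_response({"error": str(exc)}, status=404)
    return web.json_response({"status": status})
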
@@ -292,6 +292,16 @@ class StorageBackend(ABC):
         """Retrieves an individual token for a specific worker."""
         raise NotImplementedError
 
+    @abstractmethod
+    async def save_worker_access_token(self, worker_id: str, token: str, ttl: int) -> None:
+        """Saves a temporary access token for a worker (STS)."""
+        raise NotImplementedError
+
+    @abstractmethod
+    async def verify_worker_access_token(self, token: str) -> str | None:
+        """Verifies a temporary access token and returns the associated worker_id if valid."""
+        raise NotImplementedError
+
     @abstractmethod
     async def get_worker_info(self, worker_id: str) -> dict[str, Any] | None:
         """Get complete information about a worker by its ID."""
@@ -189,10 +189,11 @@ class MemoryStorage(StorageBackend):
         async with self._lock:
             self._watched_jobs.pop(job_id, None)
 
-    async def get_timed_out_jobs(self) -> list[str]:
+    async def get_timed_out_jobs(self, limit: int = 100) -> list[str]:
         async with self._lock:
             now = monotonic()
             timed_out_ids = [job_id for job_id, timeout_at in self._watched_jobs.items() if timeout_at <= now]
+            timed_out_ids = timed_out_ids[:limit]
             for job_id in timed_out_ids:
                 self._watched_jobs.pop(job_id, None)
             return timed_out_ids
@@ -331,6 +332,16 @@ class MemoryStorage(StorageBackend):
         async with self._lock:
             return self._worker_tokens.get(worker_id)
 
+    async def save_worker_access_token(self, worker_id: str, token: str, ttl: int) -> None:
+        async with self._lock:
+            self._generic_keys[f"sts:{token}"] = worker_id
+            self._generic_key_ttls[f"sts:{token}"] = monotonic() + ttl
+
+    async def verify_worker_access_token(self, token: str) -> str | None:
+        async with self._lock:
+            await self._clean_expired()
+            return self._generic_keys.get(f"sts:{token}")
+
     async def set_task_cancellation_flag(self, task_id: str) -> None:
         key = f"task_cancel:{task_id}"
         await self.increment_key_with_ttl(key, 3600)
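
Together with WorkerService.issue_access_token, these methods form a simple STS round trip: the raw token is returned to the worker, only its SHA-256 hash is stored under an sts: key, and verify_worker_auth hashes the presented token before lookup. A self-contained sketch of that scheme, with a plain dict standing in for the backend (the dict and helper names are illustrative only, and TTL handling is omitted):

from hashlib import sha256
from secrets import token_urlsafe

store: dict[str, str] = {}  # stands in for MemoryStorage._generic_keys


def issue(worker_id: str) -> str:
    raw_token = token_urlsafe(32)
    store[f"sts:{sha256(raw_token.encode()).hexdigest()}"] = worker_id
    return raw_token  # only the hash is ever persisted


def verify(presented: str) -> str | None:
    return store.get(f"sts:{sha256(presented.encode()).hexdigest()}")


raw = issue("worker-1")
assert verify(raw) == "worker-1"
assert verify("not-the-token") is None
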
@@ -291,10 +291,33 @@ class RedisStorage(StorageBackend):
 
     async def get_timed_out_jobs(self, limit: int = 100) -> list[str]:
         now = get_running_loop().time()
-        ids = await self._redis.zrangebyscore("orchestrator:watched_jobs", 0, now, start=0, num=limit)
+        # Lua script to atomically fetch and remove timed out jobs
+        LUA_POP_TIMEOUTS = """
+        local now = ARGV[1]
+        local limit = ARGV[2]
+        local ids = redis.call('ZRANGEBYSCORE', KEYS[1], 0, now, 'LIMIT', 0, limit)
+        if #ids > 0 then
+            redis.call('ZREM', KEYS[1], unpack(ids))
+        end
+        return ids
+        """
+        try:
+            sha = await self._redis.script_load(LUA_POP_TIMEOUTS)
+            ids = await self._redis.evalsha(sha, 1, "orchestrator:watched_jobs", now, limit)
+        except NoScriptError:
+            ids = await self._redis.eval(LUA_POP_TIMEOUTS, 1, "orchestrator:watched_jobs", now, limit)
+        except ResponseError as e:
+            # Fallback for Redis versions that don't support script_load/evalsha or other errors
+            if "unknown command" in str(e).lower():
+                logger.warning("Redis does not support LUA scripts. Falling back to non-atomic get_timed_out_jobs.")
+                ids = await self._redis.zrangebyscore("orchestrator:watched_jobs", 0, now, start=0, num=limit)
+                if ids:
+                    await self._redis.zrem("orchestrator:watched_jobs", *ids)  # type: ignore
+            else:
+                raise e
+
         if ids:
-            await self._redis.zrem("orchestrator:watched_jobs", *ids)  # type: ignore
-            return [i.decode("utf-8") for i in ids]
+            return [i.decode("utf-8") if isinstance(i, bytes) else i for i in ids]
         return []
 
     async def enqueue_job(self, job_id: str) -> None:
@@ -411,6 +434,13 @@
         token = await self._redis.get(f"orchestrator:worker:token:{worker_id}")
         return token.decode("utf-8") if token else None
 
+    async def save_worker_access_token(self, worker_id: str, token: str, ttl: int) -> None:
+        await self._redis.set(f"orchestrator:sts:token:{token}", worker_id, ex=ttl)
+
+    async def verify_worker_access_token(self, token: str) -> str | None:
+        worker_id = await self._redis.get(f"orchestrator:sts:token:{token}")
+        return worker_id.decode("utf-8") if worker_id else None
+
     async def acquire_lock(self, key: str, holder_id: str, ttl: int) -> bool:
         return bool(await self._redis.set(f"orchestrator:lock:{key}", holder_id, nx=True, ex=ttl))
 
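
The Lua script closes the race between ZRANGEBYSCORE and ZREM, so two concurrent pollers cannot claim the same timed-out job. For reference, redis-py can manage the EVALSHA/NOSCRIPT handshake itself via register_script; a standalone sketch under the assumption of a local Redis instance and the same key name:

import asyncio

from redis.asyncio import Redis

LUA_POP_TIMEOUTS = """
local ids = redis.call('ZRANGEBYSCORE', KEYS[1], 0, ARGV[1], 'LIMIT', 0, ARGV[2])
if #ids > 0 then
    redis.call('ZREM', KEYS[1], unpack(ids))
end
return ids
"""


async def main() -> None:
    redis = Redis()  # assumes a Redis server on localhost:6379
    pop_timed_out = redis.register_script(LUA_POP_TIMEOUTS)  # caches the SHA, re-sends via EVAL on NOSCRIPT
    now = asyncio.get_running_loop().time()  # same clock the diff uses for scores
    ids = await pop_timed_out(keys=["orchestrator:watched_jobs"], args=[now, 100])
    print([i.decode("utf-8") if isinstance(i, bytes) else i for i in ids])
    await redis.aclose()  # redis-py >= 5; use close() on older versions


asyncio.run(main())
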
avtomatika/watcher.py CHANGED
@@ -3,6 +3,8 @@ from logging import getLogger
 from typing import TYPE_CHECKING
 from uuid import uuid4
 
+from .constants import JOB_STATUS_FAILED, JOB_STATUS_WAITING_FOR_WORKER
+
 if TYPE_CHECKING:
     from .engine import OrchestratorEngine
 
@@ -38,8 +40,8 @@ class Watcher:
             try:
                 # Get the latest version to avoid overwriting
                 job_state = await self.storage.get_job_state(job_id)
-                if job_state and job_state["status"] == "waiting_for_worker":
-                    job_state["status"] = "failed"
+                if job_state and job_state["status"] == JOB_STATUS_WAITING_FOR_WORKER:
+                    job_state["status"] = JOB_STATUS_FAILED
                     job_state["error_message"] = "Worker task timed out."
                     await self.storage.save_job_state(job_id, job_state)
 
avtomatika/ws_manager.py CHANGED
@@ -4,6 +4,8 @@ from typing import Any
 
 from aiohttp import web
 
+from .constants import MSG_TYPE_PROGRESS
+
 logger = getLogger(__name__)
 
 
@@ -50,9 +52,7 @@
     async def handle_message(worker_id: str, message: dict[str, Any]) -> None:
         """Handles an incoming message from a worker."""
         event_type = message.get("event")
-        if event_type == "progress_update":
-            # In a real application, you'd likely forward this to a history store
-            # or a pub/sub system for real-time UI updates.
+        if event_type == MSG_TYPE_PROGRESS:
             logger.info(
                 f"Received progress update from worker {worker_id} for job {message.get('job_id')}: "
                 f"{message.get('progress', 0) * 100:.0f}% - {message.get('message', '')}"