krons-0.1.0-py3-none-any.whl

This diff shows the contents of a publicly released package version as it appears in its public registry, and is provided for informational purposes only.
Files changed (101)
  1. kronos/__init__.py +0 -0
  2. kronos/core/__init__.py +145 -0
  3. kronos/core/broadcaster.py +116 -0
  4. kronos/core/element.py +225 -0
  5. kronos/core/event.py +316 -0
  6. kronos/core/eventbus.py +116 -0
  7. kronos/core/flow.py +356 -0
  8. kronos/core/graph.py +442 -0
  9. kronos/core/node.py +982 -0
  10. kronos/core/pile.py +575 -0
  11. kronos/core/processor.py +494 -0
  12. kronos/core/progression.py +296 -0
  13. kronos/enforcement/__init__.py +57 -0
  14. kronos/enforcement/common/__init__.py +34 -0
  15. kronos/enforcement/common/boolean.py +85 -0
  16. kronos/enforcement/common/choice.py +97 -0
  17. kronos/enforcement/common/mapping.py +118 -0
  18. kronos/enforcement/common/model.py +102 -0
  19. kronos/enforcement/common/number.py +98 -0
  20. kronos/enforcement/common/string.py +140 -0
  21. kronos/enforcement/context.py +129 -0
  22. kronos/enforcement/policy.py +80 -0
  23. kronos/enforcement/registry.py +153 -0
  24. kronos/enforcement/rule.py +312 -0
  25. kronos/enforcement/service.py +370 -0
  26. kronos/enforcement/validator.py +198 -0
  27. kronos/errors.py +146 -0
  28. kronos/operations/__init__.py +32 -0
  29. kronos/operations/builder.py +228 -0
  30. kronos/operations/flow.py +398 -0
  31. kronos/operations/node.py +101 -0
  32. kronos/operations/registry.py +92 -0
  33. kronos/protocols.py +414 -0
  34. kronos/py.typed +0 -0
  35. kronos/services/__init__.py +81 -0
  36. kronos/services/backend.py +286 -0
  37. kronos/services/endpoint.py +608 -0
  38. kronos/services/hook.py +471 -0
  39. kronos/services/imodel.py +465 -0
  40. kronos/services/registry.py +115 -0
  41. kronos/services/utilities/__init__.py +36 -0
  42. kronos/services/utilities/header_factory.py +87 -0
  43. kronos/services/utilities/rate_limited_executor.py +271 -0
  44. kronos/services/utilities/rate_limiter.py +180 -0
  45. kronos/services/utilities/resilience.py +414 -0
  46. kronos/session/__init__.py +41 -0
  47. kronos/session/exchange.py +258 -0
  48. kronos/session/message.py +60 -0
  49. kronos/session/session.py +411 -0
  50. kronos/specs/__init__.py +25 -0
  51. kronos/specs/adapters/__init__.py +0 -0
  52. kronos/specs/adapters/_utils.py +45 -0
  53. kronos/specs/adapters/dataclass_field.py +246 -0
  54. kronos/specs/adapters/factory.py +56 -0
  55. kronos/specs/adapters/pydantic_adapter.py +309 -0
  56. kronos/specs/adapters/sql_ddl.py +946 -0
  57. kronos/specs/catalog/__init__.py +36 -0
  58. kronos/specs/catalog/_audit.py +39 -0
  59. kronos/specs/catalog/_common.py +43 -0
  60. kronos/specs/catalog/_content.py +59 -0
  61. kronos/specs/catalog/_enforcement.py +70 -0
  62. kronos/specs/factory.py +120 -0
  63. kronos/specs/operable.py +314 -0
  64. kronos/specs/phrase.py +405 -0
  65. kronos/specs/protocol.py +140 -0
  66. kronos/specs/spec.py +506 -0
  67. kronos/types/__init__.py +60 -0
  68. kronos/types/_sentinel.py +311 -0
  69. kronos/types/base.py +369 -0
  70. kronos/types/db_types.py +260 -0
  71. kronos/types/identity.py +66 -0
  72. kronos/utils/__init__.py +40 -0
  73. kronos/utils/_hash.py +234 -0
  74. kronos/utils/_json_dump.py +392 -0
  75. kronos/utils/_lazy_init.py +63 -0
  76. kronos/utils/_to_list.py +165 -0
  77. kronos/utils/_to_num.py +85 -0
  78. kronos/utils/_utils.py +375 -0
  79. kronos/utils/concurrency/__init__.py +205 -0
  80. kronos/utils/concurrency/_async_call.py +333 -0
  81. kronos/utils/concurrency/_cancel.py +122 -0
  82. kronos/utils/concurrency/_errors.py +96 -0
  83. kronos/utils/concurrency/_patterns.py +363 -0
  84. kronos/utils/concurrency/_primitives.py +328 -0
  85. kronos/utils/concurrency/_priority_queue.py +135 -0
  86. kronos/utils/concurrency/_resource_tracker.py +110 -0
  87. kronos/utils/concurrency/_run_async.py +67 -0
  88. kronos/utils/concurrency/_task.py +95 -0
  89. kronos/utils/concurrency/_utils.py +79 -0
  90. kronos/utils/fuzzy/__init__.py +14 -0
  91. kronos/utils/fuzzy/_extract_json.py +90 -0
  92. kronos/utils/fuzzy/_fuzzy_json.py +288 -0
  93. kronos/utils/fuzzy/_fuzzy_match.py +149 -0
  94. kronos/utils/fuzzy/_string_similarity.py +187 -0
  95. kronos/utils/fuzzy/_to_dict.py +396 -0
  96. kronos/utils/sql/__init__.py +13 -0
  97. kronos/utils/sql/_sql_validation.py +142 -0
  98. krons-0.1.0.dist-info/METADATA +70 -0
  99. krons-0.1.0.dist-info/RECORD +101 -0
  100. krons-0.1.0.dist-info/WHEEL +4 -0
  101. krons-0.1.0.dist-info/licenses/LICENSE +201 -0
kronos/operations/flow.py
@@ -0,0 +1,398 @@
# Copyright (c) 2025 - 2026, HaiyangLi <quantocean.li at gmail dot com>
# SPDX-License-Identifier: Apache-2.0

"""Dependency-aware operation graph execution.

Execute DAGs of Operations with concurrency control and progress streaming.
Core functions: flow() for batch results, flow_stream() for incremental results.
"""

from __future__ import annotations

import logging
from collections.abc import AsyncGenerator
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any
from uuid import UUID

from kronos.core import EventStatus, Graph
from kronos.types import Undefined, UndefinedType, is_sentinel
from kronos.utils import concurrency
from kronos.utils.concurrency import CapacityLimiter, CompletionStream

from .node import Operation

if TYPE_CHECKING:
    from kronos.session import Branch, Session

logger = logging.getLogger(__name__)

__all__ = ("DependencyAwareExecutor", "OperationResult", "flow", "flow_stream")


@dataclass
class OperationResult:
    """Single operation result from streaming execution.

    Attributes:
        name: Operation name from metadata.
        result: Return value (None if failed).
        error: Exception instance if failed, else None.
        completed: Count of finished operations so far.
        total: Total operations in graph.
    """

    name: str
    result: Any
    error: Exception | None = None
    completed: int = 0
    total: int = 0

    @property
    def success(self) -> bool:
        """True if operation completed without error."""
        return self.error is None


class DependencyAwareExecutor:
    """Execute operation DAGs respecting dependencies and concurrency limits.

    Lifecycle:
        1. Pre-allocate branches for all operations
        2. Launch all tasks (each waits on predecessors internally)
        3. Acquire concurrency slot only when dependencies complete
        4. Execute and store result/error
        5. Signal completion to dependents

    Concurrency:
        Uses concurrency.Event objects for dependency coordination and a
        CapacityLimiter for throttling. Results and errors are stored in
        per-executor dicts.

    Note:
        Does not inject context between operations. Use metadata["branch"]
        to assign specific branches, or set default_branch for all.
    """

    def __init__(
        self,
        session: Session,
        graph: Graph,
        max_concurrent: int | UndefinedType = Undefined,
        stop_on_error: bool = True,
        verbose: bool = False,
        default_branch: Branch | str | UndefinedType = Undefined,
    ):
        """Initialize executor.

        Args:
            session: Session with services and operation registry.
            graph: DAG of Operation nodes.
            max_concurrent: Max parallel operations (Undefined/None = unlimited).
            stop_on_error: Raise on first failure vs continue.
            verbose: Log progress to debug logger.
            default_branch: Fallback branch for operations without metadata["branch"].
        """
        self.session = session
        self.graph = graph
        resolved_max_concurrent = None if is_sentinel(max_concurrent) else max_concurrent
        resolved_default_branch = None if is_sentinel(default_branch) else default_branch
        self.max_concurrent = resolved_max_concurrent
        self.stop_on_error = stop_on_error
        self.verbose = verbose
        self._default_branch = resolved_default_branch

        self.results: dict[UUID, Any] = {}
        self.errors: dict[UUID, Exception] = {}
        self.completion_events: dict[UUID, concurrency.Event] = {}
        self.operation_branches: dict[UUID, Branch | None] = {}

        self._limiter: CapacityLimiter | None = (
            CapacityLimiter(resolved_max_concurrent) if resolved_max_concurrent else None
        )

        for node in graph.nodes:
            if isinstance(node, Operation):
                self.completion_events[node.id] = concurrency.Event()

    async def execute(self) -> dict[str, Any]:
        """Execute graph and return results keyed by operation name.

        Returns:
            Dict mapping operation names to their results.

        Raises:
            ValueError: If graph has cycles or non-Operation nodes.
            Exception: First operation error if stop_on_error=True.
        """
        if not self.graph.is_acyclic():
            raise ValueError("Operation graph has cycles - must be a DAG")

        # Validate all nodes are Operations
        for node in self.graph.nodes:
            if not isinstance(node, Operation):
                raise ValueError(
                    f"Graph contains non-Operation node: {node} ({type(node).__name__})"
                )

        # Pre-allocate branches to avoid locking during execution
        await self._preallocate_branches()

        # Execute operations with dependency coordination
        operations = [node for node in self.graph.nodes if isinstance(node, Operation)]

        # Create operation tasks (they wait on dependencies internally)
        tasks = [self._execute_operation(op) for op in operations]

        # Use CompletionStream to process results as they arrive.
        # Concurrency is handled by self._limiter AFTER dependency resolution,
        # so limiter slots are only held by tasks ready to execute.
        async with CompletionStream(tasks, limit=None) as stream:
            async for idx, _ in stream:
                op = operations[idx]
                if self.verbose:
                    name = op.metadata.get("name", str(op.id)[:8])
                    if op.id in self.errors:
                        logger.debug("Operation '%s' failed", name)
                    elif op.id in self.results:
                        logger.debug("Operation '%s' completed", name)

        # Compile results keyed by operation name for user-friendly access
        results_by_name = {}
        for node in self.graph.nodes:
            if isinstance(node, Operation):
                name = node.metadata.get("name", str(node.id))
                if node.id in self.results:
                    results_by_name[name] = self.results[node.id]

        return results_by_name

    async def stream_execute(self) -> AsyncGenerator[OperationResult, None]:
        """Execute graph, yielding OperationResult as each operation completes.

        Yields:
            OperationResult with name, result/error, and progress counts.

        Raises:
            ValueError: If graph has cycles or non-Operation nodes.
        """
        if not self.graph.is_acyclic():
            raise ValueError("Operation graph has cycles - must be a DAG")

        # Validate all nodes are Operations
        for node in self.graph.nodes:
            if not isinstance(node, Operation):
                raise ValueError(
                    f"Graph contains non-Operation node: {node} ({type(node).__name__})"
                )

        # Pre-allocate branches
        await self._preallocate_branches()

        # Execute operations with dependency coordination
        operations = [node for node in self.graph.nodes if isinstance(node, Operation)]
        total = len(operations)

        # Create operation tasks
        tasks = [self._execute_operation(op) for op in operations]

        # Stream results as they complete.
        # Concurrency is handled by self._limiter AFTER dependency resolution.
        completed = 0
        async with CompletionStream(tasks, limit=None) as stream:
            async for idx, _ in stream:
                completed += 1
                op = operations[idx]
                name = op.metadata.get("name", str(op.id))

                # Build result
                if op.id in self.errors:
                    yield OperationResult(
                        name=name,
                        result=None,
                        error=self.errors[op.id],
                        completed=completed,
                        total=total,
                    )
                else:
                    yield OperationResult(
                        name=name,
                        result=self.results.get(op.id),
                        error=None,
                        completed=completed,
                        total=total,
                    )

    def _resolve_operation_branch(self, branch_spec: Any) -> Branch | None:
        """Resolve branch spec (Branch|UUID|str|None) to Branch or None."""
        if branch_spec is None:
            return None

        # Already a Branch
        if hasattr(branch_spec, "id") and hasattr(branch_spec, "order"):
            return branch_spec

        if isinstance(branch_spec, (UUID, str)):
            try:
                return self.session.get_branch(branch_spec)
            except Exception as e:
                logger.debug("Branch '%s' not found, using default: %s", branch_spec, e)
                return None

        return None

    async def _preallocate_branches(self) -> None:
        """Assign branches to all operations before execution."""
        default_branch = self._resolve_operation_branch(self._default_branch)
        if default_branch is None:
            default_branch = getattr(self.session, "default_branch", None)

        for node in self.graph.nodes:
            if isinstance(node, Operation):
                op_branch = node.metadata.get("branch")
                if op_branch is not None:
                    resolved = self._resolve_operation_branch(op_branch)
                    self.operation_branches[node.id] = (
                        resolved if resolved is not None else default_branch
                    )
                else:
                    self.operation_branches[node.id] = default_branch

        if self.verbose:
            logger.debug("Pre-allocated branches for %d operations", len(self.operation_branches))

    async def _execute_operation(self, operation: Operation) -> Operation:
        """Execute single operation: wait deps -> acquire slot -> invoke -> signal."""
        try:
            # Wait for all dependencies to complete (no limiter held yet)
            await self._wait_for_dependencies(operation)

            # Acquire limiter slot ONLY when ready to execute
            if self._limiter:
                await self._limiter.acquire()

            try:
                # Execute the operation (no context injection - use params as-is)
                await self._invoke_operation(operation)
            finally:
                if self._limiter:
                    self._limiter.release()

        except Exception as e:
            self.errors[operation.id] = e
            if self.verbose:
                logger.exception("Operation %s failed: %s", str(operation.id)[:8], e)

            if self.stop_on_error:
                self.completion_events[operation.id].set()
                raise

        finally:
            self.completion_events[operation.id].set()

        return operation

    async def _wait_for_dependencies(self, operation: Operation) -> None:
        """Block until all predecessor operations signal completion."""
        predecessors = self.graph.get_predecessors(operation)

        if self.verbose and predecessors:
            logger.debug(
                "Operation %s waiting for %d dependencies",
                str(operation.id)[:8],
                len(predecessors),
            )

        for pred in predecessors:
            if pred.id in self.completion_events:
                await self.completion_events[pred.id].wait()

    async def _invoke_operation(self, operation: Operation) -> None:
        """Bind, invoke, and store result or error."""
        if self.verbose:
            name = operation.metadata.get("name", str(operation.id)[:8])
            logger.debug("Executing operation: %s", name)

        branch = self.operation_branches.get(operation.id)
        if branch is None:
            raise ValueError(f"No branch allocated for operation {operation.id}")

        operation.bind(self.session, branch)
        await operation.invoke()

        if operation.execution.status == EventStatus.COMPLETED:
            self.results[operation.id] = operation.execution.response
            if self.verbose:
                name = operation.metadata.get("name", str(operation.id)[:8])
                logger.debug("Completed operation: %s", name)
        else:
            error_msg = f"Execution status: {operation.execution.status}"
            if hasattr(operation.execution, "error") and operation.execution.error:
                error_msg += f" - {operation.execution.error}"
            self.errors[operation.id] = RuntimeError(error_msg)
            if self.verbose:
                name = operation.metadata.get("name", str(operation.id)[:8])
                logger.warning("Operation %s failed: %s", name, error_msg)


async def flow(
    session: Session,
    graph: Graph,
    *,
    branch: Branch | str | None = None,
    max_concurrent: int | None = None,
    stop_on_error: bool = True,
    verbose: bool = False,
) -> dict[str, Any]:
    """Execute operation graph with dependency-aware scheduling.

    Operations are executed with their given parameters - no context injection.
    For context passing between operations, use flow_report or manage context
    explicitly before adding operations to the graph.

    Args:
        session: Session for services and branches.
        graph: Operation graph (DAG) to execute.
        branch: Default branch (operations can override via metadata).
        max_concurrent: Max concurrent operations (None = unlimited).
        stop_on_error: Stop on first error.
        verbose: Log progress to the debug logger.

    Returns:
        Dictionary mapping operation names to their results.
    """
    executor = DependencyAwareExecutor(
        session=session,
        graph=graph,
        max_concurrent=max_concurrent,
        stop_on_error=stop_on_error,
        verbose=verbose,
        default_branch=branch,
    )

    return await executor.execute()


async def flow_stream(
    session: Session,
    graph: Graph,
    *,
    branch: Branch | str | None = None,
    max_concurrent: int | None = None,
    stop_on_error: bool = True,
) -> AsyncGenerator[OperationResult, None]:
    """Execute graph with streaming results. Same args as flow().

    Yields:
        OperationResult with progress tracking as each operation completes.
    """
    executor = DependencyAwareExecutor(
        session=session,
        graph=graph,
        max_concurrent=max_concurrent,
        stop_on_error=stop_on_error,
        verbose=False,
        default_branch=branch,
    )

    async for result in executor.stream_execute():
        yield result
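
A minimal usage sketch for the two entry points above, flow() and flow_stream(). It assumes `session` is an already-configured kronos Session and `graph` is a DAG of Operation nodes built with kronos.core.Graph (graph construction lives in kronos/core/graph.py and is not shown in this hunk); the import path simply mirrors the file location.

# Sketch only: `session` and `graph` are assumed to be built elsewhere.
from kronos.operations.flow import flow, flow_stream

async def run_batch(session, graph):
    # Batch mode: waits for the whole DAG, then returns {operation_name: result}.
    return await flow(session, graph, max_concurrent=4, stop_on_error=True)

async def run_streaming(session, graph):
    # Streaming mode: yields an OperationResult as each operation finishes,
    # with completed/total counters for progress reporting.
    async for item in flow_stream(session, graph, max_concurrent=4):
        status = "ok" if item.success else f"failed: {item.error!r}"
        print(f"[{item.completed}/{item.total}] {item.name}: {status}")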
kronos/operations/node.py
@@ -0,0 +1,101 @@
# Copyright (c) 2025 - 2026, HaiyangLi <quantocean.li at gmail dot com>
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations

from typing import TYPE_CHECKING, Any

from pydantic import Field, PrivateAttr

from kronos.core import Event, Node
from kronos.types import Undefined, UndefinedType, is_sentinel

if TYPE_CHECKING:
    from kronos.session import Branch, Session

__all__ = ("Operation", "create_operation")


class Operation(Node, Event):
    operation_type: str
    parameters: dict[str, Any] | Any = Field(
        default_factory=dict,
        description="Operation parameters (dict or Pydantic model)",
    )

    _session: Any = PrivateAttr(default=None)
    _branch: Any = PrivateAttr(default=None)

    def bind(self, session: Session, branch: Branch) -> Operation:
        """Bind session and branch for execution.

        Must be called before invoke() if not using Session.conduct().

        Args:
            session: Session with operations registry and services
            branch: Branch for message context

        Returns:
            Self for chaining
        """
        self._session = session
        self._branch = branch
        return self

    def _require_binding(self) -> tuple[Session, Branch]:
        """Return bound (session, branch) tuple or raise RuntimeError if unbound."""
        if self._session is None or self._branch is None:
            raise RuntimeError(
                "Operation not bound to session/branch. "
                "Use operation.bind(session, branch) or session.conduct(...)"
            )
        return self._session, self._branch

    async def _invoke(self) -> Any:
        """Execute via session's operation registry. Called by Event.invoke().

        Returns:
            Factory result (stored in execution.response).

        Raises:
            RuntimeError: If not bound.
            KeyError: If operation_type not registered.
        """
        session, branch = self._require_binding()
        factory = session.operations.get(self.operation_type)
        return await factory(session, branch, self.parameters)

    def __repr__(self) -> str:
        bound = "bound" if self._session is not None else "unbound"
        return (
            f"Operation(type={self.operation_type}, status={self.execution.status.value}, {bound})"
        )


def create_operation(
    operation_type: str | UndefinedType = Undefined,
    parameters: dict[str, Any] | UndefinedType = Undefined,
    **kwargs,
) -> Operation:
    """Factory for Operation nodes.

    Args:
        operation_type: Registry key (required).
        parameters: Factory arguments dict (default: {}).
        **kwargs: Additional fields (metadata, timeout, etc.).

    Returns:
        Unbound Operation ready for bind() and invoke().

    Raises:
        ValueError: If operation_type not provided.
    """
    if is_sentinel(operation_type):
        raise ValueError("operation_type is required")

    resolved_params: dict[str, Any] = {} if is_sentinel(parameters) else parameters

    return Operation(
        operation_type=operation_type,
        parameters=resolved_params,
        **kwargs,
    )
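
A short sketch of the bind/invoke lifecycle defined above. It assumes a Session and Branch obtained elsewhere (see kronos/session/, not shown here) and a hypothetical "chat" factory already registered on session.operations; the metadata "name" field is what flow() uses to key results.

# Sketch only: "chat" is a hypothetical registry key; session and branch are
# assumed to come from kronos.session and be wired up elsewhere.
from kronos.operations.node import create_operation

async def run_one(session, branch):
    op = create_operation(
        operation_type="chat",                        # registry lookup key (required)
        parameters={"prompt": "Summarize the spec"},  # forwarded to the factory
        metadata={"name": "summarize"},               # flow() keys results by this name
    )
    op.bind(session, branch)      # required before invoke() outside Session.conduct()
    await op.invoke()             # Event.invoke() drives Operation._invoke()
    return op.execution.response  # factory result recorded on the execution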
kronos/operations/registry.py
@@ -0,0 +1,92 @@
# Copyright (c) 2025 - 2026, HaiyangLi <quantocean.li at gmail dot com>
# SPDX-License-Identifier: Apache-2.0

"""Per-session operation factory registry.

Maps operation names to async factory functions. Instantiated per-Session
for isolation, testability, and per-session customization.
"""

from __future__ import annotations

from collections.abc import Awaitable, Callable
from typing import Any

__all__ = ("OperationRegistry",)

OperationFactory = Callable[..., Awaitable[Any]]
"""Factory signature: async (session, branch, parameters) -> result"""


class OperationRegistry:
    """Map operation names to async factory functions.

    Per-session registry (not global) for isolation and testability.

    Example:
        registry = OperationRegistry()
        registry.register("chat", chat_factory)
        factory = registry.get("chat")
        result = await factory(session, branch, params)
    """

    def __init__(self):
        """Initialize empty registry."""
        self._factories: dict[str, OperationFactory] = {}

    def register(
        self,
        operation_name: str,
        factory: OperationFactory,
        *,
        override: bool = False,
    ) -> None:
        """Register factory for operation name.

        Args:
            operation_name: Lookup key.
            factory: Async (session, branch, params) -> result.
            override: Allow replacing existing. Default False.

        Raises:
            ValueError: If name exists and override=False.
        """
        if operation_name in self._factories and not override:
            raise ValueError(
                f"Operation '{operation_name}' already registered. Use override=True to replace."
            )
        self._factories[operation_name] = factory

    def get(self, operation_name: str) -> OperationFactory:
        """Get factory by name. Raises KeyError with available names if not found."""
        if operation_name not in self._factories:
            raise KeyError(
                f"Operation '{operation_name}' not registered. Available: {self.list_names()}"
            )
        return self._factories[operation_name]

    def has(self, operation_name: str) -> bool:
        """Check if name is registered."""
        return operation_name in self._factories

    def unregister(self, operation_name: str) -> bool:
        """Remove registration. Returns True if existed."""
        if operation_name in self._factories:
            del self._factories[operation_name]
            return True
        return False

    def list_names(self) -> list[str]:
        """Return all registered operation names."""
        return list(self._factories.keys())

    def __contains__(self, operation_name: str) -> bool:
        """Support 'name in registry' syntax."""
        return operation_name in self._factories

    def __len__(self) -> int:
        """Count of registered operations."""
        return len(self._factories)

    def __repr__(self) -> str:
        return f"OperationRegistry(operations={self.list_names()})"