omnibase_infra-0.2.5-py3-none-any.whl → omnibase_infra-0.2.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (139)
  1. omnibase_infra/constants_topic_patterns.py +26 -0
  2. omnibase_infra/enums/__init__.py +3 -0
  3. omnibase_infra/enums/enum_consumer_group_purpose.py +92 -0
  4. omnibase_infra/enums/enum_handler_source_mode.py +16 -2
  5. omnibase_infra/errors/__init__.py +4 -0
  6. omnibase_infra/errors/error_binding_resolution.py +128 -0
  7. omnibase_infra/event_bus/configs/kafka_event_bus_config.yaml +0 -2
  8. omnibase_infra/event_bus/event_bus_inmemory.py +64 -10
  9. omnibase_infra/event_bus/event_bus_kafka.py +105 -47
  10. omnibase_infra/event_bus/mixin_kafka_broadcast.py +3 -7
  11. omnibase_infra/event_bus/mixin_kafka_dlq.py +12 -6
  12. omnibase_infra/event_bus/models/config/model_kafka_event_bus_config.py +0 -81
  13. omnibase_infra/event_bus/testing/__init__.py +26 -0
  14. omnibase_infra/event_bus/testing/adapter_protocol_event_publisher_inmemory.py +418 -0
  15. omnibase_infra/event_bus/testing/model_publisher_metrics.py +64 -0
  16. omnibase_infra/handlers/handler_consul.py +2 -0
  17. omnibase_infra/handlers/mixins/__init__.py +5 -0
  18. omnibase_infra/handlers/mixins/mixin_consul_service.py +274 -10
  19. omnibase_infra/handlers/mixins/mixin_consul_topic_index.py +585 -0
  20. omnibase_infra/handlers/models/model_filesystem_config.py +4 -4
  21. omnibase_infra/migrations/001_create_event_ledger.sql +166 -0
  22. omnibase_infra/migrations/001_drop_event_ledger.sql +18 -0
  23. omnibase_infra/mixins/mixin_node_introspection.py +189 -19
  24. omnibase_infra/models/__init__.py +8 -0
  25. omnibase_infra/models/bindings/__init__.py +59 -0
  26. omnibase_infra/models/bindings/constants.py +144 -0
  27. omnibase_infra/models/bindings/model_binding_resolution_result.py +103 -0
  28. omnibase_infra/models/bindings/model_operation_binding.py +44 -0
  29. omnibase_infra/models/bindings/model_operation_bindings_subcontract.py +152 -0
  30. omnibase_infra/models/bindings/model_parsed_binding.py +52 -0
  31. omnibase_infra/models/discovery/model_introspection_config.py +25 -17
  32. omnibase_infra/models/dispatch/__init__.py +8 -0
  33. omnibase_infra/models/dispatch/model_debug_trace_snapshot.py +114 -0
  34. omnibase_infra/models/dispatch/model_materialized_dispatch.py +141 -0
  35. omnibase_infra/models/handlers/model_handler_source_config.py +1 -1
  36. omnibase_infra/models/model_node_identity.py +126 -0
  37. omnibase_infra/models/projection/model_snapshot_topic_config.py +3 -2
  38. omnibase_infra/models/registration/__init__.py +9 -0
  39. omnibase_infra/models/registration/model_event_bus_topic_entry.py +59 -0
  40. omnibase_infra/models/registration/model_node_event_bus_config.py +99 -0
  41. omnibase_infra/models/registration/model_node_introspection_event.py +11 -0
  42. omnibase_infra/models/runtime/__init__.py +9 -0
  43. omnibase_infra/models/validation/model_coverage_metrics.py +2 -2
  44. omnibase_infra/nodes/__init__.py +9 -0
  45. omnibase_infra/nodes/contract_registry_reducer/__init__.py +29 -0
  46. omnibase_infra/nodes/contract_registry_reducer/contract.yaml +255 -0
  47. omnibase_infra/nodes/contract_registry_reducer/models/__init__.py +38 -0
  48. omnibase_infra/nodes/contract_registry_reducer/models/model_contract_registry_state.py +266 -0
  49. omnibase_infra/nodes/contract_registry_reducer/models/model_payload_cleanup_topic_references.py +55 -0
  50. omnibase_infra/nodes/contract_registry_reducer/models/model_payload_deactivate_contract.py +58 -0
  51. omnibase_infra/nodes/contract_registry_reducer/models/model_payload_mark_stale.py +49 -0
  52. omnibase_infra/nodes/contract_registry_reducer/models/model_payload_update_heartbeat.py +71 -0
  53. omnibase_infra/nodes/contract_registry_reducer/models/model_payload_update_topic.py +66 -0
  54. omnibase_infra/nodes/contract_registry_reducer/models/model_payload_upsert_contract.py +92 -0
  55. omnibase_infra/nodes/contract_registry_reducer/node.py +121 -0
  56. omnibase_infra/nodes/contract_registry_reducer/reducer.py +784 -0
  57. omnibase_infra/nodes/contract_registry_reducer/registry/__init__.py +9 -0
  58. omnibase_infra/nodes/contract_registry_reducer/registry/registry_infra_contract_registry_reducer.py +101 -0
  59. omnibase_infra/nodes/handlers/consul/contract.yaml +85 -0
  60. omnibase_infra/nodes/handlers/db/contract.yaml +72 -0
  61. omnibase_infra/nodes/handlers/graph/contract.yaml +127 -0
  62. omnibase_infra/nodes/handlers/http/contract.yaml +74 -0
  63. omnibase_infra/nodes/handlers/intent/contract.yaml +66 -0
  64. omnibase_infra/nodes/handlers/mcp/contract.yaml +69 -0
  65. omnibase_infra/nodes/handlers/vault/contract.yaml +91 -0
  66. omnibase_infra/nodes/node_ledger_projection_compute/__init__.py +50 -0
  67. omnibase_infra/nodes/node_ledger_projection_compute/contract.yaml +104 -0
  68. omnibase_infra/nodes/node_ledger_projection_compute/node.py +284 -0
  69. omnibase_infra/nodes/node_ledger_projection_compute/registry/__init__.py +29 -0
  70. omnibase_infra/nodes/node_ledger_projection_compute/registry/registry_infra_ledger_projection.py +118 -0
  71. omnibase_infra/nodes/node_ledger_write_effect/__init__.py +82 -0
  72. omnibase_infra/nodes/node_ledger_write_effect/contract.yaml +200 -0
  73. omnibase_infra/nodes/node_ledger_write_effect/handlers/__init__.py +22 -0
  74. omnibase_infra/nodes/node_ledger_write_effect/handlers/handler_ledger_append.py +372 -0
  75. omnibase_infra/nodes/node_ledger_write_effect/handlers/handler_ledger_query.py +597 -0
  76. omnibase_infra/nodes/node_ledger_write_effect/models/__init__.py +31 -0
  77. omnibase_infra/nodes/node_ledger_write_effect/models/model_ledger_append_result.py +54 -0
  78. omnibase_infra/nodes/node_ledger_write_effect/models/model_ledger_entry.py +92 -0
  79. omnibase_infra/nodes/node_ledger_write_effect/models/model_ledger_query.py +53 -0
  80. omnibase_infra/nodes/node_ledger_write_effect/models/model_ledger_query_result.py +41 -0
  81. omnibase_infra/nodes/node_ledger_write_effect/node.py +89 -0
  82. omnibase_infra/nodes/node_ledger_write_effect/protocols/__init__.py +13 -0
  83. omnibase_infra/nodes/node_ledger_write_effect/protocols/protocol_ledger_persistence.py +127 -0
  84. omnibase_infra/nodes/node_ledger_write_effect/registry/__init__.py +9 -0
  85. omnibase_infra/nodes/node_ledger_write_effect/registry/registry_infra_ledger_write.py +121 -0
  86. omnibase_infra/nodes/node_registration_orchestrator/registry/registry_infra_node_registration_orchestrator.py +7 -5
  87. omnibase_infra/nodes/reducers/models/__init__.py +7 -2
  88. omnibase_infra/nodes/reducers/models/model_payload_consul_register.py +11 -0
  89. omnibase_infra/nodes/reducers/models/model_payload_ledger_append.py +133 -0
  90. omnibase_infra/nodes/reducers/registration_reducer.py +1 -0
  91. omnibase_infra/protocols/__init__.py +3 -0
  92. omnibase_infra/protocols/protocol_dispatch_engine.py +152 -0
  93. omnibase_infra/runtime/__init__.py +60 -0
  94. omnibase_infra/runtime/binding_resolver.py +753 -0
  95. omnibase_infra/runtime/constants_security.py +70 -0
  96. omnibase_infra/runtime/contract_loaders/__init__.py +9 -0
  97. omnibase_infra/runtime/contract_loaders/operation_bindings_loader.py +789 -0
  98. omnibase_infra/runtime/emit_daemon/__init__.py +97 -0
  99. omnibase_infra/runtime/emit_daemon/cli.py +844 -0
  100. omnibase_infra/runtime/emit_daemon/client.py +811 -0
  101. omnibase_infra/runtime/emit_daemon/config.py +535 -0
  102. omnibase_infra/runtime/emit_daemon/daemon.py +812 -0
  103. omnibase_infra/runtime/emit_daemon/event_registry.py +477 -0
  104. omnibase_infra/runtime/emit_daemon/model_daemon_request.py +139 -0
  105. omnibase_infra/runtime/emit_daemon/model_daemon_response.py +191 -0
  106. omnibase_infra/runtime/emit_daemon/queue.py +618 -0
  107. omnibase_infra/runtime/event_bus_subcontract_wiring.py +466 -0
  108. omnibase_infra/runtime/handler_source_resolver.py +43 -2
  109. omnibase_infra/runtime/kafka_contract_source.py +984 -0
  110. omnibase_infra/runtime/models/__init__.py +13 -0
  111. omnibase_infra/runtime/models/model_contract_load_result.py +224 -0
  112. omnibase_infra/runtime/models/model_runtime_contract_config.py +268 -0
  113. omnibase_infra/runtime/models/model_runtime_scheduler_config.py +4 -3
  114. omnibase_infra/runtime/models/model_security_config.py +109 -0
  115. omnibase_infra/runtime/publisher_topic_scoped.py +294 -0
  116. omnibase_infra/runtime/runtime_contract_config_loader.py +406 -0
  117. omnibase_infra/runtime/service_kernel.py +76 -6
  118. omnibase_infra/runtime/service_message_dispatch_engine.py +558 -15
  119. omnibase_infra/runtime/service_runtime_host_process.py +770 -20
  120. omnibase_infra/runtime/transition_notification_publisher.py +3 -2
  121. omnibase_infra/runtime/util_wiring.py +206 -62
  122. omnibase_infra/services/mcp/service_mcp_tool_sync.py +27 -9
  123. omnibase_infra/services/session/config_consumer.py +25 -8
  124. omnibase_infra/services/session/config_store.py +2 -2
  125. omnibase_infra/services/session/consumer.py +1 -1
  126. omnibase_infra/topics/__init__.py +45 -0
  127. omnibase_infra/topics/platform_topic_suffixes.py +140 -0
  128. omnibase_infra/topics/util_topic_composition.py +95 -0
  129. omnibase_infra/types/typed_dict/__init__.py +9 -1
  130. omnibase_infra/types/typed_dict/typed_dict_envelope_build_params.py +115 -0
  131. omnibase_infra/utils/__init__.py +9 -0
  132. omnibase_infra/utils/util_consumer_group.py +232 -0
  133. omnibase_infra/validation/infra_validators.py +18 -1
  134. omnibase_infra/validation/validation_exemptions.yaml +192 -0
  135. {omnibase_infra-0.2.5.dist-info → omnibase_infra-0.2.7.dist-info}/METADATA +3 -3
  136. {omnibase_infra-0.2.5.dist-info → omnibase_infra-0.2.7.dist-info}/RECORD +139 -52
  137. {omnibase_infra-0.2.5.dist-info → omnibase_infra-0.2.7.dist-info}/entry_points.txt +1 -0
  138. {omnibase_infra-0.2.5.dist-info → omnibase_infra-0.2.7.dist-info}/WHEEL +0 -0
  139. {omnibase_infra-0.2.5.dist-info → omnibase_infra-0.2.7.dist-info}/licenses/LICENSE +0 -0
omnibase_infra/nodes/node_ledger_write_effect/handlers/handler_ledger_query.py
@@ -0,0 +1,597 @@
+# SPDX-License-Identifier: MIT
+# Copyright (c) 2026 OmniNode Team
+"""Handler for ledger query operations with internal routing.
+
+This handler provides query operations for the event ledger, supporting
+queries by correlation_id and time_range. Both operations share:
+- Input validation and normalization
+- DB connection/session lifecycle (via HandlerDb composition)
+- Pagination and ordering rules
+- Error mapping and handling
+- Consistent response surface
+
+The operation suffix drives internal routing to private query methods.
+
+Design Decision - Single Handler with Internal Routing:
+    Two handlers looks "clean" until you realize you now have to duplicate:
+    validation, DB session wiring, paging defaults, error mapping, metrics,
+    tracing, and auth checks. That's the stuff that actually rots. The query
+    shape is the only thing that differs.
+
+    Only split into two handlers if the two modes diverge materially in
+    non-shared behavior (different indexes, different auth model, different
+    response shape, different pagination contract).
+"""
+
+from __future__ import annotations
+
+import logging
+from datetime import datetime
+from typing import TYPE_CHECKING
+from uuid import UUID, uuid4
+
+from omnibase_core.models.dispatch import ModelHandlerOutput
+from omnibase_infra.enums import (
+    EnumHandlerType,
+    EnumHandlerTypeCategory,
+    EnumInfraTransportType,
+)
+from omnibase_infra.errors import ModelInfraErrorContext, RuntimeHostError
+from omnibase_infra.nodes.node_ledger_write_effect.models import (
+    ModelLedgerEntry,
+    ModelLedgerQuery,
+    ModelLedgerQueryResult,
+)
+
+if TYPE_CHECKING:
+    from omnibase_core.container import ModelONEXContainer
+    from omnibase_infra.handlers.handler_db import HandlerDb
+
+logger = logging.getLogger(__name__)
+
+# Handler ID for ModelHandlerOutput
+HANDLER_ID_LEDGER_QUERY: str = "ledger-query-handler"
+
+# Default pagination limits
+_DEFAULT_LIMIT: int = 100
+_MAX_LIMIT: int = 10000
+
+# SQL for correlation_id queries
+# Uses partial index idx_event_ledger_correlation_id
+_SQL_QUERY_BY_CORRELATION_ID = """
+    SELECT
+        ledger_entry_id,
+        topic,
+        partition,
+        kafka_offset,
+        encode(event_key, 'base64') as event_key,
+        encode(event_value, 'base64') as event_value,
+        onex_headers,
+        envelope_id,
+        correlation_id,
+        event_type,
+        source,
+        event_timestamp,
+        ledger_written_at
+    FROM event_ledger
+    WHERE correlation_id = $1
+    ORDER BY COALESCE(event_timestamp, ledger_written_at) DESC
+    LIMIT $2
+    OFFSET $3
+"""
+
+# SQL for counting correlation_id matches (for pagination metadata)
+_SQL_COUNT_BY_CORRELATION_ID = """
+    SELECT COUNT(*) as total
+    FROM event_ledger
+    WHERE correlation_id = $1
+"""
+
+# SQL for time range queries
+# Uses index idx_event_ledger_topic_timestamp for topic-scoped queries
+# Falls back to idx_event_ledger_event_timestamp for unscoped queries
+_SQL_QUERY_BY_TIME_RANGE_BASE = """
+    SELECT
+        ledger_entry_id,
+        topic,
+        partition,
+        kafka_offset,
+        encode(event_key, 'base64') as event_key,
+        encode(event_value, 'base64') as event_value,
+        onex_headers,
+        envelope_id,
+        correlation_id,
+        event_type,
+        source,
+        event_timestamp,
+        ledger_written_at
+    FROM event_ledger
+    WHERE COALESCE(event_timestamp, ledger_written_at) >= $1
+      AND COALESCE(event_timestamp, ledger_written_at) < $2
+"""
+
+_SQL_COUNT_BY_TIME_RANGE_BASE = """
+    SELECT COUNT(*) as total
+    FROM event_ledger
+    WHERE COALESCE(event_timestamp, ledger_written_at) >= $1
+      AND COALESCE(event_timestamp, ledger_written_at) < $2
+"""
+
+
+class HandlerLedgerQuery:
+    """Handler for querying events from the audit ledger.
+
+    This handler implements query operations for ProtocolLedgerPersistence,
+    composing with HandlerDb for PostgreSQL operations. It provides:
+
+    - Query by correlation_id (distributed tracing)
+    - Query by time_range (replay, audit, debugging)
+    - Optional filters by event_type and topic
+    - Pagination with limit/offset
+    - Consistent response surface via ModelLedgerQueryResult
+
+    Internal Routing:
+        Based on the operation field in the envelope:
+        - "ledger.query" with correlation_id → _query_by_correlation_id()
+        - "ledger.query" with start_time/end_time → _query_by_time_range()
+        - Or use the explicit typed methods directly
+
+    Attributes:
+        handler_type: EnumHandlerType.INFRA_HANDLER
+        handler_category: EnumHandlerTypeCategory.EFFECT
+
+    Example:
+        >>> handler = HandlerLedgerQuery(container, db_handler)
+        >>> await handler.initialize({})
+        >>> # Query by correlation_id
+        >>> entries = await handler.query_by_correlation_id(corr_id, limit=50)
+        >>> # Query by time range
+        >>> entries = await handler.query_by_time_range(start, end, event_type="NodeRegistered")
+    """
+
+    def __init__(
+        self,
+        container: ModelONEXContainer,
+        db_handler: HandlerDb,
+    ) -> None:
+        """Initialize the ledger query handler.
+
+        Args:
+            container: ONEX dependency injection container.
+            db_handler: Initialized HandlerDb instance for PostgreSQL operations.
+        """
+        self._container = container
+        self._db_handler = db_handler
+        self._initialized: bool = False
+
+    @property
+    def handler_type(self) -> EnumHandlerType:
+        """Return the architectural role of this handler."""
+        return EnumHandlerType.INFRA_HANDLER
+
+    @property
+    def handler_category(self) -> EnumHandlerTypeCategory:
+        """Return the behavioral classification of this handler."""
+        return EnumHandlerTypeCategory.EFFECT
+
+    async def initialize(self, config: dict[str, object]) -> None:
+        """Initialize the handler.
+
+        Args:
+            config: Configuration dict (currently unused).
+
+        Raises:
+            RuntimeHostError: If HandlerDb is not initialized.
+        """
+        if not getattr(self._db_handler, "_initialized", False):
+            ctx = ModelInfraErrorContext.with_correlation(
+                transport_type=EnumInfraTransportType.DATABASE,
+                operation="initialize",
+            )
+            raise RuntimeHostError(
+                "HandlerDb must be initialized before HandlerLedgerQuery",
+                context=ctx,
+            )
+
+        self._initialized = True
+        logger.info(
+            "%s initialized successfully",
+            self.__class__.__name__,
+            extra={"handler": self.__class__.__name__},
+        )
+
+    async def shutdown(self) -> None:
+        """Shutdown the handler."""
+        self._initialized = False
+        logger.info("HandlerLedgerQuery shutdown complete")
+
+    # =========================================================================
+    # Public Query Methods (Typed Interface)
+    # =========================================================================
+
+    async def query_by_correlation_id(
+        self,
+        correlation_id: UUID,
+        limit: int = _DEFAULT_LIMIT,
+        offset: int = 0,
+    ) -> list[ModelLedgerEntry]:
+        """Query ledger entries by correlation ID.
+
+        Args:
+            correlation_id: The correlation ID to search for.
+            limit: Maximum entries to return (default: 100, max: 10000).
+            offset: Number of entries to skip for pagination.
+
+        Returns:
+            List of ModelLedgerEntry matching the correlation ID.
+        """
+        self._ensure_initialized("ledger.query.by_correlation_id")
+        limit = self._normalize_limit(limit)
+
+        # Execute query via HandlerDb
+        rows = await self._execute_query(
+            sql=_SQL_QUERY_BY_CORRELATION_ID,
+            parameters=[str(correlation_id), limit, offset],
+            operation="ledger.query.by_correlation_id",
+            correlation_id=correlation_id,
+        )
+
+        return [self._row_to_entry(row) for row in rows]
+
+    async def query_by_time_range(
+        self,
+        start: datetime,
+        end: datetime,
+        correlation_id: UUID | None = None,
+        event_type: str | None = None,
+        topic: str | None = None,
+        limit: int = _DEFAULT_LIMIT,
+        offset: int = 0,
+    ) -> list[ModelLedgerEntry]:
+        """Query ledger entries within a time range.
+
+        Args:
+            start: Start of time range (inclusive).
+            end: End of time range (exclusive).
+            correlation_id: Correlation ID for distributed tracing (auto-generated if None).
+            event_type: Optional filter by event type.
+            topic: Optional filter by Kafka topic.
+            limit: Maximum entries to return (default: 100, max: 10000).
+            offset: Number of entries to skip for pagination.
+
+        Returns:
+            List of ModelLedgerEntry within the time range.
+        """
+        self._ensure_initialized("ledger.query.by_time_range")
+        limit = self._normalize_limit(limit)
+        # Auto-generate correlation_id if not provided
+        effective_correlation_id = (
+            correlation_id if correlation_id is not None else uuid4()
+        )
+
+        # Build query model for SQL generation
+        query_params = ModelLedgerQuery(
+            start_time=start,
+            end_time=end,
+            event_type=event_type,
+            topic=topic,
+            limit=limit,
+            offset=offset,
+        )
+
+        # Build dynamic SQL with optional filters
+        sql, _count_sql, parameters = self._build_time_range_query(query_params)
+
+        # Execute query via HandlerDb
+        rows = await self._execute_query(
+            sql=sql,
+            parameters=parameters,
+            operation="ledger.query.by_time_range",
+            correlation_id=effective_correlation_id,
+        )
+
+        return [self._row_to_entry(row) for row in rows]
+
+    async def query(
+        self,
+        query: ModelLedgerQuery,
+        correlation_id: UUID,
+    ) -> ModelLedgerQueryResult:
+        """Execute a query using the ModelLedgerQuery parameters.
+
+        Routes to the appropriate private method based on query parameters.
+
+        Args:
+            query: Query parameters model.
+            correlation_id: Correlation ID for distributed tracing.
+
+        Returns:
+            ModelLedgerQueryResult with entries, total_count, and has_more.
+        """
+        self._ensure_initialized("ledger.query")
+
+        # Route based on query parameters
+        if query.correlation_id is not None:
+            entries = await self.query_by_correlation_id(
+                correlation_id=query.correlation_id,
+                limit=query.limit,
+                offset=query.offset,
+            )
+            total_count = await self._count_by_correlation_id(query.correlation_id)
+        elif query.start_time is not None and query.end_time is not None:
+            entries = await self.query_by_time_range(
+                start=query.start_time,
+                end=query.end_time,
+                correlation_id=correlation_id,
+                event_type=query.event_type,
+                topic=query.topic,
+                limit=query.limit,
+                offset=query.offset,
+            )
+            total_count = await self._count_by_time_range(
+                start=query.start_time,
+                end=query.end_time,
+                correlation_id=correlation_id,
+                event_type=query.event_type,
+                topic=query.topic,
+            )
+        else:
+            # No specific query criteria - would return all events
+            # This is likely an error or needs explicit "get all" operation
+            ctx = ModelInfraErrorContext.with_correlation(
+                correlation_id=correlation_id,
+                transport_type=EnumInfraTransportType.DATABASE,
+                operation="ledger.query",
+            )
+            raise RuntimeHostError(
+                "Query must specify either correlation_id or time range (start_time + end_time)",
+                context=ctx,
+            )
+
+        has_more = query.offset + len(entries) < total_count
+
+        return ModelLedgerQueryResult(
+            entries=entries,
+            total_count=total_count,
+            has_more=has_more,
+            query=query,
+        )
+
+    # =========================================================================
+    # Envelope-Based Interface (ProtocolHandler)
+    # =========================================================================
+
+    async def execute(
+        self,
+        envelope: dict[str, object],
+    ) -> ModelHandlerOutput[ModelLedgerQueryResult]:
+        """Execute ledger query from envelope.
+
+        Args:
+            envelope: Request envelope containing:
+                - operation: "ledger.query"
+                - payload: ModelLedgerQuery as dict
+                - correlation_id: Optional correlation ID
+
+        Returns:
+            ModelHandlerOutput wrapping ModelLedgerQueryResult.
+        """
+        correlation_id_raw = envelope.get("correlation_id")
+        correlation_id = (
+            UUID(str(correlation_id_raw)) if correlation_id_raw else uuid4()
+        )
+        input_envelope_id = uuid4()
+
+        payload_raw = envelope.get("payload")
+        if not isinstance(payload_raw, dict):
+            ctx = ModelInfraErrorContext.with_correlation(
+                correlation_id=correlation_id,
+                transport_type=EnumInfraTransportType.DATABASE,
+                operation="ledger.query",
+            )
+            raise RuntimeHostError(
+                "Missing or invalid 'payload' in envelope",
+                context=ctx,
+            )
+
+        # Parse payload into typed model
+        query = ModelLedgerQuery.model_validate(payload_raw)
+
+        # Execute query
+        result = await self.query(query, correlation_id=correlation_id)
+
+        return ModelHandlerOutput.for_compute(
+            input_envelope_id=input_envelope_id,
+            correlation_id=correlation_id,
+            handler_id=HANDLER_ID_LEDGER_QUERY,
+            result=result,
+        )
+
+    # =========================================================================
+    # Private Helpers
+    # =========================================================================
+
+    def _ensure_initialized(self, operation: str) -> None:
+        """Ensure handler is initialized."""
+        if not self._initialized:
+            ctx = ModelInfraErrorContext.with_correlation(
+                transport_type=EnumInfraTransportType.DATABASE,
+                operation=operation,
+            )
+            raise RuntimeHostError(
+                "HandlerLedgerQuery not initialized. Call initialize() first.",
+                context=ctx,
+            )
+
+    def _normalize_limit(self, limit: int) -> int:
+        """Normalize limit to valid range."""
+        if limit < 1:
+            return _DEFAULT_LIMIT
+        if limit > _MAX_LIMIT:
+            return _MAX_LIMIT
+        return limit
+
+    async def _execute_query(
+        self,
+        sql: str,
+        parameters: list[object],
+        operation: str,
+        correlation_id: UUID,
+    ) -> list[dict[str, object]]:
+        """Execute a query via HandlerDb and return rows."""
+        envelope: dict[str, object] = {
+            "operation": "db.query",
+            "payload": {
+                "sql": sql,
+                "parameters": parameters,
+            },
+            "correlation_id": str(correlation_id),
+        }
+
+        db_result = await self._db_handler.execute(envelope)
+        if db_result.result is None:
+            return []
+        return db_result.result.payload.rows
+
+    async def _count_by_correlation_id(self, correlation_id: UUID) -> int:
+        """Get total count for correlation_id query."""
+        rows = await self._execute_query(
+            sql=_SQL_COUNT_BY_CORRELATION_ID,
+            parameters=[str(correlation_id)],
+            operation="ledger.query.count",
+            correlation_id=correlation_id,
+        )
+        if rows and rows[0].get("total") is not None:
+            return int(str(rows[0]["total"]))
+        return 0

+    async def _count_by_time_range(
+        self,
+        start: datetime,
+        end: datetime,
+        correlation_id: UUID,
+        event_type: str | None = None,
+        topic: str | None = None,
+    ) -> int:
+        """Get total count for time_range query."""
+        query_params = ModelLedgerQuery(
+            start_time=start,
+            end_time=end,
+            event_type=event_type,
+            topic=topic,
+            limit=1,
+            offset=0,
+        )
+        _, count_sql, parameters = self._build_time_range_query(
+            query_params, count_only=True
+        )
+
+        rows = await self._execute_query(
+            sql=count_sql,
+            parameters=parameters,
+            operation="ledger.query.count",
+            correlation_id=correlation_id,
+        )
+        if rows and rows[0].get("total") is not None:
+            return int(str(rows[0]["total"]))
+        return 0
+
+    def _build_time_range_query(
+        self,
+        query: ModelLedgerQuery,
+        count_only: bool = False,
+    ) -> tuple[str, str, list[object]]:
+        """Build dynamic SQL for time range query with optional filters.
+
+        Args:
+            query: Query parameters including start_time, end_time, filters, pagination.
+            count_only: If True, don't add limit/offset to parameters.
+
+        Returns:
+            Tuple of (query_sql, count_sql, parameters).
+        """
+        # Start with base parameters (start_time and end_time are required for this path)
+        parameters: list[object] = [query.start_time, query.end_time]
+        param_index = 3  # $1 and $2 are start/end
+
+        # Build WHERE clause additions
+        where_additions: list[str] = []
+
+        if query.event_type is not None:
+            where_additions.append(f"AND event_type = ${param_index}")
+            parameters.append(query.event_type)
+            param_index += 1
+
+        if query.topic is not None:
+            where_additions.append(f"AND topic = ${param_index}")
+            parameters.append(query.topic)
+            param_index += 1
+
+        # Build final SQL
+        where_clause = " ".join(where_additions)
+
+        # Query SQL with ordering and pagination
+        query_sql = (
+            _SQL_QUERY_BY_TIME_RANGE_BASE
+            + where_clause
+            + f"""
+            ORDER BY COALESCE(event_timestamp, ledger_written_at) DESC
+            LIMIT ${param_index}
+            OFFSET ${param_index + 1}
+            """
+        )
+
+        # Count SQL without ordering/pagination
+        count_sql = _SQL_COUNT_BY_TIME_RANGE_BASE + where_clause
+
+        if not count_only:
+            parameters.extend([query.limit, query.offset])
+
+        return query_sql, count_sql, parameters
+
+    def _row_to_entry(self, row: dict[str, object]) -> ModelLedgerEntry:
+        """Convert a database row to ModelLedgerEntry.
+
+        The row comes from HandlerDb which returns dict[str, object].
+        event_key and event_value are already base64-encoded via SQL encode().
+
+        Raises:
+            RuntimeHostError: If ledger_written_at is not a datetime (data corruption).
+        """
+        # Extract ledger_written_at which is guaranteed to exist
+        ledger_written_at_raw = row["ledger_written_at"]
+        if not isinstance(ledger_written_at_raw, datetime):
+            # This should never happen for valid ledger entries - indicates data corruption
+            ctx = ModelInfraErrorContext.with_correlation(
+                transport_type=EnumInfraTransportType.DATABASE,
+                operation="ledger.query.row_to_entry",
+            )
+            raise RuntimeHostError(
+                f"Data integrity error: ledger_written_at must be datetime, got {type(ledger_written_at_raw).__name__}",
+                context=ctx,
+            )
+
+        return ModelLedgerEntry(
+            ledger_entry_id=UUID(str(row["ledger_entry_id"])),
+            topic=str(row["topic"]),
+            partition=int(str(row["partition"])),
+            kafka_offset=int(str(row["kafka_offset"])),
+            event_key=str(row["event_key"]) if row["event_key"] else None,
+            event_value=str(row["event_value"]),
+            onex_headers=row["onex_headers"]
+            if isinstance(row["onex_headers"], dict)
+            else {},
+            envelope_id=UUID(str(row["envelope_id"])) if row["envelope_id"] else None,
+            correlation_id=UUID(str(row["correlation_id"]))
+            if row["correlation_id"]
+            else None,
+            event_type=str(row["event_type"]) if row["event_type"] else None,
+            source=str(row["source"]) if row["source"] else None,
+            event_timestamp=row["event_timestamp"]
+            if isinstance(row["event_timestamp"], datetime)
+            else None,
+            ledger_written_at=ledger_written_at_raw,
+        )
+
+
+__all__ = ["HandlerLedgerQuery"]
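
For orientation, a minimal usage sketch of the handler added above. It assumes an already-constructed ONEX container and an initialized HandlerDb obtained elsewhere; the run_example wrapper is illustrative and not part of this package.

    # Sketch only: container and db_handler construction is assumed to happen elsewhere.
    from uuid import UUID, uuid4

    from omnibase_infra.nodes.node_ledger_write_effect.handlers.handler_ledger_query import (
        HandlerLedgerQuery,
    )


    async def run_example(container, db_handler, corr_id: UUID):
        handler = HandlerLedgerQuery(container, db_handler)
        await handler.initialize({})

        # Typed interface: entries for one correlation_id.
        entries = await handler.query_by_correlation_id(corr_id, limit=50)

        # Envelope interface: the same query routed through execute().
        output = await handler.execute(
            {
                "operation": "ledger.query",
                "payload": {"correlation_id": str(corr_id), "limit": 50, "offset": 0},
                "correlation_id": str(uuid4()),
            }
        )
        return entries, output.result

Both paths share validation, pagination defaults, and error mapping, which is the rationale the module docstring gives for keeping a single handler with internal routing.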
omnibase_infra/nodes/node_ledger_write_effect/models/__init__.py
@@ -0,0 +1,31 @@
+"""Ledger write effect node models.
+
+This package contains Pydantic models for the ledger write effect node,
+which handles persistent storage of events to the event ledger.
+
+Models:
+    ModelLedgerEntry: Single ledger entry representing one event
+    ModelLedgerAppendResult: Result of a ledger write operation
+    ModelLedgerQuery: Query parameters for ledger searches
+    ModelLedgerQueryResult: Result of a ledger query operation
+"""
+
+from omnibase_infra.nodes.node_ledger_write_effect.models.model_ledger_append_result import (
+    ModelLedgerAppendResult,
+)
+from omnibase_infra.nodes.node_ledger_write_effect.models.model_ledger_entry import (
+    ModelLedgerEntry,
+)
+from omnibase_infra.nodes.node_ledger_write_effect.models.model_ledger_query import (
+    ModelLedgerQuery,
+)
+from omnibase_infra.nodes.node_ledger_write_effect.models.model_ledger_query_result import (
+    ModelLedgerQueryResult,
+)
+
+__all__ = [
+    "ModelLedgerAppendResult",
+    "ModelLedgerEntry",
+    "ModelLedgerQuery",
+    "ModelLedgerQueryResult",
+]
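
A short, hedged example of using these package-level exports; the field names mirror those used by HandlerLedgerQuery above, and any additional validators on ModelLedgerQuery are not visible in this diff.

    # Sketch only: assumes ModelLedgerQuery accepts exactly the fields the handler passes.
    from datetime import UTC, datetime, timedelta

    from omnibase_infra.nodes.node_ledger_write_effect.models import ModelLedgerQuery

    last_hour = ModelLedgerQuery(
        start_time=datetime.now(UTC) - timedelta(hours=1),
        end_time=datetime.now(UTC),
        event_type="NodeRegistered",  # optional filter
        topic=None,                   # no topic filter
        limit=100,
        offset=0,
    )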
omnibase_infra/nodes/node_ledger_write_effect/models/model_ledger_append_result.py
@@ -0,0 +1,54 @@
+"""Ledger append result model for write operation outcomes.
+
+This module defines the result structure returned after attempting
+to append an event to the ledger.
+"""
+
+from uuid import UUID
+
+from pydantic import BaseModel, ConfigDict, Field
+
+
+class ModelLedgerAppendResult(BaseModel):
+    """Result of a ledger append operation.
+
+    This model captures the outcome of attempting to write an event
+    to the ledger, including handling of duplicate detection via
+    the (topic, partition, kafka_offset) unique constraint.
+
+    The duplicate flag indicates when ON CONFLICT DO NOTHING was
+    triggered, meaning the event was already in the ledger. This
+    is not an error condition - it enables idempotent replay.
+    """
+
+    model_config = ConfigDict(frozen=True, extra="forbid")
+
+    success: bool = Field(
+        ...,
+        description="Whether the append operation completed without error",
+    )
+    ledger_entry_id: UUID | None = Field(
+        default=None,
+        description="ID of the created entry, None if duplicate",
+    )
+    duplicate: bool = Field(
+        default=False,
+        description="True if ON CONFLICT DO NOTHING matched existing entry",
+    )
+
+    # Kafka position that was attempted
+    topic: str = Field(
+        ...,
+        min_length=1,
+        description="Kafka topic of the event",
+    )
+    partition: int = Field(
+        ...,
+        ge=0,
+        description="Kafka partition number",
+    )
+    kafka_offset: int = Field(
+        ...,
+        ge=0,
+        description="Kafka offset within the partition",
+    )
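
To illustrate the duplicate semantics described in the docstring, a hedged sketch of how a caller might interpret this model; the append path itself (handler_ledger_append.py) is not reproduced in this section, and the topic name below is hypothetical.

    # Sketch only: in practice this model is produced by the append handler,
    # not constructed by hand.
    from omnibase_infra.nodes.node_ledger_write_effect.models import ModelLedgerAppendResult

    result = ModelLedgerAppendResult(
        success=True,
        ledger_entry_id=None,  # None because the row already existed
        duplicate=True,        # ON CONFLICT DO NOTHING matched (topic, partition, kafka_offset)
        topic="example.topic",  # hypothetical topic name
        partition=0,
        kafka_offset=42,
    )

    if result.duplicate:
        # Idempotent replay: the event was already in the ledger, so this is not an error.
        pass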