omnibase_infra 0.2.7__py3-none-any.whl → 0.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. omnibase_infra/__init__.py +1 -1
  2. omnibase_infra/enums/__init__.py +4 -0
  3. omnibase_infra/enums/enum_declarative_node_violation.py +102 -0
  4. omnibase_infra/event_bus/adapters/__init__.py +31 -0
  5. omnibase_infra/event_bus/adapters/adapter_protocol_event_publisher_kafka.py +517 -0
  6. omnibase_infra/mixins/mixin_async_circuit_breaker.py +113 -1
  7. omnibase_infra/models/__init__.py +9 -0
  8. omnibase_infra/models/event_bus/__init__.py +22 -0
  9. omnibase_infra/models/event_bus/model_consumer_retry_config.py +367 -0
  10. omnibase_infra/models/event_bus/model_dlq_config.py +177 -0
  11. omnibase_infra/models/event_bus/model_idempotency_config.py +131 -0
  12. omnibase_infra/models/event_bus/model_offset_policy_config.py +107 -0
  13. omnibase_infra/models/resilience/model_circuit_breaker_config.py +15 -0
  14. omnibase_infra/models/validation/__init__.py +8 -0
  15. omnibase_infra/models/validation/model_declarative_node_validation_result.py +139 -0
  16. omnibase_infra/models/validation/model_declarative_node_violation.py +169 -0
  17. omnibase_infra/nodes/architecture_validator/__init__.py +28 -7
  18. omnibase_infra/nodes/architecture_validator/constants.py +36 -0
  19. omnibase_infra/nodes/architecture_validator/handlers/__init__.py +28 -0
  20. omnibase_infra/nodes/architecture_validator/handlers/contract.yaml +120 -0
  21. omnibase_infra/nodes/architecture_validator/handlers/handler_architecture_validation.py +359 -0
  22. omnibase_infra/nodes/architecture_validator/node.py +1 -0
  23. omnibase_infra/nodes/architecture_validator/node_architecture_validator.py +48 -336
  24. omnibase_infra/nodes/node_ledger_projection_compute/__init__.py +16 -2
  25. omnibase_infra/nodes/node_ledger_projection_compute/contract.yaml +14 -4
  26. omnibase_infra/nodes/node_ledger_projection_compute/handlers/__init__.py +18 -0
  27. omnibase_infra/nodes/node_ledger_projection_compute/handlers/contract.yaml +53 -0
  28. omnibase_infra/nodes/node_ledger_projection_compute/handlers/handler_ledger_projection.py +354 -0
  29. omnibase_infra/nodes/node_ledger_projection_compute/node.py +20 -256
  30. omnibase_infra/nodes/node_registry_effect/node.py +20 -73
  31. omnibase_infra/protocols/protocol_dispatch_engine.py +90 -0
  32. omnibase_infra/runtime/__init__.py +11 -0
  33. omnibase_infra/runtime/baseline_subscriptions.py +150 -0
  34. omnibase_infra/runtime/event_bus_subcontract_wiring.py +455 -24
  35. omnibase_infra/runtime/kafka_contract_source.py +13 -5
  36. omnibase_infra/runtime/service_message_dispatch_engine.py +112 -0
  37. omnibase_infra/runtime/service_runtime_host_process.py +6 -11
  38. omnibase_infra/services/__init__.py +36 -0
  39. omnibase_infra/services/contract_publisher/__init__.py +95 -0
  40. omnibase_infra/services/contract_publisher/config.py +199 -0
  41. omnibase_infra/services/contract_publisher/errors.py +243 -0
  42. omnibase_infra/services/contract_publisher/models/__init__.py +28 -0
  43. omnibase_infra/services/contract_publisher/models/model_contract_error.py +67 -0
  44. omnibase_infra/services/contract_publisher/models/model_infra_error.py +62 -0
  45. omnibase_infra/services/contract_publisher/models/model_publish_result.py +112 -0
  46. omnibase_infra/services/contract_publisher/models/model_publish_stats.py +79 -0
  47. omnibase_infra/services/contract_publisher/service.py +617 -0
  48. omnibase_infra/services/contract_publisher/sources/__init__.py +52 -0
  49. omnibase_infra/services/contract_publisher/sources/model_discovered.py +155 -0
  50. omnibase_infra/services/contract_publisher/sources/protocol.py +101 -0
  51. omnibase_infra/services/contract_publisher/sources/source_composite.py +309 -0
  52. omnibase_infra/services/contract_publisher/sources/source_filesystem.py +174 -0
  53. omnibase_infra/services/contract_publisher/sources/source_package.py +221 -0
  54. omnibase_infra/services/observability/__init__.py +40 -0
  55. omnibase_infra/services/observability/agent_actions/__init__.py +64 -0
  56. omnibase_infra/services/observability/agent_actions/config.py +209 -0
  57. omnibase_infra/services/observability/agent_actions/consumer.py +1320 -0
  58. omnibase_infra/services/observability/agent_actions/models/__init__.py +87 -0
  59. omnibase_infra/services/observability/agent_actions/models/model_agent_action.py +142 -0
  60. omnibase_infra/services/observability/agent_actions/models/model_detection_failure.py +125 -0
  61. omnibase_infra/services/observability/agent_actions/models/model_envelope.py +85 -0
  62. omnibase_infra/services/observability/agent_actions/models/model_execution_log.py +159 -0
  63. omnibase_infra/services/observability/agent_actions/models/model_performance_metric.py +130 -0
  64. omnibase_infra/services/observability/agent_actions/models/model_routing_decision.py +138 -0
  65. omnibase_infra/services/observability/agent_actions/models/model_transformation_event.py +124 -0
  66. omnibase_infra/services/observability/agent_actions/tests/__init__.py +20 -0
  67. omnibase_infra/services/observability/agent_actions/tests/test_consumer.py +1154 -0
  68. omnibase_infra/services/observability/agent_actions/tests/test_models.py +645 -0
  69. omnibase_infra/services/observability/agent_actions/tests/test_writer.py +709 -0
  70. omnibase_infra/services/observability/agent_actions/writer_postgres.py +926 -0
  71. omnibase_infra/validation/__init__.py +12 -0
  72. omnibase_infra/validation/contracts/declarative_node.validation.yaml +143 -0
  73. omnibase_infra/validation/validation_exemptions.yaml +93 -0
  74. omnibase_infra/validation/validator_declarative_node.py +850 -0
  75. {omnibase_infra-0.2.7.dist-info → omnibase_infra-0.2.9.dist-info}/METADATA +3 -3
  76. {omnibase_infra-0.2.7.dist-info → omnibase_infra-0.2.9.dist-info}/RECORD +79 -27
  77. {omnibase_infra-0.2.7.dist-info → omnibase_infra-0.2.9.dist-info}/WHEEL +0 -0
  78. {omnibase_infra-0.2.7.dist-info → omnibase_infra-0.2.9.dist-info}/entry_points.txt +0 -0
  79. {omnibase_infra-0.2.7.dist-info → omnibase_infra-0.2.9.dist-info}/licenses/LICENSE +0 -0
omnibase_infra/models/event_bus/model_dlq_config.py
@@ -0,0 +1,177 @@
+ # SPDX-FileCopyrightText: 2025 OmniNode Team <info@omninode.ai>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+ """Dead Letter Queue configuration model for event bus message handling.
+
+ This module provides the configuration model for DLQ behavior in event bus
+ consumers. The DLQ routes messages that fail processing to a dead letter
+ topic for later analysis, retry, or manual intervention.
+
+ Error Classification:
+     The DLQ configuration distinguishes between two error categories:
+
+     Content Errors (non-retryable):
+         Schema validation failures, malformed payloads, missing required fields,
+         type conversion errors. These errors will NOT fix themselves with retry.
+         Default behavior: Send to DLQ and commit offset (dlq_and_commit).
+
+     Infrastructure Errors (potentially retryable):
+         Database timeouts, network failures, service unavailability.
+         These errors MAY fix themselves after retry budget exhaustion.
+         Default behavior: Fail fast (fail_fast) to avoid hiding infrastructure
+         fires in the DLQ.
+
+ Topic Naming Convention:
+     When topic is empty string, the consumer builds a DLQ topic name
+     following ONEX conventions: {env}.dlq.{original_topic}.v{schema_major}
+
+     Examples:
+         - dev.dlq.orders.created.v1
+         - prod.dlq.payments.processed.v1
+         - staging.dlq.users.registered.v1
+
+ See Also:
+     - MixinKafkaDlq: DLQ publishing implementation
+     - ModelDlqEvent: Individual DLQ event model for callbacks
+     - docs/architecture/DLQ_MESSAGE_FORMAT.md: DLQ message structure
+ """
+
+ from __future__ import annotations
+
+ from typing import Literal
+
+ from pydantic import BaseModel, ConfigDict, Field
+
+
+ class ModelDlqConfig(BaseModel):
+     """Dead Letter Queue (DLQ) configuration.
+
+     Controls how failed messages are routed to DLQ topics. Supports
+     differentiated handling of content errors (non-retryable) versus
+     infrastructure errors (potentially retryable).
+
+     Attributes:
+         enabled: Whether DLQ publishing is enabled for failed messages.
+             When False, failed messages are dropped (logged only).
+             Default: True.
+         topic: DLQ topic name. Empty string triggers convention-based
+             topic naming: {env}.dlq.{original_topic}.v{schema_major}.
+             Non-empty value overrides the convention with explicit topic.
+             Default: "" (use convention).
+         on_content_error: Action when a content/schema error occurs.
+             Content errors (schema validation, malformed payload) are
+             non-retryable - they will never succeed with retry.
+             - "dlq_and_commit": Publish to DLQ and commit offset (default)
+             - "fail_fast": Raise immediately, do not commit
+             Default: "dlq_and_commit".
+         on_infra_exhausted: Action when retry budget exhausted for
+             infrastructure errors. Infrastructure errors (DB timeout,
+             network failure) may fix themselves, but infra owns plumbing.
+             - "dlq_and_commit": Publish to DLQ and commit offset
+             - "fail_fast": Raise immediately, do not commit (default)
+             Default: "fail_fast".
+
+     Example:
+         ```python
+         from omnibase_infra.models.event_bus import ModelDlqConfig
+
+         # Production configuration (explicit topic, fail-fast for infra)
+         config = ModelDlqConfig(
+             enabled=True,
+             topic="",  # Use convention-based naming
+             on_content_error="dlq_and_commit",
+             on_infra_exhausted="fail_fast",
+         )
+
+         # Development configuration (catch everything in DLQ)
+         dev_config = ModelDlqConfig(
+             enabled=True,
+             topic="dev.dlq.catch-all.v1",
+             on_content_error="dlq_and_commit",
+             on_infra_exhausted="dlq_and_commit",
+         )
+
+         # Disabled DLQ (for testing or specific use cases)
+         disabled = ModelDlqConfig(enabled=False)
+         ```
+
+     Configuration Guidelines:
+         - Enable DLQ for all production consumers to capture failures
+         - Use "fail_fast" for on_infra_exhausted to surface infrastructure
+           issues immediately rather than hiding them in DLQ
+         - Use "dlq_and_commit" for on_content_error since content errors
+           will never self-heal with retry
+         - Set explicit topic only when you need multiple consumers to
+           share a DLQ or when convention doesn't fit
+
+     Design Rationale:
+         Default on_content_error = "dlq_and_commit":
+             Content errors (bad schema, malformed JSON) will never fix
+             themselves. Retrying is pointless. Send to DLQ for human
+             review and continue processing other messages.
+
+         Default on_infra_exhausted = "fail_fast":
+             Infrastructure owns the plumbing. If the database is down,
+             that's an infrastructure fire that should be surfaced
+             immediately - not hidden in a DLQ. The operations team needs
+             to know about infrastructure failures, not discover them
+             later in a DLQ audit.
+
+     See Also:
+         MixinKafkaDlq: Implementation of DLQ publishing behavior.
+     """
+
+     model_config = ConfigDict(
+         frozen=True,
+         extra="forbid",
+         json_schema_extra={
+             "examples": [
+                 {
+                     "enabled": True,
+                     "topic": "",
+                     "on_content_error": "dlq_and_commit",
+                     "on_infra_exhausted": "fail_fast",
+                 },
+                 {
+                     "enabled": True,
+                     "topic": "prod.dlq.orders.v1",
+                     "on_content_error": "dlq_and_commit",
+                     "on_infra_exhausted": "dlq_and_commit",
+                 },
+             ]
+         },
+     )
+
+     enabled: bool = Field(
+         default=True,
+         description="Enable DLQ publishing for failed messages",
+     )
+
+     topic: str = Field(
+         default="",
+         description=(
+             "DLQ topic name. Empty string uses convention-based naming: "
+             "{env}.dlq.{original_topic}.v{schema_major}"
+         ),
+     )
+
+     on_content_error: Literal["dlq_and_commit", "fail_fast"] = Field(
+         default="dlq_and_commit",
+         description=(
+             "Action on content/schema errors (non-retryable). "
+             "'dlq_and_commit' publishes to DLQ and commits offset. "
+             "'fail_fast' raises immediately without committing."
+         ),
+     )
+
+     on_infra_exhausted: Literal["dlq_and_commit", "fail_fast"] = Field(
+         default="fail_fast",
+         description=(
+             "Action when retry budget exhausted for infrastructure errors. "
+             "'fail_fast' surfaces infrastructure issues immediately. "
+             "'dlq_and_commit' publishes to DLQ and commits offset."
+         ),
+     )
+
+
+ __all__ = ["ModelDlqConfig"]
omnibase_infra/models/event_bus/model_idempotency_config.py
@@ -0,0 +1,131 @@
+ # SPDX-FileCopyrightText: 2025 OmniNode Team <info@omninode.ai>
+ #
+ # SPDX-License-Identifier: Apache-2.0
+ """Idempotency configuration model for event bus message consumption.
+
+ This module provides the configuration model for idempotency behavior in
+ event bus consumers. When enabled, consumers deduplicate messages based on
+ the `envelope_id` field from the event envelope using an INSERT ON CONFLICT
+ DO NOTHING pattern.
+
+ Idempotency Overview:
+     Idempotency ensures that processing the same message multiple times
+     produces the same result as processing it once. This is critical in
+     distributed systems where message delivery can be at-least-once.
+
+     The idempotency store tracks processed `envelope_id` values:
+     - On first encounter: Record envelope_id, process message
+     - On duplicate: Skip processing (already recorded)
+     - After retention period: Prune old records to limit storage
+
+ Store Types:
+     - postgres: Production-grade persistent storage using INSERT ON CONFLICT
+     - memory: In-memory store for testing only (data lost on restart)
+
+ See Also:
+     - EventBusSubcontractWiring: Uses this configuration for consumer setup
+     - docs/patterns/idempotency_patterns.md: Implementation details
+ """
+
+ from __future__ import annotations
+
+ from typing import Literal
+
+ from pydantic import BaseModel, ConfigDict, Field
+
+
+ class ModelIdempotencyConfig(BaseModel):
+     """Idempotency configuration for message consumption.
+
+     When enabled, the consumer deduplicates messages based on the `envelope_id`
+     field from the event envelope. The deduplication key is always `envelope_id`
+     (not configurable) to ensure consistent behavior across all consumers.
+
+     Attributes:
+         enabled: Whether to enable idempotency checking. When False, all
+             messages are processed regardless of prior processing.
+             Default: False.
+         store_type: Backend for storing processed envelope IDs.
+             - "postgres": Production-grade, uses INSERT ON CONFLICT DO NOTHING
+             - "memory": In-memory store for testing only
+             Default: "postgres".
+         retention_days: Number of days to retain processed envelope IDs before
+             cleanup. Longer retention uses more storage but provides stronger
+             deduplication guarantees for delayed retries.
+             Must be between 1 and 90 days. Default: 7.
+
+     Example:
+         ```python
+         from omnibase_infra.models.event_bus import ModelIdempotencyConfig
+
+         # Production configuration
+         config = ModelIdempotencyConfig(
+             enabled=True,
+             store_type="postgres",
+             retention_days=14,
+         )
+
+         # Testing configuration
+         test_config = ModelIdempotencyConfig(
+             enabled=True,
+             store_type="memory",
+             retention_days=1,
+         )
+
+         # Disabled (default behavior)
+         disabled = ModelIdempotencyConfig()  # enabled=False
+         ```
+
+     Configuration Guidelines:
+         - Enable idempotency for all consumers processing side-effecting events
+         - Use "postgres" store_type in production for durability
+         - Set retention_days based on maximum expected retry window
+         - For high-throughput topics, consider shorter retention to reduce storage
+
+     Note:
+         The deduplication key is always the `envelope_id` field from the event
+         envelope. This is intentionally not configurable to ensure consistent
+         behavior and prevent misconfiguration.
+
+     See Also:
+         EventBusSubcontractWiring: Consumer configuration that uses this model.
+     """
+
+     model_config = ConfigDict(
+         frozen=True,
+         extra="forbid",
+         json_schema_extra={
+             "examples": [
+                 {
+                     "enabled": True,
+                     "store_type": "postgres",
+                     "retention_days": 7,
+                 },
+                 {
+                     "enabled": True,
+                     "store_type": "memory",
+                     "retention_days": 1,
+                 },
+             ]
+         },
+     )
+
+     enabled: bool = Field(
+         default=False,
+         description="Enable idempotency checking for message deduplication",
+     )
+
+     store_type: Literal["postgres", "memory"] = Field(
+         default="postgres",
+         description="Idempotency store backend. 'memory' is for testing only.",
+     )
+
+     retention_days: int = Field(
+         default=7,
+         ge=1,
+         le=90,
+         description="Days to retain processed envelope IDs before cleanup",
+     )
+
+
+ __all__ = ["ModelIdempotencyConfig"]
omnibase_infra/models/event_bus/model_offset_policy_config.py
@@ -0,0 +1,107 @@
+ # Copyright 2025 OmniNode Team. All rights reserved.
+ # SPDX-License-Identifier: Apache-2.0
+ """Kafka offset commit policy configuration model.
+
+ This module defines the configuration for Kafka consumer offset commit strategies,
+ controlling when offsets are committed relative to handler execution.
+
+ Delivery Semantics:
+     - At-least-once (default): Offsets committed AFTER successful handler execution.
+       Messages may be redelivered on failure, requiring idempotent handlers.
+     - At-most-once: Offsets committed BEFORE handler execution.
+       Messages may be lost on failure, but never processed twice.
+     - Manual: Explicit offset control for complex transaction scenarios.
+
+ Design Decision:
+     The default is `commit_after_handler` (at-least-once) because:
+     1. Message loss is typically worse than duplicate processing
+     2. Idempotency can be enforced at the handler level (via idempotency keys)
+     3. This aligns with Kafka best practices for reliable message processing
+
+ See Also:
+     - ModelIdempotencyConfig: Pairs with at-least-once for duplicate detection
+     - docs/patterns/kafka_delivery_semantics.md: Full delivery guarantee documentation
+ """
+
+ from __future__ import annotations
+
+ from typing import Literal
+
+ from pydantic import BaseModel, ConfigDict, Field
+
+
+ class ModelOffsetPolicyConfig(BaseModel):
+     """Kafka offset commit policy configuration.
+
+     Controls when consumer offsets are committed relative to handler execution.
+     Default is 'commit_after_handler' for at-least-once delivery semantics.
+
+     Attributes:
+         commit_strategy: When to commit Kafka offsets relative to handler execution.
+             - "commit_after_handler": At-least-once delivery (default, safe).
+               Offsets committed only after successful handler completion.
+               Messages may be redelivered on failure - handlers must be idempotent.
+             - "commit_before_handler": At-most-once delivery (may lose messages).
+               Offsets committed before handler execution begins.
+               Suitable only when message loss is acceptable.
+             - "manual": Explicit offset control via handler callback.
+               For complex transactional scenarios requiring precise control.
+
+     Example:
+         ```python
+         from omnibase_infra.models.event_bus import ModelOffsetPolicyConfig
+
+         # Default: at-least-once (recommended)
+         config = ModelOffsetPolicyConfig()
+         assert config.commit_strategy == "commit_after_handler"
+
+         # Explicit at-most-once (use with caution)
+         config = ModelOffsetPolicyConfig(commit_strategy="commit_before_handler")
+
+         # Manual control for transactions
+         config = ModelOffsetPolicyConfig(commit_strategy="manual")
+         ```
+
+     Warning:
+         Using "commit_before_handler" may result in message loss if the handler
+         fails after offset commit. Only use when message loss is acceptable
+         (e.g., metrics, non-critical logs).
+
+     See Also:
+         ModelIdempotencyConfig: For duplicate detection with at-least-once delivery.
+     """
+
+     commit_strategy: Literal[
+         "commit_after_handler",
+         "commit_before_handler",
+         "manual",
+     ] = Field(
+         default="commit_after_handler",
+         description=(
+             "When to commit Kafka offsets relative to handler execution. "
+             "'commit_after_handler' provides at-least-once delivery (default, safe). "
+             "'commit_before_handler' provides at-most-once delivery (may lose messages). "
+             "'manual' provides explicit control for transactional scenarios."
+         ),
+     )
+
+     model_config = ConfigDict(
+         frozen=True,
+         extra="forbid",
+         json_schema_extra={
+             "examples": [
+                 {
+                     "commit_strategy": "commit_after_handler",
+                 },
+                 {
+                     "commit_strategy": "commit_before_handler",
+                 },
+                 {
+                     "commit_strategy": "manual",
+                 },
+             ]
+         },
+     )
+
+
+ __all__ = ["ModelOffsetPolicyConfig"]
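
The three commit strategies reduce to an ordering decision around the handler call. The sketch below is not the package's dispatch engine; the `CommittableConsumer` protocol and `dispatch` function are assumptions used to show how the `commit_strategy` values documented above map onto commit ordering.

```python
from typing import Callable, Protocol

from omnibase_infra.models.event_bus import ModelOffsetPolicyConfig


class CommittableConsumer(Protocol):
    """Minimal consumer surface assumed for this sketch (not the package API)."""

    def commit(self) -> None: ...


def dispatch(
    consumer: CommittableConsumer,
    handler: Callable[[bytes], None],
    payload: bytes,
    policy: ModelOffsetPolicyConfig,
) -> None:
    """Order the offset commit around the handler according to the policy."""
    if policy.commit_strategy == "commit_before_handler":
        consumer.commit()   # at-most-once: a handler failure loses the message
        handler(payload)
    elif policy.commit_strategy == "commit_after_handler":
        handler(payload)    # at-least-once: failure before commit causes redelivery
        consumer.commit()
    else:  # "manual": offset commit is left to the handler or its caller
        handler(payload)
```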
omnibase_infra/models/resilience/model_circuit_breaker_config.py
@@ -58,6 +58,9 @@ class ModelCircuitBreakerConfig(BaseModel):
          transport_type: Transport type for error context classification.
              Determines which error code is used when the circuit opens.
              Default: HTTP.
+         half_open_successes: Number of successful requests required to close
+             the circuit from half-open state. Higher values provide more
+             confidence before closing. Must be >= 1 and <= 10. Default: 1.

      Example:
          ```python
@@ -114,6 +117,16 @@ class ModelCircuitBreakerConfig(BaseModel):
          description="Transport type for error context classification",
      )

+     half_open_successes: int = Field(
+         default=1,
+         ge=1,
+         le=10,
+         description=(
+             "Number of successful requests required to close the circuit from "
+             "half-open state. Higher values provide more confidence before closing."
+         ),
+     )
+
      model_config = ConfigDict(
          frozen=True,
          extra="forbid",
@@ -124,12 +137,14 @@ class ModelCircuitBreakerConfig(BaseModel):
                      "reset_timeout_seconds": 60.0,
                      "service_name": "kafka.production",
                      "transport_type": "kafka",
+                     "half_open_successes": 1,
                  },
                  {
                      "threshold": 3,
                      "reset_timeout_seconds": 120.0,
                      "service_name": "postgresql-primary",
                      "transport_type": "db",
+                     "half_open_successes": 2,
                  },
              ]
          },
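
The new `half_open_successes` field implies a success counter in the half-open state. The gate below is an illustrative sketch of that rule, not the code in MixinAsyncCircuitBreaker (which this diff does not show): the circuit closes only after the configured number of consecutive successes, and any failure resets the count.

```python
class HalfOpenGate:
    """Counts consecutive successes while the circuit is half-open."""

    def __init__(self, half_open_successes: int) -> None:
        self._required = half_open_successes
        self._consecutive = 0

    def record_success(self) -> bool:
        """Return True once enough successes have accumulated to close the circuit."""
        self._consecutive += 1
        return self._consecutive >= self._required

    def record_failure(self) -> None:
        """A failure in half-open state re-opens the circuit; reset the counter."""
        self._consecutive = 0


gate = HalfOpenGate(half_open_successes=2)
assert gate.record_success() is False  # 1 of 2
assert gate.record_success() is True   # threshold reached: the circuit can close
```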
omnibase_infra/models/validation/__init__.py
@@ -39,6 +39,12 @@ from omnibase_infra.models.validation.model_chain_violation import ModelChainVio
  from omnibase_infra.models.validation.model_coverage_metrics import (
      ModelCoverageMetrics,
  )
+ from omnibase_infra.models.validation.model_declarative_node_validation_result import (
+     ModelDeclarativeNodeValidationResult,
+ )
+ from omnibase_infra.models.validation.model_declarative_node_violation import (
+     ModelDeclarativeNodeViolation,
+ )
  from omnibase_infra.models.validation.model_execution_shape_rule import (
      ModelExecutionShapeRule,
  )
@@ -74,6 +80,8 @@ __all__ = [
      "ModelAnyTypeValidationResult",
      "ModelAnyTypeViolation",
      "ModelCategoryMatchResult",
+     "ModelDeclarativeNodeValidationResult",
+     "ModelDeclarativeNodeViolation",
      "ModelChainViolation",
      "ModelCoverageMetrics",
      "ModelExecutionShapeRule",
omnibase_infra/models/validation/model_declarative_node_validation_result.py
@@ -0,0 +1,139 @@
+ # SPDX-License-Identifier: MIT
+ # Copyright (c) 2025 OmniNode Team
+ """Declarative Node Validation Result Model.
+
+ Defines the aggregate result structure for declarative node validation operations.
+ Used by the validator to provide a structured result for CI pipeline integration
+ with convenience methods for reporting.
+ """
+
+ from __future__ import annotations
+
+ from pydantic import BaseModel, ConfigDict, Field
+
+ from omnibase_infra.models.validation.model_declarative_node_violation import (
+     ModelDeclarativeNodeViolation,
+ )
+
+
+ class ModelDeclarativeNodeValidationResult(BaseModel):
+     """Aggregate result of declarative node validation.
+
+     Provides a structured result for CI pipeline integration with
+     convenience methods for reporting.
+
+     Attributes:
+         passed: True if no blocking violations found.
+         violations: List of all detected violations.
+         files_checked: Number of node.py files that were validated.
+         total_violations: Total count of violations.
+         blocking_count: Count of blocking (error severity) violations.
+         imperative_nodes: List of node class names that are imperative.
+
+     Example:
+         >>> result = ModelDeclarativeNodeValidationResult.from_violations(
+         ...     violations=[violation1, violation2],
+         ...     files_checked=10,
+         ... )
+         >>> if not result.passed:
+         ...     print(result.format_summary())
+     """
+
+     passed: bool = Field(
+         ...,
+         description="True if no blocking violations found",
+     )
+     violations: list[ModelDeclarativeNodeViolation] = Field(
+         default_factory=list,
+         description="List of all detected violations",
+     )
+     files_checked: int = Field(
+         default=0,
+         ge=0,
+         description="Number of node.py files that were validated",
+     )
+     total_violations: int = Field(
+         default=0,
+         ge=0,
+         description="Total count of violations",
+     )
+     blocking_count: int = Field(
+         default=0,
+         ge=0,
+         description="Count of blocking (error severity) violations",
+     )
+     imperative_nodes: list[str] = Field(
+         default_factory=list,
+         description="List of node class names that are imperative (have violations)",
+     )
+
+     model_config = ConfigDict(
+         frozen=True,
+         extra="forbid",
+         strict=True,
+     )
+
+     def __bool__(self) -> bool:
+         """Allow boolean context: if result: ...
+
+         Warning:
+             Non-standard __bool__: Returns True only when passed is True.
+             This differs from Pydantic default where bool(model) always returns True.
+         """
+         return self.passed
+
+     @classmethod
+     def from_violations(
+         cls,
+         violations: list[ModelDeclarativeNodeViolation],
+         files_checked: int = 0,
+     ) -> ModelDeclarativeNodeValidationResult:
+         """Create a result from a list of violations.
+
+         Args:
+             violations: List of detected violations.
+             files_checked: Number of node.py files that were checked.
+
+         Returns:
+             A ModelDeclarativeNodeValidationResult instance.
+         """
+         blocking_count = sum(1 for v in violations if v.is_blocking())
+         imperative_nodes = sorted(
+             {v.node_class_name for v in violations if v.node_class_name}
+         )
+         return cls(
+             passed=blocking_count == 0,
+             violations=violations,
+             files_checked=files_checked,
+             total_violations=len(violations),
+             blocking_count=blocking_count,
+             imperative_nodes=imperative_nodes,
+         )
+
+     def format_for_ci(self) -> list[str]:
+         """Format all violations for CI output.
+
+         Returns:
+             List of formatted strings for CI annotation.
+         """
+         return [v.format_for_ci() for v in self.violations]
+
+     def format_summary(self) -> str:
+         """Format a summary for console output.
+
+         Returns:
+             Summary string with pass/fail status and counts.
+         """
+         status = "PASSED" if self.passed else "FAILED"
+         summary = (
+             f"Declarative Node Validation: {status}\n"
+             f"  Files checked: {self.files_checked}\n"
+             f"  Total violations: {self.total_violations}\n"
+             f"  Blocking violations: {self.blocking_count}"
+         )
+         if self.imperative_nodes:
+             summary += f"\n  Imperative nodes: {', '.join(self.imperative_nodes)}"
+         return summary
+
+
+ __all__ = ["ModelDeclarativeNodeValidationResult"]
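
As a usage note, this result model is built for CI gating. The sketch below uses an empty violations list so it stands on its own; in practice the list would come from the declarative-node validator added in omnibase_infra/validation/validator_declarative_node.py, which is not shown in this section.

```python
import sys

from omnibase_infra.models.validation.model_declarative_node_validation_result import (
    ModelDeclarativeNodeValidationResult,
)

# Empty violations list keeps the example self-contained; files_checked is arbitrary.
result = ModelDeclarativeNodeValidationResult.from_violations(
    violations=[],
    files_checked=42,
)

print(result.format_summary())
for line in result.format_for_ci():
    print(line)

if not result:  # __bool__ mirrors `passed`, so the result gates the exit code
    sys.exit(1)
```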