ouroboros_ai-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ouroboros-ai might be problematic.

Files changed (81)
  1. ouroboros/__init__.py +15 -0
  2. ouroboros/__main__.py +9 -0
  3. ouroboros/bigbang/__init__.py +39 -0
  4. ouroboros/bigbang/ambiguity.py +464 -0
  5. ouroboros/bigbang/interview.py +530 -0
  6. ouroboros/bigbang/seed_generator.py +610 -0
  7. ouroboros/cli/__init__.py +9 -0
  8. ouroboros/cli/commands/__init__.py +7 -0
  9. ouroboros/cli/commands/config.py +79 -0
  10. ouroboros/cli/commands/init.py +425 -0
  11. ouroboros/cli/commands/run.py +201 -0
  12. ouroboros/cli/commands/status.py +85 -0
  13. ouroboros/cli/formatters/__init__.py +31 -0
  14. ouroboros/cli/formatters/panels.py +157 -0
  15. ouroboros/cli/formatters/progress.py +112 -0
  16. ouroboros/cli/formatters/tables.py +166 -0
  17. ouroboros/cli/main.py +60 -0
  18. ouroboros/config/__init__.py +81 -0
  19. ouroboros/config/loader.py +292 -0
  20. ouroboros/config/models.py +332 -0
  21. ouroboros/core/__init__.py +62 -0
  22. ouroboros/core/ac_tree.py +401 -0
  23. ouroboros/core/context.py +472 -0
  24. ouroboros/core/errors.py +246 -0
  25. ouroboros/core/seed.py +212 -0
  26. ouroboros/core/types.py +205 -0
  27. ouroboros/evaluation/__init__.py +110 -0
  28. ouroboros/evaluation/consensus.py +350 -0
  29. ouroboros/evaluation/mechanical.py +351 -0
  30. ouroboros/evaluation/models.py +235 -0
  31. ouroboros/evaluation/pipeline.py +286 -0
  32. ouroboros/evaluation/semantic.py +302 -0
  33. ouroboros/evaluation/trigger.py +278 -0
  34. ouroboros/events/__init__.py +5 -0
  35. ouroboros/events/base.py +80 -0
  36. ouroboros/events/decomposition.py +153 -0
  37. ouroboros/events/evaluation.py +248 -0
  38. ouroboros/execution/__init__.py +44 -0
  39. ouroboros/execution/atomicity.py +451 -0
  40. ouroboros/execution/decomposition.py +481 -0
  41. ouroboros/execution/double_diamond.py +1386 -0
  42. ouroboros/execution/subagent.py +275 -0
  43. ouroboros/observability/__init__.py +63 -0
  44. ouroboros/observability/drift.py +383 -0
  45. ouroboros/observability/logging.py +504 -0
  46. ouroboros/observability/retrospective.py +338 -0
  47. ouroboros/orchestrator/__init__.py +78 -0
  48. ouroboros/orchestrator/adapter.py +391 -0
  49. ouroboros/orchestrator/events.py +278 -0
  50. ouroboros/orchestrator/runner.py +597 -0
  51. ouroboros/orchestrator/session.py +486 -0
  52. ouroboros/persistence/__init__.py +23 -0
  53. ouroboros/persistence/checkpoint.py +511 -0
  54. ouroboros/persistence/event_store.py +183 -0
  55. ouroboros/persistence/migrations/__init__.py +1 -0
  56. ouroboros/persistence/migrations/runner.py +100 -0
  57. ouroboros/persistence/migrations/scripts/001_initial.sql +20 -0
  58. ouroboros/persistence/schema.py +56 -0
  59. ouroboros/persistence/uow.py +230 -0
  60. ouroboros/providers/__init__.py +28 -0
  61. ouroboros/providers/base.py +133 -0
  62. ouroboros/providers/claude_code_adapter.py +212 -0
  63. ouroboros/providers/litellm_adapter.py +316 -0
  64. ouroboros/py.typed +0 -0
  65. ouroboros/resilience/__init__.py +67 -0
  66. ouroboros/resilience/lateral.py +595 -0
  67. ouroboros/resilience/stagnation.py +727 -0
  68. ouroboros/routing/__init__.py +60 -0
  69. ouroboros/routing/complexity.py +272 -0
  70. ouroboros/routing/downgrade.py +664 -0
  71. ouroboros/routing/escalation.py +340 -0
  72. ouroboros/routing/router.py +204 -0
  73. ouroboros/routing/tiers.py +247 -0
  74. ouroboros/secondary/__init__.py +40 -0
  75. ouroboros/secondary/scheduler.py +467 -0
  76. ouroboros/secondary/todo_registry.py +483 -0
  77. ouroboros_ai-0.1.0.dist-info/METADATA +607 -0
  78. ouroboros_ai-0.1.0.dist-info/RECORD +81 -0
  79. ouroboros_ai-0.1.0.dist-info/WHEEL +4 -0
  80. ouroboros_ai-0.1.0.dist-info/entry_points.txt +2 -0
  81. ouroboros_ai-0.1.0.dist-info/licenses/LICENSE +21 -0
ouroboros/evaluation/trigger.py
@@ -0,0 +1,278 @@
+ """Consensus Trigger Matrix.
+
+ Implements the 6 trigger conditions for Stage 3 consensus:
+ 1. Seed Modification - Any change to immutable Seed
+ 2. Ontology Evolution - Schema changes
+ 3. Goal Interpretation - Reinterpretation of goal
+ 4. Seed Drift Alert - drift > 0.3
+ 5. Stage 2 Uncertainty - uncertainty > 0.3
+ 6. Lateral Thinking Adoption - Persona suggestion accepted
+
+ The ConsensusTrigger is stateless and returns trigger decisions.
+ """
+
+ from dataclasses import dataclass, field
+ from enum import StrEnum
+ from typing import Any
+
+ from ouroboros.core.errors import ValidationError
+ from ouroboros.core.types import Result
+ from ouroboros.evaluation.models import SemanticResult
+ from ouroboros.events.base import BaseEvent
+ from ouroboros.events.evaluation import create_consensus_triggered_event
+
+
+ class TriggerType(StrEnum):
+     """Types of consensus triggers.
+
+     FR16: Consensus Trigger Matrix - 6 trigger conditions
+     """
+
+     SEED_MODIFICATION = "seed_modification"
+     ONTOLOGY_EVOLUTION = "ontology_evolution"
+     GOAL_INTERPRETATION = "goal_interpretation"
+     SEED_DRIFT_ALERT = "seed_drift_alert"
+     STAGE2_UNCERTAINTY = "stage2_uncertainty"
+     LATERAL_THINKING_ADOPTION = "lateral_thinking_adoption"
+
+
+ @dataclass(frozen=True, slots=True)
+ class TriggerContext:
+     """Context for evaluating consensus triggers.
+
+     Attributes:
+         execution_id: Execution identifier
+         seed_modified: Whether seed was modified
+         ontology_changed: Whether ontology schema changed
+         goal_reinterpreted: Whether goal was reinterpreted
+         drift_score: Current drift score (0.0-1.0)
+         uncertainty_score: Stage 2 uncertainty (0.0-1.0)
+         lateral_thinking_adopted: Whether lateral thinking was adopted
+         semantic_result: Optional Stage 2 result
+     """
+
+     execution_id: str
+     seed_modified: bool = False
+     ontology_changed: bool = False
+     goal_reinterpreted: bool = False
+     drift_score: float = 0.0
+     uncertainty_score: float = 0.0
+     lateral_thinking_adopted: bool = False
+     semantic_result: SemanticResult | None = None
+
+
+ @dataclass(frozen=True, slots=True)
+ class TriggerResult:
+     """Result of trigger evaluation.
+
+     Attributes:
+         should_trigger: Whether consensus should be triggered
+         trigger_type: Type of trigger activated (if any)
+         reason: Human-readable reason
+         details: Additional context
+     """
+
+     should_trigger: bool
+     trigger_type: TriggerType | None = None
+     reason: str = ""
+     details: dict[str, Any] = field(default_factory=dict)
+
+
+ @dataclass(frozen=True, slots=True)
+ class TriggerConfig:
+     """Configuration for trigger thresholds.
+
+     Attributes:
+         drift_threshold: Drift score above which to trigger (default 0.3)
+         uncertainty_threshold: Uncertainty above which to trigger (default 0.3)
+     """
+
+     drift_threshold: float = 0.3
+     uncertainty_threshold: float = 0.3
+
+
+ class ConsensusTrigger:
+     """Evaluates whether consensus should be triggered.
+
+     Implements FR16: Consensus Trigger Matrix with 6 conditions.
+     Stateless - all state passed via TriggerContext.
+
+     Example:
+         trigger = ConsensusTrigger()
+         result = trigger.evaluate(context)
+         if result.should_trigger:
+             # Run Stage 3 consensus
+     """
+
+     def __init__(self, config: TriggerConfig | None = None) -> None:
+         """Initialize trigger evaluator.
+
+         Args:
+             config: Trigger configuration
+         """
+         self._config = config or TriggerConfig()
+
+     def evaluate(
+         self,
+         context: TriggerContext,
+     ) -> Result[tuple[TriggerResult, list[BaseEvent]], ValidationError]:
+         """Evaluate all trigger conditions.
+
+         Checks triggers in priority order and returns on first match.
+
+         Args:
+             context: Trigger evaluation context
+
+         Returns:
+             Result containing TriggerResult and events
+         """
+         events: list[BaseEvent] = []
+
+         # Check each trigger condition in priority order
+         checks = [
+             self._check_seed_modification,
+             self._check_ontology_evolution,
+             self._check_goal_interpretation,
+             self._check_seed_drift,
+             self._check_stage2_uncertainty,
+             self._check_lateral_thinking,
+         ]
+
+         for check in checks:
+             result = check(context)
+             if result.should_trigger:
+                 # Emit trigger event
+                 events.append(
+                     create_consensus_triggered_event(
+                         execution_id=context.execution_id,
+                         trigger_type=result.trigger_type.value if result.trigger_type else "unknown",
+                         trigger_details=result.details,
+                     )
+                 )
+                 return Result.ok((result, events))
+
+         # No trigger condition met
+         return Result.ok((
+             TriggerResult(
+                 should_trigger=False,
+                 reason="No trigger conditions met",
+             ),
+             events,
+         ))
+
+     def _check_seed_modification(self, context: TriggerContext) -> TriggerResult:
+         """Check for seed modification trigger.
+
+         Seeds are immutable, any modification requires consensus.
+         """
+         if context.seed_modified:
+             return TriggerResult(
+                 should_trigger=True,
+                 trigger_type=TriggerType.SEED_MODIFICATION,
+                 reason="Seed modification detected - requires consensus",
+                 details={"seed_modified": True},
+             )
+         return TriggerResult(should_trigger=False)
+
+     def _check_ontology_evolution(self, context: TriggerContext) -> TriggerResult:
+         """Check for ontology evolution trigger.
+
+         Schema changes affect output structure and require validation.
+         """
+         if context.ontology_changed:
+             return TriggerResult(
+                 should_trigger=True,
+                 trigger_type=TriggerType.ONTOLOGY_EVOLUTION,
+                 reason="Ontology schema changed - requires consensus",
+                 details={"ontology_changed": True},
+             )
+         return TriggerResult(should_trigger=False)
+
+     def _check_goal_interpretation(self, context: TriggerContext) -> TriggerResult:
+         """Check for goal interpretation change trigger.
+
+         Reinterpretation of the goal needs diverse verification.
+         """
+         if context.goal_reinterpreted:
+             return TriggerResult(
+                 should_trigger=True,
+                 trigger_type=TriggerType.GOAL_INTERPRETATION,
+                 reason="Goal interpretation changed - requires consensus",
+                 details={"goal_reinterpreted": True},
+             )
+         return TriggerResult(should_trigger=False)
+
+     def _check_seed_drift(self, context: TriggerContext) -> TriggerResult:
+         """Check for seed drift alert trigger.
+
+         High drift from original seed intent needs verification.
+         """
+         # Use semantic result drift if available, otherwise use context drift
+         drift = context.drift_score
+         if context.semantic_result:
+             drift = context.semantic_result.drift_score
+
+         if drift > self._config.drift_threshold:
+             return TriggerResult(
+                 should_trigger=True,
+                 trigger_type=TriggerType.SEED_DRIFT_ALERT,
+                 reason=f"Drift score {drift:.2f} exceeds threshold {self._config.drift_threshold}",
+                 details={
+                     "drift_score": drift,
+                     "threshold": self._config.drift_threshold,
+                 },
+             )
+         return TriggerResult(should_trigger=False)
+
+     def _check_stage2_uncertainty(self, context: TriggerContext) -> TriggerResult:
+         """Check for Stage 2 uncertainty trigger.
+
+         High uncertainty in semantic evaluation needs multi-model verification.
+         """
+         # Use semantic result uncertainty if available
+         uncertainty = context.uncertainty_score
+         if context.semantic_result:
+             uncertainty = context.semantic_result.uncertainty
+
+         if uncertainty > self._config.uncertainty_threshold:
+             return TriggerResult(
+                 should_trigger=True,
+                 trigger_type=TriggerType.STAGE2_UNCERTAINTY,
+                 reason=f"Uncertainty {uncertainty:.2f} exceeds threshold {self._config.uncertainty_threshold}",
+                 details={
+                     "uncertainty": uncertainty,
+                     "threshold": self._config.uncertainty_threshold,
+                 },
+             )
+         return TriggerResult(should_trigger=False)
+
+     def _check_lateral_thinking(self, context: TriggerContext) -> TriggerResult:
+         """Check for lateral thinking adoption trigger.
+
+         Adopting alternative approaches from personas needs verification.
+         """
+         if context.lateral_thinking_adopted:
+             return TriggerResult(
+                 should_trigger=True,
+                 trigger_type=TriggerType.LATERAL_THINKING_ADOPTION,
+                 reason="Lateral thinking approach adopted - requires consensus",
+                 details={"lateral_thinking_adopted": True},
+             )
+         return TriggerResult(should_trigger=False)
+
+
+ def check_consensus_trigger(
+     context: TriggerContext,
+     config: TriggerConfig | None = None,
+ ) -> Result[tuple[TriggerResult, list[BaseEvent]], ValidationError]:
+     """Convenience function for checking consensus triggers.
+
+     Args:
+         context: Trigger evaluation context
+         config: Optional configuration
+
+     Returns:
+         Result with TriggerResult and events
+     """
+     trigger = ConsensusTrigger(config)
+     return trigger.evaluate(context)
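The sketch below shows how this module might be driven end to end. It is illustrative only: the TriggerContext/TriggerConfig fields and the ConsensusTrigger API are taken from the hunk above, but the way the Result wrapper is unwrapped (shown here as a hypothetical .value attribute) is an assumption about ouroboros.core.types, not something this diff confirms.

# Illustrative usage sketch -- not part of the packaged files.
from ouroboros.evaluation.trigger import (
    ConsensusTrigger,
    TriggerConfig,
    TriggerContext,
)

context = TriggerContext(
    execution_id="exec-001",
    drift_score=0.45,  # above the default 0.3 drift threshold
)
trigger = ConsensusTrigger(TriggerConfig(drift_threshold=0.3))
outcome = trigger.evaluate(context)

# Assumption: Result exposes the wrapped value via a `.value` attribute.
trigger_result, events = outcome.value
if trigger_result.should_trigger:
    # Expected: TriggerType.SEED_DRIFT_ALERT, "Drift score 0.45 exceeds threshold 0.3"
    print(trigger_result.trigger_type, trigger_result.reason)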
ouroboros/events/__init__.py
@@ -0,0 +1,5 @@
+ """Ouroboros events module - event definitions for event sourcing."""
+
+ from ouroboros.events.base import BaseEvent
+
+ __all__ = ["BaseEvent"]
ouroboros/events/base.py
@@ -0,0 +1,80 @@
+ """Base event definition for event sourcing.
+
+ All events in Ouroboros inherit from BaseEvent. Events are immutable
+ (frozen Pydantic models) and follow the dot.notation.past_tense naming convention.
+ """
+
+ from datetime import UTC, datetime
+ from typing import Any
+ from uuid import uuid4
+
+ from pydantic import BaseModel, Field
+
+
+ class BaseEvent(BaseModel, frozen=True):
+     """Base class for all Ouroboros events.
+
+     Events are immutable records of state changes. They are persisted in the
+     event store and can be replayed to reconstruct aggregate state.
+
+     Attributes:
+         id: Unique event identifier (UUID).
+         type: Event type following dot.notation.past_tense convention.
+             Examples: "ontology.concept.added", "execution.ac.completed"
+         timestamp: When the event occurred (UTC).
+         aggregate_type: Type of aggregate this event belongs to.
+         aggregate_id: Unique identifier of the aggregate.
+         data: Event-specific payload data.
+         consensus_id: Optional consensus identifier for grouped events.
+
+     Example:
+         event = BaseEvent(
+             type="ontology.concept.added",
+             aggregate_type="ontology",
+             aggregate_id="ont-123",
+             data={"concept_name": "authentication", "weight": 1.0}
+         )
+     """
+
+     id: str = Field(default_factory=lambda: str(uuid4()))
+     type: str
+     timestamp: datetime = Field(default_factory=lambda: datetime.now(UTC))
+     aggregate_type: str
+     aggregate_id: str
+     data: dict[str, Any] = Field(default_factory=dict)
+     consensus_id: str | None = Field(default=None)
+
+     def to_db_dict(self) -> dict[str, Any]:
+         """Convert event to dictionary for database insertion.
+
+         Returns:
+             Dictionary with keys matching the events table columns.
+         """
+         return {
+             "id": self.id,
+             "event_type": self.type,
+             "timestamp": self.timestamp,
+             "aggregate_type": self.aggregate_type,
+             "aggregate_id": self.aggregate_id,
+             "payload": self.data,
+             "consensus_id": self.consensus_id,
+         }
+
+     @classmethod
+     def from_db_row(cls, row: dict[str, Any]) -> "BaseEvent":
+         """Create event from database row.
+
+         Args:
+             row: Dictionary from database query result.
+
+         Returns:
+             BaseEvent instance.
+         """
+         return cls(
+             id=row["id"],
+             type=row["event_type"],
+             timestamp=row["timestamp"],
+             aggregate_type=row["aggregate_type"],
+             aggregate_id=row["aggregate_id"],
+             data=row["payload"],
+         )
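As a quick illustration of the persistence round trip defined above, the following sketch builds an event and feeds the output of to_db_dict back into from_db_row. The example values are invented; the API calls are exactly the ones shown in this hunk.

# Illustrative usage sketch -- not part of the packaged files.
from ouroboros.events.base import BaseEvent

event = BaseEvent(
    type="ontology.concept.added",
    aggregate_type="ontology",
    aggregate_id="ont-123",
    data={"concept_name": "authentication", "weight": 1.0},
)

row = event.to_db_dict()
# keys: id, event_type, timestamp, aggregate_type, aggregate_id, payload, consensus_id

restored = BaseEvent.from_db_row(row)
assert restored.id == event.id
assert restored.type == "ontology.concept.added"
assert restored.data == event.data
# Note: from_db_row does not read "consensus_id", so that column is not
# restored by this pair of helpers as written.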
ouroboros/events/decomposition.py
@@ -0,0 +1,153 @@
+ """Event definitions for AC decomposition and atomicity detection.
+
+ These events enable full traceability of AC lifecycle through event sourcing:
+ - Atomicity checks and their results
+ - AC decomposition into children
+ - AC marking as atomic (ready for execution)
+
+ Event naming follows dot.notation.past_tense convention.
+ """
+
+ from __future__ import annotations
+
+ from ouroboros.events.base import BaseEvent
+
+
+ def create_ac_atomicity_checked_event(
+     ac_id: str,
+     execution_id: str,
+     is_atomic: bool,
+     complexity_score: float,
+     tool_count: int,
+     estimated_duration: int,
+     reasoning: str,
+ ) -> BaseEvent:
+     """Factory for AC atomicity check event.
+
+     Emitted when an AC's atomicity has been evaluated.
+
+     Args:
+         ac_id: Unique identifier for the AC.
+         execution_id: Associated execution ID.
+         is_atomic: Whether the AC is atomic.
+         complexity_score: Normalized complexity (0.0-1.0).
+         tool_count: Estimated number of tools required.
+         estimated_duration: Estimated duration in seconds.
+         reasoning: Explanation of the atomicity decision.
+
+     Returns:
+         BaseEvent with type "ac.atomicity.checked".
+     """
+     return BaseEvent(
+         type="ac.atomicity.checked",
+         aggregate_type="ac_decomposition",
+         aggregate_id=ac_id,
+         data={
+             "execution_id": execution_id,
+             "is_atomic": is_atomic,
+             "complexity_score": complexity_score,
+             "tool_count": tool_count,
+             "estimated_duration": estimated_duration,
+             "reasoning": reasoning,
+         },
+     )
+
+
+ def create_ac_decomposed_event(
+     parent_ac_id: str,
+     execution_id: str,
+     child_ac_ids: list[str],
+     child_contents: list[str],
+     depth: int,
+     reasoning: str,
+ ) -> BaseEvent:
+     """Factory for AC decomposition event.
+
+     Emitted when a non-atomic AC is decomposed into child ACs.
+
+     Args:
+         parent_ac_id: ID of the parent AC being decomposed.
+         execution_id: Associated execution ID.
+         child_ac_ids: List of child AC IDs.
+         child_contents: List of child AC content strings.
+         depth: Current depth in the AC tree.
+         reasoning: Explanation of the decomposition strategy.
+
+     Returns:
+         BaseEvent with type "ac.decomposition.completed".
+     """
+     return BaseEvent(
+         type="ac.decomposition.completed",
+         aggregate_type="ac_decomposition",
+         aggregate_id=parent_ac_id,
+         data={
+             "execution_id": execution_id,
+             "child_ac_ids": child_ac_ids,
+             "child_contents": child_contents,
+             "child_count": len(child_ac_ids),
+             "depth": depth,
+             "reasoning": reasoning,
+         },
+     )
+
+
+ def create_ac_marked_atomic_event(
+     ac_id: str,
+     execution_id: str,
+     depth: int,
+ ) -> BaseEvent:
+     """Factory for AC marked atomic event.
+
+     Emitted when an AC is confirmed as atomic and ready for direct execution.
+
+     Args:
+         ac_id: Unique identifier for the AC.
+         execution_id: Associated execution ID.
+         depth: Current depth in the AC tree.
+
+     Returns:
+         BaseEvent with type "ac.marked_atomic".
+     """
+     return BaseEvent(
+         type="ac.marked_atomic",
+         aggregate_type="ac_decomposition",
+         aggregate_id=ac_id,
+         data={
+             "execution_id": execution_id,
+             "depth": depth,
+         },
+     )
+
+
+ def create_ac_decomposition_failed_event(
+     ac_id: str,
+     execution_id: str,
+     error_message: str,
+     error_type: str,
+     depth: int,
+ ) -> BaseEvent:
+     """Factory for AC decomposition failure event.
+
+     Emitted when decomposition fails (max depth, cyclic, LLM error).
+
+     Args:
+         ac_id: Unique identifier for the AC.
+         execution_id: Associated execution ID.
+         error_message: Human-readable error description.
+         error_type: Type of error (e.g., "max_depth", "cyclic", "llm_failure").
+         depth: Current depth when failure occurred.
+
+     Returns:
+         BaseEvent with type "ac.decomposition.failed".
+     """
+     return BaseEvent(
+         type="ac.decomposition.failed",
+         aggregate_type="ac_decomposition",
+         aggregate_id=ac_id,
+         data={
+             "execution_id": execution_id,
+             "error_message": error_message,
+             "error_type": error_type,
+             "depth": depth,
+         },
+     )
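To make the factory contract concrete, here is a small hypothetical call to one of the factories above. The argument values are invented; the assertions simply restate the event shape that create_ac_decomposed_event produces in this hunk.

# Illustrative usage sketch -- not part of the packaged files.
from ouroboros.events.decomposition import create_ac_decomposed_event

event = create_ac_decomposed_event(
    parent_ac_id="ac-root",
    execution_id="exec-001",
    child_ac_ids=["ac-root.1", "ac-root.2"],
    child_contents=[
        "Validate that the uploaded file is well-formed CSV",
        "Persist the parsed rows to the database",
    ],
    depth=1,
    reasoning="Parent AC mixes validation and persistence concerns",
)

assert event.type == "ac.decomposition.completed"
assert event.aggregate_type == "ac_decomposition"
assert event.aggregate_id == "ac-root"
assert event.data["child_count"] == 2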