ouroboros-ai 0.2.3-py3-none-any.whl → 0.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (44)
  1. ouroboros/__init__.py +1 -1
  2. ouroboros/bigbang/__init__.py +9 -0
  3. ouroboros/bigbang/interview.py +16 -18
  4. ouroboros/bigbang/ontology.py +180 -0
  5. ouroboros/cli/commands/__init__.py +2 -0
  6. ouroboros/cli/commands/init.py +162 -97
  7. ouroboros/cli/commands/mcp.py +161 -0
  8. ouroboros/cli/commands/run.py +165 -27
  9. ouroboros/cli/main.py +2 -1
  10. ouroboros/core/ontology_aspect.py +455 -0
  11. ouroboros/core/ontology_questions.py +462 -0
  12. ouroboros/evaluation/__init__.py +16 -1
  13. ouroboros/evaluation/consensus.py +569 -11
  14. ouroboros/evaluation/models.py +81 -0
  15. ouroboros/events/ontology.py +135 -0
  16. ouroboros/mcp/__init__.py +83 -0
  17. ouroboros/mcp/client/__init__.py +20 -0
  18. ouroboros/mcp/client/adapter.py +632 -0
  19. ouroboros/mcp/client/manager.py +600 -0
  20. ouroboros/mcp/client/protocol.py +161 -0
  21. ouroboros/mcp/errors.py +377 -0
  22. ouroboros/mcp/resources/__init__.py +22 -0
  23. ouroboros/mcp/resources/handlers.py +328 -0
  24. ouroboros/mcp/server/__init__.py +21 -0
  25. ouroboros/mcp/server/adapter.py +408 -0
  26. ouroboros/mcp/server/protocol.py +291 -0
  27. ouroboros/mcp/server/security.py +636 -0
  28. ouroboros/mcp/tools/__init__.py +24 -0
  29. ouroboros/mcp/tools/definitions.py +351 -0
  30. ouroboros/mcp/tools/registry.py +269 -0
  31. ouroboros/mcp/types.py +333 -0
  32. ouroboros/orchestrator/__init__.py +31 -0
  33. ouroboros/orchestrator/events.py +40 -0
  34. ouroboros/orchestrator/mcp_config.py +419 -0
  35. ouroboros/orchestrator/mcp_tools.py +483 -0
  36. ouroboros/orchestrator/runner.py +119 -2
  37. ouroboros/providers/claude_code_adapter.py +75 -0
  38. ouroboros/strategies/__init__.py +23 -0
  39. ouroboros/strategies/devil_advocate.py +197 -0
  40. {ouroboros_ai-0.2.3.dist-info → ouroboros_ai-0.4.0.dist-info}/METADATA +73 -17
  41. {ouroboros_ai-0.2.3.dist-info → ouroboros_ai-0.4.0.dist-info}/RECORD +44 -19
  42. {ouroboros_ai-0.2.3.dist-info → ouroboros_ai-0.4.0.dist-info}/WHEEL +0 -0
  43. {ouroboros_ai-0.2.3.dist-info → ouroboros_ai-0.4.0.dist-info}/entry_points.txt +0 -0
  44. {ouroboros_ai-0.2.3.dist-info → ouroboros_ai-0.4.0.dist-info}/licenses/LICENSE +0 -0
ouroboros/core/ontology_questions.py
@@ -0,0 +1,462 @@
+"""Shared Ontological Question Framework.
+
+This module defines the core philosophical questions used across
+Interview, Consensus, and Resilience phases for ontological analysis.
+
+The Two Ancient Methods:
+1. Socratic Questioning - "Why?", "What if?", "Is it necessary?"
+   → Reveals hidden assumptions, exposes contradictions
+
+2. Ontological Analysis - "What IS this?", "Root cause or symptom?"
+   → Finds root problems, separates essential from accidental
+
+This framework provides the Ontological Analysis component.
+
+Usage:
+    # For building prompts (low-level)
+    prompt = build_ontological_prompt(OntologicalQuestionType.ESSENCE)
+
+    # For full analysis with LLM (high-level, centralized)
+    insight = await analyze_ontologically(llm_adapter, context, (ROOT_CAUSE, ESSENCE))
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from enum import StrEnum
+import json
+from typing import TYPE_CHECKING, Protocol
+
+if TYPE_CHECKING:
+    from ouroboros.core.errors import ProviderError, ValidationError
+    from ouroboros.core.types import Result
+    from ouroboros.providers.base import LLMAdapter
+
+
+class OntologicalQuestionType(StrEnum):
+    """Types of ontological questions.
+
+    Each type probes a different aspect of the fundamental nature
+    of a problem or solution.
+    """
+
+    ESSENCE = "essence"
+    ROOT_CAUSE = "root_cause"
+    PREREQUISITES = "prerequisites"
+    HIDDEN_ASSUMPTIONS = "hidden_assumptions"
+
+
+@dataclass(frozen=True, slots=True)
+class OntologicalQuestion:
+    """A single ontological question with metadata.
+
+    Attributes:
+        type: The category of ontological question.
+        question: The core question to ask.
+        purpose: What this question aims to reveal.
+        follow_up: A probing follow-up consideration.
+    """
+
+    type: OntologicalQuestionType
+    question: str
+    purpose: str
+    follow_up: str
+
+
+ONTOLOGICAL_QUESTIONS: dict[OntologicalQuestionType, OntologicalQuestion] = {
+    OntologicalQuestionType.ESSENCE: OntologicalQuestion(
+        type=OntologicalQuestionType.ESSENCE,
+        question="What IS this, really?",
+        purpose="Identify the true nature of the problem/solution",
+        follow_up="Strip away accidental properties - what remains?",
+    ),
+    OntologicalQuestionType.ROOT_CAUSE: OntologicalQuestion(
+        type=OntologicalQuestionType.ROOT_CAUSE,
+        question="Is this the root cause or a symptom?",
+        purpose="Distinguish fundamental issues from surface manifestations",
+        follow_up="If we solve this, does the underlying issue remain?",
+    ),
+    OntologicalQuestionType.PREREQUISITES: OntologicalQuestion(
+        type=OntologicalQuestionType.PREREQUISITES,
+        question="What must exist first?",
+        purpose="Identify hidden dependencies and foundations",
+        follow_up="What assumptions are we making about existing structures?",
+    ),
+    OntologicalQuestionType.HIDDEN_ASSUMPTIONS: OntologicalQuestion(
+        type=OntologicalQuestionType.HIDDEN_ASSUMPTIONS,
+        question="What are we assuming?",
+        purpose="Surface implicit beliefs that may be wrong",
+        follow_up="What if the opposite were true?",
+    ),
+}
+
+
+@dataclass(frozen=True, slots=True)
+class OntologicalInsight:
+    """Result of ontological analysis.
+
+    Attributes:
+        essence: The identified essential nature of the subject.
+        is_root_problem: Whether this addresses a root cause.
+        prerequisites: Things that must exist first.
+        hidden_assumptions: Implicit beliefs discovered.
+        confidence: Confidence in the analysis (0.0-1.0).
+        reasoning: The reasoning process that led to these insights.
+    """
+
+    essence: str
+    is_root_problem: bool
+    prerequisites: tuple[str, ...]
+    hidden_assumptions: tuple[str, ...]
+    confidence: float
+    reasoning: str
+
+
+class OntologicalAnalyzer(Protocol):
+    """Protocol for components that perform ontological analysis.
+
+    This protocol is implemented by:
+    - InterviewOntologyAnalyzer (bigbang/ontology.py)
+    - Devil's Advocate in Consensus (evaluation/consensus.py)
+    - CONTRARIAN persona in Lateral Thinking (resilience/lateral.py) [future]
+    """
+
+    async def analyze_essence(self, subject: str) -> str:
+        """Identify the essential nature of a subject.
+
+        Args:
+            subject: The problem or solution to analyze.
+
+        Returns:
+            A description of the essential nature.
+        """
+        ...
+
+    async def check_root_cause(
+        self,
+        problem: str,
+        proposed_solution: str,
+    ) -> tuple[bool, str]:
+        """Check if a solution addresses the root cause.
+
+        Args:
+            problem: The problem being solved.
+            proposed_solution: The proposed solution.
+
+        Returns:
+            Tuple of (is_root_cause, reasoning).
+        """
+        ...
+
+    async def identify_prerequisites(self, goal: str) -> list[str]:
+        """Identify what must exist before pursuing a goal.
+
+        Args:
+            goal: The goal to analyze.
+
+        Returns:
+            List of prerequisites.
+        """
+        ...
+
+    async def surface_assumptions(self, context: str) -> list[str]:
+        """Surface hidden assumptions in a context.
+
+        Args:
+            context: The context to analyze.
+
+        Returns:
+            List of hidden assumptions discovered.
+        """
+        ...
+
+
+def build_ontological_prompt(question_type: OntologicalQuestionType) -> str:
+    """Build a prompt fragment for ontological questioning.
+
+    Args:
+        question_type: The type of ontological question.
+
+    Returns:
+        A formatted prompt string for LLM use.
+    """
+    q = ONTOLOGICAL_QUESTIONS[question_type]
+    return f"""Apply ontological analysis:
+- Core Question: {q.question}
+- Purpose: {q.purpose}
+- Follow-up consideration: {q.follow_up}
+"""
+
+
+def build_devil_advocate_prompt() -> str:
+    """Build the Devil's Advocate prompt using all ontological questions.
+
+    This prompt is used in the Deliberative Consensus phase to ensure
+    solutions address root problems rather than symptoms.
+
+    Returns:
+        A formatted prompt string for the Devil's Advocate role.
+    """
+    questions = "\n".join(
+        f"- {q.question} ({q.purpose})" for q in ONTOLOGICAL_QUESTIONS.values()
+    )
+    return f"""You are the DEVIL'S ADVOCATE. Your role is to critically examine
+this solution using ONTOLOGICAL ANALYSIS.
+
+Apply these fundamental questions:
+{questions}
+
+Your goal is NOT to reject everything, but to ensure we're solving
+the ROOT problem, not just treating SYMPTOMS.
+
+Guidelines:
+- If you find fundamental issues, explain WHY this is symptom treatment
+- If the solution is sound, acknowledge its validity with clear reasoning
+- Focus on the ESSENCE of the problem - is it being addressed?
+- Challenge hidden ASSUMPTIONS respectfully but firmly
+- Consider what PREREQUISITES might be missing
+
+Be rigorous but fair. A good solution deserves recognition.
+A symptomatic treatment deserves honest critique.
+"""
+
+
+def get_all_questions() -> list[OntologicalQuestion]:
+    """Get all ontological questions as a list.
+
+    Returns:
+        List of all OntologicalQuestion instances.
+    """
+    return list(ONTOLOGICAL_QUESTIONS.values())
+
+
+def get_question(question_type: OntologicalQuestionType) -> OntologicalQuestion:
+    """Get a specific ontological question by type.
+
+    Args:
+        question_type: The type of question to retrieve.
+
+    Returns:
+        The corresponding OntologicalQuestion.
+    """
+    return ONTOLOGICAL_QUESTIONS[question_type]
+
+
+# ============================================================================
+# Centralized Ontological Analysis
+# ============================================================================
+#
+# This is the SINGLE PLACE where philosophical interpretation happens.
+# All phases (Interview, Consensus, Resilience) should use this function
+# to ensure consistent ontological analysis across the system.
+#
+# Philosophical Interpretation Criteria:
+# - ROOT solution indicators: "fundamental", "core nature", "essential"
+# - SYMPTOM treatment indicators: "surface", "temporary", "workaround"
+# ============================================================================
+
+ONTOLOGY_ANALYSIS_SYSTEM_PROMPT = """You are an ontological analyst. Your task is to apply philosophical inquiry to determine whether a solution addresses the ROOT CAUSE or merely treats SYMPTOMS.
+
+You must respond ONLY with a valid JSON object:
+{
+"essence": "<string: what IS this, fundamentally>",
+"is_root_problem": <boolean: true if addresses root cause, false if symptom treatment>,
+"prerequisites": ["<string: what must exist first>", ...],
+"hidden_assumptions": ["<string: implicit belief>", ...],
+"confidence": <float: 0.0-1.0>,
+"reasoning": "<string: your analysis process>"
+}
+
+Guidelines:
+- ESSENCE: Strip away accidental properties. What remains?
+- ROOT vs SYMPTOM: If we solve this, does the underlying issue remain?
+- PREREQUISITES: What foundations are being assumed?
+- ASSUMPTIONS: What beliefs might be wrong?
+- Be honest. High confidence (>0.8) requires strong evidence.
+"""
+
+
+def _build_analysis_prompt(
+    context: str,
+    question_types: tuple[OntologicalQuestionType, ...],
+) -> str:
+    """Build the user prompt for ontological analysis.
+
+    Args:
+        context: The subject to analyze
+        question_types: Which questions to emphasize (all if empty)
+
+    Returns:
+        Formatted prompt string
+    """
+    if question_types:
+        questions_text = "\n".join(
+            f"- {ONTOLOGICAL_QUESTIONS[qt].question}: {ONTOLOGICAL_QUESTIONS[qt].purpose}"
+            for qt in question_types
+        )
+        focus = f"\n\nFocus especially on:\n{questions_text}"
+    else:
+        questions_text = "\n".join(
+            f"- {q.question}: {q.purpose}" for q in ONTOLOGICAL_QUESTIONS.values()
+        )
+        focus = f"\n\nApply all ontological questions:\n{questions_text}"
+
+    return f"""Analyze the following using ontological inquiry:
+
+## Subject
+{context}
+{focus}
+
+Respond with JSON containing: essence, is_root_problem, prerequisites, hidden_assumptions, confidence, reasoning."""
+
+
+def _parse_insight_response(response_text: str) -> OntologicalInsight | None:
+    """Parse LLM response into OntologicalInsight.
+
+    Args:
+        response_text: Raw LLM response
+
+    Returns:
+        OntologicalInsight or None if parsing fails
+    """
+    # Extract JSON using index-based approach
+    start = response_text.find("{")
+    end = response_text.rfind("}")
+    if start == -1 or end == -1 or end <= start:
+        return None
+
+    try:
+        data = json.loads(response_text[start : end + 1])
+    except json.JSONDecodeError:
+        return None
+
+    # Extract and validate fields with defaults
+    try:
+        prereqs_raw = data.get("prerequisites", [])
+        prerequisites = (
+            tuple(str(p) for p in prereqs_raw) if isinstance(prereqs_raw, list) else ()
+        )
+
+        assumptions_raw = data.get("hidden_assumptions", [])
+        hidden_assumptions = (
+            tuple(str(a) for a in assumptions_raw)
+            if isinstance(assumptions_raw, list)
+            else ()
+        )
+
+        confidence = max(0.0, min(1.0, float(data.get("confidence", 0.5))))
+
+        return OntologicalInsight(
+            essence=str(data.get("essence", "Unknown")),
+            is_root_problem=bool(data.get("is_root_problem", False)),
+            prerequisites=prerequisites,
+            hidden_assumptions=hidden_assumptions,
+            confidence=confidence,
+            reasoning=str(data.get("reasoning", "No reasoning provided")),
+        )
+    except (TypeError, ValueError):
+        return None
+
+
+async def analyze_ontologically(
+    llm_adapter: LLMAdapter,
+    context: str,
+    question_types: tuple[OntologicalQuestionType, ...] = (),
+    model: str = "openrouter/google/gemini-2.0-flash-001",
+    temperature: float = 0.3,
+    max_tokens: int = 2048,
+) -> Result[OntologicalInsight, ProviderError | ValidationError]:
+    """Central ontological analysis function.
+
+    This is the SINGLE SOURCE OF TRUTH for ontological analysis.
+    All phases (Interview, Consensus, Resilience) should use this function
+    to ensure consistent philosophical interpretation across the system.
+
+    The function:
+    1. Builds a standardized ontological prompt
+    2. Calls the LLM for analysis
+    3. Parses the response using centralized criteria
+    4. Returns a structured OntologicalInsight
+
+    Args:
+        llm_adapter: LLM adapter for analysis
+        context: What to analyze (problem, solution, or situation)
+        question_types: Which questions to emphasize (empty = all)
+        model: Model to use for analysis
+        temperature: Sampling temperature (lower = more deterministic)
+        max_tokens: Maximum tokens for LLM response
+
+    Returns:
+        Result containing OntologicalInsight or error
+
+    Example:
+        # For consensus (Devil's Advocate)
+        insight = await analyze_ontologically(
+            llm, artifact,
+            (OntologicalQuestionType.ROOT_CAUSE, OntologicalQuestionType.ESSENCE)
+        )
+        if insight.is_ok and not insight.value.is_root_problem:
+            # Solution treats symptoms, not root cause
+
+        # For interview (surface assumptions)
+        insight = await analyze_ontologically(
+            llm, user_context,
+            (OntologicalQuestionType.HIDDEN_ASSUMPTIONS,)
+        )
+
+        # For resilience (CONTRARIAN - challenge everything)
+        insight = await analyze_ontologically(llm, stuck_context)  # All questions
+    """
+    # Import here to avoid circular dependency
+    from ouroboros.core.errors import ValidationError
+    from ouroboros.core.types import Result
+    from ouroboros.providers.base import CompletionConfig, Message, MessageRole
+
+    messages = [
+        Message(role=MessageRole.SYSTEM, content=ONTOLOGY_ANALYSIS_SYSTEM_PROMPT),
+        Message(
+            role=MessageRole.USER,
+            content=_build_analysis_prompt(context, question_types),
+        ),
+    ]
+
+    config = CompletionConfig(
+        model=model,
+        temperature=temperature,
+        max_tokens=max_tokens,
+    )
+
+    llm_result = await llm_adapter.complete(messages, config)
+    if llm_result.is_err:
+        return Result.err(llm_result.error)
+
+    insight = _parse_insight_response(llm_result.value.content)
+    if insight is None:
+        return Result.err(
+            ValidationError(
+                "Failed to parse ontological analysis response",
+                field="response",
+                value=llm_result.value.content[:200],
+            )
+        )
+
+    return Result.ok(insight)
+
+
+__all__ = [
+    # Types and Constants
+    "OntologicalQuestionType",
+    "OntologicalQuestion",
+    "ONTOLOGICAL_QUESTIONS",
+    "OntologicalInsight",
+    "OntologicalAnalyzer",
+    # Prompt Builders (low-level)
+    "build_ontological_prompt",
+    "build_devil_advocate_prompt",
+    # Question Accessors
+    "get_all_questions",
+    "get_question",
+    # Centralized Analysis (high-level)
+    "analyze_ontologically",
+    "ONTOLOGY_ANALYSIS_SYSTEM_PROMPT",
+]
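
For orientation, a minimal caller sketch (not part of the package) is shown below. It drives the new analyze_ontologically helper and build_devil_advocate_prompt using only names visible in this hunk and the files-changed list; the review_artifact wrapper, the untyped llm_adapter argument, and the print-based handling are illustrative assumptions, not package code.

    from ouroboros.core.ontology_questions import (
        OntologicalQuestionType,
        analyze_ontologically,
        build_devil_advocate_prompt,
    )

    async def review_artifact(llm_adapter, artifact: str) -> None:
        # Emphasize the ROOT_CAUSE and ESSENCE questions, as the module's own
        # docstring suggests for the consensus Devil's Advocate pass.
        result = await analyze_ontologically(
            llm_adapter,
            artifact,
            (OntologicalQuestionType.ROOT_CAUSE, OntologicalQuestionType.ESSENCE),
        )
        if result.is_err:
            # A ProviderError or ValidationError came back; surface it.
            print(f"ontological analysis failed: {result.error}")
            return

        insight = result.value
        if not insight.is_root_problem:
            print(f"Symptom treatment suspected: {insight.reasoning}")
        else:
            print(f"Root cause addressed (confidence {insight.confidence:.2f})")

    # The low-level builder can also seed a custom reviewer persona directly:
    devils_advocate_prompt = build_devil_advocate_prompt()
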
ouroboros/evaluation/__init__.py
@@ -35,7 +35,10 @@ from ouroboros.evaluation.consensus import (
     DEFAULT_CONSENSUS_MODELS,
     ConsensusConfig,
     ConsensusEvaluator,
+    DeliberativeConfig,
+    DeliberativeConsensus,
     run_consensus_evaluation,
+    run_deliberative_evaluation,
 )
 from ouroboros.evaluation.mechanical import (
     MechanicalConfig,
@@ -46,11 +49,15 @@ from ouroboros.evaluation.models import (
     CheckResult,
     CheckType,
     ConsensusResult,
+    DeliberationResult,
     EvaluationContext,
     EvaluationResult,
+    FinalVerdict,
+    JudgmentResult,
     MechanicalResult,
     SemanticResult,
     Vote,
+    VoterRole,
 )
 from ouroboros.evaluation.pipeline import (
     EvaluationPipeline,
@@ -77,11 +84,15 @@ __all__ = [
     "CheckResult",
     "CheckType",
     "ConsensusResult",
+    "DeliberationResult",
     "EvaluationContext",
     "EvaluationResult",
+    "FinalVerdict",
+    "JudgmentResult",
     "MechanicalResult",
     "SemanticResult",
     "Vote",
+    "VoterRole",
     # Stage 1
     "MechanicalConfig",
     "MechanicalVerifier",
@@ -91,11 +102,15 @@ __all__ = [
     "SemanticConfig",
     "SemanticEvaluator",
     "run_semantic_evaluation",
-    # Stage 3
+    # Stage 3 - Simple Consensus
     "DEFAULT_CONSENSUS_MODELS",
     "ConsensusConfig",
     "ConsensusEvaluator",
     "run_consensus_evaluation",
+    # Stage 3 - Deliberative Consensus
+    "DeliberativeConfig",
+    "DeliberativeConsensus",
+    "run_deliberative_evaluation",
     # Trigger
     "ConsensusTrigger",
     "TriggerConfig",