ouroboros-ai 0.2.3__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ouroboros-ai might be problematic. Click here for more details.

Files changed (44) hide show
  1. ouroboros/__init__.py +1 -1
  2. ouroboros/bigbang/__init__.py +9 -0
  3. ouroboros/bigbang/interview.py +16 -18
  4. ouroboros/bigbang/ontology.py +180 -0
  5. ouroboros/cli/commands/__init__.py +2 -0
  6. ouroboros/cli/commands/init.py +162 -97
  7. ouroboros/cli/commands/mcp.py +161 -0
  8. ouroboros/cli/commands/run.py +165 -27
  9. ouroboros/cli/main.py +2 -1
  10. ouroboros/core/ontology_aspect.py +455 -0
  11. ouroboros/core/ontology_questions.py +462 -0
  12. ouroboros/evaluation/__init__.py +16 -1
  13. ouroboros/evaluation/consensus.py +569 -11
  14. ouroboros/evaluation/models.py +81 -0
  15. ouroboros/events/ontology.py +135 -0
  16. ouroboros/mcp/__init__.py +83 -0
  17. ouroboros/mcp/client/__init__.py +20 -0
  18. ouroboros/mcp/client/adapter.py +632 -0
  19. ouroboros/mcp/client/manager.py +600 -0
  20. ouroboros/mcp/client/protocol.py +161 -0
  21. ouroboros/mcp/errors.py +377 -0
  22. ouroboros/mcp/resources/__init__.py +22 -0
  23. ouroboros/mcp/resources/handlers.py +328 -0
  24. ouroboros/mcp/server/__init__.py +21 -0
  25. ouroboros/mcp/server/adapter.py +408 -0
  26. ouroboros/mcp/server/protocol.py +291 -0
  27. ouroboros/mcp/server/security.py +636 -0
  28. ouroboros/mcp/tools/__init__.py +24 -0
  29. ouroboros/mcp/tools/definitions.py +351 -0
  30. ouroboros/mcp/tools/registry.py +269 -0
  31. ouroboros/mcp/types.py +333 -0
  32. ouroboros/orchestrator/__init__.py +31 -0
  33. ouroboros/orchestrator/events.py +40 -0
  34. ouroboros/orchestrator/mcp_config.py +419 -0
  35. ouroboros/orchestrator/mcp_tools.py +483 -0
  36. ouroboros/orchestrator/runner.py +119 -2
  37. ouroboros/providers/claude_code_adapter.py +75 -0
  38. ouroboros/strategies/__init__.py +23 -0
  39. ouroboros/strategies/devil_advocate.py +197 -0
  40. {ouroboros_ai-0.2.3.dist-info → ouroboros_ai-0.4.0.dist-info}/METADATA +73 -17
  41. {ouroboros_ai-0.2.3.dist-info → ouroboros_ai-0.4.0.dist-info}/RECORD +44 -19
  42. {ouroboros_ai-0.2.3.dist-info → ouroboros_ai-0.4.0.dist-info}/WHEEL +0 -0
  43. {ouroboros_ai-0.2.3.dist-info → ouroboros_ai-0.4.0.dist-info}/entry_points.txt +0 -0
  44. {ouroboros_ai-0.2.3.dist-info → ouroboros_ai-0.4.0.dist-info}/licenses/LICENSE +0 -0
ouroboros/__init__.py CHANGED
@@ -13,7 +13,7 @@ Example:
13
13
  from ouroboros.bigbang import InterviewEngine
14
14
  """
15
15
 
16
- __version__ = "0.2.0"
16
+ __version__ = "0.4.0"
17
17
 
18
18
  __all__ = ["__version__", "main"]
19
19
 
@@ -14,6 +14,11 @@ from ouroboros.bigbang.ambiguity import (
14
14
  is_ready_for_seed,
15
15
  )
16
16
  from ouroboros.bigbang.interview import InterviewEngine, InterviewState
17
+ from ouroboros.bigbang.ontology import (
18
+ InterviewOntologyAnalyzer,
19
+ OntologicalQuestionDecision,
20
+ default_interview_ontology_analyzer,
21
+ )
17
22
  from ouroboros.bigbang.seed_generator import (
18
23
  SeedGenerator,
19
24
  load_seed,
@@ -32,6 +37,10 @@ __all__ = [
32
37
  # Interview
33
38
  "InterviewEngine",
34
39
  "InterviewState",
40
+ # Ontology (for Interview)
41
+ "InterviewOntologyAnalyzer",
42
+ "OntologicalQuestionDecision",
43
+ "default_interview_ontology_analyzer",
35
44
  # Seed Generation
36
45
  "SeedGenerator",
37
46
  "load_seed",
@@ -1,7 +1,7 @@
1
1
  """Interactive interview engine for requirement clarification.
2
2
 
3
3
  This module implements the interview protocol that refines vague ideas into
4
- clear requirements through iterative questioning (max 10 rounds).
4
+ clear requirements through iterative questioning. Users control when to stop.
5
5
  """
6
6
 
7
7
  from collections.abc import Iterator
@@ -52,10 +52,17 @@ def _file_lock(file_path: Path, exclusive: bool = True) -> Iterator[None]:
52
52
 
53
53
  log = structlog.get_logger()
54
54
 
55
- MAX_INTERVIEW_ROUNDS = 10
55
+ # Interview round constants
56
+ MIN_ROUNDS_BEFORE_EARLY_EXIT = 3 # Must complete at least 3 rounds
57
+ SOFT_LIMIT_WARNING_THRESHOLD = 15 # Warn about diminishing returns after this
58
+ DEFAULT_INTERVIEW_ROUNDS = 10 # Reference value for prompts (not enforced)
59
+
56
60
  # Default model moved to config.models.ClarificationConfig.default_model
57
61
  _FALLBACK_MODEL = "openrouter/google/gemini-2.0-flash-001"
58
62
 
63
+ # Legacy alias for backward compatibility
64
+ MAX_INTERVIEW_ROUNDS = DEFAULT_INTERVIEW_ROUNDS
65
+
59
66
 
60
67
  class InterviewStatus(StrEnum):
61
68
  """Status of the interview process."""
@@ -69,13 +76,13 @@ class InterviewRound(BaseModel):
69
76
  """A single round of interview questions and responses.
70
77
 
71
78
  Attributes:
72
- round_number: 1-based round number (1 to MAX_INTERVIEW_ROUNDS).
79
+ round_number: 1-based round number (no upper limit - user controls).
73
80
  question: The question asked by the system.
74
81
  user_response: The user's response (None if not yet answered).
75
82
  timestamp: When this round was created.
76
83
  """
77
84
 
78
- round_number: int = Field(ge=1, le=MAX_INTERVIEW_ROUNDS)
85
+ round_number: int = Field(ge=1) # No upper limit - user decides when to stop
79
86
  question: str
80
87
  user_response: str | None = None
81
88
  timestamp: datetime = Field(default_factory=lambda: datetime.now(UTC))
@@ -107,11 +114,8 @@ class InterviewState(BaseModel):
107
114
 
108
115
  @property
109
116
  def is_complete(self) -> bool:
110
- """Check if interview has reached max rounds or is marked complete."""
111
- return (
112
- self.status == InterviewStatus.COMPLETED
113
- or len(self.rounds) >= MAX_INTERVIEW_ROUNDS
114
- )
117
+ """Check if interview is marked complete (user-controlled)."""
118
+ return self.status == InterviewStatus.COMPLETED
115
119
 
116
120
  def mark_updated(self) -> None:
117
121
  """Update the updated_at timestamp."""
@@ -321,14 +325,8 @@ class InterviewEngine:
321
325
  response_length=len(user_response),
322
326
  )
323
327
 
324
- # Check if we've reached max rounds
325
- if len(state.rounds) >= MAX_INTERVIEW_ROUNDS:
326
- state.status = InterviewStatus.COMPLETED
327
- log.info(
328
- "interview.max_rounds_reached",
329
- interview_id=state.interview_id,
330
- total_rounds=len(state.rounds),
331
- )
328
+ # Note: No auto-complete on round limit. User controls when to stop.
329
+ # CLI handles prompting user to continue after each round.
332
330
 
333
331
  return Result.ok(state)
334
332
 
@@ -437,7 +435,7 @@ class InterviewEngine:
437
435
  Returns:
438
436
  The system prompt.
439
437
  """
440
- round_info = f"Round {state.current_round_number} of {MAX_INTERVIEW_ROUNDS}"
438
+ round_info = f"Round {state.current_round_number}"
441
439
 
442
440
  return f"""You are an expert requirements engineer conducting an interview to refine vague ideas into clear, executable requirements.
443
441
 
@@ -0,0 +1,180 @@
1
+ """Ontological questioning integration for Interview Phase.
2
+
3
+ This module provides the bridge between the core ontological framework
4
+ and the Interview engine. It determines WHEN and WHICH ontological
5
+ questions should be asked during the interview process.
6
+
7
+ The Two Ancient Methods:
8
+ 1. Socratic Questioning (existing) - "Why?", "What if?"
9
+ → Reveals hidden assumptions through iterative questioning
10
+
11
+ 2. Ontological Analysis (this module) - "What IS this?", "Root cause or symptom?"
12
+ → Finds root problems, ensures we're solving the right thing
13
+
14
+ These methods are interleaved: Socratic questions every round,
15
+ ontological questions periodically to probe deeper.
16
+ """
17
+
18
+ from dataclasses import dataclass
19
+
20
+ from ouroboros.core.ontology_questions import (
21
+ ONTOLOGICAL_QUESTIONS,
22
+ OntologicalQuestionType,
23
+ build_ontological_prompt,
24
+ )
25
+
26
+ # Question schedule: which ontological question type to use at each milestone
27
+ # Pattern: ESSENCE → ROOT_CAUSE → PREREQUISITES → HIDDEN_ASSUMPTIONS
28
+ _QUESTION_SCHEDULE: tuple[OntologicalQuestionType, ...] = (
29
+ OntologicalQuestionType.ESSENCE, # Round 3: What IS this?
30
+ OntologicalQuestionType.ROOT_CAUSE, # Round 6: Root cause or symptom?
31
+ OntologicalQuestionType.PREREQUISITES, # Round 9: What must exist first?
32
+ OntologicalQuestionType.HIDDEN_ASSUMPTIONS, # Round 12: What are we assuming? (cycle then repeats)
33
+ )
34
+
35
+
36
+ @dataclass(frozen=True, slots=True)
37
+ class OntologicalQuestionDecision:
38
+ """Decision about whether to ask an ontological question.
39
+
40
+ Attributes:
41
+ should_ask: Whether an ontological question should be asked this round.
42
+ question_type: The type of question to ask (if should_ask is True).
43
+ system_prompt_addition: Text to add to system prompt (if should_ask).
44
+ """
45
+
46
+ should_ask: bool
47
+ question_type: OntologicalQuestionType | None = None
48
+ system_prompt_addition: str = ""
49
+
50
+
51
+ class InterviewOntologyAnalyzer:
52
+ """Analyzer that decides when to inject ontological questions.
53
+
54
+ Ontological questions are interspersed with Socratic questioning
55
+ to periodically probe the fundamental nature of the problem.
56
+
57
+ Schedule:
58
+ - Round 1-2: Pure Socratic (building basic context)
59
+ - Round 3: ESSENCE - "What IS this, really?"
60
+ - Round 4-5: Pure Socratic
61
+ - Round 6: ROOT_CAUSE - "Is this root cause or symptom?"
62
+ - Round 7-8: Pure Socratic
63
+ - Round 9: PREREQUISITES - "What must exist first?"
64
+ - Round 10-11: Pure Socratic
65
+ - Round 12: HIDDEN_ASSUMPTIONS, then the four-type cycle repeats every 3rd round
66
+ """
67
+
68
+ def __init__(self, start_round: int = 3, frequency: int = 3) -> None:
69
+ """Initialize the analyzer.
70
+
71
+ Args:
72
+ start_round: First round to start ontological questioning.
73
+ frequency: Ask ontological questions every N rounds.
74
+ """
75
+ self._start_round = start_round
76
+ self._frequency = frequency
77
+
78
+ def should_ask_ontological_question(self, round_number: int) -> bool:
79
+ """Determine if this round should include an ontological question.
80
+
81
+ Args:
82
+ round_number: Current interview round (1-based).
83
+
84
+ Returns:
85
+ True if ontological question should be asked.
86
+ """
87
+ if round_number < self._start_round:
88
+ return False
89
+ return (round_number - self._start_round) % self._frequency == 0
90
+
91
+ def select_question_type(
92
+ self,
93
+ round_number: int,
94
+ ) -> OntologicalQuestionType:
95
+ """Select which ontological question type to ask.
96
+
97
+ Uses a rotating schedule through the four question types.
98
+
99
+ Args:
100
+ round_number: Current interview round (1-based).
101
+
102
+ Returns:
103
+ The question type to use for this round.
104
+ """
105
+ if round_number < self._start_round:
106
+ # Default to ESSENCE for early rounds (shouldn't happen normally)
107
+ return OntologicalQuestionType.ESSENCE
108
+
109
+ # Calculate position in schedule
110
+ position = (round_number - self._start_round) // self._frequency
111
+ schedule_index = position % len(_QUESTION_SCHEDULE)
112
+ return _QUESTION_SCHEDULE[schedule_index]
113
+
114
+ def get_decision(self, round_number: int) -> OntologicalQuestionDecision:
115
+ """Get the complete decision for a round.
116
+
117
+ Args:
118
+ round_number: Current interview round (1-based).
119
+
120
+ Returns:
121
+ Decision with all necessary information.
122
+ """
123
+ should_ask = self.should_ask_ontological_question(round_number)
124
+
125
+ if not should_ask:
126
+ return OntologicalQuestionDecision(should_ask=False)
127
+
128
+ question_type = self.select_question_type(round_number)
129
+ prompt_addition = build_ontological_prompt(question_type)
130
+
131
+ return OntologicalQuestionDecision(
132
+ should_ask=True,
133
+ question_type=question_type,
134
+ system_prompt_addition=prompt_addition,
135
+ )
136
+
137
+ def build_ontological_system_prompt(
138
+ self,
139
+ round_number: int,
140
+ base_prompt: str,
141
+ ) -> str:
142
+ """Build system prompt with optional ontological addition.
143
+
144
+ If this round requires an ontological question, it's appended
145
+ to the base prompt. Otherwise, the base prompt is returned as-is.
146
+
147
+ Args:
148
+ round_number: Current interview round (1-based).
149
+ base_prompt: The base Socratic system prompt.
150
+
151
+ Returns:
152
+ Enhanced prompt with ontological guidance if applicable.
153
+ """
154
+ decision = self.get_decision(round_number)
155
+
156
+ if not decision.should_ask or decision.question_type is None:
157
+ return base_prompt
158
+
159
+ # Add ontological context
160
+ question = ONTOLOGICAL_QUESTIONS[decision.question_type]
161
+ return f"""{base_prompt}
162
+
163
+ ---
164
+ ONTOLOGICAL FOCUS FOR THIS ROUND:
165
+ {decision.system_prompt_addition}
166
+ Your question this round should probe: {question.question}
167
+ Purpose: {question.purpose}
168
+ Consider: {question.follow_up}
169
+ ---"""
170
+
171
+
172
+ # Default singleton for convenience
173
+ default_interview_ontology_analyzer = InterviewOntologyAnalyzer()
174
+
175
+
176
+ __all__ = [
177
+ "InterviewOntologyAnalyzer",
178
+ "OntologicalQuestionDecision",
179
+ "default_interview_ontology_analyzer",
180
+ ]
@@ -1,7 +1,9 @@
1
1
  """CLI command implementations for Ouroboros.
2
2
 
3
3
  This module contains the command group implementations:
4
+ - init: Start interactive interview
4
5
  - run: Execute workflows
5
6
  - config: Manage configuration
6
7
  - status: Check system status
8
+ - mcp: MCP server management
7
9
  """