ouroboros-ai 0.2.3__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ouroboros-ai might be problematic; see the registry's advisory page for more details.

Files changed (44):
  1. ouroboros/__init__.py +1 -1
  2. ouroboros/bigbang/__init__.py +9 -0
  3. ouroboros/bigbang/interview.py +16 -18
  4. ouroboros/bigbang/ontology.py +180 -0
  5. ouroboros/cli/commands/__init__.py +2 -0
  6. ouroboros/cli/commands/init.py +162 -97
  7. ouroboros/cli/commands/mcp.py +161 -0
  8. ouroboros/cli/commands/run.py +165 -27
  9. ouroboros/cli/main.py +2 -1
  10. ouroboros/core/ontology_aspect.py +455 -0
  11. ouroboros/core/ontology_questions.py +462 -0
  12. ouroboros/evaluation/__init__.py +16 -1
  13. ouroboros/evaluation/consensus.py +569 -11
  14. ouroboros/evaluation/models.py +81 -0
  15. ouroboros/events/ontology.py +135 -0
  16. ouroboros/mcp/__init__.py +83 -0
  17. ouroboros/mcp/client/__init__.py +20 -0
  18. ouroboros/mcp/client/adapter.py +632 -0
  19. ouroboros/mcp/client/manager.py +600 -0
  20. ouroboros/mcp/client/protocol.py +161 -0
  21. ouroboros/mcp/errors.py +377 -0
  22. ouroboros/mcp/resources/__init__.py +22 -0
  23. ouroboros/mcp/resources/handlers.py +328 -0
  24. ouroboros/mcp/server/__init__.py +21 -0
  25. ouroboros/mcp/server/adapter.py +408 -0
  26. ouroboros/mcp/server/protocol.py +291 -0
  27. ouroboros/mcp/server/security.py +636 -0
  28. ouroboros/mcp/tools/__init__.py +24 -0
  29. ouroboros/mcp/tools/definitions.py +351 -0
  30. ouroboros/mcp/tools/registry.py +269 -0
  31. ouroboros/mcp/types.py +333 -0
  32. ouroboros/orchestrator/__init__.py +31 -0
  33. ouroboros/orchestrator/events.py +40 -0
  34. ouroboros/orchestrator/mcp_config.py +419 -0
  35. ouroboros/orchestrator/mcp_tools.py +483 -0
  36. ouroboros/orchestrator/runner.py +119 -2
  37. ouroboros/providers/claude_code_adapter.py +75 -0
  38. ouroboros/strategies/__init__.py +23 -0
  39. ouroboros/strategies/devil_advocate.py +197 -0
  40. {ouroboros_ai-0.2.3.dist-info → ouroboros_ai-0.4.0.dist-info}/METADATA +73 -17
  41. {ouroboros_ai-0.2.3.dist-info → ouroboros_ai-0.4.0.dist-info}/RECORD +44 -19
  42. {ouroboros_ai-0.2.3.dist-info → ouroboros_ai-0.4.0.dist-info}/WHEEL +0 -0
  43. {ouroboros_ai-0.2.3.dist-info → ouroboros_ai-0.4.0.dist-info}/entry_points.txt +0 -0
  44. {ouroboros_ai-0.2.3.dist-info → ouroboros_ai-0.4.0.dist-info}/licenses/LICENSE +0 -0
@@ -5,6 +5,7 @@ Supports both LiteLLM (external API) and Claude Code (Max Plan) modes.
5
5
  """
6
6
 
7
7
  import asyncio
8
+ from enum import Enum, auto
8
9
  from pathlib import Path
9
10
  from typing import Annotated
10
11
 
@@ -12,13 +13,28 @@ from rich.prompt import Confirm, Prompt
12
13
  import typer
13
14
 
14
15
  from ouroboros.bigbang.ambiguity import AmbiguityScorer
15
- from ouroboros.bigbang.interview import MAX_INTERVIEW_ROUNDS, InterviewEngine, InterviewState
16
+ from ouroboros.bigbang.interview import (
17
+ MIN_ROUNDS_BEFORE_EARLY_EXIT,
18
+ SOFT_LIMIT_WARNING_THRESHOLD,
19
+ InterviewEngine,
20
+ InterviewState,
21
+ InterviewStatus,
22
+ )
16
23
  from ouroboros.bigbang.seed_generator import SeedGenerator
17
24
  from ouroboros.cli.formatters import console
18
25
  from ouroboros.cli.formatters.panels import print_error, print_info, print_success, print_warning
19
26
  from ouroboros.providers.base import LLMAdapter
20
27
  from ouroboros.providers.litellm_adapter import LiteLLMAdapter
21
28
 
29
+
30
class SeedGenerationResult(Enum):
    """Outcome of a single Seed-generation attempt.

    Drives the outer control flow in ``_run_interview``: success proceeds
    to the workflow, cancellation exits, and ``CONTINUE_INTERVIEW`` re-opens
    the question loop to reduce ambiguity.
    """

    # Seed was generated and saved successfully.
    SUCCESS = auto()
    # User aborted, or generation/saving failed.
    CANCELLED = auto()
    # Ambiguity too high; user chose to answer more questions first.
    CONTINUE_INTERVIEW = auto()
36
+
37
+
22
38
  app = typer.Typer(
23
39
  name="init",
24
40
  help="Start interactive interview to refine requirements.",
@@ -43,62 +59,30 @@ def _get_adapter(use_orchestrator: bool) -> LLMAdapter:
43
59
  return LiteLLMAdapter()
44
60
 
45
61
 
46
- async def _run_interview(
47
- initial_context: str,
48
- resume_id: str | None = None,
49
- state_dir: Path | None = None,
50
- use_orchestrator: bool = False,
51
- ) -> None:
52
- """Run the interview process.
62
+ async def _run_interview_loop(
63
+ engine: InterviewEngine,
64
+ state: InterviewState,
65
+ ) -> InterviewState:
66
+ """Run the interview question loop until completion or user exit.
53
67
 
54
- Args:
55
- initial_context: Initial context or idea for the interview.
56
- resume_id: Optional interview ID to resume.
57
- state_dir: Optional custom state directory.
58
- use_orchestrator: If True, use Claude Code (Max Plan) instead of LiteLLM.
59
- """
60
- # Initialize components
61
- llm_adapter = _get_adapter(use_orchestrator)
62
- engine = InterviewEngine(
63
- llm_adapter=llm_adapter,
64
- state_dir=state_dir or Path.home() / ".ouroboros" / "data",
65
- )
68
+ Implements tiered confirmation:
69
+ - Rounds 1-3: Auto-continue (minimum context)
70
+ - Rounds 4-15: Ask "Continue?" after each round
71
+ - Rounds 16+: Ask "Continue?" with diminishing returns warning
66
72
 
67
- # Load or start interview
68
- if resume_id:
69
- print_info(f"Resuming interview: {resume_id}")
70
- state_result = await engine.load_state(resume_id)
71
- if state_result.is_err:
72
- print_error(f"Failed to load interview: {state_result.error.message}")
73
- raise typer.Exit(code=1)
74
- state = state_result.value
75
- else:
76
- print_info("Starting new interview session...")
77
- state_result = await engine.start_interview(initial_context)
78
- if state_result.is_err:
79
- print_error(f"Failed to start interview: {state_result.error.message}")
80
- raise typer.Exit(code=1)
81
- state = state_result.value
82
-
83
- console.print()
84
- console.print(
85
- f"[bold cyan]Interview Session: {state.interview_id}[/]",
86
- )
87
- console.print(f"[muted]Max rounds: {MAX_INTERVIEW_ROUNDS}[/]")
88
- console.print()
73
+ Args:
74
+ engine: Interview engine instance.
75
+ state: Current interview state.
89
76
 
90
- # Interview loop
77
+ Returns:
78
+ Updated interview state.
79
+ """
91
80
  while not state.is_complete:
92
81
  current_round = state.current_round_number
93
- console.print(
94
- f"[bold]Round {current_round}/{MAX_INTERVIEW_ROUNDS}[/]",
95
- )
82
+ console.print(f"[bold]Round {current_round}[/]")
96
83
 
97
84
  # Generate question
98
- with console.status(
99
- "[cyan]Generating question...[/]",
100
- spinner="dots",
101
- ):
85
+ with console.status("[cyan]Generating question...[/]", spinner="dots"):
102
86
  question_result = await engine.ask_next_question(state)
103
87
 
104
88
  if question_result.is_err:
@@ -130,15 +114,22 @@ async def _run_interview(
130
114
 
131
115
  state = record_result.value
132
116
 
133
- # Save state
117
+ # Save state immediately after recording
134
118
  save_result = await engine.save_state(state)
135
119
  if save_result.is_err:
136
120
  print_error(f"Warning: Failed to save state: {save_result.error.message}")
137
121
 
138
122
  console.print()
139
123
 
140
- # Check if user wants to continue or finish early
141
- if not state.is_complete and current_round >= 3:
124
+ # Tiered confirmation logic
125
+ if current_round >= MIN_ROUNDS_BEFORE_EARLY_EXIT:
126
+ # Show warning for rounds beyond soft limit
127
+ if current_round >= SOFT_LIMIT_WARNING_THRESHOLD:
128
+ print_warning(
129
+ f"Round {current_round}: Diminishing returns expected. "
130
+ "Consider generating Seed to check ambiguity score."
131
+ )
132
+
142
133
  should_continue = Confirm.ask(
143
134
  "Continue with more questions?",
144
135
  default=True,
@@ -147,40 +138,104 @@ async def _run_interview(
147
138
  complete_result = await engine.complete_interview(state)
148
139
  if complete_result.is_ok:
149
140
  state = complete_result.value
150
- await engine.save_state(state)
141
+ await engine.save_state(state)
151
142
  break
152
143
 
153
- # Interview complete
154
- console.print()
155
- print_success("Interview completed!")
156
- console.print(f"[muted]Total rounds: {len(state.rounds)}[/]")
157
- console.print(f"[muted]Interview ID: {state.interview_id}[/]")
144
+ return state
158
145
 
159
- # Save final state
160
- save_result = await engine.save_state(state)
161
- if save_result.is_ok:
162
- console.print(f"[muted]State saved to: {save_result.value}[/]")
163
146
 
164
- console.print()
147
+ async def _run_interview(
148
+ initial_context: str,
149
+ resume_id: str | None = None,
150
+ state_dir: Path | None = None,
151
+ use_orchestrator: bool = False,
152
+ ) -> None:
153
+ """Run the interview process.
165
154
 
166
- # Ask if user wants to proceed to Seed generation
167
- should_generate_seed = Confirm.ask(
168
- "[bold cyan]Proceed to generate Seed specification?[/]",
169
- default=True,
155
+ Args:
156
+ initial_context: Initial context or idea for the interview.
157
+ resume_id: Optional interview ID to resume.
158
+ state_dir: Optional custom state directory.
159
+ use_orchestrator: If True, use Claude Code (Max Plan) instead of LiteLLM.
160
+ """
161
+ # Initialize components
162
+ llm_adapter = _get_adapter(use_orchestrator)
163
+ engine = InterviewEngine(
164
+ llm_adapter=llm_adapter,
165
+ state_dir=state_dir or Path.home() / ".ouroboros" / "data",
170
166
  )
171
167
 
172
- if not should_generate_seed:
173
- console.print(
174
- "[muted]You can resume later with:[/] "
175
- f"[bold]ouroboros init start --resume {state.interview_id}[/]"
168
+ # Load or start interview
169
+ if resume_id:
170
+ print_info(f"Resuming interview: {resume_id}")
171
+ state_result = await engine.load_state(resume_id)
172
+ if state_result.is_err:
173
+ print_error(f"Failed to load interview: {state_result.error.message}")
174
+ raise typer.Exit(code=1)
175
+ state = state_result.value
176
+ else:
177
+ print_info("Starting new interview session...")
178
+ state_result = await engine.start_interview(initial_context)
179
+ if state_result.is_err:
180
+ print_error(f"Failed to start interview: {state_result.error.message}")
181
+ raise typer.Exit(code=1)
182
+ state = state_result.value
183
+
184
+ console.print()
185
+ console.print(f"[bold cyan]Interview Session: {state.interview_id}[/]")
186
+ console.print("[muted]No round limit - you decide when to stop[/]")
187
+ console.print()
188
+
189
+ # Run initial interview loop
190
+ state = await _run_interview_loop(engine, state)
191
+
192
+ # Outer loop for retry on high ambiguity
193
+ while True:
194
+ # Interview complete
195
+ console.print()
196
+ print_success("Interview completed!")
197
+ console.print(f"[muted]Total rounds: {len(state.rounds)}[/]")
198
+ console.print(f"[muted]Interview ID: {state.interview_id}[/]")
199
+
200
+ # Save final state
201
+ save_result = await engine.save_state(state)
202
+ if save_result.is_ok:
203
+ console.print(f"[muted]State saved to: {save_result.value}[/]")
204
+
205
+ console.print()
206
+
207
+ # Ask if user wants to proceed to Seed generation
208
+ should_generate_seed = Confirm.ask(
209
+ "[bold cyan]Proceed to generate Seed specification?[/]",
210
+ default=True,
176
211
  )
177
- return
178
212
 
179
- # Generate Seed
180
- seed_path = await _generate_seed_from_interview(state, llm_adapter)
213
+ if not should_generate_seed:
214
+ console.print(
215
+ "[muted]You can resume later with:[/] "
216
+ f"[bold]ouroboros init start --resume {state.interview_id}[/]"
217
+ )
218
+ return
181
219
 
182
- if seed_path is None:
183
- return
220
+ # Generate Seed
221
+ seed_path, result = await _generate_seed_from_interview(state, llm_adapter)
222
+
223
+ if result == SeedGenerationResult.CONTINUE_INTERVIEW:
224
+ # Re-open interview for more questions
225
+ console.print()
226
+ print_info("Continuing interview to reduce ambiguity...")
227
+ state.status = InterviewStatus.IN_PROGRESS
228
+ await engine.save_state(state) # Save status change immediately
229
+
230
+ # Continue interview loop (reusing the same helper)
231
+ state = await _run_interview_loop(engine, state)
232
+ continue
233
+
234
+ if result == SeedGenerationResult.CANCELLED:
235
+ return
236
+
237
+ # Success - proceed to workflow
238
+ break
184
239
 
185
240
  # Ask if user wants to start workflow
186
241
  console.print()
@@ -196,7 +251,7 @@ async def _run_interview(
196
251
  async def _generate_seed_from_interview(
197
252
  state: InterviewState,
198
253
  llm_adapter: LLMAdapter,
199
- ) -> Path | None:
254
+ ) -> tuple[Path | None, SeedGenerationResult]:
200
255
  """Generate Seed from completed interview.
201
256
 
202
257
  Args:
@@ -204,7 +259,7 @@ async def _generate_seed_from_interview(
204
259
  llm_adapter: LLM adapter for scoring and generation.
205
260
 
206
261
  Returns:
207
- Path to generated seed file, or None if failed.
262
+ Tuple of (path to generated seed file or None, result status).
208
263
  """
209
264
  console.print()
210
265
  console.print("[bold cyan]Generating Seed specification...[/]")
@@ -216,7 +271,7 @@ async def _generate_seed_from_interview(
216
271
 
217
272
  if score_result.is_err:
218
273
  print_error(f"Failed to calculate ambiguity: {score_result.error.message}")
219
- return None
274
+ return None, SeedGenerationResult.CANCELLED
220
275
 
221
276
  ambiguity_score = score_result.value
222
277
  console.print(f"[muted]Ambiguity score: {ambiguity_score.overall_score:.2f}[/]")
@@ -226,12 +281,24 @@ async def _generate_seed_from_interview(
226
281
  f"Ambiguity score ({ambiguity_score.overall_score:.2f}) is too high. "
227
282
  "Consider more interview rounds to clarify requirements."
228
283
  )
229
- should_force = Confirm.ask(
230
- "[yellow]Generate Seed anyway?[/]",
231
- default=False,
284
+ console.print()
285
+ console.print("[bold]What would you like to do?[/]")
286
+ console.print(" [cyan]1[/] - Continue interview with more questions")
287
+ console.print(" [cyan]2[/] - Generate Seed anyway (force)")
288
+ console.print(" [cyan]3[/] - Cancel")
289
+ console.print()
290
+
291
+ choice = Prompt.ask(
292
+ "[yellow]Select option[/]",
293
+ choices=["1", "2", "3"],
294
+ default="1",
232
295
  )
233
- if not should_force:
234
- return None
296
+
297
+ if choice == "1":
298
+ return None, SeedGenerationResult.CONTINUE_INTERVIEW
299
+ elif choice == "3":
300
+ return None, SeedGenerationResult.CANCELLED
301
+ # choice == "2" falls through to generate anyway
235
302
 
236
303
  # Step 2: Generate Seed
237
304
  with console.status("[cyan]Generating Seed from interview...[/]", spinner="dots"):
@@ -240,18 +307,20 @@ async def _generate_seed_from_interview(
240
307
  if ambiguity_score.is_ready_for_seed:
241
308
  seed_result = await generator.generate(state, ambiguity_score)
242
309
  else:
243
- # Create a modified score that passes threshold for forced generation
310
+ # TODO: Add force=True parameter to SeedGenerator.generate() instead of this hack
311
+ # Creating a modified score to bypass threshold check
244
312
  from ouroboros.bigbang.ambiguity import AmbiguityScore as AmbScore
245
313
 
314
+ FORCED_SCORE_VALUE = 0.19 # Just under threshold (0.2)
246
315
  forced_score = AmbScore(
247
- overall_score=0.19, # Just under threshold
316
+ overall_score=FORCED_SCORE_VALUE,
248
317
  breakdown=ambiguity_score.breakdown,
249
318
  )
250
319
  seed_result = await generator.generate(state, forced_score)
251
320
 
252
321
  if seed_result.is_err:
253
322
  print_error(f"Failed to generate Seed: {seed_result.error.message}")
254
- return None
323
+ return None, SeedGenerationResult.CANCELLED
255
324
 
256
325
  seed = seed_result.value
257
326
 
@@ -261,10 +330,10 @@ async def _generate_seed_from_interview(
261
330
 
262
331
  if save_result.is_err:
263
332
  print_error(f"Failed to save Seed: {save_result.error.message}")
264
- return None
333
+ return None, SeedGenerationResult.CANCELLED
265
334
 
266
335
  print_success(f"Seed generated: {seed_path}")
267
- return seed_path
336
+ return seed_path, SeedGenerationResult.SUCCESS
268
337
 
269
338
 
270
339
  async def _start_workflow(seed_path: Path, use_orchestrator: bool = False) -> None:
@@ -344,21 +413,17 @@ def start(
344
413
  """
345
414
  # Get initial context if not provided
346
415
  if not resume and not context:
347
- console.print(
348
- "[bold cyan]Welcome to Ouroboros Interview![/]",
349
- )
416
+ console.print("[bold cyan]Welcome to Ouroboros Interview![/]")
350
417
  console.print()
351
418
  console.print(
352
419
  "This interactive process will help refine your ideas into clear requirements.",
353
420
  )
354
421
  console.print(
355
- f"You'll be asked up to {MAX_INTERVIEW_ROUNDS} questions to reduce ambiguity.",
422
+ "You control when to stop - no arbitrary round limit.",
356
423
  )
357
424
  console.print()
358
425
 
359
- context = Prompt.ask(
360
- "[bold]What would you like to build?[/]",
361
- )
426
+ context = Prompt.ask("[bold]What would you like to build?[/]")
362
427
 
363
428
  if not resume and not context:
364
429
  print_error("Initial context is required when not resuming.")
@@ -0,0 +1,161 @@
1
+ """MCP command group for Ouroboros.
2
+
3
+ Start and manage the MCP (Model Context Protocol) server.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ import asyncio
9
+ from typing import Annotated
10
+
11
+ import typer
12
+
13
+ from ouroboros.cli.formatters.panels import print_error, print_info, print_success
14
+
15
+ app = typer.Typer(
16
+ name="mcp",
17
+ help="MCP (Model Context Protocol) server commands.",
18
+ no_args_is_help=True,
19
+ )
20
+
21
+
22
async def _run_mcp_server(
    host: str,
    port: int,
    transport: str,
) -> None:
    """Create, configure, and run the Ouroboros MCP server until it stops.

    Args:
        host: Host to bind to (only relevant for the network transport).
        port: Port to bind to (only relevant for the network transport).
        transport: Transport type ("stdio" or "sse").
    """
    # Imported lazily so the rest of the CLI works even when the optional
    # MCP dependencies are not installed (ImportError is handled by callers).
    from ouroboros.mcp.server.adapter import create_ouroboros_server
    from ouroboros.mcp.tools.definitions import OUROBOROS_TOOLS

    # Create server
    server = create_ouroboros_server(
        name="ouroboros-mcp",
        version="1.0.0",
    )

    # Expose every built-in Ouroboros tool on this server instance.
    for tool in OUROBOROS_TOOLS:
        server.register_tool(tool)

    print_success(f"MCP Server starting on {transport}...")
    print_info(f"Registered {len(OUROBOROS_TOOLS)} tools")

    if transport == "stdio":
        print_info("Reading from stdin, writing to stdout")
    else:
        print_info(f"Listening on {host}:{port}")
    # Common to both transports; hoisted out of the previously duplicated branches.
    print_info("Press Ctrl+C to stop")

    # Blocks until the server shuts down (e.g. via KeyboardInterrupt).
    await server.serve(transport=transport)
59
+
60
+
61
@app.command()
def serve(
    host: Annotated[
        str,
        typer.Option(
            "--host",
            "-h",
            help="Host to bind to.",
        ),
    ] = "localhost",
    port: Annotated[
        int,
        typer.Option(
            "--port",
            "-p",
            help="Port to bind to.",
        ),
    ] = 8080,
    transport: Annotated[
        str,
        typer.Option(
            "--transport",
            "-t",
            help="Transport type: stdio or sse.",
        ),
    ] = "stdio",
) -> None:
    """Start the MCP server.

    Exposes Ouroboros functionality via Model Context Protocol,
    allowing Claude Desktop and other MCP clients to interact
    with Ouroboros.

    Available tools:
    - ouroboros_execute_seed: Execute a seed specification
    - ouroboros_session_status: Get session status
    - ouroboros_query_events: Query event history

    Examples:

        # Start with stdio transport (for Claude Desktop)
        ouroboros mcp serve

        # Start with SSE transport on custom port
        ouroboros mcp serve --transport sse --port 9000
    """
    # Fail fast on unknown transports instead of silently falling into the
    # network-transport branch of _run_mcp_server with a bogus value.
    if transport not in ("stdio", "sse"):
        print_error(f"Invalid transport: {transport!r} (expected 'stdio' or 'sse')")
        raise typer.Exit(1)

    try:
        asyncio.run(_run_mcp_server(host, port, transport))
    except KeyboardInterrupt:
        # Normal interactive shutdown path; not an error.
        print_info("\nMCP Server stopped")
    except ImportError as e:
        # MCP support is an optional extra; give the user an actionable hint.
        print_error(f"MCP dependencies not installed: {e}")
        print_info("Install with: uv add mcp")
        raise typer.Exit(1) from e
115
+
116
+
117
@app.command()
def info() -> None:
    """Show MCP server information and available tools."""
    # Imported lazily, matching the serve command, so the CLI loads even
    # when MCP extras are absent.
    from ouroboros.cli.formatters import console
    from ouroboros.mcp.server.adapter import create_ouroboros_server
    from ouroboros.mcp.tools.definitions import OUROBOROS_TOOLS

    # Build a throwaway server instance purely to introspect its metadata.
    mcp_server = create_ouroboros_server(
        name="ouroboros-mcp",
        version="1.0.0",
    )
    for definition in OUROBOROS_TOOLS:
        mcp_server.register_tool(definition)

    meta = mcp_server.info

    console.print()
    console.print("[bold]MCP Server Information[/bold]")
    console.print(f" Name: {meta.name}")
    console.print(f" Version: {meta.version}")
    console.print()

    console.print("[bold]Capabilities[/bold]")
    console.print(f" Tools: {meta.capabilities.tools}")
    console.print(f" Resources: {meta.capabilities.resources}")
    console.print(f" Prompts: {meta.capabilities.prompts}")
    console.print()

    console.print("[bold]Available Tools[/bold]")
    for tool in meta.tools:
        console.print(f" [green]{tool.name}[/green]")
        console.print(f" {tool.description}")
        if tool.parameters:
            console.print(" Parameters:")
            for param in tool.parameters:
                marker = "[red]*[/red]" if param.required else ""
                console.print(f" - {param.name}{marker}: {param.description}")
        console.print()
159
+
160
+
161
+ __all__ = ["app"]