ouroboros-ai 0.1.0__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ouroboros-ai might be problematic. Click here for more details.

Files changed (81) hide show
  1. ouroboros/__init__.py +15 -0
  2. ouroboros/__main__.py +9 -0
  3. ouroboros/bigbang/__init__.py +39 -0
  4. ouroboros/bigbang/ambiguity.py +464 -0
  5. ouroboros/bigbang/interview.py +530 -0
  6. ouroboros/bigbang/seed_generator.py +610 -0
  7. ouroboros/cli/__init__.py +9 -0
  8. ouroboros/cli/commands/__init__.py +7 -0
  9. ouroboros/cli/commands/config.py +79 -0
  10. ouroboros/cli/commands/init.py +425 -0
  11. ouroboros/cli/commands/run.py +201 -0
  12. ouroboros/cli/commands/status.py +85 -0
  13. ouroboros/cli/formatters/__init__.py +31 -0
  14. ouroboros/cli/formatters/panels.py +157 -0
  15. ouroboros/cli/formatters/progress.py +112 -0
  16. ouroboros/cli/formatters/tables.py +166 -0
  17. ouroboros/cli/main.py +60 -0
  18. ouroboros/config/__init__.py +81 -0
  19. ouroboros/config/loader.py +292 -0
  20. ouroboros/config/models.py +332 -0
  21. ouroboros/core/__init__.py +62 -0
  22. ouroboros/core/ac_tree.py +401 -0
  23. ouroboros/core/context.py +472 -0
  24. ouroboros/core/errors.py +246 -0
  25. ouroboros/core/seed.py +212 -0
  26. ouroboros/core/types.py +205 -0
  27. ouroboros/evaluation/__init__.py +110 -0
  28. ouroboros/evaluation/consensus.py +350 -0
  29. ouroboros/evaluation/mechanical.py +351 -0
  30. ouroboros/evaluation/models.py +235 -0
  31. ouroboros/evaluation/pipeline.py +286 -0
  32. ouroboros/evaluation/semantic.py +302 -0
  33. ouroboros/evaluation/trigger.py +278 -0
  34. ouroboros/events/__init__.py +5 -0
  35. ouroboros/events/base.py +80 -0
  36. ouroboros/events/decomposition.py +153 -0
  37. ouroboros/events/evaluation.py +248 -0
  38. ouroboros/execution/__init__.py +44 -0
  39. ouroboros/execution/atomicity.py +451 -0
  40. ouroboros/execution/decomposition.py +481 -0
  41. ouroboros/execution/double_diamond.py +1386 -0
  42. ouroboros/execution/subagent.py +275 -0
  43. ouroboros/observability/__init__.py +63 -0
  44. ouroboros/observability/drift.py +383 -0
  45. ouroboros/observability/logging.py +504 -0
  46. ouroboros/observability/retrospective.py +338 -0
  47. ouroboros/orchestrator/__init__.py +78 -0
  48. ouroboros/orchestrator/adapter.py +391 -0
  49. ouroboros/orchestrator/events.py +278 -0
  50. ouroboros/orchestrator/runner.py +597 -0
  51. ouroboros/orchestrator/session.py +486 -0
  52. ouroboros/persistence/__init__.py +23 -0
  53. ouroboros/persistence/checkpoint.py +511 -0
  54. ouroboros/persistence/event_store.py +183 -0
  55. ouroboros/persistence/migrations/__init__.py +1 -0
  56. ouroboros/persistence/migrations/runner.py +100 -0
  57. ouroboros/persistence/migrations/scripts/001_initial.sql +20 -0
  58. ouroboros/persistence/schema.py +56 -0
  59. ouroboros/persistence/uow.py +230 -0
  60. ouroboros/providers/__init__.py +28 -0
  61. ouroboros/providers/base.py +133 -0
  62. ouroboros/providers/claude_code_adapter.py +212 -0
  63. ouroboros/providers/litellm_adapter.py +316 -0
  64. ouroboros/py.typed +0 -0
  65. ouroboros/resilience/__init__.py +67 -0
  66. ouroboros/resilience/lateral.py +595 -0
  67. ouroboros/resilience/stagnation.py +727 -0
  68. ouroboros/routing/__init__.py +60 -0
  69. ouroboros/routing/complexity.py +272 -0
  70. ouroboros/routing/downgrade.py +664 -0
  71. ouroboros/routing/escalation.py +340 -0
  72. ouroboros/routing/router.py +204 -0
  73. ouroboros/routing/tiers.py +247 -0
  74. ouroboros/secondary/__init__.py +40 -0
  75. ouroboros/secondary/scheduler.py +467 -0
  76. ouroboros/secondary/todo_registry.py +483 -0
  77. ouroboros_ai-0.1.0.dist-info/METADATA +607 -0
  78. ouroboros_ai-0.1.0.dist-info/RECORD +81 -0
  79. ouroboros_ai-0.1.0.dist-info/WHEEL +4 -0
  80. ouroboros_ai-0.1.0.dist-info/entry_points.txt +2 -0
  81. ouroboros_ai-0.1.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,610 @@
1
+ """Seed generation module for transforming interview results to immutable Seeds.
2
+
3
+ This module implements the transformation from InterviewState to Seed,
4
+ gating on ambiguity score (must be <= 0.2) to ensure requirements are
5
+ clear enough for execution.
6
+
7
+ The SeedGenerator:
8
+ 1. Validates ambiguity score is within threshold
9
+ 2. Uses LLM to extract structured requirements from interview
10
+ 3. Creates immutable Seed with proper metadata
11
+ 4. Optionally saves to YAML file
12
+ """
13
+
14
+ from dataclasses import dataclass, field
15
+ from datetime import UTC, datetime
16
+ from pathlib import Path
17
+ from typing import Any
18
+
19
+ import structlog
20
+ import yaml
21
+
22
+ from ouroboros.bigbang.ambiguity import AMBIGUITY_THRESHOLD, AmbiguityScore
23
+ from ouroboros.bigbang.interview import InterviewState
24
+ from ouroboros.core.errors import ProviderError, ValidationError
25
+ from ouroboros.core.seed import (
26
+ EvaluationPrinciple,
27
+ ExitCondition,
28
+ OntologyField,
29
+ OntologySchema,
30
+ Seed,
31
+ SeedMetadata,
32
+ )
33
+ from ouroboros.core.types import Result
34
+ from ouroboros.providers.base import CompletionConfig, Message, MessageRole
35
+ from ouroboros.providers.litellm_adapter import LiteLLMAdapter
36
+
37
+ log = structlog.get_logger()
38
+
39
+ # Default model moved to config.models.ClarificationConfig.default_model
40
+ _FALLBACK_MODEL = "openrouter/google/gemini-2.0-flash-001"
41
+ EXTRACTION_TEMPERATURE = 0.2
42
+
43
+
44
@dataclass
class SeedGenerator:
    """Turns a finished interview into an immutable Seed specification.

    A Seed is only produced when the interview's ambiguity score is at or
    below AMBIGUITY_THRESHOLD; otherwise generation is refused up front.
    The mapping from free-form conversation to structured fields is done
    by an LLM completion, whose line-oriented response is then parsed
    into the Seed model.

    Example:
        generator = SeedGenerator(llm_adapter=LiteLLMAdapter())

        result = await generator.generate(
            state=interview_state,
            ambiguity_score=ambiguity_result,
        )

        if result.is_ok:
            seed = result.value
            save_result = await generator.save_seed(seed, Path("seed.yaml"))

    Note:
        The model can be configured via
        OuroborosConfig.clarification.default_model or passed directly to
        the constructor.
    """

    llm_adapter: LiteLLMAdapter
    model: str = _FALLBACK_MODEL
    temperature: float = EXTRACTION_TEMPERATURE
    max_tokens: int = 4096
    output_dir: Path = field(default_factory=lambda: Path.home() / ".ouroboros" / "seeds")

    def __post_init__(self) -> None:
        """Create the seed output directory if it does not exist yet."""
        self.output_dir.mkdir(parents=True, exist_ok=True)

    async def generate(
        self,
        state: InterviewState,
        ambiguity_score: AmbiguityScore,
    ) -> Result[Seed, ValidationError | ProviderError]:
        """Produce an immutable Seed from a completed interview.

        Generation is gated on the ambiguity score: anything above
        AMBIGUITY_THRESHOLD is rejected before the LLM is consulted.

        Args:
            state: Completed interview state.
            ambiguity_score: The ambiguity score for the interview.

        Returns:
            Result holding the generated Seed, or a ValidationError /
            ProviderError describing why generation failed.
        """
        log.info(
            "seed.generation.started",
            interview_id=state.interview_id,
            ambiguity_score=ambiguity_score.overall_score,
        )

        # Guard: refuse to generate while requirements are too ambiguous.
        if not ambiguity_score.is_ready_for_seed:
            log.warning(
                "seed.generation.ambiguity_too_high",
                interview_id=state.interview_id,
                ambiguity_score=ambiguity_score.overall_score,
                threshold=AMBIGUITY_THRESHOLD,
            )
            gate_error = ValidationError(
                f"Ambiguity score {ambiguity_score.overall_score:.2f} exceeds "
                f"threshold {AMBIGUITY_THRESHOLD}. Cannot generate Seed.",
                field="ambiguity_score",
                value=ambiguity_score.overall_score,
                details={
                    "threshold": AMBIGUITY_THRESHOLD,
                    "interview_id": state.interview_id,
                },
            )
            return Result.err(gate_error)

        # Ask the LLM for structured requirements.
        extracted = await self._extract_requirements(state)
        if extracted.is_err:
            return Result.err(extracted.error)

        metadata = SeedMetadata(
            ambiguity_score=ambiguity_score.overall_score,
            interview_id=state.interview_id,
        )

        try:
            seed = self._build_seed(extracted.value, metadata)

            log.info(
                "seed.generation.completed",
                interview_id=state.interview_id,
                seed_id=seed.metadata.seed_id,
                goal_length=len(seed.goal),
                constraint_count=len(seed.constraints),
                criteria_count=len(seed.acceptance_criteria),
            )

            return Result.ok(seed)

        except Exception as e:
            # Any failure while assembling the Seed is surfaced as a
            # ValidationError rather than propagating to the caller.
            log.exception(
                "seed.generation.build_failed",
                interview_id=state.interview_id,
                error=str(e),
            )
            return Result.err(
                ValidationError(
                    f"Failed to build seed: {e}",
                    details={"interview_id": state.interview_id},
                )
            )

    async def _extract_requirements(
        self, state: InterviewState
    ) -> Result[dict[str, Any], ProviderError]:
        """Ask the LLM to map the interview transcript to structured fields.

        Args:
            state: The interview state.

        Returns:
            Result with the parsed requirements dict, or a ProviderError
            when the completion call or response parsing fails.
        """
        messages = [
            Message(
                role=MessageRole.SYSTEM,
                content=self._build_extraction_system_prompt(),
            ),
            Message(
                role=MessageRole.USER,
                content=self._build_extraction_user_prompt(
                    self._build_interview_context(state)
                ),
            ),
        ]

        config = CompletionConfig(
            model=self.model,
            temperature=self.temperature,
            max_tokens=self.max_tokens,
        )

        result = await self.llm_adapter.complete(messages, config)
        if result.is_err:
            log.warning(
                "seed.extraction.failed",
                interview_id=state.interview_id,
                error=str(result.error),
            )
            return Result.err(result.error)

        try:
            return Result.ok(self._parse_extraction_response(result.value.content))
        except (ValueError, KeyError) as e:
            log.warning(
                "seed.extraction.parse_failed",
                interview_id=state.interview_id,
                error=str(e),
                response=result.value.content[:500],
            )
            return Result.err(
                ProviderError(
                    f"Failed to parse extraction response: {e}",
                    details={"response_preview": result.value.content[:200]},
                )
            )

    def _build_interview_context(self, state: InterviewState) -> str:
        """Flatten the interview into a readable Q/A transcript.

        Args:
            state: The interview state.

        Returns:
            Transcript string beginning with the initial context.
        """
        transcript = [f"Initial Context: {state.initial_context}"]
        for qa in state.rounds:
            transcript.append(f"\nQ: {qa.question}")
            # Unanswered questions are still included, just without an A: line.
            if qa.user_response:
                transcript.append(f"A: {qa.user_response}")
        return "\n".join(transcript)

    def _build_extraction_system_prompt(self) -> str:
        """Return the system prompt describing the extraction output format.

        Returns:
            System prompt string.
        """
        return """You are an expert requirements engineer extracting structured requirements from an interview conversation.

Your task is to extract the following components from the conversation:

1. GOAL: A clear, specific statement of the primary objective.
2. CONSTRAINTS: Hard limitations or requirements that must be satisfied.
3. ACCEPTANCE_CRITERIA: Specific, measurable criteria for success.
4. ONTOLOGY_NAME: A name for the domain model/data structure.
5. ONTOLOGY_DESCRIPTION: Description of what the ontology represents.
6. ONTOLOGY_FIELDS: Key fields/attributes in the domain model.
7. EVALUATION_PRINCIPLES: Principles for evaluating the output quality.
8. EXIT_CONDITIONS: Conditions that indicate the workflow should terminate.

Respond in this exact format (each field on its own line):

GOAL: <goal statement>
CONSTRAINTS: <constraint 1> | <constraint 2> | ...
ACCEPTANCE_CRITERIA: <criterion 1> | <criterion 2> | ...
ONTOLOGY_NAME: <name>
ONTOLOGY_DESCRIPTION: <description>
ONTOLOGY_FIELDS: <name>:<type>:<description> | <name>:<type>:<description> | ...
EVALUATION_PRINCIPLES: <name>:<description>:<weight> | ...
EXIT_CONDITIONS: <name>:<description>:<criteria> | ...

Field types should be one of: string, number, boolean, array, object
Weights should be between 0.0 and 1.0

Be specific and concrete. Extract actual requirements from the conversation, not generic placeholders."""

    def _build_extraction_user_prompt(self, context: str) -> str:
        """Wrap the interview transcript in the extraction request.

        Args:
            context: Formatted interview context.

        Returns:
            User prompt string.
        """
        return f"""Please extract structured requirements from the following interview conversation:

---
{context}
---

Extract all components and provide them in the specified format."""

    def _parse_extraction_response(self, response: str) -> dict[str, Any]:
        """Parse the line-oriented extraction response into a dict.

        Args:
            response: Raw LLM response text.

        Returns:
            Mapping from lowercase field name to raw string value.

        Raises:
            ValueError: If a required field is absent from the response.
        """
        known_keys = (
            "goal",
            "constraints",
            "acceptance_criteria",
            "ontology_name",
            "ontology_description",
            "ontology_fields",
            "evaluation_principles",
            "exit_conditions",
        )

        parsed: dict[str, Any] = {}
        for raw_line in response.strip().split("\n"):
            text = raw_line.strip()
            if not text:
                continue
            # Each prompt label is exactly the lowercase key upper-cased
            # with a trailing colon (e.g. "ACCEPTANCE_CRITERIA:").
            for key in known_keys:
                marker = f"{key.upper()}:"
                if text.startswith(marker):
                    parsed[key] = text[len(marker):].strip()
                    break

        # The goal and ontology identity are mandatory; everything else
        # may legitimately be empty.
        for required in ("goal", "ontology_name", "ontology_description"):
            if required not in parsed:
                raise ValueError(f"Missing required field: {required}")

        return parsed

    def _build_seed(self, requirements: dict[str, Any], metadata: SeedMetadata) -> Seed:
        """Assemble a Seed from the parsed requirement strings.

        Args:
            requirements: Parsed requirements mapping.
            metadata: Metadata to attach to the Seed.

        Returns:
            The constructed Seed instance.
        """

        def pipe_items(key: str) -> list[str]:
            # Split a "a | b | c" style value into stripped, non-empty items.
            raw = requirements.get(key) or ""
            return [item.strip() for item in raw.split("|") if item.strip()]

        constraints = tuple(pipe_items("constraints"))
        acceptance_criteria = tuple(pipe_items("acceptance_criteria"))

        # Ontology fields are "<name>:<type>:<description>"; extra colons
        # belong to the description.
        ontology_fields: list[OntologyField] = []
        for entry in pipe_items("ontology_fields"):
            parts = entry.split(":")
            if len(parts) >= 3:
                ontology_fields.append(
                    OntologyField(
                        name=parts[0].strip(),
                        field_type=parts[1].strip(),
                        description=":".join(parts[2:]).strip(),
                    )
                )

        ontology_schema = OntologySchema(
            name=requirements["ontology_name"],
            description=requirements["ontology_description"],
            fields=tuple(ontology_fields),
        )

        # Principles are "<name>:<description>:<weight>"; a missing or
        # unparseable weight defaults to 1.0 and is clamped to [0.0, 1.0].
        evaluation_principles: list[EvaluationPrinciple] = []
        for entry in pipe_items("evaluation_principles"):
            parts = entry.split(":")
            if len(parts) < 2:
                continue
            weight = 1.0
            if len(parts) >= 3:
                try:
                    weight = float(parts[2].strip())
                except ValueError:
                    weight = 1.0
            evaluation_principles.append(
                EvaluationPrinciple(
                    name=parts[0].strip(),
                    description=parts[1].strip(),
                    weight=max(0.0, min(1.0, weight)),
                )
            )

        # Exit conditions are "<name>:<description>:<criteria>"; extra
        # colons belong to the criteria.
        exit_conditions: list[ExitCondition] = []
        for entry in pipe_items("exit_conditions"):
            parts = entry.split(":")
            if len(parts) >= 3:
                exit_conditions.append(
                    ExitCondition(
                        name=parts[0].strip(),
                        description=parts[1].strip(),
                        evaluation_criteria=":".join(parts[2:]).strip(),
                    )
                )

        return Seed(
            goal=requirements["goal"],
            constraints=constraints,
            acceptance_criteria=acceptance_criteria,
            ontology_schema=ontology_schema,
            evaluation_principles=tuple(evaluation_principles),
            exit_conditions=tuple(exit_conditions),
            metadata=metadata,
        )

    async def save_seed(
        self,
        seed: Seed,
        file_path: Path | None = None,
    ) -> Result[Path, ValidationError]:
        """Serialize a seed to a YAML file.

        NOTE(review): the file I/O here is synchronous despite the async
        signature, so it briefly blocks the event loop — confirm this is
        acceptable for callers.

        Args:
            seed: The seed to save.
            file_path: Optional destination path; defaults to
                output_dir/{seed_id}.yaml when omitted.

        Returns:
            Result containing the written path or a ValidationError.
        """
        target = (
            self.output_dir / f"{seed.metadata.seed_id}.yaml"
            if file_path is None
            else file_path
        )

        log.info(
            "seed.saving",
            seed_id=seed.metadata.seed_id,
            file_path=str(target),
        )

        try:
            target.parent.mkdir(parents=True, exist_ok=True)

            serialized = yaml.dump(
                seed.to_dict(),
                default_flow_style=False,
                allow_unicode=True,
                sort_keys=False,
            )
            target.write_text(serialized, encoding="utf-8")

            log.info(
                "seed.saved",
                seed_id=seed.metadata.seed_id,
                file_path=str(target),
            )

            return Result.ok(target)

        except (OSError, yaml.YAMLError) as e:
            log.exception(
                "seed.save_failed",
                seed_id=seed.metadata.seed_id,
                file_path=str(target),
                error=str(e),
            )
            return Result.err(
                ValidationError(
                    f"Failed to save seed: {e}",
                    details={
                        "seed_id": seed.metadata.seed_id,
                        "file_path": str(target),
                    },
                )
            )
503
+
504
+
505
async def load_seed(file_path: Path) -> Result[Seed, ValidationError]:
    """Load seed from YAML file.

    Args:
        file_path: Path to the seed YAML file.

    Returns:
        Result containing the loaded Seed, or a ValidationError when the
        file is missing, unreadable, or not a valid seed document.
    """
    if not file_path.exists():
        return Result.err(
            ValidationError(
                f"Seed file not found: {file_path}",
                field="file_path",
                value=str(file_path),
            )
        )

    try:
        content = file_path.read_text(encoding="utf-8")
        seed_dict = yaml.safe_load(content)

        # Validate and create Seed.  Seed.from_dict may raise KeyError /
        # TypeError on structurally invalid documents (e.g. an empty file,
        # where safe_load returns None); those must become a
        # ValidationError as well instead of escaping to the caller.
        seed = Seed.from_dict(seed_dict)

        log.info(
            "seed.loaded",
            seed_id=seed.metadata.seed_id,
            file_path=str(file_path),
        )

        return Result.ok(seed)

    except (OSError, yaml.YAMLError, ValueError, KeyError, TypeError) as e:
        log.exception(
            "seed.load_failed",
            file_path=str(file_path),
            error=str(e),
        )
        return Result.err(
            ValidationError(
                f"Failed to load seed: {e}",
                field="file_path",
                value=str(file_path),
                details={"error": str(e)},
            )
        )
552
+
553
+
554
def save_seed_sync(seed: Seed, file_path: Path) -> Result[Path, ValidationError]:
    """Synchronous counterpart of SeedGenerator.save_seed.

    Args:
        seed: The seed to persist.
        file_path: Destination path for the YAML file.

    Returns:
        Result containing the written path or a ValidationError.
    """
    log.info(
        "seed.saving.sync",
        seed_id=seed.metadata.seed_id,
        file_path=str(file_path),
    )

    try:
        # Make sure the destination directory exists before writing.
        file_path.parent.mkdir(parents=True, exist_ok=True)

        serialized = yaml.dump(
            seed.to_dict(),
            default_flow_style=False,
            allow_unicode=True,
            sort_keys=False,
        )
        file_path.write_text(serialized, encoding="utf-8")

        log.info(
            "seed.saved.sync",
            seed_id=seed.metadata.seed_id,
            file_path=str(file_path),
        )

        return Result.ok(file_path)

    except (OSError, yaml.YAMLError) as e:
        log.exception(
            "seed.save_failed.sync",
            seed_id=seed.metadata.seed_id,
            file_path=str(file_path),
            error=str(e),
        )
        return Result.err(
            ValidationError(
                f"Failed to save seed: {e}",
                details={
                    "seed_id": seed.metadata.seed_id,
                    "file_path": str(file_path),
                },
            )
        )
@@ -0,0 +1,9 @@
1
+ """Ouroboros CLI module.
2
+
3
+ This module provides the command-line interface for the Ouroboros system,
4
+ built with Typer for CLI framework and Rich for beautiful output.
5
+ """
6
+
7
+ from ouroboros.cli.main import app
8
+
9
+ __all__ = ["app"]
@@ -0,0 +1,7 @@
1
+ """CLI command implementations for Ouroboros.
2
+
3
+ This module contains the command group implementations:
4
+ - run: Execute workflows
5
+ - config: Manage configuration
6
+ - status: Check system status
7
+ """
@@ -0,0 +1,79 @@
1
+ """Config command group for Ouroboros.
2
+
3
+ Manage configuration settings and provider setup.
4
+ """
5
+
6
+ from typing import Annotated
7
+
8
+ import typer
9
+
10
+ from ouroboros.cli.formatters.panels import print_info, print_warning
11
+ from ouroboros.cli.formatters.tables import create_key_value_table, print_table
12
+
13
# Sub-application for the `config` command group.
app = typer.Typer(
    no_args_is_help=True,
    name="config",
    help="Manage Ouroboros configuration.",
)
18
+
19
+
20
@app.command()
def show(
    section: Annotated[
        str | None,
        typer.Argument(help="Configuration section to display (e.g., 'providers')."),
    ] = None,
) -> None:
    """Display current configuration.

    Shows all configuration if no section specified.
    """
    # Placeholder until real config loading is wired in.
    if section:
        print_info(f"Would display configuration section: {section}")
        return

    placeholder = {
        "config_path": "~/.ouroboros/config.yaml",
        "database": "~/.ouroboros/ouroboros.db",
        "log_level": "INFO",
    }
    print_table(create_key_value_table(placeholder, "Current Configuration"))
43
+
44
+
45
@app.command()
def init() -> None:
    """Initialize Ouroboros configuration.

    Creates default configuration files if they don't exist.
    """
    # Placeholder until real config scaffolding exists.
    message = "Would initialize configuration at ~/.ouroboros/"
    print_info(message)
53
+
54
+
55
+ @app.command("set")
56
+ def set_value(
57
+ key: Annotated[str, typer.Argument(help="Configuration key (dot notation).")],
58
+ value: Annotated[str, typer.Argument(help="Value to set.")],
59
+ ) -> None:
60
+ """Set a configuration value.
61
+
62
+ Use dot notation for nested keys (e.g., providers.openai.api_key).
63
+ """
64
+ # Placeholder implementation
65
+ print_info(f"Would set {key} = {value}")
66
+ print_warning("Sensitive values should be set via environment variables")
67
+
68
+
69
@app.command()
def validate() -> None:
    """Validate current configuration.

    Checks configuration files for errors and missing required values.
    """
    # Placeholder until schema validation is implemented.
    message = "Would validate configuration"
    print_info(message)


__all__ = ["app"]