ouroboros-ai 0.2.1__tar.gz → 0.2.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ouroboros-ai might be problematic; see the registry's advisory for details.

Files changed (202)
  1. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/PKG-INFO +1 -1
  2. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/pyproject.toml +1 -1
  3. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/bigbang/ambiguity.py +60 -76
  4. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/bigbang/test_ambiguity.py +24 -30
  5. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/uv.lock +1 -1
  6. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/.gitignore +0 -0
  7. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/.pre-commit-config.yaml +0 -0
  8. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/.python-version +0 -0
  9. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/CHANGELOG.md +0 -0
  10. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/LICENSE +0 -0
  11. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/PR-43-CODE-REVIEW-REPORT.md +0 -0
  12. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/README.md +0 -0
  13. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/github-issue-mapping.yaml +0 -0
  14. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/sprint-status.yaml +0 -0
  15. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/0-1-project-initialization-with-uv.md +0 -0
  16. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/0-2-core-types-and-error-handling.md +0 -0
  17. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/0-3-event-store-with-sqlalchemy-core.md +0 -0
  18. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/0-4-configuration-and-credentials-management.md +0 -0
  19. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/0-5-llm-provider-adapter-with-litellm.md +0 -0
  20. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/0-6-cli-skeleton-with-typer-and-rich.md +0 -0
  21. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/0-7-structured-logging-with-structlog.md +0 -0
  22. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/0-8-checkpoint-and-recovery-system.md +0 -0
  23. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/0-9-context-compression-engine.md +0 -0
  24. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/1-1-interview-protocol-engine.md +0 -0
  25. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/1-2-ambiguity-score-calculation.md +0 -0
  26. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/1-3-immutable-seed-generation.md +0 -0
  27. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/2-1-three-tier-model-configuration.md +0 -0
  28. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/2-2-complexity-based-routing.md +0 -0
  29. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/2-3-escalation-on-failure.md +0 -0
  30. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/2-4-downgrade-on-success.md +0 -0
  31. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/3-1-double-diamond-cycle-implementation.md +0 -0
  32. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/3-2-hierarchical-ac-decomposition.md +0 -0
  33. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/3-3-atomicity-detection.md +0 -0
  34. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/3-4-subagent-isolation.md +0 -0
  35. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/4-1-stagnation-detection-4-patterns.md +0 -0
  36. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/4-2-lateral-thinking-personas.md +0 -0
  37. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/4-3-persona-rotation-strategy.md +0 -0
  38. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/5-1-stage-1-mechanical-verification.md +0 -0
  39. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/5-2-stage-2-semantic-evaluation.md +0 -0
  40. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/5-3-stage-3-multi-model-consensus.md +0 -0
  41. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/5-4-consensus-trigger-matrix.md +0 -0
  42. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/6-1-drift-measurement-engine.md +0 -0
  43. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/6-2-automatic-retrospective.md +0 -0
  44. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/7-1-todo-registry.md +0 -0
  45. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/implementation-artifacts/stories/7-2-secondary-loop-batch-processing.md +0 -0
  46. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/planning-artifacts/architecture.md +0 -0
  47. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/planning-artifacts/bmm-workflow-status.yaml +0 -0
  48. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/planning-artifacts/epics.md +0 -0
  49. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/_bmad-output/update-stories.sh +0 -0
  50. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/docs/running-with-claude-code.md +0 -0
  51. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/project-context.md +0 -0
  52. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/requirement/1_EXECUTIVE_SUMMARY.md +0 -0
  53. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/requirement/2_FULL_SPECIFICATION.md +0 -0
  54. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/requirement/3_CONFIG_TEMPLATE.yaml +0 -0
  55. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/requirement/4_REDDIT_EXAMPLE.md +0 -0
  56. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/__init__.py +0 -0
  57. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/__main__.py +0 -0
  58. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/bigbang/__init__.py +0 -0
  59. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/bigbang/interview.py +0 -0
  60. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/bigbang/seed_generator.py +0 -0
  61. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/cli/__init__.py +0 -0
  62. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/cli/commands/__init__.py +0 -0
  63. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/cli/commands/config.py +0 -0
  64. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/cli/commands/init.py +0 -0
  65. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/cli/commands/run.py +0 -0
  66. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/cli/commands/status.py +0 -0
  67. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/cli/formatters/__init__.py +0 -0
  68. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/cli/formatters/panels.py +0 -0
  69. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/cli/formatters/progress.py +0 -0
  70. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/cli/formatters/tables.py +0 -0
  71. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/cli/main.py +0 -0
  72. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/config/__init__.py +0 -0
  73. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/config/loader.py +0 -0
  74. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/config/models.py +0 -0
  75. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/core/__init__.py +0 -0
  76. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/core/ac_tree.py +0 -0
  77. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/core/context.py +0 -0
  78. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/core/errors.py +0 -0
  79. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/core/security.py +0 -0
  80. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/core/seed.py +0 -0
  81. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/core/types.py +0 -0
  82. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/evaluation/__init__.py +0 -0
  83. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/evaluation/consensus.py +0 -0
  84. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/evaluation/mechanical.py +0 -0
  85. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/evaluation/models.py +0 -0
  86. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/evaluation/pipeline.py +0 -0
  87. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/evaluation/semantic.py +0 -0
  88. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/evaluation/trigger.py +0 -0
  89. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/events/__init__.py +0 -0
  90. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/events/base.py +0 -0
  91. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/events/decomposition.py +0 -0
  92. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/events/evaluation.py +0 -0
  93. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/execution/__init__.py +0 -0
  94. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/execution/atomicity.py +0 -0
  95. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/execution/decomposition.py +0 -0
  96. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/execution/double_diamond.py +0 -0
  97. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/execution/subagent.py +0 -0
  98. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/observability/__init__.py +0 -0
  99. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/observability/drift.py +0 -0
  100. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/observability/logging.py +0 -0
  101. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/observability/retrospective.py +0 -0
  102. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/orchestrator/__init__.py +0 -0
  103. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/orchestrator/adapter.py +0 -0
  104. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/orchestrator/events.py +0 -0
  105. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/orchestrator/runner.py +0 -0
  106. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/orchestrator/session.py +0 -0
  107. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/persistence/__init__.py +0 -0
  108. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/persistence/checkpoint.py +0 -0
  109. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/persistence/event_store.py +0 -0
  110. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/persistence/migrations/__init__.py +0 -0
  111. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/persistence/migrations/runner.py +0 -0
  112. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/persistence/migrations/scripts/001_initial.sql +0 -0
  113. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/persistence/schema.py +0 -0
  114. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/persistence/uow.py +0 -0
  115. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/providers/__init__.py +0 -0
  116. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/providers/base.py +0 -0
  117. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/providers/claude_code_adapter.py +0 -0
  118. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/providers/litellm_adapter.py +0 -0
  119. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/py.typed +0 -0
  120. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/resilience/__init__.py +0 -0
  121. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/resilience/lateral.py +0 -0
  122. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/resilience/stagnation.py +0 -0
  123. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/routing/__init__.py +0 -0
  124. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/routing/complexity.py +0 -0
  125. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/routing/downgrade.py +0 -0
  126. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/routing/escalation.py +0 -0
  127. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/routing/router.py +0 -0
  128. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/routing/tiers.py +0 -0
  129. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/secondary/__init__.py +0 -0
  130. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/secondary/scheduler.py +0 -0
  131. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/src/ouroboros/secondary/todo_registry.py +0 -0
  132. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/conftest.py +0 -0
  133. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/integration/test_entry_point.py +0 -0
  134. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/bigbang/__init__.py +0 -0
  135. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/bigbang/test_interview.py +0 -0
  136. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/bigbang/test_seed_generator.py +0 -0
  137. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/cli/__init__.py +0 -0
  138. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/cli/formatters/__init__.py +0 -0
  139. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/cli/formatters/test_console.py +0 -0
  140. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/cli/formatters/test_panels.py +0 -0
  141. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/cli/formatters/test_progress.py +0 -0
  142. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/cli/formatters/test_tables.py +0 -0
  143. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/cli/test_main.py +0 -0
  144. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/config/__init__.py +0 -0
  145. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/config/test_loader.py +0 -0
  146. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/config/test_models.py +0 -0
  147. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/core/__init__.py +0 -0
  148. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/core/test_ac_tree.py +0 -0
  149. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/core/test_context.py +0 -0
  150. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/core/test_errors.py +0 -0
  151. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/core/test_security.py +0 -0
  152. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/core/test_seed.py +0 -0
  153. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/core/test_types.py +0 -0
  154. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/evaluation/__init__.py +0 -0
  155. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/evaluation/test_consensus.py +0 -0
  156. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/evaluation/test_mechanical.py +0 -0
  157. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/evaluation/test_models.py +0 -0
  158. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/evaluation/test_semantic.py +0 -0
  159. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/evaluation/test_trigger.py +0 -0
  160. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/events/__init__.py +0 -0
  161. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/events/test_base.py +0 -0
  162. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/events/test_decomposition_events.py +0 -0
  163. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/execution/__init__.py +0 -0
  164. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/execution/test_atomicity.py +0 -0
  165. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/execution/test_decomposition.py +0 -0
  166. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/execution/test_double_diamond.py +0 -0
  167. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/execution/test_subagent_isolation.py +0 -0
  168. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/observability/__init__.py +0 -0
  169. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/observability/test_drift.py +0 -0
  170. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/observability/test_logging.py +0 -0
  171. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/observability/test_retrospective.py +0 -0
  172. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/orchestrator/__init__.py +0 -0
  173. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/orchestrator/test_adapter.py +0 -0
  174. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/orchestrator/test_events.py +0 -0
  175. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/orchestrator/test_runner.py +0 -0
  176. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/orchestrator/test_session.py +0 -0
  177. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/persistence/__init__.py +0 -0
  178. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/persistence/test_checkpoint.py +0 -0
  179. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/persistence/test_event_store.py +0 -0
  180. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/persistence/test_schema.py +0 -0
  181. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/persistence/test_uow.py +0 -0
  182. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/providers/__init__.py +0 -0
  183. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/providers/test_base.py +0 -0
  184. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/providers/test_litellm_adapter.py +0 -0
  185. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/resilience/__init__.py +0 -0
  186. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/resilience/test_lateral.py +0 -0
  187. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/resilience/test_stagnation.py +0 -0
  188. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/routing/__init__.py +0 -0
  189. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/routing/test_complexity.py +0 -0
  190. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/routing/test_downgrade.py +0 -0
  191. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/routing/test_escalation.py +0 -0
  192. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/routing/test_router.py +0 -0
  193. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/routing/test_tiers.py +0 -0
  194. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/secondary/__init__.py +0 -0
  195. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/secondary/test_scheduler.py +0 -0
  196. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/secondary/test_todo_registry.py +0 -0
  197. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/test_dependencies_configured.py +0 -0
  198. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/test_main_entry_point.py +0 -0
  199. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/test_module_structure.py +0 -0
  200. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/test_project_initialization.py +0 -0
  201. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tests/unit/test_tooling_configuration.py +0 -0
  202. {ouroboros_ai-0.2.1 → ouroboros_ai-0.2.3}/tools/sync_github_project.py +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ouroboros-ai
-Version: 0.2.1
+Version: 0.2.3
 Summary: Self-Improving AI Workflow System
 Author-email: Q00 <jqyu.lee@gmail.com>
 License-File: LICENSE

pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "ouroboros-ai"
-version = "0.2.1"
+version = "0.2.3"
 description = "Self-Improving AI Workflow System"
 readme = "README.md"
 authors = [

src/ouroboros/bigbang/ambiguity.py
@@ -9,6 +9,8 @@ The scoring algorithm evaluates three key components:
 - Success Criteria Clarity (30%): How measurable the success criteria are
 """

+import json
+import re
 from dataclasses import dataclass
 from typing import Any

@@ -110,15 +112,15 @@ class AmbiguityScorer:
     from interview conversation, producing reproducible scores.

     Uses adaptive token allocation: starts with `initial_max_tokens` and
-    doubles on truncation up to `MAX_TOKEN_LIMIT`. Retries up to `max_retries`
-    times on both provider errors and parse failures.
+    doubles on truncation up to `MAX_TOKEN_LIMIT`. Retries until success
+    by default (unlimited), or up to `max_retries` if specified.

     Attributes:
         llm_adapter: The LLM adapter for completions.
         model: Model identifier to use.
         temperature: Temperature for reproducibility (default 0.1).
         initial_max_tokens: Starting token limit (default 2048).
-        max_retries: Maximum retry attempts (default 3).
+        max_retries: Maximum retry attempts, or None for unlimited (default).

     Example:
         scorer = AmbiguityScorer(llm_adapter=LiteLLMAdapter())
@@ -138,7 +140,8 @@ class AmbiguityScorer:
     model: str = DEFAULT_MODEL
     temperature: float = SCORING_TEMPERATURE
     initial_max_tokens: int = 2048
-    max_retries: int = 3
+    max_retries: int | None = None  # None = unlimited retries
+    max_format_error_retries: int = 5  # Stop after N format errors (non-truncation)

     async def score(
         self, state: InterviewState
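
Note on the new defaults above: with max_retries left at None, a scorer keeps calling the model until it gets a parseable result. A minimal usage sketch under that assumption (module paths are inferred from the file list above, and the no-argument LiteLLMAdapter() construction follows the docstring example rather than a documented API):

    from ouroboros.bigbang.ambiguity import AmbiguityScorer
    from ouroboros.providers.litellm_adapter import LiteLLMAdapter

    # Default behaviour after this change: retry until a valid score is parsed.
    scorer = AmbiguityScorer(llm_adapter=LiteLLMAdapter())

    # Cost- or CI-sensitive callers can restore a bounded policy explicitly.
    bounded_scorer = AmbiguityScorer(llm_adapter=LiteLLMAdapter(), max_retries=5)
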
@@ -180,8 +183,15 @@
         current_max_tokens = self.initial_max_tokens
         last_error: Exception | ProviderError | None = None
         last_response: str = ""
+        attempt = 0
+
+        while True:
+            # Check retry limit if set
+            if self.max_retries is not None and attempt >= self.max_retries:
+                break
+
+            attempt += 1

-        for attempt in range(self.max_retries):
             config = CompletionConfig(
                 model=self.model,
                 temperature=self.temperature,
@@ -190,15 +200,15 @@

             result = await self.llm_adapter.complete(messages, config)

-            # Fix #3: Retry on provider errors (rate limits, transient failures)
+            # Retry on provider errors (rate limits, transient failures)
             if result.is_err:
                 last_error = result.error
                 log.warning(
                     "ambiguity.scoring.provider_error_retrying",
                     interview_id=state.interview_id,
                     error=str(result.error),
-                    attempt=attempt + 1,
-                    max_retries=self.max_retries,
+                    attempt=attempt,
+                    max_retries=self.max_retries or "unlimited",
                 )
                 continue

@@ -221,7 +231,7 @@
                     constraint_clarity=breakdown.constraint_clarity.clarity_score,
                     success_criteria_clarity=breakdown.success_criteria_clarity.clarity_score,
                     tokens_used=current_max_tokens,
-                    attempt=attempt + 1,
+                    attempt=attempt,
                 )

                 return Result.ok(ambiguity_score)
@@ -230,11 +240,11 @@
                 last_error = e
                 last_response = result.value.content

-                # Fix #2: Only increase tokens if response was truncated
+                # Only increase tokens if response was truncated
                 is_truncated = result.value.finish_reason == "length"

                 if is_truncated:
-                    # Double tokens on truncation (no upper limit)
+                    # Double tokens on truncation, capped at MAX_TOKEN_LIMIT if set
                     next_tokens = current_max_tokens * 2
                     if MAX_TOKEN_LIMIT is not None:
                         next_tokens = min(next_tokens, MAX_TOKEN_LIMIT)
@@ -242,7 +252,7 @@
                         "ambiguity.scoring.truncated_retrying",
                         interview_id=state.interview_id,
                         error=str(e),
-                        attempt=attempt + 1,
+                        attempt=attempt,
                         current_tokens=current_max_tokens,
                         next_tokens=next_tokens,
                     )
@@ -253,11 +263,11 @@
                         "ambiguity.scoring.format_error_retrying",
                         interview_id=state.interview_id,
                         error=str(e),
-                        attempt=attempt + 1,
+                        attempt=attempt,
                         finish_reason=result.value.finish_reason,
                     )

-        # All retries exhausted
+        # All retries exhausted (only reached if max_retries is set)
         log.warning(
             "ambiguity.scoring.failed",
             interview_id=state.interview_id,
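
Taken together, the hunks above replace the fixed three-attempt loop with an open-ended policy: provider errors retry unchanged, truncated responses double the token budget (capped at MAX_TOKEN_LIMIT when it is set), and other parse failures simply retry. The sketch below restates that control flow in one place under stated assumptions; call_llm and parse_scores are placeholder names, and the handling of max_format_error_retries is an assumption, since these hunks add the field without showing where it is enforced:

    import json

    MAX_TOKEN_LIMIT: int | None = 8192  # illustrative cap; the real constant lives in ambiguity.py

    def call_llm(max_tokens: int) -> tuple[str, str]:
        # Placeholder for llm_adapter.complete(); returns (content, finish_reason).
        return '{"goal_clarity_score": 0.9}', "stop"

    def parse_scores(content: str) -> dict:
        # Placeholder for _parse_scoring_response(); raises ValueError on bad output.
        return json.loads(content)

    def score_with_retries(
        initial_max_tokens: int = 2048,
        max_retries: int | None = None,       # None = retry until success
        max_format_error_retries: int = 5,    # assumed: cap on non-truncation parse failures
    ) -> dict | None:
        tokens = initial_max_tokens
        attempt = 0
        format_errors = 0
        while max_retries is None or attempt < max_retries:
            attempt += 1
            try:
                content, finish_reason = call_llm(tokens)
            except RuntimeError:
                continue  # provider error: retry with the same settings
            try:
                return parse_scores(content)
            except ValueError:
                if finish_reason == "length":
                    # Truncated: double the budget, capped if a limit is configured.
                    tokens = tokens * 2 if MAX_TOKEN_LIMIT is None else min(tokens * 2, MAX_TOKEN_LIMIT)
                else:
                    format_errors += 1
                    if format_errors >= max_format_error_retries:
                        return None
        return None  # only reachable when a retry cap applies

    print(score_with_retries())
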
@@ -296,38 +306,19 @@
         Returns:
             System prompt string.
         """
-        return """You are an expert requirements analyst evaluating the clarity of software requirements.
-
-Your task is to assess how clear and unambiguous the requirements are based on an interview conversation.
+        return """You are an expert requirements analyst. Evaluate the clarity of software requirements.

 Evaluate three components:
-1. Goal Clarity (40% weight): Is the goal statement specific and well-defined?
-   - Clear: "Build a CLI tool for task management with project grouping"
-   - Unclear: "Build something useful for productivity"
-
-2. Constraint Clarity (30% weight): Are constraints and limitations specified?
-   - Clear: "Must use Python 3.14+, no external database dependencies"
-   - Unclear: No mention of technical constraints or limitations
-
-3. Success Criteria Clarity (30% weight): Are success criteria measurable?
-   - Clear: "Tasks can be created, edited, deleted; supports filtering by status"
-   - Unclear: "The tool should be easy to use"
+1. Goal Clarity (40%): Is the goal specific and well-defined?
+2. Constraint Clarity (30%): Are constraints and limitations specified?
+3. Success Criteria Clarity (30%): Are success criteria measurable?

-For each component, provide:
-- A clarity score between 0.0 (completely unclear) and 1.0 (perfectly clear)
-- A brief justification (1-2 sentences max) explaining the score
+Score each from 0.0 (unclear) to 1.0 (perfectly clear). Scores above 0.8 require very specific requirements.

-IMPORTANT: You MUST provide ALL six fields below. Keep justifications concise.
+RESPOND ONLY WITH VALID JSON. No other text before or after.

-Respond in this exact format:
-GOAL_CLARITY_SCORE: <score>
-GOAL_CLARITY_JUSTIFICATION: <justification in 1-2 sentences>
-CONSTRAINT_CLARITY_SCORE: <score>
-CONSTRAINT_CLARITY_JUSTIFICATION: <justification in 1-2 sentences>
-SUCCESS_CRITERIA_CLARITY_SCORE: <score>
-SUCCESS_CRITERIA_CLARITY_JUSTIFICATION: <justification in 1-2 sentences>
-
-Be strict in your evaluation. Scores above 0.8 require very specific, measurable requirements."""
+Required JSON format:
+{"goal_clarity_score": 0.0, "goal_clarity_justification": "string", "constraint_clarity_score": 0.0, "constraint_clarity_justification": "string", "success_criteria_clarity_score": 0.0, "success_criteria_clarity_justification": "string"}"""

     def _build_scoring_user_prompt(self, context: str) -> str:
         """Build user prompt with interview context.
@@ -358,27 +349,23 @@ Analyze each component and provide scores with justifications."""
         Raises:
             ValueError: If response cannot be parsed.
         """
-        lines = response.strip().split("\n")
-        scores: dict[str, Any] = {}
-
-        for line in lines:
-            line = line.strip()
-            if not line:
-                continue
-
-            for prefix in [
-                "GOAL_CLARITY_SCORE:",
-                "GOAL_CLARITY_JUSTIFICATION:",
-                "CONSTRAINT_CLARITY_SCORE:",
-                "CONSTRAINT_CLARITY_JUSTIFICATION:",
-                "SUCCESS_CRITERIA_CLARITY_SCORE:",
-                "SUCCESS_CRITERIA_CLARITY_JUSTIFICATION:",
-            ]:
-                if line.startswith(prefix):
-                    key = prefix[:-1].lower()  # Remove colon and lowercase
-                    value = line[len(prefix) :].strip()
-                    scores[key] = value
-                    break
+        # Extract JSON from response (handle markdown code blocks)
+        text = response.strip()
+
+        # Try to find JSON in markdown code block
+        json_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, re.DOTALL)
+        if json_match:
+            text = json_match.group(1)
+        else:
+            # Try to find raw JSON object
+            json_match = re.search(r"\{.*\}", text, re.DOTALL)
+            if json_match:
+                text = json_match.group(0)
+
+        try:
+            data = json.loads(text)
+        except json.JSONDecodeError as e:
+            raise ValueError(f"Invalid JSON response: {e}") from e

         # Validate all required fields are present
         required_fields = [
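
The new parser is deliberately lenient about where the JSON appears. A small standalone sketch of the same extraction strategy (fenced block first, then any bare object, then json.loads), runnable outside the class; extract_json is a name invented for this example:

    import json
    import re
    from typing import Any

    def extract_json(response: str) -> dict[str, Any]:
        # Tolerates ```json fences and surrounding prose, mirroring _parse_scoring_response.
        text = response.strip()
        fenced = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, re.DOTALL)
        if fenced:
            text = fenced.group(1)
        else:
            bare = re.search(r"\{.*\}", text, re.DOTALL)
            if bare:
                text = bare.group(0)
        try:
            return json.loads(text)
        except json.JSONDecodeError as e:
            raise ValueError(f"Invalid JSON response: {e}") from e

    print(extract_json('Sure!\n```json\n{"goal_clarity_score": 0.9}\n```'))
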
@@ -391,35 +378,32 @@ Analyze each component and provide scores with justifications."""
         ]

         for field_name in required_fields:
-            if field_name not in scores:
+            if field_name not in data:
                 raise ValueError(f"Missing required field: {field_name}")

-        # Parse scores to float
-        def parse_score(value: str) -> float:
-            try:
-                score = float(value)
-                return max(0.0, min(1.0, score))  # Clamp to [0, 1]
-            except ValueError as e:
-                raise ValueError(f"Invalid score value: {value}") from e
+        # Parse and clamp scores
+        def clamp_score(value: Any) -> float:
+            score = float(value)
+            return max(0.0, min(1.0, score))

         return ScoreBreakdown(
             goal_clarity=ComponentScore(
                 name="Goal Clarity",
-                clarity_score=parse_score(scores["goal_clarity_score"]),
+                clarity_score=clamp_score(data["goal_clarity_score"]),
                 weight=GOAL_CLARITY_WEIGHT,
-                justification=scores["goal_clarity_justification"],
+                justification=str(data["goal_clarity_justification"]),
             ),
             constraint_clarity=ComponentScore(
                 name="Constraint Clarity",
-                clarity_score=parse_score(scores["constraint_clarity_score"]),
+                clarity_score=clamp_score(data["constraint_clarity_score"]),
                 weight=CONSTRAINT_CLARITY_WEIGHT,
-                justification=scores["constraint_clarity_justification"],
+                justification=str(data["constraint_clarity_justification"]),
             ),
             success_criteria_clarity=ComponentScore(
                 name="Success Criteria Clarity",
-                clarity_score=parse_score(scores["success_criteria_clarity_score"]),
+                clarity_score=clamp_score(data["success_criteria_clarity_score"]),
                 weight=SUCCESS_CRITERIA_CLARITY_WEIGHT,
-                justification=scores["success_criteria_clarity_justification"],
+                justification=str(data["success_criteria_clarity_justification"]),
             ),
         )


tests/unit/bigbang/test_ambiguity.py
@@ -46,13 +46,17 @@ def create_valid_scoring_response(
     success_score: float = 0.8,
     success_justification: str = "Success criteria are measurable.",
 ) -> str:
-    """Create a valid LLM scoring response string."""
-    return f"""GOAL_CLARITY_SCORE: {goal_score}
-GOAL_CLARITY_JUSTIFICATION: {goal_justification}
-CONSTRAINT_CLARITY_SCORE: {constraint_score}
-CONSTRAINT_CLARITY_JUSTIFICATION: {constraint_justification}
-SUCCESS_CRITERIA_CLARITY_SCORE: {success_score}
-SUCCESS_CRITERIA_CLARITY_JUSTIFICATION: {success_justification}"""
+    """Create a valid LLM scoring response string in JSON format."""
+    import json
+
+    return json.dumps({
+        "goal_clarity_score": goal_score,
+        "goal_clarity_justification": goal_justification,
+        "constraint_clarity_score": constraint_score,
+        "constraint_clarity_justification": constraint_justification,
+        "success_criteria_clarity_score": success_score,
+        "success_criteria_clarity_justification": success_justification,
+    })


 def create_interview_state_with_rounds(
@@ -302,7 +306,7 @@ class TestAmbiguityScorerInit:
         assert scorer.model == "openrouter/google/gemini-2.0-flash-001"
         assert scorer.temperature == SCORING_TEMPERATURE
         assert scorer.initial_max_tokens == 2048
-        assert scorer.max_retries == 3
+        assert scorer.max_retries is None  # Unlimited by default

     def test_scorer_custom_values(self) -> None:
         """AmbiguityScorer accepts custom values."""
@@ -665,41 +669,31 @@ class TestAmbiguityScorerParseResponse:
         mock_adapter = MagicMock()
         scorer = AmbiguityScorer(llm_adapter=mock_adapter)

-        response = """GOAL_CLARITY_SCORE: 0.9
-GOAL_CLARITY_JUSTIFICATION: Good goal."""
+        # JSON with missing field
+        response = '{"goal_clarity_score": 0.9, "goal_clarity_justification": "Good goal."}'

         with pytest.raises(ValueError, match="Missing required field"):
             scorer._parse_scoring_response(response)

-    def test_parse_response_invalid_score_format(self) -> None:
-        """_parse_scoring_response raises error for invalid score format."""
+    def test_parse_response_invalid_json(self) -> None:
+        """_parse_scoring_response raises error for invalid JSON."""
         mock_adapter = MagicMock()
         scorer = AmbiguityScorer(llm_adapter=mock_adapter)

-        response = """GOAL_CLARITY_SCORE: not_a_number
-GOAL_CLARITY_JUSTIFICATION: Test
-CONSTRAINT_CLARITY_SCORE: 0.8
-CONSTRAINT_CLARITY_JUSTIFICATION: Test
-SUCCESS_CRITERIA_CLARITY_SCORE: 0.7
-SUCCESS_CRITERIA_CLARITY_JUSTIFICATION: Test"""
+        response = "This is not valid JSON at all"

-        with pytest.raises(ValueError, match="Invalid score value"):
+        with pytest.raises(ValueError, match="Invalid JSON response"):
             scorer._parse_scoring_response(response)

-    def test_parse_response_with_extra_whitespace(self) -> None:
-        """_parse_scoring_response handles extra whitespace."""
+    def test_parse_response_with_markdown_code_block(self) -> None:
+        """_parse_scoring_response handles JSON in markdown code block."""
         mock_adapter = MagicMock()
         scorer = AmbiguityScorer(llm_adapter=mock_adapter)

-        response = """
-        GOAL_CLARITY_SCORE: 0.85
-        GOAL_CLARITY_JUSTIFICATION: Clear goal with details.
-
-        CONSTRAINT_CLARITY_SCORE: 0.75
-        CONSTRAINT_CLARITY_JUSTIFICATION: Good constraints.
-
-        SUCCESS_CRITERIA_CLARITY_SCORE: 0.65
-        SUCCESS_CRITERIA_CLARITY_JUSTIFICATION: Clear criteria.
+        response = """Here is the analysis:
+        ```json
+        {"goal_clarity_score": 0.85, "goal_clarity_justification": "Clear goal with details.", "constraint_clarity_score": 0.75, "constraint_clarity_justification": "Good constraints.", "success_criteria_clarity_score": 0.65, "success_criteria_clarity_justification": "Clear criteria."}
+        ```
         """

         breakdown = scorer._parse_scoring_response(response)

uv.lock
@@ -892,7 +892,7 @@ wheels = [

 [[package]]
 name = "ouroboros-ai"
-version = "0.2.1"
+version = "0.2.2"
 source = { editable = "." }
 dependencies = [
     { name = "aiosqlite" },