specsmith 0.7.0.dev236__tar.gz → 0.10.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (197)
  1. {specsmith-0.7.0.dev236/src/specsmith.egg-info → specsmith-0.10.0}/PKG-INFO +20 -1
  2. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/README.md +19 -0
  3. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/pyproject.toml +4 -1
  4. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/__init__.py +1 -1
  5. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/chat_runner.py +240 -22
  6. specsmith-0.10.0/src/specsmith/agent/core.py +98 -0
  7. specsmith-0.10.0/src/specsmith/agent/endpoints.py +493 -0
  8. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/events.py +58 -0
  9. specsmith-0.10.0/src/specsmith/agent/fallback.py +142 -0
  10. specsmith-0.10.0/src/specsmith/agent/profiles.py +655 -0
  11. specsmith-0.10.0/src/specsmith/agent/runner.py +434 -0
  12. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/cli.py +902 -109
  13. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/vcs_commands.py +1 -1
  14. {specsmith-0.7.0.dev236 → specsmith-0.10.0/src/specsmith.egg-info}/PKG-INFO +20 -1
  15. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith.egg-info/SOURCES.txt +11 -1
  16. specsmith-0.10.0/tests/test_agent_profiles.py +70 -0
  17. specsmith-0.10.0/tests/test_agent_runner_ready.py +75 -0
  18. specsmith-0.10.0/tests/test_chat_runner_openai_compat.py +198 -0
  19. specsmith-0.10.0/tests/test_endpoints_cli.py +244 -0
  20. specsmith-0.10.0/tests/test_endpoints_store.py +350 -0
  21. specsmith-0.10.0/tests/test_fallback_chain.py +343 -0
  22. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_phase34_completion.py +4 -36
  23. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_warp_parity.py +4 -115
  24. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_warp_parity_followup.py +0 -95
  25. specsmith-0.7.0.dev236/src/specsmith/cloud_serve.py +0 -150
  26. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/LICENSE +0 -0
  27. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/setup.cfg +0 -0
  28. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/epistemic/__init__.py +0 -0
  29. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/epistemic/belief.py +0 -0
  30. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/epistemic/certainty.py +0 -0
  31. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/epistemic/failure_graph.py +0 -0
  32. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/epistemic/py.typed +0 -0
  33. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/epistemic/recovery.py +0 -0
  34. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/epistemic/session.py +0 -0
  35. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/epistemic/stress_tester.py +0 -0
  36. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/epistemic/trace.py +0 -0
  37. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/__main__.py +0 -0
  38. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/__init__.py +0 -0
  39. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/broker.py +0 -0
  40. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/cleanup.py +0 -0
  41. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/indexer.py +0 -0
  42. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/mcp.py +0 -0
  43. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/memory.py +0 -0
  44. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/orchestrator.py +0 -0
  45. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/repl.py +0 -0
  46. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/router.py +0 -0
  47. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/rules.py +0 -0
  48. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/safety.py +0 -0
  49. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/suggester.py +0 -0
  50. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/tools.py +0 -0
  51. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/verifier.py +0 -0
  52. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/agent/voice.py +0 -0
  53. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/architect.py +0 -0
  54. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/auditor.py +0 -0
  55. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/auth.py +0 -0
  56. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/block_export.py +0 -0
  57. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/commands/__init__.py +0 -0
  58. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/compressor.py +0 -0
  59. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/config.py +0 -0
  60. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/console_utils.py +0 -0
  61. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/credit_analyzer.py +0 -0
  62. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/credits.py +0 -0
  63. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/differ.py +0 -0
  64. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/doctor.py +0 -0
  65. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/drive.py +0 -0
  66. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/epistemic/__init__.py +0 -0
  67. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/epistemic/belief.py +0 -0
  68. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/epistemic/certainty.py +0 -0
  69. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/epistemic/failure_graph.py +0 -0
  70. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/epistemic/recovery.py +0 -0
  71. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/epistemic/stress_tester.py +0 -0
  72. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/executor.py +0 -0
  73. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/exporter.py +0 -0
  74. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/gui/__init__.py +0 -0
  75. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/gui/app.py +0 -0
  76. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/gui/main_window.py +0 -0
  77. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/gui/session_tab.py +0 -0
  78. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/gui/theme.py +0 -0
  79. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/gui/widgets/__init__.py +0 -0
  80. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/gui/widgets/chat_view.py +0 -0
  81. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/gui/widgets/input_bar.py +0 -0
  82. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/gui/widgets/provider_bar.py +0 -0
  83. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/gui/widgets/token_meter.py +0 -0
  84. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/gui/widgets/tool_panel.py +0 -0
  85. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/gui/widgets/update_checker.py +0 -0
  86. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/gui/worker.py +0 -0
  87. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/history_search.py +0 -0
  88. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/importer.py +0 -0
  89. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/integrations/__init__.py +0 -0
  90. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/integrations/agent_skill.py +0 -0
  91. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/integrations/aider.py +0 -0
  92. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/integrations/base.py +0 -0
  93. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/integrations/claude_code.py +0 -0
  94. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/integrations/copilot.py +0 -0
  95. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/integrations/cursor.py +0 -0
  96. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/integrations/gemini.py +0 -0
  97. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/integrations/windsurf.py +0 -0
  98. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/languages.py +0 -0
  99. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/ledger.py +0 -0
  100. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/patent.py +0 -0
  101. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/phase.py +0 -0
  102. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/plugins.py +0 -0
  103. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/profiles.py +0 -0
  104. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/rate_limits.py +0 -0
  105. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/releaser.py +0 -0
  106. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/requirements.py +0 -0
  107. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/requirements_parser.py +0 -0
  108. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/retrieval.py +0 -0
  109. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/scaffolder.py +0 -0
  110. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/serve.py +0 -0
  111. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/session.py +0 -0
  112. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/skills.py +0 -0
  113. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/agents.md.j2 +0 -0
  114. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/community/bug_report.md.j2 +0 -0
  115. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/community/code_of_conduct.md.j2 +0 -0
  116. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/community/contributing.md.j2 +0 -0
  117. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/community/feature_request.md.j2 +0 -0
  118. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/community/license-Apache-2.0.j2 +0 -0
  119. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/community/license-MIT.j2 +0 -0
  120. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/community/pull_request_template.md.j2 +0 -0
  121. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/community/security.md.j2 +0 -0
  122. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/docs/architecture.md.j2 +0 -0
  123. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/docs/mkdocs.yml.j2 +0 -0
  124. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/docs/readthedocs.yaml.j2 +0 -0
  125. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/docs/requirements.md.j2 +0 -0
  126. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/docs/test-spec.md.j2 +0 -0
  127. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/editorconfig.j2 +0 -0
  128. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/gitattributes.j2 +0 -0
  129. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/gitignore.j2 +0 -0
  130. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/go/go.mod.j2 +0 -0
  131. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/go/main.go.j2 +0 -0
  132. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/governance/belief-registry.md.j2 +0 -0
  133. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/governance/context-budget.md.j2 +0 -0
  134. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/governance/drift-metrics.md.j2 +0 -0
  135. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/governance/epistemic-axioms.md.j2 +0 -0
  136. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/governance/failure-modes.md.j2 +0 -0
  137. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/governance/lifecycle.md.j2 +0 -0
  138. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/governance/roles.md.j2 +0 -0
  139. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/governance/rules.md.j2 +0 -0
  140. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/governance/session-protocol.md.j2 +0 -0
  141. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/governance/uncertainty-map.md.j2 +0 -0
  142. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/governance/verification.md.j2 +0 -0
  143. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/js/package.json.j2 +0 -0
  144. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/ledger.md.j2 +0 -0
  145. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/python/cli.py.j2 +0 -0
  146. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/python/init.py.j2 +0 -0
  147. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/python/pyproject.toml.j2 +0 -0
  148. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/readme.md.j2 +0 -0
  149. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/rust/Cargo.toml.j2 +0 -0
  150. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/rust/main.rs.j2 +0 -0
  151. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/scripts/exec.cmd.j2 +0 -0
  152. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/scripts/exec.sh.j2 +0 -0
  153. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/scripts/run.cmd.j2 +0 -0
  154. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/scripts/run.sh.j2 +0 -0
  155. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/scripts/setup.cmd.j2 +0 -0
  156. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/scripts/setup.sh.j2 +0 -0
  157. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/templates/workflows/release.yml.j2 +0 -0
  158. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/tool_installer.py +0 -0
  159. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/toolrules.py +0 -0
  160. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/tools.py +0 -0
  161. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/trace.py +0 -0
  162. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/updater.py +0 -0
  163. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/upgrader.py +0 -0
  164. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/validator.py +0 -0
  165. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/vcs/__init__.py +0 -0
  166. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/vcs/base.py +0 -0
  167. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/vcs/bitbucket.py +0 -0
  168. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/vcs/github.py +0 -0
  169. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/vcs/gitlab.py +0 -0
  170. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/wireframes.py +0 -0
  171. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith/workspace.py +0 -0
  172. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith.egg-info/dependency_links.txt +0 -0
  173. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith.egg-info/entry_points.txt +0 -0
  174. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith.egg-info/requires.txt +0 -0
  175. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/src/specsmith.egg-info/top_level.txt +0 -0
  176. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_CMD_001.py +0 -0
  177. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_auditor.py +0 -0
  178. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_chat_diff_decision.py +0 -0
  179. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_chat_stdin_protocol.py +0 -0
  180. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_cli.py +0 -0
  181. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_cli_workflows_history_drive.py +0 -0
  182. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_compressor.py +0 -0
  183. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_e2e_nexus.py +0 -0
  184. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_epistemic.py +0 -0
  185. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_importer.py +0 -0
  186. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_integrations.py +0 -0
  187. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_mcp_client.py +0 -0
  188. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_nexus.py +0 -0
  189. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_phase1_4_new.py +0 -0
  190. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_rate_limits.py +0 -0
  191. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_scaffolder.py +0 -0
  192. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_skill_marketplace.py +0 -0
  193. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_smoke.py +0 -0
  194. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_suggester.py +0 -0
  195. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_tools.py +0 -0
  196. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_validator.py +0 -0
  197. {specsmith-0.7.0.dev236 → specsmith-0.10.0}/tests/test_vcs.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: specsmith
3
- Version: 0.7.0.dev236
3
+ Version: 0.10.0
4
4
  Summary: Applied Epistemic Engineering toolkit — AEE agent sessions, execution profiles, FPGA/HDL governance, tool installer, 50+ CLI commands.
5
5
  Author: BitConcepts
6
6
  License-Expression: MIT
@@ -88,6 +88,25 @@ specsmith treats belief systems like code: codable, testable, and deployable. It
88
88
  epistemically-governed projects, stress-tests requirements as BeliefArtifacts, runs
89
89
  cryptographically-sealed trace vaults, and orchestrates AI agents under formal AEE governance.
90
90
 
91
+ **0.10.0 — Multi-Agent + BYOE.** A `/plan` goes to the architect, `/fix`
92
+ goes to the coder, `/review` goes to a reviewer that runs on a different
93
+ model family. Each *profile* is a `(provider, model, endpoint?, fallback_chain)`
94
+ bundle stored in `~/.specsmith/agents.json`; an *activity routing table*
95
+ maps slash commands and AEE phases to profiles; **BYOE endpoints**
96
+ (`~/.specsmith/endpoints.json`) let you point a profile at any
97
+ OpenAI-v1-compatible backend you self-host (vLLM, llama.cpp `server`,
98
+ LM Studio, TGI, ...). Cross-family **diversity guard**, capability
99
+ filtering, transient-failure fallback chains, and TraceVault decision
100
+ seals on every `/agent` pin are wired in by default. See
101
+ [`docs/site/agents.md`](docs/site/agents.md) for the five-minute walkthrough.
102
+
103
+ ```bash
104
+ specsmith agents preset apply default # frontier coder + cross-family reviewer
105
+ specsmith endpoints add --id home-vllm \
106
+ --base-url http://10.0.0.4:8000/v1 --auth bearer-keyring
107
+ specsmith run --agent opus-reviewer # one-shot per-session pin
108
+ ```
109
+
91
110
  It also co-installs the standalone `epistemic` Python library for direct use in any project:
92
111
 
93
112
  ```python
@@ -16,6 +16,25 @@ specsmith treats belief systems like code: codable, testable, and deployable. It
16
16
  epistemically-governed projects, stress-tests requirements as BeliefArtifacts, runs
17
17
  cryptographically-sealed trace vaults, and orchestrates AI agents under formal AEE governance.
18
18
 
19
+ **0.10.0 — Multi-Agent + BYOE.** A `/plan` goes to the architect, `/fix`
20
+ goes to the coder, `/review` goes to a reviewer that runs on a different
21
+ model family. Each *profile* is a `(provider, model, endpoint?, fallback_chain)`
22
+ bundle stored in `~/.specsmith/agents.json`; an *activity routing table*
23
+ maps slash commands and AEE phases to profiles; **BYOE endpoints**
24
+ (`~/.specsmith/endpoints.json`) let you point a profile at any
25
+ OpenAI-v1-compatible backend you self-host (vLLM, llama.cpp `server`,
26
+ LM Studio, TGI, ...). Cross-family **diversity guard**, capability
27
+ filtering, transient-failure fallback chains, and TraceVault decision
28
+ seals on every `/agent` pin are wired in by default. See
29
+ [`docs/site/agents.md`](docs/site/agents.md) for the five-minute walkthrough.
30
+
31
+ ```bash
32
+ specsmith agents preset apply default # frontier coder + cross-family reviewer
33
+ specsmith endpoints add --id home-vllm \
34
+ --base-url http://10.0.0.4:8000/v1 --auth bearer-keyring
35
+ specsmith run --agent opus-reviewer # one-shot per-session pin
36
+ ```
37
+
19
38
  It also co-installs the standalone `epistemic` Python library for direct use in any project:
20
39
 
21
40
  ```python
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "specsmith"
7
- version = "0.7.0.dev236"
7
+ version = "0.10.0"
8
8
  description = "Applied Epistemic Engineering toolkit — AEE agent sessions, execution profiles, FPGA/HDL governance, tool installer, 50+ CLI commands."
9
9
  readme = "README.md"
10
10
  license = "MIT"
@@ -171,6 +171,9 @@ module = [
171
171
  "specsmith.importer",
172
172
  "specsmith.agent.providers.gemini",
173
173
  "specsmith.agent.runner",
174
+ "specsmith.agent.profiles",
175
+ "specsmith.agent.fallback",
176
+ "specsmith.agent.core",
174
177
  "specsmith.agent.cleanup",
175
178
  "specsmith.agent.orchestrator",
176
179
  "specsmith.agent.repl",
@@ -8,4 +8,4 @@ from importlib.metadata import version as _pkg_version
8
8
  try:
9
9
  __version__: str = _pkg_version("specsmith")
10
10
  except PackageNotFoundError: # running from source without install
11
- __version__ = "0.3.6" # fallback: keep in sync with pyproject.toml
11
+ __version__ = "0.10.0" # fallback: keep in sync with pyproject.toml
@@ -53,6 +53,14 @@ class ChatRunResult:
53
53
  files_changed: list[str] = field(default_factory=list)
54
54
  verdict: VerifierVerdict | None = None
55
55
  raw_text: str = ""
56
+ # C1: per-turn token + cost accounting. Populated by the provider
57
+ # driver when it can read counters from the response (Ollama and
58
+ # Anthropic both expose them). Falls back to a deterministic char-
59
+ # based heuristic so the TokenMeter chip is never zero on Ollama or
60
+ # OpenAI-compat endpoints that don't surface usage in streaming mode.
61
+ tokens_in: int = 0
62
+ tokens_out: int = 0
63
+ cost_usd: float = 0.0
56
64
 
57
65
  def to_dict(self) -> dict[str, Any]:
58
66
  return {
@@ -61,6 +69,9 @@ class ChatRunResult:
61
69
  "files_changed": list(self.files_changed),
62
70
  "confidence": self.verdict.confidence if self.verdict else 0.0,
63
71
  "equilibrium": self.verdict.equilibrium if self.verdict else False,
72
+ "tokens_in": int(self.tokens_in),
73
+ "tokens_out": int(self.tokens_out),
74
+ "cost_usd": float(self.cost_usd),
64
75
  }
65
76
 
66
77
 
@@ -80,43 +91,122 @@ def run_chat(
80
91
  history: list[dict[str, Any]] | None = None,
81
92
  confidence_target: float = 0.7,
82
93
  rules_prefix: str = "",
94
+ endpoint_id: str | None = None,
83
95
  ) -> ChatRunResult | None:
84
- """Drive a real LLM turn. Return ``None`` if no provider is reachable."""
96
+ """Drive a real LLM turn. Return ``None`` if no provider is reachable.
97
+
98
+ When ``endpoint_id`` is set, the BYOE store (REQ-142) is consulted and
99
+ the resolved :class:`Endpoint` short-circuits the provider chain via
100
+ the new :func:`_run_openai_compat` driver. Any error during endpoint
101
+ resolution falls back to the legacy auto-detect chain so an offline
102
+ misconfigured endpoint never breaks `specsmith chat`.
103
+ """
85
104
  history = history or []
86
105
  messages = _build_messages(utterance, history, rules_prefix)
87
106
 
107
+ # REQ-142: explicit endpoint override.
108
+ if endpoint_id:
109
+ try:
110
+ from specsmith.agent.endpoints import EndpointStore
111
+
112
+ endpoint = EndpointStore.load().resolve(endpoint_id)
113
+ except Exception: # noqa: BLE001 - any failure → fall back to auto-detect
114
+ endpoint = None
115
+ if endpoint is not None:
116
+ try:
117
+ full_text, usage = _run_openai_compat(
118
+ messages, emitter, msg_block, endpoint=endpoint
119
+ )
120
+ except Exception: # noqa: BLE001 - degrade to auto-detect
121
+ full_text, usage = None, _UsageDelta()
122
+ if full_text is not None:
123
+ return _finalize(
124
+ full_text,
125
+ "openai_compat",
126
+ project_dir,
127
+ confidence_target,
128
+ messages=messages,
129
+ usage=usage,
130
+ )
131
+
88
132
  # Order matters: Ollama first because it's local-first and free.
89
133
  for provider in (_run_ollama, _run_anthropic, _run_openai, _run_gemini):
90
134
  try:
91
- full_text = provider(messages, emitter, msg_block)
135
+ full_text, usage = provider(messages, emitter, msg_block)
92
136
  except Exception: # noqa: BLE001 - any failure → next provider
93
137
  continue
94
138
  if full_text is None:
95
139
  continue
96
- return _finalize(full_text, provider.__name__, project_dir, confidence_target)
140
+ return _finalize(
141
+ full_text,
142
+ provider.__name__,
143
+ project_dir,
144
+ confidence_target,
145
+ messages=messages,
146
+ usage=usage,
147
+ )
97
148
  return None
98
149
 
99
150
 
151
+ @dataclass
152
+ class _UsageDelta:
153
+ """Per-turn token + cost counters reported by a provider driver.
154
+
155
+ All fields default to ``0`` so callers can construct a zero-value
156
+ instance without caring whether the provider supports usage tracking.
157
+ """
158
+
159
+ tokens_in: int = 0
160
+ tokens_out: int = 0
161
+ cost_usd: float = 0.0
162
+
163
+
100
164
  def _finalize(
101
165
  full_text: str,
102
166
  provider_fn_name: str,
103
167
  project_dir: Path,
104
168
  confidence_target: float,
169
+ *,
170
+ messages: list[dict[str, str]] | None = None,
171
+ usage: _UsageDelta | None = None,
105
172
  ) -> ChatRunResult:
106
173
  sections = _parse_output_contract(full_text)
107
174
  files_changed = _split_files_list(sections.get("files_changed", ""))
108
175
  report = report_from_chat_sections(sections, files_changed=files_changed)
109
176
  verdict = score(report, confidence_target=confidence_target)
110
177
  summary = (sections.get("plan") or full_text.strip()[:200]).strip() or verdict.summary
178
+
179
+ # C1: when the provider didn't report exact counts, estimate from text.
180
+ # The four-chars-per-token rule of thumb is OpenAI's published guidance
181
+ # and matches Ollama / Anthropic / Gemini within ~10% across the model
182
+ # families we ship today — close enough for the TokenMeter chip and
183
+ # the ``credits record`` ledger event.
184
+ if usage is None:
185
+ usage = _UsageDelta()
186
+ if usage.tokens_in == 0 and messages is not None:
187
+ usage.tokens_in = _estimate_tokens("\n".join(m.get("content", "") for m in messages))
188
+ if usage.tokens_out == 0:
189
+ usage.tokens_out = _estimate_tokens(full_text)
190
+
111
191
  return ChatRunResult(
112
192
  provider=provider_fn_name.removeprefix("_run_"),
113
193
  summary=summary,
114
194
  files_changed=files_changed,
115
195
  verdict=verdict,
116
196
  raw_text=full_text,
197
+ tokens_in=int(usage.tokens_in),
198
+ tokens_out=int(usage.tokens_out),
199
+ cost_usd=float(usage.cost_usd),
117
200
  )
118
201
 
119
202
 
203
+ def _estimate_tokens(text: str) -> int:
204
+ """Rough char→token heuristic (4 chars/token, floor at 1 if non-empty)."""
205
+ if not text:
206
+ return 0
207
+ return max(1, len(text) // 4)
208
+
209
+
120
210
  # ---------------------------------------------------------------------------
121
211
  # Provider drivers — each returns the full assembled text or None
122
212
  # ---------------------------------------------------------------------------
@@ -126,13 +216,14 @@ def _run_ollama(
126
216
  messages: list[dict[str, str]],
127
217
  emitter: EventEmitter,
128
218
  block_id: str,
129
- ) -> str | None:
219
+ ) -> tuple[str | None, _UsageDelta]:
130
220
  """Stream from a local Ollama daemon using only stdlib."""
131
221
  host = os.environ.get("OLLAMA_HOST", DEFAULT_OLLAMA_HOST).rstrip("/")
132
222
  model = os.environ.get("SPECSMITH_OLLAMA_MODEL", DEFAULT_OLLAMA_MODEL)
223
+ usage = _UsageDelta()
133
224
 
134
225
  if not _ollama_alive(host):
135
- return None
226
+ return None, usage
136
227
 
137
228
  payload = json.dumps({"model": model, "messages": messages, "stream": True}).encode("utf-8")
138
229
  req = Request( # noqa: S310 - URL is a hardcoded localhost default
@@ -157,8 +248,13 @@ def _run_ollama(
157
248
  emitter.token(block_id, chunk)
158
249
  pieces.append(chunk)
159
250
  if obj.get("done"):
251
+ # C1: Ollama exposes prompt_eval_count + eval_count on the
252
+ # final ``done`` message. Cost is zero for local models.
253
+ usage.tokens_in = int(obj.get("prompt_eval_count") or 0)
254
+ usage.tokens_out = int(obj.get("eval_count") or 0)
255
+ usage.cost_usd = 0.0
160
256
  break
161
- return "".join(pieces) if pieces else None
257
+ return ("".join(pieces) if pieces else None), usage
162
258
 
163
259
 
164
260
  def _ollama_alive(host: str) -> bool:
@@ -173,14 +269,15 @@ def _run_anthropic(
173
269
  messages: list[dict[str, str]],
174
270
  emitter: EventEmitter,
175
271
  block_id: str,
176
- ) -> str | None:
272
+ ) -> tuple[str | None, _UsageDelta]:
177
273
  """Use the anthropic SDK if installed and a key is configured."""
274
+ usage = _UsageDelta()
178
275
  if not os.environ.get("ANTHROPIC_API_KEY"):
179
- return None
276
+ return None, usage
180
277
  try:
181
278
  import anthropic
182
279
  except ImportError:
183
- return None
280
+ return None, usage
184
281
 
185
282
  system = "\n".join(m["content"] for m in messages if m["role"] == "system")
186
283
  user_msgs = [m for m in messages if m["role"] != "system"]
@@ -197,62 +294,183 @@ def _run_anthropic(
197
294
  if text:
198
295
  emitter.token(block_id, text)
199
296
  pieces.append(text)
200
- return "".join(pieces) if pieces else None
297
+ # C1: pull final usage off the SDK's `final_message`. Cost is the
298
+ # caller's problem (rate-limit module knows the model price); we
299
+ # report tokens here and let the credits ledger compute USD.
300
+ try:
301
+ final = stream.get_final_message()
302
+ usage.tokens_in = int(getattr(final.usage, "input_tokens", 0) or 0)
303
+ usage.tokens_out = int(getattr(final.usage, "output_tokens", 0) or 0)
304
+ except Exception: # noqa: BLE001 - usage is best-effort
305
+ pass
306
+ return ("".join(pieces) if pieces else None), usage
201
307
 
202
308
 
203
309
  def _run_openai(
204
310
  messages: list[dict[str, str]],
205
311
  emitter: EventEmitter,
206
312
  block_id: str,
207
- ) -> str | None:
313
+ ) -> tuple[str | None, _UsageDelta]:
208
314
  """Use the openai SDK if installed and a key is configured."""
315
+ usage = _UsageDelta()
209
316
  if not os.environ.get("OPENAI_API_KEY"):
210
- return None
317
+ return None, usage
211
318
  try:
212
319
  from openai import OpenAI
213
320
  except ImportError:
214
- return None
321
+ return None, usage
215
322
 
216
323
  client = OpenAI()
324
+ # ``stream_options.include_usage`` makes the final SSE chunk carry a
325
+ # populated ``usage`` block (otherwise streaming responses emit it as
326
+ # ``None``). Older SDK versions silently ignore unknown kwargs.
217
327
  stream = client.chat.completions.create(
218
328
  model=os.environ.get("OPENAI_MODEL", "gpt-4o-mini"),
219
329
  messages=messages,
220
330
  stream=True,
331
+ stream_options={"include_usage": True},
221
332
  )
222
333
  pieces: list[str] = []
223
334
  for chunk in stream:
224
- text = (chunk.choices[0].delta.content or "") if chunk.choices else ""
225
- if text:
226
- emitter.token(block_id, text)
227
- pieces.append(text)
228
- return "".join(pieces) if pieces else None
335
+ if chunk.choices:
336
+ text = chunk.choices[0].delta.content or ""
337
+ if text:
338
+ emitter.token(block_id, text)
339
+ pieces.append(text)
340
+ usage_obj = getattr(chunk, "usage", None)
341
+ if usage_obj is not None:
342
+ usage.tokens_in = int(getattr(usage_obj, "prompt_tokens", 0) or 0)
343
+ usage.tokens_out = int(getattr(usage_obj, "completion_tokens", 0) or 0)
344
+ return ("".join(pieces) if pieces else None), usage
345
+
346
+
347
+ def _run_openai_compat(
348
+ messages: list[dict[str, str]],
349
+ emitter: EventEmitter,
350
+ block_id: str,
351
+ *,
352
+ endpoint: Any,
353
+ ) -> tuple[str | None, _UsageDelta]:
354
+ """Stream from a user-registered OpenAI-v1-compatible endpoint (REQ-142).
355
+
356
+ Uses raw stdlib HTTP so the openai SDK is not a hard dependency for
357
+ BYOE. Sends a streaming ``/chat/completions`` request, decodes the
358
+ Server-Sent-Events ``data:`` lines, and forwards each ``content``
359
+ delta as a ``token`` event on ``block_id``.
360
+ """
361
+ usage = _UsageDelta()
362
+ base_url = endpoint.base_url.rstrip("/")
363
+ url = f"{base_url}/chat/completions"
364
+ model = endpoint.default_model or os.environ.get("SPECSMITH_OPENAI_COMPAT_MODEL", "")
365
+ if not model:
366
+ # The endpoint did not pin a default model and the env override is
367
+ # absent. We cannot fabricate one; fall back to the auto-detect chain.
368
+ return None, usage
369
+
370
+ headers: dict[str, str] = {
371
+ "Content-Type": "application/json",
372
+ "Accept": "text/event-stream",
373
+ }
374
+ try:
375
+ token = endpoint.resolve_token()
376
+ except Exception: # noqa: BLE001 - fall back to auto-detect chain
377
+ return None, usage
378
+ if token:
379
+ headers["Authorization"] = f"Bearer {token}"
380
+
381
+ body = json.dumps(
382
+ {
383
+ "model": model,
384
+ "messages": messages,
385
+ "stream": True,
386
+ # Many vLLM/llama.cpp builds honour OpenAI's stream_options;
387
+ # the request is harmless if they don't.
388
+ "stream_options": {"include_usage": True},
389
+ }
390
+ ).encode("utf-8")
391
+ req = Request(url, data=body, headers=headers, method="POST") # noqa: S310 - user-supplied
392
+
393
+ ctx = None
394
+ if not endpoint.verify_tls and url.startswith("https://"):
395
+ import ssl
396
+
397
+ ctx = ssl.create_default_context()
398
+ ctx.check_hostname = False
399
+ ctx.verify_mode = ssl.CERT_NONE
400
+
401
+ pieces: list[str] = []
402
+ try:
403
+ with urlopen(req, timeout=120, context=ctx) as resp: # noqa: S310 - user-supplied
404
+ for raw_line in resp:
405
+ line = raw_line.decode("utf-8", errors="replace").rstrip("\n\r")
406
+ if not line.startswith("data:"):
407
+ continue
408
+ payload = line[len("data:") :].strip()
409
+ if not payload or payload == "[DONE]":
410
+ if payload == "[DONE]":
411
+ break
412
+ continue
413
+ try:
414
+ obj = json.loads(payload)
415
+ except ValueError:
416
+ continue
417
+ choices = obj.get("choices") or []
418
+ usage_obj = obj.get("usage")
419
+ if usage_obj:
420
+ usage.tokens_in = int(usage_obj.get("prompt_tokens") or 0)
421
+ usage.tokens_out = int(usage_obj.get("completion_tokens") or 0)
422
+ if not choices:
423
+ continue
424
+ delta = (choices[0] or {}).get("delta") or {}
425
+ chunk = str(delta.get("content") or "")
426
+ if chunk:
427
+ emitter.token(block_id, chunk)
428
+ pieces.append(chunk)
429
+ except (URLError, TimeoutError, OSError):
430
+ return None, usage
431
+ return ("".join(pieces) if pieces else None), usage
229
432
 
230
433
 
231
434
  def _run_gemini(
232
435
  messages: list[dict[str, str]],
233
436
  emitter: EventEmitter,
234
437
  block_id: str,
235
- ) -> str | None:
438
+ ) -> tuple[str | None, _UsageDelta]:
236
439
  """Use google-genai SDK if installed and a key is configured."""
440
+ usage = _UsageDelta()
237
441
  if not os.environ.get("GOOGLE_API_KEY"):
238
- return None
442
+ return None, usage
239
443
  try:
240
444
  from google import genai
241
445
  except ImportError:
242
- return None
446
+ return None, usage
243
447
 
244
448
  client = genai.Client()
245
449
  prompt = "\n".join(f"{m['role']}: {m['content']}" for m in messages)
246
450
  pieces: list[str] = []
451
+ last_chunk: Any = None
247
452
  for chunk in client.models.generate_content_stream(
248
453
  model=os.environ.get("GEMINI_MODEL", "gemini-2.5-flash"),
249
454
  contents=prompt,
250
455
  ):
456
+ last_chunk = chunk
251
457
  text = getattr(chunk, "text", "") or ""
252
458
  if text:
253
459
  emitter.token(block_id, text)
254
460
  pieces.append(text)
255
- return "".join(pieces) if pieces else None
461
+ # Gemini exposes ``usage_metadata`` on the final chunk. Field names
462
+ # vary across SDK versions; we accept the union.
463
+ meta = getattr(last_chunk, "usage_metadata", None) if last_chunk else None
464
+ if meta is not None:
465
+ usage.tokens_in = int(
466
+ getattr(meta, "prompt_token_count", 0) or getattr(meta, "input_token_count", 0) or 0
467
+ )
468
+ usage.tokens_out = int(
469
+ getattr(meta, "candidates_token_count", 0)
470
+ or getattr(meta, "output_token_count", 0)
471
+ or 0
472
+ )
473
+ return ("".join(pieces) if pieces else None), usage
256
474
 
257
475
 
258
476
  # ---------------------------------------------------------------------------
@@ -0,0 +1,98 @@
1
+ # SPDX-License-Identifier: MIT
2
+ # Copyright (c) 2026 BitConcepts, LLC. All rights reserved.
3
+ """Shared agent runtime primitives (REQ-145).
4
+
5
+ Hosts low-level enums and dataclasses that span :mod:`specsmith.agent.runner`,
6
+ :mod:`specsmith.serve`, :mod:`specsmith.agent.profiles`, and
7
+ :mod:`specsmith.agent.fallback` without forcing them to import each other.
8
+
9
+ The historical ``cli.py`` referenced ``ModelTier`` from this module before
10
+ it existed in the source tree (the file was lost in an earlier refactor),
11
+ which produced an ``ImportError`` the moment ``specsmith run`` was
12
+ invoked. Restoring the symbol here is the prerequisite for the bridge
13
+ ``ready`` event handshake to land before the VS Code extension's 20 s
14
+ startup timeout fires.
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import enum
20
+ from dataclasses import dataclass, field
21
+ from typing import Any
22
+
23
+
24
+ class ModelTier(str, enum.Enum):
25
+ """Capability tier for an LLM call.
26
+
27
+ Ordered cheapest → most capable so that a fallback chain can iterate
28
+ in declaration order without external metadata.
29
+ """
30
+
31
+ FAST = "fast"
32
+ BALANCED = "balanced"
33
+ POWERFUL = "powerful"
34
+
35
+ @classmethod
36
+ def parse(
37
+ cls,
38
+ value: str | ModelTier | None,
39
+ default: ModelTier | None = None,
40
+ ) -> ModelTier:
41
+ """Tolerant parser used by CLI option handlers."""
42
+ if value is None or value == "":
43
+ return default or cls.BALANCED
44
+ if isinstance(value, cls):
45
+ return value
46
+ try:
47
+ return cls(str(value).strip().lower())
48
+ except ValueError:
49
+ return default or cls.BALANCED
50
+
51
+
52
+ @dataclass
53
+ class AgentState:
54
+ """Mutable per-session metrics surfaced via ``specsmith serve``'s
55
+ ``GET /api/status`` endpoint and the VS Code TokenMeter chip.
56
+
57
+ Field names mirror what :class:`specsmith.serve._AgentThread` reads off
58
+ ``runner._state``; do not rename without updating that consumer.
59
+ """
60
+
61
+ provider_name: str = ""
62
+ model_name: str = ""
63
+ profile_id: str = ""
64
+ session_tokens: int = 0
65
+ tokens_in: int = 0
66
+ tokens_out: int = 0
67
+ total_cost_usd: float = 0.0
68
+ tool_calls_made: int = 0
69
+ elapsed_minutes: float = 0.0
70
+ by_profile: dict[str, dict[str, Any]] = field(default_factory=dict)
71
+
72
+ def credit(
73
+ self,
74
+ *,
75
+ profile_id: str,
76
+ tokens_in: int = 0,
77
+ tokens_out: int = 0,
78
+ cost_usd: float = 0.0,
79
+ tool_calls: int = 0,
80
+ ) -> None:
81
+ """Aggregate one turn's metrics into the running totals."""
82
+ self.tokens_in += int(tokens_in)
83
+ self.tokens_out += int(tokens_out)
84
+ self.session_tokens = self.tokens_in + self.tokens_out
85
+ self.total_cost_usd += float(cost_usd)
86
+ self.tool_calls_made += int(tool_calls)
87
+ bucket = self.by_profile.setdefault(
88
+ profile_id or "(default)",
89
+ {"tokens_in": 0, "tokens_out": 0, "cost_usd": 0.0, "tool_calls": 0, "turns": 0},
90
+ )
91
+ bucket["tokens_in"] += int(tokens_in)
92
+ bucket["tokens_out"] += int(tokens_out)
93
+ bucket["cost_usd"] = round(bucket["cost_usd"] + float(cost_usd), 6)
94
+ bucket["tool_calls"] += int(tool_calls)
95
+ bucket["turns"] += 1
96
+
97
+
98
+ __all__ = ["AgentState", "ModelTier"]