versionhq 1.2.4.2__tar.gz → 1.2.4.3__tar.gz

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (166)
  1. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/PKG-INFO +1 -1
  2. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/pyproject.toml +1 -1
  3. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/__init__.py +1 -1
  4. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/_utils/__init__.py +1 -0
  5. versionhq-1.2.4.3/src/versionhq/_utils/usage_metrics.py +72 -0
  6. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/agent/model.py +6 -3
  7. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/agent_network/formation.py +9 -24
  8. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/agent_network/model.py +0 -1
  9. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/llm/model.py +3 -6
  10. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/storage/task_output_storage.py +2 -2
  11. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/task/model.py +41 -37
  12. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/task_graph/draft.py +1 -1
  13. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/task_graph/model.py +38 -34
  14. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq.egg-info/PKG-INFO +1 -1
  15. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/_prompt/auto_feedback_test.py +1 -1
  16. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/agent/agent_test.py +1 -0
  17. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/agent_network/agent_network_test.py +3 -2
  18. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/task/doc_taskoutput_test.py +0 -1
  19. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/task/doc_test.py +9 -5
  20. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/task/eval_test.py +1 -2
  21. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/task_graph/doc_test.py +5 -0
  22. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/uv.lock +107 -83
  23. versionhq-1.2.4.2/src/versionhq/_utils/usage_metrics.py +0 -55
  24. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/.env.sample +0 -0
  25. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/.github/workflows/deploy_docs.yml +0 -0
  26. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/.github/workflows/publish.yml +0 -0
  27. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/.github/workflows/publish_testpypi.yml +0 -0
  28. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/.github/workflows/run_tests.yml +0 -0
  29. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/.github/workflows/security_check.yml +0 -0
  30. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/.gitignore +0 -0
  31. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/.pre-commit-config.yaml +0 -0
  32. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/.python-version +0 -0
  33. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/LICENSE +0 -0
  34. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/README.md +0 -0
  35. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/SECURITY.md +0 -0
  36. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/db/preprocess.py +0 -0
  37. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/CNAME +0 -0
  38. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/_logos/favicon.ico +0 -0
  39. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/_logos/logo192.png +0 -0
  40. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/agent/config.md +0 -0
  41. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/agent/index.md +0 -0
  42. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/agent/task-handling.md +0 -0
  43. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/agent-network/config.md +0 -0
  44. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/agent-network/form.md +0 -0
  45. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/agent-network/index.md +0 -0
  46. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/agent-network/ref.md +0 -0
  47. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/knowledge.md +0 -0
  48. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/llm/index.md +0 -0
  49. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/memory.md +0 -0
  50. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/rag-tool.md +0 -0
  51. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/task/evaluation.md +0 -0
  52. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/task/index.md +0 -0
  53. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/task/reference.md +0 -0
  54. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/task/response-field.md +0 -0
  55. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/task/task-execution.md +0 -0
  56. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/task/task-output.md +0 -0
  57. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/task/task-strc-response.md +0 -0
  58. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/task-graph/index.md +0 -0
  59. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/core/tool.md +0 -0
  60. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/index.md +0 -0
  61. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/quickstart.md +0 -0
  62. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/stylesheets/main.css +0 -0
  63. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/docs/tags.md +0 -0
  64. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/mkdocs.yml +0 -0
  65. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/requirements-dev.txt +0 -0
  66. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/requirements.txt +0 -0
  67. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/runtime.txt +0 -0
  68. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/setup.cfg +0 -0
  69. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/_prompt/auto_feedback.py +0 -0
  70. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/_prompt/constants.py +0 -0
  71. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/_prompt/model.py +0 -0
  72. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/_utils/i18n.py +0 -0
  73. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/_utils/is_valid_url.py +0 -0
  74. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/_utils/llm_as_a_judge.py +0 -0
  75. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/_utils/logger.py +0 -0
  76. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/_utils/process_config.py +0 -0
  77. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/_utils/vars.py +0 -0
  78. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
  79. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
  80. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/agent/__init__.py +0 -0
  81. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/agent/inhouse_agents.py +0 -0
  82. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/agent/parser.py +0 -0
  83. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/agent/rpm_controller.py +0 -0
  84. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/agent_network/__init__.py +0 -0
  85. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/cli/__init__.py +0 -0
  86. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/clients/__init__.py +0 -0
  87. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/clients/customer/__init__.py +0 -0
  88. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/clients/customer/model.py +0 -0
  89. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/clients/product/__init__.py +0 -0
  90. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/clients/product/model.py +0 -0
  91. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/clients/workflow/__init__.py +0 -0
  92. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/clients/workflow/model.py +0 -0
  93. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/knowledge/__init__.py +0 -0
  94. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/knowledge/_utils.py +0 -0
  95. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/knowledge/embedding.py +0 -0
  96. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/knowledge/model.py +0 -0
  97. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/knowledge/source.py +0 -0
  98. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/knowledge/source_docling.py +0 -0
  99. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/knowledge/storage.py +0 -0
  100. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/llm/__init__.py +0 -0
  101. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/llm/llm_vars.py +0 -0
  102. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/memory/__init__.py +0 -0
  103. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/memory/contextual_memory.py +0 -0
  104. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/memory/model.py +0 -0
  105. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/storage/__init__.py +0 -0
  106. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/storage/base.py +0 -0
  107. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/storage/ltm_sqlite_storage.py +0 -0
  108. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/storage/mem0_storage.py +0 -0
  109. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/storage/rag_storage.py +0 -0
  110. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/storage/utils.py +0 -0
  111. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/task/TEMPLATES/Description.py +0 -0
  112. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/task/__init__.py +0 -0
  113. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/task/evaluation.py +0 -0
  114. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/task/formatter.py +0 -0
  115. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/task/structured_response.py +0 -0
  116. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/task_graph/__init__.py +0 -0
  117. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/task_graph/colors.py +0 -0
  118. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/tool/__init__.py +0 -0
  119. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/tool/cache_handler.py +0 -0
  120. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/tool/composio_tool.py +0 -0
  121. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/tool/composio_tool_vars.py +0 -0
  122. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/tool/decorator.py +0 -0
  123. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/tool/model.py +0 -0
  124. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/tool/rag_tool.py +0 -0
  125. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq/tool/tool_handler.py +0 -0
  126. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq.egg-info/SOURCES.txt +0 -0
  127. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq.egg-info/dependency_links.txt +0 -0
  128. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq.egg-info/requires.txt +0 -0
  129. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/src/versionhq.egg-info/top_level.txt +0 -0
  130. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/__init__.py +0 -0
  131. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/_prompt/prompt_test.py +0 -0
  132. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/_sample/sample.csv +0 -0
  133. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/_sample/sample.json +0 -0
  134. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/_sample/sample.mp3 +0 -0
  135. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/_sample/screenshot.png +0 -0
  136. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/agent/__init__.py +0 -0
  137. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/agent/doc_test.py +0 -0
  138. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/agent_network/Prompts/Demo_test.py +0 -0
  139. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/agent_network/__init__.py +0 -0
  140. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/agent_network/doc_test.py +0 -0
  141. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/cli/__init__.py +0 -0
  142. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/clients/customer_test.py +0 -0
  143. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/clients/product_test.py +0 -0
  144. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/clients/workflow_test.py +0 -0
  145. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/conftest.py +0 -0
  146. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/doc_test.py +0 -0
  147. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/formation_test.py +0 -0
  148. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/knowledge/__init__.py +0 -0
  149. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/knowledge/knowledge_test.py +0 -0
  150. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/knowledge/mock_report_compressed.pdf +0 -0
  151. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/llm/__init__.py +0 -0
  152. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/llm/llm_connection_test.py +0 -0
  153. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/llm/llm_test.py +0 -0
  154. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/memory/__init__.py +0 -0
  155. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/memory/memory_test.py +0 -0
  156. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/task/__init__.py +0 -0
  157. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/task/doc_eval_test.py +0 -0
  158. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/task/task_test.py +0 -0
  159. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/task_graph/__init__.py +0 -0
  160. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/task_graph/task_graph_test.py +0 -0
  161. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/tool/__init__.py +0 -0
  162. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/tool/composio_test.py +0 -0
  163. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/tool/doc_test.py +0 -0
  164. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/tool/rag_tool_test.py +0 -0
  165. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/tool/tool_test.py +0 -0
  166. {versionhq-1.2.4.2 → versionhq-1.2.4.3}/tests/usecase_test.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: versionhq
- Version: 1.2.4.2
+ Version: 1.2.4.3
  Summary: Autonomous agent networks for task automation with multi-step reasoning.
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License
pyproject.toml
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__", "*.egg-info"]

  [project]
  name = "versionhq"
- version = "1.2.4.2"
+ version = "1.2.4.3"
  authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
  description = "Autonomous agent networks for task automation with multi-step reasoning."
  readme = "README.md"
src/versionhq/__init__.py
@@ -32,7 +32,7 @@ from versionhq.agent_network.formation import form_agent_network
  from versionhq.task_graph.draft import workflow


- __version__ = "1.2.4.2"
+ __version__ = "1.2.4.3"
  __all__ = [
      "Agent",

src/versionhq/_utils/__init__.py
@@ -2,3 +2,4 @@ from versionhq._utils.logger import Logger
  from versionhq._utils.process_config import process_config
  from versionhq._utils.vars import KNOWLEDGE_DIRECTORY, MAX_FILE_NAME_LENGTH
  from versionhq._utils.is_valid_url import is_valid_url
+ from versionhq._utils.usage_metrics import UsageMetrics, ErrorType
src/versionhq/_utils/usage_metrics.py (new file)
@@ -0,0 +1,72 @@
+ import uuid
+ import enum
+ import datetime
+ from typing import Dict, List
+ from typing_extensions import Self
+
+ from pydantic import BaseModel, UUID4, InstanceOf
+
+
+ class ErrorType(enum.Enum):
+     FORMAT = 1
+     TOOL = 2
+     API = 3
+     OVERFITTING = 4
+     HUMAN_INTERACTION = 5
+
+
+ class UsageMetrics(BaseModel):
+     """A Pydantic model to manage token usage, errors, job latency."""
+
+     id: UUID4 = uuid.uuid4() # stores task id or task graph id
+     total_tokens: int = 0
+     prompt_tokens: int = 0
+     completion_tokens: int = 0
+     successful_requests: int = 0
+     total_errors: int = 0
+     error_breakdown: Dict[ErrorType, int] = dict()
+     latency: float = 0.0 # in ms
+
+     def record_token_usage(self, token_usages: List[Dict[str, int]]) -> None:
+         """Records usage metrics from the raw response of the model."""
+
+         if token_usages:
+             for item in token_usages:
+                 self.total_tokens += int(item["total_tokens"]) if "total_tokens" in item else 0
+                 self.completion_tokens += int(item["completion_tokens"]) if "completion_tokens" in item else 0
+                 self.prompt_tokens += int(item["prompt_tokens"]) if "prompt_tokens" in item else 0
+
+
+     def record_errors(self, type: ErrorType = None) -> None:
+         self.total_errors += 1
+         if type:
+             if type in self.error_breakdown:
+                 self.error_breakdown[type] += 1
+             else:
+                 self.error_breakdown[type] = 1
+
+
+     def record_latency(self, start_dt: datetime.datetime, end_dt: datetime.datetime) -> None:
+         self.latency += round((end_dt - start_dt).total_seconds() * 1000, 3)
+
+
+     def aggregate(self, metrics: InstanceOf["UsageMetrics"]) -> Self:
+         if not metrics:
+             return self
+
+         self.total_tokens += metrics.total_tokens if metrics.total_tokens else 0
+         self.prompt_tokens += metrics.prompt_tokens if metrics.prompt_tokens else 0
+         self.completion_tokens += metrics.completion_tokens if metrics.completion_tokens else 0
+         self.successful_requests += metrics.successful_requests if metrics.successful_requests else 0
+         self.total_errors += metrics.total_errors if metrics.total_errors else 0
+         self.latency += metrics.latency if metrics.latency else 0.0
+         self.latency = round(self.latency, 3)
+
+         if metrics.error_breakdown:
+             for k, v in metrics.error_breakdown.items():
+                 if self.error_breakdown and k in self.error_breakdown:
+                     self.error_breakdown[k] += int(v)
+                 else:
+                     self.error_breakdown.update({ k: v })
+
+         return self
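Illustration (not part of the package diff): the new UsageMetrics model centralizes the token, error, and latency bookkeeping that was previously scattered across ad-hoc `_tokens` counters. A minimal sketch exercising it, assuming only the class above and the import added to `_utils/__init__.py`:

    import datetime
    import uuid

    from versionhq._utils import UsageMetrics, ErrorType

    metrics = UsageMetrics(id=uuid.uuid4())

    # Fold OpenAI-style usage dicts into the counters.
    metrics.record_token_usage(token_usages=[
        {"total_tokens": 120, "prompt_tokens": 80, "completion_tokens": 40},
        {"total_tokens": 60, "prompt_tokens": 35, "completion_tokens": 25},
    ])

    # Tally an error under its category.
    metrics.record_errors(type=ErrorType.FORMAT)

    # Accumulate wall-clock latency in milliseconds.
    start = datetime.datetime.now()
    end = start + datetime.timedelta(milliseconds=250)
    metrics.record_latency(start_dt=start, end_dt=end)

    assert metrics.total_tokens == 180
    assert metrics.error_breakdown[ErrorType.FORMAT] == 1
    assert metrics.latency == 250.0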
src/versionhq/agent/model.py
@@ -11,7 +11,7 @@ from versionhq.agent.rpm_controller import RPMController
  from versionhq.tool.model import Tool, ToolSet, BaseTool
  from versionhq.knowledge.model import BaseKnowledgeSource, Knowledge
  from versionhq.memory.model import ShortTermMemory, LongTermMemory, UserMemory
- from versionhq._utils import Logger, process_config, is_valid_url
+ from versionhq._utils import Logger, process_config, is_valid_url, ErrorType


  load_dotenv(override=True)
@@ -373,16 +373,17 @@ class Agent(BaseModel):

              if tool_res_as_final:
                  raw_response = self.func_calling_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
-                 task._tokens = self.func_calling_llm._tokens
+                 task._usage.record_token_usage(token_usages=self.func_calling_llm._usages)
              else:
                  raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)
-                 task._tokens = self.llm._tokens
+                 task._usage.record_token_usage(token_usages=self.llm._usages)

              task_execution_counter += 1
              Logger(**self._logger_config, filename=self.key).log(level="info", message=f"Agent response: {raw_response}", color="green")
              return raw_response

          except Exception as e:
+             task._usage.record_errors(type=ErrorType.API)
              Logger(**self._logger_config, filename=self.key).log(level="error", message=f"An error occured. The agent will retry: {str(e)}", color="red")

          while not raw_response and task_execution_counter <= self.max_retry_limit:
@@ -526,6 +527,8 @@ class Agent(BaseModel):
                  tool_res_as_final=task.tool_res_as_final,
                  task=task
              )
+             if raw_response:
+                 task._usage.successful_requests += 1

          except Exception as e:
              self._times_executed += 1
src/versionhq/agent_network/formation.py
@@ -93,10 +93,11 @@ def form_agent_network(

      network_tasks = []
      members = []
-     leader = str(res.pydantic.leader_agent) if res.pydantic and hasattr(res.pydantic, "leader_agent") else str(res.json_dict["leader_agent"]) if "leader_agent" in res.json_dict else None
-
-     agent_roles = res.pydantic.agent_roles if res.pydantic else res.json_dict["agent_roles"]
-     created_agents = [Agent(role=str(item), goal=str(item)) for item in agent_roles]
+     leader = res._fetch_value_of(key="leader_agent")
+     agent_roles = res._fetch_value_of(key="agent_roles")
+     created_agents = [Agent(role=str(item), goal=str(item)) for item in agent_roles] if agent_roles else []
+     task_descriptions = res._fetch_value_of(key="task_descriptions")
+     task_outcomes = res._fetch_value_of(key="task_outcomes")

      if agents:
          for i, agent in enumerate(created_agents):
@@ -108,9 +109,9 @@ def form_agent_network(

      created_tasks = []

-     if res.pydantic:
-         for i, item in enumerate(res.pydantic.task_outcomes):
-             if len(res.pydantic.task_descriptions) > i and res.pydantic.task_descriptions[i]:
+     if task_outcomes:
+         for i, item in enumerate(task_outcomes):
+             if len(task_descriptions) > i and task_descriptions[i]:
                  fields = {}
                  for ob in item:
                      try:
@@ -119,24 +120,9 @@ def form_agent_network(
                      except:
                          pass
                  output = create_model("Output", **fields) if fields else None
-                 _task = Task(description=res.pydantic.task_descriptions[i], pydantic_output=output)
+                 _task = Task(description=task_descriptions[i], pydantic_output=output)
                  created_tasks.append(_task)

-     elif res.json_dict:
-         for i, item in enumerate(res["task_outcomes"]):
-             if len(res["task_descriptions"]) > i and res["task_descriptions"][i]:
-                 fields = {}
-                 for ob in item:
-                     try:
-                         field_name = str(ob).lower().split(":")[0].replace(" ", "_")[0: 16]
-                         fields[field_name] = (str, Field(default=None))
-                     except:
-                         pass
-                 output = create_model("Output", **fields) if fields else None
-                 _task = Task(description=res["task_descriptions"][i], pydantic_output=output)
-                 created_tasks.append(_task)
-
-
      if len(created_tasks) <= len(created_agents):
          for i in range(len(created_tasks)):
              is_manager = False if not leader else bool(created_agents[i].role.lower() == leader.lower())
@@ -159,7 +145,6 @@ def form_agent_network(

          network_tasks.extend(created_tasks[len(created_agents):len(created_tasks)])

-
      if _formation == Formation.SUPERVISING and not [member for member in members if member.is_manager]:
          role = leader if leader else "Leader"
          manager = Member(agent=Agent(role=role), is_manager=True)
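Illustration (not part of the package diff): the duplicated `res.pydantic` / `res.json_dict` branches above collapse into `TaskOutput._fetch_value_of`, whose body appears in the task/model.py hunk further down. A standalone sketch of the lookup order it implements (the free-function name here is hypothetical):

    from typing import Any, Optional

    def fetch_value_of(pydantic_obj: Any, json_dict: Optional[dict], key: str) -> Any:
        # Prefer an attribute on the structured (pydantic) output...
        if pydantic_obj is not None and hasattr(pydantic_obj, key):
            return getattr(pydantic_obj, key)
        # ...fall back to the parsed JSON dict...
        if json_dict and key in json_dict:
            return json_dict[key]
        # ...and return None rather than raising when the key is absent.
        return None

    assert fetch_value_of(None, {"leader_agent": "Researcher"}, "leader_agent") == "Researcher"
    assert fetch_value_of(None, {}, "agent_roles") is None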
src/versionhq/agent_network/model.py
@@ -90,7 +90,6 @@ class AgentNetwork(BaseModel):

      cache: bool = Field(default=True)
      execution_logs: List[Dict[str, Any]] = Field(default_factory=list, description="list of execution logs of the tasks handled by members")
-     # usage_metrics: Optional[UsageMetrics] = Field(default=None, description="usage metrics for all the llm executions")


      def __name__(self) -> str:
src/versionhq/llm/model.py
@@ -69,7 +69,7 @@ class LLM(BaseModel):

      _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
      _init_model_name: str = PrivateAttr(default=None)
-     _tokens: int = PrivateAttr(default=0) # aggregate number of tokens consumed
+     _usages: list[Dict[str, int]] = PrivateAttr(default_factory=list)

      model: str = Field(default=None)
      provider: Optional[str] = Field(default=None, description="model provider")
@@ -181,8 +181,6 @@
          """
          litellm.drop_params = True

-         self._tokens = 0
-
          if self.callbacks:
              self._set_callbacks(self.callbacks)

@@ -319,7 +317,7 @@
          if not tools:
              params = self._create_valid_params(config=config)
              res = litellm.completion(model=self.model, messages=messages, stream=False, **params, **cred)
-             self._tokens += int(res["usage"]["total_tokens"])
+             self._usages.append(res["usage"])
              return res["choices"][0]["message"]["content"]

          else:
@@ -384,12 +382,11 @@
                      else:
                          pass

-
          if tool_res_as_final:
              return tool_res
          else:
              res = litellm.completion(model=self.model, messages=messages, **params, **cred)
-             self._tokens += int(res["usage"]["total_tokens"])
+             self._usages.append(res["usage"])
              return res.choices[0].message.content

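Illustration (not part of the package diff): LLM now keeps one usage dict per completion call in `_usages` instead of a single running `_tokens` integer, which preserves the prompt/completion split until the task folds it into its UsageMetrics. A sketch of the flow, with a stand-in for `litellm.completion` (the response shape assumed here is the OpenAI-style usage block):

    from typing import Dict, List

    _usages: List[Dict[str, int]] = []

    def fake_completion() -> dict:
        # Stand-in for litellm.completion(); only the usage block matters here.
        return {"usage": {"total_tokens": 90, "prompt_tokens": 60, "completion_tokens": 30}}

    res = fake_completion()
    _usages.append(res["usage"])  # what LLM.call() now does after each request

    # Later, Agent.execute_task() folds the list into the task's metrics:
    #     task._usage.record_token_usage(token_usages=self.llm._usages)
    total = sum(u.get("total_tokens", 0) for u in _usages)
    assert total == 90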
src/versionhq/storage/task_output_storage.py
@@ -147,8 +147,8 @@ class TaskOutputStorageHandler:
              description=str(task.description),
              raw=str(task.output.raw),
              responsible_agents=str(task.processed_agents),
-             tokens=task.output._tokens,
-             latency=task.output.latency,
+             tokens=task._usage.total_tokens,
+             latency=task._usage.latency,
              score=task.output.aggregate_score if task.output.aggregate_score else "None",
          )
          self.storage.add(task=task, output=output_to_store, inputs=inputs)
src/versionhq/task/model.py
@@ -6,7 +6,7 @@ import inspect
  import enum
  from concurrent.futures import Future
  from hashlib import md5
- from typing import Any, Dict, List, Set, Optional, Callable, Type, Tuple
+ from typing import Any, Dict, List, Set, Optional, Callable, Type
  from typing_extensions import Annotated, Self

  from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, InstanceOf, field_validator
@@ -15,7 +15,7 @@ from pydantic_core import PydanticCustomError
  import versionhq as vhq
  from versionhq.task.evaluation import Evaluation, EvaluationItem
  from versionhq.tool.model import Tool, ToolSet
- from versionhq._utils import process_config, Logger
+ from versionhq._utils import process_config, Logger, UsageMetrics, ErrorType


  class TaskExecutionType(enum.Enum):
@@ -175,18 +175,31 @@ class TaskOutput(BaseModel):
      A class to store the final output of the given task in raw (string), json_dict, and pydantic class formats.
      """

-     _tokens: int = PrivateAttr(default=0)
-
      task_id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="store Task ID")
      raw: str = Field(default="", description="Raw output of the task")
      json_dict: Dict[str, Any] = Field(default=None, description="`raw` converted to dictionary")
      pydantic: Optional[Any] = Field(default=None)
      tool_output: Optional[Any] = Field(default=None, description="stores tool result when the task takes tool output as its final output")
      callback_output: Optional[Any] = Field(default=None, description="stores task or agent callback outcome")
-     latency: float = Field(default=None, description="job latency in ms")
      evaluation: Optional[InstanceOf[Evaluation]] = Field(default=None, description="stores overall evaluation of the task output. stored in ltm")


+     def _fetch_value_of(self, key: str = None) -> Any:
+         """Returns a value to the given key."""
+
+         if not key:
+             return None
+
+         if self.pydantic and hasattr(self.pydantic, key):
+             return getattr(self.pydantic, key)
+
+         elif self.json_dict and key in self.json_dict:
+             return self.json_dict[key]
+
+         else:
+             return None
+
+
      def _to_context_prompt(self) -> str:
          """Formats prompt context in text formats from the final response."""

@@ -225,7 +238,6 @@

          task_eval = Task(description=description, pydantic_output=EvaluationItem)
          res = task_eval.execute(agent=self.evaluation.eval_by)
-         self._tokens += task_eval._tokens

          if res.pydantic:
              item = EvaluationItem(
@@ -328,9 +340,7 @@
      fsls: Optional[list[str]] = Field(default=None, description="stores ideal/weak responses")

      # recording
-     _tokens: int = 0
-     _tool_errors: int = 0
-     _format_errors: int = 0
+     _usage: UsageMetrics = PrivateAttr(default=None)
      _delegations: int = 0
      processed_agents: Set[str] = Field(default_factory=set, description="store keys of the agents that executed the task")
      output: Optional[TaskOutput] = Field(default=None, description="store the final TaskOutput object")
@@ -355,6 +365,8 @@
          for field in required_fields:
              if getattr(self, field) is None:
                  raise ValueError( f"{field} must be provided either directly or through config")
+
+         self._usage = UsageMetrics(id=self.id)
          return self


@@ -433,14 +445,15 @@
          output = json.loads(j)

          if isinstance(output, dict):
-             return output
+             return output["json_schema"] if "json_schema" in output else output
          else:
              try:
                  output = ast.literal_eval(j)
              except:
                  output = ast.literal_eval(r)

-         return output if isinstance(output, dict) else { "output": str(r) }
+
+         return output["json_schema"] if isinstance(output, dict) and "json_schema" in output else output if isinstance(output, dict) else { "output": str(r) }


      def _create_json_output(self, raw: str) -> Dict[str, Any]:
@@ -456,12 +469,13 @@
          try:
              output = json.loads(raw)
              if isinstance(output, dict):
-                 return output
+                 return output["json_schema"] if "json_schema" in output else output
              else:
                  output = self._sanitize_raw_output(raw=raw)
                  return output
          except:
              output = self._sanitize_raw_output(raw=raw)
+             self._usage.record_errors(type=ErrorType.FORMAT)
              return output

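Illustration (not part of the package diff): both `_sanitize_raw_output` and `_create_json_output` now unwrap a top-level "json_schema" key, apparently to cope with models that echo the requested schema wrapper around their answer. The rule in isolation:

    import json
    from typing import Any, Dict

    def unwrap(raw: str) -> Dict[str, Any]:
        output = json.loads(raw)
        if isinstance(output, dict):
            # A top-level "json_schema" wrapper is replaced by its contents.
            return output["json_schema"] if "json_schema" in output else output
        return {"output": raw}

    assert unwrap('{"json_schema": {"title": "demo"}}') == {"title": "demo"}
    assert unwrap('{"title": "demo"}') == {"title": "demo"}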
@@ -592,13 +606,6 @@
              res = self._test_time_computation(agent=agent, context=context)
              return res

-         # if self._pfg:
-         #     res, all_outputs = self.pfg.activate()
-         #     tokens, latency = self.pfg.usage
-         #     self._tokens = tokens
-         #     res.latency = latency
-         #     return res
-
          match type:
              case TaskExecutionType.SYNC:
                  res = self._execute_sync(agent=agent, context=context)
@@ -629,11 +636,11 @@
      def _execute_core(self, agent, context: Optional[Any]) -> TaskOutput:
          """A core method to execute a single task."""

+         start_dt = datetime.datetime.now()
          task_output: InstanceOf[TaskOutput] = None
          raw_output: str = None
          tool_output: str | list = None
          task_tools: List[List[InstanceOf[Tool]| InstanceOf[ToolSet] | Type[Tool]]] = []
-         started_at, ended_at = datetime.datetime.now(), datetime.datetime.now()
          user_prompt, dev_prompt = None, None

          if self.tools:
@@ -647,17 +654,14 @@
              self._delegations += 1

          if self.tool_res_as_final == True:
-             started_at = datetime.datetime.now()
              user_prompt, dev_prompt, tool_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
              raw_output = str(tool_output) if tool_output else ""
-             ended_at = datetime.datetime.now()
+             if not raw_output:
+                 self._usage.record_errors(type=ErrorType.TOOL)
              task_output = TaskOutput(task_id=self.id, tool_output=tool_output, raw=raw_output)

          else:
-             started_at = datetime.datetime.now()
              user_prompt, dev_prompt, raw_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
-             ended_at = datetime.datetime.now()
-
              json_dict_output = self._create_json_output(raw=raw_output)
              if "outcome" in json_dict_output:
                  json_dict_output = self._create_json_output(raw=str(json_dict_output["outcome"]))
@@ -671,8 +675,6 @@
              json_dict=json_dict_output,
          )

-         task_output.latency = round((ended_at - started_at).total_seconds() * 1000, 3)
-         task_output._tokens = self._tokens
          self.output = task_output
          self.processed_agents.add(agent.key)

@@ -706,6 +708,8 @@
              self.output = task_output
              self._store_logs()

+         end_dt = datetime.datetime.now()
+         self._usage.record_latency(start_dt=start_dt, end_dt=end_dt)
          return task_output

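Illustration (not part of the package diff): `_execute_core` now times one window around the whole method and records it through the task's UsageMetrics, replacing the per-branch `started_at`/`ended_at` pairs. The timing idiom in isolation:

    import datetime
    import time
    import uuid

    from versionhq._utils import UsageMetrics

    usage = UsageMetrics(id=uuid.uuid4())

    start_dt = datetime.datetime.now()  # taken once at the top of the method
    time.sleep(0.01)                    # stands in for prompt build + LLM call + parsing
    end_dt = datetime.datetime.now()    # taken once just before returning

    usage.record_latency(start_dt=start_dt, end_dt=end_dt)
    assert usage.latency >= 10.0        # milliseconds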
@@ -716,22 +720,22 @@
          from versionhq._prompt.model import Prompt
          from versionhq._prompt.auto_feedback import PromptFeedbackGraph

+         # self._usage = None
          prompt = Prompt(task=self, agent=agent, context=context)
          pfg = PromptFeedbackGraph(prompt=prompt, should_reform=self.human, reform_trigger_event=ReformTriggerEvent.USER_INPUT if self.human else None)
          pfg = pfg.set_up_graph()
          self._pfg = pfg

-         # try:
-         if self._pfg and self.output is None:
-             res, _ = self._pfg.activate()
-             tokens, latency = self._pfg.usage
-             self._tokens = tokens
-             res.latency = latency
-             return res
+         try:
+             if self._pfg and self.output is None:
+                 res, all_outputs = self._pfg.activate()
+                 if all_outputs: self._usage = self._pfg._usage
+                 return res

-         # except:
-         #     Logger().log(level="error", message="Failed to execute the task.", color="red")
-         #     return None, None
+         except:
+             self._usage.record_errors(type=ErrorType.API)
+             Logger().log(level="error", message="Failed to execute the task.", color="red")
+             return None


      @property
src/versionhq/task_graph/draft.py
@@ -96,6 +96,6 @@ def workflow(final_output: Type[BaseModel], context: Any = None, human: bool = F
          task_graph.add_dependency(
              source=source.identifier, target=target.identifier, dependency_type=dependency_type)

-     task_graph.visualize()
+     # task_graph.visualize()

      return task_graph
src/versionhq/task_graph/model.py
@@ -15,7 +15,7 @@ from pydantic_core import PydanticCustomError

  from versionhq.agent.model import Agent
  from versionhq.task.model import Task, TaskOutput, Evaluation
- from versionhq._utils import Logger
+ from versionhq._utils import Logger, UsageMetrics, ErrorType


  class ReformTriggerEvent(enum.Enum):
@@ -393,6 +393,8 @@


  class TaskGraph(Graph):
+     _usage: Optional[UsageMetrics] = None
+
      id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
      should_reform: bool = False
      reform_trigger_event: Optional[ReformTriggerEvent] = None
@@ -418,6 +420,40 @@
              Logger().log(level="error", message=f"Failed to save the graph {str(self.id)}: {str(e)}", color="red")


+     def _handle_usage(self) -> None:
+         """Returns total tokens and latency spended for the graph execution."""
+         if not self.nodes:
+             return None
+
+         self._usage = self._usage if self._usage else UsageMetrics(id=self.id)
+
+         for node in self.nodes.values():
+             if node.task and node.task._usage:
+                 self._usage.aggregate(metrics=node.task._usage)
+
+
+     def _handle_human_input(self) -> str | None:
+         """Handles input from human."""
+         request = None
+
+         print('Proceed? Y/n:')
+         x = input()
+
+         if x.lower() == "y":
+             Logger().log(message="Ok, proceeding to the next graph execution.", level="info", color="blue")
+
+         else:
+             request = input("Request?")
+             if request:
+                 Logger().log(message=f"Ok. regenerating the graph based on your input: ', {request}", level="info", color="blue")
+             else:
+                 Logger().log(message="Cannot recognize your request.", level="error", color="red")
+                 self._usage = self._usage if self._usage else UsageMetrics(id=self.id)
+                 self._usage.record_errors(type=ErrorType.HUMAN_INTERACTION)
+
+         return request
+
+
      def add_task(self, task: Node | Task) -> Node:
          """Convert `task` to a Node object and add it to G"""

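Illustration (not part of the package diff): `_handle_usage` rolls each node's task-level metrics up into the graph's own `_usage` via `UsageMetrics.aggregate`, replacing the tuple-returning `usage` property removed further down. The roll-up in isolation:

    import uuid

    from versionhq._utils import UsageMetrics

    graph_usage = UsageMetrics(id=uuid.uuid4())

    # Stand-ins for what each node.task._usage would hold after execution.
    node_a = UsageMetrics(id=uuid.uuid4(), total_tokens=100, latency=120.5)
    node_b = UsageMetrics(id=uuid.uuid4(), total_tokens=50, latency=79.5)

    for node_usage in (node_a, node_b):
        graph_usage.aggregate(metrics=node_usage)

    assert graph_usage.total_tokens == 150
    assert graph_usage.latency == 200.0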
@@ -635,6 +671,7 @@
          self.concl = res
          self.concl_template = self.concl_template if self.concl_template else res.pydantic.__class__ if res.pydantic else None
          # last_task_output = [v for v in self.outputs.values()][len([v for v in self.outputs.values()]) - 1] if [v for v in self.outputs.values()] else None
+         self._handle_usage()
          return res, self.outputs


@@ -657,27 +694,6 @@
          return eval


-     def _handle_human_input(self) -> str | None:
-         """Handles input from human."""
-         request = None
-
-         print('Proceed? Y/n:')
-         x = input()
-
-         if x.lower() == "y":
-             Logger().log(message="Ok, proceeding to the next graph execution.", level="info", color="blue")
-
-         else:
-             request = input("Request?")
-
-             if request:
-                 Logger().log(message=f"Ok. regenerating the graph based on your input: ', {request}", level="info", color="blue")
-             else:
-                 Logger().log(message="Cannot recognize your request.", level="error", color="red")
-
-         return request
-
-
      def handle_reform(self, target: str = None) -> Self:
          task_description = "Improve the given output: "
          if target:
@@ -693,15 +709,3 @@
          self.add_node(node=new_node)
          self.add_dependency(source=target, target=new_node.identifier)
          return self.activate(target=new_node.identifier)
-
-
-     @property
-     def usage(self) -> Tuple[int, float]:
-         """Returns aggregate number of consumed tokens and job latency in ms during the activation"""
-
-         tokens, latency = 0, 0
-         for v in self.outputs.values():
-             tokens += v._tokens
-             latency += v.latency
-
-         return tokens, latency
src/versionhq.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: versionhq
- Version: 1.2.4.2
+ Version: 1.2.4.3
  Summary: Autonomous agent networks for task automation with multi-step reasoning.
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License
tests/_prompt/auto_feedback_test.py
@@ -7,7 +7,7 @@ from versionhq.agent.model import Agent
  from versionhq.task.model import Task, TaskOutput


- def test_create():
+ def test_pfg():
      class Custom(BaseModel):
          schedule: str
          destination: str
tests/agent/agent_test.py
@@ -329,6 +329,7 @@ def test_start_with_tools():

      res = agent.start(tool_res_as_final=True)
      assert res.tool_output == "demo"
+     assert res.raw == res.tool_output


  def test_self_learn():
tests/agent_network/agent_network_test.py
@@ -293,5 +293,6 @@ def test_network_eval():
          network_tasks=[Task(description="draft a random poem")]
      )

-     res, _ = network.launch()
-     assert res._tokens and res.latency
+     res, tg = network.launch()
+     assert isinstance(res, vhq.TaskOutput)
+     assert isinstance(tg, vhq.TaskGraph)
tests/task/doc_taskoutput_test.py
@@ -32,6 +32,5 @@ def test_doc_core_taskoutput_a():
      assert res.tool_output is None
      assert isinstance(res.evaluation, vhq.Evaluation)
      assert [isinstance(item, vhq.EvaluationItem) and item.criteria in task.eval_criteria for item in res.evaluation.items]
-     assert res.latency and res._tokens
      assert res.evaluation.aggregate_score is not None and res.evaluation.suggestion_summary
      assert res.final == res.callback_output