versionhq 1.2.3.0.tar.gz → 1.2.3.4.tar.gz

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
Files changed (157)
  1. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/.env.sample +0 -2
  2. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/.github/workflows/run_tests.yml +9 -5
  3. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/.gitignore +2 -1
  4. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/PKG-INFO +10 -13
  5. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/pyproject.toml +24 -25
  6. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/requirements.txt +1 -0
  7. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/__init__.py +1 -1
  8. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/clients/workflow/model.py +4 -71
  9. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/storage/ltm_sqlite_storage.py +1 -1
  10. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/task/TEMPLATES/Description.py +1 -1
  11. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/task/evaluation.py +30 -62
  12. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/task/model.py +11 -6
  13. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq.egg-info/PKG-INFO +10 -13
  14. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq.egg-info/requires.txt +5 -12
  15. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/clients/workflow_test.py +2 -17
  16. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/task/doc_eval_test.py +1 -3
  17. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/uv.lock +421 -462
  18. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/.github/workflows/deploy_docs.yml +0 -0
  19. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/.github/workflows/publish.yml +0 -0
  20. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/.github/workflows/publish_testpypi.yml +0 -0
  21. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/.github/workflows/security_check.yml +0 -0
  22. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/.pre-commit-config.yaml +0 -0
  23. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/.python-version +0 -0
  24. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/LICENSE +0 -0
  25. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/README.md +0 -0
  26. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/SECURITY.md +0 -0
  27. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/db/preprocess.py +0 -0
  28. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/CNAME +0 -0
  29. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/_logos/favicon.ico +0 -0
  30. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/_logos/logo192.png +0 -0
  31. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/agent/config.md +0 -0
  32. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/agent/index.md +0 -0
  33. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/agent/task-handling.md +0 -0
  34. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/agent-network/config.md +0 -0
  35. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/agent-network/form.md +0 -0
  36. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/agent-network/index.md +0 -0
  37. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/agent-network/ref.md +0 -0
  38. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/knowledge.md +0 -0
  39. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/llm/index.md +0 -0
  40. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/memory.md +0 -0
  41. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/rag-tool.md +0 -0
  42. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/task/evaluation.md +0 -0
  43. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/task/index.md +0 -0
  44. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/task/reference.md +0 -0
  45. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/task/response-field.md +0 -0
  46. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/task/task-execution.md +0 -0
  47. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/task/task-output.md +0 -0
  48. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/task/task-strc-response.md +0 -0
  49. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/task-graph/index.md +0 -0
  50. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/core/tool.md +0 -0
  51. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/index.md +0 -0
  52. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/quickstart.md +0 -0
  53. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/stylesheets/main.css +0 -0
  54. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/docs/tags.md +0 -0
  55. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/mkdocs.yml +0 -0
  56. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/requirements-dev.txt +0 -0
  57. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/runtime.txt +0 -0
  58. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/setup.cfg +0 -0
  59. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/_utils/__init__.py +0 -0
  60. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/_utils/i18n.py +0 -0
  61. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/_utils/llm_as_a_judge.py +0 -0
  62. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/_utils/logger.py +0 -0
  63. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/_utils/process_config.py +0 -0
  64. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/_utils/usage_metrics.py +0 -0
  65. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/_utils/vars.py +0 -0
  66. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
  67. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
  68. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/agent/__init__.py +0 -0
  69. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/agent/inhouse_agents.py +0 -0
  70. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/agent/model.py +0 -0
  71. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/agent/parser.py +0 -0
  72. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/agent/rpm_controller.py +0 -0
  73. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/agent_network/__init__.py +0 -0
  74. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/agent_network/formation.py +0 -0
  75. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/agent_network/model.py +0 -0
  76. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/cli/__init__.py +0 -0
  77. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/clients/__init__.py +0 -0
  78. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/clients/customer/__init__.py +0 -0
  79. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/clients/customer/model.py +0 -0
  80. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/clients/product/__init__.py +0 -0
  81. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/clients/product/model.py +0 -0
  82. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/clients/workflow/__init__.py +0 -0
  83. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/knowledge/__init__.py +0 -0
  84. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/knowledge/_utils.py +0 -0
  85. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/knowledge/embedding.py +0 -0
  86. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/knowledge/model.py +0 -0
  87. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/knowledge/source.py +0 -0
  88. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/knowledge/source_docling.py +0 -0
  89. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/knowledge/storage.py +0 -0
  90. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/llm/__init__.py +0 -0
  91. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/llm/llm_vars.py +0 -0
  92. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/llm/model.py +0 -0
  93. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/memory/__init__.py +0 -0
  94. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/memory/contextual_memory.py +0 -0
  95. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/memory/model.py +0 -0
  96. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/storage/__init__.py +0 -0
  97. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/storage/base.py +0 -0
  98. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/storage/mem0_storage.py +0 -0
  99. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/storage/rag_storage.py +0 -0
  100. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/storage/task_output_storage.py +0 -0
  101. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/storage/utils.py +0 -0
  102. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/task/__init__.py +0 -0
  103. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/task/formatter.py +0 -0
  104. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/task/structured_response.py +0 -0
  105. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/task_graph/__init__.py +0 -0
  106. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/task_graph/colors.py +0 -0
  107. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/task_graph/draft.py +0 -0
  108. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/task_graph/model.py +0 -0
  109. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/tool/__init__.py +0 -0
  110. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/tool/cache_handler.py +0 -0
  111. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/tool/composio_tool.py +0 -0
  112. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/tool/composio_tool_vars.py +0 -0
  113. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/tool/decorator.py +0 -0
  114. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/tool/model.py +0 -0
  115. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/tool/rag_tool.py +0 -0
  116. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/tool/tool_handler.py +0 -0
  117. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq.egg-info/SOURCES.txt +0 -0
  118. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq.egg-info/dependency_links.txt +0 -0
  119. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq.egg-info/top_level.txt +0 -0
  120. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/__init__.py +0 -0
  121. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/_sample/sample.csv +0 -0
  122. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/_sample/sample.json +0 -0
  123. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/agent/__init__.py +0 -0
  124. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/agent/agent_test.py +0 -0
  125. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/agent/doc_test.py +0 -0
  126. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/agent_network/Prompts/Demo_test.py +0 -0
  127. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/agent_network/__init__.py +0 -0
  128. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/agent_network/agent_network_test.py +0 -0
  129. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/agent_network/doc_test.py +0 -0
  130. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/cli/__init__.py +0 -0
  131. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/clients/customer_test.py +0 -0
  132. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/clients/product_test.py +0 -0
  133. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/conftest.py +0 -0
  134. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/doc_test.py +0 -0
  135. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/formation_test.py +0 -0
  136. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/knowledge/__init__.py +0 -0
  137. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/knowledge/knowledge_test.py +0 -0
  138. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/knowledge/mock_report_compressed.pdf +0 -0
  139. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/llm/__init__.py +0 -0
  140. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/llm/llm_connection_test.py +0 -0
  141. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/llm/llm_test.py +0 -0
  142. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/memory/__init__.py +0 -0
  143. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/memory/memory_test.py +0 -0
  144. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/task/__init__.py +0 -0
  145. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/task/doc_taskoutput_test.py +0 -0
  146. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/task/doc_test.py +0 -0
  147. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/task/eval_test.py +0 -0
  148. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/task/task_test.py +0 -0
  149. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/task_graph/__init__.py +0 -0
  150. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/task_graph/doc_test.py +0 -0
  151. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/task_graph/task_graph_test.py +0 -0
  152. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/tool/__init__.py +0 -0
  153. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/tool/composio_test.py +0 -0
  154. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/tool/doc_test.py +0 -0
  155. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/tool/rag_tool_test.py +0 -0
  156. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/tool/tool_test.py +0 -0
  157. {versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/usecase_test.py +0 -0
{versionhq-1.2.3.0 → versionhq-1.2.3.4}/.env.sample
@@ -10,8 +10,6 @@ ANTHROPIC_API_KEY=
 HUGGINGFACE_API_BASE=
 HUGGINGFACE_API_KEY=
 
-UPSTAGE_API_KEY=
-
 COMPOSIO_API_KEY=
 COMPOSIO_CLI_KEY=
 
{versionhq-1.2.3.0 → versionhq-1.2.3.4}/.github/workflows/run_tests.yml
@@ -20,6 +20,7 @@ env:
   AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
   AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
   AWS_REGION_NAME: ${{ secrets.AWS_REGION_NAME }}
+  # PYO3_USE_ABI3_FORWARD_COMPATIBILITY: ${{ secrets.PYO3_USE_ABI3_FORWARD_COMPATIBILITY }}
 
 jobs:
   run_test:
@@ -30,17 +31,20 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v4
 
-      - name: Install uv
-        uses: astral-sh/setup-uv@v4
+      - uses: actions/setup-python@v4
         with:
-          version: "0.5.11"
+          python-version: "3.13"
 
-      - name: Set up the project
+      - name: Env set up
        run: |
+          echo "VIRTUAL_ENV=${Python_ROOT_DIR}" >> $GITHUB_ENV
+          pip install --upgrade pip pytest
+          pip install uv
           uv venv
           source .venv/bin/activate
+          uv pip install --upgrade pip pytest
           uv lock --upgrade
           uv sync --all-extras --no-extra pygraphviz
 
-      - name: Run tests
+      - name: Pytest
         run: uv run pytest tests -vv --cache-clear
{versionhq-1.2.3.0 → versionhq-1.2.3.4}/.gitignore
@@ -1,8 +1,9 @@
 deploy.py
 destinations.py
 
-auth/
+_auth/
 
+cli/
 refine.py
 
 train.py
{versionhq-1.2.3.0 → versionhq-1.2.3.4}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.2.3.0
+Version: 1.2.3.4
 Summary: An agentic orchestration framework for building agent networks that handle task automation.
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -48,8 +48,8 @@ Requires-Dist: regex==2024.11.6
 Requires-Dist: requests>=2.32.3
 Requires-Dist: pydantic>=2.10.6
 Requires-Dist: werkzeug>=3.1.3
-Requires-Dist: typing
-Requires-Dist: json-repair
+Requires-Dist: typing>=0.0.0
+Requires-Dist: json-repair>=0.0.0
 Requires-Dist: litellm>=1.55.8
 Requires-Dist: openai>=1.64.0
 Requires-Dist: composio-openai>=0.6.9
@@ -68,27 +68,24 @@ Requires-Dist: composio-core==0.7.0
 Requires-Dist: networkx>=3.4.2
 Requires-Dist: matplotlib>=3.10.0
 Requires-Dist: boto3>=1.37.1
-Provides-Extra: torch
-Requires-Dist: torch>=2.6.0; extra == "torch"
-Requires-Dist: torchvision>=0.21.0; extra == "torch"
-Requires-Dist: pytorch-triton-xpu>=3.2.0; sys_platform == "linux" and extra == "torch"
 Provides-Extra: docling
 Requires-Dist: docling>=2.25.2; extra == "docling"
 Provides-Extra: mem0ai
 Requires-Dist: mem0ai>=0.1.55; extra == "mem0ai"
 Provides-Extra: pdfplumber
 Requires-Dist: pdfplumber>=0.11.5; extra == "pdfplumber"
-Provides-Extra: pandas
-Requires-Dist: pandas>=2.2.3; extra == "pandas"
-Provides-Extra: numpy
-Requires-Dist: numpy>=1.26.4; extra == "numpy"
 Provides-Extra: pygraphviz
 Requires-Dist: pygraphviz>=1.14; extra == "pygraphviz"
 Provides-Extra: tools
 Requires-Dist: html2text>=2024.2.26; extra == "tools"
 Requires-Dist: sec-api>=1.0.28; extra == "tools"
-Provides-Extra: eval
-Requires-Dist: scikit-learn>=1.6.1; extra == "eval"
+Provides-Extra: torch
+Requires-Dist: torch>=2.6.0; extra == "torch"
+Requires-Dist: torchvision>=0.21.0; extra == "torch"
+Provides-Extra: evals
+Requires-Dist: scikit-learn>=1.6.1; extra == "evals"
+Requires-Dist: numpy>=1.26.4; extra == "evals"
+Requires-Dist: pandas>=2.2.3; extra == "evals"
 
 # Overview
 
{versionhq-1.2.3.0 → versionhq-1.2.3.4}/pyproject.toml
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__", "*.egg-info"]
 
 [project]
 name = "versionhq"
-version = "1.2.3.0"
+version = "1.2.3.4"
 authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
 description = "An agentic orchestration framework for building agent networks that handle task automation."
 readme = "README.md"
@@ -27,8 +27,8 @@ dependencies = [
   "requests>=2.32.3",
   "pydantic>=2.10.6",
   "werkzeug>=3.1.3",
-  "typing",
-  "json-repair",
+  "typing>=0.0.0",
+  "json-repair>=0.0.0",
   "litellm>=1.55.8",
   "openai>=1.64.0",
   "composio-openai>=0.6.9",
@@ -46,7 +46,7 @@ dependencies = [
   "composio-core==0.7.0",
   "networkx>=3.4.2",
   "matplotlib>=3.10.0",
-  "boto3>=1.37.1",
+  "boto3>=1.37.1"
 ]
 classifiers = [
   "Programming Language :: Python",
@@ -69,11 +69,6 @@ Repository = "https://github.com/versionHQ/multi-agent-system"
 Issues = "https://github.com/versionHQ/multi-agent-system/issues"
 
 [project.optional-dependencies]
-torch = [
-  "torch>=2.6.0",
-  "torchvision>=0.21.0",
-  "pytorch-triton-xpu>=3.2.0 ; sys_platform == 'linux'",
-]
 docling = [
   "docling>=2.25.2",
 ]
@@ -83,12 +78,6 @@ mem0ai = [
 pdfplumber = [
   "pdfplumber>=0.11.5",
 ]
-pandas = [
-  "pandas>=2.2.3",
-]
-numpy = [
-  "numpy>=1.26.4",
-]
 pygraphviz = [
   "pygraphviz>=1.14",
 ]
@@ -96,8 +85,14 @@ tools = [
   "html2text>=2024.2.26",
   "sec-api>=1.0.28",
 ]
-eval = [
+torch = [
+  "torch>=2.6.0",
+  "torchvision>=0.21.0",
+]
+evals = [
   "scikit-learn>=1.6.1",
+  "numpy>=1.26.4",
+  "pandas>=2.2.3",
 ]
 
 [tool.uv]
@@ -105,8 +100,8 @@ dev-dependencies = [
   "mypy>=1.10.0",
   "pre-commit>=4.0.1",
   "pytest-vcr>=1.0.2",
-  "black",
-  "bandit",
+  "black>=0.0.0",
+  "bandit>=0.0.0",
   "pytest>=8.3.4",
   "mkdocs>=1.6.1",
   "mkdocs-material>=9.6.2",
@@ -118,18 +113,22 @@ dev-dependencies = [
 
 [tool.uv.sources]
 torch = [
-  { index = "pytorch-xpu", marker = "sys_platform == 'win32' or sys_platform == 'linux'" },
+  { index = "pytorch-cpu", extra = "torch", marker = "sys_platform != 'win32' and sys_platform != 'linux'" },
+  { index = "pytorch-cu124", extra = "torch", marker = "sys_platform == 'win32' or sys_platform == 'linux'" },
 ]
 torchvision = [
-  { index = "pytorch-xpu", marker = "sys_platform == 'win32' or sys_platform == 'linux'" },
-]
-pytorch-triton-xpu = [
-  { index = "pytorch-xpu", marker = "sys_platform == 'linux'" },
+  { index = "pytorch-cpu", extra = "torch", marker = "sys_platform != 'win32' and sys_platform != 'linux'" },
+  { index = "pytorch-cu124", extra = "torch", marker = "sys_platform == 'win32' or sys_platform == 'linux'" },
 ]
 
 [[tool.uv.index]]
-name = "pytorch-xpu"
-url = "https://download.pytorch.org/whl/xpu"
+name = "pytorch-cpu"
+url = "https://download.pytorch.org/whl/cpu"
+explicit = true
+
+[[tool.uv.index]]
+name = "pytorch-cu124"
+url = "https://download.pytorch.org/whl/cu124"
 explicit = true
 
 [tool.uv.workspace]
{versionhq-1.2.3.0 → versionhq-1.2.3.4}/requirements.txt
@@ -21,3 +21,4 @@ envoy>=0.0.3
 composio-core==0.7.0
 networkx>=3.4.2
 matplotlib>=3.10.0
+boto3>=1.37.1
{versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/__init__.py
@@ -32,7 +32,7 @@ from versionhq.agent_network.formation import form_agent_network
 from versionhq.task_graph.draft import workflow
 
 
-__version__ = "1.2.3.0"
+__version__ = "1.2.3.4"
 __all__ = [
     "Agent",
 
{versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/clients/workflow/model.py
@@ -2,92 +2,25 @@ import uuid
 from abc import ABC
 from datetime import datetime
 from typing import Any, Dict, List, Optional
-from typing_extensions import Self
+
 from pydantic import UUID4, InstanceOf, BaseModel, ConfigDict, Field, field_validator, model_validator
 from pydantic_core import PydanticCustomError
 
-from versionhq.clients.product.model import Product
-from versionhq.clients.customer.model import Customer
 from versionhq.agent.model import Agent
 from versionhq.agent_network.model import AgentNetwork
+from versionhq.clients.product.model import Product
+from versionhq.clients.customer.model import Customer
 from versionhq.tool.composio_tool_vars import ComposioAppName
 
 
-class ScoreFormat:
-    def __init__(self, rate: float | int = 0, weight: int = 1):
-        self.rate = rate
-        self.weight = weight
-        self.aggregate = rate * weight
-
-
-class Score:
-    """
-    Evaluate the score on 0 (no performance) to 1 scale.
-    `rate`: Any float from 0.0 to 1.0 given by an agent.
-    `weight`: Importance of each factor to the aggregated score.
-    """
-
-    def __init__(
-        self,
-        brand_tone: ScoreFormat = ScoreFormat(0, 0),
-        audience: ScoreFormat = ScoreFormat(0, 0),
-        track_record: ScoreFormat = ScoreFormat(0, 0),
-        **kwargs: Optional[Dict[str, ScoreFormat]],
-    ):
-        self.brand_tone = brand_tone
-        self.audience = audience
-        self.track_record = track_record
-        self.kwargs = kwargs
-
-
-    def result(self) -> int:
-        aggregate_score = int(self.brand_tone.aggregate) + int(self.audience.aggregate) + int(self.track_record.aggregate)
-        denominator = self.brand_tone.weight + self.audience.weight + self.track_record.weight
-
-        for k, v in self.kwargs.items():
-            aggregate_score += v.aggregate
-            denominator += v.weight
-
-        if denominator == 0:
-            return 0
-
-        return round(aggregate_score / denominator, 2)
-
-
-
 class MessagingComponent(ABC, BaseModel):
     layer_id: int = Field(default=0, description="add id of the layer: 0, 1, 2")
     message: str = Field(default=None, max_length=1024, description="text message content to be sent")
-    score: InstanceOf[Score] = Field(default=None)
+    score: Optional[float | int] = Field(default=None)
     condition: str = Field(default=None, description="condition to execute the next component")
     interval: Optional[str] = Field(default=None, description="ideal interval to set to assess the condition")
 
 
-    def store_scoring_result(self, subject: str, score_raw: int | Score | ScoreFormat = None) -> Self:
-        """
-        Set up the `score` field
-        """
-
-        if isinstance(score_raw, Score):
-            setattr(self, "score", score_raw)
-
-        elif isinstance(score_raw, ScoreFormat):
-            score_instance = Score()
-            setattr(score_instance, subject, score_raw)
-            setattr(self, "score", score_instance)
-
-        elif isinstance(score_raw, int) or isinstance(score_raw, float):
-            score_instance, score_format_instance = Score(), ScoreFormat(rate=score_raw, weight=1)
-            setattr(score_instance, "kwargs", { subject: score_format_instance })
-            setattr(self, "score", score_instance)
-
-        else:
-            pass
-
-        return self
-
-
-
 class MessagingWorkflow(ABC, BaseModel):
     """
     Store 3 layers of messaging workflow sent to `customer` on the `product`
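
With the bespoke `Score`/`ScoreFormat` machinery removed, `MessagingComponent.score` is now a plain number. A minimal before/after sketch, assuming the import path shown in the diff; the values are hypothetical:

```python
# Sketch only: hypothetical values; import path taken from the diff above.
from versionhq.clients.workflow.model import MessagingComponent

component = MessagingComponent(message="demo", score=0.8)
print(component.score)  # 0.8

# In 1.2.3.0 the same result required the removed helper, roughly:
#   component.store_scoring_result("demo", score_raw=0.8)
#   component.score.result()
```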
{versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/storage/ltm_sqlite_storage.py
@@ -97,7 +97,7 @@ class LTMSQLiteStorage:
             ]
 
         except sqlite3.Error as e:
-            self._logger.log(level="error", message=f"MEMORY ERROR: An error occurred while querying LTM: {str(e)}",color="red")
+            self._logger.log(level="error", message=f"MEMORY ERROR: An error occurred while querying LTM: {str(e)}", color="red")
         return None
 
 
{versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/task/TEMPLATES/Description.py
@@ -1,4 +1,4 @@
-EVALUATE="""Evaluate the provided task output against the given task description, assigning a score between 0 (worst) and 1 (best) based on the specified criteria. Scores should be numerical (integers or decimals). Provide specific suggestions for improvement. Do not assign identical scores to different criteria unless otherwise you have clear reasons to do so:
+EVALUATE="""Evaluate the provided task output against the given task description, assigning a score between 0 (worst) and 1 (best) based on the specified criteria. Scores should be numerical (integers or decimals). Weight should be numerical (integers or decimals) and represents importance of the criteria to the final result. Provide specific suggestions for improvement. Do not assign identical scores to different criteria unless otherwise you have clear reasons to do so:
 Task output: {task_output}
 Task description: {task_description}
 Evaluation criteria: {eval_criteria}
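
For context, `EVALUATE` is a plain format string, so call sites presumably render it along these lines; the argument values here are hypothetical:

```python
# Sketch only: hypothetical inputs for the EVALUATE format string above.
from versionhq.task.TEMPLATES.Description import EVALUATE

prompt = EVALUATE.format(
    task_output='{"headline": "Launch day!"}',
    task_description="Draft a headline for the product launch.",
    eval_criteria="clarity, fit to audience",
)
print(prompt)
```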
{versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/task/evaluation.py
@@ -1,64 +1,22 @@
-from typing import List, Optional, Dict, Any
+from typing import List, Any
 from typing_extensions import Self
 
 from pydantic import BaseModel, model_validator
+import pandas as pd
+from sklearn.preprocessing import MinMaxScaler
 
 from versionhq.memory.model import MemoryMetadata
 
-"""
-Evaluate task output from accuracy, token consumption, and latency perspectives, and mark the score from 0 to 1.
-"""
-
-
-class ScoreFormat:
-    def __init__(self, rate: float | int = 0, weight: int = 1):
-        self.rate = rate
-        self.weight = weight
-        self.aggregate = rate * weight
-
-
-class Score:
-    """
-    Evaluate the score on 0 (no performance) to 1 scale.
-    `rate`: Any float from 0.0 to 1.0 given by an agent.
-    `weight`: Importance of each factor to the aggregated score.
-    """
-
-    def __init__(self, config: Optional[Dict[str, ScoreFormat]] = None):
-        self.config = config
-
-        if self.config:
-            for k, v in self.config.items():
-                if isinstance(v, ScoreFormat):
-                    setattr(self, k, v)
-
-
-    def result(self) -> float:
-        aggregate_score, denominator = 0, 0
-
-        for k, v in self.__dict__.items():
-            aggregate_score += v.aggregate
-            denominator += v.weight
-
-        if denominator == 0:
-            return 0
-
-        return round(aggregate_score / denominator, 3)
-
 
 class EvaluationItem(BaseModel):
     """
     A Pydantic class to store the evaluation result with scoring and suggestion based on the given criteria.
+    This class will be used as a response format for the eval task.
     """
     criteria: str
     suggestion: str
     score: float
-
-    def _format_score(self, weight: int = 1) -> ScoreFormat | None:
-        if self.score and isinstance(self.score, float):
-            return ScoreFormat(rate=self.score, weight=weight)
-
-        else: return None
+    weight: int = 1
 
 
 class Evaluation(BaseModel):
@@ -111,33 +69,43 @@ class Evaluation(BaseModel):
         return shot_prompt
 
 
-    @property
-    def aggregate_score(self) -> float:
+    def _normalize_df(self) -> pd.DataFrame:
         """
-        Calcurate aggregate score from evaluation items.
+        Creates a pandas DataFrame from a list of EvaluationItem objects containing 'weight' and 'score' columns, and normalizes them using MinMaxScaler.
+
+        Args:
+            items: A list of EvaluationItem objects.
+
+        Returns:
+            A pandas DataFrame with normalized 'weight' and 'score' columns, or an empty DataFrame if the input is empty.
         """
         if not self.items:
-            return 0
+            return pd.DataFrame()
 
-        aggregate_score = 0
-        denominator = 0
+        data = { 'weight': [item.weight for item in self.items], 'score': [item.score for item in self.items] }
+        df = pd.DataFrame(data)
 
-        for item in self.items:
-            score_format = item._format_score()
-            aggregate_score += score_format.aggregate if score_format else 0
-            denominator += score_format.weight if score_format else 0
+        scaler = MinMaxScaler(feature_range=(0, 1))
+        df[['weight', 'score']] = scaler.fit_transform(df[['weight', 'score']])
 
-        if denominator == 0:
+        return df
+
+
+    @property
+    def aggregate_score(self) -> int | float:
+        if not self.items:
             return 0
 
-        return round(aggregate_score / denominator, 2)
+        df = self._normalize_df()
+        df['weighted_score'] = df['weight'] * df['score']
+        aggregate_score = round(df['weighted_score'].sum(), 3)
+        return aggregate_score
 
 
     @property
     def suggestion_summary(self) -> str | None:
-        """
-        Returns a summary of the suggestions
-        """
+        """Returns a summary of the suggestions"""
+
        if not self.items:
             return None
 
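The aggregate is therefore no longer a weight-normalized average: both columns are min-max scaled to [0, 1] across items, and the products of the scaled values are summed. A standalone sketch of the same arithmetic with hypothetical items:

```python
# Sketch only: reproduces _normalize_df() + aggregate_score with made-up items.
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

items = [
    {"criteria": "clarity", "score": 0.9, "weight": 2},
    {"criteria": "fit to audience", "score": 0.6, "weight": 1},
]
df = pd.DataFrame({"weight": [i["weight"] for i in items],
                   "score": [i["score"] for i in items]})
df[["weight", "score"]] = MinMaxScaler(feature_range=(0, 1)).fit_transform(df[["weight", "score"]])
print(round((df["weight"] * df["score"]).sum(), 3))  # 1.0 for these two items
```

Note that min-max scaling maps a constant column to zero, so a single-item evaluation appears to aggregate to 0 under this scheme.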
{versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq/task/model.py
@@ -228,13 +228,21 @@ class TaskOutput(BaseModel):
             self._tokens += task_eval._tokens
 
             if res.pydantic:
-                item = EvaluationItem(score=res.pydantic.score, suggestion=res.pydantic.suggestion, criteria=res.pydantic.criteria)
+                item = EvaluationItem(
+                    score=res.pydantic.score,
+                    weight=res.pydantic.weight,
+                    suggestion=res.pydantic.suggestion,
+                    criteria=res.pydantic.criteria
+                )
                 self.evaluation.items.append(item)
 
             else:
                 try:
                     item = EvaluationItem(
-                        score=float(res.json_dict["score"]), suggestion=res.json_dict["suggestion"], criteria=res.json_dict["criteria"]
+                        score=float(res.json_dict["score"]),
+                        weight=float(res.json_dict["weight"]),
+                        suggestion=res.json_dict["suggestion"],
+                        criteria=res.json_dict["criteria"]
                     )
                     self.evaluation.items.append(item)
                 except Exception as e:
@@ -246,10 +254,7 @@ class TaskOutput(BaseModel):
 
     @property
     def aggregate_score(self) -> float | int:
-        if self.evaluation is None:
-            return 0
-        else:
-            self.evaluation.aggregate_score
+        return self.evaluation.aggregate_score if self.evaluation is not None else 0
 
 
     @property
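
The second hunk fixes a silent bug: the old property computed `self.evaluation.aggregate_score` but never returned it, so it yielded `None` whenever an evaluation existed. A minimal repro of the failure mode:

```python
# Sketch only: illustrates why the missing `return` made the property yield None.
class Broken:
    @property
    def aggregate_score(self):
        1.0 + 2.0  # computed, then discarded

class Fixed:
    @property
    def aggregate_score(self):
        return 1.0 + 2.0

print(Broken().aggregate_score)  # None
print(Fixed().aggregate_score)   # 3.0
```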
{versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.2.3.0
+Version: 1.2.3.4
 Summary: An agentic orchestration framework for building agent networks that handle task automation.
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -48,8 +48,8 @@ Requires-Dist: regex==2024.11.6
 Requires-Dist: requests>=2.32.3
 Requires-Dist: pydantic>=2.10.6
 Requires-Dist: werkzeug>=3.1.3
-Requires-Dist: typing
-Requires-Dist: json-repair
+Requires-Dist: typing>=0.0.0
+Requires-Dist: json-repair>=0.0.0
 Requires-Dist: litellm>=1.55.8
 Requires-Dist: openai>=1.64.0
 Requires-Dist: composio-openai>=0.6.9
@@ -68,27 +68,24 @@ Requires-Dist: composio-core==0.7.0
 Requires-Dist: networkx>=3.4.2
 Requires-Dist: matplotlib>=3.10.0
 Requires-Dist: boto3>=1.37.1
-Provides-Extra: torch
-Requires-Dist: torch>=2.6.0; extra == "torch"
-Requires-Dist: torchvision>=0.21.0; extra == "torch"
-Requires-Dist: pytorch-triton-xpu>=3.2.0; sys_platform == "linux" and extra == "torch"
 Provides-Extra: docling
 Requires-Dist: docling>=2.25.2; extra == "docling"
 Provides-Extra: mem0ai
 Requires-Dist: mem0ai>=0.1.55; extra == "mem0ai"
 Provides-Extra: pdfplumber
 Requires-Dist: pdfplumber>=0.11.5; extra == "pdfplumber"
-Provides-Extra: pandas
-Requires-Dist: pandas>=2.2.3; extra == "pandas"
-Provides-Extra: numpy
-Requires-Dist: numpy>=1.26.4; extra == "numpy"
 Provides-Extra: pygraphviz
 Requires-Dist: pygraphviz>=1.14; extra == "pygraphviz"
 Provides-Extra: tools
 Requires-Dist: html2text>=2024.2.26; extra == "tools"
 Requires-Dist: sec-api>=1.0.28; extra == "tools"
-Provides-Extra: eval
-Requires-Dist: scikit-learn>=1.6.1; extra == "eval"
+Provides-Extra: torch
+Requires-Dist: torch>=2.6.0; extra == "torch"
+Requires-Dist: torchvision>=0.21.0; extra == "torch"
+Provides-Extra: evals
+Requires-Dist: scikit-learn>=1.6.1; extra == "evals"
+Requires-Dist: numpy>=1.26.4; extra == "evals"
+Requires-Dist: pandas>=2.2.3; extra == "evals"
 
 # Overview
 
{versionhq-1.2.3.0 → versionhq-1.2.3.4}/src/versionhq.egg-info/requires.txt
@@ -2,8 +2,8 @@ regex==2024.11.6
 requests>=2.32.3
 pydantic>=2.10.6
 werkzeug>=3.1.3
-typing
-json-repair
+typing>=0.0.0
+json-repair>=0.0.0
 litellm>=1.55.8
 openai>=1.64.0
 composio-openai>=0.6.9
@@ -26,18 +26,14 @@ boto3>=1.37.1
 [docling]
 docling>=2.25.2
 
-[eval]
+[evals]
 scikit-learn>=1.6.1
+numpy>=1.26.4
+pandas>=2.2.3
 
 [mem0ai]
 mem0ai>=0.1.55
 
-[numpy]
-numpy>=1.26.4
-
-[pandas]
-pandas>=2.2.3
-
 [pdfplumber]
 pdfplumber>=0.11.5
 
@@ -51,6 +47,3 @@ sec-api>=1.0.28
 [torch]
 torch>=2.6.0
 torchvision>=0.21.0
-
-[torch:sys_platform == "linux"]
-pytorch-triton-xpu>=3.2.0
{versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/clients/workflow_test.py
@@ -10,23 +10,8 @@ def test_store_scores():
     Test if the final result will be calcurated using a random subject
     """
 
-    messaging_component = MessagingComponent(message="demo")
-    score_raw = 15
-    messaging_component.store_scoring_result("demo", score_raw=score_raw)
-
-    assert messaging_component.score is not None
-    assert messaging_component.score.result() is not None
-
-
-def test_score_result():
-    messaging_component = MessagingComponent(message="demo")
-    score_raw = 15
-    messaging_component.store_scoring_result("demo", score_raw=score_raw)
-
-    result = messaging_component.score.result()
-
-    assert result is not None
-    assert result != 0
+    messaging_component = MessagingComponent(message="demo", score=15)
+    assert messaging_component.score == 15
 
 
 def test_setup_messaging_workflow_with_anonymous_provider():
{versionhq-1.2.3.0 → versionhq-1.2.3.4}/tests/task/doc_eval_test.py
@@ -16,8 +16,6 @@ def test_eval():
     res = task.execute()
 
     assert isinstance(res.evaluation, vhq.Evaluation)
-    assert [item for item in res.evaluation.items if item.criteria == "Uniquness" or item.criteria == "Fit to audience"]
+    assert len(res.evaluation.items) == 2
     assert res.evaluation.aggregate_score is not None
     assert res.evaluation.suggestion_summary is not None
-
-test_eval()