versionhq 1.2.2.4__tar.gz → 1.2.2.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (156)
  1. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/.env.sample +4 -0
  2. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/.github/workflows/run_tests.yml +3 -0
  3. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/PKG-INFO +6 -8
  4. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/README.md +4 -7
  5. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/core/agent/config.md +5 -10
  6. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/core/agent/index.md +1 -1
  7. versionhq-1.2.2.6/docs/core/llm/index.md +73 -0
  8. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/core/task/task-strc-response.md +12 -11
  9. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/index.md +2 -5
  10. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/quickstart.md +1 -1
  11. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/pyproject.toml +2 -1
  12. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/__init__.py +1 -1
  13. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/agent/inhouse_agents.py +1 -1
  14. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/agent/model.py +20 -46
  15. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/llm/llm_vars.py +33 -68
  16. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/llm/model.py +62 -45
  17. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/task/model.py +3 -3
  18. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq.egg-info/PKG-INFO +6 -8
  19. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq.egg-info/SOURCES.txt +3 -2
  20. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq.egg-info/requires.txt +1 -0
  21. versionhq-1.2.2.6/tests/__init__.py +39 -0
  22. versionhq-1.2.2.6/tests/_sample/sample.csv +241 -0
  23. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/agent/agent_test.py +31 -44
  24. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/agent/doc_test.py +6 -23
  25. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/doc_test.py +3 -3
  26. {versionhq-1.2.2.4/tests/cli → versionhq-1.2.2.6/tests/knowledge}/__init__.py +0 -0
  27. {versionhq-1.2.2.4/tests/knowledge → versionhq-1.2.2.6/tests/llm}/__init__.py +0 -0
  28. versionhq-1.2.2.6/tests/llm/llm_connection_test.py +66 -0
  29. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/llm/llm_test.py +8 -0
  30. {versionhq-1.2.2.4/tests/llm → versionhq-1.2.2.6/tests/memory}/__init__.py +0 -0
  31. {versionhq-1.2.2.4/tests/memory → versionhq-1.2.2.6/tests/task}/__init__.py +0 -0
  32. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/task/doc_test.py +14 -59
  33. {versionhq-1.2.2.4/tests/task → versionhq-1.2.2.6/tests/task_graph}/__init__.py +0 -0
  34. {versionhq-1.2.2.4/tests/task_graph → versionhq-1.2.2.6/tests/tool}/__init__.py +0 -0
  35. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/usecase_test.py +16 -13
  36. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/uv.lock +93 -40
  37. versionhq-1.2.2.4/docs/core/llm/index.md +0 -103
  38. versionhq-1.2.2.4/tests/task/llm_connection_test.py +0 -106
  39. versionhq-1.2.2.4/tests/tool/__init__.py +0 -0
  40. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/.github/workflows/deploy_docs.yml +0 -0
  41. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/.github/workflows/publish.yml +0 -0
  42. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/.github/workflows/publish_testpypi.yml +0 -0
  43. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/.github/workflows/security_check.yml +0 -0
  44. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/.gitignore +0 -0
  45. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/.pre-commit-config.yaml +0 -0
  46. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/.python-version +0 -0
  47. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/LICENSE +0 -0
  48. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/SECURITY.md +0 -0
  49. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/db/preprocess.py +0 -0
  50. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/CNAME +0 -0
  51. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/_logos/favicon.ico +0 -0
  52. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/_logos/logo192.png +0 -0
  53. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/core/agent/task-handling.md +0 -0
  54. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/core/agent-network/config.md +0 -0
  55. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/core/agent-network/form.md +0 -0
  56. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/core/agent-network/index.md +0 -0
  57. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/core/agent-network/ref.md +0 -0
  58. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/core/task/evaluation.md +0 -0
  59. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/core/task/index.md +0 -0
  60. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/core/task/response-field.md +0 -0
  61. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/core/task/task-execution.md +0 -0
  62. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/core/task/task-output.md +0 -0
  63. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/core/task/task-ref.md +0 -0
  64. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/core/task-graph/index.md +0 -0
  65. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/core/tool.md +0 -0
  66. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/stylesheets/main.css +0 -0
  67. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/docs/tags.md +0 -0
  68. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/mkdocs.yml +0 -0
  69. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/requirements-dev.txt +0 -0
  70. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/requirements.txt +0 -0
  71. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/runtime.txt +0 -0
  72. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/setup.cfg +0 -0
  73. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/_utils/__init__.py +0 -0
  74. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/_utils/i18n.py +0 -0
  75. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/_utils/llm_as_a_judge.py +0 -0
  76. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/_utils/logger.py +0 -0
  77. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/_utils/process_config.py +0 -0
  78. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/_utils/usage_metrics.py +0 -0
  79. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/_utils/vars.py +0 -0
  80. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
  81. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
  82. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/agent/__init__.py +0 -0
  83. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/agent/parser.py +0 -0
  84. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/agent/rpm_controller.py +0 -0
  85. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/agent_network/__init__.py +0 -0
  86. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/agent_network/formation.py +0 -0
  87. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/agent_network/model.py +0 -0
  88. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/cli/__init__.py +0 -0
  89. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/clients/__init__.py +0 -0
  90. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/clients/customer/__init__.py +0 -0
  91. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/clients/customer/model.py +0 -0
  92. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/clients/product/__init__.py +0 -0
  93. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/clients/product/model.py +0 -0
  94. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/clients/workflow/__init__.py +0 -0
  95. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/clients/workflow/model.py +0 -0
  96. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/knowledge/__init__.py +0 -0
  97. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/knowledge/_utils.py +0 -0
  98. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/knowledge/embedding.py +0 -0
  99. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/knowledge/model.py +0 -0
  100. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/knowledge/source.py +0 -0
  101. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/knowledge/source_docling.py +0 -0
  102. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/knowledge/storage.py +0 -0
  103. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/llm/__init__.py +0 -0
  104. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/memory/__init__.py +0 -0
  105. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/memory/contextual_memory.py +0 -0
  106. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/memory/model.py +0 -0
  107. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/storage/__init__.py +0 -0
  108. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/storage/base.py +0 -0
  109. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/storage/ltm_sqlite_storage.py +0 -0
  110. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/storage/mem0_storage.py +0 -0
  111. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/storage/rag_storage.py +0 -0
  112. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/storage/task_output_storage.py +0 -0
  113. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/storage/utils.py +0 -0
  114. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/task/TEMPLATES/Description.py +0 -0
  115. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/task/__init__.py +0 -0
  116. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/task/evaluation.py +0 -0
  117. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/task/formatter.py +0 -0
  118. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/task/structured_response.py +0 -0
  119. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/task_graph/__init__.py +0 -0
  120. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/task_graph/colors.py +0 -0
  121. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/task_graph/draft.py +0 -0
  122. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/task_graph/model.py +0 -0
  123. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/tool/__init__.py +0 -0
  124. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/tool/cache_handler.py +0 -0
  125. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/tool/composio_tool.py +0 -0
  126. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/tool/composio_tool_vars.py +0 -0
  127. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/tool/decorator.py +0 -0
  128. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/tool/model.py +0 -0
  129. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/tool/rag_tool.py +0 -0
  130. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq/tool/tool_handler.py +0 -0
  131. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq.egg-info/dependency_links.txt +0 -0
  132. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/src/versionhq.egg-info/top_level.txt +0 -0
  133. {versionhq-1.2.2.4/tests → versionhq-1.2.2.6/tests/_sample}/sample.json +0 -0
  134. {versionhq-1.2.2.4/tests → versionhq-1.2.2.6/tests/agent}/__init__.py +0 -0
  135. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/agent_network/Prompts/Demo_test.py +0 -0
  136. {versionhq-1.2.2.4/tests/agent → versionhq-1.2.2.6/tests/agent_network}/__init__.py +0 -0
  137. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/agent_network/agent_network_test.py +0 -0
  138. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/agent_network/doc_test.py +0 -0
  139. {versionhq-1.2.2.4/tests/agent_network → versionhq-1.2.2.6/tests/cli}/__init__.py +0 -0
  140. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/clients/customer_test.py +0 -0
  141. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/clients/product_test.py +0 -0
  142. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/clients/workflow_test.py +0 -0
  143. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/conftest.py +0 -0
  144. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/formation_test.py +0 -0
  145. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/knowledge/knowledge_test.py +0 -0
  146. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/knowledge/mock_report_compressed.pdf +0 -0
  147. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/memory/memory_test.py +0 -0
  148. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/task/doc_taskoutput_test.py +0 -0
  149. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/task/eval_test.py +0 -0
  150. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/task/task_test.py +0 -0
  151. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/task_graph/doc_test.py +0 -0
  152. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/task_graph/task_graph_test.py +0 -0
  153. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/tool/composio_test.py +0 -0
  154. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/tool/doc_test.py +0 -0
  155. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/tool/rag_tool_test.py +0 -0
  156. {versionhq-1.2.2.4 → versionhq-1.2.2.6}/tests/tool/tool_test.py +0 -0
.env.sample
@@ -16,3 +16,7 @@ COMPOSIO_API_KEY=
 COMPOSIO_CLI_KEY=
 
 MEM0_API_KEY=
+
+AWS_ACCESS_KEY_ID=
+AWS_SECRET_ACCESS_KEY=
+AWS_REGION_NAME=
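The three AWS variables added here back the new Bedrock support that appears later in this diff. A minimal sketch of how they would be consumed, assuming LiteLLM's Bedrock backend reads the standard AWS variables from the environment:

```python
import os
import versionhq as vhq

# Assumption: the Bedrock backend picks these up from the environment,
# so they must be exported before the agent runs.
for var in ("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"):
    if not os.environ.get(var):
        raise RuntimeError(f"{var} is not set")

# Model id taken from the MODELS["bedrock"] list added in this release.
agent = vhq.Agent(role="demo", llm="bedrock/amazon.titan-text-lite-v1")
```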
.github/workflows/run_tests.yml
@@ -17,6 +17,9 @@ env:
   DEFAULT_REDIRECT_URL: ${{ secrets.DEFAULT_REDIRECT_URL }}
   DEFAULT_USER_ID: ${{ secrets.DEFAULT_USER_ID }}
   MEM0_API_KEY: ${{ secrets.MEM0_API_KEY }}
+  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+  AWS_REGION_NAME: ${{ secrets.AWS_REGION_NAME }}
 
 jobs:
   run_test:
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.2.2.4
+Version: 1.2.2.6
 Summary: An agentic orchestration framework for building agent networks that handle task automation.
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -66,6 +66,7 @@ Requires-Dist: envoy>=0.0.3
 Requires-Dist: composio-core==0.7.0
 Requires-Dist: networkx>=3.4.2
 Requires-Dist: matplotlib>=3.10.0
+Requires-Dist: boto3>=1.37.1
 Provides-Extra: docling
 Requires-Dist: docling>=2.17.0; extra == "docling"
 Provides-Extra: mem0ai
@@ -141,7 +142,7 @@ Agentic orchestration framework for multi-agent networks and task graphs for com
 
 `versionhq` is a Python framework for agent networks that handle complex task automation without human interaction.
 
-Agents are model-agnostic, and will improve task output, while oprimizing token cost and job latency, by sharing their memory, knowledge base, and RAG tools with other agents in the network.
+Agents are model-agnostic, and will improve task output, while optimizing token cost and job latency, by sharing their memory, knowledge base, and RAG tools with other agents in the network.
 
 
 ### Agent Network
@@ -227,10 +228,7 @@ The following code snippet demonstrates agent customization:
 ```python
 import versionhq as vhq
 
-agent = vhq.Agent(
-    role="Marketing Analyst",
-    goal="my amazing goal"
-) # assuming this agent was created during the network formation
+agent = vhq.Agent(role="Marketing Analyst")
 
 # update the agent
 agent.update(
@@ -321,8 +319,8 @@ To create an agent network with one or more manager agents, designate members us
 ```python
 import versionhq as vhq
 
-agent_a = vhq.Agent(role="agent a", goal="My amazing goals", llm="llm-of-your-choice")
-agent_b = vhq.Agent(role="agent b", goal="My amazing goals", llm="llm-of-your-choice")
+agent_a = vhq.Agent(role="agent a", llm="llm-of-your-choice")
+agent_b = vhq.Agent(role="agent b", llm="llm-of-your-choice")
 
 task_1 = vhq.Task(
     description="Analyze the client's business model.",
README.md
@@ -55,7 +55,7 @@ Agentic orchestration framework for multi-agent networks and task graphs for com
 
 `versionhq` is a Python framework for agent networks that handle complex task automation without human interaction.
 
-Agents are model-agnostic, and will improve task output, while oprimizing token cost and job latency, by sharing their memory, knowledge base, and RAG tools with other agents in the network.
+Agents are model-agnostic, and will improve task output, while optimizing token cost and job latency, by sharing their memory, knowledge base, and RAG tools with other agents in the network.
 
 
 ### Agent Network
@@ -141,10 +141,7 @@ The following code snippet demonstrates agent customization:
 ```python
 import versionhq as vhq
 
-agent = vhq.Agent(
-    role="Marketing Analyst",
-    goal="my amazing goal"
-) # assuming this agent was created during the network formation
+agent = vhq.Agent(role="Marketing Analyst")
 
 # update the agent
 agent.update(
@@ -235,8 +232,8 @@ To create an agent network with one or more manager agents, designate members us
 ```python
 import versionhq as vhq
 
-agent_a = vhq.Agent(role="agent a", goal="My amazing goals", llm="llm-of-your-choice")
-agent_b = vhq.Agent(role="agent b", goal="My amazing goals", llm="llm-of-your-choice")
+agent_a = vhq.Agent(role="agent a", llm="llm-of-your-choice")
+agent_b = vhq.Agent(role="agent b", llm="llm-of-your-choice")
 
 task_1 = vhq.Task(
     description="Analyze the client's business model.",
docs/core/agent/config.md
@@ -11,11 +11,7 @@ By default, when the model provider name is provided, we will select the most co
 ```python
 import versionhq as vhq
 
-agent = vhq.Agent(
-    role="Marketing Analyst",
-    goal="Coping with price competition in saturated markets",
-    llm="gemini-2.0"
-)
+agent = vhq.Agent(role="Marketing Analyst", llm="gemini-2.0")
 ```
 
 <hr/>
@@ -34,7 +30,6 @@ import versionhq as vhq
 
 agent = vhq.Agent(
     role="Marketing Analyst",
-    goal="Coping with increased price competition in saturated markets.",
     respect_context_window=False,
     max_execution_time=60,
     max_rpm=5,
@@ -48,10 +43,10 @@ agent = vhq.Agent(
 )
 
 assert isinstance(agent.llm, vhq.LLM)
-assert agent.llm.temperature == 1
-assert agent.llm.top_p == 0.1
-assert agent.llm.n == 1
-assert agent.llm.stop == "answer"
+assert agent.llm.llm_config["temperature"] == 1
+assert agent.llm.llm_config["top_p"] == 0.1
+assert agent.llm.llm_config["n"] == 1
+assert agent.llm.llm_config["stop"] == "answer"
 ```
 
 <hr>
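The rewritten assertions reflect that tuning parameters now live in `agent.llm.llm_config` rather than as top-level `LLM` attributes. A sketch of the updated pattern, assuming `llm_config` passes through to the `LLM` object unchanged:

```python
import versionhq as vhq

# Sketch: tuning params travel as a dict and surface on the LLM
# object via llm_config, per the updated doc assertions.
agent = vhq.Agent(
    role="Marketing Analyst",
    llm_config=dict(temperature=1, top_p=0.1, n=1, stop="answer"),
)
assert agent.llm.llm_config["temperature"] == 1
assert agent.llm.llm_config["stop"] == "answer"
```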
docs/core/agent/index.md
@@ -7,7 +7,7 @@ tags:
 
 <class>`class` versionhq.agent.model.<bold>Agent<bold></class>
 
-A Pydantic class to store `Agent` objects and handle `Task` execution as well as `LLM` configuration.
+A Pydantic class to store an `Agent` object that handles `Task` execution.
 
 
 ## Quick Start
docs/core/llm/index.md (new file)
@@ -0,0 +1,73 @@
+---
+tags:
+  - Agent Network
+---
+
+# LLM
+
+<class>`class` versionhq.llm.model.<bold>LLM<bold></class>
+
+A Pydantic class to store LLM objects and its task handling rules.
+
+You can specify a model and integration platform from the list. Else, we'll use `gemini` or `gpt` via `LiteLLM` by default.
+
+
+**List of available models**
+
+```python
+"openai": [
+    "gpt-4",
+    "gpt-4o",
+    "gpt-4o-mini",
+    "o1-mini",
+    "o1-preview",
+],
+"gemini": [
+    "gemini/gemini-1.5-flash",
+    "gemini/gemini-1.5-pro",
+    "gemini/gemini-2.0-flash-exp",
+],
+"anthropic": [
+    "claude-3-7-sonnet-latest",
+    "claude-3-5-sonnet-20241022",
+    "claude-3-5-sonnet-20240620",
+    "claude-3-haiku-2024030",
+    "claude-3-opus-20240229",
+    "claude-3-haiku-20240307",
+],
+"openrouter": [
+    "openrouter/deepseek/deepseek-r1",
+
+    "openrouter/qwen/qwen-2.5-72b-instruct",
+
+    "openrouter/google/gemini-2.0-flash-thinking-exp:free",
+    "openrouter/google/gemini-2.0-flash-thinking-exp-1219:free",
+    "openrouter/google/gemini-2.0-flash-001",
+
+    "openrouter/meta-llama/llama-3.3-70b-instruct",
+    "openrouter/mistralai/mistral-large-2411",
+    "openrouter/cohere/command-r-plus",
+],
+"bedrock": [
+    "bedrock/converse/us.meta.llama3-3-70b-instruct-v1:0",
+    "bedrock/us.meta.llama3-2-1b-instruct-v1:0",
+    "bedrock/us.meta.llama3-2-3b-instruct-v1:0",
+    "bedrock/us.meta.llama3-2-11b-instruct-v1:0",
+
+    "bedrock/mistral.mistral-7b-instruct-v0:2",
+    "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
+    "bedrock/mistral.mistral-large-2407-v1:0",
+
+    "bedrock/amazon.titan-text-lite-v1",
+    "bedrock/amazon.titan-text-express-v1",
+    "bedrock/amazon.titan-text-premier-v1:0",
+
+    "bedrock/cohere.command-r-plus-v1:0",
+    "bedrock/cohere.command-r-v1:0",
+    "bedrock/cohere.command-text-v14",
+    "bedrock/cohere.command-light-text-v14",
+],
+"huggingface": [
+    "huggingface/qwen/qwen2.5-VL-72B-Instruct",
+]
+```
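A short usage sketch for the new page, assuming any key from the lists above is passed verbatim as the `llm` argument:

```python
import versionhq as vhq

# Per the page, omitting `llm` falls back to a gemini or gpt model
# via LiteLLM; otherwise the key is used as given.
agent = vhq.Agent(role="demo", llm="claude-3-7-sonnet-latest")
assert isinstance(agent.llm, vhq.LLM)
```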
docs/core/task/task-strc-response.md
@@ -165,7 +165,7 @@ import versionhq as vhq
 from pydantic import BaseModel
 from typing import Any
 
-# 1. Define and execute a sub task with Pydantic output.
+# 1. Defines a sub task
 class Sub(BaseModel):
     sub1: list[dict[str, Any]]
     sub2: dict[str, Any]
@@ -176,27 +176,28 @@ sub_task = vhq.Task(
 )
 sub_res = sub_task.execute()
 
-# 2. Define a main task, callback function to format the final response.
+# 2. Defines a main task with callbacks
 class Main(BaseModel):
-    main1: list[Any] # <= assume expecting to store Sub object in this field.
-    # error_main1: list[InstanceOf[Sub]] # as this will trigger 400 error!
+    main1: list[Any] # <= assume expecting to store Sub object.
     main2: dict[str, Any]
 
-def format_response(sub: InstanceOf[Sub], main1: list[Any], main2: dict[str, Any]) -> Main:
-    main1.append(sub)
+def format_response(sub, main1, main2) -> Main:
+    if main1:
+        main1.append(sub)
     main = Main(main1=main1, main2=main2)
     return main
 
-# 3. Execute
+# 3. Executes
 main_task = vhq.Task(
-    description="generate random values that strictly follows the given format",
+    description="generate random values that strictly follows the given format.",
     pydantic_output=Main,
     callback=format_response,
-    callback_kwargs=dict(sub=Sub(sub1=sub_res.pydantic.sub1, sub2=sub_res.pydantic.sub2)),
+    callback_kwargs=dict(sub=sub_res.json_dict),
 )
-res = main_task.execute(context=sub_res.raw) # [Optional] Adding sub_task as a context.
+res = main_task.execute(context=sub_res.raw) # [Optional] Adding sub_task's response as context.
 
-assert [item for item in res.callback_output.main1 if isinstance(item, Sub)]
+assert res.callback_output.main1 is not None
+assert res.callback_output.main2 is not None
 ```
 
 To automate these manual setups, refer to <a href="/core/agent-network">AgentNetwork</a> class.
docs/index.md
@@ -25,7 +25,7 @@ A Python framework for agentic orchestration that handles complex task automatio
 
 `versionhq` is a Python framework for agent networks that handle complex task automation without human interaction.
 
-Agents are model-agnostic, and will improve task output, while oprimizing token cost and job latency, by sharing their memory, knowledge base, and RAG tools with other agents in the network.
+Agents are model-agnostic, and will improve task output, while optimizing token cost and job latency, by sharing their memory, knowledge base, and RAG tools with other agents in the network.
 
 
 ### Agent Network
@@ -113,10 +113,7 @@ The following code snippet demonstrates agent customization:
 ```python
 import versionhq as vhq
 
-agent = vhq.Agent(
-    role="Marketing Analyst",
-    goal="my amazing goal"
-) # assuming this agent was created during the network formation
+agent = vhq.Agent(role="Marketing Analyst")
 
 # update the agent
 agent.update(
docs/quickstart.md
@@ -48,7 +48,7 @@ def dummy_func(message: str, test1: str, test2: list[str]) -> str:
     return f"""{message}: {test1}, {", ".join(test2)}"""
 
 
-agent = vhq.Agent(role="demo", goal="amazing project goal")
+agent = vhq.Agent(role="demo manager")
 
 task = vhq.Task(
     description="Amazing task",
pyproject.toml
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__", "*.egg-info"]
 
 [project]
 name = "versionhq"
-version = "1.2.2.4"
+version = "1.2.2.6"
 authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
 description = "An agentic orchestration framework for building agent networks that handle task automation."
 readme = "README.md"
@@ -46,6 +46,7 @@ dependencies = [
     "composio-core==0.7.0",
     "networkx>=3.4.2",
     "matplotlib>=3.10.0",
+    "boto3>=1.37.1",
 ]
 classifiers = [
     "Programming Language :: Python",
src/versionhq/__init__.py
@@ -32,7 +32,7 @@ from versionhq.agent_network.formation import form_agent_network
 from versionhq.task_graph.draft import workflow
 
 
-__version__ = "1.2.2.4"
+__version__ = "1.2.2.6"
 __all__ = [
     "Agent",
 
src/versionhq/agent/inhouse_agents.py
@@ -38,7 +38,7 @@ vhq_formation_planner = Agent(
     "Solo is a formation where a single agent with tools, knowledge, and memory handles tasks indivudually. When self-learning mode is on - it will turn into Random formation. Typical usecase is an email agent drafts promo message for the given audience using their own knowledge.",
     "Supervising is a formation where the leader agent gives directions, while sharing its knowledge and memory with subbordinates.Subordinates can be solo agents or networks. Typical usecase is that the leader agent strategizes an outbound campaign plan and assigns components such as media mix or message creation to subordinate agents.",
     "Network is a formation where multple agents can share tasks, knowledge, and memory among network members without hierarchy. Typical usecase is that an email agent and social media agent share the product knowledge and deploy multi-channel outbound campaign. ",
-    "Random is a formation where a single agent handles tasks, asking help from other agents without sharing its memory or knowledge. Typical usecase is that an email agent drafts promo message for the given audience, asking insights on tones from other email agents which oversee other customer clusters, or an agent calls the external, third party agent to deploy the campaign. ",
+    "Random is a formation where a single agent handles tasks, asking help from other agents without sharing its memory or knowledge. Typical usecase is that an email agent drafts promo message for the given audience, asking insights on tones from other email agents which oversee other customer clusters, or an agent calls the external, third party agent to deploy the campaign.",
 ]
 )
 
src/versionhq/agent/model.py
@@ -35,8 +35,8 @@ class Agent(BaseModel):
     config: Optional[Dict[str, Any]] = Field(default=None, exclude=True, description="values to add to the Agent class")
 
     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
-    role: str = Field(description="role of the agent - used in summary and logs")
-    goal: str = Field(description="concise goal of the agent (details are set in the Task instance)")
+    role: str = Field(description="required. agent's role")
+    goal: Optional[str] = Field(default=None)
     backstory: Optional[str] = Field(default=None, description="developer prompt to the llm")
     skillsets: Optional[List[str]] = Field(default_factory=list)
     tools: Optional[List[Any]] = Field(default_factory=list)
@@ -68,10 +68,10 @@ class Agent(BaseModel):
     # llm settings cascaded to the LLM model
     llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
     func_calling_llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
-    respect_context_window: bool = Field(default=True,description="keep messages under the context window size")
+    respect_context_window: bool = Field(default=True, description="keep messages under the context window size")
     max_execution_time: Optional[int] = Field(default=None, description="max. task execution time in seconds")
     max_rpm: Optional[int] = Field(default=None, description="max. number of requests per minute")
-    llm_config: Optional[Dict[str, Any]] = Field(default=None, description="other llm config cascaded to the LLM model")
+    llm_config: Optional[Dict[str, Any]] = Field(default=None, description="other llm config cascaded to the LLM class")
 
     # # cache, error, ops handling
     # formatting_errors: int = Field(default=0, description="number of formatting errors.")
@@ -92,7 +92,7 @@ class Agent(BaseModel):
 
     @model_validator(mode="after")
     def validate_required_fields(self) -> Self:
-        required_fields = ["role", "goal"]
+        required_fields = ["role",]
         for field in required_fields:
             if getattr(self, field) is None:
                 raise ValueError(f"{field} must be provided either directly or through config")
@@ -172,7 +172,7 @@ class Agent(BaseModel):
         skills = ", ".join([item for item in self.skillsets]) if self.skillsets else ""
         tools = ", ".join([item.name for item in self.tools if hasattr(item, "name") and item.name is not None]) if self.tools else ""
         role = self.role.lower()
-        goal = self.goal.lower()
+        goal = self.goal.lower() if self.goal else ""
 
         if self.tools or self.skillsets:
             backstory = BACKSTORY_FULL.format(role=role, goal=goal, skills=skills, tools=tools)
@@ -276,8 +276,8 @@ class Agent(BaseModel):
                 return self._set_llm_params(llm=llm, config=self.llm_config)
 
             case str():
-                llm_obj = LLM(model=llm)
-                return self._set_llm_params(llm=llm_obj, config=self.llm_config)
+                llm = LLM(model=llm)
+                return self._set_llm_params(llm=llm, config=self.llm_config)
 
             case dict():
                 model_name = llm.pop("model_name", llm.pop("deployment_name", str(llm)))
@@ -287,53 +287,21 @@ class Agent(BaseModel):
 
             case _:
                 model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
-                llm_obj = LLM(model=model_name if model_name else DEFAULT_MODEL_NAME)
+                llm = LLM(model=model_name if model_name else DEFAULT_MODEL_NAME)
                 llm_params = {
-                    "max_tokens": (getattr(llm, "max_tokens") or 3000),
                     "timeout": getattr(llm, "timeout", self.max_execution_time),
                     "callbacks": getattr(llm, "callbacks", None),
-                    "temperature": getattr(llm, "temperature", None),
-                    "logprobs": getattr(llm, "logprobs", None),
-                    "api_key": getattr(llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
+                    "llm_config": getattr(llm, "llm_config", None),
                     "base_url": getattr(llm, "base_url", None),
                 }
                 config = llm_params.update(self.llm_config) if self.llm_config else llm_params
-                return self._set_llm_params(llm=llm_obj, config=config)
+                return self._set_llm_params(llm=llm, config=config)
 
 
     def _set_llm_params(self, llm: LLM, config: Dict[str, Any] = None) -> LLM:
         """
         Add valid params to the LLM object.
         """
-
-        import litellm
-        from versionhq.llm.llm_vars import PARAMS
-
-        valid_config = {k: v for k, v in config.items() if v} if config else {}
-
-        if valid_config:
-            valid_keys = list()
-            try:
-                valid_keys = litellm.get_supported_openai_params(model=llm.model, custom_llm_provider=self.endpoint_provider, request_type="chat_completion")
-                if not valid_keys:
-                    valid_keys = PARAMS.get("common")
-            except:
-                valid_keys = PARAMS.get("common")
-
-            valid_keys += PARAMS.get("litellm")
-
-            for key in valid_keys:
-                if key in valid_config and valid_config[key]:
-                    val = valid_config[key]
-                    if [key == k for k, v in LLM.model_fields.items()]:
-                        setattr(llm, key, val)
-                    else:
-                        llm.other_valid_config.update({ key: val})
-
-
-        llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
-        # llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
-
         if llm.provider is None:
             provider_name = llm.model.split("/")[0]
             valid_provider = provider_name if provider_name in PROVIDERS else None
@@ -346,6 +314,12 @@ class Agent(BaseModel):
         if self.respect_context_window == False:
             llm.context_window_size = DEFAULT_CONTEXT_WINDOW_SIZE
 
+        llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
+
+        if config:
+            llm.llm_config = {k: v for k, v in config.items() if v or v == False}
+            llm.setup_config()
+
         return llm
@@ -494,7 +468,7 @@ class Agent(BaseModel):
         Defines and executes a task when it is not given and returns TaskOutput object.
         """
 
-        if not self.goal or not self.role:
+        if not self.role:
             return None
 
         from versionhq.task.model import Task
@@ -504,7 +478,7 @@ class Agent(BaseModel):
             steps: list[str]
 
         task = Task(
-            description=f"Generate a simple result in a sentence to achieve the goal: {self.goal}. If needed, list up necessary steps in concise manner.",
+            description=f"Generate a simple result in a sentence to achieve the goal: {self.goal if self.goal else self.role}. If needed, list up necessary steps in concise manner.",
             pydantic_output=Output,
             tool_res_as_final=tool_res_as_final,
         )
@@ -595,7 +569,7 @@ class Agent(BaseModel):
 
 
     def __repr__(self):
-        return f"Agent(role={self.role}, goal={self.goal}"
+        return f"Agent(role={self.role}, id={str(self.id)}"
 
     def __str__(self):
         return super().__str__()
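Net effect of the `model.py` changes: `goal` is now optional, and the per-parameter plumbing in `_set_llm_params` collapses into a single `llm_config` dict handed to `LLM.setup_config()`. A minimal sketch of the relaxed constructor:

```python
import versionhq as vhq

# `role` alone now satisfies validate_required_fields; `goal`
# defaults to None and prompts fall back to the role.
agent = vhq.Agent(role="demo")
assert agent.goal is None
```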
src/versionhq/llm/llm_vars.py
@@ -6,30 +6,16 @@ PROVIDERS = [
     "openai",
     "gemini",
     "openrouter",
-    "huggingface",
     "anthropic",
-    "sagemaker",
     "bedrock",
-    "ollama",
-    "watson",
-    "azure",
-    "cerebras",
-    "llama",
+    "bedrock/converse",
+    "huggingface",
 ]
 
 ENDPOINT_PROVIDERS = [
     "huggingface",
 ]
 
-"""
-List of models available on the framework.
-Model names align with the LiteLLM's key names defined in the JSON URL.
-Provider names align with the custom provider or model provider names.
--> model_key = custom_provider_name/model_name
-
-Option
-litellm.pick_cheapest_chat_models_from_llm_provider(custom_llm_provider: str, n=1)
-"""
 
 MODELS = {
     "openai": [
@@ -45,6 +31,7 @@ MODELS = {
         "gemini/gemini-2.0-flash-exp",
     ],
     "anthropic": [
+        "claude-3-7-sonnet-latest",
         "claude-3-5-sonnet-20241022",
         "claude-3-5-sonnet-20240620",
         "claude-3-haiku-2024030",
@@ -53,77 +40,52 @@ MODELS = {
     ],
     "openrouter": [
         "openrouter/deepseek/deepseek-r1",
+
         "openrouter/qwen/qwen-2.5-72b-instruct",
+
         "openrouter/google/gemini-2.0-flash-thinking-exp:free",
         "openrouter/google/gemini-2.0-flash-thinking-exp-1219:free",
         "openrouter/google/gemini-2.0-flash-001",
+
         "openrouter/meta-llama/llama-3.3-70b-instruct",
         "openrouter/mistralai/mistral-large-2411",
+        "openrouter/cohere/command-r-plus",
     ],
-    "huggingface": [
-        "huggingface/qwen/qwen2.5-VL-72B-Instruct",
-    ],
-    # "sagemaker": [
-    #     "sagemaker/huggingface-text2text-flan-t5-base",
-    #     "sagemaker/huggingface-llm-gemma-7b",
-    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-13b",
-    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-70b",
-    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-3-8b",
-    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-3-70b",
-    #     "sagemaker/huggingface-llm-mistral-7b"
-    # ], #https://docs.aws.amazon.com/sagemaker/latest/dg/jumpstart-foundation-models-latest.html
-    "ollama": [
-        "ollama/llama3.1",
-        "ollama/mixtral",
-        "ollama/mixtral-8x22B-Instruct-v0.1",
-    ],
-    # "watson": [
-    #     "watsonx/meta-llama/llama-3-1-70b-instruct",
-    #     "watsonx/meta-llama/llama-3-1-8b-instruct",
-    #     "watsonx/meta-llama/llama-3-2-11b-vision-instruct",
-    #     "watsonx/meta-llama/llama-3-2-1b-instruct",
-    #     "watsonx/meta-llama/llama-3-2-90b-vision-instruct",
-    #     "watsonx/meta-llama/llama-3-405b-instruct",
-    #     "watsonx/mistral/mistral-large",
-    #     "watsonx/ibm/granite-3-8b-instruct",
-    # ],
     "bedrock": [
-        "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
-        "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
-        "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
-        "bedrock/anthropic.claude-3-opus-20240229-v1:0",
-        "bedrock/anthropic.claude-v2",
-        "bedrock/anthropic.claude-instant-v1",
-        "bedrock/meta.llama3-1-405b-instruct-v1:0",
-        "bedrock/meta.llama3-1-70b-instruct-v1:0",
-        "bedrock/meta.llama3-1-8b-instruct-v1:0",
-        "bedrock/meta.llama3-70b-instruct-v1:0",
-        "bedrock/meta.llama3-8b-instruct-v1:0",
+        "bedrock/converse/us.meta.llama3-3-70b-instruct-v1:0",
+        "bedrock/us.meta.llama3-2-1b-instruct-v1:0",
+        "bedrock/us.meta.llama3-2-3b-instruct-v1:0",
+        "bedrock/us.meta.llama3-2-11b-instruct-v1:0",
+
+        "bedrock/mistral.mistral-7b-instruct-v0:2",
+        "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
+        "bedrock/mistral.mistral-large-2407-v1:0",
+
         "bedrock/amazon.titan-text-lite-v1",
         "bedrock/amazon.titan-text-express-v1",
+        "bedrock/amazon.titan-text-premier-v1:0",
+
+        "bedrock/cohere.command-r-plus-v1:0",
+        "bedrock/cohere.command-r-v1:0",
         "bedrock/cohere.command-text-v14",
-        "bedrock/ai21.j2-mid-v1",
-        "bedrock/ai21.j2-ultra-v1",
-        "bedrock/ai21.jamba-instruct-v1:0",
-        "bedrock/meta.llama2-13b-chat-v1",
-        "bedrock/meta.llama2-70b-chat-v1",
-        "bedrock/mistral.mistral-7b-instruct-v0:2",
-        "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
+        "bedrock/cohere.command-light-text-v14",
+    ],
+    "huggingface": [
+        "huggingface/qwen/qwen2.5-VL-72B-Instruct",
     ],
 }
 
 
-
-KEYS = {
+ENV_VARS = {
     "openai": ["OPENAI_API_KEY"],
     "gemini": ["GEMINI_API_KEY"],
    "anthropic": ["ANTHROPIC_API_KEY"],
     "huggingface": ["HUGGINGFACE_API_KEY", ],
-    "sagemaker": ["AWS_ACCESS_KEY_ID", "ADW_SECURET_ACCESS_KEY", "AWS_REGION_NAME"],
+    "bedrock": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
+    "sagemaker": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
 }
 
 
-
 """
 Max input token size by the model.
 """
@@ -193,8 +155,8 @@ PARAMS = {
     "response_format",
     "n",
     "stop",
-    "base_url",
-    "api_key",
+    # "base_url",
+    # "api_key",
     ],
     "openai": [
         "timeout",
@@ -216,7 +178,10 @@ PARAMS = {
     ],
     "gemini": [
         "topK",
-    ]
+    ],
+    "bedrock": {
+        "top-k",
+    }
 }
 
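With `KEYS` renamed to `ENV_VARS` and the AWS credential names corrected (the old `sagemaker` entry misspelled `AWS_SECRET_ACCESS_KEY`), a preflight check becomes straightforward. A sketch, assuming the table keeps this shape:

```python
import os

from versionhq.llm.llm_vars import ENV_VARS

def missing_creds(provider: str) -> list[str]:
    # Env vars the provider needs that are not currently set.
    return [v for v in ENV_VARS.get(provider, []) if not os.environ.get(v)]

assert not missing_creds("bedrock"), f"missing: {missing_creds('bedrock')}"
```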