versionhq 1.2.2.5__tar.gz → 1.2.2.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/.env.sample +4 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/.github/workflows/run_tests.yml +3 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/PKG-INFO +3 -2
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/README.md +1 -1
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/core/agent/config.md +5 -10
- versionhq-1.2.2.6/docs/core/llm/index.md +73 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/core/task/task-strc-response.md +12 -11
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/index.md +1 -1
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/pyproject.toml +2 -1
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/__init__.py +1 -1
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/agent/model.py +15 -41
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/llm/llm_vars.py +33 -68
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/llm/model.py +62 -45
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/task/model.py +3 -3
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq.egg-info/PKG-INFO +3 -2
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq.egg-info/SOURCES.txt +3 -2
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq.egg-info/requires.txt +1 -0
- versionhq-1.2.2.6/tests/__init__.py +39 -0
- versionhq-1.2.2.6/tests/_sample/sample.csv +241 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/agent/agent_test.py +29 -39
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/agent/doc_test.py +6 -23
- {versionhq-1.2.2.5/tests/cli → versionhq-1.2.2.6/tests/knowledge}/__init__.py +0 -0
- {versionhq-1.2.2.5/tests/knowledge → versionhq-1.2.2.6/tests/llm}/__init__.py +0 -0
- versionhq-1.2.2.6/tests/llm/llm_connection_test.py +66 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/llm/llm_test.py +8 -0
- {versionhq-1.2.2.5/tests/llm → versionhq-1.2.2.6/tests/memory}/__init__.py +0 -0
- {versionhq-1.2.2.5/tests/memory → versionhq-1.2.2.6/tests/task}/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/task/doc_test.py +14 -59
- {versionhq-1.2.2.5/tests/task → versionhq-1.2.2.6/tests/task_graph}/__init__.py +0 -0
- {versionhq-1.2.2.5/tests/task_graph → versionhq-1.2.2.6/tests/tool}/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/usecase_test.py +16 -10
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/uv.lock +86 -34
- versionhq-1.2.2.5/docs/core/llm/index.md +0 -103
- versionhq-1.2.2.5/tests/task/llm_connection_test.py +0 -106
- versionhq-1.2.2.5/tests/tool/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/.github/workflows/deploy_docs.yml +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/.github/workflows/publish.yml +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/.github/workflows/publish_testpypi.yml +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/.github/workflows/security_check.yml +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/.gitignore +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/.pre-commit-config.yaml +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/.python-version +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/LICENSE +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/SECURITY.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/db/preprocess.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/CNAME +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/_logos/favicon.ico +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/_logos/logo192.png +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/core/agent/index.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/core/agent/task-handling.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/core/agent-network/config.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/core/agent-network/form.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/core/agent-network/index.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/core/agent-network/ref.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/core/task/evaluation.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/core/task/index.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/core/task/response-field.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/core/task/task-execution.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/core/task/task-output.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/core/task/task-ref.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/core/task-graph/index.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/core/tool.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/quickstart.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/stylesheets/main.css +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/docs/tags.md +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/mkdocs.yml +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/requirements-dev.txt +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/requirements.txt +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/runtime.txt +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/setup.cfg +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/_utils/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/_utils/i18n.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/_utils/llm_as_a_judge.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/_utils/logger.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/_utils/process_config.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/_utils/usage_metrics.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/_utils/vars.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/agent/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/agent/inhouse_agents.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/agent/parser.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/agent/rpm_controller.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/agent_network/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/agent_network/formation.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/agent_network/model.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/cli/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/clients/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/clients/customer/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/clients/customer/model.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/clients/product/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/clients/product/model.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/clients/workflow/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/clients/workflow/model.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/knowledge/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/knowledge/_utils.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/knowledge/embedding.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/knowledge/model.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/knowledge/source.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/knowledge/source_docling.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/knowledge/storage.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/llm/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/memory/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/memory/contextual_memory.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/memory/model.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/storage/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/storage/base.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/storage/ltm_sqlite_storage.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/storage/mem0_storage.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/storage/rag_storage.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/storage/task_output_storage.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/storage/utils.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/task/TEMPLATES/Description.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/task/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/task/evaluation.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/task/formatter.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/task/structured_response.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/task_graph/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/task_graph/colors.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/task_graph/draft.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/task_graph/model.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/tool/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/tool/cache_handler.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/tool/composio_tool.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/tool/composio_tool_vars.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/tool/decorator.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/tool/model.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/tool/rag_tool.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq/tool/tool_handler.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq.egg-info/dependency_links.txt +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/src/versionhq.egg-info/top_level.txt +0 -0
- {versionhq-1.2.2.5/tests → versionhq-1.2.2.6/tests/_sample}/sample.json +0 -0
- {versionhq-1.2.2.5/tests → versionhq-1.2.2.6/tests/agent}/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/agent_network/Prompts/Demo_test.py +0 -0
- {versionhq-1.2.2.5/tests/agent → versionhq-1.2.2.6/tests/agent_network}/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/agent_network/agent_network_test.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/agent_network/doc_test.py +0 -0
- {versionhq-1.2.2.5/tests/agent_network → versionhq-1.2.2.6/tests/cli}/__init__.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/clients/customer_test.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/clients/product_test.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/clients/workflow_test.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/conftest.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/doc_test.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/formation_test.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/knowledge/knowledge_test.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/knowledge/mock_report_compressed.pdf +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/memory/memory_test.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/task/doc_taskoutput_test.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/task/eval_test.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/task/task_test.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/task_graph/doc_test.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/task_graph/task_graph_test.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/tool/composio_test.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/tool/doc_test.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/tool/rag_tool_test.py +0 -0
- {versionhq-1.2.2.5 → versionhq-1.2.2.6}/tests/tool/tool_test.py +0 -0
--- versionhq-1.2.2.5/.github/workflows/run_tests.yml
+++ versionhq-1.2.2.6/.github/workflows/run_tests.yml
@@ -17,6 +17,9 @@ env:
   DEFAULT_REDIRECT_URL: ${{ secrets.DEFAULT_REDIRECT_URL }}
   DEFAULT_USER_ID: ${{ secrets.DEFAULT_USER_ID }}
   MEM0_API_KEY: ${{ secrets.MEM0_API_KEY }}
+  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+  AWS_REGION_NAME: ${{ secrets.AWS_REGION_NAME }}

 jobs:
   run_test:
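The three AWS secrets line up with the new `boto3` dependency and the added `tests/llm/llm_connection_test.py` suite. As a minimal sketch of the pre-flight check the test job effectively relies on (a hypothetical helper, assuming LiteLLM's AWS variable names rather than any function from this package):

```python
import os

# Hypothetical pre-flight check mirroring the new CI secrets. The variable
# names follow LiteLLM's AWS conventions; this helper is not part of versionhq.
required = ("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME")
missing = [name for name in required if not os.environ.get(name)]
if missing:
    raise EnvironmentError(f"Bedrock tests need: {', '.join(missing)}")
```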
--- versionhq-1.2.2.5/PKG-INFO
+++ versionhq-1.2.2.6/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.2.2.5
+Version: 1.2.2.6
 Summary: An agentic orchestration framework for building agent networks that handle task automation.
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -66,6 +66,7 @@ Requires-Dist: envoy>=0.0.3
 Requires-Dist: composio-core==0.7.0
 Requires-Dist: networkx>=3.4.2
 Requires-Dist: matplotlib>=3.10.0
+Requires-Dist: boto3>=1.37.1
 Provides-Extra: docling
 Requires-Dist: docling>=2.17.0; extra == "docling"
 Provides-Extra: mem0ai
@@ -141,7 +142,7 @@ Agentic orchestration framework for multi-agent networks and task graphs for com

 `versionhq` is a Python framework for agent networks that handle complex task automation without human interaction.

-Agents are model-agnostic, and will improve task output, while
+Agents are model-agnostic, and will improve task output, while optimizing token cost and job latency, by sharing their memory, knowledge base, and RAG tools with other agents in the network.


 ### Agent Network
--- versionhq-1.2.2.5/README.md
+++ versionhq-1.2.2.6/README.md
@@ -55,7 +55,7 @@ Agentic orchestration framework for multi-agent networks and task graphs for com

 `versionhq` is a Python framework for agent networks that handle complex task automation without human interaction.

-Agents are model-agnostic, and will improve task output, while
+Agents are model-agnostic, and will improve task output, while optimizing token cost and job latency, by sharing their memory, knowledge base, and RAG tools with other agents in the network.


 ### Agent Network
--- versionhq-1.2.2.5/docs/core/agent/config.md
+++ versionhq-1.2.2.6/docs/core/agent/config.md
@@ -11,11 +11,7 @@ By default, when the model provider name is provided, we will select the most co
 ```python
 import versionhq as vhq

-agent = vhq.Agent(
-    role="Marketing Analyst",
-    goal="Coping with price competition in saturated markets",
-    llm="gemini-2.0"
-)
+agent = vhq.Agent(role="Marketing Analyst", llm="gemini-2.0")
 ```

 <hr/>
@@ -34,7 +30,6 @@ import versionhq as vhq

 agent = vhq.Agent(
     role="Marketing Analyst",
-    goal="Coping with increased price competition in saturated markets.",
     respect_context_window=False,
     max_execution_time=60,
     max_rpm=5,
@@ -48,10 +43,10 @@ agent = vhq.Agent(
 )

 assert isinstance(agent.llm, vhq.LLM)
-assert agent.llm.temperature == 1
-assert agent.llm.top_p == 0.1
-assert agent.llm.n == 1
-assert agent.llm.stop == "answer"
+assert agent.llm.llm_config["temperature"] == 1
+assert agent.llm.llm_config["top_p"] == 0.1
+assert agent.llm.llm_config["n"] == 1
+assert agent.llm.llm_config["stop"] == "answer"
 ```

 <hr>
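The updated assertions capture the behavioral change in this release: tuning parameters such as `temperature` now live in the `llm_config` dict on the LLM object rather than as individual attributes. A minimal sketch of the new pattern, assuming only the `Agent` fields documented above (the parameter values are illustrative):

```python
import versionhq as vhq

# Parameters are passed once via llm_config and read back from the dict
# on the resulting LLM object.
agent = vhq.Agent(
    role="Marketing Analyst",
    llm="gpt-4o",
    llm_config=dict(temperature=1, top_p=0.1, n=1, stop="answer"),
)
assert agent.llm.llm_config["temperature"] == 1
```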
--- /dev/null
+++ versionhq-1.2.2.6/docs/core/llm/index.md
@@ -0,0 +1,73 @@
+---
+tags:
+  - Agent Network
+---
+
+# LLM
+
+<class>`class` versionhq.llm.model.<bold>LLM<bold></class>
+
+A Pydantic class to store LLM objects and its task handling rules.
+
+You can specify a model and integration platform from the list. Else, we'll use `gemini` or `gpt` via `LiteLLM` by default.
+
+
+**List of available models**
+
+```python
+"openai": [
+    "gpt-4",
+    "gpt-4o",
+    "gpt-4o-mini",
+    "o1-mini",
+    "o1-preview",
+],
+"gemini": [
+    "gemini/gemini-1.5-flash",
+    "gemini/gemini-1.5-pro",
+    "gemini/gemini-2.0-flash-exp",
+],
+"anthropic": [
+    "claude-3-7-sonnet-latest",
+    "claude-3-5-sonnet-20241022",
+    "claude-3-5-sonnet-20240620",
+    "claude-3-haiku-2024030",
+    "claude-3-opus-20240229",
+    "claude-3-haiku-20240307",
+],
+"openrouter": [
+    "openrouter/deepseek/deepseek-r1",
+
+    "openrouter/qwen/qwen-2.5-72b-instruct",
+
+    "openrouter/google/gemini-2.0-flash-thinking-exp:free",
+    "openrouter/google/gemini-2.0-flash-thinking-exp-1219:free",
+    "openrouter/google/gemini-2.0-flash-001",
+
+    "openrouter/meta-llama/llama-3.3-70b-instruct",
+    "openrouter/mistralai/mistral-large-2411",
+    "openrouter/cohere/command-r-plus",
+],
+"bedrock": [
+    "bedrock/converse/us.meta.llama3-3-70b-instruct-v1:0",
+    "bedrock/us.meta.llama3-2-1b-instruct-v1:0",
+    "bedrock/us.meta.llama3-2-3b-instruct-v1:0",
+    "bedrock/us.meta.llama3-2-11b-instruct-v1:0",
+
+    "bedrock/mistral.mistral-7b-instruct-v0:2",
+    "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
+    "bedrock/mistral.mistral-large-2407-v1:0",
+
+    "bedrock/amazon.titan-text-lite-v1",
+    "bedrock/amazon.titan-text-express-v1",
+    "bedrock/amazon.titan-text-premier-v1:0",
+
+    "bedrock/cohere.command-r-plus-v1:0",
+    "bedrock/cohere.command-r-v1:0",
+    "bedrock/cohere.command-text-v14",
+    "bedrock/cohere.command-light-text-v14",
+],
+"huggingface": [
+    "huggingface/qwen/qwen2.5-VL-72B-Instruct",
+]
+```
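Any key from the list above can be passed as an agent's `llm`. A short sketch under the same conventions as the config docs (the Bedrock model choice here is illustrative, and the AWS credentials from the workflow change above are assumed to be set in the environment):

```python
import versionhq as vhq

# Picks a Bedrock model from the list above; requires AWS_ACCESS_KEY_ID,
# AWS_SECRET_ACCESS_KEY, and AWS_REGION_NAME in the environment.
agent = vhq.Agent(
    role="Summarizer",
    llm="bedrock/us.meta.llama3-2-3b-instruct-v1:0",
)
assert isinstance(agent.llm, vhq.LLM)
```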
--- versionhq-1.2.2.5/docs/core/task/task-strc-response.md
+++ versionhq-1.2.2.6/docs/core/task/task-strc-response.md
@@ -165,7 +165,7 @@ import versionhq as vhq
 from pydantic import BaseModel
 from typing import Any

-# 1.
+# 1. Defines a sub task
 class Sub(BaseModel):
     sub1: list[dict[str, Any]]
     sub2: dict[str, Any]
@@ -176,27 +176,28 @@ sub_task = vhq.Task(
 )
 sub_res = sub_task.execute()

-# 2.
+# 2. Defines a main task with callbacks
 class Main(BaseModel):
-    main1: list[Any] # <= assume expecting to store Sub object
-    # error_main1: list[InstanceOf[Sub]] # as this will trigger 400 error!
+    main1: list[Any] # <= assume expecting to store Sub object.
     main2: dict[str, Any]

-def format_response(sub
-    main1
+def format_response(sub, main1, main2) -> Main:
+    if main1:
+        main1.append(sub)
     main = Main(main1=main1, main2=main2)
     return main

-# 3.
+# 3. Executes
 main_task = vhq.Task(
-    description="generate random values that strictly follows the given format",
+    description="generate random values that strictly follows the given format.",
     pydantic_output=Main,
     callback=format_response,
-    callback_kwargs=dict(sub=
+    callback_kwargs=dict(sub=sub_res.json_dict),
 )
-res = main_task.execute(context=sub_res.raw) # [Optional] Adding sub_task as
+res = main_task.execute(context=sub_res.raw) # [Optional] Adding sub_task's response as context.

-assert
+assert res.callback_output.main1 is not None
+assert res.callback_output.main2 is not None
 ```

 To automate these manual setups, refer to <a href="/core/agent-network">AgentNetwork</a> class.
--- versionhq-1.2.2.5/docs/index.md
+++ versionhq-1.2.2.6/docs/index.md
@@ -25,7 +25,7 @@ A Python framework for agentic orchestration that handles complex task automatio

 `versionhq` is a Python framework for agent networks that handle complex task automation without human interaction.

-Agents are model-agnostic, and will improve task output, while
+Agents are model-agnostic, and will improve task output, while optimizing token cost and job latency, by sharing their memory, knowledge base, and RAG tools with other agents in the network.


 ### Agent Network
--- versionhq-1.2.2.5/pyproject.toml
+++ versionhq-1.2.2.6/pyproject.toml
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__", "*.egg-info"]

 [project]
 name = "versionhq"
-version = "1.2.2.5"
+version = "1.2.2.6"
 authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
 description = "An agentic orchestration framework for building agent networks that handle task automation."
 readme = "README.md"
@@ -46,6 +46,7 @@ dependencies = [
     "composio-core==0.7.0",
     "networkx>=3.4.2",
     "matplotlib>=3.10.0",
+    "boto3>=1.37.1",
 ]
 classifiers = [
     "Programming Language :: Python",
--- versionhq-1.2.2.5/src/versionhq/agent/model.py
+++ versionhq-1.2.2.6/src/versionhq/agent/model.py
@@ -68,10 +68,10 @@ class Agent(BaseModel):
     # llm settings cascaded to the LLM model
     llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
     func_calling_llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
-    respect_context_window: bool = Field(default=True,description="keep messages under the context window size")
+    respect_context_window: bool = Field(default=True, description="keep messages under the context window size")
     max_execution_time: Optional[int] = Field(default=None, description="max. task execution time in seconds")
     max_rpm: Optional[int] = Field(default=None, description="max. number of requests per minute")
-    llm_config: Optional[Dict[str, Any]] = Field(default=None, description="other llm config cascaded to the LLM
+    llm_config: Optional[Dict[str, Any]] = Field(default=None, description="other llm config cascaded to the LLM class")

     # # cache, error, ops handling
     # formatting_errors: int = Field(default=0, description="number of formatting errors.")
|
|
276
276
|
return self._set_llm_params(llm=llm, config=self.llm_config)
|
277
277
|
|
278
278
|
case str():
|
279
|
-
|
280
|
-
return self._set_llm_params(llm=
|
279
|
+
llm = LLM(model=llm)
|
280
|
+
return self._set_llm_params(llm=llm, config=self.llm_config)
|
281
281
|
|
282
282
|
case dict():
|
283
283
|
model_name = llm.pop("model_name", llm.pop("deployment_name", str(llm)))
|
@@ -287,53 +287,21 @@ class Agent(BaseModel):

             case _:
                 model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
-
+                llm = LLM(model=model_name if model_name else DEFAULT_MODEL_NAME)
                 llm_params = {
-                    "max_tokens": (getattr(llm, "max_tokens") or 3000),
                     "timeout": getattr(llm, "timeout", self.max_execution_time),
                     "callbacks": getattr(llm, "callbacks", None),
-                    "
-                    "logprobs": getattr(llm, "logprobs", None),
-                    "api_key": getattr(llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
+                    "llm_config": getattr(llm, "llm_config", None),
                     "base_url": getattr(llm, "base_url", None),
                 }
                 config = llm_params.update(self.llm_config) if self.llm_config else llm_params
-                return self._set_llm_params(llm=
+                return self._set_llm_params(llm=llm, config=config)


     def _set_llm_params(self, llm: LLM, config: Dict[str, Any] = None) -> LLM:
         """
         Add valid params to the LLM object.
         """
-
-        import litellm
-        from versionhq.llm.llm_vars import PARAMS
-
-        valid_config = {k: v for k, v in config.items() if v} if config else {}
-
-        if valid_config:
-            valid_keys = list()
-            try:
-                valid_keys = litellm.get_supported_openai_params(model=llm.model, custom_llm_provider=self.endpoint_provider, request_type="chat_completion")
-                if not valid_keys:
-                    valid_keys = PARAMS.get("common")
-            except:
-                valid_keys = PARAMS.get("common")
-
-            valid_keys += PARAMS.get("litellm")
-
-            for key in valid_keys:
-                if key in valid_config and valid_config[key]:
-                    val = valid_config[key]
-                    if [key == k for k, v in LLM.model_fields.items()]:
-                        setattr(llm, key, val)
-                    else:
-                        llm.other_valid_config.update({ key: val})
-
-
-        llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
-        # llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
-
         if llm.provider is None:
             provider_name = llm.model.split("/")[0]
             valid_provider = provider_name if provider_name in PROVIDERS else None
@@ -346,6 +314,12 @@ class Agent(BaseModel):
         if self.respect_context_window == False:
             llm.context_window_size = DEFAULT_CONTEXT_WINDOW_SIZE

+        llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
+
+        if config:
+            llm.llm_config = {k: v for k, v in config.items() if v or v == False}
+            llm.setup_config()
+
         return llm

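The refactor replaces the old per-key `setattr` loop with a single dict filter plus `setup_config()`. The filter's `if v or v == False` predicate keeps explicit `False` flags while dropping empty values; a sketch of that rule in isolation (the sample dict is illustrative only):

```python
# Keeps truthy values and explicit False flags; drops None and empty strings.
config = {"temperature": 0.7, "stream": False, "stop": None, "api_base": ""}
filtered = {k: v for k, v in config.items() if v or v == False}
assert filtered == {"temperature": 0.7, "stream": False}
```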
@@ -494,7 +468,7 @@ class Agent(BaseModel):
         Defines and executes a task when it is not given and returns TaskOutput object.
         """

-        if not self.role
+        if not self.role:
             return None

         from versionhq.task.model import Task
@@ -504,7 +478,7 @@ class Agent(BaseModel):
             steps: list[str]

         task = Task(
-            description=f"Generate a simple result in a sentence to achieve the goal: {self.goal}. If needed, list up necessary steps in concise manner.",
+            description=f"Generate a simple result in a sentence to achieve the goal: {self.goal if self.goal else self.role}. If needed, list up necessary steps in concise manner.",
             pydantic_output=Output,
             tool_res_as_final=tool_res_as_final,
         )
--- versionhq-1.2.2.5/src/versionhq/llm/llm_vars.py
+++ versionhq-1.2.2.6/src/versionhq/llm/llm_vars.py
@@ -6,30 +6,16 @@ PROVIDERS = [
     "openai",
     "gemini",
     "openrouter",
-    "huggingface",
     "anthropic",
-    "sagemaker",
     "bedrock",
-    "
-    "
-    "azure",
-    "cerebras",
-    "llama",
+    "bedrock/converse",
+    "huggingface",
 ]

 ENDPOINT_PROVIDERS = [
     "huggingface",
 ]

-"""
-List of models available on the framework.
-Model names align with the LiteLLM's key names defined in the JSON URL.
-Provider names align with the custom provider or model provider names.
--> model_key = custom_provider_name/model_name
-
-Option
-litellm.pick_cheapest_chat_models_from_llm_provider(custom_llm_provider: str, n=1)
-"""

 MODELS = {
     "openai": [
@@ -45,6 +31,7 @@ MODELS = {
         "gemini/gemini-2.0-flash-exp",
     ],
     "anthropic": [
+        "claude-3-7-sonnet-latest",
         "claude-3-5-sonnet-20241022",
         "claude-3-5-sonnet-20240620",
         "claude-3-haiku-2024030",
@@ -53,77 +40,52 @@ MODELS = {
     ],
     "openrouter": [
         "openrouter/deepseek/deepseek-r1",
+
         "openrouter/qwen/qwen-2.5-72b-instruct",
+
         "openrouter/google/gemini-2.0-flash-thinking-exp:free",
         "openrouter/google/gemini-2.0-flash-thinking-exp-1219:free",
         "openrouter/google/gemini-2.0-flash-001",
+
         "openrouter/meta-llama/llama-3.3-70b-instruct",
         "openrouter/mistralai/mistral-large-2411",
+        "openrouter/cohere/command-r-plus",
     ],
-    "huggingface": [
-        "huggingface/qwen/qwen2.5-VL-72B-Instruct",
-    ],
-    # "sagemaker": [
-    #     "sagemaker/huggingface-text2text-flan-t5-base",
-    #     "sagemaker/huggingface-llm-gemma-7b",
-    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-13b",
-    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-70b",
-    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-3-8b",
-    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-3-70b",
-    #     "sagemaker/huggingface-llm-mistral-7b"
-    # ], #https://docs.aws.amazon.com/sagemaker/latest/dg/jumpstart-foundation-models-latest.html
-    "ollama": [
-        "ollama/llama3.1",
-        "ollama/mixtral",
-        "ollama/mixtral-8x22B-Instruct-v0.1",
-    ],
-    # "watson": [
-    #     "watsonx/meta-llama/llama-3-1-70b-instruct",
-    #     "watsonx/meta-llama/llama-3-1-8b-instruct",
-    #     "watsonx/meta-llama/llama-3-2-11b-vision-instruct",
-    #     "watsonx/meta-llama/llama-3-2-1b-instruct",
-    #     "watsonx/meta-llama/llama-3-2-90b-vision-instruct",
-    #     "watsonx/meta-llama/llama-3-405b-instruct",
-    #     "watsonx/mistral/mistral-large",
-    #     "watsonx/ibm/granite-3-8b-instruct",
-    # ],
     "bedrock": [
-        "bedrock/
-        "bedrock/
-        "bedrock/
-        "bedrock/
-
-        "bedrock/
-        "bedrock/
-        "bedrock/
-
-        "bedrock/meta.llama3-70b-instruct-v1:0",
-        "bedrock/meta.llama3-8b-instruct-v1:0",
+        "bedrock/converse/us.meta.llama3-3-70b-instruct-v1:0",
+        "bedrock/us.meta.llama3-2-1b-instruct-v1:0",
+        "bedrock/us.meta.llama3-2-3b-instruct-v1:0",
+        "bedrock/us.meta.llama3-2-11b-instruct-v1:0",
+
+        "bedrock/mistral.mistral-7b-instruct-v0:2",
+        "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
+        "bedrock/mistral.mistral-large-2407-v1:0",
+
         "bedrock/amazon.titan-text-lite-v1",
         "bedrock/amazon.titan-text-express-v1",
+        "bedrock/amazon.titan-text-premier-v1:0",
+
+        "bedrock/cohere.command-r-plus-v1:0",
+        "bedrock/cohere.command-r-v1:0",
         "bedrock/cohere.command-text-v14",
-        "bedrock/
-
-
-        "
-        "bedrock/meta.llama2-70b-chat-v1",
-        "bedrock/mistral.mistral-7b-instruct-v0:2",
-        "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
+        "bedrock/cohere.command-light-text-v14",
+    ],
+    "huggingface": [
+        "huggingface/qwen/qwen2.5-VL-72B-Instruct",
     ],
 }


-
-KEYS = {
+ENV_VARS = {
     "openai": ["OPENAI_API_KEY"],
     "gemini": ["GEMINI_API_KEY"],
     "anthropic": ["ANTHROPIC_API_KEY"],
     "huggingface": ["HUGGINGFACE_API_KEY", ],
-    "
+    "bedrock": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
+    "sagemaker": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
 }


-
 """
 Max input token size by the model.
 """
@@ -193,8 +155,8 @@ PARAMS = {
         "response_format",
         "n",
         "stop",
-        "base_url",
-        "api_key",
+        # "base_url",
+        # "api_key",
     ],
     "openai": [
         "timeout",
@@ -216,7 +178,10 @@ PARAMS = {
     ],
     "gemini": [
         "topK",
-    ]
+    ],
+    "bedrock": {
+        "top-k",
+    }
 }

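The `KEYS` table is renamed to `ENV_VARS` and now maps `bedrock` and `sagemaker` to the three AWS variables. A sketch of how that table can gate provider selection (`has_credentials` is a hypothetical helper, not part of the package):

```python
import os

from versionhq.llm.llm_vars import ENV_VARS

# Hypothetical helper: a provider is usable only when every env var that
# ENV_VARS lists for it is present in the environment.
def has_credentials(provider: str) -> bool:
    return all(os.environ.get(var) for var in ENV_VARS.get(provider, []))

print(has_credentials("bedrock"))  # True only when all three AWS variables are set
```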