versionhq 1.2.4.3__tar.gz → 1.2.4.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/PKG-INFO +4 -4
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/README.md +3 -3
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/llm/index.md +5 -3
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/task/evaluation.md +1 -1
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/task/reference.md +25 -26
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/task/task-output.md +1 -1
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/task/task-strc-response.md +11 -15
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/quickstart.md +3 -3
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/pyproject.toml +1 -1
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/__init__.py +1 -1
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/_prompt/auto_feedback.py +1 -1
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/_prompt/model.py +13 -21
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/agent/inhouse_agents.py +2 -2
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/agent/model.py +12 -5
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/agent_network/formation.py +3 -3
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/agent_network/model.py +1 -1
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/llm/llm_vars.py +7 -4
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/llm/model.py +3 -1
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/task/model.py +41 -42
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/task_graph/draft.py +4 -4
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/task_graph/model.py +4 -4
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq.egg-info/PKG-INFO +4 -4
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/_prompt/auto_feedback_test.py +2 -2
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/_prompt/prompt_test.py +1 -1
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/agent/agent_test.py +10 -10
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/agent/doc_test.py +3 -1
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/agent_network/agent_network_test.py +15 -15
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/agent_network/doc_test.py +3 -2
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/doc_test.py +3 -3
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/llm/llm_connection_test.py +36 -4
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/llm/llm_test.py +1 -1
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/task/doc_eval_test.py +1 -1
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/task/doc_taskoutput_test.py +1 -1
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/task/doc_test.py +7 -6
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/task/eval_test.py +2 -2
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/task/task_test.py +3 -3
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/task_graph/doc_test.py +1 -1
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/usecase_test.py +4 -2
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/uv.lock +22 -22
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/.env.sample +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/.github/workflows/deploy_docs.yml +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/.github/workflows/publish.yml +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/.github/workflows/publish_testpypi.yml +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/.github/workflows/run_tests.yml +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/.github/workflows/security_check.yml +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/.gitignore +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/.pre-commit-config.yaml +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/.python-version +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/LICENSE +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/SECURITY.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/db/preprocess.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/CNAME +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/_logos/favicon.ico +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/_logos/logo192.png +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/agent/config.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/agent/index.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/agent/task-handling.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/agent-network/config.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/agent-network/form.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/agent-network/index.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/agent-network/ref.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/knowledge.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/memory.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/rag-tool.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/task/index.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/task/response-field.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/task/task-execution.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/task-graph/index.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/tool.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/index.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/stylesheets/main.css +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/tags.md +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/mkdocs.yml +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/requirements-dev.txt +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/requirements.txt +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/runtime.txt +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/setup.cfg +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/_prompt/constants.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/_utils/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/_utils/i18n.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/_utils/is_valid_url.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/_utils/llm_as_a_judge.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/_utils/logger.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/_utils/process_config.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/_utils/usage_metrics.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/_utils/vars.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/agent/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/agent/parser.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/agent/rpm_controller.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/agent_network/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/cli/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/clients/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/clients/customer/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/clients/customer/model.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/clients/product/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/clients/product/model.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/clients/workflow/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/clients/workflow/model.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/knowledge/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/knowledge/_utils.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/knowledge/embedding.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/knowledge/model.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/knowledge/source.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/knowledge/source_docling.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/knowledge/storage.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/llm/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/memory/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/memory/contextual_memory.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/memory/model.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/storage/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/storage/base.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/storage/ltm_sqlite_storage.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/storage/mem0_storage.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/storage/rag_storage.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/storage/task_output_storage.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/storage/utils.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/task/TEMPLATES/Description.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/task/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/task/evaluation.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/task/formatter.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/task/structured_response.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/task_graph/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/task_graph/colors.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/tool/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/tool/cache_handler.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/tool/composio_tool.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/tool/composio_tool_vars.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/tool/decorator.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/tool/model.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/tool/rag_tool.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/tool/tool_handler.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq.egg-info/SOURCES.txt +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq.egg-info/dependency_links.txt +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq.egg-info/requires.txt +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq.egg-info/top_level.txt +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/_sample/sample.csv +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/_sample/sample.json +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/_sample/sample.mp3 +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/_sample/screenshot.png +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/agent/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/agent_network/Prompts/Demo_test.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/agent_network/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/cli/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/clients/customer_test.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/clients/product_test.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/clients/workflow_test.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/conftest.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/formation_test.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/knowledge/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/knowledge/knowledge_test.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/knowledge/mock_report_compressed.pdf +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/llm/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/memory/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/memory/memory_test.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/task/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/task_graph/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/task_graph/task_graph_test.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/tool/__init__.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/tool/composio_test.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/tool/doc_test.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/tool/rag_tool_test.py +0 -0
- {versionhq-1.2.4.3 → versionhq-1.2.4.5}/tests/tool/tool_test.py +0 -0
{versionhq-1.2.4.3 → versionhq-1.2.4.5}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.2.4.3
+Version: 1.2.4.5
 Summary: Autonomous agent networks for task automation with multi-step reasoning.
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License

@@ -295,7 +295,7 @@ def dummy_func(message: str, test1: str, test2: list[str]) -> str:
 
 task = vhq.Task(
     description="Amazing task",
-    …
+    response_schema=CustomOutput,
     callback=dummy_func,
     callback_kwargs=dict(message="Hi! Here is the result: ")
 )

@@ -317,13 +317,13 @@ agent_b = vhq.Agent(role="Leader", llm="gemini-2.0")
 
 task_1 = vhq.Task(
     description="Analyze the client's business model.",
-    …
+    response_schema=[vhq.ResponseField(title="test1", data_type=str, required=True),],
     allow_delegation=True
 )
 
 task_2 = vhq.Task(
     description="Define a cohort.",
-    …
+    response_schema=[vhq.ResponseField(title="test1", data_type=int, required=True),],
     allow_delegation=False
 )
 
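The recurring change above is the new unified `response_schema` parameter, which this release documents as accepting either a Pydantic class or a list of `ResponseField` objects. A minimal sketch of the two styles, using only names that appear in the diff (`CustomOutput` is assumed to be a plain Pydantic model):

```python
import versionhq as vhq
from pydantic import BaseModel

class CustomOutput(BaseModel):
    test1: str
    test2: list[str]

# Style 1: a Pydantic class drives the structured output.
pydantic_task = vhq.Task(
    description="Amazing task",
    response_schema=CustomOutput,
)

# Style 2: a list of ResponseField objects defines a JSON schema.
json_task = vhq.Task(
    description="Define a cohort.",
    response_schema=[vhq.ResponseField(title="test1", data_type=int, required=True)],
)
```
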
{versionhq-1.2.4.3 → versionhq-1.2.4.5}/README.md

@@ -212,7 +212,7 @@ def dummy_func(message: str, test1: str, test2: list[str]) -> str:
 
 task = vhq.Task(
     description="Amazing task",
-    …
+    response_schema=CustomOutput,
     callback=dummy_func,
     callback_kwargs=dict(message="Hi! Here is the result: ")
 )

@@ -234,13 +234,13 @@ agent_b = vhq.Agent(role="Leader", llm="gemini-2.0")
 
 task_1 = vhq.Task(
     description="Analyze the client's business model.",
-    …
+    response_schema=[vhq.ResponseField(title="test1", data_type=str, required=True),],
     allow_delegation=True
 )
 
 task_2 = vhq.Task(
     description="Define a cohort.",
-    …
+    response_schema=[vhq.ResponseField(title="test1", data_type=int, required=True),],
     allow_delegation=False
 )
 
{versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/llm/index.md

@@ -17,16 +17,17 @@ You can specify a model and integration platform from the list. Else, we'll use
 ```python
 "openai": [
     "gpt-4.5-preview-2025-02-27",
+    "gpt-4",
     "gpt-4o",
     "gpt-4o-mini",
-    "gpt-4",
     "o1-mini",
     "o1-preview",
 ],
 "gemini": [
-    "gemini/gemini-1.5-flash",
-    "gemini/gemini-1.5-pro",
     "gemini/gemini-2.0-flash-exp",
+    "gemini/gemini-2.0-flash",
+    "gemini/gemini-2.0-flash-thinking-exp",
+    "gemini/gemini-2.0-flash-lite-preview-02-05",
 ],
 "anthropic": [
     "claude-3-7-sonnet-latest",

@@ -47,6 +48,7 @@ You can specify a model and integration platform from the list. Else, we'll use
     "bedrock/us.meta.llama3-2-1b-instruct-v1:0",
     "bedrock/us.meta.llama3-2-3b-instruct-v1:0",
     "bedrock/us.meta.llama3-2-11b-instruct-v1:0",
+    "bedrock/us.meta.llama3-2-90b-instruct-v1:0",
     "bedrock/mistral.mistral-7b-instruct-v0:2",
     "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
     "bedrock/mistral.mistral-large-2407-v1:0",
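Since this doc list mirrors `llm_vars.MODELS`, any of the newly listed ids should be usable directly as an agent's `llm`. A small sketch, assuming the id strings above are passed through verbatim:

```python
import versionhq as vhq

# One of the Gemini 2.0 ids added to the list in this release.
agent = vhq.Agent(role="Researcher", llm="gemini/gemini-2.0-flash")
```
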
{versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/task/evaluation.md

@@ -59,7 +59,7 @@ class CustomOutput(BaseModel):
 
 task = vhq.Task(
     description="Research a topic to teach a kid aged 6 about math.",
-    …
+    response_schema=CustomOutput,
     should_evaluate=True, # triggers evaluation
     eval_criteria=["uniquness", "audience fit",],
 
{versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/task/reference.md

@@ -2,30 +2,29 @@
 
 ### Variables
 
-| <div style="width:160px">**Variable**</div> | **Data Type** | **Default**
-| :--- | :--- | :---
-| **`id`** | UUID | uuid.uuid4()
-| **`name`** | Optional[str] | None
-| **`description`** | str | None
-| **`…`** | … | …
-| **`output`** | Optional[TaskOutput] | None | Stores `TaskOutput` object after the execution |
+| <div style="width:160px">**Variable**</div> | **Data Type** | **Default** | **Description** |
+| :--- | :--- | :--- | :--- |
+| **`id`** | UUID | uuid.uuid4() | Stores task `id` as an identifier. |
+| **`name`** | Optional[str] | None | Stores a task name (Inherited as `node` identifier if the task is dependent) |
+| **`description`** | str | None | Required field to store a concise task description |
+| **`response_schema`** | Optional[Type[BaseModel] \| List[ResponseField]] | None | Response schema for structured output. |
+| **`tools`** | Optional[List[BaseTool \| ToolSet]] | None | Tools, tool sets, or RAG tools |
+| **`can_use_agent_tools`** | bool | True | Whether to use the agent tools |
+| **`tool_res_as_final`** | bool | False | Whether to make a tool output as a final response from the agent |
+| **`image`** | Optional[str] | None | Absolute file path or URL to the image file |
+| **`file`** | Optional[str] | None | Absolute file path or URL to the file |
+| **`audio`** | Optional[str] | None | Absolute file path or URL to the audio file |
+| **`should_test_run`** | bool | False | Whether to turn on auto-feedback learning |
+| **`human`** | bool | False | Whether to ask human input during the task execution |
+| **`execution_type`** | TaskExecutionType | TaskExecutionType.SYNC | Sync or async execution |
+| **`allow_delegation`** | bool | False | Whether to allow the agent to delegate the task to another agent |
+| **`callback`** | Optional[Callable] | None | Callback function to be executed after LLM calling |
+| **`callback_kwargs`** | Optional[Dict[str, Any]] | dict() | Args for the callback function (if any) |
+| **`should_evaluate`** | bool | False | Whether to evaluate the task output using eval criteria |
+| **`eval_criteria`** | Optional[List[str]] | list() | Evaluation criteria given by the human client |
+| **`fsls`** | Optional[List[str]] | None | Examples of competitive and/or weak responses |
+| **`processed_agents`** | Set[str] | set() | Stores keys of agents that executed the task |
+| **`output`** | Optional[TaskOutput] | None | Stores `TaskOutput` object after the execution |
 
 
 ### Class Methods
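To make the new table concrete, here is a sketch of a `Task` exercising several of the documented fields; every keyword comes straight from the rows above, while the description, file path, and criteria values are placeholders:

```python
import versionhq as vhq

task = vhq.Task(
    description="Summarize the attached audio for a kid aged 6.",  # required field
    response_schema=[vhq.ResponseField(title="summary", data_type=str, required=True)],
    audio="/absolute/path/to/sample.mp3",  # absolute file path or URL, per the table
    should_evaluate=True,                  # evaluate the output against eval_criteria
    eval_criteria=["audience fit"],
)
```
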
@@ -68,8 +67,8 @@
 | :--- | :--- | :--- | :--- |
 | **`task_id`** | UUID | uuid.uuid4() | Stores task `id` as an identifier. |
 | **`raw`** | str | None | Stores response in plane text format. `None` or `""` when the model returned errors.|
-| **`json_dict`** | Dict[str, Any] | None | Stores response in JSON serializable dictionary.
-| **`pydantic`** | Type[`BaseModel`] | None | Populates …
+| **`json_dict`** | Dict[str, Any] | None | Stores response in JSON serializable dictionary. |
+| **`pydantic`** | Type[`BaseModel`] | None | Populates the given response schema in pydantic class |
 | **`tool_output`** | Optional[Any] | None | Stores results from the tools of the task or agents ONLY when `tool_res_as_final` set as `True`. |
 | **`callback_output`** | Optional[Any] | None | Stores results from callback functions if any. |
 | **`latency`** | Optional[float] | None | Stores job latency in milseconds. |
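A short sketch of reading the fields above after execution, assuming `task` is a `vhq.Task` that was given a `response_schema` (the flow is illustrative, not a verbatim doc excerpt):

```python
res = task.execute()

print(res.raw)        # plain-text response; "" or None on model errors
print(res.json_dict)  # JSON-serializable dictionary view of the response
print(res.pydantic)   # instance of the given response schema, when one was set
print(res.latency)    # job latency in milliseconds
```
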
{versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/task/task-output.md

@@ -27,7 +27,7 @@ def summarize_response(message: str, test1: str, test2: list[str]) -> str:
 
 task = vhq.Task(
     description="Research a topic to teach a kid aged 6 about math.",
-    …
+    response_schema=CustomOutput,
     tools=[dummy_tool],
     callback=summarize_response,
     callback_kwargs=dict(message="Hi! Here is the result: "),

{versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/core/task/task-strc-response.md

@@ -8,11 +8,11 @@ But you can choose to generate Pydantic class or specifig JSON object as respons
 
 <hr />
 
-…
+`[var]`<bold>`response_schema: Optional[Type[BaseModel] | List[ResponseField]] = None`</bold>
 
-…
+## 1. Pydantic
 
-Add a `custom Pydantic class`…
+Add a `custom Pydantic class` to the `response_schema` field to generate a structured response.
 
 The custom class can accept **one layer of a nested child** as you can see in the following code snippet:
 
@@ -44,7 +44,7 @@ class Demo(BaseModel):
 # 2. Define a task
 task = vhq.Task(
     description="generate random output that strictly follows the given format",
-    …
+    response_schema=Demo,
 )
 
 # 3. Execute

@@ -63,8 +63,6 @@ assert [
 
 ## 2. JSON
 
-`[var]`<bold>`response_fields: List[InstanceOf[ResponseField]] = None`</bold>
-
 Similar to Pydantic, JSON output structure can be defined by using a list of `ResponseField` objects.
 
 The following code snippet demonstrates how to use `ResponseField` to generate output with a maximum of one level of nesting.

@@ -75,16 +73,15 @@ Custom JSON outputs can accept **one layer of a nested child**.
 
 - `demo_response_fields` in the following case is identical to the previous Demo class, except that titles are specified for nested fields.
 
-- Agents generate JSON output by default, whether or not `…
+- Agents generate JSON output by default, whether or not `response_schema` is given.
 
-- However, …
+- However, `response_schema` is required to specify the key value sets.
 
 ```python
 import versionhq as vhq
 
 # 1. Define a list of ResponseField objects.
 demo_response_fields = [
-    # no nesting
     vhq.ResponseField(title="demo_1", data_type=int),
     vhq.ResponseField(title="demo_2", data_type=float),
     vhq.ResponseField(title="demo_3", data_type=str),

@@ -125,16 +122,15 @@ demo_response_fields = [
 # 2. Define a task
 task = vhq.Task(
     description="Output random values strictly following the data type defined in the given response format.",
-    …
+    response_schema=demo_response_fields
 )
 
 
 # 3. Execute
 res = task.execute()
 
-assert isinstance(res, vhq.TaskOutput)
-assert …
-assert [v and type(v) == task.response_fields[i].data_type for i, (k, v) in enumerate(res.json_dict.items())]
+assert isinstance(res, vhq.TaskOutput)
+assert [v and type(v) == task.response_schema[i].data_type for i, (k, v) in enumerate(res.json_dict.items())]
 ```
 
 * Ref. <a href="/core/task/response-field">`ResponseField`</a> class

@@ -172,7 +168,7 @@ class Sub(BaseModel):
 
 sub_task = vhq.Task(
     description="generate random values that strictly follows the given format.",
-    …
+    response_schema=Sub
 )
 sub_res = sub_task.execute()
 
@@ -190,7 +186,7 @@ def format_response(sub, main1, main2) -> Main:
 # 3. Executes
 main_task = vhq.Task(
     description="generate random values that strictly follows the given format.",
-    …
+    response_schema=Main,
     callback=format_response,
     callback_kwargs=dict(sub=sub_res.json_dict),
 )

{versionhq-1.2.4.3 → versionhq-1.2.4.5}/docs/quickstart.md

@@ -52,7 +52,7 @@ agent = vhq.Agent(role="demo manager")
 
 task = vhq.Task(
     description="Amazing task",
-    …
+    response_schema=CustomOutput,
     callback=dummy_func,
     callback_kwargs=dict(message="Hi! Here is the result: ")
 )

@@ -88,13 +88,13 @@ agent_b = vhq.Agent(role="agent b", goal="My amazing goals", llm="llm-of-your-choice")
 
 task_1 = vhq.Task(
     description="Analyze the client's business model.",
-    …
+    response_schema=[vhq.ResponseField(title="test1", data_type=str, required=True),],
     allow_delegation=True
 )
 
 task_2 = vhq.Task(
     description="Define a cohort.",
-    …
+    response_schema=[vhq.ResponseField(title="test1", data_type=int, required=True),],
     allow_delegation=False
 )
 
{versionhq-1.2.4.3 → versionhq-1.2.4.5}/pyproject.toml

@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__", "*.egg-info"]
 
 [project]
 name = "versionhq"
-version = "1.2.4.3"
+version = "1.2.4.5"
 authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
 description = "Autonomous agent networks for task automation with multi-step reasoning."
 readme = "README.md"

{versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/_prompt/auto_feedback.py

@@ -67,7 +67,7 @@ class PromptFeedbackGraph(TaskGraph):
         if not agents:
             return None
 
-        self.…
+        self.concl_response_schema = base_task.response_schema
         base_agent.callbacks.append(self._reflect)
         init_node = Node(task=base_task, assigned_to=base_agent)
         self.add_node(init_node)

{versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/_prompt/model.py

@@ -2,7 +2,7 @@
 from typing import Dict, List, Tuple, Any
 from textwrap import dedent
 
-from pydantic import InstanceOf
+from pydantic import InstanceOf, BaseModel
 
 from versionhq._utils import is_valid_url
 
@@ -25,34 +25,26 @@ class Prompt:
 
 
     def _draft_output_prompt(self) -> str:
-        """Drafts prompt for output…
+        """Drafts prompt for output format using `response_schema`."""
 
-        from versionhq.…
+        from versionhq.task.model import ResponseField
 
         output_prompt = ""
-        …
+        output_formats_to_follow = dict()
 
-        if self.task.…
-        …
-        …
-        …
-        …
+        if self.task.response_schema:
+            if isinstance(self.task.response_schema, list):
+                for item in self.task.response_schema:
+                    if isinstance(item, ResponseField):
+                        output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"
 
-        …
-        …
-        …
-        """
-        elif self.task.response_fields:
-            output_prompt, output_formats_to_follow = "", dict()
-            response_format = str(self.task._structure_response_format(model_provider=model_provider))
-            for item in self.task.response_fields:
-                if item:
-                    output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"
+            elif issubclass(self.task.response_schema, BaseModel):
+                for k, v in self.task.response_schema.model_fields.items():
+                    output_formats_to_follow[k] = f"<Return your answer in {v.annotation}>"
 
             output_prompt = f"""Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
-Response format: {response_format}
 Ref. Output image: {output_formats_to_follow}
-"""
+"""
         else:
            output_prompt = "You MUST return your response as a valid JSON serializable string, enclosed in double quotes. Use double quotes for all keys and string values. Do NOT use single quotes, trailing commas, or other non-standard JSON syntax."
 
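Read together, the new branches key the prompt's "output image" by `ResponseField.title` for list schemas and by Pydantic field name otherwise. A rough standalone rendering of that logic (not the library's actual method, which reads `self.task`):

```python
from typing import Any
from pydantic import BaseModel

def draft_output_formats(response_schema: Any) -> dict:
    formats = {}
    if isinstance(response_schema, list):
        # List schema: one entry per ResponseField-like item, keyed by title.
        for item in response_schema:
            formats[item.title] = f"<Return your answer in {item.data_type.__name__}>"
    elif isinstance(response_schema, type) and issubclass(response_schema, BaseModel):
        # Pydantic schema: one entry per model field, keyed by field name.
        for k, v in response_schema.model_fields.items():
            formats[k] = f"<Return your answer in {v.annotation}>"
    return formats
```
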
{versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/agent/inhouse_agents.py

@@ -30,7 +30,7 @@ vhq_task_evaluator = Agent(
 vhq_formation_planner = Agent(
     role="vhq-Formation Planner",
     goal="Plan a formation of agents based on the given task descirption.",
-    llm="gemini/gemini-2.0-flash…
+    llm="gemini/gemini-2.0-flash",
     llm_config=dict(top_p=0.8, topK=40, temperature=0.9),
     maxit=1,
     max_retry_limit=1,

@@ -46,7 +46,7 @@ vhq_formation_planner = Agent(
 vhq_agent_creator = Agent(
     role="vhq-Agent Creator",
     goal="build an agent that can handle the given task",
-    llm="gemini/gemini-2.0-flash…
+    llm="gemini/gemini-2.0-flash",
     maxit=1,
     max_retry_limit=1,
 )

{versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/agent/model.py

@@ -1,6 +1,6 @@
 import os
 import uuid
-from typing import Any, Dict, List, Optional, TypeVar, Callable, Type
+from typing import Any, Dict, List, Optional, TypeVar, Callable, Type, Tuple
 from typing_extensions import Self
 from dotenv import load_dotenv
 
@@ -475,9 +475,16 @@ class Agent(BaseModel):
         return self
 
 
-    def start(…
+    def start(
+        self,
+        context: Any = None,
+        tool_res_as_final: bool = False,
+        image: str = None,
+        file: str = None,
+        audio: str = None
+    ) -> Tuple[Any | None, Any | None]:
         """
-        Defines and executes a task…
+        Defines and executes a task, then returns TaskOutput object with the generated task.
         """
 
         if not self.role:

@@ -491,14 +498,14 @@ class Agent(BaseModel):
 
         task = Task(
             description=f"Generate a simple result in a sentence to achieve the goal: {self.goal if self.goal else self.role}. If needed, list up necessary steps in concise manner.",
-            …
+            response_schema=Output,
             tool_res_as_final=tool_res_as_final,
             image=image, #REFINEME - query memory/knowledge or self create
             file=file,
             audio=audio,
         )
         res = task.execute(agent=self, context=context)
-        return res
+        return res, task
 
 
     def execute_task(self, task, context: Optional[Any] = None, task_tools: Optional[List[Tool | ToolSet]] = list()) -> str:
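Because `start()` now returns the generated `Task` alongside the result, existing callers that bound a single value need to unpack a pair. A migration sketch (the role string is a placeholder):

```python
import versionhq as vhq

agent = vhq.Agent(role="demo manager")

# Before 1.2.4.5: res = agent.start()
res, task = agent.start()
if res:
    print(res.raw)           # TaskOutput from the auto-generated task
    print(task.description)  # the Task object itself is now returned too
```
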
{versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/agent_network/formation.py

@@ -74,8 +74,8 @@ def form_agent_network(
         leader_agent: str
 
     vhq_task = Task(
-        description=f"Design a team of specialized agents to fully automate the following task and …
-        …
+        description=f"Design a team of specialized agents to fully automate the following task and deliver the expected outcome. For each agent, define its role, task description, and expected outputs via the task with items in a list. Then specify the formation if the formation is not given. If you think SUPERVISING or HYBRID is the best formation, include a leader_agent role, else leave the leader_agent role blank.\nTask: {str(task)}\nExpected outcome: {prompt_expected_outcome}\nFormation: {prompt_formation}",
+        response_schema=Outcome
     )
 
     if agents:

@@ -120,7 +120,7 @@ def form_agent_network(
         except:
             pass
     output = create_model("Output", **fields) if fields else None
-    _task = Task(description=task_descriptions[i], …
+    _task = Task(description=task_descriptions[i], response_schema=output)
     created_tasks.append(_task)
 
     if len(created_tasks) <= len(created_agents):
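For callers, the entry point is unchanged; only the internally built planning and per-agent tasks now use `response_schema`. A sketch of invoking the planner, assuming `form_agent_network` is re-exported at package level with `task` and `expected_outcome` keyword arguments (inferred from the prompt variables above, not confirmed by this diff):

```python
import versionhq as vhq

# Hypothetical call: the keyword names are assumptions inferred from
# `task` and `prompt_expected_outcome` in the hunk above.
network = vhq.form_agent_network(
    task="Automate weekly sales reporting.",
    expected_outcome="A staffed agent network that drafts and reviews the report.",
)
```
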
{versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/agent_network/model.py

@@ -206,7 +206,7 @@ class AgentNetwork(BaseModel):
         for unassgined_task in unassigned_tasks:
             task = Task(
                 description=f"Based on the following task summary, draft an agent's role and goal in concise manner. Task summary: {unassgined_task.summary}",
-                …
+                response_schema=[
                     ResponseField(title="goal", data_type=str, required=True),
                     ResponseField(title="role", data_type=str, required=True),
                 ],

{versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/llm/llm_vars.py

@@ -27,8 +27,9 @@ MODELS = {
         "o1-preview",
     ],
     "gemini": [
-        "gemini/gemini-1.5-flash",
-        "gemini/gemini-1.5-pro",
+        "gemini/gemini-2.0-flash",
+        "gemini/gemini-2.0-flash-thinking-exp",
+        "gemini/gemini-2.0-flash-lite-preview-02-05",
         "gemini/gemini-2.0-flash-exp",
     ],
     "anthropic": [

@@ -75,6 +76,7 @@ ENV_VARS = {
     "huggingface": ["HUGGINGFACE_API_KEY", ],
     "bedrock": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
     "sagemaker": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
+    "azure_ai": ["AZURE_AI_API_KEY", "AZURE_AI_API_BASE"],
 }
 
 
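The new `azure_ai` entry tells the credential lookup which variables must be present before an Azure AI-hosted model can be called. A sketch of supplying them (the values are placeholders):

```python
import os

# Placeholders; substitute your own Azure AI credentials and endpoint.
os.environ["AZURE_AI_API_KEY"] = "<your-azure-ai-key>"
os.environ["AZURE_AI_API_BASE"] = "<your-azure-ai-endpoint>"
```
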
@@ -90,9 +92,10 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "o1-preview": 128000,
     "o1-mini": 128000,
 
-    "gemini/gemini-1.5-flash": 1048576,
-    "gemini/gemini-1.5-pro": 2097152,
     "gemini/gemini-2.0-flash-exp": 1048576,
+    "gemini/gemini-2.0-flash": 1048576,
+    "gemini/gemini-2.0-flash-thinking-exp": 1048576,
+    "gemini/gemini-2.0-flash-lite-preview-02-05": 1048576,
 
     "claude-3-7-sonnet-latest": 200000,
     "claude-3-5-haiku-latest": 200000,

{versionhq-1.2.4.3 → versionhq-1.2.4.5}/src/versionhq/llm/model.py

@@ -228,6 +228,9 @@ class LLM(BaseModel):
         if self.context_window_size and valid_config["max_tokens"] > self.context_window_size:
             valid_config["max_tokens"] = self.context_window_size
 
+        if "model" in valid_config:
+            self.model = valid_config.pop("model")
+
         self.llm_config = valid_config
         return valid_config
 
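With this guard, a `model` key supplied through `llm_config` is promoted to the `LLM.model` field and removed from the kwargs, rather than being forwarded as a duplicate `model` argument at call time. A sketch of the behavior it enables (the override value is hypothetical):

```python
import versionhq as vhq

agent = vhq.Agent(
    role="demo",
    llm="gemini/gemini-2.0-flash",
    # "model" inside llm_config now overrides LLM.model instead of
    # colliding with it when the completion call is built.
    llm_config=dict(temperature=0.9, model="gemini/gemini-2.0-flash-lite-preview-02-05"),
)
```
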
@@ -389,7 +392,6 @@ class LLM(BaseModel):
             self._usages.append(res["usage"])
             return res.choices[0].message.content
 
-
         except JSONSchemaValidationError as e:
             self._logger.log(level="error", message="Raw Response: {}".format(e.raw_response), color="red")
             raise e