versionhq 1.2.4.2__tar.gz → 1.2.4.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/PKG-INFO +4 -4
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/README.md +3 -3
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/llm/index.md +5 -3
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/task/evaluation.md +1 -1
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/task/reference.md +25 -26
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/task/task-output.md +1 -1
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/task/task-strc-response.md +11 -15
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/quickstart.md +3 -3
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/pyproject.toml +1 -1
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/__init__.py +1 -1
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/_prompt/auto_feedback.py +1 -1
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/_prompt/model.py +13 -21
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/_utils/__init__.py +1 -0
- versionhq-1.2.4.5/src/versionhq/_utils/usage_metrics.py +72 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/agent/inhouse_agents.py +2 -2
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/agent/model.py +18 -8
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/agent_network/formation.py +11 -26
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/agent_network/model.py +1 -2
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/llm/llm_vars.py +7 -4
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/llm/model.py +6 -7
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/storage/task_output_storage.py +2 -2
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/task/model.py +81 -78
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/task_graph/draft.py +5 -5
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/task_graph/model.py +42 -38
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq.egg-info/PKG-INFO +4 -4
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/_prompt/auto_feedback_test.py +3 -3
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/_prompt/prompt_test.py +1 -1
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/agent/agent_test.py +11 -10
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/agent/doc_test.py +3 -1
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/agent_network/agent_network_test.py +18 -17
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/agent_network/doc_test.py +3 -2
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/doc_test.py +3 -3
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/llm/llm_connection_test.py +36 -4
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/llm/llm_test.py +1 -1
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/task/doc_eval_test.py +1 -1
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/task/doc_taskoutput_test.py +1 -2
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/task/doc_test.py +16 -11
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/task/eval_test.py +3 -4
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/task/task_test.py +3 -3
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/task_graph/doc_test.py +6 -1
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/usecase_test.py +4 -2
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/uv.lock +125 -101
- versionhq-1.2.4.2/src/versionhq/_utils/usage_metrics.py +0 -55
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/.env.sample +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/.github/workflows/deploy_docs.yml +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/.github/workflows/publish.yml +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/.github/workflows/publish_testpypi.yml +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/.github/workflows/run_tests.yml +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/.github/workflows/security_check.yml +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/.gitignore +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/.pre-commit-config.yaml +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/.python-version +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/LICENSE +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/SECURITY.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/db/preprocess.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/CNAME +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/_logos/favicon.ico +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/_logos/logo192.png +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/agent/config.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/agent/index.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/agent/task-handling.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/agent-network/config.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/agent-network/form.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/agent-network/index.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/agent-network/ref.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/knowledge.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/memory.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/rag-tool.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/task/index.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/task/response-field.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/task/task-execution.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/task-graph/index.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/core/tool.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/index.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/stylesheets/main.css +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/docs/tags.md +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/mkdocs.yml +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/requirements-dev.txt +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/requirements.txt +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/runtime.txt +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/setup.cfg +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/_prompt/constants.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/_utils/i18n.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/_utils/is_valid_url.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/_utils/llm_as_a_judge.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/_utils/logger.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/_utils/process_config.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/_utils/vars.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/agent/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/agent/parser.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/agent/rpm_controller.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/agent_network/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/cli/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/clients/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/clients/customer/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/clients/customer/model.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/clients/product/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/clients/product/model.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/clients/workflow/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/clients/workflow/model.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/knowledge/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/knowledge/_utils.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/knowledge/embedding.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/knowledge/model.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/knowledge/source.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/knowledge/source_docling.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/knowledge/storage.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/llm/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/memory/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/memory/contextual_memory.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/memory/model.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/storage/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/storage/base.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/storage/ltm_sqlite_storage.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/storage/mem0_storage.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/storage/rag_storage.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/storage/utils.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/task/TEMPLATES/Description.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/task/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/task/evaluation.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/task/formatter.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/task/structured_response.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/task_graph/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/task_graph/colors.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/tool/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/tool/cache_handler.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/tool/composio_tool.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/tool/composio_tool_vars.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/tool/decorator.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/tool/model.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/tool/rag_tool.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq/tool/tool_handler.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq.egg-info/SOURCES.txt +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq.egg-info/dependency_links.txt +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq.egg-info/requires.txt +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/src/versionhq.egg-info/top_level.txt +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/_sample/sample.csv +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/_sample/sample.json +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/_sample/sample.mp3 +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/_sample/screenshot.png +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/agent/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/agent_network/Prompts/Demo_test.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/agent_network/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/cli/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/clients/customer_test.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/clients/product_test.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/clients/workflow_test.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/conftest.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/formation_test.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/knowledge/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/knowledge/knowledge_test.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/knowledge/mock_report_compressed.pdf +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/llm/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/memory/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/memory/memory_test.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/task/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/task_graph/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/task_graph/task_graph_test.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/tool/__init__.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/tool/composio_test.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/tool/doc_test.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/tool/rag_tool_test.py +0 -0
- {versionhq-1.2.4.2 → versionhq-1.2.4.5}/tests/tool/tool_test.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.2.4.2
+Version: 1.2.4.5
 Summary: Autonomous agent networks for task automation with multi-step reasoning.
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -295,7 +295,7 @@ def dummy_func(message: str, test1: str, test2: list[str]) -> str:
 
 task = vhq.Task(
     description="Amazing task",
-
+    response_schema=CustomOutput,
     callback=dummy_func,
     callback_kwargs=dict(message="Hi! Here is the result: ")
 )
@@ -317,13 +317,13 @@ agent_b = vhq.Agent(role="Leader", llm="gemini-2.0")
 
 task_1 = vhq.Task(
     description="Analyze the client's business model.",
-
+    response_schema=[vhq.ResponseField(title="test1", data_type=str, required=True),],
     allow_delegation=True
 )
 
 task_2 = vhq.Task(
     description="Define a cohort.",
-
+    response_schema=[vhq.ResponseField(title="test1", data_type=int, required=True),],
     allow_delegation=False
 )
 
@@ -212,7 +212,7 @@ def dummy_func(message: str, test1: str, test2: list[str]) -> str:
 
 task = vhq.Task(
     description="Amazing task",
-
+    response_schema=CustomOutput,
     callback=dummy_func,
     callback_kwargs=dict(message="Hi! Here is the result: ")
 )
@@ -234,13 +234,13 @@ agent_b = vhq.Agent(role="Leader", llm="gemini-2.0")
 
 task_1 = vhq.Task(
     description="Analyze the client's business model.",
-
+    response_schema=[vhq.ResponseField(title="test1", data_type=str, required=True),],
     allow_delegation=True
 )
 
 task_2 = vhq.Task(
     description="Define a cohort.",
-
+    response_schema=[vhq.ResponseField(title="test1", data_type=int, required=True),],
     allow_delegation=False
 )
 
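The README hunks above move the task's output schema to a single `response_schema` parameter that accepts either a Pydantic model or a list of `vhq.ResponseField` objects. A minimal sketch of the new call shape, assuming a provider API key is configured; the schema mirrors the `CustomOutput` used in the README snippet and its exact fields are assumptions:

```python
import versionhq as vhq
from pydantic import BaseModel


class CustomOutput(BaseModel):
    # Illustrative one-layer schema, modeled on the README's CustomOutput.
    test1: str
    test2: list[str]


task = vhq.Task(
    description="Amazing task",
    response_schema=CustomOutput,  # replaces the older schema parameters as of 1.2.4.5
)

# res = task.execute()  # returns a vhq.TaskOutput; res.pydantic carries the populated schema
```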
@@ -17,16 +17,17 @@ You can specify a model and integration platform from the list. Else, we'll use
 ```python
 "openai": [
     "gpt-4.5-preview-2025-02-27",
+    "gpt-4",
     "gpt-4o",
     "gpt-4o-mini",
-    "gpt-4",
     "o1-mini",
     "o1-preview",
 ],
 "gemini": [
-    "gemini/gemini-1.5-flash",
-    "gemini/gemini-1.5-pro",
     "gemini/gemini-2.0-flash-exp",
+    "gemini/gemini-2.0-flash",
+    "gemini/gemini-2.0-flash-thinking-exp",
+    "gemini/gemini-2.0-flash-lite-preview-02-05",
 ],
 "anthropic": [
     "claude-3-7-sonnet-latest",
@@ -47,6 +48,7 @@ You can specify a model and integration platform from the list. Else, we'll use
     "bedrock/us.meta.llama3-2-1b-instruct-v1:0",
     "bedrock/us.meta.llama3-2-3b-instruct-v1:0",
     "bedrock/us.meta.llama3-2-11b-instruct-v1:0",
+    "bedrock/us.meta.llama3-2-90b-instruct-v1:0",
     "bedrock/mistral.mistral-7b-instruct-v0:2",
     "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
     "bedrock/mistral.mistral-large-2407-v1:0",
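As context for the updated model list, any of these strings can be passed straight to an agent's `llm` field. A minimal sketch, assuming the matching provider key (e.g. `GEMINI_API_KEY`) is available in the environment; role and goal strings are illustrative:

```python
import versionhq as vhq

# The model string comes from the list above; provider credentials are assumed
# to be configured via environment variables.
agent = vhq.Agent(
    role="Analyst",
    goal="Summarize the client's business model",
    llm="gemini/gemini-2.0-flash",
)
```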
@@ -59,7 +59,7 @@ class CustomOutput(BaseModel):
 
 task = vhq.Task(
     description="Research a topic to teach a kid aged 6 about math.",
-
+    response_schema=CustomOutput,
     should_evaluate=True, # triggers evaluation
     eval_criteria=["uniquness", "audience fit",],
 
@@ -2,30 +2,29 @@
 
 ### Variables
 
-| <div style="width:160px">**Variable**</div> | **Data Type** | **Default**
-| :--- | :--- | :---
-| **`id`** | UUID | uuid.uuid4()
-| **`name`** | Optional[str] | None
-| **`description`** | str | None
-| **`
-| **`
-| **`
-| **`
-| **`
-| **`
-| **`
-| **`
-| **`
-| **`
-| **`
-| **`
-| **`
-| **`
-| **`
-| **`
-| **`
-| **`
-| **`output`** | Optional[TaskOutput] | None | Stores `TaskOutput` object after the execution |
+| <div style="width:160px">**Variable**</div> | **Data Type** | **Default** | **Description** |
+| :--- | :--- | :--- | :--- |
+| **`id`** | UUID | uuid.uuid4() | Stores task `id` as an identifier. |
+| **`name`** | Optional[str] | None | Stores a task name (Inherited as `node` identifier if the task is dependent) |
+| **`description`** | str | None | Required field to store a concise task description |
+| **`response_schema`** | Optional[Type[BaseModel] \| List[ResponseField]] | None | Response schema for structured output. |
+| **`tools`** | Optional[List[BaseTool \| ToolSet]] | None | Tools, tool sets, or RAG tools |
+| **`can_use_agent_tools`** | bool | True | Whether to use the agent tools |
+| **`tool_res_as_final`** | bool | False | Whether to make a tool output as a final response from the agent |
+| **`image`** | Optional[str] | None | Absolute file path or URL to the image file |
+| **`file`** | Optional[str] | None | Absolute file path or URL to the file |
+| **`audio`** | Optional[str] | None | Absolute file path or URL to the audio file |
+| **`should_test_run`** | bool | False | Whether to turn on auto-feedback learning |
+| **`human`** | bool | False | Whether to ask human input during the task execution |
+| **`execution_type`** | TaskExecutionType | TaskExecutionType.SYNC | Sync or async execution |
+| **`allow_delegation`** | bool | False | Whether to allow the agent to delegate the task to another agent |
+| **`callback`** | Optional[Callable] | None | Callback function to be executed after LLM calling |
+| **`callback_kwargs`** | Optional[Dict[str, Any]] | dict() | Args for the callback function (if any)|
+| **`should_evaluate`** | bool | False | Whether to evaluate the task output using eval criteria |
+| **`eval_criteria`** | Optional[List[str]] | list() | Evaluation criteria given by the human client |
+| **`fsls`** | Optional[List[str]] | None | Examples of competitive and/or weak responses |
+| **`processed_agents`** | Set[str] | set() | Stores keys of agents that executed the task |
+| **`output`** | Optional[TaskOutput] | None | Stores `TaskOutput` object after the execution |
 
 
 ### Class Methods
@@ -68,8 +67,8 @@
 | :--- | :--- | :--- | :--- |
 | **`task_id`** | UUID | uuid.uuid4() | Stores task `id` as an identifier. |
 | **`raw`** | str | None | Stores response in plane text format. `None` or `""` when the model returned errors.|
-| **`json_dict`** | Dict[str, Any] | None | Stores response in JSON serializable dictionary.
-| **`pydantic`** | Type[`BaseModel`] | None | Populates
+| **`json_dict`** | Dict[str, Any] | None | Stores response in JSON serializable dictionary. |
+| **`pydantic`** | Type[`BaseModel`] | None | Populates the given response schema in pydantic class |
 | **`tool_output`** | Optional[Any] | None | Stores results from the tools of the task or agents ONLY when `tool_res_as_final` set as `True`. |
 | **`callback_output`** | Optional[Any] | None | Stores results from callback functions if any. |
 | **`latency`** | Optional[float] | None | Stores job latency in milseconds. |
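Putting the `TaskOutput` reference above into practice, a short sketch of reading the documented fields after execution; it assumes a configured LLM key and the description string is illustrative:

```python
import versionhq as vhq

task = vhq.Task(description="Return a short summary of the topic in JSON.")
res = task.execute()  # res is a vhq.TaskOutput

assert isinstance(res, vhq.TaskOutput)
print(res.raw)        # plain-text response; None or "" when the model returned errors
print(res.json_dict)  # JSON-serializable dictionary form of the response
print(res.latency)    # job latency in milliseconds
```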
@@ -27,7 +27,7 @@ def summarize_response(message: str, test1: str, test2: list[str]) -> str:
 
 task = vhq.Task(
     description="Research a topic to teach a kid aged 6 about math.",
-
+    response_schema=CustomOutput,
     tools=[dummy_tool],
     callback=summarize_response,
     callback_kwargs=dict(message="Hi! Here is the result: "),
@@ -8,11 +8,11 @@ But you can choose to generate Pydantic class or specifig JSON object as respons
 
 <hr />
 
-
+`[var]`<bold>`response_schema: Optional[Type[BaseModel] | List[ResponseField]] = None`</bold>
 
-
+## 1. Pydantic
 
-Add a `custom Pydantic class`
+Add a `custom Pydantic class` to the `response_schema` field to generate a structured response.
 
 The custom class can accept **one layer of a nested child** as you can see in the following code snippet:
 
@@ -44,7 +44,7 @@ class Demo(BaseModel):
 # 2. Define a task
 task = vhq.Task(
     description="generate random output that strictly follows the given format",
-
+    response_schema=Demo,
 )
 
 # 3. Execute
@@ -63,8 +63,6 @@ assert [
 
 ## 2. JSON
 
-`[var]`<bold>`response_fields: List[InstanceOf[ResponseField]] = None`</bold>
-
 Similar to Pydantic, JSON output structure can be defined by using a list of `ResponseField` objects.
 
 The following code snippet demonstrates how to use `ResponseField` to generate output with a maximum of one level of nesting.
@@ -75,16 +73,15 @@ Custom JSON outputs can accept **one layer of a nested child**.
 
 - `demo_response_fields` in the following case is identical to the previous Demo class, except that titles are specified for nested fields.
 
-- Agents generate JSON output by default, whether or not `
+- Agents generate JSON output by default, whether or not `response_schema` is given.
 
-- However,
+- However, `response_schema` is required to specify the key value sets.
 
 ```python
 import versionhq as vhq
 
 # 1. Define a list of ResponseField objects.
 demo_response_fields = [
-    # no nesting
     vhq.ResponseField(title="demo_1", data_type=int),
     vhq.ResponseField(title="demo_2", data_type=float),
     vhq.ResponseField(title="demo_3", data_type=str),
@@ -125,16 +122,15 @@ demo_response_fields = [
 # 2. Define a task
 task = vhq.Task(
     description="Output random values strictly following the data type defined in the given response format.",
-
+    response_schema=demo_response_fields
 )
 
 
 # 3. Execute
 res = task.execute()
 
-assert isinstance(res, vhq.TaskOutput)
-assert
-assert [v and type(v) == task.response_fields[i].data_type for i, (k, v) in enumerate(res.json_dict.items())]
+assert isinstance(res, vhq.TaskOutput)
+assert [v and type(v) == task.response_schema[i].data_type for i, (k, v) in enumerate(res.json_dict.items())]
 ```
 
 * Ref. <a href="/core/task/response-field">`ResponseField`</a> class
@@ -172,7 +168,7 @@ class Sub(BaseModel):
 
 sub_task = vhq.Task(
     description="generate random values that strictly follows the given format.",
-
+    response_schema=Sub
 )
 sub_res = sub_task.execute()
 
@@ -190,7 +186,7 @@ def format_response(sub, main1, main2) -> Main:
 # 3. Executes
 main_task = vhq.Task(
     description="generate random values that strictly follows the given format.",
-
+    response_schema=Main,
     callback=format_response,
     callback_kwargs=dict(sub=sub_res.json_dict),
 )
@@ -52,7 +52,7 @@ agent = vhq.Agent(role="demo manager")
 
 task = vhq.Task(
     description="Amazing task",
-
+    response_schema=CustomOutput,
     callback=dummy_func,
     callback_kwargs=dict(message="Hi! Here is the result: ")
 )
@@ -88,13 +88,13 @@ agent_b = vhq.Agent(role="agent b", goal="My amazing goals", llm="llm-of-your-ch
 
 task_1 = vhq.Task(
     description="Analyze the client's business model.",
-
+    response_schema=[vhq.ResponseField(title="test1", data_type=str, required=True),],
     allow_delegation=True
 )
 
 task_2 = vhq.Task(
     description="Define a cohort.",
-
+    response_schema=[vhq.ResponseField(title="test1", data_type=int, required=True),],
     allow_delegation=False
 )
 
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__", "*.egg-info"]
 
 [project]
 name = "versionhq"
-version = "1.2.4.2"
+version = "1.2.4.5"
 authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
 description = "Autonomous agent networks for task automation with multi-step reasoning."
 readme = "README.md"
@@ -67,7 +67,7 @@ class PromptFeedbackGraph(TaskGraph):
         if not agents:
             return None
 
-        self.
+        self.concl_response_schema = base_task.response_schema
         base_agent.callbacks.append(self._reflect)
         init_node = Node(task=base_task, assigned_to=base_agent)
         self.add_node(init_node)
@@ -2,7 +2,7 @@
 from typing import Dict, List, Tuple, Any
 from textwrap import dedent
 
-from pydantic import InstanceOf
+from pydantic import InstanceOf, BaseModel
 
 from versionhq._utils import is_valid_url
 
@@ -25,34 +25,26 @@ class Prompt:
 
 
     def _draft_output_prompt(self) -> str:
-        """Drafts prompt for output
+        """Drafts prompt for output format using `response_schema`."""
 
-        from versionhq.
+        from versionhq.task.model import ResponseField
 
         output_prompt = ""
-
+        output_formats_to_follow = dict()
 
-        if self.task.
-
-
-
-
+        if self.task.response_schema:
+            if isinstance(self.task.response_schema, list):
+                for item in self.task.response_schema:
+                    if isinstance(item, ResponseField):
+                        output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"
 
-
-
-
-            """
-        elif self.task.response_fields:
-            output_prompt, output_formats_to_follow = "", dict()
-            response_format = str(self.task._structure_response_format(model_provider=model_provider))
-            for item in self.task.response_fields:
-                if item:
-                    output_formats_to_follow[item.title] = f"<Return your answer in {item.data_type.__name__}>"
+            elif issubclass(self.task.response_schema, BaseModel):
+                for k, v in self.task.response_schema.model_fields.items():
+                    output_formats_to_follow[k] = f"<Return your answer in {v.annotation}>"
 
             output_prompt = f"""Your response MUST be a valid JSON string that strictly follows the response format. Use double quotes for all keys and string values. Do not use single quotes, trailing commas, or any other non-standard JSON syntax.
-Response format: {response_format}
 Ref. Output image: {output_formats_to_follow}
-            """
+            """
         else:
             output_prompt = "You MUST return your response as a valid JSON serializable string, enclosed in double quotes. Use double quotes for all keys and string values. Do NOT use single quotes, trailing commas, or other non-standard JSON syntax."
 
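For readers skimming the hunk above, this is the shape of the output-format hint the rewritten `_draft_output_prompt` derives from `response_schema`. The sketch below reproduces that branch logic outside the `Prompt` class so it can run on its own; it assumes Pydantic v2 and uses an illustrative schema and helper name that are not part of the package:

```python
from pydantic import BaseModel


class Demo(BaseModel):
    # Illustrative schema; not part of the package.
    demo_1: int
    demo_2: str


def draft_output_formats(response_schema) -> dict:
    """Mirror of the new branch: walk model_fields for a Pydantic schema,
    or titles/data types for a list of ResponseField-like objects."""
    formats = {}
    if isinstance(response_schema, list):
        for item in response_schema:
            formats[item.title] = f"<Return your answer in {item.data_type.__name__}>"
    elif isinstance(response_schema, type) and issubclass(response_schema, BaseModel):
        for k, v in response_schema.model_fields.items():
            formats[k] = f"<Return your answer in {v.annotation}>"
    return formats


print(draft_output_formats(Demo))
# {'demo_1': "<Return your answer in <class 'int'>>", 'demo_2': "<Return your answer in <class 'str'>>"}
```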
@@ -2,3 +2,4 @@ from versionhq._utils.logger import Logger
 from versionhq._utils.process_config import process_config
 from versionhq._utils.vars import KNOWLEDGE_DIRECTORY, MAX_FILE_NAME_LENGTH
 from versionhq._utils.is_valid_url import is_valid_url
+from versionhq._utils.usage_metrics import UsageMetrics, ErrorType
@@ -0,0 +1,72 @@
+import uuid
+import enum
+import datetime
+from typing import Dict, List
+from typing_extensions import Self
+
+from pydantic import BaseModel, UUID4, InstanceOf
+
+
+class ErrorType(enum.Enum):
+    FORMAT = 1
+    TOOL = 2
+    API = 3
+    OVERFITTING = 4
+    HUMAN_INTERACTION = 5
+
+
+class UsageMetrics(BaseModel):
+    """A Pydantic model to manage token usage, errors, job latency."""
+
+    id: UUID4 = uuid.uuid4() # stores task id or task graph id
+    total_tokens: int = 0
+    prompt_tokens: int = 0
+    completion_tokens: int = 0
+    successful_requests: int = 0
+    total_errors: int = 0
+    error_breakdown: Dict[ErrorType, int] = dict()
+    latency: float = 0.0 # in ms
+
+    def record_token_usage(self, token_usages: List[Dict[str, int]]) -> None:
+        """Records usage metrics from the raw response of the model."""
+
+        if token_usages:
+            for item in token_usages:
+                self.total_tokens += int(item["total_tokens"]) if "total_tokens" in item else 0
+                self.completion_tokens += int(item["completion_tokens"]) if "completion_tokens" in item else 0
+                self.prompt_tokens += int(item["prompt_tokens"]) if "prompt_tokens" in item else 0
+
+
+    def record_errors(self, type: ErrorType = None) -> None:
+        self.total_errors += 1
+        if type:
+            if type in self.error_breakdown:
+                self.error_breakdown[type] += 1
+            else:
+                self.error_breakdown[type] = 1
+
+
+    def record_latency(self, start_dt: datetime.datetime, end_dt: datetime.datetime) -> None:
+        self.latency += round((end_dt - start_dt).total_seconds() * 1000, 3)
+
+
+    def aggregate(self, metrics: InstanceOf["UsageMetrics"]) -> Self:
+        if not metrics:
+            return self
+
+        self.total_tokens += metrics.total_tokens if metrics.total_tokens else 0
+        self.prompt_tokens += metrics.prompt_tokens if metrics.prompt_tokens else 0
+        self.completion_tokens += metrics.completion_tokens if metrics.completion_tokens else 0
+        self.successful_requests += metrics.successful_requests if metrics.successful_requests else 0
+        self.total_errors += metrics.total_errors if metrics.total_errors else 0
+        self.latency += metrics.latency if metrics.latency else 0.0
+        self.latency = round(self.latency, 3)
+
+        if metrics.error_breakdown:
+            for k, v in metrics.error_breakdown.items():
+                if self.error_breakdown and k in self.error_breakdown:
+                    self.error_breakdown[k] += int(v)
+                else:
+                    self.error_breakdown.update({ k: v })
+
+        return self
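Since `UsageMetrics` above is a plain Pydantic model, it can be exercised without any LLM call. A quick sketch using the import path added in `_utils/__init__.py`; all numbers are illustrative:

```python
import datetime
from versionhq._utils import UsageMetrics, ErrorType

metrics = UsageMetrics()

# Token counts as reported per model call.
metrics.record_token_usage(token_usages=[
    {"total_tokens": 120, "prompt_tokens": 80, "completion_tokens": 40},
])

# One API error and 250 ms of latency.
metrics.record_errors(type=ErrorType.API)
start = datetime.datetime.now()
metrics.record_latency(start_dt=start, end_dt=start + datetime.timedelta(milliseconds=250))

# Fold in metrics from another task (e.g. a sub-node in a task graph).
total = metrics.aggregate(metrics=UsageMetrics(total_tokens=30, successful_requests=1))
print(total.total_tokens, total.total_errors, total.latency)  # 150 1 250.0
```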
@@ -30,7 +30,7 @@ vhq_task_evaluator = Agent(
 vhq_formation_planner = Agent(
     role="vhq-Formation Planner",
     goal="Plan a formation of agents based on the given task descirption.",
-    llm="gemini/gemini-2.0-flash
+    llm="gemini/gemini-2.0-flash",
     llm_config=dict(top_p=0.8, topK=40, temperature=0.9),
     maxit=1,
     max_retry_limit=1,
@@ -46,7 +46,7 @@ vhq_formation_planner = Agent(
 vhq_agent_creator = Agent(
     role="vhq-Agent Creator",
     goal="build an agent that can handle the given task",
-    llm="gemini/gemini-2.0-flash
+    llm="gemini/gemini-2.0-flash",
     maxit=1,
     max_retry_limit=1,
 )
@@ -1,6 +1,6 @@
 import os
 import uuid
-from typing import Any, Dict, List, Optional, TypeVar, Callable, Type
+from typing import Any, Dict, List, Optional, TypeVar, Callable, Type, Tuple
 from typing_extensions import Self
 from dotenv import load_dotenv
 
@@ -11,7 +11,7 @@ from versionhq.agent.rpm_controller import RPMController
 from versionhq.tool.model import Tool, ToolSet, BaseTool
 from versionhq.knowledge.model import BaseKnowledgeSource, Knowledge
 from versionhq.memory.model import ShortTermMemory, LongTermMemory, UserMemory
-from versionhq._utils import Logger, process_config, is_valid_url
+from versionhq._utils import Logger, process_config, is_valid_url, ErrorType
 
 
 load_dotenv(override=True)
@@ -373,16 +373,17 @@ class Agent(BaseModel):
 
             if tool_res_as_final:
                 raw_response = self.func_calling_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
-                task.
+                task._usage.record_token_usage(token_usages=self.func_calling_llm._usages)
             else:
                 raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)
-                task.
+                task._usage.record_token_usage(token_usages=self.llm._usages)
 
             task_execution_counter += 1
             Logger(**self._logger_config, filename=self.key).log(level="info", message=f"Agent response: {raw_response}", color="green")
             return raw_response
 
         except Exception as e:
+            task._usage.record_errors(type=ErrorType.API)
             Logger(**self._logger_config, filename=self.key).log(level="error", message=f"An error occured. The agent will retry: {str(e)}", color="red")
 
         while not raw_response and task_execution_counter <= self.max_retry_limit:
@@ -474,9 +475,16 @@
         return self
 
 
-    def start(
+    def start(
+        self,
+        context: Any = None,
+        tool_res_as_final: bool = False,
+        image: str = None,
+        file: str = None,
+        audio: str = None
+    ) -> Tuple[Any | None, Any | None]:
         """
-        Defines and executes a task
+        Defines and executes a task, then returns TaskOutput object with the generated task.
         """
 
         if not self.role:
@@ -490,14 +498,14 @@
 
         task = Task(
             description=f"Generate a simple result in a sentence to achieve the goal: {self.goal if self.goal else self.role}. If needed, list up necessary steps in concise manner.",
-
+            response_schema=Output,
             tool_res_as_final=tool_res_as_final,
             image=image, #REFINEME - query memory/knowledge or self create
             file=file,
             audio=audio,
         )
         res = task.execute(agent=self, context=context)
-        return res
+        return res, task
 
 
     def execute_task(self, task, context: Optional[Any] = None, task_tools: Optional[List[Tool | ToolSet]] = list()) -> str:
@@ -526,6 +534,8 @@
                 tool_res_as_final=task.tool_res_as_final,
                 task=task
             )
+            if raw_response:
+                task._usage.successful_requests += 1
 
         except Exception as e:
             self._times_executed += 1
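Tying the agent-side changes together: `start()` now returns the generated `Task` alongside the output, and the task's `_usage` field carries the `UsageMetrics` recorded during the run. A minimal sketch, assuming a provider key is configured; the role and goal strings are illustrative:

```python
import versionhq as vhq

agent = vhq.Agent(role="Marketing Analyst", goal="Draft a one-sentence plan for the goal")

# start() returns an (output, task) tuple as of this release.
res, task = agent.start()

if res:
    print(res.raw)                            # the generated response
    print(task._usage.total_tokens,           # recorded via record_token_usage()
          task._usage.successful_requests)
```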
@@ -74,8 +74,8 @@ def form_agent_network(
         leader_agent: str
 
     vhq_task = Task(
-        description=f"Design a team of specialized agents to fully automate the following task and
-
+        description=f"Design a team of specialized agents to fully automate the following task and deliver the expected outcome. For each agent, define its role, task description, and expected outputs via the task with items in a list. Then specify the formation if the formation is not given. If you think SUPERVISING or HYBRID is the best formation, include a leader_agent role, else leave the leader_agent role blank.\nTask: {str(task)}\nExpected outcome: {prompt_expected_outcome}\nFormation: {prompt_formation}",
+        response_schema=Outcome
     )
 
     if agents:
@@ -93,10 +93,11 @@ def form_agent_network(
 
     network_tasks = []
    members = []
-    leader =
-
-
-
+    leader = res._fetch_value_of(key="leader_agent")
+    agent_roles = res._fetch_value_of(key="agent_roles")
+    created_agents = [Agent(role=str(item), goal=str(item)) for item in agent_roles] if agent_roles else []
+    task_descriptions = res._fetch_value_of(key="task_descriptions")
+    task_outcomes = res._fetch_value_of(key="task_outcomes")
 
     if agents:
         for i, agent in enumerate(created_agents):
@@ -108,9 +109,9 @@ def form_agent_network(
 
     created_tasks = []
 
-    if
-        for i, item in enumerate(
-            if len(
+    if task_outcomes:
+        for i, item in enumerate(task_outcomes):
+            if len(task_descriptions) > i and task_descriptions[i]:
                 fields = {}
                 for ob in item:
                     try:
@@ -119,24 +120,9 @@ def form_agent_network(
                     except:
                         pass
                 output = create_model("Output", **fields) if fields else None
-                _task = Task(description=
+                _task = Task(description=task_descriptions[i], response_schema=output)
                 created_tasks.append(_task)
 
-    elif res.json_dict:
-        for i, item in enumerate(res["task_outcomes"]):
-            if len(res["task_descriptions"]) > i and res["task_descriptions"][i]:
-                fields = {}
-                for ob in item:
-                    try:
-                        field_name = str(ob).lower().split(":")[0].replace(" ", "_")[0: 16]
-                        fields[field_name] = (str, Field(default=None))
-                    except:
-                        pass
-                output = create_model("Output", **fields) if fields else None
-                _task = Task(description=res["task_descriptions"][i], pydantic_output=output)
-                created_tasks.append(_task)
-
-
     if len(created_tasks) <= len(created_agents):
         for i in range(len(created_tasks)):
             is_manager = False if not leader else bool(created_agents[i].role.lower() == leader.lower())
@@ -159,7 +145,6 @@
 
         network_tasks.extend(created_tasks[len(created_agents):len(created_tasks)])
 
-
     if _formation == Formation.SUPERVISING and not [member for member in members if member.is_manager]:
         role = leader if leader else "Leader"
         manager = Member(agent=Agent(role=role), is_manager=True)
|