versionhq 1.2.1.20__tar.gz → 1.2.1.22__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/PKG-INFO +1 -1
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/core/agent/ref.md +8 -0
- versionhq-1.2.1.22/docs/core/task/index.md +55 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/core/task/response-field.md +0 -5
- versionhq-1.2.1.22/docs/core/task/task-execution.md +177 -0
- versionhq-1.2.1.22/docs/core/task/task-ref.md +37 -0
- versionhq-1.2.1.22/docs/core/task/task-strc-response.md +202 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/mkdocs.yml +7 -3
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/pyproject.toml +1 -1
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/__init__.py +1 -1
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/agent/model.py +1 -1
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq.egg-info/PKG-INFO +1 -1
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq.egg-info/SOURCES.txt +3 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/uv.lock +1 -1
- versionhq-1.2.1.20/docs/core/task/index.md +0 -476
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/.env.sample +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/.github/workflows/deploy_docs.yml +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/.github/workflows/publish.yml +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/.github/workflows/publish_testpypi.yml +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/.github/workflows/run_tests.yml +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/.github/workflows/security_check.yml +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/.gitignore +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/.pre-commit-config.yaml +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/.python-version +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/LICENSE +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/README.md +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/SECURITY.md +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/db/preprocess.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/CNAME +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/_logos/favicon.ico +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/_logos/logo192.png +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/core/agent/config.md +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/core/agent/index.md +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/core/agent/task-handling.md +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/core/agent-network/index.md +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/core/llm/index.md +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/core/task/evaluation.md +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/core/task/task-output.md +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/core/task-graph/index.md +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/core/tool.md +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/index.md +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/quickstart.md +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/stylesheets/main.css +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/docs/tags.md +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/requirements-dev.txt +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/requirements.txt +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/runtime.txt +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/setup.cfg +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/_utils/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/_utils/i18n.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/_utils/logger.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/_utils/process_config.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/_utils/usage_metrics.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/_utils/vars.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/agent/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/agent/inhouse_agents.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/agent/parser.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/agent/rpm_controller.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/agent_network/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/agent_network/formation.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/agent_network/model.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/cli/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/clients/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/clients/customer/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/clients/customer/model.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/clients/product/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/clients/product/model.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/clients/workflow/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/clients/workflow/model.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/knowledge/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/knowledge/_utils.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/knowledge/embedding.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/knowledge/model.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/knowledge/source.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/knowledge/source_docling.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/knowledge/storage.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/llm/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/llm/llm_vars.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/llm/model.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/memory/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/memory/contextual_memory.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/memory/model.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/storage/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/storage/base.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/storage/ltm_sqlite_storage.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/storage/mem0_storage.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/storage/rag_storage.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/storage/task_output_storage.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/storage/utils.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/task/TEMPLATES/Description.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/task/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/task/evaluate.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/task/formatter.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/task/log_handler.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/task/model.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/task/structured_response.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/task_graph/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/task_graph/colors.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/task_graph/draft.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/task_graph/model.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/tool/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/tool/cache_handler.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/tool/composio_tool.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/tool/composio_tool_vars.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/tool/decorator.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/tool/model.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq/tool/tool_handler.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq.egg-info/dependency_links.txt +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq.egg-info/requires.txt +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/src/versionhq.egg-info/top_level.txt +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/agent/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/agent/agent_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/agent/doc_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/agent_network/Prompts/Demo_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/agent_network/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/agent_network/agent_network_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/cli/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/clients/customer_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/clients/product_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/clients/workflow_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/conftest.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/doc_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/formation_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/knowledge/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/knowledge/knowledge_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/knowledge/mock_report_compressed.pdf +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/llm/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/llm/llm_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/memory/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/memory/memory_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/task/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/task/doc_taskoutput_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/task/doc_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/task/llm_connection_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/task/task_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/task_graph/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/task_graph/doc_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/task_graph/task_graph_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/tool/__init__.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/tool/composio_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/tool/doc_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/tool/tool_test.py +0 -0
- {versionhq-1.2.1.20 → versionhq-1.2.1.22}/tests/usecase_test.py +0 -0
@@ -38,3 +38,11 @@
|
|
38
38
|
| **`update`** | **kwargs: Any | Self | Updates agents with given kwargs. Invalid keys will be ignored. |
|
39
39
|
| **`start`** | context: Any = None <br> tool_res_as_final: bool = False | `TaskOutput` \| None | Starts to operate the agent. |
|
40
40
|
| **`execute_task`** | task: [Task] <br> context: Any = None <br> task_tools: Optional[List[Tool \| ToolSet]] = list() | str | Returns response from the model in plain text format. |
|
41
|
+
|
42
|
+
|
43
|
+
|
44
|
+
## Properties
|
45
|
+
|
46
|
+
| <div style="width:120px">**Property**</div> | **Returns** | **Description** |
|
47
|
+
| :--- | :--- | :--- |
|
48
|
+
| **`key`** | str | Unique identifier of the agent using its ID and sanitized role. |
|
@@ -0,0 +1,55 @@
|
|
1
|
+
---
|
2
|
+
tags:
|
3
|
+
- Task Graph
|
4
|
+
---
|
5
|
+
|
6
|
+
# Task
|
7
|
+
|
8
|
+
<class>`class` versionhq.task.model.<bold>Task<bold></class>
|
9
|
+
|
10
|
+
A class to store and manage information for individual tasks, including their assignment to agents or agent networks, and dependencies via a node-based system that tracks conditions and status.
|
11
|
+
|
12
|
+
Ref. Node / Edge / <a href="/core/task-graph">TaskGraph</a> class
|
13
|
+
|
14
|
+
<hr />
|
15
|
+
|
16
|
+
## Quick Start
|
17
|
+
|
18
|
+
Create a task by defining its description in one simple sentence. The `description` will be used for task prompting later.
|
19
|
+
|
20
|
+
Each task will be assigned a unique ID as an identifier.
|
21
|
+
|
22
|
+
```python
|
23
|
+
import versionhq as vhq
|
24
|
+
|
25
|
+
task = vhq.Task(description="MY AMAZING TASK")
|
26
|
+
|
27
|
+
import uuid
|
28
|
+
assert uuid.UUID(str(task.id), version=4)
|
29
|
+
```
|
30
|
+
|
31
|
+
|
32
|
+
And you can simply execute the task by calling `.execute()` function.
|
33
|
+
|
34
|
+
```python
|
35
|
+
import versionhq as vhq
|
36
|
+
|
37
|
+
task = vhq.Task(description="MY AMAZING TASK")
|
38
|
+
res = task.execute()
|
39
|
+
|
40
|
+
assert isinstance(res, vhq.TaskOutput) # Generates TaskOutput object
|
41
|
+
assert res.raw and res.json # By default, TaskOutput object stores output in plain text and json formats.
|
42
|
+
assert task.processed_agents is not None # Agents will be automatically assigned to the given task.
|
43
|
+
```
|
44
|
+
|
45
|
+
<hr />
|
46
|
+
|
47
|
+
## Evaluating
|
48
|
+
|
49
|
+
`[var]`<bold>`should_evaluate: bool = False`</bold>
|
50
|
+
|
51
|
+
`[var]`<bold>`eval_criteria: Optional[List[str]] = list()`</bold>
|
52
|
+
|
53
|
+
You can turn on customized evaluations using the given criteria.
|
54
|
+
|
55
|
+
Refer <a href="/core/task/task-output">TaskOutput</a> class for details.
|
@@ -0,0 +1,177 @@
|
|
1
|
+
# Executing Task
|
2
|
+
|
3
|
+
|
4
|
+
## Prompting
|
5
|
+
|
6
|
+
`[class method]`<bold>`prompt(self, model_provider: str = None, context: Optional[Any] = None) -> str`</bold>
|
7
|
+
|
8
|
+
Prompts are generated automatically based on the task `description`, response format, context, agent `role`, and `goal`.
|
9
|
+
|
10
|
+
|
11
|
+
**Context**
|
12
|
+
|
13
|
+
The following snippet demonstrates how to add `context` to the prompt.
|
14
|
+
|
15
|
+
```python
|
16
|
+
import versionhq as vhq
|
17
|
+
|
18
|
+
sub_task_1 = vhq.Task(description="Run a sub demo part 1")
|
19
|
+
sub_res = sub_task_1.execute()
|
20
|
+
|
21
|
+
sub_task_2 = vhq.Task(description="Run a sub demo part 2")
|
22
|
+
|
23
|
+
task = vhq.Task(description="Run a main demo")
|
24
|
+
|
25
|
+
context = [sub_res, sub_task_2, "context to add in string"]
|
26
|
+
res = task.execute(context=context)
|
27
|
+
|
28
|
+
# Explicitly mentioned. `task.execute()` will trigger the following:
|
29
|
+
task_prompt = task._prompt(context=context)
|
30
|
+
|
31
|
+
assert sub_res.to_context_prompt() in task_prompt
|
32
|
+
assert sub_task_2.output and sub_task_2.output.to_context_prompt() in task_prompt # sub tasks' outputs are included in the task prompt.
|
33
|
+
assert "context to add in string" in task_prompt
|
34
|
+
assert res
|
35
|
+
```
|
36
|
+
|
37
|
+
Context can consist of `Task` objects, `TaskOutput` objects, plain text `strings`, or `lists` containing any of these.
|
38
|
+
|
39
|
+
In this scenario, `sub_task_2` executes before the main task. Its string output is then incorporated into the main task's context prompt on top of other context before the main task is executed.
|
40
|
+
|
41
|
+
<hr>
|
42
|
+
|
43
|
+
## Executing
|
44
|
+
|
45
|
+
**Agent delegation**
|
46
|
+
|
47
|
+
`[var]`<bold>`allow_delegation: bool = False`</bold>
|
48
|
+
|
49
|
+
You can assign another agent to complete the task:
|
50
|
+
|
51
|
+
```python
|
52
|
+
import versionhq as vhq
|
53
|
+
|
54
|
+
task = vhq.Task(
|
55
|
+
description="return the output following the given prompt.",
|
56
|
+
allow_delegation=True
|
57
|
+
)
|
58
|
+
task.execute()
|
59
|
+
|
60
|
+
assert task.output is not None
|
61
|
+
assert "vhq-Delegated-Agent" in task.processed_agents # delegated agent
|
62
|
+
assert task.delegations == 1
|
63
|
+
```
|
64
|
+
|
65
|
+
<hr>
|
66
|
+
|
67
|
+
**SYNC - ASYNC**
|
68
|
+
|
69
|
+
`[var]`<bold>`type: TaskExecutionType = TaskExecutionType.SYNC`</bold>
|
70
|
+
|
71
|
+
You can specify whether the task will be executed asynchronously.
|
72
|
+
|
73
|
+
```python
|
74
|
+
import versionhq as vhq
|
75
|
+
|
76
|
+
task = vhq.Task(
|
77
|
+
description="Return a word: 'test'",
|
78
|
+
type=vhq.TaskExecutionType.ASYNC # default: vhq.TaskExecutionType.SYNC
|
79
|
+
)
|
80
|
+
|
81
|
+
from unittest.mock import patch
|
82
|
+
with patch.object(vhq.Agent, "execute_task", return_value="test") as execute:
|
83
|
+
res = task.execute()
|
84
|
+
assert res.raw == "test"
|
85
|
+
execute.assert_called_once_with(task=task, context=None, task_tools=list())
|
86
|
+
```
|
87
|
+
|
88
|
+
<hr>
|
89
|
+
|
90
|
+
**Using tools**
|
91
|
+
|
92
|
+
`[var]`<bold>`tools: Optional[List[ToolSet | Tool | Any]] = None`</bold>
|
93
|
+
|
94
|
+
`[var]`<bold>`tool_res_as_final: bool = False`</bold>
|
95
|
+
|
96
|
+
|
97
|
+
Tasks can directly store tools explicitly called by the agent.
|
98
|
+
|
99
|
+
If the results from the tool should be the final results, set `tool_res_as_final` True.
|
100
|
+
|
101
|
+
This will allow the agent to store the tool results in the `tool_output` field of `TaskOutput` object.
|
102
|
+
|
103
|
+
|
104
|
+
```python
|
105
|
+
import versionhq as vhq
|
106
|
+
from typing import Callable
|
107
|
+
|
108
|
+
def random_func(message: str) -> str:
|
109
|
+
return message + "_demo"
|
110
|
+
|
111
|
+
tool = vhq.Tool(name="tool", func=random_func)
|
112
|
+
tool_set = vhq.ToolSet(tool=tool, kwargs=dict(message="empty func"))
|
113
|
+
task = vhq.Task(
|
114
|
+
description="execute the given tools",
|
115
|
+
tools=[tool_set,], # stores tools
|
116
|
+
tool_res_as_final=True, # stores tool results in TaskOutput object
|
117
|
+
)
|
118
|
+
|
119
|
+
res = task.execute()
|
120
|
+
assert res.tool_output == "empty func_demo"
|
121
|
+
```
|
122
|
+
|
123
|
+
Ref. <a href="/core/tool">Tool</a> class / <a href="/core/task/task-output">TaskOutput</a> class
|
124
|
+
|
125
|
+
<hr>
|
126
|
+
|
127
|
+
**Using agents' tools**
|
128
|
+
|
129
|
+
`[var]`<bold>`can_use_agent_tools: bool = True`</bold>
|
130
|
+
|
131
|
+
Tasks can explicitly stop/start using agent tools on top of the tools stored in the task object.
|
132
|
+
|
133
|
+
```python
|
134
|
+
import versionhq as vhq
|
135
|
+
|
136
|
+
simple_tool = vhq.Tool(name="simple tool", func=lambda x: "simple func")
|
137
|
+
agent = vhq.Agent(role="demo", goal="execute tools", tools=[simple_tool,])
|
138
|
+
task = vhq.Task(
|
139
|
+
description="execute tools",
|
140
|
+
can_use_agent_tools=True, # Flagged
|
141
|
+
tool_res_as_final=True
|
142
|
+
)
|
143
|
+
res = task.execute(agent=agent)
|
144
|
+
assert res.tool_output == "simple func"
|
145
|
+
```
|
146
|
+
|
147
|
+
<hr>
|
148
|
+
|
149
|
+
## Callback
|
150
|
+
|
151
|
+
`[var]`<bold>`callback: Optional[Callable] = None`</bold>
|
152
|
+
|
153
|
+
`[var]`<bold>`callback_kwargs: Optional[Dict[str, Any]] = dict()`</bold>
|
154
|
+
|
155
|
+
After executing the task, you can run a `callback` function with `callback_kwargs` and task output as parameters.
|
156
|
+
|
157
|
+
Callback results will be stored in the `callback_output` field of the `TaskOutput` object.
|
158
|
+
|
159
|
+
```python
|
160
|
+
import versionhq as vhq
|
161
|
+
|
162
|
+
def callback_func(condition: str, test1: str):
|
163
|
+
return f"Result: {test1}, condition added: {condition}"
|
164
|
+
|
165
|
+
task = vhq.Task(
|
166
|
+
description="return the output following the given prompt.",
|
167
|
+
callback=callback_func,
|
168
|
+
callback_kwargs=dict(condition="demo for pytest")
|
169
|
+
)
|
170
|
+
res = task.execute()
|
171
|
+
|
172
|
+
assert res and isinstance(res, vhq.TaskOutput)
|
173
|
+
assert res.task_id is task.id
|
174
|
+
assert "demo for pytest" in res.callback_output
|
175
|
+
```
|
176
|
+
|
177
|
+
<hr>
|
@@ -0,0 +1,37 @@
|
|
1
|
+
## Variables `Task`
|
2
|
+
|
3
|
+
| <div style="width:160px">**Variable**</div> | **Data Type** | **Default** | **Nullable** | **Description** |
|
4
|
+
| :--- | :--- | :--- | :--- | :--- |
|
5
|
+
| **`id`** | UUID | uuid.uuid4() | False | Stores task `id` as an identifier. |
|
6
|
+
| **`name`** | Optional[str] | None | True | Stores a task name (Inherited as `node` identifier if the task is dependent) |
|
7
|
+
| **`description`** | str | None | False | Required field to store a concise task description |
|
8
|
+
| **`pydantic_output`** | Optional[Type[BaseModel]] | None | True | Stores pydantic custom output class for structured response |
|
9
|
+
| **`response_fields`** | Optional[List[ResponseField]] | list() | True | Stores JSON formats for structured response |
|
10
|
+
| **`tools`** | Optional[List[ToolSet | Tool | Any]] | None | True | Stores tools to be called when the agent executes the task. |
|
11
|
+
| **`can_use_agent_tools`** | bool | True | - | Whether to use the agent tools |
|
12
|
+
| **`tool_res_as_final`** | bool | False | - | Whether to make the tool response a final response from the agent |
|
13
|
+
| **`execution_type`** | TaskExecutionType | TaskExecutionType.SYNC | - | Sync or async execution |
|
14
|
+
| **`allow_delegation`** | bool | False | - | Whether to allow the agent to delegate the task to another agent |
|
15
|
+
| **`callback`** | Optional[Callable] | None | True | Callback function to be executed after LLM calling |
|
16
|
+
| **`callback_kwargs`** | Optional[Dict[str, Any]] | dict() | True | Args for the callback function (if any)|
|
17
|
+
| **`should_evaluate`** | bool | False | - | Whether to evaluate the task output using eval criteria |
|
18
|
+
| **`eval_criteria`** | Optional[List[str]] | list() | True | Evaluation criteria given by the human client |
|
19
|
+
| **`processed_agents`** | Set[str] | set() | True | [Ops] Stores roles of the agents executed the task |
|
20
|
+
| **`tool_errors`** | int | 0 | True | [Ops] Stores number of tool errors |
|
21
|
+
| **`delegation`** | int | 0 | True | [Ops] Stores number of agent delegations |
|
22
|
+
| **`output`** | Optional[TaskOutput] | None | True | [Ops] Stores `TaskOutput` object after the execution |
|
23
|
+
|
24
|
+
|
25
|
+
## Class Methods `Task`
|
26
|
+
|
27
|
+
| <div style="width:120px">**Method**</div> | <div style="width:300px">**Params**</div> | **Returns** | **Description** |
|
28
|
+
| :--- | :--- | :--- | :--- |
|
29
|
+
| **`execute`** | <p>type: TaskExecutionType = None<br>agent: Optional["vhq.Agent"] = None<br>context: Optional[Any] = None</p> | InstanceOf[`TaskOutput`] or None (error) | A main method to handle task execution. Auto-build an agent when the agent is not given. |
|
30
|
+
|
31
|
+
|
32
|
+
## Properties `Task`
|
33
|
+
|
34
|
+
| <div style="width:120px">**Property**</div> | **Returns** | **Description** |
|
35
|
+
| :--- | :--- | :--- |
|
36
|
+
| **`key`** | str | Returns task key based on its description and output format. |
|
37
|
+
| **`summary`** | str | Returns a summary of the task based on its id, description and tools. |
|
@@ -0,0 +1,202 @@
|
|
1
|
+
# Structured Response
|
2
|
+
|
3
|
+
By default, agents will generate plain text and JSON outputs, and store them in the `TaskOutput` object.
|
4
|
+
|
5
|
+
* Ref. <a href="/core/task/task-output">`TaskOutput`</a> class
|
6
|
+
|
7
|
+
But you can choose to generate a Pydantic class or a specific JSON object as the response.
|
8
|
+
|
9
|
+
<hr />
|
10
|
+
|
11
|
+
## 1. Pydantic
|
12
|
+
|
13
|
+
`[var]`<bold>`pydantic_output: Optional[Type[BaseModel]] = None`</bold>
|
14
|
+
|
15
|
+
Create and add a `custom Pydantic class` as a structured response format to the `pydantic_output` field.
|
16
|
+
|
17
|
+
The custom class can accept **one layer of a nested child** as you can see in the following code snippet:
|
18
|
+
|
19
|
+
```python
|
20
|
+
import versionhq as vhq
|
21
|
+
from pydantic import BaseModel
|
22
|
+
from typing import Any
|
23
|
+
|
24
|
+
|
25
|
+
# 1. Define Pydantic class with a description (optional), annotations and field names.
|
26
|
+
class Demo(BaseModel):
|
27
|
+
"""
|
28
|
+
A demo pydantic class to validate the outcome with various nested data types.
|
29
|
+
"""
|
30
|
+
demo_1: int
|
31
|
+
demo_2: float
|
32
|
+
demo_3: str
|
33
|
+
demo_4: bool
|
34
|
+
demo_5: list[str]
|
35
|
+
demo_6: dict[str, Any]
|
36
|
+
demo_nest_1: list[dict[str, Any]] # 1 layer of nested child is ok.
|
37
|
+
demo_nest_2: list[list[str]]
|
38
|
+
demo_nest_3: dict[str, list[str]]
|
39
|
+
demo_nest_4: dict[str, dict[str, Any]]
|
40
|
+
# error_1: list[list[dict[str, list[str]]]] # <- Trigger 400 error due to 2+ layers of nested child.
|
41
|
+
# error_2: InstanceOf[AnotherPydanticClass] # <- Trigger 400 error due to non-typing annotation.
|
42
|
+
# error_3: list[InstanceOf[AnotherPydanticClass]] # <- Trigger 400 error due to non-typing annotation as a nested child.
|
43
|
+
|
44
|
+
# 2. Define a task
|
45
|
+
task = vhq.Task(
|
46
|
+
description="generate random output that strictly follows the given format",
|
47
|
+
pydantic_output=Demo,
|
48
|
+
)
|
49
|
+
|
50
|
+
# 3. Execute
|
51
|
+
res = task.execute()
|
52
|
+
|
53
|
+
assert isinstance(res, vhq.TaskOutput)
|
54
|
+
assert res.raw and res.json
|
55
|
+
assert isinstance(res.raw, str) and isinstance(res.json_dict, dict)
|
56
|
+
assert [
|
57
|
+
getattr(res.pydantic, k) and v.annotation == Demo.model_fields[k].annotation
|
58
|
+
for k, v in res.pydantic.model_fields.items()
|
59
|
+
]
|
60
|
+
```
|
61
|
+
|
62
|
+
<hr />
|
63
|
+
|
64
|
+
## 2. JSON
|
65
|
+
|
66
|
+
`[var]`<bold>`response_fields: List[InstanceOf[ResponseField]] = None`</bold>
|
67
|
+
|
68
|
+
Similar to Pydantic, JSON output structure can be defined by using a list of `ResponseField` objects.
|
69
|
+
|
70
|
+
The following code snippet demonstrates how to use `ResponseField` to generate output with a maximum of one level of nesting.
|
71
|
+
|
72
|
+
Custom JSON outputs can accept **one layer of a nested child**.
|
73
|
+
|
74
|
+
**[NOTES]**
|
75
|
+
|
76
|
+
- `demo_response_fields` in the following case is identical to the previous Demo class, except that titles are specified for nested fields.
|
77
|
+
|
78
|
+
- Agents generate JSON output by default, whether or not `response_fields` are used.
|
79
|
+
|
80
|
+
- However, response_fields are REQUIRED to specify JSON key titles and data types.
|
81
|
+
|
82
|
+
```python
|
83
|
+
import versionhq as vhq
|
84
|
+
|
85
|
+
# 1. Define a list of ResponseField objects.
|
86
|
+
demo_response_fields = [
|
87
|
+
# no nesting
|
88
|
+
vhq.ResponseField(title="demo_1", data_type=int),
|
89
|
+
vhq.ResponseField(title="demo_2", data_type=float),
|
90
|
+
vhq.ResponseField(title="demo_3", data_type=str),
|
91
|
+
vhq.ResponseField(title="demo_4", data_type=bool),
|
92
|
+
vhq.ResponseField(title="demo_5", data_type=list, items=str),
|
93
|
+
vhq.ResponseField(
|
94
|
+
title="demo_6",
|
95
|
+
data_type=dict,
|
96
|
+
properties=[vhq.ResponseField(title="demo-item", data_type=str)]
|
97
|
+
),
|
98
|
+
# nesting
|
99
|
+
vhq.ResponseField(
|
100
|
+
title="demo_nest_1",
|
101
|
+
data_type=list,
|
102
|
+
items=dict,
|
103
|
+
properties=([
|
104
|
+
vhq.ResponseField(
|
105
|
+
title="nest1",
|
106
|
+
data_type=dict,
|
107
|
+
properties=[vhq.ResponseField(title="nest11", data_type=str)]
|
108
|
+
)
|
109
|
+
])
|
110
|
+
),
|
111
|
+
vhq.ResponseField(title="demo_nest_2", data_type=list, items=list),
|
112
|
+
vhq.ResponseField(title="demo_nest_3", data_type=dict, properties=[
|
113
|
+
vhq.ResponseField(title="nest1", data_type=list, items=str)
|
114
|
+
]),
|
115
|
+
vhq.ResponseField(title="demo_nest_4", data_type=dict, properties=[
|
116
|
+
vhq.ResponseField(
|
117
|
+
title="nest1",
|
118
|
+
data_type=dict,
|
119
|
+
properties=[vhq.ResponseField(title="nest12", data_type=str)]
|
120
|
+
)
|
121
|
+
])
|
122
|
+
]
|
123
|
+
|
124
|
+
|
125
|
+
# 2. Define a task
|
126
|
+
task = vhq.Task(
|
127
|
+
description="Output random values strictly following the data type defined in the given response format.",
|
128
|
+
response_fields=demo_response_fields
|
129
|
+
)
|
130
|
+
|
131
|
+
|
132
|
+
# 3. Execute
|
133
|
+
res = task.execute()
|
134
|
+
|
135
|
+
assert isinstance(res, vhq.TaskOutput) and res.task_id is task.id
|
136
|
+
assert res.raw and res.json and res.pydantic is None
|
137
|
+
assert [v and type(v) == task.response_fields[i].data_type for i, (k, v) in enumerate(res.json_dict.items())]
|
138
|
+
```
|
139
|
+
|
140
|
+
* Ref. <a href="/core/task/response-field">`ResponseField`</a> class
|
141
|
+
|
142
|
+
<hr />
|
143
|
+
|
144
|
+
**Structuring response format**
|
145
|
+
|
146
|
+
- Highly recommend assigning agents optimized for `gemini-x` or `gpt-x` to produce structured outputs with nested items.
|
147
|
+
|
148
|
+
- To generate a response with more than 2 layers of nested items, separate them into multiple tasks or utilize nodes.
|
149
|
+
|
150
|
+
The following case demonstrates returning a `Main` class that contains a nested `Sub` class.
|
151
|
+
|
152
|
+
**[NOTES]**
|
153
|
+
|
154
|
+
- Using `callback` functions to format the final response. (You can try other functions suitable for your use case.)
|
155
|
+
|
156
|
+
- Passing parameter: `sub` to the callback function via the `callback_kwargs` variable.
|
157
|
+
|
158
|
+
- By default, the outputs of `main_task` are automatically passed to the callback function; you do NOT need to explicitly define them.
|
159
|
+
|
160
|
+
- Callback results will be stored in the `callback_output` field of the `TaskOutput` class.
|
161
|
+
|
162
|
+
|
163
|
+
```python
|
164
|
+
import versionhq as vhq
|
165
|
+
from pydantic import BaseModel, InstanceOf
|
166
|
+
from typing import Any
|
167
|
+
|
168
|
+
# 1. Define and execute a sub task with Pydantic output.
|
169
|
+
class Sub(BaseModel):
|
170
|
+
sub1: list[dict[str, Any]]
|
171
|
+
sub2: dict[str, Any]
|
172
|
+
|
173
|
+
sub_task = vhq.Task(
|
174
|
+
description="generate random values that strictly follows the given format.",
|
175
|
+
pydantic_output=Sub
|
176
|
+
)
|
177
|
+
sub_res = sub_task.execute()
|
178
|
+
|
179
|
+
# 2. Define a main task, callback function to format the final response.
|
180
|
+
class Main(BaseModel):
|
181
|
+
main1: list[Any] # <= assume expecting to store Sub object in this field.
|
182
|
+
# error_main1: list[InstanceOf[Sub]] # as this will trigger 400 error!
|
183
|
+
main2: dict[str, Any]
|
184
|
+
|
185
|
+
def format_response(sub: InstanceOf[Sub], main1: list[Any], main2: dict[str, Any]) -> Main:
|
186
|
+
main1.append(sub)
|
187
|
+
main = Main(main1=main1, main2=main2)
|
188
|
+
return main
|
189
|
+
|
190
|
+
# 3. Execute
|
191
|
+
main_task = vhq.Task(
|
192
|
+
description="generate random values that strictly follows the given format",
|
193
|
+
pydantic_output=Main,
|
194
|
+
callback=format_response,
|
195
|
+
callback_kwargs=dict(sub=Sub(sub1=sub_res.pydantic.sub1, sub2=sub_res.pydantic.sub2)),
|
196
|
+
)
|
197
|
+
res = main_task.execute(context=sub_res.raw) # [Optional] Adding sub_task as a context.
|
198
|
+
|
199
|
+
assert [item for item in res.callback_output.main1 if isinstance(item, Sub)]
|
200
|
+
```
|
201
|
+
|
202
|
+
To automate these manual setups, refer to <a href="/core/agent-network">AgentNetwork</a> class.
|
@@ -123,9 +123,13 @@ nav:
|
|
123
123
|
- 'core/task-graph/index.md'
|
124
124
|
- Task:
|
125
125
|
- 'core/task/index.md'
|
126
|
-
-
|
127
|
-
|
128
|
-
|
126
|
+
- Structuring Response:
|
127
|
+
- Concept: 'core/task/task-strc-response.md'
|
128
|
+
- ResponseField: 'core/task/response-field.md'
|
129
|
+
- Executing: 'core/task/task-execution.md'
|
130
|
+
- Outputs: 'core/task/task-output.md'
|
131
|
+
- Evaluating: 'core/task/evaluation.md'
|
132
|
+
- Reference: 'core/task/task-ref.md'
|
129
133
|
- Components:
|
130
134
|
- Tool: 'core/tool.md'
|
131
135
|
- Archive: 'tags.md'
|
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__", "*.egg-info"]
|
|
15
15
|
|
16
16
|
[project]
|
17
17
|
name = "versionhq"
|
18
|
-
version = "1.2.1.
|
18
|
+
version = "1.2.1.22"
|
19
19
|
authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
|
20
20
|
description = "An agentic orchestration framework for building agent networks that handle task automation."
|
21
21
|
readme = "README.md"
|
@@ -603,7 +603,7 @@ class Agent(BaseModel):
|
|
603
603
|
"""
|
604
604
|
A key to identify an agent. Used in storage, logging, and other recodings.
|
605
605
|
"""
|
606
|
-
sanitized_role = self.role.lower().replace(" ", "-").replace("/", "").replace("{", "").replace("}", "").replace("\n", "")
|
606
|
+
sanitized_role = self.role.lower().replace(" ", "-").replace("/", "").replace("{", "").replace("}", "").replace("\n", "")[0: 16]
|
607
607
|
return f"{str(self.id)}-{sanitized_role}"
|
608
608
|
|
609
609
|
|
@@ -33,7 +33,10 @@ docs/core/llm/index.md
|
|
33
33
|
docs/core/task/evaluation.md
|
34
34
|
docs/core/task/index.md
|
35
35
|
docs/core/task/response-field.md
|
36
|
+
docs/core/task/task-execution.md
|
36
37
|
docs/core/task/task-output.md
|
38
|
+
docs/core/task/task-ref.md
|
39
|
+
docs/core/task/task-strc-response.md
|
37
40
|
docs/core/task-graph/index.md
|
38
41
|
docs/stylesheets/main.css
|
39
42
|
src/versionhq/__init__.py
|