versionhq 1.2.1.15__tar.gz → 1.2.1.16__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/.github/workflows/deploy_docs.yml +10 -10
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/PKG-INFO +1 -1
- versionhq-1.2.1.16/SECURITY.md +37 -0
- versionhq-1.2.1.16/docs/core/agent/config.md +209 -0
- versionhq-1.2.1.16/docs/core/agent/index.md +34 -0
- versionhq-1.2.1.16/docs/core/agent/ref.md +40 -0
- versionhq-1.2.1.16/docs/core/agent/task-handling.md +245 -0
- versionhq-1.2.1.16/docs/core/llm/index.md +103 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/docs/core/tool.md +3 -3
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/mkdocs.yml +7 -2
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/pyproject.toml +1 -1
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/__init__.py +1 -1
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/agent/inhouse_agents.py +2 -2
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/agent/model.py +118 -119
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/task/model.py +2 -2
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/task_graph/draft.py +2 -2
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/tool/composio_tool.py +1 -2
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq.egg-info/PKG-INFO +1 -1
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq.egg-info/SOURCES.txt +4 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/agent/agent_test.py +44 -28
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/agent/doc_test.py +27 -31
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/tool/doc_test.py +3 -3
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/usecase_test.py +2 -2
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/uv.lock +54 -52
- versionhq-1.2.1.15/SECURITY.md +0 -31
- versionhq-1.2.1.15/docs/core/agent/index.md +0 -611
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/.env.sample +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/.github/workflows/publish.yml +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/.github/workflows/publish_testpypi.yml +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/.github/workflows/run_tests.yml +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/.github/workflows/security_check.yml +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/.gitignore +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/.pre-commit-config.yaml +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/.python-version +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/LICENSE +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/README.md +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/db/preprocess.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/docs/CNAME +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/docs/_logos/favicon.ico +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/docs/_logos/logo192.png +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/docs/core/task/evaluation.md +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/docs/core/task/index.md +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/docs/core/task/response-field.md +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/docs/core/task/task-output.md +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/docs/core/task-graph/index.md +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/docs/index.md +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/docs/quickstart.md +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/docs/stylesheets/main.css +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/docs/tags.md +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/requirements-dev.txt +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/requirements.txt +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/runtime.txt +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/setup.cfg +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/_utils/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/_utils/i18n.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/_utils/logger.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/_utils/process_config.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/_utils/usage_metrics.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/_utils/vars.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/agent/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/agent/parser.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/agent/rpm_controller.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/agent_network/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/agent_network/model.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/cli/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/clients/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/clients/customer/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/clients/customer/model.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/clients/product/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/clients/product/model.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/clients/workflow/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/clients/workflow/model.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/knowledge/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/knowledge/_utils.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/knowledge/embedding.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/knowledge/model.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/knowledge/source.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/knowledge/source_docling.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/knowledge/storage.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/llm/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/llm/llm_vars.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/llm/model.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/memory/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/memory/contextual_memory.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/memory/model.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/storage/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/storage/base.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/storage/ltm_sqlite_storage.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/storage/mem0_storage.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/storage/rag_storage.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/storage/task_output_storage.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/storage/utils.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/task/TEMPLATES/Description.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/task/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/task/evaluate.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/task/formation.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/task/formatter.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/task/log_handler.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/task/structured_response.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/task_graph/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/task_graph/colors.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/task_graph/model.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/tool/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/tool/cache_handler.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/tool/composio_tool_vars.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/tool/decorator.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/tool/model.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq/tool/tool_handler.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq.egg-info/dependency_links.txt +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq.egg-info/requires.txt +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/src/versionhq.egg-info/top_level.txt +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/agent/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/agent_network/Prompts/Demo_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/agent_network/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/agent_network/agent_network_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/cli/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/clients/customer_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/clients/product_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/clients/workflow_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/conftest.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/doc_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/formation_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/knowledge/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/knowledge/knowledge_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/knowledge/mock_report_compressed.pdf +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/llm/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/llm/llm_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/memory/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/memory/memory_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/task/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/task/doc_taskoutput_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/task/doc_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/task/llm_connection_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/task/task_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/task_graph/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/task_graph/doc_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/task_graph/task_graph_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/tool/__init__.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/tool/composio_test.py +0 -0
- {versionhq-1.2.1.15 → versionhq-1.2.1.16}/tests/tool/tool_test.py +0 -0
@@ -16,15 +16,15 @@ jobs:
|
|
16
16
|
run: |
|
17
17
|
git config user.name github-actions[bot]
|
18
18
|
git config user.email 41898282+github-actions[bot]@users.noreply.github.com
|
19
|
-
|
20
|
-
|
21
|
-
|
22
|
-
|
23
|
-
|
24
|
-
|
25
|
-
|
26
|
-
|
27
|
-
|
28
|
-
|
19
|
+
- uses: actions/setup-python@v5
|
20
|
+
with:
|
21
|
+
python-version: 3.12
|
22
|
+
- run: echo "cache_id=$(date +%s)" >> $GITHUB_ENV
|
23
|
+
- uses: actions/cache@v4
|
24
|
+
with:
|
25
|
+
key: mkdocs-material-${{ env.cache_id }}
|
26
|
+
path: .cache
|
27
|
+
restore-keys: |
|
28
|
+
mkdocs-material-
|
29
29
|
- run: pip install mkdocs-material
|
30
30
|
- run: mkdocs gh-deploy --force --clean
|
@@ -0,0 +1,37 @@
|
|
1
|
+
# Security Policy
|
2
|
+
|
3
|
+
This policy outlines the process for reporting security vulnerabilities affecting our systems, products, or services. We appreciate your efforts in helping us maintain a secure environment and take all vulnerability reports seriously.
|
4
|
+
|
5
|
+
## Supported Versions
|
6
|
+
|
7
|
+
|
8
|
+
| Version | Supported |
|
9
|
+
| ------- | ------------------ |
|
10
|
+
| 3.12.x | :white_check_mark: |
|
11
|
+
| 3.11.x | :x: |
|
12
|
+
| < 3.11 | :x: |
|
13
|
+
|
14
|
+
## Reporting a Vulnerability
|
15
|
+
|
16
|
+
To report a security vulnerability, please submit your findings through our [reporting platform](https://versi0n.io/contact-us).
|
17
|
+
|
18
|
+
Please provide the following information in your report to help us understand and address the vulnerability effectively:
|
19
|
+
|
20
|
+
- Detailed Description: Clearly describe the vulnerability, including its potential impact. Be as specific as possible, including affected systems, components, and URLs.
|
21
|
+
|
22
|
+
- Steps to Reproduce: Provide a step-by-step guide on how to reproduce the vulnerability. This is crucial for our team to verify and address the issue quickly. Include screenshots or videos if helpful.
|
23
|
+
|
24
|
+
- Affected Systems/Components: Specify the affected systems, software, or hardware. Include version numbers where applicable.
|
25
|
+
|
26
|
+
- Your Contact Information: Provide your name and email address so we can contact you for further clarification or updates. We respect your privacy and will handle your information responsibly.
|
27
|
+
|
28
|
+
|
29
|
+
### What to Expect:
|
30
|
+
|
31
|
+
We will acknowledge receipt of your vulnerability report within three business days and provide updates on the progress of our investigation and remediation efforts.
|
32
|
+
|
33
|
+
If the vulnerability is accepted, we will prioritize its remediation based on its severity and potential impact. We will work to address the issue as quickly as possible and may request further information from you during the process.
|
34
|
+
|
35
|
+
By submitting a vulnerability report, you agree that you are not violating any applicable laws or regulations.
|
36
|
+
|
37
|
+
We appreciate your cooperation in helping us maintain a secure environment. If you have any questions about this policy, please contact us at [reporting platform](https://versi0n.io/contact-us).
|
@@ -0,0 +1,209 @@
|
|
1
|
+
|
2
|
+
<hr />
|
3
|
+
|
4
|
+
## Optimizing Model
|
5
|
+
|
6
|
+
**Model Optimization**
|
7
|
+
|
8
|
+
`[var]`<bold>`llm: Optional[str | LLM | Dict[str, Any]] = "gpt-4o"`</bold>
|
9
|
+
|
10
|
+
You can select a model or model provider that the agent will run on.
|
11
|
+
|
12
|
+
By default, when the model provider name is provided, we will select the most cost-efficient model from the given provider.
|
13
|
+
|
14
|
+
```python
|
15
|
+
import versionhq as vhq
|
16
|
+
|
17
|
+
agent = vhq.Agent(
|
18
|
+
role="Marketing Analyst",
|
19
|
+
goal="Coping with price competition in saturated markets",
|
20
|
+
llm="gemini-2.0"
|
21
|
+
)
|
22
|
+
```
|
23
|
+
|
24
|
+
<hr/>
|
25
|
+
|
26
|
+
|
27
|
+
**Other LLM Configuration**
|
28
|
+
|
29
|
+
`[var]`<bold>`llm_config: Optional[Dict[str, Any]] = None`</bold>
|
30
|
+
|
31
|
+
You can specify any other parameters that the agent needs to follow when they call the LLM. Else, the agent will follow the default settings given by the model provider.
|
32
|
+
|
33
|
+
e.g. Expect longer context and form a short answer
|
34
|
+
|
35
|
+
```python
|
36
|
+
import versionhq as vhq
|
37
|
+
|
38
|
+
agent = vhq.Agent(
|
39
|
+
role="Marketing Analyst",
|
40
|
+
goal="Coping with increased price competition in saturated markets.",
|
41
|
+
respect_context_window=False,
|
42
|
+
max_execution_time=60,
|
43
|
+
max_rpm=5,
|
44
|
+
llm_config=dict(
|
45
|
+
temperature=1,
|
46
|
+
top_p=0.1,
|
47
|
+
n=1,
|
48
|
+
stop="answer",
|
49
|
+
dummy="I am dummy" # <- invalid field will be ignored automatically.
|
50
|
+
)
|
51
|
+
)
|
52
|
+
|
53
|
+
assert isinstance(agent.llm, vhq.LLM)
|
54
|
+
assert agent.llm.temperature == 1
|
55
|
+
assert agent.llm.top_p == 0.1
|
56
|
+
assert agent.llm.n == 1
|
57
|
+
assert agent.llm.stop == "answer"
|
58
|
+
```
|
59
|
+
|
60
|
+
<hr>
|
61
|
+
|
62
|
+
|
63
|
+
## Building Knowledge
|
64
|
+
|
65
|
+
**Knowledge Source**
|
66
|
+
|
67
|
+
`[var]`<bold>`knowledge_sources: Optional[List[KnowledgeSource]] = None`</bold>
|
68
|
+
|
69
|
+
You can add knowledge sources to the agent in the following formats:
|
70
|
+
|
71
|
+
- Plain text
|
72
|
+
- Excel file
|
73
|
+
- PPTX
|
74
|
+
- PDF
|
75
|
+
- CSV
|
76
|
+
- JSON
|
77
|
+
- HTML file
|
78
|
+
|
79
|
+
The agent will run a query in the given knowledge source using the given context, then add the search results to the task prompt context.
|
80
|
+
|
81
|
+
```python
|
82
|
+
import versionhq as vhq
|
83
|
+
|
84
|
+
content = "Kuriko's favorite color is gold, and she enjoy Japanese food."
|
85
|
+
string_source = vhq.StringKnowledgeSource(content=content)
|
86
|
+
|
87
|
+
agent = vhq.Agent(
|
88
|
+
role="Information Agent",
|
89
|
+
goal="Provide information based on knowledge sources",
|
90
|
+
knowledge_sources=[string_source,]
|
91
|
+
)
|
92
|
+
|
93
|
+
task = vhq.Task(
|
94
|
+
description="Answer the following question: What is Kuriko's favorite color?"
|
95
|
+
)
|
96
|
+
|
97
|
+
res = task.execute(agent=agent)
|
98
|
+
assert "gold" in res.raw
|
99
|
+
```
|
100
|
+
|
101
|
+
* Reference: <bold>`Knowledge` class</bold>
|
102
|
+
|
103
|
+
<hr />
|
104
|
+
|
105
|
+
## Accessing Memories
|
106
|
+
|
107
|
+
Store task execution results in memory
|
108
|
+
|
109
|
+
`[var]`<bold>`with_memory: bool = False`</bold>
|
110
|
+
|
111
|
+
By turning on the with_memory val True, the agent will create and store the task output and contextualize the memory when they execute the task.
|
112
|
+
|
113
|
+
```python
|
114
|
+
from versionhq.task.model import Agent
|
115
|
+
|
116
|
+
agent = vhq.Agent(
|
117
|
+
role="Researcher",
|
118
|
+
goal="You research about math.",
|
119
|
+
with_memory=True
|
120
|
+
)
|
121
|
+
|
122
|
+
assert isinstance(agent.short_term_memory, vhq.ShortTermMemory)
|
123
|
+
assert isinstance(agent.long_term_memory, vhq.LongTermMemory)
|
124
|
+
```
|
125
|
+
|
126
|
+
<hr />
|
127
|
+
|
128
|
+
**RAG Storage**
|
129
|
+
|
130
|
+
When the agent is not given any `memory_config` values, they will create `RAGStorage` to store memory:
|
131
|
+
|
132
|
+
```python
|
133
|
+
RAGStorage(
|
134
|
+
type="stm", # short-term memory
|
135
|
+
allow_reset=True, # default = True. Explicitly mentioned.
|
136
|
+
embedder_config=None,
|
137
|
+
agents=[agent,]
|
138
|
+
)
|
139
|
+
```
|
140
|
+
|
141
|
+
MEM0 Storage
|
142
|
+
|
143
|
+
* Reference: <bold>`Memory`</bold> class
|
144
|
+
|
145
|
+
<hr />
|
146
|
+
|
147
|
+
## Updating Existing Agents
|
148
|
+
|
149
|
+
**Model configuration**
|
150
|
+
|
151
|
+
`[var]`<bold>`config: Optional[Dict[str, Any]] = None`</bold>
|
152
|
+
|
153
|
+
You can create an agent by using model config parameters instead.
|
154
|
+
|
155
|
+
e.g. Using config val
|
156
|
+
|
157
|
+
```python
|
158
|
+
import versionhq as vhq
|
159
|
+
|
160
|
+
agent = vhq.Agent(
|
161
|
+
config=dict(
|
162
|
+
role="Marketing Analyst",
|
163
|
+
goal="Coping with increased price competition in saturated markets.",
|
164
|
+
)
|
165
|
+
)
|
166
|
+
```
|
167
|
+
|
168
|
+
This is the same as the following:
|
169
|
+
|
170
|
+
```python
|
171
|
+
import versionhq as vhq
|
172
|
+
|
173
|
+
agent = vhq.Agent(
|
174
|
+
role="Marketing Analyst",
|
175
|
+
goal="Coping with price competition in saturated markets.",
|
176
|
+
)
|
177
|
+
```
|
178
|
+
|
179
|
+
<hr />
|
180
|
+
|
181
|
+
**Updating existing agents**
|
182
|
+
|
183
|
+
`[class method]`<bold>`update(self, **kwargs) -> Self`</bold>
|
184
|
+
|
185
|
+
You can update values of existing agents using `update` class method.
|
186
|
+
|
187
|
+
This class method will safely trigger some setups that need to be run before the agent starts executing tasks.
|
188
|
+
|
189
|
+
|
190
|
+
```python
|
191
|
+
import versionhq as vhq
|
192
|
+
|
193
|
+
agent = vhq.Agent(
|
194
|
+
role="Marketing Analyst",
|
195
|
+
goal="Coping with price competition in saturated markets"
|
196
|
+
)
|
197
|
+
|
198
|
+
tool = vhq.Tool(func=lambda x: x)
|
199
|
+
agent.update(
|
200
|
+
tools=[tool],
|
201
|
+
goal="my new goal", # updating the goal (this will trigger updating the developer_prompt.)
|
202
|
+
max_rpm=3,
|
203
|
+
knowledge_sources=["testing", "testing2"], # adding knowledge sources (this will trigger the storage creation.)
|
204
|
+
memory_config={"user_id": "0000"},
|
205
|
+
llm="gemini-2.0", # Updating model (The valid llm_config for the new model will be inherited.)
|
206
|
+
use_developer_prompt=False,
|
207
|
+
dummy="I am dummy" # <- Invalid field will be automatically ignored.
|
208
|
+
)
|
209
|
+
```
|
@@ -0,0 +1,34 @@
|
|
1
|
+
---
|
2
|
+
tags:
|
3
|
+
- Agent Network
|
4
|
+
---
|
5
|
+
|
6
|
+
# Agent
|
7
|
+
|
8
|
+
<class>`class` versionhq.agent.model.<bold>Agent<bold></class>
|
9
|
+
|
10
|
+
A Pydantic class to store `Agent` objects and handle `Task` execution as well as `LLM` configuration.
|
11
|
+
|
12
|
+
|
13
|
+
## Quick Start
|
14
|
+
|
15
|
+
By defining its role and goal in a simple sentence, the AI agent will be set up to run on <bold>`gpt-4o`</bold> by default.
|
16
|
+
|
17
|
+
Calling `.start()` method can start the agent operation, then generate response in text and JSON formats stored in the `TaskOutput` object.
|
18
|
+
|
19
|
+
```python
|
20
|
+
import versionhq as vhq
|
21
|
+
|
22
|
+
agent = vhq.Agent(
|
23
|
+
role="Marketing Analyst",
|
24
|
+
goal="Coping with price competition in saturated markets"
|
25
|
+
)
|
26
|
+
|
27
|
+
res = agent.start(context="Planning a new campaign promotion starting this summer")
|
28
|
+
|
29
|
+
assert agent.id
|
30
|
+
assert isinstance(res, vhq.TaskOutput)
|
31
|
+
assert res.json
|
32
|
+
```
|
33
|
+
|
34
|
+
Ref. <a href="/core/task">Task</a> class / <a href="/core/llm">LLM</a> class
|
@@ -0,0 +1,40 @@
|
|
1
|
+
## Variables
|
2
|
+
|
3
|
+
| <div style="width:160px">**Variable**</div> | **Data Type** | **Default** | **Nullable** | **Description** |
|
4
|
+
| :--- | :--- | :--- | :--- | :--- |
|
5
|
+
| **`id`** | UUID4 | uuid.uuid4() | False | Stores auto-generated ID as identifier. Not editable. |
|
6
|
+
| **`role`** | str | None | False | Stores a role of the agent. |
|
7
|
+
| **`goal`** | str | None | False | Stores a goal of the agent. |
|
8
|
+
| **`backstory`** | str | None | True | Stores backstory of the agent. Utilized as system prompt. |
|
9
|
+
| **`tools`** | List[InstanceOf[`Tool` \| `ToolSet`] \| Type[`Tool`]] | None | True | Stores tools to be used when executing a task. |
|
10
|
+
| **`knowledge_sources`** | List[`BaseKnowledgeSource` \| Any] | None | True | Stores knowledge sources in text, file path, or url. |
|
11
|
+
| **`embedder_config`** | Dict[str, Any] | None | True | Stores embedding configuration for storing knowledge sources. |
|
12
|
+
| **`with_memory`** | bool | False | - | Whether to store tasks and results in memory. |
|
13
|
+
| **`memory_config`** | Dict[str, Any] | None | True | Stores configuration of the memory. |
|
14
|
+
| **`short_term_memory`** | InstanceOf[`ShortTermMemory`] | None | True | Stores `ShortTermMemory` object. |
|
15
|
+
| **`long_term_memory`** | InstanceOf[`LongTermMemory`] | None | True | Stores `LongTermMemory` object. |
|
16
|
+
| **`user_memory`** | InstanceOf[`UserMemory`] | None | True | Stores `UserMemory` object. |
|
17
|
+
| **`use_developer_prompt`** | bool | True | - | Whether to use the system (developer) prompt when calling the model. |
|
18
|
+
| **`developer_promt_template`** | str | None | True | File path to the prompt template. |
|
19
|
+
| **`user_promt_template`** | str | None | True | File path to the prompt template. |
|
20
|
+
| **`network`** | [List[Any]] | None | True | Stores a list of agent networks that the agent belongs to. |
|
21
|
+
| **`allow_delegation`** | bool | False | - | Whether the agent can delegate assigned tasks to another agent. |
|
22
|
+
| **`max_retry_limit`** | int | 2 | - | Maximum number of retries when the task execution failed. |
|
23
|
+
| **`maxit`** | int | 25 | - | Maximum number of total optimization loops conducted when an error occurs during the task execution. |
|
24
|
+
| **`callbacks`** | List[Callable] | None | True | Stores a list of callback functions that must be called after every task execution completed.|
|
25
|
+
| **`llm`** | str \| InstanceOf[`LLM`] \| Dict[str, Any] | None | False | Stores the main model that the agent runs on. |
|
26
|
+
| **`func_calling_llm`** | str \| InstanceOf[`LLM`] \| Dict[str, Any] | None | False | Stores the function calling model that the agent runs on. |
|
27
|
+
| **`respect_context_window`** | bool | True | - | Whether to follow the main model's maximum context window size. |
|
28
|
+
| **`max_execution_time`** | int | None | True | Stores maximum execution time in seconds. |
|
29
|
+
| **`max_rpm`** | int | None | True | Stores maximum number of requests per minute. |
|
30
|
+
| **`llm_config`** | Dict[str, Any] | None | True | Stores configuration of `LLM` object. |
|
31
|
+
| **`config`** | Dict[str, Any] | None | True | Stores model config. |
|
32
|
+
|
33
|
+
|
34
|
+
## Class Methods
|
35
|
+
|
36
|
+
| <div style="width:120px">**Method**</div> | **Params** | **Returns** | **Description** |
|
37
|
+
| :--- | :--- | :--- | :--- |
|
38
|
+
| **`update`** | **kwargs: Any | Self | Updates agents with given kwargs. Invalid keys will be ignored. |
|
39
|
+
| **`start`** | context: Any = None <br> tool_res_as_final: bool = False | `TaskOutput` \| None | Starts to operate the agent. |
|
40
|
+
| **`execute_task`** | task: [Task] <br> context: Any = None <br> task_tools: Optional[List[Tool \| ToolSet]] = list() | str | Returns response from the model in plain text format. |
|
@@ -0,0 +1,245 @@
|
|
1
|
+
|
2
|
+
## Prompting
|
3
|
+
|
4
|
+
**Developer Prompt (System Prompt)**
|
5
|
+
|
6
|
+
`[var]`<bold>`backstory: Optional[str] = TEMPLATE_BACKSTORY`<bold>
|
7
|
+
|
8
|
+
Backstory will be drafted automatically using the given role, goal and other values in the Agent model, and converted into the **developer prompt** when the agent executes the task.
|
9
|
+
|
10
|
+
<hr/>
|
11
|
+
|
12
|
+
**Backstory template (full) for auto drafting:**
|
13
|
+
|
14
|
+
```python
|
15
|
+
BACKSTORY_FULL="""You are an expert {role} highly skilled in {skills}. You have abilities to query relevant information from the given knowledge sources and use tools such as {tools}. Leveraging these, you will identify competitive solutions to achieve the following goal: {goal}."""
|
16
|
+
```
|
17
|
+
|
18
|
+
For example, the following agent’s backstory will be auto drafted using a simple template.
|
19
|
+
|
20
|
+
```python
|
21
|
+
import versionhq as vhq
|
22
|
+
|
23
|
+
agent = vhq.Agent(
|
24
|
+
role="Marketing Analyst",
|
25
|
+
goal="Coping with price competition in saturated markets"
|
26
|
+
)
|
27
|
+
|
28
|
+
print(agent.backstory)
|
29
|
+
|
30
|
+
# You are an expert marketing analyst with relevant skillsets and abilities to query relevant information from the given knowledge sources. Leveraging these, you will identify competitive solutions to achieve the following goal: coping with price competition in saturated markets.
|
31
|
+
```
|
32
|
+
|
33
|
+
You can also specify your own backstory by simply adding the value to the backstory field of the Agent model:
|
34
|
+
|
35
|
+
```python
|
36
|
+
import versionhq as vhq
|
37
|
+
|
38
|
+
agent = vhq.Agent(
|
39
|
+
role="Marketing Analyst",
|
40
|
+
goal="Coping with increased price competition in saturated markets.",
|
41
|
+
backstory="You are a marketing analyst for a company in a saturated market. The market is becoming increasingly price-competitive, and your company's profit margins are shrinking. Your primary goal is to develop and implement strategies to help your company maintain its market share and profitability in this challenging environment."
|
42
|
+
)
|
43
|
+
|
44
|
+
print(agent.backstory)
|
45
|
+
|
46
|
+
# You are a marketing analyst for a company in a saturated market. The market is becoming increasingly price-competitive, and your company's profit margins are shrinking. Your primary goal is to develop and implement strategies to help your company maintain its market share and profitability in this challenging environment.
|
47
|
+
```
|
48
|
+
<hr />
|
49
|
+
|
50
|
+
`[var]`<bold>`use_developer_prompt: [bool] = True`</bold>
|
51
|
+
|
52
|
+
You can turn off the system prompt by setting `use_developer_prompt` False. In this case, the backstory is ignored when the agent calls the LLM.
|
53
|
+
|
54
|
+
```python
|
55
|
+
import versionhq as vhq
|
56
|
+
|
57
|
+
agent = vhq.Agent(
|
58
|
+
role="Marketing Analyst",
|
59
|
+
goal="Coping with increased price competition in saturated markets.",
|
60
|
+
use_developer_prompt=False # default - True
|
61
|
+
)
|
62
|
+
```
|
63
|
+
|
64
|
+
<hr >
|
65
|
+
|
66
|
+
## Executing Tasks
|
67
|
+
|
68
|
+
**Delegation**
|
69
|
+
|
70
|
+
`[var]`<bold>`allow_delegation: [bool] = False`</bold>
|
71
|
+
|
72
|
+
When the agent is occupied with other tasks or not capable enough to the given task, you can delegate the task to another agent or ask another agent for additional information. The delegated agent will be selected based on nature of the given task and/or tool.
|
73
|
+
|
74
|
+
```python
|
75
|
+
import versionhq as vhq
|
76
|
+
|
77
|
+
agent = vhq.Agent(
|
78
|
+
role="Marketing Analyst",
|
79
|
+
goal="Coping with increased price competition in saturated markets.",
|
80
|
+
allow_delegation=True
|
81
|
+
)
|
82
|
+
```
|
83
|
+
|
84
|
+
<hr />
|
85
|
+
|
86
|
+
**Max Retry Limit**
|
87
|
+
|
88
|
+
`[var]`<bold>`max_retry_limit: Optional[int] = 2`</bold>
|
89
|
+
|
90
|
+
You can define how many times the agent can retry the execution under the same given conditions when it encounters an error.
|
91
|
+
|
92
|
+
```python
|
93
|
+
import versionhq as vhq
|
94
|
+
|
95
|
+
agent = vhq.Agent(
|
96
|
+
role="Marketing Analyst",
|
97
|
+
goal="Coping with increased price competition in saturated markets.",
|
98
|
+
max_retry_limit=3
|
99
|
+
)
|
100
|
+
```
|
101
|
+
|
102
|
+
<hr />
|
103
|
+
|
104
|
+
**Maximum Number of Iterations (maxit)**
|
105
|
+
|
106
|
+
`[var]`<bold>`maxit: Optional[int] = 25`</bold>
|
107
|
+
|
108
|
+
You can also define the number of loops that the agent will run after it encounters an error.
|
109
|
+
|
110
|
+
i.e., The agent will stop the task execution after the 30th loop.
|
111
|
+
|
112
|
+
```python
|
113
|
+
import versionhq as vhq
|
114
|
+
|
115
|
+
agent = vhq.Agent(
|
116
|
+
role="Marketing Analyst",
|
117
|
+
goal="Coping with increased price competition in saturated markets.",
|
118
|
+
maxit=30 # default = 25
|
119
|
+
)
|
120
|
+
```
|
121
|
+
|
122
|
+
<hr />
|
123
|
+
|
124
|
+
**Context Window**
|
125
|
+
|
126
|
+
`[var]`<bold>`respect_context_window: [bool] = True`</bold>
|
127
|
+
|
128
|
+
A context window determines the amount of text that the model takes into account when generating a response.
|
129
|
+
|
130
|
+
By adjusting the context window, you can control the level of context the model considers while generating the output. A smaller context window focuses on immediate context, while a larger context window provides a broader context.
|
131
|
+
|
132
|
+
By default, the agent will follow **the 80% rule** - where they only use 80% of the context window limit of the LLM they run on.
|
133
|
+
|
134
|
+
You can turn off this rule by setting `respect_context_window` False to have a larger context window.
|
135
|
+
|
136
|
+
|
137
|
+
<hr />
|
138
|
+
|
139
|
+
**Max Tokens**
|
140
|
+
|
141
|
+
`[var]`<bold>`max_tokens: Optional[int] = None`</bold>
|
142
|
+
|
143
|
+
Max tokens defines the maximum number of tokens in the generated response. Tokens can be thought of as the individual units of text, which can be words or characters.
|
144
|
+
|
145
|
+
By default, the agent will follow the default max_tokens of the model, but you can specify the max token to limit the length of the generated output.
|
146
|
+
|
147
|
+
|
148
|
+
<hr />
|
149
|
+
|
150
|
+
**Maximum Execution Time**
|
151
|
+
|
152
|
+
`[var]`<bold>`max_execution_times: Optional[int] = None`</bold>
|
153
|
+
|
154
|
+
The maximum amount of wall clock time to spend in the execution loop.
|
155
|
+
|
156
|
+
By default, the agent will follow the default setting of the model.
|
157
|
+
|
158
|
+
|
159
|
+
<hr />
|
160
|
+
|
161
|
+
**Maximum RPM (Requests Per Minute)**
|
162
|
+
|
163
|
+
`[var]`<bold>`max_rpm: Optional[int] = None`</bold>
|
164
|
+
|
165
|
+
The maximum number of requests per minute that the agent can send to the LLM.
|
166
|
+
|
167
|
+
By default, the agent will follow the default setting of the model. When the value is given, we let the model sleep for 60 seconds when the number of executions exceeds the maximum requests per minute.
|
168
|
+
|
169
|
+
<hr />
|
170
|
+
|
171
|
+
|
172
|
+
## Callbacks
|
173
|
+
|
174
|
+
`[var]`<bold>`callbacks: Optional[List[Callable]] = None`</bold>
|
175
|
+
|
176
|
+
You can add callback functions that the agent will run after executing any task.
|
177
|
+
|
178
|
+
By default, raw response from the agent will be added to the arguments of the callback function.
|
179
|
+
|
180
|
+
e.g. Format a response after executing the task:
|
181
|
+
|
182
|
+
```python
|
183
|
+
import json
|
184
|
+
from typing import Dict, Any
|
185
|
+
|
186
|
+
import versionhq as vhq
|
187
|
+
|
188
|
+
def format_response(res: str = None) -> str | Dict[str, Any]:
|
189
|
+
try:
|
190
|
+
r = json.dumps(eval(res))
|
191
|
+
formatted_res = json.loads(r)
|
192
|
+
return formatted_res
|
193
|
+
except:
|
194
|
+
return res
|
195
|
+
|
196
|
+
agent = vhq.Agent(
|
197
|
+
role="Marketing Analyst",
|
198
|
+
goal="Coping with increased price competition in saturated markets.",
|
199
|
+
callbacks=[format_response]
|
200
|
+
)
|
201
|
+
res = agent.start()
|
202
|
+
|
203
|
+
assert res.callback_output
|
204
|
+
```
|
205
|
+
|
206
|
+
<hr />
|
207
|
+
|
208
|
+
**Multiple callbacks to call**
|
209
|
+
|
210
|
+
The callback functions are called in the order of the list index. By default, each callback receives the task response along with the output of the previous callback function.
|
211
|
+
|
212
|
+
e.g. Validate an initial response from the assigned agent, and format the response.
|
213
|
+
|
214
|
+
```python
|
215
|
+
import json
|
216
|
+
from typing import Dict, Any
|
217
|
+
|
218
|
+
import versionhq as vhq
|
219
|
+
|
220
|
+
def assessment(res: str) -> str:
|
221
|
+
try:
|
222
|
+
sub_agent = vhq.Agent(role="Validator", goal="Validate the given solutions.")
|
223
|
+
task = vhq.Task(
|
224
|
+
description=f"Assess the given solution based on feasibilities and fits to client's strategies, then refine the solution if necessary.\nSolution: {res}"
|
225
|
+
)
|
226
|
+
r = task.sync_execute(agent=sub_agent)
|
227
|
+
return r.raw
|
228
|
+
|
229
|
+
except:
|
230
|
+
return res
|
231
|
+
|
232
|
+
def format_response(res: str = None) -> str | Dict[str, Any]:
|
233
|
+
try:
|
234
|
+
r = json.dumps(eval(res))
|
235
|
+
formatted_res = json.loads(r)
|
236
|
+
return formatted_res
|
237
|
+
except:
|
238
|
+
return res
|
239
|
+
|
240
|
+
agent = vhq.Agent(
|
241
|
+
role="Marketing Analyst",
|
242
|
+
goal="Build solutions to address increased price competition in saturated markets",
|
243
|
+
callbacks=[assessment, format_response] # add multiple funcs as callbacks - executed in order of index
|
244
|
+
)
|
245
|
+
```
|