versionhq 1.1.12.4__tar.gz → 1.1.12.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124)
  1. versionhq-1.1.12.5/.github/workflows/deploy_docs.yml +30 -0
  2. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/PKG-INFO +35 -50
  3. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/README.md +33 -48
  4. versionhq-1.1.12.5/docs/_logos/favicon.ico +0 -0
  5. versionhq-1.1.12.5/docs/_logos/logo192.png +0 -0
  6. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/docs/core/Agent.md +31 -33
  7. versionhq-1.1.12.5/docs/core/tool.md +389 -0
  8. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/docs/index.md +34 -43
  9. versionhq-1.1.12.5/docs/quickstart.md +122 -0
  10. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/docs/stylesheets/main.css +8 -8
  11. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/mkdocs.yml +2 -2
  12. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/pyproject.toml +3 -3
  13. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/__init__.py +1 -1
  14. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/agent/model.py +1 -1
  15. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/llm/model.py +66 -40
  16. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/task/log_handler.py +1 -1
  17. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/task/model.py +2 -2
  18. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/tool/model.py +1 -4
  19. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq.egg-info/PKG-INFO +35 -50
  20. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq.egg-info/SOURCES.txt +1 -0
  21. versionhq-1.1.12.5/tests/doc_test.py +492 -0
  22. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/task/task_test.py +1 -1
  23. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/team/team_test.py +1 -1
  24. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/uv.lock +58 -58
  25. versionhq-1.1.12.4/.github/workflows/deploy_docs.yml +0 -38
  26. versionhq-1.1.12.4/docs/_logos/favicon.ico +0 -0
  27. versionhq-1.1.12.4/docs/_logos/logo192.png +0 -0
  28. versionhq-1.1.12.4/docs/quickstart.md +0 -73
  29. versionhq-1.1.12.4/tests/doc_test.py +0 -200
  30. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/.github/workflows/publish.yml +0 -0
  31. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/.github/workflows/publish_testpypi.yml +0 -0
  32. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/.github/workflows/run_tests.yml +0 -0
  33. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/.github/workflows/security_check.yml +0 -0
  34. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/.gitignore +0 -0
  35. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/.pre-commit-config.yaml +0 -0
  36. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/.python-version +0 -0
  37. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/CNAME +0 -0
  38. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/LICENSE +0 -0
  39. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/SECURITY.md +0 -0
  40. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/db/preprocess.py +0 -0
  41. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/docs/CNAME +0 -0
  42. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/requirements-dev.txt +0 -0
  43. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/requirements.txt +0 -0
  44. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/runtime.txt +0 -0
  45. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/setup.cfg +0 -0
  46. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/_utils/__init__.py +0 -0
  47. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/_utils/i18n.py +0 -0
  48. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/_utils/logger.py +0 -0
  49. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/_utils/process_config.py +0 -0
  50. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/_utils/usage_metrics.py +0 -0
  51. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/_utils/vars.py +0 -0
  52. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
  53. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
  54. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/agent/__init__.py +0 -0
  55. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/agent/inhouse_agents.py +0 -0
  56. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/agent/parser.py +0 -0
  57. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/agent/rpm_controller.py +0 -0
  58. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/cli/__init__.py +0 -0
  59. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/clients/__init__.py +0 -0
  60. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/clients/customer/__init__.py +0 -0
  61. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/clients/customer/model.py +0 -0
  62. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/clients/product/__init__.py +0 -0
  63. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/clients/product/model.py +0 -0
  64. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/clients/workflow/__init__.py +0 -0
  65. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/clients/workflow/model.py +0 -0
  66. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/knowledge/__init__.py +0 -0
  67. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/knowledge/_utils.py +0 -0
  68. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/knowledge/embedding.py +0 -0
  69. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/knowledge/model.py +0 -0
  70. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/knowledge/source.py +0 -0
  71. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/knowledge/source_docling.py +0 -0
  72. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/knowledge/storage.py +0 -0
  73. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/llm/__init__.py +0 -0
  74. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/llm/llm_vars.py +0 -0
  75. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/memory/__init__.py +0 -0
  76. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/memory/contextual_memory.py +0 -0
  77. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/memory/model.py +0 -0
  78. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/storage/__init__.py +0 -0
  79. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/storage/base.py +0 -0
  80. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/storage/ltm_sqlite_storage.py +0 -0
  81. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/storage/mem0_storage.py +0 -0
  82. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/storage/rag_storage.py +0 -0
  83. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/storage/task_output_storage.py +0 -0
  84. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/storage/utils.py +0 -0
  85. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/task/TEMPLATES/Description.py +0 -0
  86. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/task/__init__.py +0 -0
  87. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/task/evaluate.py +0 -0
  88. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/task/formation.py +0 -0
  89. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/task/formatter.py +0 -0
  90. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/task/structured_response.py +0 -0
  91. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/team/__init__.py +0 -0
  92. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/team/model.py +0 -0
  93. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/team/team_planner.py +0 -0
  94. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/tool/__init__.py +0 -0
  95. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/tool/cache_handler.py +0 -0
  96. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/tool/composio_tool.py +0 -0
  97. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/tool/composio_tool_vars.py +0 -0
  98. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/tool/decorator.py +0 -0
  99. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq/tool/tool_handler.py +0 -0
  100. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq.egg-info/dependency_links.txt +0 -0
  101. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq.egg-info/requires.txt +0 -0
  102. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/src/versionhq.egg-info/top_level.txt +0 -0
  103. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/__init__.py +0 -0
  104. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/agent/__init__.py +0 -0
  105. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/agent/agent_test.py +0 -0
  106. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/cli/__init__.py +0 -0
  107. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/clients/customer_test.py +0 -0
  108. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/clients/product_test.py +0 -0
  109. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/clients/workflow_test.py +0 -0
  110. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/conftest.py +0 -0
  111. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/knowledge/__init__.py +0 -0
  112. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/knowledge/knowledge_test.py +0 -0
  113. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/knowledge/mock_report_compressed.pdf +0 -0
  114. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/llm/__init__.py +0 -0
  115. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/llm/llm_test.py +0 -0
  116. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/memory/__init__.py +0 -0
  117. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/memory/memory_test.py +0 -0
  118. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/task/__init__.py +0 -0
  119. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/task/llm_connection_test.py +0 -0
  120. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/team/Prompts/Demo_test.py +0 -0
  121. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/team/__init__.py +0 -0
  122. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/tool/__init__.py +0 -0
  123. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/tool/composio_test.py +0 -0
  124. {versionhq-1.1.12.4 → versionhq-1.1.12.5}/tests/tool/tool_test.py +0 -0

versionhq-1.1.12.5/.github/workflows/deploy_docs.yml
@@ -0,0 +1,30 @@
+ name: Deploy Docs
+ on:
+   push:
+     branches:
+       - main
+
+ permissions:
+   contents: write
+
+ jobs:
+   deploy:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+       - name: Configure Git Credentials
+         run: |
+           git config user.name github-actions[bot]
+           git config user.email 41898282+github-actions[bot]@users.noreply.github.com
+       - uses: actions/setup-python@v5
+         with:
+           python-version: 3.12
+       - run: echo "cache_id=$(date +%s)" >> $GITHUB_ENV
+       - uses: actions/cache@v4
+         with:
+           key: mkdocs-material-${{ env.cache_id }}
+           path: .cache
+           restore-keys: |
+             mkdocs-material-
+       - run: pip install mkdocs-material
+       - run: mkdocs gh-deploy --force

{versionhq-1.1.12.4 → versionhq-1.1.12.5}/PKG-INFO
@@ -1,7 +1,7 @@
  Metadata-Version: 2.2
  Name: versionhq
- Version: 1.1.12.4
- Summary: An agentic orchestration framework for multi-agent system that shares memory, knowledge base, and RAG tools.
+ Version: 1.1.12.5
+ Summary: An agentic orchestration framework for building agent networks that handle task automation without human interaction.
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License

@@ -77,6 +77,7 @@ Requires-Dist: numpy>=1.26.4; extra == "numpy"

  # Overview

+ ![DL](https://img.shields.io/badge/Download-15K+-red)
  ![MIT license](https://img.shields.io/badge/License-MIT-green)
  [![Publisher](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml/badge.svg)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
  ![PyPI](https://img.shields.io/badge/PyPI-v1.1.12+-blue)
@@ -89,9 +90,9 @@ Agentic orchestration framework to deploy agent network and handle complex task
  **Visit:**

  - [PyPI](https://pypi.org/project/versionhq/)
- - [Github (LLM orchestration framework)](https://github.com/versionHQ/multi-agent-system)
- - [Use case](https://versi0n.io/) / [Quick demo](https://res.cloudinary.com/dfeirxlea/video/upload/v1737732977/pj_m_home/pnsyh5mfvmilwgt0eusa.mov)
  - [Docs](https://docs.versi0n.io)
+ - [Github Repository](https://github.com/versionHQ/multi-agent-system)
+ - [Playground](https://versi0n.io/)


  <hr />
@@ -105,15 +106,14 @@ Agentic orchestration framework to deploy agent network and handle complex task
  - [Quick Start](#quick-start)
  - [Generate agent networks and launch task execution:](#generate-agent-networks-and-launch-task-execution)
  - [Solo Agent:](#solo-agent)
- - [Return a structured output with a summary in string.](#return-a-structured-output-with-a-summary-in-string)
+ - [Solo Agent:](#solo-agent-1)
  - [Supervising:](#supervising)
  - [Technologies Used](#technologies-used)
  - [Project Structure](#project-structure)
  - [Setup](#setup)
- - [Set up a project](#set-up-a-project)
  - [Contributing](#contributing)
  - [Documentation](#documentation)
- - [Customizing AI Agents](#customizing-ai-agents)
+ - [Customizing AI Agent](#customizing-ai-agent)
  - [Modifying RAG Functionality](#modifying-rag-functionality)
  - [Package Management with uv](#package-management-with-uv)
  - [Pre-Commit Hooks](#pre-commit-hooks)
@@ -158,7 +158,7 @@ You can specify a desired formation or allow the agents to determine it autonomo

  ### Generate agent networks and launch task execution:

- ```
+ ```python
  from versionhq import form_agent_network

  network = form_agent_network(
@@ -173,9 +173,15 @@ You can specify a desired formation or allow the agents to determine it autonomo

  ### Solo Agent:

- #### Return a structured output with a summary in string.
+ ### Solo Agent:
+
+ You can simply build an agent using `Agent` model.

- ```
+ By default, the agent prioritize JSON serializable output.
+
+ But you can add a plane text summary of the structured output by using callbacks.
+
+ ```python
  from pydantic import BaseModel
  from versionhq import Agent, Task

@@ -200,23 +206,24 @@ You can specify a desired formation or allow the agents to determine it autonomo
  print(res)
  ```

- This will return `TaskOutput` instance that stores a response in plane text, JSON serializable dict, and Pydantic model: `CustomOutput` formats with a callback result, tool output (if given), and evaluation results (if given).

- ```
+ This will return a `TaskOutput` object that stores response in plane text, JSON, and Pydantic model: `CustomOutput` formats with a callback result, tool output (if given), and evaluation results (if given).
+
+ ```python
  res == TaskOutput(
- task_id=UUID('<TASK UUID>')
+ task_id=UUID('<TASK UUID>'),
  raw='{\"test1\":\"random str\", \"test2\":[\"str item 1\", \"str item 2\", \"str item 3\"]}',
  json_dict={'test1': 'random str', 'test2': ['str item 1', 'str item 2', 'str item 3']},
- pydantic=<class '__main__.CustomOutput'>
+ pydantic=<class '__main__.CustomOutput'>,
  tool_output=None,
- callback_output='Hi! Here is the result: random str, str item 1, str item 2, str item 3',
+ callback_output='Hi! Here is the result: random str, str item 1, str item 2, str item 3', # returned a plain text summary
  evaluation=None
  )
  ```

  ### Supervising:

- ```
+ ```python
  from versionhq import Agent, Task, ResponseField, Team, TeamMember

  agent_a = Agent(role="agent a", goal="My amazing goals", llm="llm-of-your-choice")
@@ -281,26 +288,22 @@ Tasks can be delegated to a team manager, peers in the team, or completely new a
  .github
  └── workflows/ # Github actions

+ docs/ # Documentation built by MkDocs
+
  src/
- └── versionhq/ # Orchestration frameworks
- │ ├── agent/ # Components
+ └── versionhq/ # Orchestration framework package
+ │ ├── agent/ # Core components
  │ └── llm/
  │ └── task/
- │ └── team/
  │ └── tool/
- │ └── cli/
- │ └── ...
- │ │
- │ ├── db/ # Storage
- │ ├── chroma.sqlite3
  │ └── ...

- └──tests/ # Pytest
+ └──tests/ # Pytest - by core component and use cases in the docs
  │ └── agent/
  │ └── llm/
  │ └── ...

- └── uploads/ # Local repo to store the uploaded files
+ └── uploads/ # Local directory that stores uloaded files

  ```

@@ -308,10 +311,6 @@ src/

  ## Setup

-
-
- ## Set up a project
-
  1. Install `uv` package manager:

  For MacOS:
@@ -396,6 +395,7 @@ src/


  ### Documentation
+
  * To edit the documentation, see `docs` repository and edit the respective component.

  * We use `mkdocs` to update the docs. You can run the doc locally at http://127.0.0.1:8000/:
@@ -404,8 +404,10 @@ src/
  uv run python3 -m mkdocs serve --clean
  ```

+ * To add a new page, update `mkdocs.yml` in the root. Refer to [MkDocs official docs](https://squidfunk.github.io/mkdocs-material/getting-started/) for more details.

- ### Customizing AI Agents
+
+ ### Customizing AI Agent

  To add an agent, use `sample` directory to add new `project`. You can define an agent with a specific role, goal, and set of tools.

@@ -470,27 +472,10 @@ Common issues and solutions:
  ## Frequently Asked Questions (FAQ)
  **Q. Where can I see if the agent is working?**

- > A. You can find a frontend app [here](https://versi0n.io) with real-world outbound use cases.
- > You can also test features [here](https://github.com/versionHQ/test-client-app) using React app.
+ A. Visit [playground](https://versi0n.io).
+

  **Q. How do you analyze the customer?**

  > A. We employ soft clustering for each customer.
  > <img width="200" src="https://res.cloudinary.com/dfeirxlea/image/upload/v1732732628/pj_m_agents/ito937s5d5x0so8isvw6.png">
-
-
- **Q. When should I use a team vs an agent?**
-
- > A. In essence, use a team for intricate, evolving projects, and agents for quick, straightforward tasks.
-
- > Use a team when:
-
- > **Complex tasks**: You need to complete multiple, interconnected tasks that require sequential or hierarchical processing.
-
- > **Iterative refinement**: You want to iteratively improve upon the output through multiple rounds of feedback and revision.
-
- > Use an agent when:
-
- > **Simple tasks**: You have a straightforward, one-off task that doesn't require significant complexity or iteration.
-
- > **Human input**: You need to provide initial input or guidance to the agent, or you expect to review and refine the output.

{versionhq-1.1.12.4 → versionhq-1.1.12.5}/README.md
@@ -1,5 +1,6 @@
  # Overview

+ ![DL](https://img.shields.io/badge/Download-15K+-red)
  ![MIT license](https://img.shields.io/badge/License-MIT-green)
  [![Publisher](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml/badge.svg)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
  ![PyPI](https://img.shields.io/badge/PyPI-v1.1.12+-blue)
@@ -12,9 +13,9 @@ Agentic orchestration framework to deploy agent network and handle complex task
  **Visit:**

  - [PyPI](https://pypi.org/project/versionhq/)
- - [Github (LLM orchestration framework)](https://github.com/versionHQ/multi-agent-system)
- - [Use case](https://versi0n.io/) / [Quick demo](https://res.cloudinary.com/dfeirxlea/video/upload/v1737732977/pj_m_home/pnsyh5mfvmilwgt0eusa.mov)
  - [Docs](https://docs.versi0n.io)
+ - [Github Repository](https://github.com/versionHQ/multi-agent-system)
+ - [Playground](https://versi0n.io/)


  <hr />
@@ -28,15 +29,14 @@ Agentic orchestration framework to deploy agent network and handle complex task
  - [Quick Start](#quick-start)
  - [Generate agent networks and launch task execution:](#generate-agent-networks-and-launch-task-execution)
  - [Solo Agent:](#solo-agent)
- - [Return a structured output with a summary in string.](#return-a-structured-output-with-a-summary-in-string)
+ - [Solo Agent:](#solo-agent-1)
  - [Supervising:](#supervising)
  - [Technologies Used](#technologies-used)
  - [Project Structure](#project-structure)
  - [Setup](#setup)
- - [Set up a project](#set-up-a-project)
  - [Contributing](#contributing)
  - [Documentation](#documentation)
- - [Customizing AI Agents](#customizing-ai-agents)
+ - [Customizing AI Agent](#customizing-ai-agent)
  - [Modifying RAG Functionality](#modifying-rag-functionality)
  - [Package Management with uv](#package-management-with-uv)
  - [Pre-Commit Hooks](#pre-commit-hooks)
@@ -81,7 +81,7 @@ You can specify a desired formation or allow the agents to determine it autonomo

  ### Generate agent networks and launch task execution:

- ```
+ ```python
  from versionhq import form_agent_network

  network = form_agent_network(
@@ -96,9 +96,15 @@ You can specify a desired formation or allow the agents to determine it autonomo

  ### Solo Agent:

- #### Return a structured output with a summary in string.
+ ### Solo Agent:
+
+ You can simply build an agent using `Agent` model.

- ```
+ By default, the agent prioritize JSON serializable output.
+
+ But you can add a plane text summary of the structured output by using callbacks.
+
+ ```python
  from pydantic import BaseModel
  from versionhq import Agent, Task

@@ -123,23 +129,24 @@ You can specify a desired formation or allow the agents to determine it autonomo
  print(res)
  ```

- This will return `TaskOutput` instance that stores a response in plane text, JSON serializable dict, and Pydantic model: `CustomOutput` formats with a callback result, tool output (if given), and evaluation results (if given).

- ```
+ This will return a `TaskOutput` object that stores response in plane text, JSON, and Pydantic model: `CustomOutput` formats with a callback result, tool output (if given), and evaluation results (if given).
+
+ ```python
  res == TaskOutput(
- task_id=UUID('<TASK UUID>')
+ task_id=UUID('<TASK UUID>'),
  raw='{\"test1\":\"random str\", \"test2\":[\"str item 1\", \"str item 2\", \"str item 3\"]}',
  json_dict={'test1': 'random str', 'test2': ['str item 1', 'str item 2', 'str item 3']},
- pydantic=<class '__main__.CustomOutput'>
+ pydantic=<class '__main__.CustomOutput'>,
  tool_output=None,
- callback_output='Hi! Here is the result: random str, str item 1, str item 2, str item 3',
+ callback_output='Hi! Here is the result: random str, str item 1, str item 2, str item 3', # returned a plain text summary
  evaluation=None
  )
  ```

  ### Supervising:

- ```
+ ```python
  from versionhq import Agent, Task, ResponseField, Team, TeamMember

  agent_a = Agent(role="agent a", goal="My amazing goals", llm="llm-of-your-choice")
@@ -204,26 +211,22 @@ Tasks can be delegated to a team manager, peers in the team, or completely new a
  .github
  └── workflows/ # Github actions

+ docs/ # Documentation built by MkDocs
+
  src/
- └── versionhq/ # Orchestration frameworks
- │ ├── agent/ # Components
+ └── versionhq/ # Orchestration framework package
+ │ ├── agent/ # Core components
  │ └── llm/
  │ └── task/
- │ └── team/
  │ └── tool/
- │ └── cli/
- │ └── ...
- │ │
- │ ├── db/ # Storage
- │ ├── chroma.sqlite3
  │ └── ...

- └──tests/ # Pytest
+ └──tests/ # Pytest - by core component and use cases in the docs
  │ └── agent/
  │ └── llm/
  │ └── ...

- └── uploads/ # Local repo to store the uploaded files
+ └── uploads/ # Local directory that stores uloaded files

  ```

@@ -231,10 +234,6 @@ src/

  ## Setup

-
-
- ## Set up a project
-
  1. Install `uv` package manager:

  For MacOS:
@@ -319,6 +318,7 @@ src/


  ### Documentation
+
  * To edit the documentation, see `docs` repository and edit the respective component.

  * We use `mkdocs` to update the docs. You can run the doc locally at http://127.0.0.1:8000/:
@@ -327,8 +327,10 @@ src/
  uv run python3 -m mkdocs serve --clean
  ```

+ * To add a new page, update `mkdocs.yml` in the root. Refer to [MkDocs official docs](https://squidfunk.github.io/mkdocs-material/getting-started/) for more details.

- ### Customizing AI Agents
+
+ ### Customizing AI Agent

  To add an agent, use `sample` directory to add new `project`. You can define an agent with a specific role, goal, and set of tools.

@@ -393,27 +395,10 @@ Common issues and solutions:
  ## Frequently Asked Questions (FAQ)
  **Q. Where can I see if the agent is working?**

- > A. You can find a frontend app [here](https://versi0n.io) with real-world outbound use cases.
- > You can also test features [here](https://github.com/versionHQ/test-client-app) using React app.
+ A. Visit [playground](https://versi0n.io).
+

  **Q. How do you analyze the customer?**

  > A. We employ soft clustering for each customer.
  > <img width="200" src="https://res.cloudinary.com/dfeirxlea/image/upload/v1732732628/pj_m_agents/ito937s5d5x0so8isvw6.png">
-
-
- **Q. When should I use a team vs an agent?**
-
- > A. In essence, use a team for intricate, evolving projects, and agents for quick, straightforward tasks.
-
- > Use a team when:
-
- > **Complex tasks**: You need to complete multiple, interconnected tasks that require sequential or hierarchical processing.
-
- > **Iterative refinement**: You want to iteratively improve upon the output through multiple rounds of feedback and revision.
-
- > Use an agent when:
-
- > **Simple tasks**: You have a straightforward, one-off task that doesn't require significant complexity or iteration.
-
- > **Human input**: You need to provide initial input or guidance to the agent, or you expect to review and refine the output.
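
The Solo Agent hunks above show only the edges of the README example, so the relationship between the fields of the printed `TaskOutput` can be hard to follow. Below is a minimal, self-contained sketch (not part of the package diff) that reuses the values from the hunk; `SimpleNamespace` merely stands in for the real object returned by `task.execute_sync(agent=agent)`.

```python
# Illustration only: a stand-in mirroring the TaskOutput fields printed in the hunks above.
from types import SimpleNamespace
import json

res = SimpleNamespace(
    raw='{"test1": "random str", "test2": ["str item 1", "str item 2", "str item 3"]}',
    json_dict={"test1": "random str", "test2": ["str item 1", "str item 2", "str item 3"]},
    callback_output="Hi! Here is the result: random str, str item 1, str item 2, str item 3",
)

# raw is the JSON-serializable string form of json_dict, so the two round-trip:
assert json.loads(res.raw) == res.json_dict

# callback_output carries the plain text summary produced by the task callback:
print(res.callback_output)
```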

{versionhq-1.1.12.4 → versionhq-1.1.12.5}/docs/core/Agent.md
@@ -7,7 +7,7 @@ tags:

  # Agent

- <class><bold>class:</bold> versionhq.agent.model.<bold>Agent<bold></class>
+ <class>`class` versionhq.agent.model.<bold>Agent<bold></class>

  Each agent has its unique knowledge and memory on the past task.

@@ -16,7 +16,7 @@ You can create one and assign the task or reassign another task to the existing

  ## Core usage

- By defining its role and goal in a simple sentence, the AI agent will be set up to run on <bold>`gpt-4o`</bold> by default.
+ By defining its role and goal in a simple sentence, the AI agent will be set up to run on <bold>`gpt-4o`</bold> by default.

  ```python
  from versionhq import Agent
@@ -33,7 +33,7 @@ agent = Agent(

  ### Model optimization

- <variable>var: <bold>llm</bold>: Optional[str | LLM | Dict[str, Any]] = "gpt4o"</variable>
+ `[var]`<bold>`llm: Optional[str | LLM | Dict[str, Any]] = "gpt-4o"`

  You can select a model or model provider that the agent will run on.

@@ -82,7 +82,7 @@ agent = Agent(
  ```


- We are testing some features on the following providers.
+ Following models are under review.

  ```python
  "huggingface": [
@@ -143,7 +143,7 @@ We are testing some features on the following providers.

  ### Developer Prompt (System Prompt)

- <variable>var: <bold>backstory</bold>: Optional[str] = TEMPLATE_BACKSTORY</variable>
+ `[var]`<bold>`backstory: Optional[str] = TEMPLATE_BACKSTORY`

  Backstory will be drafted automatically using the given role, goal and other values in the Agent model, and converted into the **developer prompt** when the agent executes the task.

@@ -183,8 +183,9 @@ print(agent.backstory)

  # You are a marketing analyst for a company in a saturated market. The market is becoming increasingly price-competitive, and your company's profit margins are shrinking. Your primary goal is to develop and implement strategies to help your company maintain its market share and profitability in this challenging environment.
  ```
+ <hr />

- <variable>var: <bold>use_developer_prompt</bold>: [bool] = True</variable>
+ `[var]`<bold>`use_developer_prompt: [bool] = True`</bold>

  You can turn off the system prompt by setting `use_developer_prompt` False. In this case, the backstory is ignored when the agent call the LLM.

@@ -202,7 +203,7 @@ agent = Agent(

  ### Delegation

- <variable>var: <bold>allow_delegation</bold>: [bool] = False</variable>
+ `[var]`<bold>`allow_delegation: [bool] = False`</bold>

  When the agent is occupied with other tasks or not capable enough to the given task, you can delegate the task to another agent or ask another agent for additional information. The delegated agent will be selected based on nature of the given task and/or tool.

@@ -218,7 +219,7 @@ agent = Agent(

  ### Max Retry Limit

- <variable>var: <bold>max_retry_limit</bold>: Optional[int] = 2</variable>
+ `[var]`<bold>`max_retry_limit: Optional[int] = 2`</bold>

  You can define how many times the agent can retry the execution under the same given conditions when it encounters an error.

@@ -234,7 +235,7 @@ agent = Agent(

  ### Maximum Number of Iterations (MaxIt)

- <variable>var: <bold>maxit</bold>: Optional[int] = 25</variable>
+ `[var]`<bold>`maxit: Optional[int] = 25`</bold>

  You can also define the number of loops that the agent will run after it encounters an error.

@@ -252,7 +253,7 @@ agent = Agent(

  ### Callbacks

- <variable>var: <bold>callbacks</bold>: Optional[List[Callable]] = None</variable>
+ `[var]`<bold>`callbacks: Optional[List[Callable]] = None`</bold>

  You can add callback functions that the agent will run after executing any task.

@@ -320,7 +321,7 @@ agent = Agent(

  ### Context Window

- <variable>var: <bold>respect_context_window</bold>: [bool] = True</variable>
+ `[var]`<bold>`respect_context_window: [bool] = True`</bold>

  A context window determines the amount of text that the model takes into account when generating a response.

@@ -332,7 +333,7 @@ You can turn off this rule by setting `respect_context_window` False to have lar

  ### Max Tokens

- <variable>var: <bold>max_tokens</bold>: Optional[int] = None</variable>
+ `[var]`<bold>`max_tokens: Optional[int] = None`</bold>

  Max tokens defines the maximum number of tokens in the generated response. Tokens can be thought of as the individual units of text, which can be words or characters.

@@ -340,7 +341,7 @@ By default, the agent will follow the default max_tokens of the model, but you c

  ### Maximum Execution Time

- <variable>var: <bold>max_execution_times</bold>: Optional[int] = None</variable>
+ `[var]`<bold>`max_execution_times: Optional[int] = None`</bold>

  The maximum amount of wall clock time to spend in the execution loop.

@@ -348,7 +349,7 @@ By default, the agent will follow the default setting of the model.

  ### Maximum RPM (Requests Per Minute)

- <variable>var: <bold>max_rpm</bold>: Optional[int] = None</variable>
+ `[var]`<bold>`max_rpm: Optional[int] = None`</bold>

  The maximum number of requests that the agent can send to the LLM.

@@ -356,7 +357,7 @@ By default, the agent will follow the default setting of the model. When the val

  ### Other LLM Configuration

- <variable>var: <bold>llm_xonfig</bold>: Optional[Dict[str, Any]] = None</variable>
+ `[var]`<bold>`llm_config: Optional[Dict[str, Any]] = None`</bold>

  You can specify any other parameters that the agent needs to follow when they call the LLM. Else, the agent will follow the default settings given by the model provider.

@@ -385,17 +386,16 @@ agent = Agent(
  )

  print(agent.llm)
-
- *<class LLM(
- max_tokens=3000,
- temperature=1,
- top_p=0.1,
- n=1,
- stream=False,
- stream_options=None,
- stop="test",
- max_completion_tokens=10000,
- )>*
+ # LLM(
+ # max_tokens=3000,
+ # temperature=1,
+ # top_p=0.1,
+ # n=1,
+ # stream=False,
+ # stream_options=None,
+ # stop="test",
+ # max_completion_tokens=10000,
+ # )
  ```

  <hr />
@@ -404,7 +404,7 @@ print(agent.llm)

  ### Knowledge Sources

- <variable>var: <bold>knowledge_sources</bold>: Optional[List[KnowledgeSource]] = None</variable>
+ `[var]`<bold>`knowledge_sources: Optional[List[KnowledgeSource]] = None`</bold>

  You can add knowledge sources to the agent in the following formats:

@@ -441,7 +441,7 @@ res = task.execute_sync(agent=agent)
  # "gold" in res.raw == True
  ```

- Ref. Knowledge class
+ * Reference: <bold>`Knowledge` class</bold>

  ---

@@ -449,7 +449,7 @@ Ref. Knowledge class

  ### Store task execution results in memory

- <variable>var: <bold>use_memory</bold>: bool = False</variable>
+ `[var]`<bold>`use_memory: bool = False`</bold>

  By turning on the use_memory val True, the agent will create and store the task output and contextualize the memory when they execute the task.

@@ -484,7 +484,7 @@ RAGStorage(

  MEM0 Storage

- Ref. Memory class
+ * Reference: <bold>`Memory`</bold> class

  <hr />

@@ -492,7 +492,7 @@ Ref. Memory class

  ### Model configuration

- <variable>var: <bold>config</bold>: Optional[Dict[str, Any]] = None</variable>
+ `[var]`<bold>`config: Optional[Dict[str, Any]] = None`</bold>

  You can create an agent by using model config parameters instead.

@@ -522,5 +522,3 @@ agent = Agent(

  - Self-learning
  - Tool
-
- [LLM](https://www.notion.so/LLM-17e923685cf980999ac0e7f65cdb80b7?pvs=21)
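
For orientation, the `[var]` entries reformatted throughout the Agent.md hunks can be combined into a single constructor call roughly as below. This is a hedged sketch rather than code taken from the package: the field names come from the hunks above, while the concrete role, goal, and `llm_config` values are illustrative assumptions.

```python
from versionhq import Agent

# Sketch only: combines field names documented in the Agent.md hunks above.
# The concrete values (role, goal, llm_config contents) are assumptions.
agent = Agent(
    role="marketing analyst",            # role and goal drive the auto-drafted backstory
    goal="Maintain market share in a price-competitive market",
    llm="gpt-4o",                        # documented default model
    max_retry_limit=2,                   # retries after an error (documented default: 2)
    maxit=25,                            # iteration cap after an error (documented default: 25)
    respect_context_window=True,         # keep input within the model's context window
    llm_config={"temperature": 1, "top_p": 0.1, "max_tokens": 3000},  # extra LLM parameters
)
print(agent.llm)  # prints the resolved LLM configuration, as in the hunk above
```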