versionhq 1.1.12.2__py3-none-any.whl → 1.1.12.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +5 -2
- versionhq/agent/model.py +47 -23
- versionhq/llm/llm_vars.py +1 -1
- versionhq/llm/model.py +3 -1
- versionhq/task/formation.py +3 -12
- versionhq/task/model.py +1 -1
- versionhq/team/model.py +2 -2
- {versionhq-1.1.12.2.dist-info → versionhq-1.1.12.4.dist-info}/METADATA +54 -23
- {versionhq-1.1.12.2.dist-info → versionhq-1.1.12.4.dist-info}/RECORD +12 -12
- {versionhq-1.1.12.2.dist-info → versionhq-1.1.12.4.dist-info}/LICENSE +0 -0
- {versionhq-1.1.12.2.dist-info → versionhq-1.1.12.4.dist-info}/WHEEL +0 -0
- {versionhq-1.1.12.2.dist-info → versionhq-1.1.12.4.dist-info}/top_level.txt +0 -0
versionhq/__init__.py
CHANGED
@@ -21,9 +21,10 @@ from versionhq.tool.composio_tool import ComposioHandler
 from versionhq.memory.contextual_memory import ContextualMemory
 from versionhq.memory.model import ShortTermMemory,LongTermMemory, UserMemory, MemoryItem

+from versionhq.task.formation import form_agent_network


-__version__ = "1.1.12.2"
+__version__ = "1.1.12.4"
 __all__ = [
     "Agent",

@@ -67,5 +68,7 @@ __all__ = [
     "ShortTermMemory",
     "LongTermMemory",
     "UserMemory",
-    "MemoryItem"
+    "MemoryItem",
+
+    "form_agent_network"
 ]
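The net effect of this file's changes is that `form_agent_network` is re-exported from the package root alongside the bumped `__version__`. A minimal sketch of the two import paths that should both resolve after this release (the module-level path is assumed to keep working, since `versionhq/task/formation.py` still ships in the wheel per the RECORD below):

```
from versionhq import form_agent_network                  # new top-level re-export
from versionhq.task.formation import form_agent_network   # original module path, unchanged
```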
versionhq/agent/model.py
CHANGED
@@ -128,7 +128,7 @@ class Agent(BaseModel):
     max_tokens: Optional[int] = Field(default=None, description="max. number of tokens for the agent's execution")
     max_execution_time: Optional[int] = Field(default=None, description="max. execution time for an agent to execute a task")
     max_rpm: Optional[int] = Field(default=None, description="max. number of requests per minute")
-    llm_config: Optional[Dict[str, Any]] = Field(default=None, description="other llm config cascaded to the model")
+    llm_config: Optional[Dict[str, Any]] = Field(default=None, description="other llm config cascaded to the LLM model")

     # cache, error, ops handling
     formatting_errors: int = Field(default=0, description="number of formatting errors.")
@@ -164,34 +164,43 @@ class Agent(BaseModel):
         Set up `llm` and `function_calling_llm` as valid LLM objects using the given values.
         """
         self.agent_ops_agent_name = self.role
-        self.llm = self.
+        self.llm = self._convert_to_llm_object(llm=self.llm)

         function_calling_llm = self.function_calling_llm if self.function_calling_llm else self.llm if self.llm else None
-        function_calling_llm = self.
+        function_calling_llm = self._convert_to_llm_object(llm=function_calling_llm)
         if function_calling_llm._supports_function_calling():
             self.function_calling_llm = function_calling_llm
+        elif self.llm._supports_function_calling():
+            self.function_calling_llm = self.llm
+        else:
+            self.function_calling_llm = self._convert_to_llm_object(llm=LLM(model=DEFAULT_MODEL_NAME))
         return self


-    def
+    def _convert_to_llm_object(self, llm: Any | None) -> LLM:
+        """
+        Convert the given value to LLM object.
+        When `llm` is dict or self.llm_config is not None, add these values to the LLM object after validating them.
+        """
         llm = llm if llm is not None else DEFAULT_MODEL_NAME

         match llm:
             case LLM():
-                return self._set_llm_params(llm=llm)
+                return self._set_llm_params(llm=llm, config=self.llm_config)

             case str():
                 llm_obj = LLM(model=llm)
-                return self._set_llm_params(llm=llm_obj)
+                return self._set_llm_params(llm=llm_obj, config=self.llm_config)

             case dict():
                 model_name = llm.pop("model_name", llm.pop("deployment_name", str(llm)))
                 llm_obj = LLM(model=model_name if model_name else DEFAULT_MODEL_NAME)
-
+                config = llm.update(self.llm_config) if self.llm_config else llm
+                return self._set_llm_params(llm_obj, config=config)

             case _:
                 model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
-                llm_obj = LLM(model=model_name)
+                llm_obj = LLM(model=model_name if model_name else DEFAULT_MODEL_NAME)
                 llm_params = {
                     "max_tokens": (getattr(llm, "max_tokens") or self.max_tokens or 3000),
                     "timeout": getattr(llm, "timeout", self.max_execution_time),
@@ -201,15 +210,40 @@ class Agent(BaseModel):
                     "api_key": getattr(llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
                     "base_url": getattr(llm, "base_url", None),
                 }
-
+                config = llm_params.update(self.llm_config) if self.llm_config else llm_params
+                return self._set_llm_params(llm=llm_obj, config=config)


     def _set_llm_params(self, llm: LLM, config: Dict[str, Any] = None) -> LLM:
         """
-
-        Prioritize the agent's settings over the model's base setups.
+        Add valid params to the LLM object.
         """

+        import litellm
+        from versionhq.llm.llm_vars import PARAMS
+
+        valid_config = {k: v for k, v in config.items() if v} if config else {}
+
+        if valid_config:
+            valid_keys = list()
+            try:
+                valid_keys = litellm.get_supported_openai_params(model=llm.model, custom_llm_provider=self.endpoint_provider, request_type="chat_completion")
+                if not valid_keys:
+                    valid_keys = PARAMS.get("common")
+            except:
+                valid_keys = PARAMS.get("common")
+
+            valid_keys += PARAMS.get("litellm")
+
+            for key in valid_keys:
+                if key in valid_config and valid_config[key]:
+                    val = valid_config[key]
+                    if [key == k for k, v in LLM.model_fields.items()]:
+                        setattr(llm, key, val)
+                    else:
+                        llm.other_valid_config.update({ key: val})
+
+
         llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
         llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens

@@ -225,15 +259,6 @@ class Agent(BaseModel):
         if self.respect_context_window == False:
             llm.context_window_size = DEFAULT_CONTEXT_WINDOW_SIZE

-        config = self.config.update(config) if self.config else config
-        if config:
-            valid_params = litellm.get_supported_openai_params(model=llm.model)
-            for k, v in config.items():
-                try:
-                    if k in valid_params and v is not None:
-                        setattr(llm, k, v)
-                except:
-                    pass
         return llm


@@ -407,9 +432,8 @@ class Agent(BaseModel):
         self._logger.log(level="info", message=f"Messages sent to the model: {messages}", color="blue")

         if tool_res_as_final:
-
-
-            task.tokens = func_llm._tokens
+            raw_response = self.function_calling_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
+            task.tokens = self.function_calling_llm._tokens
         else:
             raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)
             task.tokens = self.llm._tokens
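Taken together, these hunks route the new `llm_config` dict through `_convert_to_llm_object()` and `_set_llm_params()`, so provider parameters are checked against litellm's supported keys before being attached to the `LLM` object, and tool-call responses now go through `self.function_calling_llm`. A hedged sketch of how an agent might opt into the cascade (the `role`/`goal`/`llm` keywords appear elsewhere in this diff; the model name and config values are illustrative assumptions, not defaults of the library):

```
from versionhq import Agent

# llm given as a model-name string; llm_config is cascaded by
# _convert_to_llm_object() -> _set_llm_params(): keys matching LLM fields are set
# directly, other supported keys are stored in llm.other_valid_config.
agent = Agent(
    role="Marketing Analyst",                   # illustrative
    goal="Draft an outbound campaign brief",    # illustrative
    llm="gpt-4o",                               # assumed model name
    llm_config={"temperature": 0.3, "top_p": 0.9},
)
```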
versionhq/llm/llm_vars.py
CHANGED
versionhq/llm/model.py
CHANGED
@@ -95,6 +95,7 @@ class LLM(BaseModel):
     top_logprobs: Optional[int] = Field(default=None)
     tools: Optional[List[Dict[str, Any]]] = Field(default_factory=list, description="store a list of tool properties")
     callbacks: List[Any] = Field(default_factory=list)
+    other_valid_config: Optional[Dict[str, Any]] = Field(default_factory=dict, description="store other valid values in dict to cascade to the model")

     # LiteLLM specific fields
     api_base: Optional[str] = Field(default=None, description="litellm specific field - api base of the model provider")
@@ -201,7 +202,6 @@ class LLM(BaseModel):
         if api_key_name:
             self.api_key = os.environ.get(api_key_name, None)

-
         base_url_key_name = self.endpoint_provider.upper() + "_API_BASE" if self.endpoint_provider else None

         if base_url_key_name:
@@ -229,6 +229,8 @@ class LLM(BaseModel):
         for item in valid_keys:
             if hasattr(self, item) and getattr(self, item):
                 valid_params[item] = getattr(self, item)
+            elif item in self.other_valid_config and self.other_valid_config[item]:
+                valid_params[item] = self.other_valid_config[item]
             elif item in config and config[item]:
                 valid_params[item] = config[item]

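With `other_valid_config` added, the parameter lookup in this hunk resolves in three steps: an explicit `LLM` field wins, then a value stashed in `other_valid_config`, then the per-call `config` dict. A standalone sketch of that precedence using a stand-in object (not the library's API; the names only mirror what the diff shows):

```
# Hypothetical stand-in showing the lookup order introduced here.
class FakeLLM:
    temperature = None                         # explicit field, unset
    other_valid_config = {"temperature": 0.2}  # cascaded from Agent.llm_config

def resolve(llm, config: dict, key: str):
    if hasattr(llm, key) and getattr(llm, key):                         # 1. explicit field
        return getattr(llm, key)
    if key in llm.other_valid_config and llm.other_valid_config[key]:   # 2. other_valid_config (new)
        return llm.other_valid_config[key]
    if key in config and config[key]:                                   # 3. call-time config
        return config[key]
    return None

print(resolve(FakeLLM(), {"temperature": 0.9}, "temperature"))  # -> 0.2
```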
versionhq/task/formation.py
CHANGED
@@ -11,7 +11,7 @@ from versionhq._utils import Logger


 def form_agent_network(
-
+    task: str,
     expected_outcome: str,
     agents: List[Agent] = None,
     context: str = None,
@@ -21,7 +21,7 @@ def form_agent_network(
     Make a formation of agents from the given task description, agents (optional), context (optional), and expected outcome (optional).
     """

-    if not
+    if not task:
         Logger(verbose=True).log(level="error", message="Missing task description.", color="red")
         return None

@@ -40,7 +40,7 @@ def form_agent_network(
     vhq_task = Task(
         description=f"""
     Create a team of specialized agents designed to automate the following task and deliver the expected outcome. Consider the necessary roles for each agent with a clear task description. If you think we neeed a leader to handle the automation, return a leader_agent role as well, but if not, leave the a leader_agent role blank.
-    Task: {str(
+    Task: {str(task)}
     Expected outcome: {str(expected_outcome)}
     """,
         pydantic_output=Outcome
@@ -112,12 +112,3 @@ def form_agent_network(
     except Exception as e:
         Logger(verbose=True).log(level="error", message=f"Failed to create an agent network - return None. You can try with solo agent. Error: {str(e)}", color="red")
         return None
-
-
-
-if __name__ == "__main__":
-    res = form_agent_network(
-        task_overview="Launch an outbound campaign to attract young audience.",
-        expected_outcome="Best media mix of the campaign.",
-        context="We are selling sports wear.",
-    )
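For callers, the visible change is the keyword name of the first argument: the removed `__main__` block above passed `task_overview=`, while the new signature takes `task=`. The same demo call, updated for 1.1.12.4 (argument values copied from the removed block):

```
from versionhq import form_agent_network

res = form_agent_network(
    task="Launch an outbound campaign to attract young audience.",
    expected_outcome="Best media mix of the campaign.",
    context="We are selling sports wear.",
)
```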
versionhq/task/model.py
CHANGED
@@ -607,7 +607,7 @@ Ref. Output image: {output_formats_to_follow}
         if len(peers) > 0:
             agent_to_delegate = peers[0]
         else:
-            agent_to_delegate = Agent(role="
+            agent_to_delegate = Agent(role="vhq-Delegated-Agent", goal=agent.goal, llm=agent.llm)

         agent = agent_to_delegate
         self.delegations += 1
versionhq/team/model.py
CHANGED
@@ -387,9 +387,9 @@ class Team(BaseModel):
         return self._create_team_output(task_outputs, lead_task_output)


-    def
+    def launch(self, kwargs_before: Optional[Dict[str, str]] = None, kwargs_after: Optional[Dict[str, Any]] = None) -> TeamOutput:
         """
-
+        Confirm and launch the formation - execute tasks and record outputs.
         0. Assign an agent to a task - using conditions (manager prioritizes team_tasks) and planning_llm.
         1. Address `before_kickoff_callbacks` if any.
         2. Handle team members' tasks in accordance with the process.
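The renamed entry point is `launch()`, which returns a `TeamOutput`; the README quickstart later in this diff calls the same method on the object returned by `form_agent_network`. A minimal hedged sketch, assuming `team` is an already-assembled `Team` instance and that both keyword hooks stay optional as the new signature suggests:

```
res = team.launch()                                        # returns TeamOutput
res = team.launch(kwargs_before={"note": "pre-kickoff"})   # hypothetical hook payload
```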
{versionhq-1.1.12.2.dist-info → versionhq-1.1.12.4.dist-info}/METADATA
CHANGED
@@ -1,7 +1,7 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.1.12.2
-Summary:
+Version: 1.1.12.4
+Summary: An agentic orchestration framework for multi-agent system that shares memory, knowledge base, and RAG tools.
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License

@@ -26,7 +26,7 @@ License: MIT License
 SOFTWARE.

 Project-URL: Homepage, https://versi0n.io
-Project-URL: Documentation, https://
+Project-URL: Documentation, https://docs.versi0n.io
 Project-URL: Repository, https://github.com/versionHQ/multi-agent-system
 Project-URL: Issues, https://github.com/versionHQ/multi-agent-system/issues
 Keywords: orchestration framework,orchestration,ai agent,multi-agent system,RAG,agent,agentic orchestration,llm
@@ -91,7 +91,7 @@ Agentic orchestration framework to deploy agent network and handle complex task
 - [PyPI](https://pypi.org/project/versionhq/)
 - [Github (LLM orchestration framework)](https://github.com/versionHQ/multi-agent-system)
 - [Use case](https://versi0n.io/) / [Quick demo](https://res.cloudinary.com/dfeirxlea/video/upload/v1737732977/pj_m_home/pnsyh5mfvmilwgt0eusa.mov)
-- [
+- [Docs](https://docs.versi0n.io)


 <hr />
@@ -103,13 +103,16 @@ Agentic orchestration framework to deploy agent network and handle complex task
 - [Key Features](#key-features)
 - [Agent formation](#agent-formation)
 - [Quick Start](#quick-start)
-- [
+- [Generate agent networks and launch task execution:](#generate-agent-networks-and-launch-task-execution)
+- [Solo Agent:](#solo-agent)
 - [Return a structured output with a summary in string.](#return-a-structured-output-with-a-summary-in-string)
-- [
+- [Supervising:](#supervising)
 - [Technologies Used](#technologies-used)
 - [Project Structure](#project-structure)
 - [Setup](#setup)
+- [Set up a project](#set-up-a-project)
 - [Contributing](#contributing)
+- [Documentation](#documentation)
 - [Customizing AI Agents](#customizing-ai-agents)
 - [Modifying RAG Functionality](#modifying-rag-functionality)
 - [Package Management with uv](#package-management-with-uv)
@@ -123,9 +126,9 @@ Agentic orchestration framework to deploy agent network and handle complex task

 ## Key Features

-
+`versionhq` is a Python framework for agent networks that handle complex task automation without human interaction.

-Agents are model-agnostic, and
+Agents are model-agnostic, and will improve task output, while oprimizing token cost and job latency, by sharing their memory, knowledge base, and RAG tools with other agents in the network.


 ### Agent formation
@@ -137,7 +140,7 @@ You can specify a desired formation or allow the agents to determine it autonomo

 | | **Solo Agent** | **Supervising** | **Network** | **Random** |
 | :--- | :--- | :--- | :--- | :--- |
-| **Formation** | <img src="https://res.cloudinary.com/dfeirxlea/image/upload/
+| **Formation** | <img src="https://res.cloudinary.com/dfeirxlea/image/upload/v1738818211/pj_m_agents/rbgxttfoeqqis1ettlfz.png" alt="solo" width="200"> | <img src="https://res.cloudinary.com/dfeirxlea/image/upload/v1738818211/pj_m_agents/zhungor3elxzer5dum10.png" alt="solo" width="200"> | <img src="https://res.cloudinary.com/dfeirxlea/image/upload/v1738818211/pj_m_agents/dnusl7iy7kiwkxwlpmg8.png" alt="solo" width="200"> | <img src="https://res.cloudinary.com/dfeirxlea/image/upload/v1738818211/pj_m_agents/sndpczatfzbrosxz9ama.png" alt="solo" width="200"> |
 | **Usage** | <ul><li>A single agent with tools, knowledge, and memory.</li><li>When self-learning mode is on - it will turn into **Random** formation.</li></ul> | <ul><li>Leader agent gives directions, while sharing its knowledge and memory.</li><li>Subordinates can be solo agents or networks.</li></ul> | <ul><li>Share tasks, knowledge, and memory among network members.</li></ul> | <ul><li>A single agent handles tasks, asking help from other agents without sharing its memory or knowledge.</li></ul> |
 | **Use case** | An email agent drafts promo message for the given audience. | The leader agent strategizes an outbound campaign plan and assigns components such as media mix or message creation to subordinate agents. | An email agent and social media agent share the product knowledge and deploy multi-channel outbound campaign. | 1. An email agent drafts promo message for the given audience, asking insights on tones from other email agents which oversee other clusters. 2. An agent calls the external agent to deploy the campaign. |

@@ -153,8 +156,22 @@ You can specify a desired formation or allow the agents to determine it autonomo

 (Python 3.11 or higher)

+### Generate agent networks and launch task execution:

-
+```
+from versionhq import form_agent_network
+
+network = form_agent_network(
+    task="YOUR AMAZING TASK OVERVIEW",
+    expected_outcome="YOUR OUTCOME EXPECTATION",
+)
+res = network.launch()
+```
+
+This will form a network with multiple agents on `Formation` and return `TaskOutput` object with output in JSON, plane text, Pydantic model format with evaluation.
+
+
+### Solo Agent:

 #### Return a structured output with a summary in string.

@@ -197,7 +214,7 @@ This will return `TaskOutput` instance that stores a response in plane text, JSO
 )
 ```

-###
+### Supervising:

 ```
 from versionhq import Agent, Task, ResponseField, Team, TeamMember
@@ -291,19 +308,24 @@ src/

 ## Setup

+
+
+## Set up a project
+
 1. Install `uv` package manager:

-
+   For MacOS:

-
-
-
+   ```
+   brew install uv
+   ```

-
+   For Ubuntu/Debian:
+
+   ```
+   sudo apt-get install uv
+   ```

-   ```
-   sudo apt-get install uv
-   ```

 2. Install dependencies:
 ```
@@ -325,11 +347,11 @@ src/
 3. Set up environment variables:
 Create a `.env` file in the project root and add the following:
 ```
-OPENAI_API_KEY=your-openai-api-key
 LITELLM_API_KEY=your-litellm-api-key
-
+OPENAI_API_KEY=your-openai-api-key
 COMPOSIO_API_KEY=your-composio-api-key
 COMPOSIO_CLI_KEY=your-composio-cli-key
+[LLM_INTERFACE_PROVIDER_OF_YOUR_CHOICE]_API_KEY=your-api-key
 ```

 <hr />
@@ -363,16 +385,25 @@ src/
 **Optional**
 * Flag with `#! REFINEME` for any improvements needed and `#! FIXME` for any errors.

-* Run a React demo app: [React demo app](https://github.com/versionHQ/test-client-app) to check it on the client endpoint.
+<!-- * Run a React demo app: [React demo app](https://github.com/versionHQ/test-client-app) to check it on the client endpoint.
 ```
 npm i
 npm start
 ```
-The frontend will be available at `http://localhost:3000`.
+The frontend will be available at `http://localhost:3000`. -->

 * `production` use case is available at `https://versi0n.io`. Currently, we are running alpha test.


+### Documentation
+* To edit the documentation, see `docs` repository and edit the respective component.
+
+* We use `mkdocs` to update the docs. You can run the doc locally at http://127.0.0.1:8000/:
+
+```
+uv run python3 -m mkdocs serve --clean
+```
+

 ### Customizing AI Agents

{versionhq-1.1.12.2.dist-info → versionhq-1.1.12.4.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-versionhq/__init__.py,sha256=
+versionhq/__init__.py,sha256=JhEVJI4u5L_5zgmcnPC4erd7T1L_nkgwT49dfI_G6TU,2349
 versionhq/_utils/__init__.py,sha256=dzoZr4cBlh-2QZuPzTdehPUCe9lP1dmRtauD7qTjUaA,158
 versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
 versionhq/_utils/logger.py,sha256=j9SlQPIefdVUlwpGfJY83E2BUt1ejWgZ2M2I8aMyQ3c,1579
@@ -7,7 +7,7 @@ versionhq/_utils/usage_metrics.py,sha256=hhq1OCW8Z4V93vwW2O2j528EyjOlF8wlTsX5IL-
 versionhq/_utils/vars.py,sha256=bZ5Dx_bFKlt3hi4-NNGXqdk7B23If_WaTIju2fiTyPQ,57
 versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/agent/inhouse_agents.py,sha256=v8frZjmiqYR8zuuh4CjYJheaHfHT2n_utT8pWCLJFes,2375
-versionhq/agent/model.py,sha256=
+versionhq/agent/model.py,sha256=qJCPsPV4SWIJnvbOL8Ntb60YFzWmVrqoNINww6Gbv2Q,23313
 versionhq/agent/parser.py,sha256=riG0dkdQCxH7uJ0AbdVdg7WvL0BXhUgJht0VtQvxJBc,4082
 versionhq/agent/rpm_controller.py,sha256=grezIxyBci_lDlwAlgWFRyR5KOocXeOhYkgN02dNFNE,2360
 versionhq/agent/TEMPLATES/Backstory.py,sha256=IAhGnnt6VUMe3wO6IzeyZPDNu7XE7Uiu3VEXUreOcKs,532
@@ -28,8 +28,8 @@ versionhq/knowledge/source.py,sha256=30VXsl3uHdM0wK0Dik3XfFxpNpEiy539PBNBvg0Y4-g
 versionhq/knowledge/source_docling.py,sha256=hhHn3rS4KVsFKEPWcfllM8VxSL86PckZdAHDZNQNOq8,5411
 versionhq/knowledge/storage.py,sha256=7oxCg3W9mFjYH1YmuH9kFtTbNxquzYFjuUjd_TlsB9E,8170
 versionhq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-versionhq/llm/llm_vars.py,sha256=
-versionhq/llm/model.py,sha256=
+versionhq/llm/llm_vars.py,sha256=p4MbhH0eaQ1qWkHfA3QBP1KteJFkatEecvSCwqJ4m-M,6773
+versionhq/llm/model.py,sha256=tbsd6IXwxLwCzni7weSVPDMgBbOcg5dtlzhN_as89gE,15446
 versionhq/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/memory/contextual_memory.py,sha256=tCsOOAUnfrOL7YiakqGoi3uShzzS870TmGnlGd3z_A4,3556
 versionhq/memory/model.py,sha256=4wow2O3UuMZ0AbC2NyxddGZac3-_GjNZbK9wsA015NA,8145
@@ -42,14 +42,14 @@ versionhq/storage/task_output_storage.py,sha256=E1t_Fkt78dPYIOl3MP7LfQ8oGtjlzxBu
 versionhq/storage/utils.py,sha256=ByYXPoEIGJYLUqz-DWjbCAnneNrH1otiYbp12SCILpM,747
 versionhq/task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/task/evaluate.py,sha256=f8S-nuEl2xJ2LnLv7iQixH53-gp0pKx1hFp_sUlN464,3977
-versionhq/task/formation.py,sha256=
+versionhq/task/formation.py,sha256=bZytru6I5a_CFq2rtmsBb0hVWIqExpzUxXPXYpd6qnI,4706
 versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
 versionhq/task/log_handler.py,sha256=KJRrcNZgFSKhlNzvtYFnvtp6xukaF1s7ifX9u4zWrN8,1683
-versionhq/task/model.py,sha256=
+versionhq/task/model.py,sha256=iEzNZ2BILmCg6iEhuspNR0mrUh1vxfCZxz4Sjw5Nkto,29448
 versionhq/task/structured_response.py,sha256=uVqgeUxNOACPe2hdc0RELSbtKd1vrwonfjXMOGTT0TI,4818
 versionhq/task/TEMPLATES/Description.py,sha256=V-4kh8xpQTKOcDMi2xnuP-fcNk6kuoz1_5tYBlDLQWQ,420
 versionhq/team/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-versionhq/team/model.py,sha256=
+versionhq/team/model.py,sha256=WDnaJuvnVMmreztR0CMypnKpqFtGzAb7rrZ-HKa010Y,18886
 versionhq/team/team_planner.py,sha256=l1PwyBXK1F3uOcbF1IvJBWKApJhghZnBF_ErkNcE04s,3745
 versionhq/tool/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/tool/cache_handler.py,sha256=iL8FH7X0G-cdT0uhJwzuhLDaadTXOdfybZcDy151-es,1085
@@ -58,8 +58,8 @@ versionhq/tool/composio_tool_vars.py,sha256=FvBuEXsOQUYnN7RTFxT20kAkiEYkxWKkiVtg
 versionhq/tool/decorator.py,sha256=C4ZM7Xi2gwtEMaSeRo-geo_g_MAkY77WkSLkAuY0AyI,1205
 versionhq/tool/model.py,sha256=ILfvRviQR1W1w-u3MtOIz0PrWa-brpgfAiyKWol63mk,12227
 versionhq/tool/tool_handler.py,sha256=2m41K8qo5bGCCbwMFferEjT-XZ-mE9F0mDUOBkgivOI,1416
-versionhq-1.1.12.
-versionhq-1.1.12.
-versionhq-1.1.12.
-versionhq-1.1.12.
-versionhq-1.1.12.
+versionhq-1.1.12.4.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
+versionhq-1.1.12.4.dist-info/METADATA,sha256=rL_c6WuvaaZVs_uX7tMLfT-hbkH3qCT7KT90lO_tuVE,19414
+versionhq-1.1.12.4.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+versionhq-1.1.12.4.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+versionhq-1.1.12.4.dist-info/RECORD,,
{versionhq-1.1.12.2.dist-info → versionhq-1.1.12.4.dist-info}/LICENSE
File without changes
{versionhq-1.1.12.2.dist-info → versionhq-1.1.12.4.dist-info}/WHEEL
File without changes
{versionhq-1.1.12.2.dist-info → versionhq-1.1.12.4.dist-info}/top_level.txt
File without changes