versionhq 1.2.1.16__py3-none-any.whl → 1.2.1.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +2 -2
- versionhq/_utils/process_config.py +1 -1
- versionhq/agent/model.py +93 -90
- versionhq/agent_network/formation.py +157 -0
- versionhq/agent_network/model.py +7 -8
- versionhq/task/model.py +32 -14
- {versionhq-1.2.1.16.dist-info → versionhq-1.2.1.18.dist-info}/METADATA +1 -1
- {versionhq-1.2.1.16.dist-info → versionhq-1.2.1.18.dist-info}/RECORD +11 -11
- versionhq/task/formation.py +0 -159
- {versionhq-1.2.1.16.dist-info → versionhq-1.2.1.18.dist-info}/LICENSE +0 -0
- {versionhq-1.2.1.16.dist-info → versionhq-1.2.1.18.dist-info}/WHEEL +0 -0
- {versionhq-1.2.1.16.dist-info → versionhq-1.2.1.18.dist-info}/top_level.txt +0 -0
versionhq/__init__.py
CHANGED
@@ -27,11 +27,11 @@ from versionhq.tool.composio_tool import ComposioHandler
 from versionhq.memory.contextual_memory import ContextualMemory
 from versionhq.memory.model import ShortTermMemory,LongTermMemory, UserMemory, MemoryItem

-from versionhq.
+from versionhq.agent_network.formation import form_agent_network
 from versionhq.task_graph.draft import workflow


-__version__ = "1.2.1.16"
+__version__ = "1.2.1.18"
 __all__ = [
     "Agent",
versionhq/_utils/process_config.py
CHANGED
@@ -8,7 +8,7 @@ def process_config(values_to_update: Dict[str, Any], model_class: Type[BaseModel
     Refer to the Pydantic model class for field validation.
     """

-    config = values_to_update.pop("config"
+    config = values_to_update.pop("config") if "config" in values_to_update else {}

     if config:
         for k, v in config.items():
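
For orientation, a minimal usage sketch of the relocated helper, assuming the package still re-exports `form_agent_network` at the top level as the `__init__.py` hunk above imports it (the task and outcome strings are illustrative, not from the release):

    import versionhq as vhq

    # form_agent_network now resolves from versionhq.agent_network.formation
    network = vhq.form_agent_network(
        task="Summarize weekly support tickets into a report",   # illustrative
        expected_outcome="a short markdown report",               # illustrative
    )
    if network:
        print(network.formation, len(network.members))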
versionhq/agent/model.py
CHANGED
@@ -91,7 +91,7 @@ class Agent(BaseModel):
     user_prompt_template: Optional[str] = Field(default=None, description="abs. file path to user prompt template")

     # task execution rules
-
+    networks: Optional[List[Any]] = Field(default_factory=list, description="store a list of agent networks that the agent belong as a member")
     allow_delegation: bool = Field(default=False, description="whether to delegate the task to another agent")
     max_retry_limit: int = Field(default=2, description="max. number of task retries when an error occurs")
     maxit: Optional[int] = Field(default=25, description="max. number of total optimization loops conducted when an error occurs")
@@ -149,94 +149,6 @@ class Agent(BaseModel):
         return self


-    def _convert_to_llm_object(self, llm: Any = None) -> LLM:
-        """
-        Convert the given value to LLM object.
-        When `llm` is dict or self.llm_config is not None, add these values to the LLM object after validating them.
-        """
-        llm = llm if llm else self.llm if self.llm else DEFAULT_MODEL_NAME
-
-        if not llm:
-            pass
-
-        match llm:
-            case LLM():
-                return self._set_llm_params(llm=llm, config=self.llm_config)
-
-            case str():
-                llm_obj = LLM(model=llm)
-                return self._set_llm_params(llm=llm_obj, config=self.llm_config)
-
-            case dict():
-                model_name = llm.pop("model_name", llm.pop("deployment_name", str(llm)))
-                llm_obj = LLM(model=model_name if model_name else DEFAULT_MODEL_NAME)
-                config = llm.update(self.llm_config) if self.llm_config else llm
-                return self._set_llm_params(llm_obj, config=config)
-
-            case _:
-                model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
-                llm_obj = LLM(model=model_name if model_name else DEFAULT_MODEL_NAME)
-                llm_params = {
-                    "max_tokens": (getattr(llm, "max_tokens") or 3000),
-                    "timeout": getattr(llm, "timeout", self.max_execution_time),
-                    "callbacks": getattr(llm, "callbacks", None),
-                    "temperature": getattr(llm, "temperature", None),
-                    "logprobs": getattr(llm, "logprobs", None),
-                    "api_key": getattr(llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
-                    "base_url": getattr(llm, "base_url", None),
-                }
-                config = llm_params.update(self.llm_config) if self.llm_config else llm_params
-                return self._set_llm_params(llm=llm_obj, config=config)
-
-
-    def _set_llm_params(self, llm: LLM, config: Dict[str, Any] = None) -> LLM:
-        """
-        Add valid params to the LLM object.
-        """
-
-        import litellm
-        from versionhq.llm.llm_vars import PARAMS
-
-        valid_config = {k: v for k, v in config.items() if v} if config else {}
-
-        if valid_config:
-            valid_keys = list()
-            try:
-                valid_keys = litellm.get_supported_openai_params(model=llm.model, custom_llm_provider=self.endpoint_provider, request_type="chat_completion")
-                if not valid_keys:
-                    valid_keys = PARAMS.get("common")
-            except:
-                valid_keys = PARAMS.get("common")
-
-            valid_keys += PARAMS.get("litellm")
-
-            for key in valid_keys:
-                if key in valid_config and valid_config[key]:
-                    val = valid_config[key]
-                    if [key == k for k, v in LLM.model_fields.items()]:
-                        setattr(llm, key, val)
-                    else:
-                        llm.other_valid_config.update({ key: val})
-
-
-        llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
-        # llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
-
-        if llm.provider is None:
-            provider_name = llm.model.split("/")[0]
-            valid_provider = provider_name if provider_name in PROVIDERS else None
-            llm.provider = valid_provider
-
-        if self.callbacks:
-            llm.callbacks = self.callbacks
-            llm._set_callbacks(llm.callbacks)
-
-        if self.respect_context_window == False:
-            llm.context_window_size = DEFAULT_CONTEXT_WINDOW_SIZE
-
-        return llm
-
-
     @model_validator(mode="after")
     def set_up_tools(self) -> Self:
         """
@@ -369,6 +281,94 @@ class Agent(BaseModel):
         return self


+    def _convert_to_llm_object(self, llm: Any = None) -> LLM:
+        """
+        Convert the given value to LLM object.
+        When `llm` is dict or self.llm_config is not None, add these values to the LLM object after validating them.
+        """
+        llm = llm if llm else self.llm if self.llm else DEFAULT_MODEL_NAME
+
+        if not llm:
+            pass
+
+        match llm:
+            case LLM():
+                return self._set_llm_params(llm=llm, config=self.llm_config)
+
+            case str():
+                llm_obj = LLM(model=llm)
+                return self._set_llm_params(llm=llm_obj, config=self.llm_config)
+
+            case dict():
+                model_name = llm.pop("model_name", llm.pop("deployment_name", str(llm)))
+                llm_obj = LLM(model=model_name if model_name else DEFAULT_MODEL_NAME)
+                config = llm.update(self.llm_config) if self.llm_config else llm
+                return self._set_llm_params(llm_obj, config=config)
+
+            case _:
+                model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
+                llm_obj = LLM(model=model_name if model_name else DEFAULT_MODEL_NAME)
+                llm_params = {
+                    "max_tokens": (getattr(llm, "max_tokens") or 3000),
+                    "timeout": getattr(llm, "timeout", self.max_execution_time),
+                    "callbacks": getattr(llm, "callbacks", None),
+                    "temperature": getattr(llm, "temperature", None),
+                    "logprobs": getattr(llm, "logprobs", None),
+                    "api_key": getattr(llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
+                    "base_url": getattr(llm, "base_url", None),
+                }
+                config = llm_params.update(self.llm_config) if self.llm_config else llm_params
+                return self._set_llm_params(llm=llm_obj, config=config)
+
+
+    def _set_llm_params(self, llm: LLM, config: Dict[str, Any] = None) -> LLM:
+        """
+        Add valid params to the LLM object.
+        """
+
+        import litellm
+        from versionhq.llm.llm_vars import PARAMS
+
+        valid_config = {k: v for k, v in config.items() if v} if config else {}
+
+        if valid_config:
+            valid_keys = list()
+            try:
+                valid_keys = litellm.get_supported_openai_params(model=llm.model, custom_llm_provider=self.endpoint_provider, request_type="chat_completion")
+                if not valid_keys:
+                    valid_keys = PARAMS.get("common")
+            except:
+                valid_keys = PARAMS.get("common")
+
+            valid_keys += PARAMS.get("litellm")
+
+            for key in valid_keys:
+                if key in valid_config and valid_config[key]:
+                    val = valid_config[key]
+                    if [key == k for k, v in LLM.model_fields.items()]:
+                        setattr(llm, key, val)
+                    else:
+                        llm.other_valid_config.update({ key: val})
+
+
+        llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
+        # llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
+
+        if llm.provider is None:
+            provider_name = llm.model.split("/")[0]
+            valid_provider = provider_name if provider_name in PROVIDERS else None
+            llm.provider = valid_provider
+
+        if self.callbacks:
+            llm.callbacks = self.callbacks
+            llm._set_callbacks(llm.callbacks)
+
+        if self.respect_context_window == False:
+            llm.context_window_size = DEFAULT_CONTEXT_WINDOW_SIZE
+
+        return llm
+
+
     def _update_llm(self, llm: Any = None, llm_config: Optional[Dict[str, Any]] = None) -> Self:
         """
         Update llm and llm_config of the exsiting agent. (Other conditions will remain the same.)
@@ -567,7 +567,7 @@ class Agent(BaseModel):


         ## comment out for now
-        # if self.
+        # if self.networks and self.networks._train:
         # task_prompt = self._training_handler(task_prompt=task_prompt)
         # else:
         # task_prompt = self._use_trained_data(task_prompt=task_prompt)
@@ -599,3 +599,6 @@ class Agent(BaseModel):

     def __repr__(self):
         return f"Agent(role={self.role}, goal={self.goal}"
+
+    def __str__(self):
+        return super().__str__()
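
The relocated `_convert_to_llm_object()` / `_set_llm_params()` pair is exercised indirectly when an `Agent` is constructed; a hedged sketch, with field names taken from the hunks above and the model id and config values purely illustrative:

    from versionhq.agent.model import Agent

    # a string llm plus a separate llm_config dict are merged and validated
    # by _convert_to_llm_object() -> _set_llm_params() during model validation
    agent = Agent(
        role="Research analyst",                              # illustrative
        goal="summarize market data",                         # illustrative
        llm="gpt-4o",                                         # any litellm-style model id
        llm_config={"temperature": 0.3, "max_tokens": 2000},  # illustrative values
    )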
versionhq/agent_network/formation.py
ADDED
@@ -0,0 +1,157 @@
+from typing import List, Type
+from enum import Enum
+
+from pydantic import BaseModel, create_model, Field
+
+from versionhq.task.model import Task
+from versionhq.agent.model import Agent
+from versionhq.agent_network.model import AgentNetwork, Member, Formation
+from versionhq.agent.inhouse_agents import vhq_formation_planner
+from versionhq._utils import Logger
+
+
+def form_agent_network(
+    task: str,
+    expected_outcome: str | Type[BaseModel],
+    agents: List[Agent] = None,
+    context: str = None,
+    formation: Type[Formation] = None
+) -> AgentNetwork | None:
+    """
+    Make a formation of agents from the given task description, expected outcome, agents (optional), and context (optional).
+    """
+
+    if not task:
+        Logger(verbose=True).log(level="error", message="Missing task description.", color="red")
+        return None
+
+    if not expected_outcome:
+        Logger(verbose=True).log(level="error", message="Missing expected outcome.", color="red")
+        return None
+
+    if formation:
+        try:
+            match formation:
+                case Formation():
+                    pass
+
+                case str():
+                    matched = [item for item in Formation.s_ if item == formation.upper()]
+                    if matched:
+                        formation = getattr(Formation, matched[0])
+                    else:
+                        # Formation._generate_next_value_(name=f"CUSTOM_{formation.upper()}", start=100, count=6, last_values=Formation.HYBRID.name)
+                        Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid. We'll recreate a valid formation.", color="yellow")
+                        formation = None
+
+                case int() | float():
+                    formation = Formation(int(formation))
+
+                case _:
+                    Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid. We'll recreate a valid formation.", color="yellow")
+                    formation = None
+
+        except Exception as e:
+            Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid: {str(e)}. We'll recreate a formation.", color="yellow")
+            formation = None
+
+    # try:
+    prompt_formation = formation.name if formation and isinstance(formation, Formation) else f"Select the best formation to effectively execute the tasks from the given Enum sets: {str(Formation.__dict__)}."
+
+    prompt_expected_outcome = expected_outcome if isinstance(expected_outcome, str) else expected_outcome.model_dump_json()
+
+    class Outcome(BaseModel):
+        formation: Enum
+        agent_roles: list[str]
+        task_descriptions: list[str]
+        task_outcomes: list[list[str]]
+        leader_agent: str
+
+    vhq_task = Task(
+        description=f"Design a team of specialized agents to fully automate the following task and achieve the expected outcome. For each agent, define its role, task description, and expected outputs via the task with items in a list. Then specify the team formation if the formation is not given. If you think SUPERVISING or HYBRID is the best formation, include a leader_agent role, else leave the leader_agent role blank.\nTask: {str(task)}\nExpected outcome: {prompt_expected_outcome}\nFormation: {prompt_formation}",
+        pydantic_output=Outcome
+    )
+
+    if agents:
+        vhq_task.description += "Consider adding following agents in the formation: " + ", ".join([agent.role for agent in agents if isinstance(agent, Agent)])
+
+    res = vhq_task.execute(agent=vhq_formation_planner, context=context)
+
+    formation_keys = ([k for k in Formation._member_map_.keys() if k == res.pydantic.formation.upper()]
+        if res.pydantic else [k for k in Formation._member_map_.keys() if k == res.json_dict["formation"].upper()])
+    _formation = Formation[formation_keys[0]] if formation_keys else Formation.SUPERVISING
+
+    network_tasks = []
+    members = []
+    leader = str(res.pydantic.leader_agent) if res.pydantic else str(res.json_dict["leader_agent"])
+
+    created_agents = [Agent(role=item, goal=item) for item in res.pydantic.agent_roles]
+    created_tasks = []
+
+    if res.pydantic:
+        for i, item in enumerate(res.pydantic.task_outcomes):
+            if len(res.pydantic.task_descriptions) > i and res.pydantic.task_descriptions[i]:
+                fields = {}
+                for ob in item:
+                    try:
+                        field_name = str(ob).lower().split(":")[0].replace(" ", "_")[0: 16]
+                        fields[field_name] = (str, Field(default=None))
+                    except:
+                        pass
+                output = create_model("Output", **fields) if fields else None
+                _task = Task(description=res.pydantic.task_descriptions[i], pydantic_output=output)
+                created_tasks.append(_task)
+
+    elif res.json_dict:
+        for i, item in enumerate(res["task_outcomes"]):
+            if len(res["task_descriptions"]) > i and res["task_descriptions"][i]:
+                fields = {}
+                for ob in item:
+                    try:
+                        field_name = str(ob).lower().split(":")[0].replace(" ", "_")[0: 16]
+                        fields[field_name] = (str, Field(default=None))
+                    except:
+                        pass
+                output = create_model("Output", **fields) if fields else None
+                _task = Task(description=res["task_descriptions"][i], pydantic_output=output)
+                created_tasks.append(_task)
+
+
+    if len(created_tasks) <= len(created_agents):
+        for i in range(len(created_tasks)):
+            is_manager = bool(created_agents[i].role.lower() == leader.lower())
+            member = Member(agent=created_agents[i], is_manager=is_manager, tasks=[created_tasks[i]])
+            members.append(member)
+
+        for i in range(len(created_tasks), len(created_agents)):
+            try:
+                is_manager = bool(created_agents[i].role.lower() == leader.lower())
+                member_w_o_task = Member(agent=created_agents[i], is_manager=is_manager)
+                members.append(member_w_o_task)
+            except:
+                pass
+
+    elif len(created_tasks) > len(created_agents):
+        for i in range(len(created_agents)):
+            is_manager = bool(created_agents[i].role.lower() == leader.lower())
+            member = Member(agent=created_agents[i], is_manager=is_manager, tasks=[created_tasks[i]])
+            members.append(member)
+
+        network_tasks.extend(created_tasks[len(created_agents):len(created_tasks)])
+
+
+    if _formation == Formation.SUPERVISING and not [member for member in members if member.is_manager]:
+        manager = Member(agent=Agent(role=leader, goal=leader), is_manager=True)
+        members.append(manager)
+
+    members.sort(key=lambda x: x.is_manager == False)
+    network = AgentNetwork(members=members, formation=_formation, network_tasks=network_tasks)
+
+    Logger().log(level="info", message=f"Successfully created a agent network: {str(network.id)} with {len(network.members)} agents.", color="blue")
+
+    return network
+
+
+    # except Exception as e:
+    #     Logger().log(level="error", message=f"Failed to create a agent network - return None. You can try with solo agent. Error: {str(e)}", color="red")
+    #     return None
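
A hedged usage sketch of the new module (import paths follow the hunk above; the roles, goals, and task text are illustrative):

    from versionhq.agent.model import Agent
    from versionhq.agent_network.model import Formation
    from versionhq.agent_network.formation import form_agent_network

    seed_agents = [Agent(role="Copywriter", goal="write launch copy")]   # illustrative

    network = form_agent_network(
        task="Plan and announce a product launch",                       # illustrative
        expected_outcome="announcement copy plus a rollout checklist",   # illustrative
        agents=seed_agents,
        formation=Formation.SUPERVISING,  # a string or int also works per the match block above
    )
    if network:
        print([member.agent.role for member in network.members])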
versionhq/agent_network/model.py
CHANGED
@@ -28,7 +28,6 @@ warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")


 class Formation(str, Enum):
-    UNDEFINED = 0
     SOLO = 1
     SUPERVISING = 2
     SQUAD = 3
@@ -96,7 +95,6 @@ class AgentNetwork(BaseModel):

     __hash__ = object.__hash__
     _execution_span: Any = PrivateAttr()
-    _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
     _inputs: Optional[Dict[str, Any]] = PrivateAttr(default=None)

     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
@@ -167,12 +165,12 @@ class AgentNetwork(BaseModel):
         """
         if self.process == TaskHandlingProcess.HIERARCHY or self.formation == Formation.SUPERVISING:
             if not self.managers:
-
+                Logger().log(level="error", message="The process or formation created needs at least 1 manager agent.", color="red")
                 raise PydanticCustomError("missing_manager", "`manager` is required when using hierarchical process.", {})

         ## comment out for the formation flexibilities
         # if self.managers and (self.manager_tasks is None or self.network_tasks is None):
-        #
+        # Logger().log(level="error", message="The manager is idling. At least 1 task needs to be assigned to the manager.", color="red")
         # raise PydanticCustomError("missing_manager_task", "manager needs to have at least one manager task or network task.", {})

         return self
@@ -186,10 +184,11 @@ class AgentNetwork(BaseModel):
         if self.process == TaskHandlingProcess.SEQUENT and self.network_tasks is None:
             for task in self.tasks:
                 if not [member.task == task for member in self.members]:
-
+                    Logger().log(level="error", message=f"The following task needs a dedicated agent to be assinged: {task.description}", color="red")
                     raise PydanticCustomError("missing_agent_in_task", "Sequential process error: Agent is missing the task", {})
         return self

+
     @model_validator(mode="after")
     def validate_end_with_at_most_one_async_task(self):
         """
@@ -371,7 +370,7 @@ class AgentNetwork(BaseModel):
         task_outputs = self._process_async_tasks(futures, was_replayed)

         if not task_outputs:
-
+            Logger().log(level="error", message="Missing task outputs.", color="red")
             raise ValueError("Missing task outputs")

         final_task_output = lead_task_output if lead_task_output is not None else task_outputs[0] #! REFINEME
@@ -399,12 +398,12 @@ class AgentNetwork(BaseModel):
         self._assign_tasks()

         if kwargs_pre is not None:
-            for func in self.pre_launch_callbacks:
+            for func in self.pre_launch_callbacks: # signature check
                 func(**kwargs_pre)

         for member in self.members:
             agent = member.agent
-            agent.
+            agent.networks.append(self)

             if self.step_callback:
                 agent.callbacks.append(self.step_callback)
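
A minimal construction sketch consistent with the validators above, assuming `Member` accepts `agent`, `is_manager`, and `tasks` as it does in the formation module (the roles and task description are illustrative):

    from versionhq.agent.model import Agent
    from versionhq.task.model import Task
    from versionhq.agent_network.model import AgentNetwork, Formation, Member

    lead = Member(agent=Agent(role="Editor", goal="final review"), is_manager=True)
    writer = Member(
        agent=Agent(role="Writer", goal="draft copy"),
        is_manager=False,
        tasks=[Task(description="Draft the announcement post")],   # illustrative
    )

    # a SUPERVISING formation must include at least one manager member,
    # otherwise the validator above raises the missing_manager error
    network = AgentNetwork(members=[lead, writer], formation=Formation.SUPERVISING)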
versionhq/task/model.py
CHANGED
@@ -281,7 +281,7 @@ class Task(BaseModel):

     # executing
     execution_type: TaskExecutionType = Field(default=TaskExecutionType.SYNC)
-    allow_delegation: bool = Field(default=False, description="
+    allow_delegation: bool = Field(default=False, description="whether to delegate the task to another agent")
     callback: Optional[Callable] = Field(default=None, description="callback to be executed after the task is completed.")
     callback_kwargs: Optional[Dict[str, Any]] = Field(default_factory=dict, description="kwargs for the callback when the callback is callable")

@@ -574,6 +574,36 @@ Ref. Output image: {output_formats_to_follow}
         return agent


+    def _select_agent_to_delegate(self, agent: Any = None) -> Any | None: # return agent object or None
+        """
+        Creates or selects an agent to delegate the given task and returns Agent object else None.
+        """
+
+        from versionhq.agent.model import Agent
+
+        if not self.allow_delegation:
+            return None
+
+        agent_to_delegate: InstanceOf[Agent] = None
+
+        if not agent:
+            agent_to_delegate = self._build_agent_from_task()
+
+        elif agent and not agent.networks:
+            agent_to_delegate = Agent(role="vhq-Delegated-Agent", goal=agent.goal, llm=agent.llm)
+
+        else:
+            _managers = []
+            _members = []
+            for network in agent.networks:
+                _managers.extend(member.agent for member in network.members if member.is_manager)
+                _members.extend(member.agent for member in network.members if not member.is_manager)
+
+            agent_to_delegate = _managers[0] if _managers else _members[0] if _members else Agent(role="vhq-Delegated-Agent", goal=agent.goal, llm=agent.llm)
+
+        return agent_to_delegate
+
+
     # task execution
     def execute(
         self, type: TaskExecutionType = None, agent: Optional["vhq.Agent"] = None, context: Optional[Any] = None
@@ -635,19 +665,7 @@ Ref. Output image: {output_formats_to_follow}
             task_tools.append(item)

         if self.allow_delegation == True:
-            agent_to_delegate =
-
-            if hasattr(agent, "network") and isinstance(agent.network, AgentNetwork):
-                if agent.network.managers:
-                    idling_manager_agents = [manager.agent for manager in agent.network.managers if manager.is_idling]
-                    agent_to_delegate = idling_manager_agents[0] if idling_manager_agents else agent.network.managers[0]
-                else:
-                    peers = [member.agent for member in agent.network.members if member.is_manager == False and member.agent.id is not agent.id]
-                    if len(peers) > 0:
-                        agent_to_delegate = peers[0]
-            else:
-                agent_to_delegate = Agent(role="vhq-Delegated-Agent", goal=agent.goal, llm=agent.llm)
-
+            agent_to_delegate = self._select_agent_to_delegate(agent=agent)
             agent = agent_to_delegate
             self.delegations += 1

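
A sketch of the new delegation path (class and method names are taken from the hunks above; the task description and agent role are illustrative):

    from versionhq.agent.model import Agent
    from versionhq.task.model import Task

    task = Task(
        description="Compile a competitor pricing comparison",   # illustrative
        allow_delegation=True,
    )
    agent = Agent(role="Analyst", goal="market analysis")         # illustrative

    # with allow_delegation=True, execute() calls _select_agent_to_delegate():
    # a manager from the agent's networks is preferred, then a peer member,
    # else a fresh "vhq-Delegated-Agent"; task.delegations is incremented
    res = task.execute(agent=agent)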
{versionhq-1.2.1.16.dist-info → versionhq-1.2.1.18.dist-info}/RECORD
CHANGED
@@ -1,19 +1,20 @@
-versionhq/__init__.py,sha256=
+versionhq/__init__.py,sha256=I1z2OKVSflVHMiWtAsm5qE2-pmpxTtiMQhcytzInHas,2892
 versionhq/_utils/__init__.py,sha256=dzoZr4cBlh-2QZuPzTdehPUCe9lP1dmRtauD7qTjUaA,158
 versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
 versionhq/_utils/logger.py,sha256=zgogTwAY-ujDLrdryAKhdtoaNe1nOFajmEN0V8aMR34,3155
-versionhq/_utils/process_config.py,sha256=
+versionhq/_utils/process_config.py,sha256=YTGY_erW335RfceQfzS18YAqq-AAb-iSvKSjN7noD2E,782
 versionhq/_utils/usage_metrics.py,sha256=NXF18dn5NNvGK7EsQ4AAghpR8ppYOjMx6ABenLLHnmM,1066
 versionhq/_utils/vars.py,sha256=bZ5Dx_bFKlt3hi4-NNGXqdk7B23If_WaTIju2fiTyPQ,57
 versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/agent/inhouse_agents.py,sha256=vupO1viYqVb7sKohIE1zThu6JArhh5JLo5LBeSnh0kM,2534
-versionhq/agent/model.py,sha256=
+versionhq/agent/model.py,sha256=acAFIDmCbSVbMIf0Qlp9lzhdF0f_201havj3RnYj0xw,25661
 versionhq/agent/parser.py,sha256=riG0dkdQCxH7uJ0AbdVdg7WvL0BXhUgJht0VtQvxJBc,4082
 versionhq/agent/rpm_controller.py,sha256=grezIxyBci_lDlwAlgWFRyR5KOocXeOhYkgN02dNFNE,2360
 versionhq/agent/TEMPLATES/Backstory.py,sha256=IAhGnnt6VUMe3wO6IzeyZPDNu7XE7Uiu3VEXUreOcKs,532
 versionhq/agent/TEMPLATES/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/agent_network/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-versionhq/agent_network/
+versionhq/agent_network/formation.py,sha256=QHLbv4XgbmMEQpdoHGrV6_CQOW3kIe7Jp32G0HgpA90,7418
+versionhq/agent_network/model.py,sha256=hjtYIopAN52nStcM6TlV0b6ulRMrmzKH7jIkzNmZHDE,19265
 versionhq/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/clients/customer/__init__.py,sha256=-YXh1FQfvpfLacK8SUC7bD7Wx_eIEi4yrkCC_cUasFg,217
@@ -44,10 +45,9 @@ versionhq/storage/task_output_storage.py,sha256=E1t_Fkt78dPYIOl3MP7LfQ8oGtjlzxBu
 versionhq/storage/utils.py,sha256=ByYXPoEIGJYLUqz-DWjbCAnneNrH1otiYbp12SCILpM,747
 versionhq/task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/task/evaluate.py,sha256=WdUgjbZL62XrxyWe5MTz29scfzwmuAHGxJ7GvAB8Fmk,3954
-versionhq/task/formation.py,sha256=WH604q9bRmWH7KQCrk2qKJwisCopYX5CjJvsj4TgFjI,6894
 versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
 versionhq/task/log_handler.py,sha256=LT7YnO7gcPR9IZS7eRvMjnHh8crMBFtqduxd8dxIbkk,1680
-versionhq/task/model.py,sha256=
+versionhq/task/model.py,sha256=KshCysteol3ggfotZMfFn192dMYALg8lvjiGpyLUVQA,28948
 versionhq/task/structured_response.py,sha256=4q-hQPu7oMMHHXEzh9YW4SJ7N5eCZ7OfZ65juyl_jCI,5000
 versionhq/task/TEMPLATES/Description.py,sha256=V-4kh8xpQTKOcDMi2xnuP-fcNk6kuoz1_5tYBlDLQWQ,420
 versionhq/task_graph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -61,8 +61,8 @@ versionhq/tool/composio_tool_vars.py,sha256=FvBuEXsOQUYnN7RTFxT20kAkiEYkxWKkiVtg
 versionhq/tool/decorator.py,sha256=C4ZM7Xi2gwtEMaSeRo-geo_g_MAkY77WkSLkAuY0AyI,1205
 versionhq/tool/model.py,sha256=PO4zNWBZcJhYVur381YL1dy6zqurio2jWjtbxOxZMGI,12194
 versionhq/tool/tool_handler.py,sha256=2m41K8qo5bGCCbwMFferEjT-XZ-mE9F0mDUOBkgivOI,1416
-versionhq-1.2.1.
-versionhq-1.2.1.
-versionhq-1.2.1.
-versionhq-1.2.1.
-versionhq-1.2.1.
+versionhq-1.2.1.18.dist-info/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
+versionhq-1.2.1.18.dist-info/METADATA,sha256=NvKh5tYpJbFp0SHQaukksKnHOCrzO2SBGJfO0rJlTZM,22033
+versionhq-1.2.1.18.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+versionhq-1.2.1.18.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+versionhq-1.2.1.18.dist-info/RECORD,,
versionhq/task/formation.py
DELETED
@@ -1,159 +0,0 @@
-from typing import List, Type
-from enum import Enum
-
-from pydantic import BaseModel
-
-from versionhq.task.model import Task
-from versionhq.agent.model import Agent
-from versionhq.agent_network.model import AgentNetwork, Member, Formation
-from versionhq.agent.inhouse_agents import vhq_formation_planner
-from versionhq._utils import Logger
-
-
-def form_agent_network(
-    task: str,
-    expected_outcome: str,
-    agents: List[Agent] = None,
-    context: str = None,
-    formation: Type[Formation] = None
-) -> AgentNetwork | None:
-    """
-    Make a formation of agents from the given task description, expected outcome, agents (optional), and context (optional).
-    """
-
-    if not task:
-        Logger(verbose=True).log(level="error", message="Missing task description.", color="red")
-        return None
-
-    if not expected_outcome:
-        Logger(verbose=True).log(level="error", message="Missing expected outcome.", color="red")
-        return None
-
-    if formation:
-        try:
-            match formation:
-                case Formation():
-                    if formation == Formation.UNDEFINED:
-                        formation = None
-                    else:
-                        pass
-
-                case str():
-                    matched = [item for item in Formation.s_ if item == formation.upper()]
-                    if matched:
-                        formation = getattr(Formation, matched[0])
-                    else:
-                        # Formation._generate_next_value_(name=f"CUSTOM_{formation.upper()}", start=100, count=6, last_values=Formation.HYBRID.name)
-                        Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid. We'll recreate a valid formation.", color="yellow")
-                        formation = None
-
-                case int() | float():
-                    formation = Formation(int(formation))
-
-                case _:
-                    Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid. We'll recreate a valid formation.", color="yellow")
-                    formation = None
-
-        except Exception as e:
-            Logger(verbose=True).log(level="warning", message=f"The formation {formation} is invalid: {str(e)}. We'll recreate a formation.", color="yellow")
-            formation = None
-
-    try:
-        prompt_formation = formation.name if formation and isinstance(formation, Formation) else f"Select the best formation to effectively execute the tasks from the given Enum sets: {str(Formation.__dict__)}."
-        class Outcome(BaseModel):
-            formation: Enum
-            agent_roles: list[str]
-            task_descriptions: list[str]
-            leader_agent: str
-
-        vhq_task = Task(
-            description=f"""
-    Create a team of specialized agents designed to automate the following task and deliver the expected outcome. Consider the necessary roles for each agent with a clear task description. If you think we neeed a leader to handle the automation, return a leader_agent role as well, but if not, leave the a leader_agent role blank. When you have a leader_agent, the formation must be SUPERVISING or HYBRID.
-    Task: {str(task)}
-    Expected outcome: {str(expected_outcome)}
-    Formation: {prompt_formation}
-    """,
-            pydantic_output=Outcome
-        )
-
-        if agents:
-            vhq_task.description += "Consider adding following agents in the formation: " + ", ".join([agent.role for agent in agents if isinstance(agent, Agent)])
-
-        res = vhq_task.execute(agent=vhq_formation_planner, context=context)
-        _formation = Formation.SUPERVISING
-
-
-        if res.pydantic:
-            formation_keys = [k for k, v in Formation._member_map_.items() if k == res.pydantic.formation.upper()]
-
-            if formation_keys:
-                _formation = Formation[formation_keys[0]]
-
-
-            network_tasks = []
-            members = []
-            leader = str(res.pydantic.leader_agent)
-
-            created_agents = [Agent(role=item, goal=item) for item in res.pydantic.agent_roles]
-            created_tasks = [Task(description=item) for item in res.pydantic.task_descriptions]
-
-
-            for i in range(len(created_agents)):
-                is_manager = bool(created_agents[i].role.lower() == leader.lower())
-                member = Member(agent=created_agents[i], is_manager=is_manager)
-
-                if len(created_tasks) >= i and created_tasks[i]:
-                    member.tasks.append(created_tasks[i])
-                members.append(member)
-
-
-            if len(created_agents) < len(created_tasks):
-                network_tasks.extend(created_tasks[len(created_agents):len(created_tasks)])
-
-            if _formation == Formation.SUPERVISING and not [member for member in members if member.is_manager]:
-                manager = Member(agent=Agent(role=leader, goal=leader), is_manager=True)
-                members.append(manager)
-
-            members.sort(key=lambda x: x.is_manager == False)
-            network = AgentNetwork(members=members, formation=_formation, network_tasks=network_tasks)
-            return network
-
-        else:
-            res = res.json_dict
-            formation_keys = [k for k, v in Formation._member_map_.items() if k == res["formation"].upper()]
-
-            if formation_keys:
-                _formation = Formation[formation_keys[0]]
-
-            created_agents = [Agent(role=item, goal=item) for item in res["agent_roles"]]
-            created_tasks = [Task(description=item) for item in res["task_descriptions"]]
-
-            network_tasks = []
-            members = []
-            leader = str(res["leader_agent"])
-
-            for i in range(len(created_agents)):
-                is_manager = bool(created_agents[i].role.lower() == leader.lower())
-                member = Member(agent=created_agents[i], is_manager=is_manager)
-
-                if len(created_tasks) >= i and created_tasks[i]:
-                    member.tasks.append(created_tasks[i])
-
-                members.append(member)
-
-            if len(created_agents) < len(created_tasks):
-                network_tasks.extend(created_tasks[len(created_agents):len(created_tasks)])
-
-            if _formation == Formation.SUPERVISING and not [member for member in members if member.is_manager]:
-                member = Member(agent=Agent(role=leader, goal=leader), is_manager=True)
-                members.append(member)
-
-            members.sort(key=lambda x: x.is_manager == False)
-            network = AgentNetwork(members=members, formation=_formation, network_tasks=network_tasks)
-
-            return network
-
-
-    except Exception as e:
-        Logger(verbose=True).log(level="error", message=f"Failed to create a agent network - return None. You can try with solo agent. Error: {str(e)}", color="red")
-        return None
{versionhq-1.2.1.16.dist-info → versionhq-1.2.1.18.dist-info}/LICENSE
File without changes
{versionhq-1.2.1.16.dist-info → versionhq-1.2.1.18.dist-info}/WHEEL
File without changes
{versionhq-1.2.1.16.dist-info → versionhq-1.2.1.18.dist-info}/top_level.txt
File without changes