versionhq 1.1.4.4__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +33 -0
- versionhq/_utils/__init__.py +0 -0
- versionhq/_utils/cache_handler.py +13 -0
- versionhq/_utils/i18n.py +48 -0
- versionhq/_utils/logger.py +57 -0
- versionhq/_utils/process_config.py +28 -0
- versionhq/_utils/rpm_controller.py +73 -0
- versionhq/_utils/usage_metrics.py +31 -0
- versionhq/agent/__init__.py +0 -0
- versionhq/agent/model.py +472 -0
- versionhq/agent/parser.py +148 -0
- versionhq/cli/__init__.py +0 -0
- versionhq/clients/__init__.py +0 -0
- versionhq/clients/customer/__init__.py +0 -0
- versionhq/clients/customer/model.py +57 -0
- versionhq/clients/product/__init__.py +0 -0
- versionhq/clients/product/model.py +74 -0
- versionhq/clients/workflow/__init__.py +0 -0
- versionhq/clients/workflow/model.py +174 -0
- versionhq/llm/__init__.py +0 -0
- versionhq/llm/llm_vars.py +173 -0
- versionhq/llm/model.py +245 -0
- versionhq/task/__init__.py +9 -0
- versionhq/task/formatter.py +22 -0
- versionhq/task/model.py +430 -0
- versionhq/team/__init__.py +0 -0
- versionhq/team/model.py +585 -0
- versionhq/team/team_planner.py +55 -0
- versionhq/tool/__init__.py +0 -0
- versionhq/tool/composio.py +102 -0
- versionhq/tool/decorator.py +40 -0
- versionhq/tool/model.py +220 -0
- versionhq/tool/tool_handler.py +47 -0
- versionhq-1.1.4.4.dist-info/LICENSE +21 -0
- versionhq-1.1.4.4.dist-info/METADATA +353 -0
- versionhq-1.1.4.4.dist-info/RECORD +38 -0
- versionhq-1.1.4.4.dist-info/WHEEL +5 -0
- versionhq-1.1.4.4.dist-info/top_level.txt +1 -0
versionhq/team/model.py
ADDED
@@ -0,0 +1,585 @@
import uuid
import warnings
import json
from abc import ABC
from enum import Enum
from dotenv import load_dotenv
from concurrent.futures import Future
from hashlib import md5
from typing import Any, Dict, List, TYPE_CHECKING, Callable, Optional, Tuple, Union
from pydantic import (
    UUID4,
    InstanceOf,
    Json,
    BaseModel,
    Field,
    PrivateAttr,
    field_validator,
    model_validator,
)
from pydantic_core import PydanticCustomError

from versionhq.agent.model import Agent
from versionhq.task.model import Task, TaskOutput, ConditionalTask, TaskOutputFormat
from versionhq.task.formatter import create_raw_outputs
from versionhq.team.team_planner import TeamPlanner
from versionhq._utils.logger import Logger
from versionhq._utils.usage_metrics import UsageMetrics


from pydantic._internal._generate_schema import GenerateSchema
from pydantic_core import core_schema

initial_match_type = GenerateSchema.match_type


def match_type(self, obj):
    if getattr(obj, "__name__", None) == "datetime":
        return core_schema.datetime_schema()
    return initial_match_type(self, obj)


GenerateSchema.match_type = match_type

warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
load_dotenv(override=True)

# agentops = None
# if os.environ.get("AGENTOPS_API_KEY"):
#     try:
#         import agentops  # type: ignore
#     except ImportError:
#         pass


class TaskHandlingProcess(str, Enum):
    """
    Class representing the different processes that can be used to tackle multiple tasks.
    """

    sequential = "sequential"
    hierarchical = "hierarchical"
    consensual = "consensual"


class TeamOutput(BaseModel):
    """Class that represents the result of a team."""

    team_id: UUID4 = Field(
        default_factory=uuid.uuid4,
        frozen=True,
        description="store the team ID that generated the TeamOutput",
    )
    raw: str = Field(default="", description="raw output")
    pydantic: Optional[BaseModel] = Field(default=None, description="pydantic output")
    json_dict: Optional[Dict[str, Any]] = Field(
        default=None, description="JSON dict output"
    )
    task_output_list: list[TaskOutput] = Field(
        default_factory=list,
        description="store output of all the tasks that the team has executed",
    )
    token_usage: UsageMetrics = Field(
        default_factory=UsageMetrics, description="processed token summary"
    )

    def __str__(self):
        return (
            str(self.pydantic)
            if self.pydantic
            else str(self.json_dict) if self.json_dict else self.raw
        )

    def __getitem__(self, key):
        if self.pydantic and hasattr(self.pydantic, key):
            return getattr(self.pydantic, key)
        elif self.json_dict and key in self.json_dict:
            return self.json_dict[key]
        else:
            raise KeyError(f"Key '{key}' not found in the team output.")

    @property
    def json(self) -> Optional[str]:
        if self.task_output_list[-1].output_format != TaskOutputFormat.JSON:
            raise ValueError(
                "No JSON output found in the final task. Please make sure to set the output_json property in the final task in your team."
            )
        return json.dumps(self.json_dict)

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert json_output and pydantic_output to a dictionary.
        """
        output_dict = {}
        if self.json_dict:
            output_dict.update(self.json_dict)
        elif self.pydantic:
            output_dict.update(self.pydantic.model_dump())
        else:
            output_dict.update({"raw": self.raw})
        return output_dict

    def return_all_task_outputs(self) -> List[Dict[str, Any]]:
        res = []
        for output in self.task_output_list:
            if output is not None:
                res.append(output.to_dict())

        return res


class TeamMember(ABC, BaseModel):
    agent: Agent | None = Field(
        default=None, description="store the agent to be a member"
    )
    is_manager: bool = Field(default=False)
    task: Task | None = Field(default=None)


class Team(BaseModel):
    """
    A collaborative team of agents that handles complex, multiple tasks.
    We define strategies for task executions and overall workflow.
    """

    __hash__ = object.__hash__
    _execution_span: Any = PrivateAttr()
    _logger: Logger = PrivateAttr()
    # _inputs: Optional[Dict[str, Any]] = PrivateAttr(default=None)

    id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
    name: Optional[str] = Field(default=None)
    members: List[TeamMember] = Field(
        default_factory=list,
        description="store the team members (agent, manager flag, and assigned task)",
    )

    # work as a team
    team_tasks: Optional[List[Task]] = Field(
        default_factory=list, description="optional tasks for the team"
    )
    planning_llm: Optional[Any] = Field(
        default=None,
        description="llm to handle the planning of the team tasks (if any)",
    )
    function_calling_llm: Optional[Any] = Field(
        default=None,
        description="llm to execute functions after all agent executions (if any)",
    )
    prompt_file: str = Field(
        default="", description="path to the prompt json file to be used by the team."
    )
    process: TaskHandlingProcess = Field(default=TaskHandlingProcess.sequential)

    # callbacks
    before_kickoff_callbacks: List[
        Callable[[Optional[Dict[str, Any]]], Optional[Dict[str, Any]]]
    ] = Field(
        default_factory=list,
        description="list of callback functions to be executed before the team kickoff, i.e., adjust inputs",
    )
    after_kickoff_callbacks: List[Callable[[TeamOutput], TeamOutput]] = Field(
        default_factory=list,
        description="list of callback functions to be executed after the team kickoff, i.e., store the result in repo",
    )
    task_callback: Optional[Any] = Field(
        default=None,
        description="callback to be executed after each task for all agents execution",
    )
    step_callback: Optional[Any] = Field(
        default=None,
        description="callback to be executed after each step for all agents execution",
    )

    verbose: bool = Field(default=True)
    cache: bool = Field(default=True)
    memory: bool = Field(
        default=False,
        description="whether the team should use memory to store memories of its execution",
    )
    execution_logs: List[Dict[str, Any]] = Field(
        default=[], description="list of execution logs for tasks"
    )
    usage_metrics: Optional[UsageMetrics] = Field(
        default=None, description="usage metrics for all the llm executions"
    )

    def __name__(self) -> str:
        return self.name if self.name is not None else str(self.id)

    @property
    def key(self) -> str:
        source = [member.agent.key for member in self.members] + [
            task.key for task in self.tasks
        ]
        return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()

    @property
    def manager_agent(self) -> Optional[Agent]:
        manager_agent = [
            member.agent for member in self.members if member.is_manager == True
        ]
        return manager_agent[0] if len(manager_agent) > 0 else None

    @property
    def manager_task(self) -> Optional[Task]:
        """
        Aside from the team task, return the task that the `manager_agent` needs to handle.
        The task is set as second priority, following the team tasks.
        """
        task = [member.task for member in self.members if member.is_manager == True]
        return task[0] if len(task) > 0 else None

    @property
    def tasks(self):
        """
        Return all the tasks that the team needs to handle in order of priority:
        1. team tasks,
        2. manager_task,
        3. members' tasks
        """
        sorted_member_tasks = [
            member.task for member in self.members if member.is_manager == True
        ] + [member.task for member in self.members if member.is_manager == False]
        return (
            self.team_tasks + sorted_member_tasks
            if len(self.team_tasks) > 0
            else sorted_member_tasks
        )

    # validators
    @field_validator("id", mode="before")
    @classmethod
    def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
        """Prevent manual setting of the 'id' field by users."""
        if v:
            raise PydanticCustomError(
                "may_not_set_field", "The 'id' field cannot be set by the user.", {}
            )

    # @field_validator("config", mode="before")
    # @classmethod
    # def check_config_type(cls, v: Union[Json, Dict[str, Any]]) -> Union[Json, Dict[str, Any]]:
    #     return json.loads(v) if isinstance(v, Json) else v

    @model_validator(mode="after")
    def check_manager_llm(self):
        """
        Validates that the language model is set when using hierarchical process.
        """

        if self.process == TaskHandlingProcess.hierarchical:
            if self.manager_agent is None:
                raise PydanticCustomError(
                    "missing_manager_llm_or_manager_agent",
                    "Attribute `manager_llm` or `manager_agent` is required when using hierarchical process.",
                    {},
                )

            if self.manager_agent is not None and any(
                member.agent is self.manager_agent and member.is_manager == False
                for member in self.members
            ):
                raise PydanticCustomError(
                    "manager_agent_in_agents",
                    "Manager agent should not be included in agents list.",
                    {},
                )
        return self

    @model_validator(mode="after")
    def validate_tasks(self):
        """
        Every team member should have a task to handle.
        """

        if self.process == TaskHandlingProcess.sequential:
            for member in self.members:
                if member.task is None:
                    raise PydanticCustomError(
                        "missing_agent_in_task",
                        f"Sequential process error: No task is assigned to the member with the following agent: {member.agent.role if member.agent else 'unknown'}",
                        {},
                    )
        return self

    @model_validator(mode="after")
    def validate_end_with_at_most_one_async_task(self):
        """
        Validates that the team completes max. one asynchronous task by counting tasks traversed backward.
        """

        async_task_count = 0
        for task in reversed(self.tasks):
            if task.async_execution:
                async_task_count += 1
            else:
                break  # stop traversing when a non-async task is found

        if async_task_count > 1:
            raise PydanticCustomError(
                "async_task_count",
                "The team must end with max. one asynchronous task.",
                {},
            )
        return self

    def _get_responsible_agent(self, task: Task) -> Optional[Agent]:
        res = [
            member.agent
            for member in self.members
            if member.task is not None and member.task.id == task.id
        ]
        return None if len(res) == 0 else res[0]

    # setup team planner
    def _handle_team_planning(self):
        team_planner = TeamPlanner(tasks=self.tasks, planner_llm=self.planning_llm)
        result = team_planner._handle_task_planning()

        if result is not None:
            for task in self.tasks:
                task_id = task.id
                task.description += (
                    getattr(result, str(task_id))
                    if hasattr(result, str(task_id))
                    else str(result)
                )

    # task execution
    def _process_async_tasks(
        self,
        futures: List[Tuple[Task, Future[TaskOutput], int]],
        was_replayed: bool = False,
    ) -> List[TaskOutput]:
        task_outputs: List[TaskOutput] = []
        for future_task, future, task_index in futures:
            task_output = future.result()
            task_outputs.append(task_output)
            self._process_task_result(future_task, task_output)
            self._store_execution_log(
                future_task, task_output, task_index, was_replayed
            )
        return task_outputs

    def _handle_conditional_task(
        self,
        task: ConditionalTask,
        task_outputs: List[TaskOutput],
        futures: List[Tuple[Task, Future[TaskOutput], int]],
        task_index: int,
        was_replayed: bool,
    ) -> Optional[TaskOutput]:
        if futures:
            task_outputs = self._process_async_tasks(futures, was_replayed)
            futures.clear()

        previous_output = task_outputs[task_index - 1] if task_outputs else None
        if previous_output is not None and not task.should_execute(previous_output):
            self._logger.log(
                "debug",
                f"Skipping conditional task: {task.description}",
                color="yellow",
            )
            skipped_task_output = task.get_skipped_task_output()

            if not was_replayed:
                self._store_execution_log(task, skipped_task_output, task_index)
            return skipped_task_output
        return None

    def _create_team_output(self, task_outputs: List[TaskOutput]) -> TeamOutput:
        if len(task_outputs) != 1:
            raise ValueError(
                "Something went wrong. Kickoff should return only one task output."
            )

        final_task_output = task_outputs[0]
        # final_string_output = final_task_output.raw
        # self._finish_execution(final_string_output)
        token_usage = self._calculate_usage_metrics()

        return TeamOutput(
            team_id=self.id,
            raw=final_task_output.raw,
            pydantic=final_task_output.pydantic,
            json_dict=final_task_output.json_dict,
            task_output_list=[task.output for task in self.tasks if task.output],
            token_usage=token_usage,
        )

    def _calculate_usage_metrics(self) -> UsageMetrics:
        """
        Calculate and return the usage metrics consumed by the team.
        """
        total_usage_metrics = UsageMetrics()

        for member in self.members:
            agent = member.agent
            if hasattr(agent, "_token_process"):
                token_sum = agent._token_process.get_summary()
                total_usage_metrics.add_usage_metrics(token_sum)

        if self.manager_agent and hasattr(self.manager_agent, "_token_process"):
            token_sum = self.manager_agent._token_process.get_summary()
            total_usage_metrics.add_usage_metrics(token_sum)

        self.usage_metrics = total_usage_metrics
        return total_usage_metrics

    def _execute_tasks(
        self,
        tasks: List[Task],
        start_index: Optional[int] = 0,
        was_replayed: bool = False,
    ) -> TeamOutput:
        """
        Executes tasks sequentially and returns the final output as a TeamOutput instance.
        When we have a manager agent, we will start from executing the manager agent's tasks.
        Priority:
        1. Team tasks > 2. Manager task > 3. Member tasks (in order of index)
        """

        task_outputs: List[TaskOutput] = []
        futures: List[Tuple[Task, Future[TaskOutput], int]] = []
        last_sync_output: Optional[TaskOutput] = None

        for task_index, task in enumerate(tasks):
            if start_index is not None and task_index < start_index:
                if task.output:
                    if task.async_execution:
                        task_outputs.append(task.output)
                    else:
                        task_outputs = [task.output]
                        last_sync_output = task.output
                continue

            responsible_agent = self._get_responsible_agent(task)
            if responsible_agent is None:
                responsible_agent = self.members[
                    0
                ].agent  #! REFINEME - select a suitable agent for the task

            # self._prepare_agent_tools(task)
            # self._log_task_start(task, responsible_agent)

            if isinstance(task, ConditionalTask):
                skipped_task_output = self._handle_conditional_task(
                    task, task_outputs, futures, task_index, was_replayed
                )
                if skipped_task_output:
                    continue

            if task.async_execution:
                context = create_raw_outputs(
                    tasks=[
                        task,
                    ],
                    task_outputs=(
                        [
                            last_sync_output,
                        ]
                        if last_sync_output
                        else []
                    ),
                )
                future = task.execute_async(
                    agent=responsible_agent,
                    context=context,
                    # tools=responsible_agent.tools,
                )
                futures.append((task, future, task_index))
            else:
                if futures:
                    task_outputs = self._process_async_tasks(futures, was_replayed)
                    futures.clear()

                context = create_raw_outputs(
                    tasks=[
                        task,
                    ],
                    task_outputs=(
                        [
                            last_sync_output,
                        ]
                        if last_sync_output
                        else []
                    ),
                )
                task_output = task.execute_sync(
                    agent=responsible_agent,
                    context=context,
                    # tools=responsible_agent.tools,
                )
                task_outputs = [
                    task_output,
                ]
                # self._process_task_result(task, task_output)
                # self._store_execution_log(task, task_output, task_index, was_replayed)

        # if futures:
        #     task_outputs = self._process_async_tasks(futures, was_replayed)

        return self._create_team_output(task_outputs)

    def kickoff(
        self,
        kwargs_before: Optional[Dict[str, str]] = None,
        kwargs_after: Optional[Dict[str, Any]] = None,
    ) -> TeamOutput:
        """
        Kickoff the team:
        0. Plan the team action if we have `team_tasks` using `planning_llm`.
        1. Address `before_kickoff_callbacks` if any.
        2. Handle team members' tasks in accordance with the `process`.
        3. Address `after_kickoff_callbacks` if any.
        """

        metrics: List[UsageMetrics] = []

        if len(self.team_tasks) > 0 or self.planning_llm is not None:
            self._handle_team_planning()

        if kwargs_before is not None:
            for before_callback in self.before_kickoff_callbacks:
                before_callback(**kwargs_before)

        # self._execution_span = self._telemetry.team_execution_span(self, inputs)
        # self._task_output_handler.reset()
        # self._logging_color = "bold_purple"

        # if inputs is not None:
        #     self._inputs = inputs
        #     self._interpolate_inputs(inputs)

        for task in self.tasks:
            if not task.callback:
                task.callback = self.task_callback

        # i18n = I18N(prompt_file=self.prompt_file)

        for member in self.members:
            agent = member.agent
            # agent.i18n = i18n
            agent.team = self

            # add the team's common callbacks to each agent.
            if not agent.function_calling_llm:
                agent.function_calling_llm = self.function_calling_llm

            # if agent.allow_code_execution:
            #     agent.tools += agent.get_code_execution_tools()

            if not agent.step_callback:
                agent.step_callback = self.step_callback

        if self.process is None:
            self.process = TaskHandlingProcess.sequential

        result = self._execute_tasks(self.tasks)

        for after_callback in self.after_kickoff_callbacks:
            result = after_callback(result, **(kwargs_after or {}))

        metrics += [
            member.agent._token_process.get_summary()
            for member in self.members
            if hasattr(member.agent, "_token_process")
        ]

        self.usage_metrics = UsageMetrics()
        for metric in metrics:
            self.usage_metrics.add_usage_metrics(metric)

        return result
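For orientation, here is a minimal, hypothetical usage sketch of the classes added in this file. It is not part of the wheel. The Agent, Task, and ResponseField constructor arguments mirror how they are used in versionhq/team/team_planner.py below; the model name passed to `llm` and the field titles are illustrative assumptions.

# Hypothetical sketch (not part of the package). Constructor arguments for Agent,
# Task, and ResponseField are assumed from their usage elsewhere in this diff.
from versionhq.agent.model import Agent
from versionhq.task.model import Task, ResponseField
from versionhq.team.model import Team, TeamMember

researcher = Agent(
    role="Researcher",
    goal="Collect the key facts on the topic",
    backstory="You are a meticulous researcher.",
    llm="gpt-4o-mini",  # assumed: any LiteLLM-compatible model name
)
writer = Agent(
    role="Writer",
    goal="Summarize the findings in a short brief",
    backstory="You write concise, accurate summaries.",
    llm="gpt-4o-mini",
)

research_task = Task(
    description="Research the topic and list the key findings.",
    output_field_list=[ResponseField(title="findings", type="str", required=True)],
)
writing_task = Task(
    description="Write a short summary based on the findings.",
    output_field_list=[ResponseField(title="summary", type="str", required=True)],
)

team = Team(
    members=[
        TeamMember(agent=researcher, is_manager=False, task=research_task),
        TeamMember(agent=writer, is_manager=False, task=writing_task),
    ],
)

result = team.kickoff()                   # returns a TeamOutput
print(result.raw)                         # raw output of the final task
print(result.return_all_task_outputs())   # per-task outputs as dictionaries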
versionhq/team/team_planner.py
ADDED
@@ -0,0 +1,55 @@
import os
from dotenv import load_dotenv
from typing import Any, List, Optional
from pydantic import BaseModel, Field

from versionhq.agent.model import Agent
from versionhq.task.model import Task, ResponseField

load_dotenv(override=True)


class TeamPlanner:
    """
    (Optional) Plan how the team should handle multiple tasks using LLM.
    """

    def __init__(self, tasks: List[Task], planner_llm: Optional[Any] = None):
        self.tasks = tasks
        self.planner_llm = (
            planner_llm
            if planner_llm is not None
            else os.environ.get("LITELLM_MODEL_NAME")
        )

    def _handle_task_planning(self) -> Optional[BaseModel]:
        """
        Handles the team planning by creating detailed step-by-step plans for each task.
        """

        planning_agent = Agent(
            role="Task Execution Planner",
            goal="Your goal is to create an extremely detailed, step-by-step plan based on the tasks and tools available to each agent so that they can perform the tasks in an exemplary manner",
            backstory="You have a strong ability to design efficient organizational structures and task processes, minimizing unnecessary steps.",
            llm=self.planner_llm,
        )

        task_summary_list = [task.summary for task in self.tasks]
        task_to_handle = Task(
            description=f"""
            Based on the following task summaries, create the most descriptive plan that the team can execute most efficiently. Take all the task summaries - the task's description and tools available - into consideration. Your answer only contains a dictionary.

            Task summaries: {" ".join(task_summary_list)}
            """,
            expected_output_json=False,
            expected_output_pydantic=True,
            output_field_list=[
                ResponseField(title=f"{task.id}", type="str", required=True)
                for task in self.tasks
            ],
        )
        task_output = task_to_handle.execute_sync(agent=planning_agent)

        if isinstance(task_output.pydantic, BaseModel):
            return task_output.pydantic

        else:
            return None
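For context, a short hypothetical sketch of how TeamPlanner is typically driven, mirroring Team._handle_team_planning in versionhq/team/model.py above; the `team` object is the one from the previous sketch, and the model name is an illustrative assumption.

# Hypothetical sketch (not part of the package). Mirrors Team._handle_team_planning above.
from versionhq.team.team_planner import TeamPlanner

planner = TeamPlanner(tasks=team.tasks, planner_llm="gpt-4o-mini")  # assumed model name
plan = planner._handle_task_planning()   # pydantic model keyed by task id, or None

if plan is not None:
    for task in team.tasks:
        # each response field title is the task id (see ResponseField(title=f"{task.id}", ...))
        step_plan = getattr(plan, str(task.id), None)
        if step_plan:
            task.description += step_plan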