versionhq 1.1.6.4__py3-none-any.whl → 1.1.7.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +1 -1
- versionhq/_utils/usage_metrics.py +4 -12
- versionhq/agent/TEMPLATES/Backstory.py +3 -0
- versionhq/agent/TEMPLATES/__init__.py +0 -0
- versionhq/agent/model.py +96 -159
- versionhq/task/model.py +195 -157
- versionhq/team/model.py +78 -153
- versionhq/team/team_planner.py +2 -2
- versionhq/tool/model.py +6 -16
- {versionhq-1.1.6.4.dist-info → versionhq-1.1.7.1.dist-info}/METADATA +63 -44
- {versionhq-1.1.6.4.dist-info → versionhq-1.1.7.1.dist-info}/RECORD +14 -12
- {versionhq-1.1.6.4.dist-info → versionhq-1.1.7.1.dist-info}/LICENSE +0 -0
- {versionhq-1.1.6.4.dist-info → versionhq-1.1.7.1.dist-info}/WHEEL +0 -0
- {versionhq-1.1.6.4.dist-info → versionhq-1.1.7.1.dist-info}/top_level.txt +0 -0
versionhq/team/model.py
CHANGED
@@ -56,39 +56,29 @@ class TaskHandlingProcess(str, Enum):
     """
     Class representing the different processes that can be used to tackle multiple tasks.
     """
-
     sequential = "sequential"
     hierarchical = "hierarchical"
    consensual = "consensual"


 class TeamOutput(BaseModel):
-    """
+    """
+    Store outputs of the tasks handled by the team.
+    `json_dict` and `raw` store overall output of tasks that handled by the team,
+    while `task_output_list` stores each TaskOutput instance to the tasks handled by the team members.
+    Note that `raw` and `json_dict` will be prioritized as TeamOutput to refer over `task_output_list`.
+    """

-    team_id: UUID4 = Field(
-
-
-
-    )
-
-    pydantic: Optional[BaseModel] = Field(default=None, description="pydantic output")
-    json_dict: Optional[Dict[str, Any]] = Field(
-        default=None, description="JSON dict output"
-    )
-    task_output_list: list[TaskOutput] = Field(
-        default=list,
-        description="store output of all the tasks that the team has executed",
-    )
-    token_usage: UsageMetrics = Field(
-        default=dict, description="processed token summary"
-    )
+    team_id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="store the team ID that generate the TeamOutput")
+    raw: str = Field(default="", description="raw output of the team lead task handled by the team leader")
+    pydantic: Optional[Any] = Field(default=None, description="`raw` converted to the abs. pydantic model")
+    json_dict: Union[Dict[str, Any]] = Field(default=None, description="`raw` converted to dictionary")
+    task_output_list: list[TaskOutput] = Field(default=list, description="store output of all the tasks that the team has executed")
+    token_usage: UsageMetrics = Field(default=dict, description="processed token summary")

     def __str__(self):
-        return (
-            str(self.pydantic)
-            if self.pydantic
-            else str(self.json_dict) if self.json_dict else self.raw
-        )
+        return (str(self.pydantic) if self.pydantic else str(self.json_dict) if self.json_dict else self.raw)
+

     def __getitem__(self, key):
         if self.pydantic and hasattr(self.pydantic, key):
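The rewritten `TeamOutput` keeps `raw`, `pydantic`, and `json_dict` side by side, with the precedence its new docstring describes. For reference, that resolution order in isolation (a sketch; only the field names and ordering come from the hunk above):

```
# Mirrors the precedence chain in the new __str__:
# pydantic model first, then json_dict, then the raw string.
def resolve_output(pydantic, json_dict, raw):
    if pydantic:
        return str(pydantic)
    if json_dict:
        return str(json_dict)
    return raw

print(resolve_output(None, {"title": "output"}, "fallback"))  # {'title': 'output'}
```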
@@ -98,6 +88,7 @@ class TeamOutput(BaseModel):
         else:
             raise KeyError(f"Key '{key}' not found in the team output.")

+
     @property
     def json(self) -> Optional[str]:
         if self.tasks_output[-1].output_format != TaskOutputFormat.JSON:
@@ -106,32 +97,30 @@ class TeamOutput(BaseModel):
             )
         return json.dumps(self.json_dict)

+
     def to_dict(self) -> Dict[str, Any]:
         """
-        Convert
+        Convert pydantic / raw output into dict and return the dict.
+        When we only have `raw` output, return `{ output: raw }` to avoid an error
         """
+
         output_dict = {}
         if self.json_dict:
             output_dict.update(self.json_dict)
         elif self.pydantic:
             output_dict.update(self.pydantic.model_dump())
         else:
-            output_dict.
+            output_dict.upate({ "output": self.raw })
         return output_dict

-    def return_all_task_outputs(self) -> List[Dict[str, Any]]:
-        res = []
-        for output in self.task_output_list:
-            if output is not None:
-                res.append(output.to_dict())

+    def return_all_task_outputs(self) -> List[Dict[str, Any]]:
+        res = [output.json_dict for output in self.task_output_list]
         return res


 class TeamMember(ABC, BaseModel):
-    agent: Agent | None = Field(
-        default=None, description="store the agent to be a member"
-    )
+    agent: Agent | None = Field(default=None, description="store the agent to be a member")
     is_manager: bool = Field(default=False)
     task: Task | None = Field(default=None)

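The new `to_dict` docstring pins down a fallback contract: dict output first, then a pydantic dump, then `{ "output": raw }`. A standalone sketch of that contract (illustrative only; note the hunk ships `output_dict.upate`, a typo for `update`, so the raw-only branch would raise `AttributeError` as released):

```
# Intended to_dict() behavior per the docstring above (sketch).
def to_dict_contract(json_dict, pydantic_model, raw):
    output_dict = {}
    if json_dict:
        output_dict.update(json_dict)
    elif pydantic_model:
        output_dict.update(pydantic_model.model_dump())
    else:
        output_dict.update({"output": raw})  # the shipped hunk spells this `upate`
    return output_dict
```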
@@ -149,78 +138,50 @@ class Team(BaseModel):

     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
     name: Optional[str] = Field(default=None)
-    members: List[TeamMember] = Field(
-        default_factory=list,
-        description="store agents' uuids and bool if it is manager",
-    )
+    members: List[TeamMember] = Field(default_factory=list, description="store agents' uuids and bool if it is manager")

     # work as a team
-    team_tasks: Optional[List[Task]] = Field(
-
-    )
-    planning_llm: Optional[Any] = Field(
-        default=None,
-        description="llm to handle the planning of the team tasks (if any)",
-    )
-    function_calling_llm: Optional[Any] = Field(
-        default=None,
-        description="llm to execute func after all agent execution (if any)",
-    )
-    prompt_file: str = Field(
-        default="", description="path to the prompt json file to be used by the team."
-    )
+    team_tasks: Optional[List[Task]] = Field(default_factory=list, description="optional tasks for the team")
+    planning_llm: Optional[Any] = Field(default=None, description="llm to handle the planning of the team tasks (if any)")
+    function_calling_llm: Optional[Any] = Field(default=None, description="llm to execute func after all agent execution (if any)")
+    prompt_file: str = Field(default="", description="path to the prompt json file to be used by the team.")
     process: TaskHandlingProcess = Field(default=TaskHandlingProcess.sequential)

     # callbacks
-    before_kickoff_callbacks: List[
-        Callable[[Optional[Dict[str, Any]]], Optional[Dict[str, Any]]]
-    ] = Field(
+    before_kickoff_callbacks: List[Callable[[Optional[Dict[str, Any]]], Optional[Dict[str, Any]]]] = Field(
         default_factory=list,
-        description="list of callback functions to be executed before the team kickoff. i.e., adjust inputs"
+        description="list of callback functions to be executed before the team kickoff. i.e., adjust inputs"
     )
     after_kickoff_callbacks: List[Callable[[TeamOutput], TeamOutput]] = Field(
         default_factory=list,
-        description="list of callback functions to be executed after the team kickoff. i.e., store the result in repo"
-    )
-    task_callback: Optional[Any] = Field(
-        default=None,
-        description="callback to be executed after each task for all agents execution",
-    )
-    step_callback: Optional[Any] = Field(
-        default=None,
-        description="callback to be executed after each step for all agents execution",
+        description="list of callback functions to be executed after the team kickoff. i.e., store the result in repo"
     )
+    task_callback: Optional[Any] = Field(default=None, description="callback to be executed after each task for all agents execution")
+    step_callback: Optional[Any] = Field(default=None, description="callback to be executed after each step for all agents execution")

     verbose: bool = Field(default=True)
     cache: bool = Field(default=True)
-    memory: bool = Field(
-
-
-
-    execution_logs: List[Dict[str, Any]] = Field(
-        default=[], description="list of execution logs for tasks"
-    )
-    usage_metrics: Optional[UsageMetrics] = Field(
-        default=None, description="usage metrics for all the llm executions"
-    )
+    memory: bool = Field(default=False, description="whether the team should use memory to store memories of its execution")
+    execution_logs: List[Dict[str, Any]] = Field(default=[], description="list of execution logs for tasks")
+    usage_metrics: Optional[UsageMetrics] = Field(default=None, description="usage metrics for all the llm executions")
+

     def __name__(self) -> str:
-        return self.name if self.name is not None else self.id
+        return self.name if self.name is not None else self.id.__str__
+

     @property
     def key(self) -> str:
-        source = [member.agent.
-        task.key for task in self.tasks
-        ]
+        source = [str(member.agent.id.__str__) for member in self.members] + [str(task.id.__str__) for task in self.tasks]
         return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()

+
     @property
     def manager_agent(self) -> Agent:
-        manager_agent = [
-            member.agent for member in self.members if member.is_manager == True
-        ]
+        manager_agent = [member.agent for member in self.members if member.is_manager == True]
         return manager_agent[0] if len(manager_agent) > 0 else None

+
     @property
     def manager_task(self) -> Task:
         """
@@ -230,6 +191,7 @@ class Team(BaseModel):
         task = [member.task for member in self.members if member.is_manager == True]
         return task[0] if len(task) > 0 else None

+
     @property
     def tasks(self):
         """
@@ -253,9 +215,7 @@ class Team(BaseModel):
     def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
         """Prevent manual setting of the 'id' field by users."""
         if v:
-            raise PydanticCustomError(
-                "may_not_set_field", "The 'id' field cannot be set by the user.", {}
-            )
+            raise PydanticCustomError("may_not_set_field", "The 'id' field cannot be set by the user.", {})

     # @field_validator("config", mode="before")
     # @classmethod
@@ -286,12 +246,12 @@ class Team(BaseModel):
         )
         return self

+
     @model_validator(mode="after")
     def validate_tasks(self):
         """
         Every team member should have a task to handle.
         """
-
         if self.process == TaskHandlingProcess.sequential:
             for member in self.members:
                 if member.task is None:
@@ -381,13 +341,16 @@
             return skipped_task_output
         return None

-    def _create_team_output(self, task_outputs: List[TaskOutput]) -> TeamOutput:
-        if len(task_outputs) != 1:
-            raise ValueError(
-                "Something went wrong. Kickoff should return only one task output."
-            )

-
+    def _create_team_output(self, task_outputs: List[TaskOutput], lead_task_output: TaskOutput = None) -> TeamOutput:
+        """
+        Take the output of the first task or the lead task output as the team output `raw` value.
+        Note that `tasks` are already sorted by the importance.
+        """
+        if len(task_outputs) < 1:
+            raise ValueError("Something went wrong. Kickoff should return only one task output.")
+
+        final_task_output = lead_task_output if lead_task_output is not None else task_outputs[0]
         # final_string_output = final_task_output.raw
         # self._finish_execution(final_string_output)
         token_usage = self._calculate_usage_metrics()
@@ -395,12 +358,13 @@
         return TeamOutput(
             team_id=self.id,
             raw=final_task_output.raw,
-            pydantic=final_task_output.pydantic,
             json_dict=final_task_output.json_dict,
-
+            pydantic=final_task_output.pydantic,
+            task_output_list=task_outputs,
             token_usage=token_usage,
         )

+
     def _calculate_usage_metrics(self) -> UsageMetrics:
         """
         Calculate and return the usage metrics that consumed by the team.
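Together, these two hunks change how the final `TeamOutput` is assembled: a manager's (`lead`) output wins when present; otherwise the first entry of the importance-sorted `task_outputs` is used. The selection rule in isolation (names follow the diff; everything else is illustrative):

```
# Selection rule from _create_team_output, as a standalone sketch.
def pick_final_output(task_outputs, lead_task_output=None):
    if len(task_outputs) < 1:
        raise ValueError("Kickoff should return at least one task output.")
    # Lead/manager output takes precedence; tasks are pre-sorted by importance.
    return lead_task_output if lead_task_output is not None else task_outputs[0]
```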
@@ -420,12 +384,8 @@
         self.usage_metrics = total_usage_metrics
         return total_usage_metrics

-
-    def _execute_tasks(
-        tasks: List[Task],
-        start_index: Optional[int] = 0,
-        was_replayed: bool = False,
-    ) -> TeamOutput:
+
+    def _execute_tasks(self, tasks: List[Task], start_index: Optional[int] = 0, was_replayed: bool = False) -> TeamOutput:
         """
         Executes tasks sequentially and returns the final output in TeamOutput class.
         When we have a manager agent, we will start from executing manager agent's tasks.
@@ -434,6 +394,7 @@
         """

         task_outputs: List[TaskOutput] = []
+        lead_task_output: TaskOutput = None
         futures: List[Tuple[Task, Future[TaskOutput], int]] = []
         last_sync_output: Optional[TaskOutput] = None

@@ -449,77 +410,44 @@

             responsible_agent = self._get_responsible_agent(task)
             if responsible_agent is None:
-                responsible_agent = self.members[
-                    0
-                ].agent #! REFINEME - select a suitable agent for the task
+                responsible_agent = self.manager_agent if self.manager_agent else self.members[0].agent

             # self._prepare_agent_tools(task)
             # self._log_task_start(task, responsible_agent)

             if isinstance(task, ConditionalTask):
-                skipped_task_output = self._handle_conditional_task(
-                    task, task_outputs, futures, task_index, was_replayed
-                )
+                skipped_task_output = self._handle_conditional_task(task, task_outputs, futures, task_index, was_replayed)
                 if skipped_task_output:
                     continue

             if task.async_execution:
-                context = create_raw_outputs(
-
-
-
-                    task_outputs=(
-                        [
-                            last_sync_output,
-                        ]
-                        if last_sync_output
-                        else []
-                    ),
-                )
-                future = task.execute_async(
-                    agent=responsible_agent,
-                    context=context,
-                    # tools=responsible_agent.tools,
-                )
+                context = create_raw_outputs(tasks=[task, ],task_outputs=([last_sync_output,] if last_sync_output else []))
+                future = task.execute_async(agent=responsible_agent, context=context,
+                    # tools=responsible_agent.tools
+                )
                 futures.append((task, future, task_index))
             else:
                 if futures:
                     task_outputs = self._process_async_tasks(futures, was_replayed)
                     futures.clear()

-                context = create_raw_outputs(
-
-
-
-
-
-
-
-                    if last_sync_output
-                    else []
-                    ),
-                )
-                task_output = task.execute_sync(
-                    agent=responsible_agent,
-                    context=context,
-                    # tools=responsible_agent.tools,
-                )
-                task_outputs = [
-                    task_output,
-                ]
+                context = create_raw_outputs(tasks=[task,], task_outputs=([ last_sync_output,] if last_sync_output else [] ))
+                task_output = task.execute_sync(agent=responsible_agent, context=context,
+                    # tools=responsible_agent.tools
+                )
+                if responsible_agent is self.manager_agent:
+                    lead_task_output = task_output
+
+                task_outputs.append(task_output)
                 # self._process_task_result(task, task_output)
                 # self._store_execution_log(task, task_output, task_index, was_replayed)

                 # if futures:
                 #     task_outputs = self._process_async_tasks(futures, was_replayed)
+        return self._create_team_output(task_outputs, lead_task_output)

-        return self._create_team_output(task_outputs)

-    def kickoff(
-        self,
-        kwargs_before: Optional[Dict[str, str]] = None,
-        kwargs_after: Optional[Dict[str, Any]] = None,
-    ) -> TeamOutput:
+    def kickoff(self, kwargs_before: Optional[Dict[str, str]] = None, kwargs_after: Optional[Dict[str, Any]] = None) -> TeamOutput:
         """
         Kickoff the team:
         0. Plan the team action if we have `team_tasks` using `planning_llm`.
@@ -553,7 +481,6 @@

         for member in self.members:
             agent = member.agent
-            # agent.i18n = i18n
             agent.team = self

             # add the team's common callbacks to each agent.
@@ -574,9 +501,7 @@
         for after_callback in self.after_kickoff_callbacks:
             result = after_callback(result, **kwargs_after)

-        metrics += [
-            member.agent._token_process.get_summary() for member in self.members
-        ]
+        metrics += [member.agent._token_process.get_summary() for member in self.members]

         self.usage_metrics = UsageMetrics()
         for metric in metrics:
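End to end, the flattened `kickoff` signature reads as below. This sketch is assembled from the hunks above and the README example later in this diff; the agent, task, and callback values are placeholders, and any constructor field not shown in the diff is an assumption:

```
from versionhq.agent.model import Agent
from versionhq.task.model import Task, ResponseField
from versionhq.team.model import Team, TeamMember

agent = Agent(role="demo", goal="amazing project goal", llm="llm-of-choice")
task = Task(
    description="Amazing task",
    expected_output_json=True,
    expected_output_pydantic=False,
    output_field_list=[ResponseField(title="test1", type=str, required=True)],
)

team = Team(
    members=[TeamMember(agent=agent, is_manager=True, task=task)],
    # runs after execution; must accept and return a TeamOutput
    after_kickoff_callbacks=[lambda output, **kwargs: output],
)

res = team.kickoff(kwargs_after={})  # -> TeamOutput
print(res.raw, res.return_all_task_outputs())
```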
versionhq/team/team_planner.py
CHANGED
@@ -36,13 +36,13 @@ class TeamPlanner:
         task_to_handle = Task(
             description=f"""
                 Based on the following task summaries, create the most descriptive plan that the team can execute most efficiently. Take all the task summaries - task's description and tools available - into consideration. Your answer only contains a dictionary.
-
+
                 Task summaries: {" ".join(task_summary_list)}
             """,
             expected_output_json=False,
             expected_output_pydantic=True,
             output_field_list=[
-                ResponseField(title=f"{task.id}", type=
+                ResponseField(title=f"{task.id}", type=str, required=True)
                 for task in self.tasks
             ],
         )
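The fix completes a call that was previously truncated mid-argument: `type` now receives the Python type object `str` and `required=True` is set, matching how `ResponseField` is used in the README example further down this diff. What the comprehension yields, with hypothetical task ids:

```
from versionhq.task.model import ResponseField

# Hypothetical ids standing in for the real Task UUIDs.
output_field_list = [
    ResponseField(title=f"{task_id}", type=str, required=True)
    for task_id in ["task-uuid-1", "task-uuid-2"]
]
```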
versionhq/tool/model.py
CHANGED
@@ -35,14 +35,12 @@ class Tool(ABC, BaseModel):
     @property
     def description(self):
         args_schema = {
-            name: {
-                "description": field.description,
-                "type": Tool._get_arg_annotations(field.annotation),
-            }
+            name: { "description": field.description, "type": Tool._get_arg_annotations(field.annotation) }
             for name, field in self.args_schema.model_fields.items()
         }
         return f"Tool Name: {self.name}\nTool Arguments: {args_schema}\nTool Description: {self.description}"

+
     @field_validator("args_schema", mode="before")
     @classmethod
     def _default_args_schema(cls, v: Type[BaseModel]) -> Type[BaseModel]:
@@ -188,21 +186,13 @@ class ToolCalled(BaseModel):
     Store the tool called and any kwargs used.
     """

-    tool: InstanceOf[Tool] = Field(
-
-    )
-    arguments: Optional[Dict[str, Any]] = Field(
-        ..., description="kwargs passed to the tool"
-    )
+    tool: InstanceOf[Tool] = Field(..., description="store the tool instance to be called.")
+    arguments: Optional[Dict[str, Any]] = Field(..., description="kwargs passed to the tool")


 class InstructorToolCalled(BaseModel):
-    tool: InstanceOf[Tool] = Field(
-
-    )
-    arguments: Optional[Dict[str, Any]] = Field(
-        ..., description="kwargs passed to the tool"
-    )
+    tool: InstanceOf[Tool] = Field(..., description="store the tool instance to be called.")
+    arguments: Optional[Dict[str, Any]] = Field(..., description="kwargs passed to the tool")


 class CacheTool(BaseModel):
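Both classes now declare the same pair of required fields on single lines; construction is unchanged. A minimal sketch, assuming `my_tool` is an existing `Tool` subclass instance (none is defined in this diff, and `Tool` itself is abstract):

```
from versionhq.tool.model import ToolCalled, InstructorToolCalled

# `my_tool` is assumed to exist elsewhere.
called = ToolCalled(tool=my_tool, arguments={"query": "amazing query"})
instructor_called = InstructorToolCalled(tool=my_tool, arguments={"query": "amazing query"})
```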
{versionhq-1.1.6.4.dist-info → versionhq-1.1.7.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: versionhq
-Version: 1.1.6.4
+Version: 1.1.7.1
 Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -52,8 +52,7 @@ Requires-Dist: wheel>=0.45.1

 # Overview

-  
+ [](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)   


 An LLM orchestration frameworks for multi-agent systems with RAG to autopilot outbound workflows.
|
@@ -70,6 +69,7 @@ Messaging workflows are created at individual level, and will be deployed on thi
|
|
70
69
|
- [Orchestration frameworks](https://github.com/versionHQ/multi-agent-system)
|
71
70
|
- [Test client app](https://github.com/versionHQ/test-client-app)
|
72
71
|
|
72
|
+
<hr />
|
73
73
|
|
74
74
|
## Mindmap
|
75
75
|
|
@@ -86,10 +86,10 @@ LLM-powered `agent`s and `team`s use `tool`s and their own knowledge to complete
 <!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

 - [Key Features](#key-features)
+- [Usage](#usage)
 - [Technologies Used](#technologies-used)
 - [Project Structure](#project-structure)
 - [Setup](#setup)
-- [Usage](#usage)
 - [Contributing](#contributing)
 - [Customizing AI Agents](#customizing-ai-agents)
 - [Modifying RAG Functionality](#modifying-rag-functionality)
@@ -111,7 +111,7 @@ The `agent` is model agnostic. The default model is set Chat GTP 4o. We ask the
 Multiple `agents` can form a `team` to complete complex tasks together.

 **1. Analysis**
-- Professional `agents` handle the analysis `tasks` on each client, customer, and product.
+- Professional `agents` handle the analysis `tasks` on each client, customer, and product.

 **2. Messaging Workflow Creation**
 - Several `teams` receive the analysis and design initial messaging workflow with several layers.
@@ -121,6 +121,52 @@ Multiple `agents` can form a `team` to complete complex tasks together.
 **3. Autopiloting**
 - Responsible `agents` or `teams` autopilot executing and refining the messaging workflow.

+<hr />
+
+## Usage
+
+1. Install `versionhq` package:
+   ```
+   uv pip install versionhq
+   ```
+
+2. You can use the `versionhq` module in your Python app.
+
+   - **i.e.,** Make LLM-based agent execute the task and return JSON dict.
+
+   ```
+   from versionhq.agent.model import Agent
+   from versionhq.task.model import Task, ResponseField
+
+   agent = Agent(
+       role="demo",
+       goal="amazing project goal",
+       skillsets=["skill_1", "skill_2", ],
+       llm="llm-of-choice"
+   )
+
+   task = Task(
+       description="Amazing task",
+       expected_output_json=True,
+       expected_output_pydantic=False,
+       output_field_list=[
+           ResponseField(title="test1", type=str, required=True),
+           ResponseField(title="test2", type=list, required=True),
+       ],
+       context=["amazing context",],
+       tools=["amazing tool"],
+       callback=None,
+   )
+
+   res = task.execute_sync(agent=agent)
+
+   return res.to_dict()
+
+   ```
+
+For more details:
+
+[PyPi package](https://pypi.org/project/versionhq/)

 <hr />

@@ -145,6 +191,7 @@ Multiple `agents` can form a `team` to complete complex tasks together.
 - [pre-commit](https://pre-commit.com/): Manage and maintain pre-commit hooks
 - [setuptools](https://pypi.org/project/setuptools/): Build python modules

+<hr />

 ## Project Structure

@@ -166,10 +213,10 @@ src/
 │   └── ...
 │
 └──tests/
-      └── cli/
+   └── cli/
       └── team/
       └── ...
-│
+│
 └── uploads/   # Uploaded files for the project

 ```
@@ -210,25 +257,6 @@ src/

 <hr />

-## Usage
-
-1. Install `versionhq` package:
-   ```
-   uv pip install versionhq
-   ```
-
-2. You can use the `versionhq` module in your Python app.
-   ```
-   from versionhq.agent.model import Agent
-   agent = Agent(llm="your-llm", ...)
-   ```
-
-For more details:
-
-[PyPi package](https://pypi.org/project/versionhq/)
-
-<hr />
-
 ## Contributing

 1. Fork the repository
@@ -239,13 +267,17 @@ For more details:

 4. Test the features using the `tests` directory.

-   - Add a
+   - Add a test function to respective components in the `tests` directory.
+   - Add your `LITELLM_API_KEY` and `OPENAI_API_KEY` to the Github `repository secrets` @ settings > secrets & variables > Actions.
    - Run a test.
      ```
-     uv run
+     uv run pytest tests -vv
      ```

-
+   **pytest**
+
+   * When adding a new file to `tests`, name the file ended with `_test.py`.
+   * When adding a new feature to the file, name the feature started with `test_`.

 5. Pull the latest version of source code from the main branch (`git pull origin main`) *Address conflicts if any.
 6. Commit your changes (`git add .` / `git commit -m 'Add your-amazing-feature'`)
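Following those naming rules, a new test module would look roughly like this (path, body, and assertion are illustrative; the `Task` fields mirror the README example above):

```
# tests/task/task_test.py -- file name ends with `_test.py`
from versionhq.task.model import Task, ResponseField


def test_task_description():  # feature name starts with `test_`
    task = Task(
        description="Amazing task",
        expected_output_json=True,
        expected_output_pydantic=False,
        output_field_list=[ResponseField(title="test1", type=str, required=True)],
    )
    assert task.description == "Amazing task"
```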
@@ -321,9 +353,10 @@ Common issues and solutions:
 - API key errors: Ensure all API keys in the `.env` file are correct and up to date. Make sure to add `load_dotenv()` on the top of the python file to apply the latest environment values.
 - Database connection issues: Check if the Chroma DB is properly initialized and accessible.
 - Memory errors: If processing large contracts, you may need to increase the available memory for the Python process.
-- Issues related to dependencies
+- Issues related to dependencies: `rm -rf uv.lock`, `uv cache clean`, `uv venv`, and run `uv pip install -r requirements.txt -v`.
 - Issues related to the AI agents or RAG system: Check the `output.log` file for detailed error messages and stack traces.
 - Issues related to `Python quit unexpectedly`: Check [this stackoverflow article](https://stackoverflow.com/questions/59888499/macos-catalina-python-quit-unexpectedly-error).
+- `reportMissingImports` error from pyright after installing the package: This might occur when installing new libraries while VSCode is running. Open the command pallete (ctrl + shift + p) and run the Python: Restart language server task.

 <hr />

@@ -354,17 +387,3 @@
 > **Simple tasks**: You have a straightforward, one-off task that doesn't require significant complexity or iteration.

 > **Human input**: You need to provide initial input or guidance to the agent, or you expect to review and refine the output.
-
-
-
-<--- Remaining tasks --->
-
-- llm handling - agent
-- more llms integration
-- simpler prompting
-- broader knowledge
-
-- utils - log
-- utils - time
-
-- end to end client app test