versionhq 1.1.7.3__tar.gz → 1.1.7.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/PKG-INFO +51 -8
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/README.md +50 -7
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/pyproject.toml +1 -1
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/__init__.py +1 -1
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/clients/workflow/model.py +7 -7
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/task/model.py +32 -15
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/team/model.py +4 -8
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq.egg-info/PKG-INFO +51 -8
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/tests/clients/workflow_test.py +1 -1
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/tests/task/task_test.py +23 -22
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/tests/team/team_test.py +94 -6
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/uv.lock +1 -1
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/.github/workflows/publish.yml +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/.github/workflows/publish_testpypi.yml +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/.github/workflows/run_tests.yml +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/.github/workflows/security_check.yml +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/.gitignore +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/.pre-commit-config.yaml +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/.python-version +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/LICENSE +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/SECURITY.md +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/db/preprocess.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/requirements.txt +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/runtime.txt +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/setup.cfg +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/_utils/__init__.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/_utils/cache_handler.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/_utils/i18n.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/_utils/logger.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/_utils/process_config.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/_utils/rpm_controller.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/_utils/usage_metrics.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/agent/__init__.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/agent/model.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/agent/parser.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/cli/__init__.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/clients/__init__.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/clients/customer/__init__.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/clients/customer/model.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/clients/product/__init__.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/clients/product/model.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/clients/workflow/__init__.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/llm/__init__.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/llm/llm_vars.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/llm/model.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/task/__init__.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/task/formatter.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/team/__init__.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/team/team_planner.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/tool/__init__.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/tool/decorator.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/tool/model.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/tool/tool_handler.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq.egg-info/SOURCES.txt +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq.egg-info/dependency_links.txt +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq.egg-info/requires.txt +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq.egg-info/top_level.txt +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/tests/__init__.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/tests/agent/__init__.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/tests/agent/agent_test.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/tests/cli/__init__.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/tests/conftest.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/tests/task/__init__.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/tests/team/Prompts/Demo_test.py +0 -0
- {versionhq-1.1.7.3 → versionhq-1.1.7.5}/tests/team/__init__.py +0 -0
{versionhq-1.1.7.3 → versionhq-1.1.7.5}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: versionhq
-Version: 1.1.7.3
+Version: 1.1.7.5
 Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -52,7 +52,11 @@ Requires-Dist: wheel>=0.45.1
 
 # Overview
 
-
+
+[](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
+
+
+
 
 
 An LLM orchestration frameworks for multi-agent systems with RAG to autopilot outbound workflows.
@@ -64,10 +68,11 @@ Messaging workflows are created at individual level, and will be deployed on thi
 
 **Visit:**
 
-- [ ...
-- [ ...
-- [ ...
-- [ ...
+- [PyPI](https://pypi.org/project/versionhq/)
+- [Github (LLM orchestration)](https://github.com/versionHQ/multi-agent-system)
+- [Github (Test client app)](https://github.com/versionHQ/test-client-app)
+- [Use case](https://versi0n.io/) - client app (alpha)
+
 
 <hr />
 
@@ -87,6 +92,8 @@ LLM-powered `agent`s and `team`s use `tool`s and their own knowledge to complete
 
 - [Key Features](#key-features)
 - [Usage](#usage)
+  - [Case 1. Build an AI agent on LLM of your choice and execute a task:](#case-1-build-an-ai-agent-on-llm-of-your-choice-and-execute-a-task)
+  - [Case 2. Form a team to handle multiple tasks:](#case-2-form-a-team-to-handle-multiple-tasks)
 - [Technologies Used](#technologies-used)
 - [Project Structure](#project-structure)
 - [Setup](#setup)
@@ -132,7 +139,8 @@ Multiple `agents` can form a `team` to complete complex tasks together.
 
 2. You can use the `versionhq` module in your Python app.
 
-
+
+### Case 1. Build an AI agent on LLM of your choice and execute a task:
 
 ```
 from versionhq.agent.model import Agent
@@ -142,6 +150,7 @@ Multiple `agents` can form a `team` to complete complex tasks together.
     role="demo",
     goal="amazing project goal",
     skillsets=["skill_1", "skill_2", ],
+    tools=["amazing RAG tool",]
     llm="llm-of-your-choice"
 )
 
@@ -165,7 +174,41 @@ This will return a dictionary with keys defined in the `ResponseField`.
 { test1: "answer1", "test2": ["answer2-1", "answer2-2", "answer2-3",] }
 ```
 
-
+### Case 2. Form a team to handle multiple tasks:
+
+```
+from versionhq.agent.model import Agent
+from versionhq.task.model import Task, ResponseField
+from versionhq.team.model import Team, TeamMember
+
+agent_a = Agent(role="agent a", goal="My amazing goals", llm="llm-of-your-choice")
+agent_b = Agent(role="agent b", goal="My amazing goals", llm="llm-of-your-choice")
+
+task_1 = Task(
+    description="Analyze the client's business model.",
+    output_field_list=[ResponseField(title="test1", type=str, required=True),],
+    allow_delegation=True
+)
+
+task_2 = Task(
+    description="Define the cohort.",
+    output_field_list=[ResponseField(title="test1", type=int, required=True),],
+    allow_delegation=False
+)
+
+team = Team(
+    members=[
+        TeamMember(agent=agent_a, is_manager=False, task=task_1),
+        TeamMember(agent=agent_b, is_manager=True, task=task_2),
+    ],
+)
+res = team.kickoff()
+```
+
+This will return a list with dictionaries with keys defined in the `ResponseField` of each task.
+
+Tasks can be delegated to a team manager, peers in the team, or completely new agent.
+
 
 <hr />
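The new "Case 2" section above closes by noting that tasks can be delegated to a team manager, peers in the team, or a completely new agent. A minimal sketch of enabling that for a single task, based only on the API shown in this diff; the role/goal strings and the `"llm-of-your-choice"` placeholder are illustrative, and actually running it requires credentials for whichever LLM you choose:

```python
from versionhq.agent.model import Agent
from versionhq.task.model import Task, ResponseField

agent = Agent(role="analyst", goal="Analyze the client's business model", llm="llm-of-your-choice")

task = Task(
    description="Analyze the client's business model.",
    output_field_list=[ResponseField(title="test1", type=str, required=True)],
    allow_delegation=True,  # hand the task to a team manager, a peer, or a newly created agent
)

res = task.execute_sync(agent=agent)  # returns a TaskOutput; task.delegations is incremented when delegated
print(res.raw)
```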
{versionhq-1.1.7.3 → versionhq-1.1.7.5}/README.md

@@ -1,6 +1,10 @@ / @@ -12,10 +16,11 @@ / @@ -35,6 +40,8 @@ / @@ -80,7 +87,8 @@ / @@ -90,6 +98,7 @@ / @@ -113,7 +122,41 @@

The README.md hunks repeat, at README line offsets, the same content changes shown in the PKG-INFO diff above (PKG-INFO embeds the README): the new status badges, the filled-in **Visit:** links, two new table-of-contents entries, the "Case 1" heading, the added `tools=["amazing RAG tool",]` argument, and the new "Case 2. Form a team to handle multiple tasks" section with its closing notes.
{versionhq-1.1.7.3 → versionhq-1.1.7.5}/pyproject.toml

@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__"]
 
 [project]
 name = "versionhq"
-version = "1.1.7.3"
+version = "1.1.7.5"
 authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
 description = "LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows"
 readme = "README.md"
{versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/clients/workflow/model.py

@@ -61,21 +61,21 @@ class MessagingComponent(ABC, BaseModel):
     score: Union[float, InstanceOf[Score]] = Field(default=None)
 
 
-    def store_scoring_result(self, scoring_subject: str, ...
+    def store_scoring_result(self, scoring_subject: str, score_raw: Union[int, Score, ScoreFormat] = None):
         """
         Set up the `score` field
         """
 
-        if isinstance( ...
-            setattr(self, "score", ...
+        if isinstance(score_raw, Score):
+            setattr(self, "score", score_raw)
 
-        elif isinstance( ...
+        elif isinstance(score_raw, ScoreFormat):
             score_instance = Score()
-            setattr(score_instance, scoring_subject, ...
+            setattr(score_instance, scoring_subject, score_raw)
             setattr(self, "score", score_instance)
 
-        elif isinstance( ...
-            score_instance, score_format_instance = Score(), ScoreFormat(rate= ...
+        elif isinstance(score_raw, int) or isinstance(score_raw, float):
+            score_instance, score_format_instance = Score(), ScoreFormat(rate=score_raw, weight=1)
             setattr(score_instance, "kwargs", { scoring_subject: score_format_instance })
             setattr(self, "score", score_instance)
 
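For reference, a small sketch of the reworked `store_scoring_result` signature, assuming the classes are importable from `versionhq.clients.workflow.model` (the module changed here). The int/float path wraps the raw value into a `Score` via `ScoreFormat(rate=score_raw, weight=1)`, and a ready-made `Score` or `ScoreFormat` instance is also accepted per the new signature:

```python
from versionhq.clients.workflow.model import MessagingComponent

component = MessagingComponent(message="demo")

# A plain int/float is stored as Score(kwargs={"demo": ScoreFormat(rate=15, weight=1)});
# a Score or ScoreFormat instance can be passed directly instead.
component.store_scoring_result("demo", score_raw=15)

assert component.score is not None
print(component.score.result())
```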
{versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/task/model.py

@@ -158,6 +158,7 @@ class Task(BaseModel):
     async_execution: bool = Field(default=False,description="whether the task should be executed asynchronously or not")
     config: Optional[Dict[str, Any]] = Field(default=None, description="configuration for the agent")
     callback: Optional[Any] = Field(default=None, description="callback to be executed after the task is completed.")
+    callback_kwargs: Optional[Dict[str, Any]] = Field(default_factory=dict, description="kwargs for the callback when the callback is callable")
 
     # recording
     processed_by_agents: Set[str] = Field(default_factory=set)
@@ -371,7 +372,7 @@ Your outputs MUST adhere to the following format and should NOT include any irre
 
 
     # task execution
-    def execute_sync(self, agent, context: Optional[str] = None ...
+    def execute_sync(self, agent, context: Optional[str] = None) -> TaskOutput:
         """
         Execute the task synchronously.
         When the task has context, make sure we have executed all the tasks in the context first.
@@ -380,12 +381,12 @@ Your outputs MUST adhere to the following format and should NOT include any irre
         if self.context:
             for task in self.context:
                 if task.output is None:
-                    task._execute_core(agent, context ...
+                    task._execute_core(agent, context)
 
         return self._execute_core(agent, context)
 
 
-    def execute_async(self, agent, context: Optional[str] = None ...
+    def execute_async(self, agent, context: Optional[str] = None) -> Future[TaskOutput]:
         """
         Execute the task asynchronously.
         """
@@ -394,36 +395,55 @@ Your outputs MUST adhere to the following format and should NOT include any irre
         threading.Thread(
             daemon=True,
             target=self._execute_task_async,
-            args=(agent, context, ...
+            args=(agent, context, future),
         ).start()
         return future
 
 
-    def _execute_task_async(self, agent, context: Optional[str], ...
+    def _execute_task_async(self, agent, context: Optional[str], future: Future[TaskOutput]) -> None:
         """
         Execute the task asynchronously with context handling.
         """
 
-        result = self._execute_core(agent, context ...
+        result = self._execute_core(agent, context)
         future.set_result(result)
 
 
-    def _execute_core(self, agent, context: Optional[str] ...
+    def _execute_core(self, agent, context: Optional[str]) -> TaskOutput:
         """
         Run the core execution logic of the task.
         To speed up the process, when the format is not expected to return, we will skip the conversion process.
+        When the task is allowed to delegate to another agent, we will select a responsible one in order of manager_agent > peer_agent > anoymous agent.
         """
         from versionhq.agent.model import Agent
+        from versionhq.team.model import Team
 
         self.prompt_context = context
 
         if self.allow_delegation:
-
+            agent_to_delegate = None
+
+            if hasattr(agent, "team") and isinstance(agent.team, Team):
+                if agent.team.manager_agent:
+                    agent_to_delegate = agent.team.manager_agent
+                else:
+                    peers = [member.agent for member in agent.team.members if member.is_manager == False and member.agent.id is not agent.id]
+                    if len(peers) > 0:
+                        agent_to_delegate = peers[0]
+            else:
+                agent_to_delegate = Agent(role="delegated_agent", goal=agent.goal, llm=agent.llm)
+
+            agent = agent_to_delegate
             self.delegations += 1
 
-        output_raw = agent.execute_task(task=self, context=context)
-
-
+        output_raw, output_json_dict, output_pydantic = agent.execute_task(task=self, context=context), None, None
+
+        if self.expected_output_json:
+            output_json_dict = self.create_json_output(raw_result=output_raw)
+
+        if self.expected_output_pydantic:
+            output_pydantic = self.create_pydantic_output(output_json_dict=output_json_dict)
+
         task_output = TaskOutput(
             task_id=self.id,
             raw=output_raw,
@@ -436,10 +456,7 @@ Your outputs MUST adhere to the following format and should NOT include any irre
         # self._set_end_execution_time(start_time)
 
         if self.callback:
-
-            self.callback(**callback_kwargs)
-            else:
-            self.callback(self.output)
+            self.callback({ **self.callback_kwargs, **self.output.__dict__ })
 
         # if self._execution_span:
         # #     self._telemetry.task_ended(self._execution_span, self, agent.team)
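The last hunk changes the callback contract: instead of the old branch that either expanded keyword arguments or passed `self.output`, the callback now always receives a single dict merging `Task.callback_kwargs` with the `TaskOutput` fields. A minimal sketch of the new shape; the names and the `"llm-of-your-choice"` placeholder are illustrative, and execution needs a configured LLM:

```python
from typing import Any, Dict

from versionhq.agent.model import Agent
from versionhq.task.model import Task, ResponseField

def on_done(payload: Dict[str, Any]) -> None:
    # payload mixes callback_kwargs with the TaskOutput fields (task_id, raw, ...)
    print(payload["task_id"], payload.get("added_condition"))

agent = Agent(role="demo agent", goal="amazing project goal", llm="llm-of-your-choice")

task = Task(
    description="Analyze the client's business model.",
    output_field_list=[ResponseField(title="test1", type=str, required=True)],
    callback=on_done,
    callback_kwargs=dict(added_condition="illustrative value"),
)

task.execute_sync(agent=agent)  # on_done fires after the TaskOutput is built
```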
{versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq/team/model.py

@@ -111,7 +111,7 @@ class TeamOutput(BaseModel):
 class TeamMember(ABC, BaseModel):
     agent: Agent | None = Field(default=None, description="store the agent to be a member")
     is_manager: bool = Field(default=False)
-    task: Task ...
+    task: Optional[Task] = Field(default=None)
 
 
 class Team(BaseModel):
@@ -145,7 +145,6 @@ class Team(BaseModel):
         default_factory=list,
         description="list of callback functions to be executed after the team kickoff. i.e., store the result in repo"
     )
-    task_callback: Optional[Any] = Field(default=None, description="callback to be executed after each task for all agents execution")
     step_callback: Optional[Any] = Field(default=None, description="callback to be executed after each step for all agents execution")
 
     verbose: bool = Field(default=True)
@@ -379,7 +378,7 @@ class Team(BaseModel):
         """
         Executes tasks sequentially and returns the final output in TeamOutput class.
         When we have a manager agent, we will start from executing manager agent's tasks.
-        Priority
+        Priority:
         1. Team tasks > 2. Manager task > 3. Member tasks (in order of index)
         """
 
@@ -412,7 +411,7 @@ class Team(BaseModel):
 
             if task.async_execution:
                 context = create_raw_outputs(tasks=[task, ],task_outputs=([last_sync_output,] if last_sync_output else []))
-                future = task.execute_async(agent=responsible_agent, context=context ...
+                future = task.execute_async(agent=responsible_agent, context=context
                     # tools=responsible_agent.tools
                 )
                 futures.append((task, future, task_index))
@@ -422,7 +421,7 @@ class Team(BaseModel):
                 futures.clear()
 
             context = create_raw_outputs(tasks=[task,], task_outputs=([ last_sync_output,] if last_sync_output else [] ))
-            task_output = task.execute_sync(agent=responsible_agent, context=context ...
+            task_output = task.execute_sync(agent=responsible_agent, context=context
                 # tools=responsible_agent.tools
             )
             if responsible_agent is self.manager_agent:
@@ -463,9 +462,6 @@ class Team(BaseModel):
         # self._inputs = inputs
         # self._interpolate_inputs(inputs)
 
-        for task in self.tasks:
-            if not task.callback:
-                task.callback = self.task_callback
 
         # i18n = I18N(prompt_file=self.prompt_file)
 
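With `task` now optional on `TeamMember`, a member (typically the manager) can be registered without a task of its own; in 1.1.7.3 the field was required, so the same construction would have failed validation. A sketch of the model-level change only, with illustrative names and placeholder model string (what kickoff then assigns to that member depends on the rest of the team configuration):

```python
from versionhq.agent.model import Agent
from versionhq.team.model import TeamMember

manager = Agent(role="manager", goal="Coordinate the member tasks", llm="llm-of-your-choice")

member = TeamMember(agent=manager, is_manager=True)  # no task: accepted as of 1.1.7.5
assert member.task is None
```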
{versionhq-1.1.7.3 → versionhq-1.1.7.5}/src/versionhq.egg-info/PKG-INFO

@@ -1,6 +1,6 @@ / @@ -52,7 +52,11 @@ / @@ -64,10 +68,11 @@ / @@ -87,6 +92,8 @@ / @@ -132,7 +139,8 @@ / @@ -142,6 +150,7 @@ / @@ -165,7 +174,41 @@

The generated egg-info copy of PKG-INFO carries hunks identical to the PKG-INFO diff above (version bump to 1.1.7.5 plus the README content changes), so they are not repeated here.
{versionhq-1.1.7.3 → versionhq-1.1.7.5}/tests/clients/workflow_test.py

@@ -15,7 +15,7 @@ def test_store_scores():
 
     messaging_component = MessagingComponent(message="demo")
     score_raw = 15
-    messaging_component.store_scoring_result("demo", ...
+    messaging_component.store_scoring_result("demo", score_raw=score_raw)
 
     assert messaging_component.score is not None
     assert messaging_component.score.result() is not None
{versionhq-1.1.7.3 → versionhq-1.1.7.5}/tests/task/task_test.py

@@ -1,7 +1,7 @@
 import os
 import pytest
 from unittest.mock import patch
-from typing import Union
+from typing import Union, Dict, Any
 from versionhq.agent.model import Agent
 from versionhq.task.model import Task, ResponseField, TaskOutput, AgentOutput
 
@@ -27,8 +27,6 @@ def test_sync_execute_task():
             ResponseField(title="test1", type=str, required=True),
             ResponseField(title="test2", type=list, required=True),
         ],
-        context=None,
-        callback=None,
     )
     res = task.execute_sync(agent=agent)
 
@@ -202,14 +200,14 @@ def test_callback():
     See if the callback function is executed well with kwargs.
     """
 
-    def callback_func( ...
-
-
-
-
-    )
-
+    def callback_func(kwargs: Dict[str, Any]):
+        task_id = kwargs.get("task_id", None)
+        added_condition = kwargs.get("added_condition", None)
+        assert task_id is not None
+        assert added_condition is not None
+        return f"Result: {task_id}, condition added: {added_condition}"
 
+    agent = Agent(role="demo agent 5", goal="My amazing goals")
     task = Task(
         description="Analyze the client's business model and define the optimal cohort timeframe.",
         expected_output_json=True,
@@ -217,23 +215,27 @@ def test_callback():
         output_field_list=[
             ResponseField(title="test1", type=str, required=True),
         ],
-        callback=callback_func
+        callback=callback_func,
+        callback_kwargs=dict(added_condition="demo for pytest")
     )
 
-
-        execution = task.execute_async(agent=agent, callback_kwargs={"item": "demo for pytest"})
-        result = execution.result()
-        assert result.raw == "ok"
-        execute.assert_called_once_with(task=task, context=None)
+    res = task.execute_sync(agent=agent)
 
+    assert res is not None
+    assert isinstance(res, TaskOutput)
+    assert res.task_id is task.id
+    assert res.raw is not None
+
+    # with patch.object(Agent, "execute_task", return_value="ok") as execute:
+    #     execution = task.execute_async(agent=agent)
+    #     result = execution.result()
+    #     assert result.raw == "ok"
+    #     execute.assert_called_once_with(task=task, context=None)
 
 
-def test_delegate():
-    agent = Agent(
-        role="demo agent 6",
-        goal="My amazing goals",
-    )
 
+def test_delegate():
+    agent = Agent(role="demo agent 6", goal="My amazing goals")
     task = Task(
         description="Analyze the client's business model and define the optimal cohort timeframe.",
         expected_output_json=True,
@@ -254,5 +256,4 @@ def test_delegate():
 # def test_conditional_task():
 
 
-
 # tools, CONDITIONAL, token usage
{versionhq-1.1.7.3 → versionhq-1.1.7.5}/tests/team/team_test.py

@@ -192,17 +192,105 @@ def test_kickoff_team_without_leader():
     assert len(res_all) == 2
     for item in res_all:
         assert isinstance(item, dict)
-        # if not hasattr(item, "output") and not hasattr(res_all, "output"):
-        #     assert "test1" in item
-        #     assert "test2" in item
-        # else:
-        #     assert "output" in item
-
     assert isinstance(res.token_usage, UsageMetrics)
     assert res.token_usage.total_tokens == 0 # as we dont set token usage on agent
 
 
+def team_kickoff_with_task_callback():
+    """
+    Each task has callback with callback kwargs.
+    """
+    demo_list = []
+    def demo_callback(item: str) -> None:
+        demo_list.append(item)
+
+    agent_a = Agent(
+        role="agent a",
+        goal="My amazing goals",
+        llm=MODEL_NAME
+    )
+
+    agent_b = Agent(
+        role="agent b",
+        goal="My amazing goals",
+        llm=MODEL_NAME
+    )
+
+    task_1 = Task(
+        description="Analyze the client's business model.",
+        output_field_list=[ResponseField(title="test1", type=str, required=True),],
+        callback=demo_callback,
+        callback_kwargs=dict(item="pytest demo 1")
+    )
+
+    task_2 = Task(
+        description="Define the cohort.",
+        output_field_list=[ResponseField(title="test1", type=int, required=True),],
+        callback=demo_callback,
+        callback_kwargs=dict(item="pytest demo 2")
+    )
+
+    team = Team(
+        members=[
+            TeamMember(agent=agent_a, is_manager=False, task=task_1),
+            TeamMember(agent=agent_b, is_manager=False, task=task_2),
+        ],
+    )
+    res = team.kickoff()
+
+    assert res.raw is not None
+    assert res.json_dict is not None
+    assert len(res.return_all_task_outputs()) == 2
+    assert len(demo_list) == 2
+    assert "pytest" in demo_list[0]
+    assert "pytest" in demo_list[1]
+
+
+
+def test_delegate_in_team():
+    """
+    When the agent belongs to the team, the team manager or peers are prioritized to delegete the task.
+    """
+
+    agent_a = Agent(
+        role="agent a",
+        goal="My amazing goals",
+        llm=MODEL_NAME
+    )
+
+    agent_b = Agent(
+        role="agent b",
+        goal="My amazing goals",
+        llm=MODEL_NAME
+    )
+
+    task_1 = Task(
+        description="Analyze the client's business model.",
+        output_field_list=[ResponseField(title="test1", type=str, required=True),],
+        allow_delegation=True
+    )
+
+    task_2 = Task(
+        description="Define the cohort.",
+        output_field_list=[ResponseField(title="test1", type=int, required=True),],
+        allow_delegation=False
+    )
+
+    team = Team(
+        members=[
+            TeamMember(agent=agent_a, is_manager=False, task=task_1),
+            TeamMember(agent=agent_b, is_manager=False, task=task_2),
+        ],
+    )
+    res = team.kickoff()
+
+    assert res.raw is not None
+    assert res.json_dict is not None
+    assert "agent b" in task_1.processed_by_agents
+
+
+
 # def test_kickoff_with_team_leader():
 #     agent_a = Agent(
 #         role="Demo Agent A",