versionhq 1.1.7.0__tar.gz → 1.1.7.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/.github/workflows/run_tests.yml +1 -1
  2. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/PKG-INFO +51 -37
  3. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/README.md +50 -36
  4. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/pyproject.toml +8 -4
  5. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/__init__.py +1 -1
  6. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -4
  7. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/agent/model.py +2 -2
  8. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/task/model.py +93 -27
  9. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/team/model.py +9 -9
  10. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq.egg-info/PKG-INFO +51 -37
  11. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/tests/agent/agent_test.py +1 -1
  12. versionhq-1.1.7.1/tests/conftest.py +3 -0
  13. versionhq-1.1.7.1/tests/task/task_test.py +112 -0
  14. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/tests/team/team_test.py +15 -12
  15. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/uv.lock +2 -1
  16. versionhq-1.1.7.0/tests/conftest.py +0 -3
  17. versionhq-1.1.7.0/tests/task/task_test.py +0 -46
  18. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/.github/workflows/publish.yml +0 -0
  19. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/.github/workflows/publish_testpypi.yml +0 -0
  20. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/.github/workflows/security_check.yml +0 -0
  21. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/.gitignore +0 -0
  22. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/.pre-commit-config.yaml +0 -0
  23. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/.python-version +0 -0
  24. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/LICENSE +0 -0
  25. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/SECURITY.md +0 -0
  26. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/db/preprocess.py +0 -0
  27. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/requirements.txt +0 -0
  28. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/runtime.txt +0 -0
  29. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/setup.cfg +0 -0
  30. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/_utils/__init__.py +0 -0
  31. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/_utils/cache_handler.py +0 -0
  32. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/_utils/i18n.py +0 -0
  33. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/_utils/logger.py +0 -0
  34. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/_utils/process_config.py +0 -0
  35. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/_utils/rpm_controller.py +0 -0
  36. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/_utils/usage_metrics.py +0 -0
  37. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
  38. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/agent/__init__.py +0 -0
  39. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/agent/parser.py +0 -0
  40. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/cli/__init__.py +0 -0
  41. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/clients/__init__.py +0 -0
  42. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/clients/customer/__init__.py +0 -0
  43. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/clients/customer/model.py +0 -0
  44. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/clients/product/__init__.py +0 -0
  45. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/clients/product/model.py +0 -0
  46. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/clients/workflow/__init__.py +0 -0
  47. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/clients/workflow/model.py +0 -0
  48. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/llm/__init__.py +0 -0
  49. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/llm/llm_vars.py +0 -0
  50. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/llm/model.py +0 -0
  51. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/task/__init__.py +0 -0
  52. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/task/formatter.py +0 -0
  53. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/team/__init__.py +0 -0
  54. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/team/team_planner.py +0 -0
  55. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/tool/__init__.py +0 -0
  56. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/tool/decorator.py +0 -0
  57. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/tool/model.py +0 -0
  58. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/tool/tool_handler.py +0 -0
  59. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq.egg-info/SOURCES.txt +0 -0
  60. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq.egg-info/dependency_links.txt +0 -0
  61. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq.egg-info/requires.txt +0 -0
  62. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq.egg-info/top_level.txt +0 -0
  63. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/tests/__init__.py +0 -0
  64. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/tests/agent/__init__.py +0 -0
  65. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/tests/cli/__init__.py +0 -0
  66. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/tests/task/__init__.py +0 -0
  67. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/tests/team/Prompts/Demo_test.py +0 -0
  68. {versionhq-1.1.7.0 → versionhq-1.1.7.1}/tests/team/__init__.py +0 -0
{versionhq-1.1.7.0 → versionhq-1.1.7.1}/.github/workflows/run_tests.yml
@@ -31,4 +31,4 @@ jobs:
  uv pip install -r requirements.txt

  - name: Run tests
- run: uv run pytest tests -vv --cache-clear
+ run: uv run pytest tests -vv --cache-clear
{versionhq-1.1.7.0 → versionhq-1.1.7.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: versionhq
- Version: 1.1.7.0
+ Version: 1.1.7.1
  Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License
@@ -52,8 +52,7 @@ Requires-Dist: wheel>=0.45.1

  # Overview

- ![MIT license](https://img.shields.io/badge/License-MIT-green) ![PyPi](https://img.shields.io/badge/pypi-v1.1.6.3-blue)
- ![python ver](https://img.shields.io/badge/Python-3.12/3.13-purple) ![pyenv ver](https://img.shields.io/badge/pyenv-2.4.23-orange)
+ ![MIT license](https://img.shields.io/badge/License-MIT-green) [![Publisher](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml/badge.svg)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml) ![PyPi](https://img.shields.io/badge/pypi-v1.1.7.0-blue) ![python ver](https://img.shields.io/badge/Python-3.12/3.13-purple) ![pyenv ver](https://img.shields.io/badge/pyenv-2.4.23-orange)


  An LLM orchestration frameworks for multi-agent systems with RAG to autopilot outbound workflows.
@@ -70,6 +69,7 @@ Messaging workflows are created at individual level, and will be deployed on thi
  - [Orchestration frameworks](https://github.com/versionHQ/multi-agent-system)
  - [Test client app](https://github.com/versionHQ/test-client-app)

+ <hr />

  ## Mindmap

@@ -86,10 +86,10 @@ LLM-powered `agent`s and `team`s use `tool`s and their own knowledge to complete
  <!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

  - [Key Features](#key-features)
+ - [Usage](#usage)
  - [Technologies Used](#technologies-used)
  - [Project Structure](#project-structure)
  - [Setup](#setup)
- - [Usage](#usage)
  - [Contributing](#contributing)
  - [Customizing AI Agents](#customizing-ai-agents)
  - [Modifying RAG Functionality](#modifying-rag-functionality)
@@ -121,6 +121,52 @@ Multiple `agents` can form a `team` to complete complex tasks together.
  **3. Autopiloting**
  - Responsible `agents` or `teams` autopilot executing and refining the messaging workflow.

+ <hr />
+
+ ## Usage
+
+ 1. Install `versionhq` package:
+ ```
+ uv pip install versionhq
+ ```
+
+ 2. You can use the `versionhq` module in your Python app.
+
+ - **i.e.,** Make LLM-based agent execute the task and return JSON dict.
+
+ ```
+ from versionhq.agent.model import Agent
+ from versionhq.task.model import Task, ResponseField
+
+ agent = Agent(
+ role="demo",
+ goal="amazing project goal",
+ skillsets=["skill_1", "skill_2", ],
+ llm="llm-of-choice"
+ )
+
+ task = Task(
+ description="Amazing task",
+ expected_output_json=True,
+ expected_output_pydantic=False,
+ output_field_list=[
+ ResponseField(title="test1", type=str, required=True),
+ ResponseField(title="test2", type=list, required=True),
+ ],
+ context=["amazing context",],
+ tools=["amazing tool"],
+ callback=None,
+ )
+
+ res = task.execute_sync(agent=agent)
+
+ return res.to_dict()
+
+ ```
+
+ For more details:
+
+ [PyPi package](https://pypi.org/project/versionhq/)

  <hr />

@@ -145,6 +191,7 @@ Multiple `agents` can form a `team` to complete complex tasks together.
  - [pre-commit](https://pre-commit.com/): Manage and maintain pre-commit hooks
  - [setuptools](https://pypi.org/project/setuptools/): Build python modules

+ <hr />

  ## Project Structure

@@ -210,25 +257,6 @@ src/

  <hr />

- ## Usage
-
- 1. Install `versionhq` package:
- ```
- uv pip install versionhq
- ```
-
- 2. You can use the `versionhq` module in your Python app.
- ```
- from versionhq.agent.model import Agent
- agent = Agent(llm="your-llm", ...)
- ```
-
- For more details:
-
- [PyPi package](https://pypi.org/project/versionhq/)
-
- <hr />
-
  ## Contributing

  1. Fork the repository
@@ -359,17 +387,3 @@ Common issues and solutions:
  > **Simple tasks**: You have a straightforward, one-off task that doesn't require significant complexity or iteration.

  > **Human input**: You need to provide initial input or guidance to the agent, or you expect to review and refine the output.
-
-
-
- <--- Remaining tasks --->
-
- - llm handling - agent
- - more llms integration
- - simpler prompting
- - broader knowledge
-
- - utils - log
- - utils - time
-
- - end to end client app test
{versionhq-1.1.7.0 → versionhq-1.1.7.1}/README.md
@@ -1,7 +1,6 @@
  # Overview

- ![MIT license](https://img.shields.io/badge/License-MIT-green) ![PyPi](https://img.shields.io/badge/pypi-v1.1.6.3-blue)
- ![python ver](https://img.shields.io/badge/Python-3.12/3.13-purple) ![pyenv ver](https://img.shields.io/badge/pyenv-2.4.23-orange)
+ ![MIT license](https://img.shields.io/badge/License-MIT-green) [![Publisher](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml/badge.svg)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml) ![PyPi](https://img.shields.io/badge/pypi-v1.1.7.0-blue) ![python ver](https://img.shields.io/badge/Python-3.12/3.13-purple) ![pyenv ver](https://img.shields.io/badge/pyenv-2.4.23-orange)


  An LLM orchestration frameworks for multi-agent systems with RAG to autopilot outbound workflows.
@@ -18,6 +17,7 @@ Messaging workflows are created at individual level, and will be deployed on thi
  - [Orchestration frameworks](https://github.com/versionHQ/multi-agent-system)
  - [Test client app](https://github.com/versionHQ/test-client-app)

+ <hr />

  ## Mindmap

@@ -34,10 +34,10 @@ LLM-powered `agent`s and `team`s use `tool`s and their own knowledge to complete
  <!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

  - [Key Features](#key-features)
+ - [Usage](#usage)
  - [Technologies Used](#technologies-used)
  - [Project Structure](#project-structure)
  - [Setup](#setup)
- - [Usage](#usage)
  - [Contributing](#contributing)
  - [Customizing AI Agents](#customizing-ai-agents)
  - [Modifying RAG Functionality](#modifying-rag-functionality)
@@ -69,6 +69,52 @@ Multiple `agents` can form a `team` to complete complex tasks together.
  **3. Autopiloting**
  - Responsible `agents` or `teams` autopilot executing and refining the messaging workflow.

+ <hr />
+
+ ## Usage
+
+ 1. Install `versionhq` package:
+ ```
+ uv pip install versionhq
+ ```
+
+ 2. You can use the `versionhq` module in your Python app.
+
+ - **i.e.,** Make LLM-based agent execute the task and return JSON dict.
+
+ ```
+ from versionhq.agent.model import Agent
+ from versionhq.task.model import Task, ResponseField
+
+ agent = Agent(
+ role="demo",
+ goal="amazing project goal",
+ skillsets=["skill_1", "skill_2", ],
+ llm="llm-of-choice"
+ )
+
+ task = Task(
+ description="Amazing task",
+ expected_output_json=True,
+ expected_output_pydantic=False,
+ output_field_list=[
+ ResponseField(title="test1", type=str, required=True),
+ ResponseField(title="test2", type=list, required=True),
+ ],
+ context=["amazing context",],
+ tools=["amazing tool"],
+ callback=None,
+ )
+
+ res = task.execute_sync(agent=agent)
+
+ return res.to_dict()
+
+ ```
+
+ For more details:
+
+ [PyPi package](https://pypi.org/project/versionhq/)

  <hr />

@@ -93,6 +139,7 @@ Multiple `agents` can form a `team` to complete complex tasks together.
  - [pre-commit](https://pre-commit.com/): Manage and maintain pre-commit hooks
  - [setuptools](https://pypi.org/project/setuptools/): Build python modules

+ <hr />

  ## Project Structure

@@ -158,25 +205,6 @@ src/

  <hr />

- ## Usage
-
- 1. Install `versionhq` package:
- ```
- uv pip install versionhq
- ```
-
- 2. You can use the `versionhq` module in your Python app.
- ```
- from versionhq.agent.model import Agent
- agent = Agent(llm="your-llm", ...)
- ```
-
- For more details:
-
- [PyPi package](https://pypi.org/project/versionhq/)
-
- <hr />
-
  ## Contributing

  1. Fork the repository
@@ -307,17 +335,3 @@ Common issues and solutions:
  > **Simple tasks**: You have a straightforward, one-off task that doesn't require significant complexity or iteration.

  > **Human input**: You need to provide initial input or guidance to the agent, or you expect to review and refine the output.
-
-
-
- <--- Remaining tasks --->
-
- - llm handling - agent
- - more llms integration
- - simpler prompting
- - broader knowledge
-
- - utils - log
- - utils - time
-
- - end to end client app test
{versionhq-1.1.7.0 → versionhq-1.1.7.1}/pyproject.toml
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__"]

  [project]
  name = "versionhq"
- version = "1.1.7.0"
+ version = "1.1.7.1"
  authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
  description = "LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows"
  readme = "README.md"
@@ -59,7 +59,7 @@ dev-dependencies = [
  "black",
  "bandit",
  "twine",
- "pytest>=8.0.0",
+ "pytest>=8.3.4",
  ]

  [tool.uv.workspace]
@@ -71,6 +71,10 @@ ignore_missing_imports = true
  [tool.bandit]
  exclude_dirs = [""]

+
  [tool.pytest.ini_options]
- minversion = "6.0"
- addopts = "-ra -q"
+ filterwarnings = "ignore"
+ minversion = "8.0"
+ addopts = "-v -ra -q"
+ log_cli = true
+ log_cli_level = "INFO"
{versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/__init__.py
@@ -17,7 +17,7 @@ from versionhq.team.model import Team, TeamOutput
  from versionhq.tool.model import Tool


- __version__ = "1.1.7.0"
+ __version__ = "1.1.7.1"
  __all__ = [
  "Agent",
  "Customer",
{versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/agent/TEMPLATES/Backstory.py
@@ -1,7 +1,3 @@
  BACKSTORY="""You are a {role} with deep understanding of {knowledge} and highly skilled in {skillsets}.
  You have access to call the RAG tools that can {rag_tool_overview}. By leveraging these tools, your knowledge, and skillsets, you can identify competitive strategies that have been proven effective to achieve the goal: {goal}. Take these into consideration, create innovative solutions.
  """
-
-
- # developing strategies to improve cohort retention and customer lifetime value
- # ingest past successful campaigns
{versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/agent/model.py
@@ -367,8 +367,8 @@ class Agent(ABC, BaseModel):
  """

  task_prompt = task.prompt()
- # if context:
- # task_prompt = self.i18n.slice("task_with_context").format(task=task_prompt, context=context)
+ if context:
+ task_prompt += context

  tool_results = []
  if task.tools_called:
{versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/task/model.py
@@ -18,21 +18,44 @@ class ResponseField(BaseModel):
  """
  Field class to use in the response schema for the JSON response.
  """
+
  title: str = Field(default=None)
  type: Type = Field(default=str)
  required: bool = Field(default=True)

+
  def _annotate(self, value: Any) -> Annotated:
  """
  Address `create_model`
  """
  return Annotated[self.type, value] if isinstance(value, self.type) else Annotated[str, str(value)]

+
+ def _convert(self, value: Any) -> Any:
+ try:
+ if self.type is Any:
+ pass
+ elif self.type is int:
+ return int(value)
+ elif self.type is float:
+ return float(value)
+ elif self.type is list or self.type is dict:
+ return json.loads(value)
+ else:
+ return value
+ except:
+ return value
+
+
  def create_pydantic_model(self, result: Dict, base_model: Union[BaseModel | Any]) -> Any:
  for k, v in result.items():
- if k is not self.title or type(v) is not self.type:
+ if k is not self.title:
  pass
- setattr(base_model, k, v)
+ elif type(v) is not self.type:
+ v = self._convert(v)
+ setattr(base_model, k, v)
+ else:
+ setattr(base_model, k, v)
  return base_model


@@ -43,12 +66,14 @@ class AgentOutput(BaseModel):
  """
  customer_id: str = Field(default=None, max_length=126, description="customer uuid")
  customer_analysis: str = Field(default=None, max_length=256, description="analysis of the customer")
- business_overview: str = Field(default=None,max_length=256,description="analysis of the client's business")
- cohort_timeframe: int = Field(default=None,max_length=256,description="Suitable cohort timeframe in days")
+ product_overview: str = Field(default=None, max_length=256, description="analysis of the client's business")
+ usp: str = Field()
+ cohort_timeframe: int = Field(default=None, max_length=256, description="suitable cohort timeframe in days")
  kpi_metrics: List[str] = Field(default=list, description="Ideal KPIs to be tracked")
  assumptions: List[Dict[str, Any]] = Field(default=list, description="assumptions to test")


+
  class TaskOutput(BaseModel):
  """
  Store the final output of the task in TaskOutput class.
@@ -57,8 +82,8 @@ class TaskOutput(BaseModel):

  task_id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="store Task ID")
  raw: str = Field(default="", description="Raw output of the task")
- pydantic: Optional[Any] = Field(default=None, description="`raw` converted to the abs. pydantic model")
  json_dict: Union[Dict[str, Any]] = Field(default=None, description="`raw` converted to dictionary")
+ pydantic: Optional[Any] = Field(default=None, description="`raw` converted to the abs. pydantic model")

  def __str__(self) -> str:
  return str(self.pydantic) if self.pydantic else str(self.json_dict) if self.json_dict else self.raw
@@ -75,16 +100,31 @@ class TaskOutput(BaseModel):
  )
  return json.dumps(self.json_dict)

+
  def to_dict(self) -> Dict[str, Any]:
- """Convert json_output and pydantic_output to a dictionary."""
+ """
+ Convert pydantic / raw output into dict and return the dict.
+ When we only have `raw` output, return `{ output: raw }` to avoid an error
+ """
+
  output_dict = {}
  if self.json_dict:
  output_dict.update(self.json_dict)
  elif self.pydantic:
  output_dict.update(self.pydantic.model_dump())
+ else:
+ output_dict.upate({ "output": self.raw })
  return output_dict


+ def context_prompting(self) -> str:
+ """
+ When the task is called as context, return its output in concise string to add it to the prompt
+ """
+ return json.dumps(self.json_dict) if self.json_dict else self.raw[0: 127]
+
+
+
  class Task(BaseModel):
  """
  Task to be executed by the agent or the team.
@@ -102,7 +142,10 @@ class Task(BaseModel):
  # output
  expected_output_json: bool = Field(default=True)
  expected_output_pydantic: bool = Field(default=False)
- output_field_list: Optional[List[ResponseField]] = Field(default=[ResponseField(title="output")])
+ output_field_list: List[ResponseField] = Field(
+ default=[ResponseField(title="output", type=str, required=False)],
+ description="provide output key and data type. this will be cascaded to the agent via task.prompt()"
+ )
  output: Optional[TaskOutput] = Field(default=None, description="store the final task output in TaskOutput class")

  # task setup
@@ -123,18 +166,18 @@


  @property
- def output_prompt(self):
+ def output_prompt(self) -> str:
  """
  Draft prompts on the output format by converting `output_field_list` to dictionary.
  """

- output_prompt, output_dict = "", dict()
+ output_prompt, output_formats_to_follow = "", dict()
  for item in self.output_field_list:
- output_dict[item.title] = f"<Return your answer in {item.type.__name__}>"
+ output_formats_to_follow[item.title] = f"<Return your answer in {item.type.__name__}>"

  output_prompt = f"""
- Your outputs STRICTLY follow the following format and should NOT contain any other irrevant elements that not specified in the following format:
- {output_dict}
+ Your outputs MUST adhere to the following format and should NOT include any irrelevant elements:
+ {output_formats_to_follow}
  """
  return output_prompt

@@ -228,16 +271,25 @@ class Task(BaseModel):
  return self


- def prompt(self, customer=str | None, product_overview=str | None) -> str:
+ def prompt(self, customer: str = None, product_overview: str = None) -> str:
  """
- Format the task prompt.
+ Format the task prompt and cascade it to the agent.
+ When the task has context, add context prompting of all the tasks in the context.
+ When we have cusotmer/product info, add them to the prompt.
  """
- task_slices = [
- self.description,
- f"Customer overview: {customer}",
- f"Product overview: {product_overview}",
- f"{self.output_prompt}",
- ]
+
+ task_slices = [self.description, f"{self.output_prompt}"]
+
+ if self.context:
+ context_outputs = "\n".join([task.output.context_prompting() if hasattr(task, "output") else "" for task in self.context])
+ task_slices.insert(1, f"Take the following context into consideration: {context_outputs}")
+
+ if customer:
+ task_slices.insert(1, f"customer overview: {customer}")
+
+ if product_overview:
+ task_slices.insert(1, f"Product overview: {product_overview}")
+
  return "\n".join(task_slices)


@@ -273,10 +325,10 @@ class Task(BaseModel):

  def create_pydantic_output(self, output_json_dict: Dict[str, Any], raw_result: Any = None) -> Optional[Any]:
  """
- Create pydantic output from the raw result.
+ Create pydantic output from the `raw` result.
  """

- output_pydantic = None #! REFINEME
+ output_pydantic = None
  if isinstance(raw_result, BaseModel):
  output_pydantic = raw_result

@@ -285,8 +337,14 @@ class Task(BaseModel):

  else:
  output_pydantic = create_model("PydanticTaskOutput", __base__=BaseModel)
- for item in self.output_field_list:
- item.create_pydantic_model(result=output_json_dict, base_model=output_pydantic)
+ try:
+ for item in self.output_field_list:
+ value = output_json_dict[item.title] if hasattr(output_json_dict, item.title) else None
+ if value and type(value) is not item.type:
+ value = item._convert(value)
+ setattr(output_pydantic, item.title, value)
+ except:
+ setattr(output_pydantic, "output", output_json_dict)

  return output_pydantic

@@ -307,11 +365,19 @@ class Task(BaseModel):
  self.description = self._original_description.format(**inputs)
  # self.expected_output = self._original_expected_output.format(**inputs)

+
  # task execution
  def execute_sync(self, agent, context: Optional[str] = None) -> TaskOutput:
  """
  Execute the task synchronously.
+ When the task has context, make sure we have executed all the tasks in the context first.
  """
+
+ if self.context:
+ for task in self.context:
+ if task.output is None:
+ task._execute_core(agent, context)
+
  return self._execute_core(agent, context)


@@ -341,12 +407,12 @@ class Task(BaseModel):
  """

  self.prompt_context = context
- raw_result = agent.execute_task(task=self, context=context)
- output_json_dict = self.create_json_output(raw_result=raw_result)
+ output_raw = agent.execute_task(task=self, context=context)
+ output_json_dict = self.create_json_output(raw_result=output_raw)
  output_pydantic = self.create_pydantic_output(output_json_dict=output_json_dict)
  task_output = TaskOutput(
  task_id=self.id,
- raw=raw_result,
+ raw=output_raw,
  pydantic=output_pydantic,
  json_dict=output_json_dict
  )
{versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq/team/model.py
@@ -100,26 +100,27 @@ class TeamOutput(BaseModel):

  def to_dict(self) -> Dict[str, Any]:
  """
- Convert json_output and pydantic_output to a dictionary.
+ Convert pydantic / raw output into dict and return the dict.
+ When we only have `raw` output, return `{ output: raw }` to avoid an error
  """
+
  output_dict = {}
  if self.json_dict:
  output_dict.update(self.json_dict)
  elif self.pydantic:
  output_dict.update(self.pydantic.model_dump())
  else:
- output_dict.update({"output", self.raw})
+ output_dict.upate({ "output": self.raw })
  return output_dict

+
  def return_all_task_outputs(self) -> List[Dict[str, Any]]:
  res = [output.json_dict for output in self.task_output_list]
  return res


  class TeamMember(ABC, BaseModel):
- agent: Agent | None = Field(
- default=None, description="store the agent to be a member"
- )
+ agent: Agent | None = Field(default=None, description="store the agent to be a member")
  is_manager: bool = Field(default=False)
  task: Task | None = Field(default=None)

@@ -164,9 +165,11 @@ class Team(BaseModel):
  execution_logs: List[Dict[str, Any]] = Field(default=[], description="list of execution logs for tasks")
  usage_metrics: Optional[UsageMetrics] = Field(default=None, description="usage metrics for all the llm executions")

+
  def __name__(self) -> str:
  return self.name if self.name is not None else self.id.__str__

+
  @property
  def key(self) -> str:
  source = [str(member.agent.id.__str__) for member in self.members] + [str(task.id.__str__) for task in self.tasks]
@@ -175,9 +178,7 @@ class Team(BaseModel):

  @property
  def manager_agent(self) -> Agent:
- manager_agent = [
- member.agent for member in self.members if member.is_manager == True
- ]
+ manager_agent = [member.agent for member in self.members if member.is_manager == True]
  return manager_agent[0] if len(manager_agent) > 0 else None


@@ -251,7 +252,6 @@ class Team(BaseModel):
  """
  Every team member should have a task to handle.
  """
-
  if self.process == TaskHandlingProcess.sequential:
  for member in self.members:
  if member.task is None:
{versionhq-1.1.7.0 → versionhq-1.1.7.1}/src/versionhq.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: versionhq
- Version: 1.1.7.0
+ Version: 1.1.7.1
  Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License
@@ -52,8 +52,7 @@ Requires-Dist: wheel>=0.45.1

  # Overview

- ![MIT license](https://img.shields.io/badge/License-MIT-green) ![PyPi](https://img.shields.io/badge/pypi-v1.1.6.3-blue)
- ![python ver](https://img.shields.io/badge/Python-3.12/3.13-purple) ![pyenv ver](https://img.shields.io/badge/pyenv-2.4.23-orange)
+ ![MIT license](https://img.shields.io/badge/License-MIT-green) [![Publisher](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml/badge.svg)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml) ![PyPi](https://img.shields.io/badge/pypi-v1.1.7.0-blue) ![python ver](https://img.shields.io/badge/Python-3.12/3.13-purple) ![pyenv ver](https://img.shields.io/badge/pyenv-2.4.23-orange)


  An LLM orchestration frameworks for multi-agent systems with RAG to autopilot outbound workflows.
@@ -70,6 +69,7 @@ Messaging workflows are created at individual level, and will be deployed on thi
  - [Orchestration frameworks](https://github.com/versionHQ/multi-agent-system)
  - [Test client app](https://github.com/versionHQ/test-client-app)

+ <hr />

  ## Mindmap

@@ -86,10 +86,10 @@ LLM-powered `agent`s and `team`s use `tool`s and their own knowledge to complete
  <!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

  - [Key Features](#key-features)
+ - [Usage](#usage)
  - [Technologies Used](#technologies-used)
  - [Project Structure](#project-structure)
  - [Setup](#setup)
- - [Usage](#usage)
  - [Contributing](#contributing)
  - [Customizing AI Agents](#customizing-ai-agents)
  - [Modifying RAG Functionality](#modifying-rag-functionality)
@@ -121,6 +121,52 @@ Multiple `agents` can form a `team` to complete complex tasks together.
  **3. Autopiloting**
  - Responsible `agents` or `teams` autopilot executing and refining the messaging workflow.

+ <hr />
+
+ ## Usage
+
+ 1. Install `versionhq` package:
+ ```
+ uv pip install versionhq
+ ```
+
+ 2. You can use the `versionhq` module in your Python app.
+
+ - **i.e.,** Make LLM-based agent execute the task and return JSON dict.
+
+ ```
+ from versionhq.agent.model import Agent
+ from versionhq.task.model import Task, ResponseField
+
+ agent = Agent(
+ role="demo",
+ goal="amazing project goal",
+ skillsets=["skill_1", "skill_2", ],
+ llm="llm-of-choice"
+ )
+
+ task = Task(
+ description="Amazing task",
+ expected_output_json=True,
+ expected_output_pydantic=False,
+ output_field_list=[
+ ResponseField(title="test1", type=str, required=True),
+ ResponseField(title="test2", type=list, required=True),
+ ],
+ context=["amazing context",],
+ tools=["amazing tool"],
+ callback=None,
+ )
+
+ res = task.execute_sync(agent=agent)
+
+ return res.to_dict()
+
+ ```
+
+ For more details:
+
+ [PyPi package](https://pypi.org/project/versionhq/)

  <hr />

@@ -145,6 +191,7 @@ Multiple `agents` can form a `team` to complete complex tasks together.
  - [pre-commit](https://pre-commit.com/): Manage and maintain pre-commit hooks
  - [setuptools](https://pypi.org/project/setuptools/): Build python modules

+ <hr />

  ## Project Structure

@@ -210,25 +257,6 @@ src/

  <hr />

- ## Usage
-
- 1. Install `versionhq` package:
- ```
- uv pip install versionhq
- ```
-
- 2. You can use the `versionhq` module in your Python app.
- ```
- from versionhq.agent.model import Agent
- agent = Agent(llm="your-llm", ...)
- ```
-
- For more details:
-
- [PyPi package](https://pypi.org/project/versionhq/)
-
- <hr />
-
  ## Contributing

  1. Fork the repository
@@ -359,17 +387,3 @@ Common issues and solutions:
  > **Simple tasks**: You have a straightforward, one-off task that doesn't require significant complexity or iteration.

  > **Human input**: You need to provide initial input or guidance to the agent, or you expect to review and refine the output.
-
-
-
- <--- Remaining tasks --->
-
- - llm handling - agent
- - more llms integration
- - simpler prompting
- - broader knowledge
-
- - utils - log
- - utils - time
-
- - end to end client app test
{versionhq-1.1.7.0 → versionhq-1.1.7.1}/tests/agent/agent_test.py
@@ -76,4 +76,4 @@ def test_build_agent_with_llm():
  assert agent.llm.api_key == LITELLM_API_KEY
  assert agent.tools == []

- # NEED TO ADD AGENTS WITH TOOLS
+ # AGENTS WITH TOOLS
versionhq-1.1.7.1/tests/conftest.py
@@ -0,0 +1,3 @@
+ from dotenv import load_dotenv
+
+ load_dotenv(override=True)
versionhq-1.1.7.1/tests/task/task_test.py
@@ -0,0 +1,112 @@
+ import os
+ import pytest
+ from typing import Union
+ from versionhq.agent.model import Agent
+ from versionhq.task.model import Task, ResponseField, TaskOutput, AgentOutput
+
+ DEFAULT_MODEL_NAME = os.environ.get("LITELLM_MODEL_NAME", "gpt-3.5-turbo")
+ LITELLM_API_KEY = os.environ.get("LITELLM_API_KEY")
+
+
+ def test_sync_execute_task():
+ agent = Agent(
+ role="demo agent 000",
+ goal="My amazing goals",
+ backstory="My amazing backstory",
+ verbose=True,
+ llm=DEFAULT_MODEL_NAME,
+ max_tokens=3000,
+ )
+
+ task = Task(
+ description="Analyze the client's business model and define the optimal cohort timeframe.",
+ expected_output_json=True,
+ expected_output_pydantic=True,
+ output_field_list=[
+ ResponseField(title="test1", type=str, required=True),
+ ResponseField(title="test2", type=list, required=True),
+ ],
+ context=None,
+ callback=None,
+ )
+ res = task.execute_sync(agent=agent)
+
+ assert isinstance(res, TaskOutput)
+ assert res.task_id is task.id
+ assert res.raw is not None
+ assert isinstance(res.raw, str)
+ assert res.json_dict is not None
+ assert isinstance(res.json_dict, dict)
+ assert res.pydantic is not None
+
+ if hasattr(res.pydantic, "output"):
+ assert res.pydantic.output is not None
+ else:
+ assert hasattr(res.pydantic, "test1")
+ if res.pydantic.test1:
+ assert type(res.pydantic.test1) == str
+
+ assert hasattr(res.pydantic, "test2")
+ if res.pydantic.test2:
+ assert type(res.pydantic.test2) == list
+
+
+ def test_sync_execute_task_with_context():
+ """
+ Use case = One agent handling multiple tasks sequentially using context set in the main task.
+ """
+
+ agent = Agent(
+ role="demo agent 001",
+ goal="My amazing goals",
+ backstory="My amazing backstory",
+ verbose=True,
+ llm=DEFAULT_MODEL_NAME,
+ max_tokens=3000,
+ )
+
+ sub_task = Task(
+ description="analyze the client's business model",
+ expected_output_json=True,
+ expected_output_pydantic=False,
+ output_field_list=[
+ ResponseField(title="result", type=str, required=True),
+ ]
+ )
+ main_task = Task(
+ description="Define the optimal cohort timeframe in days and target audience.",
+ expected_output_json=True,
+ expected_output_pydantic=True,
+ output_field_list=[
+ ResponseField(title="test1", type=int, required=True),
+ ResponseField(title="test2", type=str, required=True),
+ ],
+ context=[sub_task]
+ )
+ res = main_task.execute_sync(agent=agent)
+
+ assert isinstance(res, TaskOutput)
+ assert res.task_id is main_task.id
+ assert res.raw is not None
+ assert isinstance(res.raw, str)
+ assert res.json_dict is not None
+ assert isinstance(res.json_dict, dict)
+ assert res.pydantic is not None
+
+ if hasattr(res.pydantic, "output"):
+ assert res.pydantic.output is not None
+ else:
+ assert hasattr(res.pydantic, "test1")
+ if res.pydantic.test1:
+ assert type(res.pydantic.test1) == Union[int, str]
+
+ assert hasattr(res.pydantic, "test2")
+ if res.pydantic.test2:
+ assert type(res.pydantic.test2) == Union[list, str]
+
+ assert sub_task.output is not None
+ assert sub_task.output.json_dict is not None
+ assert "result" in main_task.prompt()
+
+
+ # CALLBACKS, tools, FUTURE, ASYNC, CONDITIONAL, token usage
{versionhq-1.1.7.0 → versionhq-1.1.7.1}/tests/team/team_test.py
@@ -11,7 +11,7 @@ MODEL_NAME = os.environ.get("LITELLM_MODEL_NAME", "gpt-3.5-turbo")

  def test_form_team():
  agent_a = Agent(
- role="Demo Agent A",
+ role="agent a",
  goal="My amazing goals",
  backstory="My amazing backstory",
  verbose=True,
@@ -20,7 +20,7 @@ def test_form_team():
  )

  agent_b = Agent(
- role="Demo Agent B-1",
+ role="agent b",
  goal="My amazing goals",
  verbose=True,
  llm=MODEL_NAME,
@@ -28,7 +28,7 @@ def test_form_team():
  )

  task_1 = Task(
- description="Analyze the client's business model, target audience, and customer information and define the optimal cohort timeframe based on customer lifecycle and product usage patterns.",
+ description="Analyze the client's business model.",
  expected_output_json=True,
  output_field_list=[
  ResponseField(title="test1", type=str, required=True),
@@ -41,7 +41,7 @@ def test_form_team():
  )

  task_2 = Task(
- description="Amazing task description",
+ description="Define the cohort.",
  expected_output_json=True,
  expected_output_pydantic=True,
  output_field_list=[
@@ -72,7 +72,7 @@ def test_form_team():

  def test_form_team_without_leader():
  agent_a = Agent(
- role="Demo Agent A",
+ role="agent a",
  goal="My amazing goals",
  backstory="My amazing backstory",
  verbose=True,
@@ -81,7 +81,7 @@ def test_form_team_without_leader():
  )

  agent_b = Agent(
- role="Demo Agent B-1",
+ role="agent b",
  goal="My amazing goals",
  verbose=True,
  llm=MODEL_NAME,
@@ -89,7 +89,7 @@ def test_form_team_without_leader():
  )

  task_1 = Task(
- description="Analyze the client's business model, target audience, and customer information and define the optimal cohort timeframe based on customer lifecycle and product usage patterns.",
+ description="Analyze the client's business model.",
  expected_output_json=True,
  output_field_list=[
  ResponseField(title="test1", type=str, required=True),
@@ -102,7 +102,7 @@ def test_form_team_without_leader():
  )

  task_2 = Task(
- description="Amazing task description",
+ description="Define the cohort.",
  expected_output_json=True,
  expected_output_pydantic=True,
  output_field_list=[
@@ -150,7 +150,7 @@ def test_kickoff_team_without_leader():
  )

  task_1 = Task(
- description="Analyze the client's business model, target audience, and customer information and define the optimal cohort timeframe based on customer lifecycle and product usage patterns.",
+ description="Analyze the client's business model.",
  expected_output_json=True,
  output_field_list=[
  ResponseField(title="test1", type=str, required=True),
@@ -163,7 +163,7 @@ def test_kickoff_team_without_leader():
  )

  task_2 = Task(
- description="Amazing task description",
+ description="Define the cohort.",
  expected_output_json=True,
  expected_output_pydantic=True,
  output_field_list=[
@@ -198,8 +198,11 @@ def test_kickoff_team_without_leader():
  assert len(res_all) == 2
  for item in res_all:
  assert isinstance(item, dict)
- assert "test1" in item
- assert "test2" in item
+ if not hasattr(item, "output") or not hasattr(res_all, "output"):
+ assert "test1" in item
+ assert "test2" in item
+ else:
+ assert "output" in item

  assert isinstance(res.token_usage, UsageMetrics)
  assert res.token_usage.total_tokens == 0 # as we dont set token usage on agent
{versionhq-1.1.7.0 → versionhq-1.1.7.1}/uv.lock
@@ -1647,7 +1647,7 @@ wheels = [

  [[package]]
  name = "versionhq"
- version = "1.1.7.0"
+ version = "1.1.7.1"
  source = { editable = "." }
  dependencies = [
  { name = "composio" },
@@ -1703,6 +1703,7 @@ dev = [
  { name = "mypy", specifier = ">=1.10.0" },
  { name = "pre-commit", specifier = ">=3.6.0" },
  { name = "pytest", specifier = ">=8.0.0" },
+ { name = "pytest", specifier = ">=8.3.4" },
  { name = "pytest-vcr", specifier = ">=1.0.2" },
  { name = "python-dotenv", specifier = ">=1.0.0" },
  { name = "twine" },
versionhq-1.1.7.0/tests/conftest.py
@@ -1,3 +0,0 @@
- from dotenv import load_dotenv
-
- load_result = load_dotenv(override=True)
versionhq-1.1.7.0/tests/task/task_test.py
@@ -1,46 +0,0 @@
- import os
- from typing import Type
- from pydantic import BaseModel
- from versionhq.agent.model import Agent
- from versionhq.task.model import Task, ResponseField, TaskOutput, AgentOutput
-
- MODEL_NAME = os.environ.get("LITELLM_MODEL_NAME", "gpt-3.5-turbo")
-
-
- def test_sync_execute_task():
- agent_a = Agent(
- role="Demo Agent A",
- goal="My amazing goals",
- backstory="My amazing backstory",
- verbose=True,
- llm=MODEL_NAME,
- max_tokens=3000,
- )
-
- task = Task(
- description="Analyze the client's business model, target audience, and customer information and define the optimal cohort timeframe based on customer lifecycle and product usage patterns.",
- expected_output_json=True,
- output_field_list=[
- ResponseField(title="test1", type=str, required=True),
- ResponseField(title="test2", type=list, required=True),
- ],
- expected_output_pydantic=True,
- context=[],
- tools=[],
- callback=None,
- )
- res = task.execute_sync(agent=agent_a)
-
- assert isinstance(res, TaskOutput)
- assert res.task_id is task.id
- assert res.raw is not None
- assert isinstance(res.raw, str)
- assert res.json_dict is not None
- assert isinstance(res.json_dict, dict)
- assert res.pydantic is not None
- assert hasattr(res.pydantic, "test1")
- assert type(res.pydantic.test1) == str
- assert hasattr(res.pydantic, "test2")
- assert type(res.pydantic.test2) == list
-
- # CALLBACKS, TASK HANDLED BY AGENTS WITH TOOLS, FUTURE, ASYNC, CONDITIONAL, token usage