versionhq 1.1.7.5__tar.gz → 1.1.7.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/.gitignore +2 -0
  2. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/PKG-INFO +9 -7
  3. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/README.md +2 -3
  4. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/pyproject.toml +8 -6
  5. versionhq-1.1.7.8/requirements-dev.txt +9 -0
  6. versionhq-1.1.7.8/requirements.txt +220 -0
  7. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/__init__.py +1 -1
  8. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/_utils/process_config.py +2 -4
  9. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/agent/model.py +9 -9
  10. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/agent/parser.py +2 -2
  11. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/clients/workflow/model.py +13 -9
  12. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/llm/model.py +3 -3
  13. versionhq-1.1.7.8/src/versionhq/storage/task_output_storage.py +141 -0
  14. versionhq-1.1.7.8/src/versionhq/task/log_handler.py +59 -0
  15. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/task/model.py +53 -35
  16. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/team/model.py +30 -73
  17. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/tool/model.py +2 -9
  18. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/tool/tool_handler.py +3 -2
  19. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq.egg-info/PKG-INFO +9 -7
  20. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq.egg-info/SOURCES.txt +4 -0
  21. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq.egg-info/requires.txt +3 -3
  22. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/tests/clients/workflow_test.py +11 -0
  23. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/tests/task/task_test.py +31 -10
  24. versionhq-1.1.7.8/tests/team/__init__.py +0 -0
  25. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/tests/team/team_test.py +9 -104
  26. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/uv.lock +19 -24
  27. versionhq-1.1.7.5/requirements.txt +0 -20
  28. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/.github/workflows/publish.yml +0 -0
  29. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/.github/workflows/publish_testpypi.yml +0 -0
  30. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/.github/workflows/run_tests.yml +0 -0
  31. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/.github/workflows/security_check.yml +0 -0
  32. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/.pre-commit-config.yaml +0 -0
  33. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/.python-version +0 -0
  34. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/LICENSE +0 -0
  35. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/SECURITY.md +0 -0
  36. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/db/preprocess.py +0 -0
  37. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/runtime.txt +0 -0
  38. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/setup.cfg +0 -0
  39. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/_utils/__init__.py +0 -0
  40. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/_utils/cache_handler.py +0 -0
  41. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/_utils/i18n.py +0 -0
  42. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/_utils/logger.py +0 -0
  43. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/_utils/rpm_controller.py +0 -0
  44. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/_utils/usage_metrics.py +0 -0
  45. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
  46. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
  47. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/agent/__init__.py +0 -0
  48. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/cli/__init__.py +0 -0
  49. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/clients/__init__.py +0 -0
  50. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/clients/customer/__init__.py +0 -0
  51. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/clients/customer/model.py +0 -0
  52. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/clients/product/__init__.py +0 -0
  53. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/clients/product/model.py +0 -0
  54. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/clients/workflow/__init__.py +0 -0
  55. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/llm/__init__.py +0 -0
  56. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/llm/llm_vars.py +0 -0
  57. {versionhq-1.1.7.5/src/versionhq/team → versionhq-1.1.7.8/src/versionhq/storage}/__init__.py +0 -0
  58. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/task/__init__.py +0 -0
  59. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/task/formatter.py +0 -0
  60. {versionhq-1.1.7.5/src/versionhq/tool → versionhq-1.1.7.8/src/versionhq/team}/__init__.py +0 -0
  61. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/team/team_planner.py +0 -0
  62. {versionhq-1.1.7.5/tests → versionhq-1.1.7.8/src/versionhq/tool}/__init__.py +0 -0
  63. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq/tool/decorator.py +0 -0
  64. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq.egg-info/dependency_links.txt +0 -0
  65. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/src/versionhq.egg-info/top_level.txt +0 -0
  66. {versionhq-1.1.7.5/tests/agent → versionhq-1.1.7.8/tests}/__init__.py +0 -0
  67. {versionhq-1.1.7.5/tests/cli → versionhq-1.1.7.8/tests/agent}/__init__.py +0 -0
  68. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/tests/agent/agent_test.py +0 -0
  69. {versionhq-1.1.7.5/tests/task → versionhq-1.1.7.8/tests/cli}/__init__.py +0 -0
  70. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/tests/conftest.py +0 -0
  71. {versionhq-1.1.7.5/tests/team → versionhq-1.1.7.8/tests/task}/__init__.py +0 -0
  72. {versionhq-1.1.7.5 → versionhq-1.1.7.8}/tests/team/Prompts/Demo_test.py +0 -0
@@ -1,4 +1,5 @@
1
1
  knowledge/
2
+ memory/
2
3
 
3
4
  composio.py
4
5
  memo.txt
@@ -8,6 +9,7 @@ build/
8
9
  .pypirc
9
10
  uploads/
10
11
  sample_dataset/
12
+ chroma.sqlite3
11
13
  *egg-info/
12
14
 
13
15
  __pycache__/
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: versionhq
3
- Version: 1.1.7.5
3
+ Version: 1.1.7.8
4
4
  Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
5
5
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
6
6
  License: MIT License
@@ -32,6 +32,9 @@ Keywords: orchestration framework,orchestration,ai agent,multi-agent system,RAG,
32
32
  Classifier: Programming Language :: Python
33
33
  Classifier: License :: OSI Approved :: MIT License
34
34
  Classifier: Operating System :: OS Independent
35
+ Classifier: Development Status :: 3 - Alpha
36
+ Classifier: Intended Audience :: Developers
37
+ Classifier: Topic :: Software Development :: Build Tools
35
38
  Requires-Python: >=3.12
36
39
  Description-Content-Type: text/markdown
37
40
  License-File: LICENSE
@@ -43,12 +46,12 @@ Requires-Dist: typing
43
46
  Requires-Dist: json-repair>=0.31.0
44
47
  Requires-Dist: litellm>=1.55.8
45
48
  Requires-Dist: openai>=1.57.0
46
- Requires-Dist: composio-openai>=0.6.0
47
- Requires-Dist: pre-commit>=4.0.1
48
- Requires-Dist: gunicorn>=23.0.0
49
+ Requires-Dist: composio-openai>=0.6.9
49
50
  Requires-Dist: composio>=0.1.0
50
51
  Requires-Dist: setuptools>=75.6.0
51
52
  Requires-Dist: wheel>=0.45.1
53
+ Requires-Dist: python-dotenv>=1.0.0
54
+ Requires-Dist: appdirs>=1.4.4
52
55
 
53
56
  # Overview
54
57
 
@@ -56,7 +59,7 @@ Requires-Dist: wheel>=0.45.1
56
59
  [![Publisher](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml/badge.svg)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
57
60
  ![PyPI](https://img.shields.io/badge/PyPI-v1.1.7.5-blue)
58
61
  ![python ver](https://img.shields.io/badge/Python-3.12/3.13-purple)
59
- ![pyenv ver](https://img.shields.io/badge/pyenv-2.4.23-orange)
62
+ ![pyenv ver](https://img.shields.io/badge/pyenv-2.5.0-orange)
60
63
 
61
64
 
62
65
  An LLM orchestration frameworks for multi-agent systems with RAG to autopilot outbound workflows.
@@ -276,8 +279,7 @@ src/
276
279
  ```
277
280
  uv venv
278
281
  source .venv/bin/activate
279
-
280
- uv pip install -r requirements.txt -v
282
+ uv pip sync
281
283
  ```
282
284
 
283
285
  * In case of AssertionError/module mismatch, run Python version control using `.pyenv`
@@ -4,7 +4,7 @@
4
4
  [![Publisher](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml/badge.svg)](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
5
5
  ![PyPI](https://img.shields.io/badge/PyPI-v1.1.7.5-blue)
6
6
  ![python ver](https://img.shields.io/badge/Python-3.12/3.13-purple)
7
- ![pyenv ver](https://img.shields.io/badge/pyenv-2.4.23-orange)
7
+ ![pyenv ver](https://img.shields.io/badge/pyenv-2.5.0-orange)
8
8
 
9
9
 
10
10
  An LLM orchestration frameworks for multi-agent systems with RAG to autopilot outbound workflows.
@@ -224,8 +224,7 @@ src/
224
224
  ```
225
225
  uv venv
226
226
  source .venv/bin/activate
227
-
228
- uv pip install -r requirements.txt -v
227
+ uv pip sync
229
228
  ```
230
229
 
231
230
  * In case of AssertionError/module mismatch, run Python version control using `.pyenv`
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__"]
15
15
 
16
16
  [project]
17
17
  name = "versionhq"
18
- version = "1.1.7.5"
18
+ version = "1.1.7.8"
19
19
  authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
20
20
  description = "LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows"
21
21
  readme = "README.md"
@@ -31,17 +31,20 @@ dependencies = [
31
31
  "json-repair>=0.31.0",
32
32
  "litellm>=1.55.8",
33
33
  "openai>=1.57.0",
34
- "composio-openai>=0.6.0",
35
- "pre-commit>=4.0.1",
36
- "gunicorn>=23.0.0",
34
+ "composio-openai>=0.6.9",
37
35
  "composio>=0.1.0",
38
36
  "setuptools>=75.6.0",
39
37
  "wheel>=0.45.1",
38
+ "python-dotenv>=1.0.0",
39
+ "appdirs>=1.4.4",
40
40
  ]
41
41
  classifiers = [
42
42
  "Programming Language :: Python",
43
43
  "License :: OSI Approved :: MIT License",
44
44
  "Operating System :: OS Independent",
45
+ "Development Status :: 3 - Alpha",
46
+ "Intended Audience :: Developers",
47
+ "Topic :: Software Development :: Build Tools",
45
48
  ]
46
49
 
47
50
  [project.urls]
@@ -52,10 +55,9 @@ Issues = "https://github.com/versionHQ/multi-agent-system/issues"
52
55
  [tool.uv]
53
56
  dev-dependencies = [
54
57
  "mypy>=1.10.0",
55
- "pre-commit>=3.6.0",
58
+ "pre-commit>=4.0.1",
56
59
  "pytest>=8.0.0",
57
60
  "pytest-vcr>=1.0.2",
58
- "python-dotenv>=1.0.0",
59
61
  "black",
60
62
  "bandit",
61
63
  "twine",
@@ -0,0 +1,9 @@
1
+ mypy>=1.10.0
2
+ pre-commit>=4.0.1
3
+ pytest>=8.0.0
4
+ pytest-vcr>=1.0.2
5
+ python-dotenv>=1.0.0
6
+ black
7
+ bandit
8
+ twine
9
+ pytest>=8.3.4
@@ -0,0 +1,220 @@
1
+ # This file was autogenerated by uv via the following command:
2
+ # uv pip compile pyproject.toml -o requirements.txt
3
+ aiohappyeyeballs==2.4.4
4
+ # via aiohttp
5
+ aiohttp==3.11.11
6
+ # via
7
+ # composio-core
8
+ # litellm
9
+ aiosignal==1.3.2
10
+ # via aiohttp
11
+ annotated-types==0.7.0
12
+ # via pydantic
13
+ anyio==4.7.0
14
+ # via
15
+ # httpx
16
+ # openai
17
+ # starlette
18
+ appdirs==1.4.4
19
+ # via versionhq (pyproject.toml)
20
+ attrs==24.3.0
21
+ # via
22
+ # aiohttp
23
+ # jsonschema
24
+ # referencing
25
+ bcrypt==4.2.1
26
+ # via paramiko
27
+ certifi==2024.12.14
28
+ # via
29
+ # httpcore
30
+ # httpx
31
+ # requests
32
+ # sentry-sdk
33
+ cffi==1.17.1
34
+ # via
35
+ # cryptography
36
+ # pynacl
37
+ charset-normalizer==3.4.1
38
+ # via requests
39
+ click==8.1.8
40
+ # via
41
+ # composio-core
42
+ # litellm
43
+ # uvicorn
44
+ composio==0.1.0
45
+ # via versionhq (pyproject.toml)
46
+ composio-core==0.6.9
47
+ # via composio-openai
48
+ composio-openai==0.6.9
49
+ # via versionhq (pyproject.toml)
50
+ cryptography==44.0.0
51
+ # via paramiko
52
+ distro==1.9.0
53
+ # via openai
54
+ fastapi==0.115.6
55
+ # via composio-core
56
+ filelock==3.16.1
57
+ # via huggingface-hub
58
+ frozenlist==1.5.0
59
+ # via
60
+ # aiohttp
61
+ # aiosignal
62
+ fsspec==2024.12.0
63
+ # via huggingface-hub
64
+ h11==0.14.0
65
+ # via
66
+ # httpcore
67
+ # uvicorn
68
+ httpcore==1.0.7
69
+ # via httpx
70
+ httpx==0.27.2
71
+ # via
72
+ # litellm
73
+ # openai
74
+ huggingface-hub==0.27.0
75
+ # via tokenizers
76
+ idna==3.10
77
+ # via
78
+ # anyio
79
+ # httpx
80
+ # requests
81
+ # yarl
82
+ importlib-metadata==8.5.0
83
+ # via
84
+ # composio-core
85
+ # litellm
86
+ inflection==0.5.1
87
+ # via composio-core
88
+ jinja2==3.1.5
89
+ # via litellm
90
+ jiter==0.8.2
91
+ # via openai
92
+ json-repair==0.35.0
93
+ # via versionhq (pyproject.toml)
94
+ jsonref==1.1.0
95
+ # via composio-core
96
+ jsonschema==4.23.0
97
+ # via
98
+ # composio-core
99
+ # litellm
100
+ jsonschema-specifications==2024.10.1
101
+ # via jsonschema
102
+ litellm==1.56.5
103
+ # via versionhq (pyproject.toml)
104
+ markdown-it-py==3.0.0
105
+ # via rich
106
+ markupsafe==3.0.2
107
+ # via
108
+ # jinja2
109
+ # werkzeug
110
+ mdurl==0.1.2
111
+ # via markdown-it-py
112
+ multidict==6.1.0
113
+ # via
114
+ # aiohttp
115
+ # yarl
116
+ openai==1.58.1
117
+ # via
118
+ # versionhq (pyproject.toml)
119
+ # composio-openai
120
+ # litellm
121
+ packaging==24.2
122
+ # via huggingface-hub
123
+ paramiko==3.5.0
124
+ # via composio-core
125
+ propcache==0.2.1
126
+ # via
127
+ # aiohttp
128
+ # yarl
129
+ pycparser==2.22
130
+ # via cffi
131
+ pydantic==2.10.4
132
+ # via
133
+ # versionhq (pyproject.toml)
134
+ # composio-core
135
+ # fastapi
136
+ # litellm
137
+ # openai
138
+ pydantic-core==2.27.2
139
+ # via pydantic
140
+ pygments==2.18.0
141
+ # via rich
142
+ pynacl==1.5.0
143
+ # via paramiko
144
+ pyperclip==1.9.0
145
+ # via composio-core
146
+ pysher==1.0.8
147
+ # via composio-core
148
+ python-dotenv==1.0.1
149
+ # via
150
+ # versionhq (pyproject.toml)
151
+ # litellm
152
+ pyyaml==6.0.2
153
+ # via huggingface-hub
154
+ referencing==0.35.1
155
+ # via
156
+ # jsonschema
157
+ # jsonschema-specifications
158
+ regex==2024.11.6
159
+ # via
160
+ # versionhq (pyproject.toml)
161
+ # tiktoken
162
+ requests==2.32.3
163
+ # via
164
+ # versionhq (pyproject.toml)
165
+ # composio-core
166
+ # huggingface-hub
167
+ # pysher
168
+ # tiktoken
169
+ rich==13.9.4
170
+ # via composio-core
171
+ rpds-py==0.22.3
172
+ # via
173
+ # jsonschema
174
+ # referencing
175
+ semver==3.0.2
176
+ # via composio-core
177
+ sentry-sdk==2.19.2
178
+ # via composio-core
179
+ setuptools==75.6.0
180
+ # via versionhq (pyproject.toml)
181
+ sniffio==1.3.1
182
+ # via
183
+ # anyio
184
+ # httpx
185
+ # openai
186
+ starlette==0.41.3
187
+ # via fastapi
188
+ tiktoken==0.8.0
189
+ # via litellm
190
+ tokenizers==0.21.0
191
+ # via litellm
192
+ tqdm==4.67.1
193
+ # via
194
+ # huggingface-hub
195
+ # openai
196
+ typing==3.10.0.0
197
+ # via versionhq (pyproject.toml)
198
+ typing-extensions==4.12.2
199
+ # via
200
+ # fastapi
201
+ # huggingface-hub
202
+ # openai
203
+ # pydantic
204
+ # pydantic-core
205
+ urllib3==2.3.0
206
+ # via
207
+ # requests
208
+ # sentry-sdk
209
+ uvicorn==0.34.0
210
+ # via composio-core
211
+ websocket-client==1.8.0
212
+ # via pysher
213
+ werkzeug==3.1.3
214
+ # via versionhq (pyproject.toml)
215
+ wheel==0.45.1
216
+ # via versionhq (pyproject.toml)
217
+ yarl==1.18.3
218
+ # via aiohttp
219
+ zipp==3.21.0
220
+ # via importlib-metadata
@@ -17,7 +17,7 @@ from versionhq.team.model import Team, TeamOutput
17
17
  from versionhq.tool.model import Tool
18
18
 
19
19
 
20
- __version__ = "1.1.7.5"
20
+ __version__ = "1.1.7.8"
21
21
  __all__ = [
22
22
  "Agent",
23
23
  "Customer",
@@ -2,9 +2,7 @@ from typing import Any, Dict, Type
2
2
  from pydantic import BaseModel
3
3
 
4
4
 
5
- def process_config(
6
- values_to_update: Dict[str, Any], model_class: Type[BaseModel]
7
- ) -> Dict[str, Any]:
5
+ def process_config(values_to_update: Dict[str, Any], model_class: Type[BaseModel]) -> Dict[str, Any]:
8
6
  """
9
7
  Process the config dictionary and update the values accordingly.
10
8
  Refer to the Pydantic model class for field validation.
@@ -15,7 +13,7 @@ def process_config(
15
13
  else:
16
14
  return values_to_update
17
15
 
18
- # copy values from config to the model's attributes if the attribute isn't already set.
16
+
19
17
  for key, value in config.items():
20
18
  if key not in model_class.model_fields or values_to_update.get(key) is not None:
21
19
  continue
@@ -1,7 +1,7 @@
1
1
  import os
2
2
  import uuid
3
3
  from abc import ABC
4
- from typing import Any, Dict, List, Optional, TypeVar, Union
4
+ from typing import Any, Dict, List, Optional, TypeVar
5
5
  from dotenv import load_dotenv
6
6
  from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator
7
7
  from pydantic_core import PydanticCustomError
@@ -22,7 +22,7 @@ load_dotenv(override=True)
22
22
  T = TypeVar("T", bound="Agent")
23
23
 
24
24
 
25
- # def _format_answer(agent, answer: str) -> Union[AgentAction, AgentFinish]:
25
+ # def _format_answer(agent, answer: str) -> AgentAction | AgentFinish:
26
26
  # return AgentParser(agent=agent).parse(answer)
27
27
 
28
28
  # def mock_agent_ops_provider():
@@ -85,7 +85,7 @@ class Agent(ABC, BaseModel):
85
85
  """
86
86
 
87
87
  __hash__ = object.__hash__
88
- _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=False))
88
+ _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
89
89
  _rpm_controller: Optional[RPMController] = PrivateAttr(default=None)
90
90
  _request_within_rpm_limit: Any = PrivateAttr(default=None)
91
91
  _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
@@ -111,8 +111,8 @@ class Agent(ABC, BaseModel):
111
111
  step_callback: Optional[Any] = Field(default=None,description="Callback to be executed after each step of the agent execution")
112
112
 
113
113
  # llm settings cascaded to the LLM model
114
- llm: Union[str, InstanceOf[LLM], Any] = Field(default=None)
115
- function_calling_llm: Union[str, InstanceOf[LLM], Any] = Field(default=None)
114
+ llm: str | InstanceOf[LLM] | Any = Field(default=None)
115
+ function_calling_llm: str | InstanceOf[LLM] | Any = Field(default=None)
116
116
  respect_context_window: bool = Field(default=True,description="Keep messages under the context window size by summarizing content")
117
117
  max_tokens: Optional[int] = Field(default=None, description="max. number of tokens for the agent's execution")
118
118
  max_execution_time: Optional[int] = Field(default=None, description="max. execution time for an agent to execute a task")
@@ -327,7 +327,7 @@ class Agent(ABC, BaseModel):
327
327
  messages = []
328
328
  messages.append({"role": "user", "content": prompts}) #! REFINEME
329
329
  messages.append({"role": "assistant", "content": self.backstory})
330
- print("Messages sent to the model:", messages)
330
+ self._logger.log(level="info", message=f"Messages sent to the model: {messages}", color="blue")
331
331
 
332
332
  callbacks = kwargs.get("callbacks", None)
333
333
 
@@ -338,7 +338,7 @@ class Agent(ABC, BaseModel):
338
338
  callbacks=callbacks,
339
339
  )
340
340
  task_execution_counter += 1
341
- print("Agent's #1 res: ", response)
341
+ self._logger.log(level="info", message=f"Agent's first response: {response}", color="blue")
342
342
 
343
343
  if (response is None or response == "") and task_execution_counter < self.max_retry_limit:
344
344
  while task_execution_counter <= self.max_retry_limit:
@@ -349,10 +349,10 @@ class Agent(ABC, BaseModel):
349
349
  callbacks=callbacks,
350
350
  )
351
351
  task_execution_counter += 1
352
- print(f"Agent's #{task_execution_counter} res: ", response)
352
+ self._logger.log(level="info", message=f"Agent's next response: {response}", color="blue")
353
353
 
354
354
  elif response is None or response == "":
355
- print("Received None or empty response from LLM call.")
355
+ self._logger.log(level="error", message="Received None or empty response from the model", color="red")
356
356
  raise ValueError("Invalid response from LLM call - None or empty.")
357
357
 
358
358
  return {"output": response.output if hasattr(response, "output") else response}
@@ -1,5 +1,5 @@
1
1
  import re
2
- from typing import Any, Union
2
+ from typing import Any
3
3
  from json_repair import repair_json
4
4
 
5
5
  from versionhq._utils.i18n import I18N
@@ -70,7 +70,7 @@ class AgentParser:
70
70
  def __init__(self, agent: Any):
71
71
  self.agent = agent
72
72
 
73
- def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
73
+ def parse(self, text: str) -> AgentAction | AgentFinish:
74
74
  thought = self._extract_thought(text)
75
75
  includes_answer = FINAL_ANSWER_ACTION in text
76
76
  regex = (
@@ -1,8 +1,8 @@
1
1
  import uuid
2
2
  from abc import ABC
3
3
  from datetime import date, datetime, time, timedelta
4
- from typing import Any, Dict, List, Union, Callable, Type, Optional, get_args, get_origin
5
- from pydantic import UUID4, InstanceOf, BaseModel, ConfigDict, Field, create_model, field_validator, model_validator
4
+ from typing import Any, Dict, List, Callable, Type, Optional, get_args, get_origin
5
+ from pydantic import UUID4, InstanceOf, BaseModel, ConfigDict, Field, field_validator, model_validator
6
6
  from pydantic_core import PydanticCustomError
7
7
 
8
8
  from versionhq.clients.product.model import Product
@@ -12,7 +12,7 @@ from versionhq.team.model import Team
12
12
 
13
13
 
14
14
  class ScoreFormat:
15
- def __init__(self, rate: Union[float, int] = 0, weight: int = 1):
15
+ def __init__(self, rate: float | int = 0, weight: int = 1):
16
16
  self.rate = rate
17
17
  self.weight = weight
18
18
  self.aggregate = rate * weight
@@ -39,7 +39,7 @@ class Score:
39
39
 
40
40
 
41
41
  def result(self) -> int:
42
- aggregate_score = self.brand_tone.aggregate + self.audience.aggregate + self.track_record.aggregate
42
+ aggregate_score = int(self.brand_tone.aggregate) + int(self.audience.aggregate) + int(self.track_record.aggregate)
43
43
  denominator = self.brand_tone.weight + self.audience.weight + self.track_record.weight
44
44
 
45
45
  for k, v in self.kwargs.items():
@@ -57,11 +57,12 @@ class MessagingComponent(ABC, BaseModel):
57
57
  layer_id: int = Field(default=0, description="add id of the layer: 0, 1, 2")
58
58
  message: str = Field(default=None, max_length=1024, description="text message content to be sent")
59
59
  interval: Optional[str] = Field(
60
- default=None,description="interval to move on to the next layer. if this is the last layer, set as `None`")
61
- score: Union[float, InstanceOf[Score]] = Field(default=None)
60
+ default=None, description="interval to move on to the next layer. if this is the last layer, set as `None`"
61
+ )
62
+ score: float | InstanceOf[Score] = Field(default=None)
62
63
 
63
64
 
64
- def store_scoring_result(self, scoring_subject: str, score_raw: Union[int, Score, ScoreFormat] = None):
65
+ def store_scoring_result(self, scoring_subject: str, score_raw: int | Score | ScoreFormat = None):
65
66
  """
66
67
  Set up the `score` field
67
68
  """
@@ -109,8 +110,11 @@ class MessagingWorkflow(ABC, BaseModel):
109
110
  product: InstanceOf[Product] = Field(default=None)
110
111
  customer: InstanceOf[Customer] = Field(default=None)
111
112
 
112
- metrics: Union[List[Dict[str, Any]], List[str]] = Field(
113
- default=None, max_length=256, description="store metrics that used to predict and track the performance of this workflow.")
113
+ metrics: List[Dict[str, Any]] | List[str] = Field(
114
+ default=None,
115
+ max_length=256,
116
+ description="store metrics that used to predict and track the performance of this workflow."
117
+ )
114
118
 
115
119
 
116
120
  @property
@@ -7,7 +7,7 @@ import litellm
7
7
  from dotenv import load_dotenv
8
8
  from litellm import get_supported_openai_params
9
9
  from contextlib import contextmanager
10
- from typing import Any, Dict, List, Optional, Union
10
+ from typing import Any, Dict, List, Optional
11
11
 
12
12
  from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES
13
13
  from versionhq.task import TaskOutputFormat
@@ -103,7 +103,7 @@ class LLM:
103
103
  def __init__(
104
104
  self,
105
105
  model: str,
106
- timeout: Optional[Union[float, int]] = None,
106
+ timeout: Optional[float | int] = None,
107
107
  max_tokens: Optional[int] = None,
108
108
  max_completion_tokens: Optional[int] = None,
109
109
  context_window_size: Optional[int] = DEFAULT_CONTEXT_WINDOW,
@@ -111,7 +111,7 @@ class LLM:
111
111
  temperature: Optional[float] = None,
112
112
  top_p: Optional[float] = None,
113
113
  n: Optional[int] = None,
114
- stop: Optional[Union[str, List[str]]] = None,
114
+ stop: Optional[str | List[str]] = None,
115
115
  presence_penalty: Optional[float] = None,
116
116
  frequency_penalty: Optional[float] = None,
117
117
  logit_bias: Optional[Dict[int, float]] = None,