versionhq 1.1.10.2__tar.gz → 1.1.10.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/PKG-INFO +1 -1
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/pyproject.toml +1 -1
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/__init__.py +1 -1
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/agent/model.py +1 -19
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/llm/llm_vars.py +1 -1
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/llm/model.py +14 -6
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/task/model.py +36 -58
- versionhq-1.1.10.3/src/versionhq/task/structured_response.py +142 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq.egg-info/PKG-INFO +1 -1
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq.egg-info/SOURCES.txt +1 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/tests/agent/agent_test.py +29 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/tests/llm/llm_test.py +0 -1
- versionhq-1.1.10.3/tests/task/__init__.py +53 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/tests/task/task_test.py +22 -87
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/tests/tool/tool_test.py +0 -5
- versionhq-1.1.10.2/tests/tool/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/.github/workflows/publish.yml +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/.github/workflows/publish_testpypi.yml +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/.github/workflows/run_tests.yml +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/.github/workflows/security_check.yml +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/.gitignore +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/.pre-commit-config.yaml +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/.python-version +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/LICENSE +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/README.md +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/SECURITY.md +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/db/preprocess.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/requirements-dev.txt +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/requirements.txt +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/runtime.txt +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/setup.cfg +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/_utils/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/_utils/i18n.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/_utils/logger.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/_utils/process_config.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/_utils/rpm_controller.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/_utils/usage_metrics.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/agent/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/agent/parser.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/cli/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/clients/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/clients/customer/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/clients/customer/model.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/clients/product/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/clients/product/model.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/clients/workflow/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/clients/workflow/model.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/llm/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/storage/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/storage/task_output_storage.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/task/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/task/formatter.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/task/log_handler.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/team/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/team/model.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/team/team_planner.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/tool/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/tool/cache_handler.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/tool/composio_tool.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/tool/composio_tool_vars.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/tool/decorator.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/tool/model.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq/tool/tool_handler.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq.egg-info/dependency_links.txt +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq.egg-info/requires.txt +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/src/versionhq.egg-info/top_level.txt +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/tests/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/tests/agent/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/tests/cli/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/tests/clients/customer_test.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/tests/clients/product_test.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/tests/clients/workflow_test.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/tests/conftest.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/tests/llm/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/tests/team/Prompts/Demo_test.py +0 -0
- {versionhq-1.1.10.2/tests/task → versionhq-1.1.10.3/tests/team}/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/tests/team/team_test.py +0 -0
- {versionhq-1.1.10.2/tests/team → versionhq-1.1.10.3/tests/tool}/__init__.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/tests/tool/composio_test.py +0 -0
- {versionhq-1.1.10.2 → versionhq-1.1.10.3}/uv.lock +0 -0
pyproject.toml
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__"]
 
 [project]
 name = "versionhq"
-version = "1.1.10.2"
+version = "1.1.10.3"
 authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
 description = "LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows"
 readme = "README.md"
src/versionhq/agent/model.py
@@ -390,7 +390,7 @@ class Agent(BaseModel):
         return raw_response
 
 
-    def execute_task(self, task, context: Optional[str] = None, task_tools: Optional[List[Tool | ToolSet]] =
+    def execute_task(self, task, context: Optional[str] = None, task_tools: Optional[List[Tool | ToolSet]] = list()) -> str:
         """
         Execute the task and return the response in string.
         The agent utilizes the tools in task or their own tools if the task.can_use_agent_tools is True.
@@ -405,24 +405,6 @@ class Agent(BaseModel):
         if context is not task.prompt_context:
             task_prompt += context
 
-        # if agent_tools_to_run_without_llm:
-        # tool_results = []
-        # for item in agent_tools_to_run_without_llm:
-        # if isinstance(item, ToolSet):
-        # tool_result = item.tool.run(**item.kwargs)
-        # tool_results.append(tool_result)
-        # elif isinstance(item, Tool):
-        # tool_result = item.run()
-        # tool_results.append(tool_result)
-        # else:
-        # try:
-        # item.run()
-        # except:
-        # pass
-
-        # if task.tool_res_as_final is True:
-        # return tool_results
-
         # if self.team and self.team._train:
         # task_prompt = self._training_handler(task_prompt=task_prompt)
         # else:
src/versionhq/llm/model.py
@@ -196,6 +196,7 @@ class LLM(BaseModel):
         """
         Execute LLM based on the agent's params and model params.
         """
+        litellm.drop_params = True
 
         with suppress_warnings():
             if len(self.callbacks) > 0:
@@ -206,7 +207,7 @@ class LLM(BaseModel):
                 self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]
 
             if response_format:
-                self.response_format = { "type": "json_object" } if
+                self.response_format = { "type": "json_object" } if tool_res_as_final else response_format
 
             provider = self.provider if self.provider else "openai"
 
@@ -227,6 +228,7 @@ class LLM(BaseModel):
             res = litellm.completion(messages=messages, stream=False, **params)
 
             if self.tools:
+                messages.append(res["choices"][0]["message"])
                 tool_calls = res["choices"][0]["message"]["tool_calls"]
                 tool_res = ""
 
@@ -242,18 +244,24 @@ class LLM(BaseModel):
                         tool_instance = tool.tool
                         args = tool.kwargs
                         res = tool_instance.run(params=args)
-
+
+                        if tool_res_as_final:
+                            tool_res += str(res)
+                        else:
+                            messages.append({ "role": "tool", "tool_call_id": item.id, "content": str(res) })
 
                     elif (isinstance(tool, Tool) or type(tool) == Tool) and (tool.name.replace(" ", "_") == func_name or tool.func.__name__ == func_name):
                         res = tool.run(params=func_args)
-
+                        if tool_res_as_final:
+                            tool_res += str(res)
+                        else:
+                            messages.append({ "role": "tool", "tool_call_id": item.id, "content": str(res) })
 
-                if tool_res_as_final
+                if tool_res_as_final:
                     return tool_res
-                    pass
 
             else:
-                messages
+                print(messages)
                 res = litellm.completion(messages=messages, stream=False, **params)
 
             return res["choices"][0]["message"]["content"]
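For context on the llm/model.py change above: the added lines follow the standard OpenAI-style tool-calling round trip that litellm exposes, where the assistant message carrying `tool_calls` and one `{"role": "tool", ...}` message per executed call are appended before requesting the final completion. A minimal, self-contained sketch of that pattern (illustrative only; the model name and the `add` tool are made up and are not versionhq code):

import json
import litellm

def add(a: int, b: int) -> int:
    return a + b

tools = [{
    "type": "function",
    "function": {
        "name": "add",
        "description": "Add two integers.",
        "parameters": {
            "type": "object",
            "properties": {"a": {"type": "integer"}, "b": {"type": "integer"}},
            "required": ["a", "b"],
        },
    },
}]

messages = [{"role": "user", "content": "What is 2 + 3? Use the add tool."}]
res = litellm.completion(model="gpt-4o-mini", messages=messages, tools=tools)

# Keep the assistant turn that contains tool_calls, then feed each tool result back.
messages.append(res["choices"][0]["message"])
for call in res["choices"][0]["message"]["tool_calls"]:
    result = add(**json.loads(call.function.arguments))
    messages.append({"role": "tool", "tool_call_id": call.id, "content": str(result)})

final = litellm.completion(model="gpt-4o-mini", messages=messages, tools=tools)
print(final["choices"][0]["message"]["content"])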
src/versionhq/task/model.py
@@ -4,10 +4,10 @@ import datetime
 import uuid
 from concurrent.futures import Future
 from hashlib import md5
-from typing import Any, Dict, List, Set, Optional, Tuple, Callable, Type
+from typing import Any, Dict, List, Set, Optional, Tuple, Callable, Type, TypeVar
 from typing_extensions import Annotated, Self
 
-from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf
+from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf, field_validator
 from pydantic_core import PydanticCustomError
 
 from versionhq._utils.process_config import process_config
@@ -96,8 +96,8 @@ class ResponseField(BaseModel):
         for item in self.properties:
             p.update(**item._format_props())
 
-            if item.required:
-
+            # if item.required:
+            r.append(item.title)
 
         props = {
             "type": schema_type,
@@ -161,14 +161,13 @@ class ResponseField(BaseModel):
 
 class TaskOutput(BaseModel):
     """
-
-    Depending on the task output format, use `raw`, `pydantic`, `json_dict` accordingly.
+    A class to store the final output of the given task in raw (string), json_dict, and pydantic class formats.
     """
 
     task_id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="store Task ID")
     raw: str = Field(default="", description="Raw output of the task")
     json_dict: Dict[str, Any] = Field(default=None, description="`raw` converted to dictionary")
-    pydantic: Optional[Any] = Field(default=None
+    pydantic: Optional[Any] = Field(default=None)
     tool_output: Optional[Any] = Field(default=None, description="store tool result when the task takes tool output as its final output")
 
     def __str__(self) -> str:
@@ -256,7 +255,7 @@ class Task(BaseModel):
 
     @model_validator(mode="before")
     @classmethod
-    def
+    def process_config(cls, values: Dict[str, Any]) -> None:
        return process_config(values_to_update=values, model_class=cls)
 
 
@@ -276,16 +275,16 @@ class Task(BaseModel):
         return self
 
 
-    @model_validator(mode="after")
-    def set_attributes_based_on_config(self) -> Self:
-
-
-
+    # @model_validator(mode="after")
+    # def set_attributes_based_on_config(self) -> Self:
+    #     """
+    #     Set attributes based on the task configuration.
+    #     """
 
-
-
-
-
+    #     if self.config:
+    #         for key, value in self.config.items():
+    #             setattr(self, key, value)
+    #     return self
 
 
     @model_validator(mode="after")
@@ -322,7 +321,7 @@ class Task(BaseModel):
         if self.pydantic_custom_output:
             output_prompt = f"""
 Your response MUST STRICTLY follow the given repsonse format:
-JSON schema: {str(
+JSON schema: {str(self.pydantic_custom_output)}
 """
 
         elif self.response_fields:
@@ -380,16 +379,13 @@ Ref. Output image: {output_formats_to_follow}
 
     def _structure_response_format(self, data_type: str = "object", model_provider: str = "gemini") -> Dict[str, Any] | None:
         """
-
-
-        - SDK objects from `pydantic_custom_output`.
-        OpenAI:
-        https://platform.openai.com/docs/guides/structured-outputs?context=ex1#function-calling-vs-response-format
-        https://platform.openai.com/docs/guides/structured-outputs?context=with_parse#some-type-specific-keywords-are-not-yet-supported
-        Gemini:
+        Structure a response format either from`response_fields` or `pydantic_custom_output`.
+        1 nested item is accepted.
         """
 
-
+        from versionhq.task.structured_response import StructuredOutput
+
+        response_format: Dict[str, Any] = None
 
         if self.response_fields:
             properties, required_fields = {}, []
@@ -406,37 +402,19 @@ Ref. Output image: {output_formats_to_follow}
                 "type": "object",
                 "properties": properties,
                 "required": required_fields,
-                "additionalProperties": False,
+                "additionalProperties": False,
             }
 
-
-
-
-                **self.pydantic_custom_output.model_json_schema(),
-                "additionalProperties": False,
-                "required": [k for k, v in self.pydantic_custom_output.__fields__.items()],
-                "strict": True,
+            response_format = {
+                "type": "json_schema",
+                "json_schema": { "name": "outcome", "schema": response_schema }
             }
 
 
-
-
-            return {
-                "type": data_type,
-                "response_schema": response_schema,
-                "enforce_validation": True
-            }
+        elif self.pydantic_custom_output:
+            response_format = StructuredOutput(response_format=self.pydantic_custom_output)._format()
 
-
-            if self.pydantic_custom_output:
-                return self.pydantic_custom_output
-            else:
-                return {
-                    "type": "json_schema",
-                    "json_schema": { "name": "outcome", "strict": True, "schema": response_schema },
-                }
-        else:
-            return None
+        return response_format
 
 
     def _create_json_output(self, raw: str) -> Dict[str, Any]:
@@ -477,17 +455,16 @@ Ref. Output image: {output_formats_to_follow}
 
     def _create_pydantic_output(self, raw: str = None, json_dict: Dict[str, Any] = None) -> InstanceOf[BaseModel]:
         """
-        Create pydantic output from
+        Create pydantic output from raw or json_dict output.
         """
 
-        output_pydantic =
-        json_dict = json_dict
+        output_pydantic = self.pydantic_custom_output
 
         try:
-            if
-                json_dict = self._create_json_output(raw=raw)
+            json_dict = json_dict if json_dict else self._create_json_output(raw=raw)
 
-
+            for k, v in json_dict.items():
+                setattr(output_pydantic, k, v)
 
         except:
             pass
@@ -600,13 +577,14 @@ Ref. Output image: {output_formats_to_follow}
         if self.callback:
             self.callback({ **self.callback_kwargs, **self.output.__dict__ })
 
-        # if self.output_file:
+        # if self.output_file: ## disabled for now
         #     content = (
         #         json_output
         #         if json_output
         #         else pydantic_output.model_dump_json() if pydantic_output else result
         #     )
         #     self._save_file(content)
+
         ended_at = datetime.datetime.now()
         self.execution_span_in_sec = (ended_at - started_at).total_seconds()
 
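The practical effect of the `_structure_response_format()` rewrite above is that both the `response_fields` path and the `pydantic_custom_output` path now converge on the same OpenAI-style `json_schema` payload. Roughly the following shape (field names here are invented for illustration and are not versionhq defaults):

# Sketch of the dict handed to the LLM call when response_fields are defined.
response_format = {
    "type": "json_schema",
    "json_schema": {
        "name": "outcome",
        "schema": {
            "type": "object",
            "properties": {
                "summary": {"type": "string"},
                "score": {"type": "number"},
            },
            "required": ["summary", "score"],
            "additionalProperties": False,
        },
    },
}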
versionhq-1.1.10.3/src/versionhq/task/structured_response.py (new file)
@@ -0,0 +1,142 @@
+#! FIXME
+from typing import Dict, Optional, Type, List, Any, TypeVar
+
+from pydantic import BaseModel, Field, InstanceOf
+
+from versionhq.llm.llm_vars import SchemaType
+from versionhq.llm.model import LLM
+
+
+"""
+Structure a response schema (json schema) from the given Pydantic model.
+"""
+
+
+class StructuredObject:
+    """
+    A class to store the structured dictionary.
+    """
+    provider: str = "openai"
+    field: Type[Field]
+
+    title: str
+    dtype: str = "object"
+    properties: Dict[str, Dict[str, str]] = dict()
+    required: List[str] = list()
+    additionalProperties: bool = False
+
+    def __init__(self, name, field: Type[Field], provider: str | InstanceOf[LLM] = "openai"):
+        self.title = name
+        self.field = field
+        self.dtype = "object"
+        self.additionalProperties = False
+        self.provider = provider if isinstance(provider, str) else provider.provider
+
+    def _format(self):
+        if not self.field:
+            pass
+        else:
+            description = self.field.description if hasattr(self.field, "description") and self.field.description is not None else ""
+            self.properties.update({"item": { "type": SchemaType(self.field.annotation.__args__).convert() }})
+            self.required.append("item")
+
+            return {
+                self.title: {
+                    "type": self.dtype,
+                    "description": description,
+                    "properties": self.properties,
+                    "additionalProperties": self.additionalProperties,
+                    "required": self.required
+                }
+            }
+
+
+
+class StructuredList:
+    """
+    A class to store a structured list with 1 nested object.
+    """
+    provider: str = "openai"
+    field: Type[Field]
+    title: str = ""
+    dtype: str = "array"
+    items: Dict[str, Dict[str, str]] = dict()
+
+    def __init__(self, name, field: Type[Field], provider: str | LLM = "openai"):
+        self.provider = provider if isinstance(provider, str) else provider.provider
+        self.field = field
+        self.title = name
+        self.dtype = "array"
+        self.items = dict()
+
+
+    def _format(self):
+        field = self.field
+        if not field:
+            pass
+        else:
+            description = "" if field.description is None else field.description
+            props = {}
+
+            for item in field.annotation.__args__:
+                nested_object_type = item.__origin__ if hasattr(item, "__origin__") else item
+
+                if nested_object_type == dict:
+                    props.update({
+                        "nest": {
+                            "type": "object",
+                            "properties": { "item": { "type": "string"} }, #! REFINEME - field title <>`item`
+                            "required": ["item",],
+                            "additionalProperties": False
+                        }})
+
+                elif nested_object_type == list:
+                    props.update({
+                        "nest": {
+                            "type": "array",
+                            "items": { "item": { "type": "string" } }, #! REFINEME - field title <>`item`
+                        }})
+                else:
+                    props.update({ "nest": { "type": SchemaType(nested_object_type).convert() }})
+
+            self.items = { **props }
+            return {
+                self.title: {
+                    "type": self.dtype,
+                    "description": description,
+                    "items": self.items,
+                }
+            }
+
+
+
+
+class StructuredOutput(BaseModel):
+    response_format: Any = None
+    provider: str = "openai"
+    applicable_models: List[InstanceOf[LLM] | str] = list()
+    name: str = ""
+    schema: Dict[str, Any] = dict(type="object", additionalProperties=False, properties=dict(), required=list())
+
+
+    def _format(self, **kwargs):
+        if self.response_format is None:
+            pass
+
+        self.name = self.response_format.__name__
+
+        for name, field in self.response_format.model_fields.items():
+            self.schema["required"].append(name)
+
+            if hasattr(field.annotation, "__origin__") and field.annotation.__origin__ == dict:
+                self.schema["properties"].update(StructuredObject(name=name, field=field)._format())
+
+            elif hasattr(field.annotation, "__origin__") and field.annotation.__origin__ == list:
+                self.schema["properties"].update(StructuredList(name=name, field=field)._format())
+            else:
+                self.schema["properties"].update({ name: { "type": SchemaType(field.annotation).convert(), **kwargs }})
+
+        return {
+            "type": "json_schema",
+            "json_schema": { "name": self.name, "schema": self.schema }
+        }
src/versionhq.egg-info/SOURCES.txt
@@ -48,6 +48,7 @@ src/versionhq/task/__init__.py
 src/versionhq/task/formatter.py
 src/versionhq/task/log_handler.py
 src/versionhq/task/model.py
+src/versionhq/task/structured_response.py
 src/versionhq/team/__init__.py
 src/versionhq/team/model.py
 src/versionhq/team/team_planner.py
tests/agent/agent_test.py
@@ -1,4 +1,6 @@
 import os
+from unittest import mock
+from unittest.mock import patch
 import pytest
 from typing import Callable, Any
 
@@ -6,6 +8,7 @@ from versionhq.agent.model import Agent
 from versionhq.agent.TEMPLATES.Backstory import BACKSTORY_SHORT, BACKSTORY_FULL
 from versionhq.llm.model import LLM, DEFAULT_MODEL_NAME
 from versionhq.tool.model import Tool
+from versionhq.tool.decorator import tool
 
 MODEL_NAME = os.environ.get("DEFAULT_MODEL_NAME", "gpt-3.5-turbo")
 LITELLM_API_KEY = os.environ.get("LITELLM_API_KEY")
@@ -209,3 +212,29 @@ def test_agent_with_custom_tools():
     assert agent.tools[0] is tool
     assert agent.tools[0]._run(message="hi") == "hi_demo"
     assert agent.tools[0].name == "custom tool"
+
+
+# @pytest.mark.vcr(filter_headers=["authorization"])
+def test_agent_custom_max_iterations():
+    from versionhq.task.model import Task
+
+    @tool
+    def get_final_answer() -> int:
+        """Get the final answer but don't give it yet, just re-use this tool non-stop."""
+        return 42
+
+    agent = Agent(role="demo", goal="test goal", maxit=1, allow_delegation=False, tools=[get_final_answer])
+
+    with patch.object(
+        LLM, "call", wraps=LLM(model=DEFAULT_MODEL_NAME).call
+    ) as private_mock:
+        task = Task(
+            description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
+            can_use_agent_tools=True
+        )
+        agent.execute_task(task=task)
+        assert private_mock.call_count == 1
+
+
+if __name__ == "__main__":
+    test_agent_custom_max_iterations()
versionhq-1.1.10.3/tests/task/__init__.py (new file)
@@ -0,0 +1,53 @@
+from typing import Dict, Any
+
+from pydantic import BaseModel
+
+from versionhq.agent.model import Agent
+from versionhq.task.model import ResponseField
+from versionhq.llm.model import DEFAULT_MODEL_NAME, LLM
+
+
+class DemoChild(BaseModel):
+    """
+    A nested outcome class.
+    """
+    ch_1: str
+    ch_2: dict[str, str]
+
+
+class DemoOutcome(BaseModel):
+    """
+    A demo pydantic class to validate the outcome with various nested data types.
+    """
+    test0: int
+    test1: float
+    test2: str
+    test3: bool
+    test4: list[str]
+    test5: dict[str, Any]
+    test6: list[dict[str, Any]]
+    test8: list[list[str]]
+    # children: List[DemoChild]
+
+
+demo_response_fields = [
+    ResponseField(title="test0", data_type=int),
+    ResponseField(title="test1", data_type=str, required=True),
+    ResponseField(title="test2", data_type=list, items=str),
+    ResponseField(title="test3", data_type=list, items=dict, properties=[
+        ResponseField(title="nest1", data_type=str),
+        ResponseField(title="nest2", type=dict, properties=[ResponseField(title="test", data_type=str)])
+    ]),
+    ResponseField(title="test4", data_type=dict, properties=[ResponseField(title="ch", data_type=tuple)]),
+    ResponseField(title="test5", data_type=bool),
+    ResponseField(title="test6", data_type=list, items=Any, required=False),
+    # ResponseField(title="children", data_type=list, items=type(DemoChild)),
+]
+
+
+def create_base_agent(model: str | LLM | Dict[str, Any]) -> Agent:
+    agent = Agent(role="demo", goal="My amazing goals", llm=model, max_tokens=3000, maxit=1)
+    return agent
+
+
+base_agent = create_base_agent(model=DEFAULT_MODEL_NAME)
tests/task/task_test.py
@@ -8,93 +8,41 @@ from pydantic import BaseModel, Field, InstanceOf
 from versionhq.agent.model import Agent
 from versionhq.task.model import Task, ResponseField, TaskOutput, ConditionalTask
 from versionhq.tool.model import Tool, ToolSet
-from
-from versionhq.llm.model import DEFAULT_MODEL_NAME, LLM
+from tests.task import DemoOutcome, demo_response_fields, base_agent
 
 
-class DemoChild(BaseModel):
-    """
-    A nested outcome class.
-    """
-    ch_1: str
-    ch_2: dict[str, str]
-
-
-class DemoOutcome(BaseModel):
-    """
-    A demo pydantic class to validate the outcome with various nested data types.
-    """
-    test0: int
-    test1: float
-    test2: str
-    test3: bool
-    test4: list[str]
-    # test5: dict[str, Any]
-    # test6: list[dict[str, Any]]
-    test7: Optional[list[str]]
-
-    test8: list[list[str]]
-    # children: List[DemoChild]
-
-
-demo_nested_response_fields = [
-    ResponseField(title="test0", data_type=int),
-    ResponseField(title="test1", data_type=str, required=True),
-    ResponseField(title="test2", data_type=list, items=str),
-    ResponseField(title="test3", data_type=list, items=dict, properties=[
-        ResponseField(title="nest1", data_type=str),
-        ResponseField(title="nest2", type=dict, properties=[ResponseField(title="test", data_type=str)])
-    ]),
-    ResponseField(title="test4", data_type=dict, properties=[ResponseField(title="ch", data_type=tuple)]),
-    ResponseField(title="test5", data_type=bool),
-    ResponseField(title="test6", data_type=list, items=Any, required=False),
-    # ResponseField(title="children", data_type=list, items=type(DemoChild)),
-]
-
-
-def create_base_agent(model: str | LLM | Dict[str, Any]) -> Agent:
-    agent = Agent(role="demo", goal="My amazing goals", llm=model, max_tokens=3000)
-    return agent
-
-agent = create_base_agent(model=DEFAULT_MODEL_NAME)
-
 
 def test_sync_execute_task_with_pydantic_outcome():
     task = Task(
-        description="Output random values strictly following the given response
+        description="Output random values strictly following the data type defined in the given response format.",
         pydantic_custom_output=DemoOutcome
     )
-    res = task.execute_sync(agent=
+    res = task.execute_sync(agent=base_agent)
 
     assert isinstance(res, TaskOutput) and res.task_id is task.id
-    assert isinstance(res.raw, str)
-    assert
-    assert res.pydantic == DemoOutcome(**res.json_dict)
-    assert [v and type(v) is type(getattr(res.pydantic, k)) for k, v in res.pydantic.dict().items()]
-    # assert [isinstance(item.ch_1, str) and isinstance(item.ch_2, dict) for item in res.pydantic.children]
+    assert isinstance(res.raw, str) and isinstance(res.json_dict, dict)
+    assert [hasattr(res.pydantic, k) and getattr(res.pydantic, k) == v for k, v in res.json_dict.items()]
 
 
 def test_sync_execute_task_with_json_dict():
     task = Task(
-        description="Output random values strictly following the given response
-        response_fields=
+        description="Output random values strictly following the data type defined in the given response format.",
+        response_fields=demo_response_fields
     )
-    res = task.execute_sync(agent=
+    res = task.execute_sync(agent=base_agent)
 
     assert isinstance(res, TaskOutput) and res.task_id is task.id
     assert res.raw and isinstance(res.raw, str)
     assert res.pydantic is None
     assert res.json_dict and isinstance(res.json_dict, dict)
     assert [v and type(v) == task.response_fields[i].data_type for i, (k, v) in enumerate(res.json_dict.items())]
-    # assert [isinstance(item, DemoChild) and isinstance(item.ch_1, str) and isinstance(item.ch_2, dict)
-    #     for item in res.json_dict["children"]]
 
 
 def test_async_execute_task():
     task = Task(description="Return string: 'test'")
 
     with patch.object(Agent, "execute_task", return_value="test") as execute:
-        execution = task.execute_async(agent=
+        execution = task.execute_async(agent=base_agent)
         result = execution.result()
         assert result.raw == "test"
         execute.assert_called_once_with(task=task, context=None, task_tools=list())
@@ -118,7 +66,7 @@ def test_sync_execute_with_task_context():
         ],
         context=[sub_task,]
     )
-    res = main_task.execute_sync(agent=
+    res = main_task.execute_sync(agent=base_agent)
 
     assert isinstance(res, TaskOutput)
     assert res.task_id is main_task.id
@@ -129,7 +77,7 @@ def test_sync_execute_with_task_context():
     assert res.pydantic is None
     assert sub_task.output is not None
    assert sub_task.output.json_dict is not None
-    assert "subtask_result" in main_task.prompt(model_provider=
+    assert "subtask_result" in main_task.prompt(model_provider=base_agent.llm.provider)
 
 
 def test_sync_execute_task_with_prompt_context():
@@ -144,9 +92,7 @@ def test_sync_execute_task_with_prompt_context():
 
     sub_task = Task(
         description="return the output following the given prompt.",
-        response_fields=[
-            ResponseField(title="result", data_type=str, required=True),
-        ]
+        response_fields=[ResponseField(title="result", data_type=str, required=True),]
     )
     main_task = Task(
         description="return the output following the given prompt.",
@@ -157,20 +103,16 @@ def test_sync_execute_task_with_prompt_context():
         ],
         context=[sub_task]
     )
-    res = main_task.execute_sync(agent=
+    res = main_task.execute_sync(agent=base_agent, context="plan a Black Friday campaign.")
 
     assert isinstance(res, TaskOutput) and res.task_id is main_task.id
     assert res.raw and isinstance(res.raw, str)
     assert res.json_dict and isinstance(res.json_dict, dict)
-    assert res.pydantic ==
-
+    assert res.pydantic.test1 == res.json_dict["test1"] and res.pydantic.test2 == res.json_dict["test2"]
     assert sub_task.output is not None
-    assert
-    assert sub_task.output.pydantic is None
-
-    assert "result" in main_task.prompt(model_provider=agent.llm.provider)
+    assert "result" in main_task.prompt(model_provider=base_agent.llm.provider)
     assert main_task.prompt_context == "plan a Black Friday campaign."
-    assert "plan a Black Friday campaign." in main_task.prompt(model_provider=
+    assert "plan a Black Friday campaign." in main_task.prompt(model_provider=base_agent.llm.provider)
 
 
 def test_callback():
@@ -193,7 +135,7 @@ def test_callback():
         callback=callback_func,
         callback_kwargs=dict(added_condition="demo for pytest")
     )
-    res = task.execute_sync(agent=
+    res = task.execute_sync(agent=base_agent)
 
     assert res is not None
     assert isinstance(res, TaskOutput)
@@ -222,7 +164,7 @@ def test_conditional_task():
         description="erturn the output following the given prompt.",
         response_fields=[ResponseField(title="test1", data_type=str, required=True),],
     )
-    res = task.execute_sync(agent=
+    res = task.execute_sync(agent=base_agent)
 
     conditional_task = ConditionalTask(
         description="return the output following the given prompt.",
@@ -271,7 +213,6 @@ def test_task_with_agent_tools():
     class CustomTool(Tool):
         name: str = "custom tool"
 
-    custom_tool = CustomTool(func=demo_func)
     custom_tool = CustomTool(func=demo_func)
     agent.tools = [custom_tool]
     res = task.execute_sync(agent=agent)
@@ -297,18 +238,18 @@ def test_task_with_tools():
 
     custom_tool = CustomTool(func=random_func)
     task.tools = [custom_tool]
-    res = task.execute_sync(agent=
+    res = task.execute_sync(agent=base_agent)
     assert "_demo" in res.tool_output
 
     task.tools = [custom_tool]
-    res = task.execute_sync(agent=
+    res = task.execute_sync(agent=base_agent)
     assert res.tool_output is not None
 
 
 def test_task_without_response_format():
     task = Task(description="return a simple output with any random values.")
-    res = task.execute_sync(agent=
+    res = task.execute_sync(agent=base_agent)
 
     assert res and isinstance(res, TaskOutput)
     assert res.json_dict and isinstance(res.json_dict, dict)
@@ -332,10 +273,4 @@ def test_build_agent_without_developer_prompt():
 
 
 
-
-    test_task_with_tools()
-
-
-    # tool - use_llm = true -
-    # task - agent - maxit
-    # agents with multiple callbacks
+    # task - maxit, loop, rpm
tests/tool/tool_test.py
@@ -135,8 +135,3 @@ def test_cache_tool():
 
     assert my_tool.cache_handler.read(tool_name=my_tool.name, input=str({"x": 3})) == 5
     assert my_tool.cache_handler._cache[f"{my_tool.name}-{str({"x": 3})}"] == 5
-
-
-
-if __name__ =="__main__":
-    test_tool_handler_with_cache()