versionhq 1.1.10.2.tar.gz → 1.1.10.4.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/PKG-INFO +1 -1
  2. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/pyproject.toml +1 -1
  3. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/__init__.py +1 -1
  4. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/agent/model.py +4 -29
  5. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/llm/llm_vars.py +1 -1
  6. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/llm/model.py +14 -7
  7. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/task/model.py +41 -61
  8. versionhq-1.1.10.4/src/versionhq/task/structured_response.py +140 -0
  9. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq.egg-info/PKG-INFO +1 -1
  10. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq.egg-info/SOURCES.txt +1 -0
  11. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/agent/agent_test.py +29 -4
  12. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/llm/llm_test.py +0 -1
  13. versionhq-1.1.10.4/tests/task/__init__.py +53 -0
  14. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/task/task_test.py +67 -89
  15. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/tool/tool_test.py +0 -5
  16. versionhq-1.1.10.2/tests/tool/__init__.py +0 -0
  17. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/.github/workflows/publish.yml +0 -0
  18. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/.github/workflows/publish_testpypi.yml +0 -0
  19. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/.github/workflows/run_tests.yml +0 -0
  20. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/.github/workflows/security_check.yml +0 -0
  21. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/.gitignore +0 -0
  22. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/.pre-commit-config.yaml +0 -0
  23. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/.python-version +0 -0
  24. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/LICENSE +0 -0
  25. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/README.md +0 -0
  26. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/SECURITY.md +0 -0
  27. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/db/preprocess.py +0 -0
  28. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/requirements-dev.txt +0 -0
  29. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/requirements.txt +0 -0
  30. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/runtime.txt +0 -0
  31. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/setup.cfg +0 -0
  32. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/_utils/__init__.py +0 -0
  33. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/_utils/i18n.py +0 -0
  34. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/_utils/logger.py +0 -0
  35. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/_utils/process_config.py +0 -0
  36. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/_utils/rpm_controller.py +0 -0
  37. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/_utils/usage_metrics.py +0 -0
  38. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
  39. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
  40. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/agent/__init__.py +0 -0
  41. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/agent/parser.py +0 -0
  42. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/cli/__init__.py +0 -0
  43. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/clients/__init__.py +0 -0
  44. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/clients/customer/__init__.py +0 -0
  45. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/clients/customer/model.py +0 -0
  46. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/clients/product/__init__.py +0 -0
  47. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/clients/product/model.py +0 -0
  48. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/clients/workflow/__init__.py +0 -0
  49. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/clients/workflow/model.py +0 -0
  50. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/llm/__init__.py +0 -0
  51. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/storage/__init__.py +0 -0
  52. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/storage/task_output_storage.py +0 -0
  53. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/task/__init__.py +0 -0
  54. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/task/formatter.py +0 -0
  55. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/task/log_handler.py +0 -0
  56. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/team/__init__.py +0 -0
  57. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/team/model.py +0 -0
  58. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/team/team_planner.py +0 -0
  59. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/tool/__init__.py +0 -0
  60. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/tool/cache_handler.py +0 -0
  61. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/tool/composio_tool.py +0 -0
  62. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/tool/composio_tool_vars.py +0 -0
  63. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/tool/decorator.py +0 -0
  64. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/tool/model.py +0 -0
  65. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/tool/tool_handler.py +0 -0
  66. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq.egg-info/dependency_links.txt +0 -0
  67. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq.egg-info/requires.txt +0 -0
  68. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq.egg-info/top_level.txt +0 -0
  69. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/__init__.py +0 -0
  70. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/agent/__init__.py +0 -0
  71. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/cli/__init__.py +0 -0
  72. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/clients/customer_test.py +0 -0
  73. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/clients/product_test.py +0 -0
  74. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/clients/workflow_test.py +0 -0
  75. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/conftest.py +0 -0
  76. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/llm/__init__.py +0 -0
  77. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/team/Prompts/Demo_test.py +0 -0
  78. {versionhq-1.1.10.2/tests/task → versionhq-1.1.10.4/tests/team}/__init__.py +0 -0
  79. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/team/team_test.py +0 -0
  80. {versionhq-1.1.10.2/tests/team → versionhq-1.1.10.4/tests/tool}/__init__.py +0 -0
  81. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/tool/composio_test.py +0 -0
  82. {versionhq-1.1.10.2 → versionhq-1.1.10.4}/uv.lock +0 -0
{versionhq-1.1.10.2 → versionhq-1.1.10.4}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: versionhq
- Version: 1.1.10.2
+ Version: 1.1.10.4
  Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License

{versionhq-1.1.10.2 → versionhq-1.1.10.4}/pyproject.toml
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__"]

  [project]
  name = "versionhq"
- version = "1.1.10.2"
+ version = "1.1.10.4"
  authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
  description = "LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows"
  readme = "README.md"

{versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/__init__.py
@@ -18,7 +18,7 @@ from versionhq.tool.model import Tool
  from versionhq.tool.composio_tool import ComposioHandler


- __version__ = "1.1.10.2"
+ __version__ = "1.1.10.4"
  __all__ = [
  "Agent",
  "Customer",

{versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/agent/model.py
@@ -255,9 +255,9 @@ class Agent(BaseModel):
  llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
  llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens

- # if self.callbacks:
- # llm.callbacks = self.callbacks
- # llm._set_callbacks(llm.callbacks)
+ if self.callbacks:
+ llm.callbacks = self.callbacks
+ llm._set_callbacks(llm.callbacks)

  if self.respect_context_window == False:
  llm.context_window_size = DEFAULT_CONTEXT_WINDOW_SIZE
@@ -364,9 +364,6 @@
  task_execution_counter += 1
  self._logger.log(level="info", message=f"Agent response: {raw_response}", color="blue")

- if raw_response and self.callbacks:
- for item in self.callbacks:
- raw_response = item(raw_response)

  except Exception as e:
  self._logger.log(level="error", message=f"An error occured. The agent will retry: {str(e)}", color="red")
@@ -379,10 +376,6 @@
  task_execution_counter += 1
  self._logger.log(level="info", message=f"Agent #{task_execution_counter} response: {raw_response}", color="blue")

- if raw_response and self.callbacks:
- for item in self.callbacks:
- raw_response = item(raw_response)
-
  if not raw_response:
  self._logger.log(level="error", message="Received None or empty response from the model", color="red")
  raise ValueError("Invalid response from LLM call - None or empty.")
@@ -390,7 +383,7 @@
  return raw_response


- def execute_task(self, task, context: Optional[str] = None, task_tools: Optional[List[Tool | ToolSet]] = None) -> str:
+ def execute_task(self, task, context: Optional[str] = None, task_tools: Optional[List[Tool | ToolSet]] = list()) -> str:
  """
  Execute the task and return the response in string.
  The agent utilizes the tools in task or their own tools if the task.can_use_agent_tools is True.
@@ -405,24 +398,6 @@
  if context is not task.prompt_context:
  task_prompt += context

- # if agent_tools_to_run_without_llm:
- # tool_results = []
- # for item in agent_tools_to_run_without_llm:
- # if isinstance(item, ToolSet):
- # tool_result = item.tool.run(**item.kwargs)
- # tool_results.append(tool_result)
- # elif isinstance(item, Tool):
- # tool_result = item.run()
- # tool_results.append(tool_result)
- # else:
- # try:
- # item.run()
- # except:
- # pass
-
- # if task.tool_res_as_final is True:
- # return tool_results
-
  # if self.team and self.team._train:
  # task_prompt = self._training_handler(task_prompt=task_prompt)
  # else:

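Note on the agent/model.py hunks above: as of 1.1.10.4 the agent no longer post-processes each raw LLM response with its callbacks; instead `self.callbacks` is forwarded to the LLM layer (`llm.callbacks` / `llm._set_callbacks()`). A minimal usage sketch under that reading; the `log_event` callback is illustrative only, not part of the package:

from versionhq.agent.model import Agent
from versionhq.task.model import Task

def log_event(*args, **kwargs) -> str:
    # Registered on the LLM layer via llm.callbacks rather than applied to raw response strings.
    return "logged"

agent = Agent(role="demo", goal="demo goal", callbacks=[log_event])
res = Task(description="Return any short string.").execute_sync(agent=agent)
print(res.raw)

The agent_test.py changes further down assert exactly this forwarding (`agent.llm.callbacks == [dummy_func]`).
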
{versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/llm/llm_vars.py
@@ -16,7 +16,7 @@ litellm.pick_cheapest_chat_models_from_llm_provider(custom_llm_provider: str, n=

  MODELS = {
  "openai": [
- "gpt-3.5-turbo",
+ # "gpt-3.5-turbo",
  "gpt-4",
  "gpt-4o",
  "gpt-4o-mini",

{versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/llm/model.py
@@ -196,17 +196,18 @@ class LLM(BaseModel):
  """
  Execute LLM based on the agent's params and model params.
  """
+ litellm.drop_params = True

  with suppress_warnings():
  if len(self.callbacks) > 0:
- self._set_callbacks(self.callbacks)
+ self._set_callbacks(self.callbacks) # passed by agent

  try:
  if tools:
  self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]

  if response_format:
- self.response_format = { "type": "json_object" } if self.model == "gpt-3.5-turbo" or tool_res_as_final else response_format
+ self.response_format = { "type": "json_object" } if tool_res_as_final else response_format

  provider = self.provider if self.provider else "openai"

@@ -227,6 +228,7 @@
  res = litellm.completion(messages=messages, stream=False, **params)

  if self.tools:
+ messages.append(res["choices"][0]["message"])
  tool_calls = res["choices"][0]["message"]["tool_calls"]
  tool_res = ""

@@ -242,18 +244,23 @@
  tool_instance = tool.tool
  args = tool.kwargs
  res = tool_instance.run(params=args)
- tool_res += str(res)
+
+ if tool_res_as_final:
+ tool_res += str(res)
+ else:
+ messages.append({ "role": "tool", "tool_call_id": item.id, "content": str(res) })

  elif (isinstance(tool, Tool) or type(tool) == Tool) and (tool.name.replace(" ", "_") == func_name or tool.func.__name__ == func_name):
  res = tool.run(params=func_args)
- tool_res += str(res)
+ if tool_res_as_final:
+ tool_res += str(res)
+ else:
+ messages.append({ "role": "tool", "tool_call_id": item.id, "content": str(res) })

- if tool_res_as_final == True:
+ if tool_res_as_final:
  return tool_res
- pass

  else:
- messages.append({ "role": "tool", "tool_call_id": tool_calls.id, "content": tool_res })
  res = litellm.completion(messages=messages, stream=False, **params)

  return res["choices"][0]["message"]["content"]

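For context on the llm/model.py hunks above: the assistant message that carries `tool_calls` is now kept in the running `messages` list, each executed call is answered with a `{"role": "tool", "tool_call_id": ..., "content": ...}` message, and `litellm.completion()` is invoked again unless `tool_res_as_final` short-circuits with the raw tool output. A minimal sketch of that message flow against litellm directly; the `add_two` tool, its schema, and the model name are illustrative assumptions, not part of versionhq:

import json
import litellm

def add_two(x: int) -> int:
    return x + 2

tools = [{
    "type": "function",
    "function": {
        "name": "add_two",
        "description": "Add two to an integer.",
        "parameters": {
            "type": "object",
            "properties": {"x": {"type": "integer"}},
            "required": ["x"],
        },
    },
}]

messages = [{"role": "user", "content": "What is 40 plus 2? Use the tool."}]
res = litellm.completion(model="gpt-4o-mini", messages=messages, tools=tools)

assistant_message = res["choices"][0]["message"]
messages.append(assistant_message)  # keep the message that holds tool_calls

for call in assistant_message["tool_calls"] or []:
    result = add_two(**json.loads(call.function.arguments))
    messages.append({"role": "tool", "tool_call_id": call.id, "content": str(result)})

final = litellm.completion(model="gpt-4o-mini", messages=messages, tools=tools)
print(final["choices"][0]["message"]["content"])
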
{versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq/task/model.py
@@ -4,10 +4,10 @@ import datetime
  import uuid
  from concurrent.futures import Future
  from hashlib import md5
- from typing import Any, Dict, List, Set, Optional, Tuple, Callable, Type
+ from typing import Any, Dict, List, Set, Optional, Tuple, Callable, Type, TypeVar
  from typing_extensions import Annotated, Self

- from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf
+ from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf, field_validator
  from pydantic_core import PydanticCustomError

  from versionhq._utils.process_config import process_config
@@ -96,8 +96,8 @@ class ResponseField(BaseModel):
  for item in self.properties:
  p.update(**item._format_props())

- if item.required:
- r.append(item.title)
+ # if item.required:
+ r.append(item.title)

  props = {
  "type": schema_type,
@@ -161,15 +161,15 @@

  class TaskOutput(BaseModel):
  """
- Store the final output of the task in TaskOutput class.
- Depending on the task output format, use `raw`, `pydantic`, `json_dict` accordingly.
+ A class to store the final output of the given task in raw (string), json_dict, and pydantic class formats.
  """

  task_id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True, description="store Task ID")
  raw: str = Field(default="", description="Raw output of the task")
  json_dict: Dict[str, Any] = Field(default=None, description="`raw` converted to dictionary")
- pydantic: Optional[Any] = Field(default=None, description="`raw` converted to the abs. pydantic model")
+ pydantic: Optional[Any] = Field(default=None)
  tool_output: Optional[Any] = Field(default=None, description="store tool result when the task takes tool output as its final output")
+ gott: Optional[Any] = Field(default=None, description="store task or agent callback outcome")

  def __str__(self) -> str:
  return str(self.pydantic) if self.pydantic else str(self.json_dict) if self.json_dict else self.raw
@@ -244,7 +244,7 @@ class Task(BaseModel):
  # execution rules
  allow_delegation: bool = Field(default=False, description="ask other agents for help and run the task instead")
  async_execution: bool = Field(default=False,description="whether the task should be executed asynchronously or not")
- callback: Optional[Any] = Field(default=None, description="callback to be executed after the task is completed.")
+ callback: Optional[Callable] = Field(default=None, description="callback to be executed after the task is completed.")
  callback_kwargs: Optional[Dict[str, Any]] = Field(default_factory=dict, description="kwargs for the callback when the callback is callable")

  # recording
@@ -256,7 +256,7 @@

  @model_validator(mode="before")
  @classmethod
- def process_model_config(cls, values: Dict[str, Any]) -> None:
+ def process_config(cls, values: Dict[str, Any]) -> None:
  return process_config(values_to_update=values, model_class=cls)


@@ -276,16 +276,16 @@
  return self


- @model_validator(mode="after")
- def set_attributes_based_on_config(self) -> Self:
- """
- Set attributes based on the task configuration.
- """
+ # @model_validator(mode="after")
+ # def set_attributes_based_on_config(self) -> Self:
+ # """
+ # Set attributes based on the task configuration.
+ # """

- if self.config:
- for key, value in self.config.items():
- setattr(self, key, value)
- return self
+ # if self.config:
+ # for key, value in self.config.items():
+ # setattr(self, key, value)
+ # return self


  @model_validator(mode="after")
@@ -322,7 +322,7 @@
  if self.pydantic_custom_output:
  output_prompt = f"""
  Your response MUST STRICTLY follow the given repsonse format:
- JSON schema: {str({k: v for k, v in self.pydantic_custom_output.__fields__.items()})}
+ JSON schema: {str(self.pydantic_custom_output)}
  """

  elif self.response_fields:
@@ -380,16 +380,13 @@ Ref. Output image: {output_formats_to_follow}

  def _structure_response_format(self, data_type: str = "object", model_provider: str = "gemini") -> Dict[str, Any] | None:
  """
- Create and return a valid response format using
- - mannual response schema from `self.response_fields`, or
- - SDK objects from `pydantic_custom_output`.
- OpenAI:
- https://platform.openai.com/docs/guides/structured-outputs?context=ex1#function-calling-vs-response-format
- https://platform.openai.com/docs/guides/structured-outputs?context=with_parse#some-type-specific-keywords-are-not-yet-supported
- Gemini:
+ Structure a response format either from`response_fields` or `pydantic_custom_output`.
+ 1 nested item is accepted.
  """

- response_schema = None
+ from versionhq.task.structured_response import StructuredOutput
+
+ response_format: Dict[str, Any] = None

  if self.response_fields:
  properties, required_fields = {}, []
@@ -406,37 +403,19 @@
  "type": "object",
  "properties": properties,
  "required": required_fields,
- "additionalProperties": False, # for openai
+ "additionalProperties": False,
  }

-
- elif self.pydantic_custom_output:
- response_schema = {
- **self.pydantic_custom_output.model_json_schema(),
- "additionalProperties": False,
- "required": [k for k, v in self.pydantic_custom_output.__fields__.items()],
- "strict": True,
+ response_format = {
+ "type": "json_schema",
+ "json_schema": { "name": "outcome", "schema": response_schema }
  }


- if response_schema:
- if model_provider == "gemini":
- return {
- "type": data_type,
- "response_schema": response_schema,
- "enforce_validation": True
- }
+ elif self.pydantic_custom_output:
+ response_format = StructuredOutput(response_format=self.pydantic_custom_output)._format()

- if model_provider == "openai":
- if self.pydantic_custom_output:
- return self.pydantic_custom_output
- else:
- return {
- "type": "json_schema",
- "json_schema": { "name": "outcome", "strict": True, "schema": response_schema },
- }
- else:
- return None
+ return response_format


  def _create_json_output(self, raw: str) -> Dict[str, Any]:
@@ -477,17 +456,16 @@

  def _create_pydantic_output(self, raw: str = None, json_dict: Dict[str, Any] = None) -> InstanceOf[BaseModel]:
  """
- Create pydantic output from the `raw` result.
+ Create pydantic output from raw or json_dict output.
  """

- output_pydantic = None
- json_dict = json_dict
+ output_pydantic = self.pydantic_custom_output

  try:
- if not json_dict:
- json_dict = self._create_json_output(raw=raw)
+ json_dict = json_dict if json_dict else self._create_json_output(raw=raw)

- output_pydantic = self.pydantic_custom_output(**json_dict)
+ for k, v in json_dict.items():
+ setattr(output_pydantic, k, v)

  except:
  pass
@@ -597,16 +575,18 @@
  self.output = task_output
  self.processed_by_agents.add(agent.role)

- if self.callback:
- self.callback({ **self.callback_kwargs, **self.output.__dict__ })
+ if self.callback and isinstance(self.callback, Callable):
+ callback_res = self.callback(**self.callback_kwargs, **task_output.json_dict)
+ task_output.callback_output = callback_res

- # if self.output_file:
+ # if self.output_file: ## disabled for now
  # content = (
  # json_output
  # if json_output
  # else pydantic_output.model_dump_json() if pydantic_output else result
  # )
  # self._save_file(content)
+
  ended_at = datetime.datetime.now()
  self.execution_span_in_sec = (ended_at - started_at).total_seconds()

versionhq-1.1.10.4/src/versionhq/task/structured_response.py
@@ -0,0 +1,140 @@
+ from typing import Dict, Optional, Type, List, Any, TypeVar
+
+ from pydantic import BaseModel, Field, InstanceOf
+
+ from versionhq.llm.llm_vars import SchemaType
+ from versionhq.llm.model import LLM
+
+
+ """
+ Structure a response schema (json schema) from the given Pydantic model.
+ """
+
+
+ class StructuredObject:
+ """
+ A class to store the structured dictionary.
+ """
+ provider: str = "openai"
+ field: Type[Field]
+
+ title: str
+ dtype: str = "object"
+ properties: Dict[str, Dict[str, str]] = dict()
+ required: List[str] = list()
+ additionalProperties: bool = False
+
+ def __init__(self, name, field: Type[Field], provider: str | InstanceOf[LLM] = "openai"):
+ self.title = name
+ self.field = field
+ self.dtype = "object"
+ self.additionalProperties = False
+ self.provider = provider if isinstance(provider, str) else provider.provider
+
+ def _format(self):
+ if not self.field:
+ pass
+ else:
+ description = self.field.description if hasattr(self.field, "description") and self.field.description is not None else ""
+ self.properties.update({"item": { "type": SchemaType(self.field.annotation.__args__).convert() }})
+ self.required.append("item")
+
+ return {
+ self.title: {
+ "type": self.dtype,
+ "description": description,
+ "properties": self.properties,
+ "additionalProperties": self.additionalProperties,
+ "required": self.required
+ }
+ }
+
+
+
+ class StructuredList:
+ """
+ A class to store a structured list with 1 nested object.
+ """
+ provider: str = "openai"
+ field: Type[Field]
+ title: str = ""
+ dtype: str = "array"
+ items: Dict[str, Dict[str, str]] = dict()
+
+ def __init__(self, name, field: Type[Field], provider: str | LLM = "openai"):
+ self.provider = provider if isinstance(provider, str) else provider.provider
+ self.field = field
+ self.title = name
+ self.dtype = "array"
+ self.items = dict()
+
+
+ def _format(self):
+ field = self.field
+ if not field:
+ pass
+ else:
+ description = "" if field.description is None else field.description
+ props = {}
+
+ for item in field.annotation.__args__:
+ nested_object_type = item.__origin__ if hasattr(item, "__origin__") else item
+
+ if nested_object_type == dict:
+ props.update({
+ "nest": {
+ "type": "object",
+ "properties": { "item": { "type": "string"} }, #! REFINEME - field title <>`item`
+ "required": ["item",],
+ "additionalProperties": False
+ }})
+
+ elif nested_object_type == list:
+ props.update({
+ # "nest": {
+ "type": "array",
+ "items": { "type": "string" } , #! REFINEME - field title <>`item`
+ # }
+ })
+ else:
+ props.update({ "type": SchemaType(nested_object_type).convert() })
+
+ self.items = { **props }
+ return {
+ self.title: {
+ "type": self.dtype,
+ "description": description,
+ "items": self.items,
+ }
+ }
+
+
+ class StructuredOutput(BaseModel):
+ response_format: Any = None
+ provider: str = "openai"
+ applicable_models: List[InstanceOf[LLM] | str] = list()
+ name: str = ""
+ schema: Dict[str, Any] = dict(type="object", additionalProperties=False, properties=dict(), required=list())
+
+
+ def _format(self, **kwargs):
+ if self.response_format is None:
+ pass
+
+ self.name = self.response_format.__name__
+
+ for name, field in self.response_format.model_fields.items():
+ self.schema["required"].append(name)
+
+ if hasattr(field.annotation, "__origin__") and field.annotation.__origin__ == dict:
+ self.schema["properties"].update(StructuredObject(name=name, field=field)._format())
+
+ elif hasattr(field.annotation, "__origin__") and field.annotation.__origin__ == list:
+ self.schema["properties"].update(StructuredList(name=name, field=field)._format())
+ else:
+ self.schema["properties"].update({ name: { "type": SchemaType(field.annotation).convert(), **kwargs }})
+
+ return {
+ "type": "json_schema",
+ "json_schema": { "name": self.name, "schema": self.schema }
+ }

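A short usage sketch for the new structured_response module above, assuming the API exactly as added in 1.1.10.4; the `DemoReport` model is hypothetical and the exact SchemaType string conversions are not shown in this diff:

from typing import Any
from pydantic import BaseModel
from versionhq.task.structured_response import StructuredOutput

class DemoReport(BaseModel):
    title: str
    scores: list[int]
    meta: dict[str, Any]

# _format() walks DemoReport.model_fields and returns an OpenAI-style response format:
# { "type": "json_schema",
#   "json_schema": { "name": "DemoReport",
#                    "schema": { "type": "object", "additionalProperties": False,
#                                "properties": { ...one entry per field... },
#                                "required": ["title", "scores", "meta"] } } }
response_format = StructuredOutput(response_format=DemoReport)._format()

Task._structure_response_format() calls the same helper when `pydantic_custom_output` is set, so this is the dict that ends up in the LLM's `response_format` parameter.
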
{versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: versionhq
- Version: 1.1.10.2
+ Version: 1.1.10.4
  Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License

{versionhq-1.1.10.2 → versionhq-1.1.10.4}/src/versionhq.egg-info/SOURCES.txt
@@ -48,6 +48,7 @@ src/versionhq/task/__init__.py
  src/versionhq/task/formatter.py
  src/versionhq/task/log_handler.py
  src/versionhq/task/model.py
+ src/versionhq/task/structured_response.py
  src/versionhq/team/__init__.py
  src/versionhq/team/model.py
  src/versionhq/team/team_planner.py

{versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/agent/agent_test.py
@@ -1,4 +1,6 @@
  import os
+ from unittest import mock
+ from unittest.mock import patch
  import pytest
  from typing import Callable, Any

@@ -6,6 +8,7 @@ from versionhq.agent.model import Agent
  from versionhq.agent.TEMPLATES.Backstory import BACKSTORY_SHORT, BACKSTORY_FULL
  from versionhq.llm.model import LLM, DEFAULT_MODEL_NAME
  from versionhq.tool.model import Tool
+ from versionhq.tool.decorator import tool

  MODEL_NAME = os.environ.get("DEFAULT_MODEL_NAME", "gpt-3.5-turbo")
  LITELLM_API_KEY = os.environ.get("LITELLM_API_KEY")
@@ -119,7 +122,7 @@ def test_build_agent_with_llm_config():
  assert agent.llm.max_tokens == 4000
  assert agent.llm.logprobs == False
  assert [hasattr(agent.llm, k) and v for k, v in llm_config.items() if v is not None]
- assert agent.llm.callbacks == []
+ assert agent.llm.callbacks == [dummy_func]


  def test_build_agent_with_llm_instance():
@@ -139,7 +142,7 @@ def test_build_agent_with_llm_instance():
  assert agent.llm.api_key is not None
  assert agent.llm.max_tokens == 3000
  assert agent.llm.logprobs == False
- assert agent.llm.callbacks == []
+ assert agent.llm.callbacks == [dummy_func]


  def test_build_agent_with_llm_and_func_llm_config():
@@ -160,7 +163,7 @@ def test_build_agent_with_llm_and_func_llm_config():
  assert agent.function_calling_llm.api_key is not None
  assert agent.function_calling_llm.max_tokens == 4000
  assert agent.function_calling_llm.logprobs == False
- assert agent.function_calling_llm.callbacks == []
+ assert agent.function_calling_llm.callbacks == [dummy_func]


  def test_build_agent_with_llm_and_func_llm_instance():
@@ -182,7 +185,7 @@ def test_build_agent_with_llm_and_func_llm_instance():
  assert agent.function_calling_llm.api_key is not None
  assert agent.function_calling_llm.max_tokens == 3000
  assert agent.function_calling_llm.logprobs == False
- assert agent.function_calling_llm.callbacks == []
+ assert agent.function_calling_llm.callbacks == [dummy_func]


  def test_agent_with_random_dict_tools():
@@ -209,3 +212,25 @@ def test_agent_with_custom_tools():
  assert agent.tools[0] is tool
  assert agent.tools[0]._run(message="hi") == "hi_demo"
  assert agent.tools[0].name == "custom tool"
+
+
+ # @pytest.mark.vcr(filter_headers=["authorization"])
+ def test_agent_custom_max_iterations():
+ from versionhq.task.model import Task
+
+ @tool
+ def get_final_answer() -> int:
+ """Get the final answer but don't give it yet, just re-use this tool non-stop."""
+ return 42
+
+ agent = Agent(role="demo", goal="test goal", maxit=1, allow_delegation=False, tools=[get_final_answer])
+
+ with patch.object(
+ LLM, "call", wraps=LLM(model=DEFAULT_MODEL_NAME).call
+ ) as private_mock:
+ task = Task(
+ description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
+ can_use_agent_tools=True
+ )
+ agent.execute_task(task=task)
+ assert private_mock.call_count == 1

{versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/llm/llm_test.py
@@ -64,4 +64,3 @@ def test_create_llm_from_provider():

  if __name__ == "__main__":
  test_create_llm_from_valid_name()
- test_create_llm_from_provider

versionhq-1.1.10.4/tests/task/__init__.py
@@ -0,0 +1,53 @@
+ from typing import Dict, Any
+
+ from pydantic import BaseModel
+
+ from versionhq.agent.model import Agent
+ from versionhq.task.model import ResponseField
+ from versionhq.llm.model import DEFAULT_MODEL_NAME, LLM
+
+
+ class DemoChild(BaseModel):
+ """
+ A nested outcome class.
+ """
+ ch_1: str
+ ch_2: dict[str, str]
+
+
+ class DemoOutcome(BaseModel):
+ """
+ A demo pydantic class to validate the outcome with various nested data types.
+ """
+ test0: int
+ test1: float
+ test2: str
+ test3: bool
+ test4: list[str]
+ test5: dict[str, Any]
+ test6: list[dict[str, Any]]
+ test8: list[list[str]]
+ # children: List[DemoChild]
+
+
+ demo_response_fields = [
+ ResponseField(title="test0", data_type=int),
+ ResponseField(title="test1", data_type=str, required=True),
+ ResponseField(title="test2", data_type=list, items=str),
+ ResponseField(title="test3", data_type=list, items=dict, properties=[
+ ResponseField(title="nest1", data_type=str),
+ ResponseField(title="nest2", type=dict, properties=[ResponseField(title="test", data_type=str)])
+ ]),
+ ResponseField(title="test4", data_type=dict, properties=[ResponseField(title="ch", data_type=tuple)]),
+ ResponseField(title="test5", data_type=bool),
+ ResponseField(title="test6", data_type=list, items=Any, required=False),
+ # ResponseField(title="children", data_type=list, items=type(DemoChild)),
+ ]
+
+
+ def create_base_agent(model: str | LLM | Dict[str, Any]) -> Agent:
+ agent = Agent(role="demo", goal="My amazing goals", llm=model, max_tokens=3000, maxit=1)
+ return agent
+
+
+ base_agent = create_base_agent(model=DEFAULT_MODEL_NAME)

{versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/task/task_test.py
@@ -1,5 +1,7 @@
  import os
  import pytest
+ import sys
+ import threading
  from unittest.mock import patch
  from typing import Dict, Any, List, Optional, Callable

@@ -8,93 +10,42 @@ from pydantic import BaseModel, Field, InstanceOf
  from versionhq.agent.model import Agent
  from versionhq.task.model import Task, ResponseField, TaskOutput, ConditionalTask
  from versionhq.tool.model import Tool, ToolSet
- from versionhq.llm.llm_vars import MODELS
- from versionhq.llm.model import DEFAULT_MODEL_NAME, LLM
-
-
- class DemoChild(BaseModel):
- """
- A nested outcome class.
- """
- ch_1: str
- ch_2: dict[str, str]
-
-
- class DemoOutcome(BaseModel):
- """
- A demo pydantic class to validate the outcome with various nested data types.
- """
- test0: int
- test1: float
- test2: str
- test3: bool
- test4: list[str]
- # test5: dict[str, Any]
- # test6: list[dict[str, Any]]
- test7: Optional[list[str]]
-
- test8: list[list[str]]
- # children: List[DemoChild]
-
-
- demo_nested_response_fields = [
- ResponseField(title="test0", data_type=int),
- ResponseField(title="test1", data_type=str, required=True),
- ResponseField(title="test2", data_type=list, items=str),
- ResponseField(title="test3", data_type=list, items=dict, properties=[
- ResponseField(title="nest1", data_type=str),
- ResponseField(title="nest2", type=dict, properties=[ResponseField(title="test", data_type=str)])
- ]),
- ResponseField(title="test4", data_type=dict, properties=[ResponseField(title="ch", data_type=tuple)]),
- ResponseField(title="test5", data_type=bool),
- ResponseField(title="test6", data_type=list, items=Any, required=False),
- # ResponseField(title="children", data_type=list, items=type(DemoChild)),
- ]
-
-
- def create_base_agent(model: str | LLM | Dict[str, Any]) -> Agent:
- agent = Agent(role="demo", goal="My amazing goals", llm=model, max_tokens=3000)
- return agent
-
- agent = create_base_agent(model=DEFAULT_MODEL_NAME)
+ from tests.task import DemoOutcome, demo_response_fields, base_agent

+ sys.setrecursionlimit(2097152)
+ threading.stack_size(134217728)

  def test_sync_execute_task_with_pydantic_outcome():
  task = Task(
- description="Output random values strictly following the given response foramt and prompt.",
+ description="Output random values strictly following the data type defined in the given response format.",
  pydantic_custom_output=DemoOutcome
  )
- res = task.execute_sync(agent=agent)
+ res = task.execute_sync(agent=base_agent)

  assert isinstance(res, TaskOutput) and res.task_id is task.id
- assert isinstance(res.raw, str)
- assert isinstance(res.json_dict, dict)
- assert res.pydantic == DemoOutcome(**res.json_dict)
- assert [v and type(v) is type(getattr(res.pydantic, k)) for k, v in res.pydantic.dict().items()]
- # assert [isinstance(item.ch_1, str) and isinstance(item.ch_2, dict) for item in res.pydantic.children]
+ assert isinstance(res.raw, str) and isinstance(res.json_dict, dict)
+ assert [hasattr(res.pydantic, k) and getattr(res.pydantic, k) == v for k, v in res.json_dict.items()]


  def test_sync_execute_task_with_json_dict():
  task = Task(
- description="Output random values strictly following the given response foramt and prompt.",
- response_fields=demo_nested_response_fields
+ description="Output random values strictly following the data type defined in the given response format.",
+ response_fields=demo_response_fields
  )
- res = task.execute_sync(agent=agent)
+ res = task.execute_sync(agent=base_agent)

  assert isinstance(res, TaskOutput) and res.task_id is task.id
  assert res.raw and isinstance(res.raw, str)
  assert res.pydantic is None
  assert res.json_dict and isinstance(res.json_dict, dict)
  assert [v and type(v) == task.response_fields[i].data_type for i, (k, v) in enumerate(res.json_dict.items())]
- # assert [isinstance(item, DemoChild) and isinstance(item.ch_1, str) and isinstance(item.ch_2, dict)
- # for item in res.json_dict["children"]]


  def test_async_execute_task():
  task = Task(description="Return string: 'test'")

  with patch.object(Agent, "execute_task", return_value="test") as execute:
- execution = task.execute_async(agent=agent)
+ execution = task.execute_async(agent=base_agent)
  result = execution.result()
  assert result.raw == "test"
  execute.assert_called_once_with(task=task, context=None, task_tools=list())
@@ -118,7 +69,7 @@ def test_sync_execute_with_task_context():
  ],
  context=[sub_task,]
  )
- res = main_task.execute_sync(agent=agent)
+ res = main_task.execute_sync(agent=base_agent)

  assert isinstance(res, TaskOutput)
  assert res.task_id is main_task.id
@@ -129,7 +80,7 @@
  assert res.pydantic is None
  assert sub_task.output is not None
  assert sub_task.output.json_dict is not None
- assert "subtask_result" in main_task.prompt(model_provider=agent.llm.provider)
+ assert "subtask_result" in main_task.prompt(model_provider=base_agent.llm.provider)


  def test_sync_execute_task_with_prompt_context():
@@ -144,9 +95,7 @@

  sub_task = Task(
  description="return the output following the given prompt.",
- response_fields=[
- ResponseField(title="result", data_type=str, required=True),
- ]
+ response_fields=[ResponseField(title="result", data_type=str, required=True),]
  )
  main_task = Task(
  description="return the output following the given prompt.",
@@ -157,20 +106,16 @@
  ],
  context=[sub_task]
  )
- res = main_task.execute_sync(agent=agent, context="plan a Black Friday campaign.")
+ res = main_task.execute_sync(agent=base_agent, context="plan a Black Friday campaign.")

  assert isinstance(res, TaskOutput) and res.task_id is main_task.id
  assert res.raw and isinstance(res.raw, str)
  assert res.json_dict and isinstance(res.json_dict, dict)
- assert res.pydantic == Outcome(test1=res.json_dict["test1"], test2=res.json_dict["test2"])
-
+ assert res.pydantic.test1 == res.json_dict["test1"] and res.pydantic.test2 == res.json_dict["test2"]
  assert sub_task.output is not None
- assert sub_task.output.json_dict is not None
- assert sub_task.output.pydantic is None
-
- assert "result" in main_task.prompt(model_provider=agent.llm.provider)
+ assert "result" in main_task.prompt(model_provider=base_agent.llm.provider)
  assert main_task.prompt_context == "plan a Black Friday campaign."
- assert "plan a Black Friday campaign." in main_task.prompt(model_provider=agent.llm.provider)
+ assert "plan a Black Friday campaign." in main_task.prompt(model_provider=base_agent.llm.provider)


  def test_callback():
@@ -193,7 +138,7 @@ def test_callback():
  callback=callback_func,
  callback_kwargs=dict(added_condition="demo for pytest")
  )
- res = task.execute_sync(agent=agent)
+ res = task.execute_sync(agent=base_agent)

  assert res is not None
  assert isinstance(res, TaskOutput)
@@ -202,7 +147,7 @@


  def test_delegate():
- agent = Agent(role="demo agent 6", goal="My amazing goals")
+ agent = Agent(role="demo agent 6", goal="My amazing goals", maxit=1, max_tokens=3000)
  task = Task(
  description="return the output following the given prompt.",
  response_fields=[
@@ -222,7 +167,7 @@ def test_conditional_task():
  description="erturn the output following the given prompt.",
  response_fields=[ResponseField(title="test1", data_type=str, required=True),],
  )
- res = task.execute_sync(agent=agent)
+ res = task.execute_sync(agent=base_agent)

  conditional_task = ConditionalTask(
  description="return the output following the given prompt.",
@@ -251,7 +196,7 @@ def test_store_task_log():

  def test_task_with_agent_tools():
  simple_tool = Tool(name="simple tool", func=lambda x: "simple func")
- agent = Agent(role="demo", goal="execute tools", tools=[simple_tool,])
+ agent = Agent(role="demo", goal="execute tools", tools=[simple_tool,], maxit=1, max_tokens=3000)
  task = Task(description="execute tool", can_use_agent_tools=True, tool_res_as_final=True)
  res = task.execute_sync(agent=agent)
  assert res.tool_output == "simple func"
@@ -271,7 +216,6 @@ def test_task_with_agent_tools():
  class CustomTool(Tool):
  name: str = "custom tool"

- custom_tool = CustomTool(func=demo_func)
  custom_tool = CustomTool(func=demo_func)
  agent.tools = [custom_tool]
  res = task.execute_sync(agent=agent)
@@ -286,7 +230,7 @@ def test_task_with_tools():
  tool = Tool(name="tool", func=random_func)
  tool_set = ToolSet(tool=tool, kwargs=dict(message="empty func"))

- agent = Agent(role="Tool Handler", goal="execute tools")
+ agent = Agent(role="Tool Handler", goal="execute tools", maxit=1, max_tokens=3000)
  task = Task(description="execute the function", tools=[tool_set,], tool_res_as_final=True)
  res = task.execute_sync(agent=agent)
  assert res.tool_output == "empty func_demo"
@@ -297,18 +241,18 @@

  custom_tool = CustomTool(func=random_func)
  task.tools = [custom_tool]
- res = task.execute_sync(agent=agent)
+ res = task.execute_sync(agent=base_agent)
  assert "_demo" in res.tool_output

  task.tools = [custom_tool]
- res = task.execute_sync(agent=agent)
+ res = task.execute_sync(agent=base_agent)
  assert res.tool_output is not None



  def test_task_without_response_format():
  task = Task(description="return a simple output with any random values.")
- res = task.execute_sync(agent=agent)
+ res = task.execute_sync(agent=base_agent)

  assert res and isinstance(res, TaskOutput)
  assert res.json_dict and isinstance(res.json_dict, dict)
@@ -331,11 +275,45 @@ def test_build_agent_without_developer_prompt():



+ def test_callback():
+ from pydantic import BaseModel
+ from versionhq.agent.model import Agent
+ from versionhq.task.model import Task

- if __name__ == "__main__":
- test_task_with_tools()
+ class CustomOutput(BaseModel):
+ test1: str
+ test2: list[str]
+
+ def dummy_func(message: str, test1: str, test2: list[str]) -> str:
+ return f"{message}: {test1}, {", ".join(test2)}"
+
+ agent = Agent(role="demo", goal="amazing project goal", maxit=1, max_tokens=3000)

+ task = Task(
+ description="Amazing task",
+ pydantic_custom_output=CustomOutput,
+ callback=dummy_func,
+ callback_kwargs=dict(message="Hi! Here is the result: ")
+ )
+ res = task.execute_sync(agent=agent, context="amazing context to consider.")
+
+ assert res.task_id == task.id
+ assert res.pydantic.test1 and res.pydantic.test2
+ assert "Hi! Here is the result: " in res.callback_output and res.pydantic.test1 in res.callback_output and ", ".join(res.pydantic.test2) in res.callback_output

- # tool - use_llm = true -
- # task - agent - maxit
- # agents with multiple callbacks
+
+ def test_task_with_agent_callback():
+ def dummy_func(*args, **kwargs) -> str:
+ return "Demo func"
+
+ agent = Agent(role="demo", goal="amazing project goal", maxit=1, max_tokens=3000, callbacks=[dummy_func,])
+ task = Task(description="Amazing task")
+ res = task.execute_sync(agent=agent)
+
+ assert res.raw and res.task_id == task.id
+
+ # task - maxit, loop, rpm
+
+
+ if __name__ == "__main__":
+ test_task_with_agent_callback()

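The test_callback test above exercises the reworked Task callback contract from task/model.py: the callback is now invoked as `self.callback(**self.callback_kwargs, **task_output.json_dict)` and its return value is stored on the task output (the tests read it back as `res.callback_output`). A minimal sketch of a compatible callback; the `notify` function, the `channel` kwarg, and the `summary` field are illustrative:

from versionhq.agent.model import Agent
from versionhq.task.model import Task, ResponseField

def notify(channel: str, summary: str) -> str:
    # channel comes from callback_kwargs; summary comes from the task's json_dict keys.
    return f"[{channel}] {summary}"

task = Task(
    description="Summarize the latest campaign results in one sentence.",
    response_fields=[ResponseField(title="summary", data_type=str, required=True)],
    callback=notify,
    callback_kwargs=dict(channel="ops"),
)
res = task.execute_sync(agent=Agent(role="demo", goal="demo goal", maxit=1))
print(res.callback_output)
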
{versionhq-1.1.10.2 → versionhq-1.1.10.4}/tests/tool/tool_test.py
@@ -135,8 +135,3 @@ def test_cache_tool():

  assert my_tool.cache_handler.read(tool_name=my_tool.name, input=str({"x": 3})) == 5
  assert my_tool.cache_handler._cache[f"{my_tool.name}-{str({"x": 3})}"] == 5
-
-
-
- if __name__ =="__main__":
- test_tool_handler_with_cache()