versionhq 1.2.4.3__py3-none-any.whl → 1.2.4.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. versionhq/__init__.py +12 -3
  2. versionhq/_prompt/auto_feedback.py +2 -2
  3. versionhq/_prompt/model.py +24 -29
  4. versionhq/_utils/__init__.py +2 -0
  5. versionhq/_utils/convert_img_url.py +15 -0
  6. versionhq/_utils/is_valid_enum.py +25 -0
  7. versionhq/_utils/llm_as_a_judge.py +0 -1
  8. versionhq/_utils/usage_metrics.py +35 -14
  9. versionhq/agent/inhouse_agents.py +2 -2
  10. versionhq/agent/model.py +100 -29
  11. versionhq/agent_network/formation.py +6 -12
  12. versionhq/agent_network/model.py +4 -5
  13. versionhq/clients/customer/__init__.py +2 -2
  14. versionhq/clients/product/model.py +4 -4
  15. versionhq/clients/workflow/model.py +1 -1
  16. versionhq/llm/llm_vars.py +7 -6
  17. versionhq/llm/model.py +3 -1
  18. versionhq/storage/task_output_storage.py +2 -2
  19. versionhq/task/model.py +112 -100
  20. versionhq/task_graph/draft.py +4 -4
  21. versionhq/task_graph/model.py +34 -30
  22. versionhq/tool/composio/__init__.py +0 -0
  23. versionhq/tool/{composio_tool.py → composio/model.py} +4 -5
  24. versionhq/tool/gpt/__init__.py +6 -0
  25. versionhq/tool/gpt/_enum.py +28 -0
  26. versionhq/tool/gpt/cup.py +145 -0
  27. versionhq/tool/gpt/file_search.py +163 -0
  28. versionhq/tool/gpt/web_search.py +89 -0
  29. {versionhq-1.2.4.3.dist-info → versionhq-1.2.4.6.dist-info}/METADATA +4 -4
  30. {versionhq-1.2.4.3.dist-info → versionhq-1.2.4.6.dist-info}/RECORD +34 -26
  31. /versionhq/tool/{composio_tool_vars.py → composio/params.py} +0 -0
  32. {versionhq-1.2.4.3.dist-info → versionhq-1.2.4.6.dist-info}/LICENSE +0 -0
  33. {versionhq-1.2.4.3.dist-info → versionhq-1.2.4.6.dist-info}/WHEEL +0 -0
  34. {versionhq-1.2.4.3.dist-info → versionhq-1.2.4.6.dist-info}/top_level.txt +0 -0
versionhq/task_graph/model.py
@@ -1,30 +1,31 @@
  import matplotlib
  matplotlib.use('agg')

- import enum
  import uuid
- import networkx as nx
- import matplotlib.pyplot as plt
+ import datetime
+ from enum import IntEnum, Enum
  from abc import ABC
  from concurrent.futures import Future
  from typing import List, Any, Optional, Callable, Dict, Type, Tuple
  from typing_extensions import Self

+ import networkx as nx
+ import matplotlib.pyplot as plt
  from pydantic import BaseModel, InstanceOf, Field, UUID4, field_validator, model_validator
  from pydantic_core import PydanticCustomError

  from versionhq.agent.model import Agent
- from versionhq.task.model import Task, TaskOutput, Evaluation
+ from versionhq.task.model import Task, TaskOutput, Evaluation, ResponseField
  from versionhq._utils import Logger, UsageMetrics, ErrorType


- class ReformTriggerEvent(enum.Enum):
+ class ReformTriggerEvent(IntEnum):
      USER_INPUT = 1  # ask human
      TEST_TIME_COMPUTATION = 2  # mismatch between actual responses and expected outcome
      ERROR_DETECTION = 3  # response error


- class ConditionType(enum.Enum):
+ class ConditionType(IntEnum):
      AND = 1
      OR = 2
@@ -71,7 +72,7 @@ class Condition(BaseModel):
          return bool(len([item for item in cond_list if item == True]) == len(cond_list))


- class TaskStatus(enum.Enum):
+ class TaskStatus(IntEnum):
      """
      Enum to track the task execution status
      """
@@ -84,7 +85,7 @@ class TaskStatus(enum.Enum):
      ERROR = 7  # tried task execution but an error was returned. resumption follows edge weights and agent settings


- class DependencyType(enum.Enum):
+ class DependencyType(str, Enum):
      """
      Concise enumeration of the edge type.
      """
@@ -129,7 +130,7 @@ class Node(BaseModel):
          else:
              self.status = TaskStatus.IN_PROGRESS
              agent = agent if agent else self.assigned_to
-             self.task.pydantic_output = self.task.pydantic_output if self.task.pydantic_output else response_format if type(response_format) == BaseModel else None
+             self.task.response_schema = self.task.response_schema if self.task.response_schema else response_format if type(response_format) == BaseModel or isinstance(response_format, list) else None
              res = self.task.execute(agent=agent, context=context)

              if isinstance(res, Future):  # activate async
@@ -393,14 +394,13 @@ class Graph(ABC, BaseModel):


  class TaskGraph(Graph):
-     _usage: Optional[UsageMetrics] = None
-
      id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
      should_reform: bool = False
      reform_trigger_event: Optional[ReformTriggerEvent] = None
      outputs: Dict[str, TaskOutput] = Field(default_factory=dict, description="stores node identifier and TaskOutput")
-     concl_template: Optional[Dict[str, Any] | Type[BaseModel]] = Field(default=None, description="stores final response format in Pydantic class or JSON dict")
+     concl_response_schema: Optional[List[ResponseField] | Type[BaseModel]] = Field(default=None, description="stores final response schema in Pydantic class or response fields")
      concl: Optional[TaskOutput] = Field(default=None, description="stores the final or latest conclusion of the entire task graph")
+     usage: Optional[UsageMetrics] = None


      def _save(self, title: str, abs_file_path: str = None) -> None:
@@ -420,18 +420,6 @@ class TaskGraph(Graph):
              Logger().log(level="error", message=f"Failed to save the graph {str(self.id)}: {str(e)}", color="red")


-     def _handle_usage(self) -> None:
-         """Returns total tokens and latency spended for the graph execution."""
-         if not self.nodes:
-             return None
-
-         self._usage = self._usage if self._usage else UsageMetrics(id=self.id)
-
-         for node in self.nodes.values():
-             if node.task and node.task._usage:
-                 self._usage.aggregate(metrics=node.task._usage)
-
-
      def _handle_human_input(self) -> str | None:
          """Handles input from human."""
          request = None
@@ -448,12 +436,27 @@ class TaskGraph(Graph):
              Logger().log(message=f"Ok. regenerating the graph based on your input: ', {request}", level="info", color="blue")
          else:
              Logger().log(message="Cannot recognize your request.", level="error", color="red")
-             self._usage = self._usage if self._usage else UsageMetrics(id=self.id)
-             self._usage.record_errors(type=ErrorType.HUMAN_INTERACTION)
+             self.usage = self.usage if self.usage else UsageMetrics(id=self.id)
+             self.usage.record_errors(type=ErrorType.HUMAN_INTERACTION)

          return request


+     def _handle_usage(self, start_dt: datetime = None, end_dt: datetime = None) -> UsageMetrics:
+         usage = self.usage if self.usage else UsageMetrics(id=self.id)
+
+         if self.outputs:
+             for item in self.outputs.values():
+                 if isinstance(item.usage, UsageMetrics):
+                     usage = usage.aggregate(metrics=item.usage)
+
+         if start_dt and end_dt:
+             usage.record_latency(start_dt, end_dt)
+
+         self.usage = usage
+         return usage
+
+
      def add_task(self, task: Node | Task) -> Node:
          """Convert `task` to a Node object and add it to G"""

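The rewritten `_handle_usage` aggregates from `self.outputs` (each `TaskOutput.usage`) instead of walking node tasks, and can record wall-clock latency for the whole run. A stand-in sketch of the contract it relies on; the real `UsageMetrics` fields and method bodies are assumptions here:

```python
import datetime
from dataclasses import dataclass

@dataclass
class UsageMetricsStub:
    """Stand-in for versionhq's UsageMetrics; attribute names are assumed."""
    total_tokens: int = 0
    latency_ms: float = 0.0

    def aggregate(self, metrics: "UsageMetricsStub") -> "UsageMetricsStub":
        # _handle_usage reassigns the return value (usage = usage.aggregate(...)),
        # so aggregate() must return the merged metrics object.
        self.total_tokens += metrics.total_tokens
        return self

    def record_latency(self, start_dt: datetime.datetime, end_dt: datetime.datetime) -> None:
        self.latency_ms = (end_dt - start_dt).total_seconds() * 1000

total = UsageMetricsStub()
total = total.aggregate(UsageMetricsStub(total_tokens=120))
```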
@@ -596,6 +599,7 @@ class TaskGraph(Graph):
          """

          Logger().log(color="blue", message=f"Start to activate the graph: {str(self.id)}", level="info")
+         start_dt = datetime.datetime.now()

          if target:
              if not [k for k in self.nodes.keys() if k == target]:
@@ -659,7 +663,6 @@ class TaskGraph(Graph):
                  node_identifier = edge.target.identifier
                  self.outputs.update({ node_identifier: res })

-
          if self.should_reform:
              target = [k for k in self.outputs.keys()][-1] if self.outputs else self.find_start_nodes()[0].identifier if self.find_start_nodes() else None

@@ -669,9 +672,10 @@ class TaskGraph(Graph):
              res, _ = self.handle_reform(target=target)

          self.concl = res
-         self.concl_template = self.concl_template if self.concl_template else res.pydantic.__class__ if res.pydantic else None
-         # last_task_output = [v for v in self.outputs.values()][len([v for v in self.outputs.values()]) - 1] if [v for v in self.outputs.values()] else None
-         self._handle_usage()
+         self.concl_response_schema = self.concl_response_schema if self.concl_response_schema else res.pydantic.__class__ if res.pydantic else None
+
+         end_dt = datetime.datetime.now()
+         self._handle_usage(start_dt, end_dt)
          return res, self.outputs


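Taken together, `activate()` now brackets the run with timestamps and funnels per-node usage into `TaskGraph.usage`. A hypothetical call site; construction details beyond this diff are assumed:

```python
import versionhq as vhq

# Hypothetical sketch: default TaskGraph construction is assumed to work.
graph = vhq.TaskGraph()
graph.add_task(task=vhq.Task(description="Draft a launch plan."))

res, outputs = graph.activate()   # returns (final TaskOutput, dict of node outputs)
print(graph.concl is res)         # conclusion is stored on the graph
print(graph.usage)                # aggregated tokens plus the recorded latency
```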
versionhq/tool/composio/__init__.py (file without changes)
versionhq/tool/{composio_tool.py → composio/model.py}
@@ -7,10 +7,9 @@ from typing_extensions import Self

  from pydantic import BaseModel, Field, model_validator, field_validator, UUID4, PrivateAttr
  from pydantic_core import PydanticCustomError
-
  from composio import ComposioToolSet

- from versionhq.tool.composio_tool_vars import ComposioAppName, ComposioAuthScheme, composio_app_set, ComposioStatus, ComposioAction
+ from versionhq.tool.composio.params import ComposioAppName, ComposioAuthScheme, composio_app_set, ComposioStatus, ComposioAction
  from versionhq.tool.cache_handler import CacheHandler
  from versionhq._utils.logger import Logger

@@ -22,7 +21,7 @@ DEFAULT_USER_ID = os.environ.get("DEFAULT_USER_ID", None)
  OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", None)


- class ComposioHandler(ABC, BaseModel):
+ class ComposioBaseTool(ABC, BaseModel):
      """
      A class to handle connecting account with Composio and executing actions using Composio ecosystem.
      `connected_account_id` is set up per `app_name` to call the actions on the given app. i.e., salesforce
@@ -78,8 +77,8 @@ class ComposioHandler(ABC, BaseModel):
          """
          Composio toolset on LangChain for action execution using LLM.
          """
-         from composio_langchain import ComposioToolSet
-         return ComposioToolSet(api_key=os.environ.get("COMPOSIO_API_KEY"), metadata={**metadata})
+         from composio_langchain import ComposioBaseToolSet
+         return ComposioBaseToolSet(api_key=os.environ.get("COMPOSIO_API_KEY"), metadata={**metadata})


      def _connect(
versionhq/tool/gpt/__init__.py (new file)
@@ -0,0 +1,6 @@
+ from dotenv import load_dotenv
+ load_dotenv(override=True)
+
+ import os
+ from openai import OpenAI
+ openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
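Note that the module instantiates a shared client at import time, so `OPENAI_API_KEY` must be resolvable (from the environment or a `.env` file picked up by python-dotenv) before anything under `versionhq.tool.gpt` is imported. A quick way to verify the wiring:

```python
# Assumes OPENAI_API_KEY is set in the environment or a .env file next to
# your entrypoint; requires the openai and python-dotenv packages.
from versionhq.tool.gpt import openai_client

models = openai_client.models.list()   # fails fast here if the key is missing or invalid
print(models.data[0].id)
```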
versionhq/tool/gpt/_enum.py (new file)
@@ -0,0 +1,28 @@
+ from enum import Enum
+
+
+ class GPTSizeEnum(str, Enum):
+     LOW = "low"
+     MEDIUM = "medium"
+     HIGH = "high"
+
+
+ class GPTCUPEnvironmentEnum(str, Enum):
+     BROWSER = "browser"
+     MAC = "mac"
+     WINDOWS = "windows"
+     UBUNTU = "ubuntu"
+
+
+ class GPTCUPTypeEnum(str, Enum):
+     COMPUTER_CALL_OUTPUT = "computer_call_output"
+     COMPUTER_USE_PREVIEW = "computer_use_preview"
+
+
+ class GPTFilterTypeEnum(str, Enum):
+     eq = "eq"
+     ne = "ne"
+     gt = "gt"
+     gte = "gte"
+     lt = "lt"
+     lte = "lte"
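The tool classes below gate enum-typed arguments through `is_valid_enum`, added in `versionhq/_utils/is_valid_enum.py` (+25 lines) but not shown in this diff. From its call sites it takes `enum=` and `val=` keywords, accepts raw values or enum members, and returns a bool. A plausible sketch, not the actual implementation:

```python
from enum import Enum
from typing import Any

def is_valid_enum(enum: type[Enum], val: Any = None) -> bool:
    """Plausible reconstruction from call sites; the shipped helper may differ."""
    if enum is None or val is None:
        return False
    if isinstance(val, enum):
        return True
    return val in [m.value for m in enum] or val in [m.name for m in enum]

# e.g. is_valid_enum(enum=GPTSizeEnum, val="low") would return True
```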
versionhq/tool/gpt/cup.py (new file)
@@ -0,0 +1,145 @@
+ from typing import List, Dict, Any
+
+ from versionhq._utils import convert_img_url
+ from versionhq.tool.gpt import openai_client
+ from versionhq.tool.gpt._enum import GPTCUPEnvironmentEnum, GPTCUPTypeEnum, GPTSizeEnum
+ from versionhq._utils import is_valid_enum, UsageMetrics, ErrorType
+
+
+ class CUPToolSchema:
+     type: str = GPTCUPTypeEnum.COMPUTER_USE_PREVIEW.value
+     display_width: int = 1024
+     display_height: int = 768
+     environment: str = GPTCUPEnvironmentEnum.BROWSER.value
+
+     def __init__(
+         self,
+         type: str | GPTCUPTypeEnum = None,
+         display_width: int = None,
+         display_height: int = None,
+         environment: str | GPTCUPEnvironmentEnum = None
+     ):
+         self.display_height = display_height if display_height else self.display_height
+         self.display_width = display_width if display_width else self.display_width
+
+         if type and is_valid_enum(enum=GPTCUPTypeEnum, val=type):
+             self.type = type.value if isinstance(type, GPTCUPTypeEnum) else type
+
+         if environment and is_valid_enum(enum=GPTCUPEnvironmentEnum, val=environment):
+             self.environment = environment.value if isinstance(environment, GPTCUPEnvironmentEnum) else environment
+
+     @property
+     def schema(self) -> Dict[str, Any]:
+         return {
+             "type": self.type if isinstance(self.type, str) else self.type.value,
+             "display_width": self.display_width,
+             "display_height": self.display_height,
+             "environment": self.environment if isinstance(self.environment, str) else self.environment.value,
+         }
+
+
+ class GPTToolCUP:
+     model: str = "computer-use-preview"
+     tools: List[CUPToolSchema] = list()
+     user_prompt: str = None
+     img_url: str = None
+     reasoning_effort: str = GPTSizeEnum.MEDIUM.value
+     truncation: str = "auto"
+
+     def __init__(
+         self,
+         user_prompt: str,
+         tools: List[CUPToolSchema] | CUPToolSchema = None,
+         img_url: str = None,
+         reasoning_effort: GPTSizeEnum | str = None,
+         truncation: str = None
+     ):
+         self.user_prompt = user_prompt
+         self.truncation = truncation if truncation else self.truncation
+
+         if img_url:
+             img_url = convert_img_url(img_url)
+             self.img_url = img_url
+
+         if reasoning_effort and is_valid_enum(enum=GPTSizeEnum, val=reasoning_effort):
+             self.reasoning_effort = reasoning_effort.value if isinstance(reasoning_effort, GPTSizeEnum) else reasoning_effort
+
+         if tools:
+             match tools:
+                 case list():
+                     if self.tools:
+                         self.tools.extend(tools)
+                     else:
+                         self.tools = tools
+                 case CUPToolSchema():
+                     if self.tools:
+                         self.tools.append(tools)
+                     else:
+                         self.tools = [tools]
+                 case _:
+                     pass
+
+     def run(self):
+         raw_res = ""
+         usage = UsageMetrics()
+
+         try:
+             res = openai_client.responses.create(**self.schema)
+             if not res:
+                 usage.record_errors(ErrorType.TOOL)
+             else:
+                 raw_res = res.output[1].summary[0].text
+                 usage.record_token_usage(**res.usage.__dict__)
+             return raw_res, None, usage
+         except Exception:
+             usage.record_errors(ErrorType.TOOL)
+             return raw_res, None, usage
+
+     @property
+     def schema(self) -> Dict[str, Any]:
+         img_url = convert_img_url(self.img_url) if self.img_url else None
+         inputs = [{ "role": "user", "content": self.user_prompt }]
+
+         if img_url:
+             inputs.append({ "type": "input_image", "image_url": f"data:image/png;base64,{img_url}" })
+
+         tool_schema = [item.schema for item in self.tools]
+         schema = dict(model=self.model, tools=tool_schema, input=inputs, reasoning={ "effort": self.reasoning_effort }, truncation=self.truncation)
+         return schema
+
+
+ # Sample response shape from the Responses API:
+ # "output": [
+ #     {
+ #         "type": "reasoning",
+ #         "id": "rs_67cb...",
+ #         "summary": [
+ #             {
+ #                 "type": "summary_text",
+ #                 "text": "Exploring 'File' menu option."
+ #             }
+ #         ]
+ #     },
+ #     {
+ #         "type": "computer_call",
+ #         "id": "cu_67cb...",
+ #         "call_id": "call_nEJ...",
+ #         "action": {
+ #             "type": "click",
+ #             "button": "left",
+ #             "x": 135,
+ #             "y": 193
+ #         },
+ #         "pending_safety_checks": [
+ #             {
+ #                 "id": "cu_sc_67cb...",
+ #                 "code": "malicious_instructions",
+ #                 "message": "We've detected instructions that may cause your application to perform malicious or unauthorized actions. Please acknowledge this warning if you'd like to proceed."
+ #             }
+ #         ],
+ #         "status": "completed"
+ #     }
+ # ]
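A hypothetical call site for the computer-use tool; it requires `OPENAI_API_KEY` and account access to the `computer-use-preview` model, and `run()` returns `(text, None, UsageMetrics)`:

```python
from versionhq.tool.gpt.cup import GPTToolCUP, CUPToolSchema

tool = GPTToolCUP(
    user_prompt="Open the File menu and summarize the options.",
    tools=CUPToolSchema(display_width=1280, display_height=800, environment="browser"),
    reasoning_effort="low",
)
raw_text, _, usage = tool.run()
print(raw_text, usage)
```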
versionhq/tool/gpt/file_search.py (new file)
@@ -0,0 +1,163 @@
+ from typing import List, Dict, Any, Optional, Tuple
+
+ from versionhq.tool.gpt import openai_client
+ from versionhq.tool.gpt._enum import GPTFilterTypeEnum
+ from versionhq._utils import is_valid_enum, UsageMetrics, ErrorType
+
+
+ def is_valid_vector_store_id(id: str | list[str]) -> bool:
+     if isinstance(id, list):
+         for item in id:
+             if not item.startswith("vs_"):
+                 return False
+         return True
+     else:
+         return id.startswith("vs_")
+
+
+ class FilterSchema:
+     class Filter:
+         type: GPTFilterTypeEnum = GPTFilterTypeEnum.eq
+         property: str = None
+         value: str = None
+
+         def __init__(self, **kwargs):
+             for k, v in kwargs.items():
+                 if hasattr(self, k):
+                     setattr(self, k, v)
+
+         def _convert_to_schema(self) -> Dict[str, str] | None:
+             return { "type": self.type, "property": self.property, "value": self.value } if self.property and self.value else None
+
+     logic_type: str = "and"  # or "or"
+     filters: List[Filter] = list()
+     filter_params: Dict[str, Any] = None
+
+     def __init__(self, logic_type: str = None, filters: List[Filter] | Filter = None, filter_params: Dict[str, Any] = None, **kwargs):
+         if logic_type in ("and", "or"):
+             self.logic_type = logic_type
+
+         if filter_params:
+             filter = FilterSchema.Filter()
+             for k, v in filter_params.items():
+                 if k in FilterSchema.Filter.__dict__.keys():
+                     if k == "type" and is_valid_enum(val=v, enum=GPTFilterTypeEnum):
+                         setattr(filter, k, v if isinstance(v, str) else v.value)
+                     else:
+                         setattr(filter, k, v)
+             self.filters.append(filter)
+
+         if filters:
+             match filters:
+                 case list():
+                     if self.filters:
+                         self.filters.extend(filters)
+                     else:
+                         self.filters = filters
+
+                 case FilterSchema.Filter():
+                     if self.filters:
+                         self.filters.append(filters)
+                     else:
+                         self.filters = [filters]
+
+                 case _:
+                     pass
+
+         if kwargs:
+             for k, v in kwargs.items():
+                 if hasattr(self, k): setattr(self, k, v)
+
+     @property
+     def schema(self) -> Dict[str, Any] | None:
+         if self.logic_type and len(self.filters) > 1:
+             return {
+                 "type": self.logic_type,
+                 "filters": [item._convert_to_schema() for item in self.filters if isinstance(item, FilterSchema.Filter)]
+             }
+         elif self.filters:
+             return self.filters[0]._convert_to_schema()
+         else:
+             return None
+
+
+ class GPTToolFileSearch:
+     model: str = "gpt-4o"
+     input: str = None
+     vector_store_ids: List[str] = list()
+     max_num_results: int = 2
+     include: List[str] = ["output[*].file_search_call.search_results"]
+     filters: Optional[FilterSchema] = None
+
+     def __init__(
+         self,
+         input: str,
+         vector_store_ids: str | List[str],
+         model: str = None,
+         max_num_results: int = None,
+         include: List[str] = None,
+         filters: FilterSchema | Dict[str, Any] = None,
+         **kwargs,
+     ):
+         if not input or not vector_store_ids:
+             return None
+
+         if not is_valid_vector_store_id(id=vector_store_ids):
+             return None
+
+         self.input = input
+         self.vector_store_ids = vector_store_ids if isinstance(vector_store_ids, list) else [vector_store_ids]
+         self.model = model if model else self.model
+         self.max_num_results = max_num_results if max_num_results else self.max_num_results
+         self.include = include if include else self.include
+         self.filters = filters if filters else None
+         if kwargs:
+             for k, v in kwargs.items():
+                 if hasattr(self, k):
+                     setattr(self, k, v)
+
+     def run(self) -> Tuple[str, List[Dict[str, Any]], UsageMetrics] | None:
+         raw_res = ""
+         annotations = list()
+         usage = UsageMetrics()
+
+         try:
+             res = openai_client.responses.create(**self.schema)
+             if not res:
+                 usage.record_errors(ErrorType.TOOL)
+             else:
+                 raw_res = res.output[1].content[0].text
+                 annotations = [{ "index": item.index, "file_id": item.file_id, "filename": item.filename }
+                                for item in res.output[1].content[0].annotations]
+                 usage.record_token_usage(**res.usage.__dict__)
+             return raw_res, annotations, usage
+         except Exception:
+             usage.record_errors(ErrorType.TOOL)
+             return raw_res, annotations, usage
+
+     @property
+     def tool_schema(self) -> List[Dict[str, Any]]:
+         if self.filters:
+             return [
+                 {
+                     "type": "file_search",
+                     "vector_store_ids": self.vector_store_ids,
+                     "max_num_results": self.max_num_results,
+                     "filters": self.filters.schema,
+                 }
+             ]
+         else:
+             return [
+                 {
+                     "type": "file_search",
+                     "vector_store_ids": self.vector_store_ids,
+                     "max_num_results": self.max_num_results,
+                 }
+             ]
+
+     @property
+     def schema(self) -> Dict[str, Any]:
+         schema = dict(model=self.model, tools=self.tool_schema, input=self.input)
+         return schema
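A hypothetical call site for the file-search tool; the vector store id is a placeholder that must exist on your OpenAI account, and `run()` returns `(text, annotations, UsageMetrics)`:

```python
from versionhq.tool.gpt.file_search import GPTToolFileSearch

tool = GPTToolFileSearch(
    input="Summarize our Q3 pricing experiments.",
    vector_store_ids="vs_REPLACE_WITH_YOUR_STORE_ID",   # placeholder; must start with "vs_"
    max_num_results=3,
)
raw_text, annotations, usage = tool.run()
for a in annotations:
    print(a["filename"], a["file_id"])
```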
versionhq/tool/gpt/web_search.py (new file)
@@ -0,0 +1,89 @@
+ from typing import Dict, Any, Optional, Tuple, List
+
+ from versionhq.tool.gpt import openai_client
+ from versionhq.tool.gpt._enum import GPTSizeEnum
+ from versionhq._utils import is_valid_enum, UsageMetrics, ErrorType
+
+
+ class GPTToolWebSearch:
+     """A class to manage Web Search tools by OpenAI."""
+
+     model: str = "gpt-4o"
+     input: str = None
+     location_type: str = None  # "approximate"
+     country: str = None  # "GB"
+     city: str = None  # "London"
+     region: str = None  # "London"
+     search_content_size: str = GPTSizeEnum.MEDIUM.value
+     _user_location: Optional[Dict[str, str]] = None
+
+     def __init__(
+         self,
+         model: str = None,
+         input: str = None,
+         location_type: str = None,
+         country: str = None,
+         city: str = None,
+         region: str = None,
+         search_content_size: str | GPTSizeEnum = None,
+         **kwargs,
+     ):
+         self.model = model if model else self.model
+         self.input = input if input else self.input
+         if country and city and region:
+             self.location_type = location_type if location_type else "approximate"
+             self.country = country
+             self.city = city
+             self.region = region
+             self._user_location = dict(type=self.location_type, country=self.country, city=self.city, region=self.region)
+
+         if search_content_size and is_valid_enum(val=search_content_size, enum=GPTSizeEnum):
+             self.search_content_size = search_content_size if isinstance(search_content_size, str) else search_content_size.value
+
+         if kwargs:
+             for k, v in kwargs.items():
+                 if hasattr(self, k):
+                     setattr(self, k, v)
+
+     def run(self) -> Tuple[str, List[Dict[str, str]], UsageMetrics]:
+         """Runs the tool and returns text response, annotations, and usage metrics."""
+         raw_res = ""
+         annotations = list()
+         usage = UsageMetrics()
+
+         try:
+             res = openai_client.responses.create(**self.schema)
+             if not res:
+                 usage.record_errors(ErrorType.TOOL)
+             else:
+                 raw_res = res.output[1].content[0].text
+                 annotations = [{ "title": item.title, "url": item.url } for item in res.output[1].content[0].annotations]
+                 usage.record_token_usage(**res.usage.__dict__)
+             return raw_res, annotations, usage
+         except Exception:
+             usage.record_errors(ErrorType.TOOL)
+             return raw_res, annotations, usage
+
+     @property
+     def tool_schema(self) -> Dict[str, Any]:
+         if self._user_location:
+             return {
+                 "type": "web_search_preview",
+                 "user_location": self._user_location,
+                 "search_context_size": self.search_content_size
+             }
+         else:
+             return {
+                 "type": "web_search_preview",
+                 "search_context_size": self.search_content_size
+             }
+
+     @property
+     def schema(self) -> Dict[str, Any]:
+         schema = dict(model=self.model, tools=[self.tool_schema,], input=self.input)
+         return schema
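And a hypothetical call site for the web-search tool; passing `country`, `city`, and `region` together opts into an approximate `user_location`, and `run()` returns `(text, annotations, UsageMetrics)`:

```python
from versionhq.tool.gpt.web_search import GPTToolWebSearch

tool = GPTToolWebSearch(
    input="What changed in the latest OpenAI Responses API release?",
    country="GB", city="London", region="London",   # optional location hint
    search_content_size="low",
)
raw_text, annotations, usage = tool.run()
for a in annotations:
    print(a["title"], a["url"])
```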
{versionhq-1.2.4.3.dist-info → versionhq-1.2.4.6.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: versionhq
- Version: 1.2.4.3
+ Version: 1.2.4.6
  Summary: Autonomous agent networks for task automation with multi-step reasoning.
  Author-email: Kuriko Iwai <kuriko@versi0n.io>
  License: MIT License
@@ -295,7 +295,7 @@ def dummy_func(message: str, test1: str, test2: list[str]) -> str:

  task = vhq.Task(
      description="Amazing task",
-     pydantic_output=CustomOutput,
+     response_schema=CustomOutput,
      callback=dummy_func,
      callback_kwargs=dict(message="Hi! Here is the result: ")
  )
@@ -317,13 +317,13 @@ agent_b = vhq.Agent(role="Leader", llm="gemini-2.0")

  task_1 = vhq.Task(
      description="Analyze the client's business model.",
-     response_fields=[vhq.ResponseField(title="test1", data_type=str, required=True),],
+     response_schema=[vhq.ResponseField(title="test1", data_type=str, required=True),],
      allow_delegation=True
  )

  task_2 = vhq.Task(
      description="Define a cohort.",
-     response_fields=[vhq.ResponseField(title="test1", data_type=int, required=True),],
+     response_schema=[vhq.ResponseField(title="test1", data_type=int, required=True),],
      allow_delegation=False
  )