versionhq 1.1.9.13__py3-none-any.whl → 1.1.10.2__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -1,6 +1,6 @@
  import os
  from dotenv import load_dotenv
- from typing import Any, List, Optional
+ from typing import Any, List, Optional, Dict
  from pydantic import BaseModel, Field

  load_dotenv(override=True)
@@ -17,7 +17,7 @@ class TeamPlanner:

  def __init__(self, tasks: List[Task], planner_llm: Optional[Any] = None):
  self.tasks = tasks
- self.planner_llm = planner_llm if planner_llm else os.environ.get("LITELLM_MODEL_NAME")
+ self.planner_llm = planner_llm if planner_llm else os.environ.get("DEFAULT_MODEL_NAME")


  def _handle_assign_agents(self, unassigned_tasks: List[Task]) -> List[Any]:
@@ -42,10 +42,9 @@ class TeamPlanner:
  Based on the following task summary, draft a AI agent's role and goal in concise manner.
  Task summary: {unassgined_task.summary}
  """,
- expected_output_json=True,
- output_field_list=[
- ResponseField(title="goal", type=str, required=True),
- ResponseField(title="role", type=str, required=True),
+ response_fields=[
+ ResponseField(title="goal", data_type=str, required=True),
+ ResponseField(title="role", data_type=str, required=True),
  ],
  )
  res = task.execute_sync(agent=agent_creator)
@@ -67,7 +66,7 @@ class TeamPlanner:
  """

  from versionhq.agent.model import Agent
- from versionhq.task.model import Task, ResponseField
+ from versionhq.task.model import Task

  team_planner = Agent(
  role="team planner",
@@ -76,18 +75,18 @@ class TeamPlanner:
  )

  task_summary_list = [task.summary for task in self.tasks]
+
+ class TeamPlanIdea(BaseModel):
+ plan: str | Dict[str, Any] = Field(default=None, description="a decriptive plan to be executed by the team")
+
+
  task = Task(
  description=f"""
  Based on the following task summaries, create the most descriptive plan that the team can execute most efficiently. Take all the task summaries - task's description and tools available - into consideration. Your answer only contains a dictionary.

  Task summaries: {" ".join(task_summary_list)}
- """,
- expected_output_json=False,
- expected_output_pydantic=True,
- output_field_list=[
- ResponseField(title="task", type=str, required=True)
- for task in self.tasks
- ],
+ """,
+ pydantic_custom_output=TeamPlanIdea
  )
  output = task.execute_sync(agent=team_planner, context=context, tools=tools)
  return output
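
Taken together, the team-planner hunks above change two things: the fallback model is now read from the DEFAULT_MODEL_NAME environment variable instead of LITELLM_MODEL_NAME, and structured task output moves from `expected_output_json` / `expected_output_pydantic` / `output_field_list` to `response_fields` (with `ResponseField(data_type=...)` replacing `type=...`) or `pydantic_custom_output`. A minimal sketch of the updated call sites, assuming only the signatures visible in this diff; the agent fields beyond `role` and the model id below are hypothetical:

    import os
    from pydantic import BaseModel
    from versionhq.agent.model import Agent
    from versionhq.task.model import Task, ResponseField

    # TeamPlanner falls back to this env var when planner_llm is not passed (model id is illustrative)
    os.environ.setdefault("DEFAULT_MODEL_NAME", "gpt-4o-mini")

    agent = Agent(role="demo agent", goal="draft concise answers")  # hypothetical agent definition

    # Option 1: response_fields with data_type (formerly output_field_list with type)
    task = Task(
        description="Summarize the input in one sentence.",
        response_fields=[ResponseField(title="summary", data_type=str, required=True)],
    )
    res = task.execute_sync(agent=agent)

    # Option 2: a custom Pydantic model via pydantic_custom_output
    class PlanIdea(BaseModel):
        plan: str

    task = Task(description="Draft a plan.", pydantic_custom_output=PlanIdea)
    output = task.execute_sync(agent=agent)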
@@ -1,56 +0,0 @@
- from enum import Enum
-
- DEFAULT_AUTH_SCHEME = "OAUTH2"
-
- class ComposioAuthScheme(str, Enum):
- OAUTH2 = "OAUTH2"
- BEARER_TOKEN = "BEARER_TOKEN"
- API_KEY = "API_KEY"
-
-
- class ComposioAppName(str, Enum):
- """
- Enum to store app names that we can connect via Composio as data pipelines or destination services.
- """
-
- SALESFORCE = "salesforce"
- AIRTABLE = "airtable"
- MAILCHIMP = "mailchimp"
- HUBSPOT = "hubspot"
- KLAVIYO = "klaviyo"
- GOOGLESHEET = "googlesheets"
- GMAIL = "gmail"
- FACEBOOK = "facebook"
- TWITTER = "twitter"
- TWITTER_MEDIA = "twitter_media"
- LINKEDIN = "linkedin"
-
-
- composio_app_set = [
- (ComposioAppName.SALESFORCE, ComposioAuthScheme.OAUTH2),
- (ComposioAppName.AIRTABLE, ComposioAuthScheme.OAUTH2, ComposioAuthScheme.API_KEY, ComposioAuthScheme.BEARER_TOKEN),
- (ComposioAppName.MAILCHIMP, ComposioAuthScheme.OAUTH2),
- (ComposioAppName.HUBSPOT, ComposioAuthScheme.OAUTH2, ComposioAuthScheme.BEARER_TOKEN),
- (ComposioAppName.KLAVIYO, ComposioAuthScheme.OAUTH2, ComposioAuthScheme.API_KEY),
- (ComposioAppName.GOOGLESHEET, ComposioAuthScheme.OAUTH2),
- (ComposioAppName.GMAIL, ComposioAuthScheme.OAUTH2, ComposioAuthScheme.BEARER_TOKEN),
- (ComposioAppName.TWITTER, ComposioAuthScheme.OAUTH2),
- (ComposioAppName.TWITTER_MEDIA, ComposioAuthScheme.OAUTH2),
- (ComposioAppName.FACEBOOK, ComposioAuthScheme.OAUTH2),
- (ComposioAppName.LINKEDIN, ComposioAuthScheme.OAUTH2),
- ]
-
- class ComposioStatus(str, Enum):
- INITIATED = "INITIATED"
- ACTIVE = "ACTIVE"
- FAILED = "FAILED"
-
-
-
-
- class ComposioAction(str, Enum):
- """
- Enum to store composio's action that can be called via `Actions.xxx`
- """
- # HUBSPOT_INITIATE_DATA_IMPORT_PROCESS = "hubspot_initate_date_import_process"
- HUBSPOT_CREATE_PIPELINE_STAGE = "hubspot_create_pipeline_stage"
@@ -0,0 +1,40 @@
+ from typing import Any, Dict, Optional
+
+ from pydantic import BaseModel, PrivateAttr, Field, InstanceOf
+
+
+ class CacheHandler(BaseModel):
+ """
+ A class to add or read cache
+ """
+
+ _cache: Dict[str, Any] = PrivateAttr(default_factory=dict)
+
+ def add(self, tool_name: str, input: str, output: Any) -> None:
+ self._cache[f"{tool_name}-{input}"] = output
+
+ def read(self, tool_name: str, input: str) -> Optional[str]:
+ return self._cache.get(f"{tool_name}-{input}")
+
+
+
+ class CacheTool(BaseModel):
+ """
+ A cache tool to read the cached result.
+ """
+
+ name: str = "Cache Tool"
+ cache_handler: InstanceOf[CacheHandler] = Field(default_factory=CacheHandler)
+
+ def read_cache(self, key):
+ split = key.split("tool:")
+ tool = split[1].split("|input:")[0].strip()
+ tool_input = split[1].split("|input:")[1].strip()
+ return self.cache_handler.read(tool, tool_input)
+
+ def tool(self):
+ return Tool(
+ func=self.read_cache,
+ name=self.name,
+ description="Read from cache"
+ )
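
The new cache module above keys entries as f"{tool_name}-{input}", and CacheTool.read_cache expects a "tool:<name>|input:<input>" style key. A small usage sketch based only on the definitions shown above; the tool name and input strings are illustrative:

    handler = CacheHandler()
    handler.add(tool_name="demo_tool", input='{"query": "a"}', output="cached result")
    assert handler.read(tool_name="demo_tool", input='{"query": "a"}') == "cached result"

    cache_tool = CacheTool(cache_handler=handler)
    # the key is parsed by splitting on "tool:" and "|input:"
    print(cache_tool.read_cache('tool: demo_tool |input: {"query": "a"}'))  # -> "cached result"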
@@ -11,9 +11,10 @@ from pydantic_core import PydanticCustomError
  from composio import ComposioToolSet
  from composio_langchain import action

- from versionhq.tool import ComposioAppName, ComposioAuthScheme, composio_app_set, ComposioStatus, ComposioAction
+ from versionhq.tool.composio_tool_vars import ComposioAppName, ComposioAuthScheme, composio_app_set, ComposioStatus, ComposioAction
+ from versionhq.tool.cache_handler import CacheHandler
  from versionhq._utils.logger import Logger
- from versionhq._utils.cache_handler import CacheHandler
+

  load_dotenv(override=True)

@@ -0,0 +1,56 @@
+ from enum import Enum
+
+ DEFAULT_AUTH_SCHEME = "OAUTH2"
+
+ class ComposioAuthScheme(str, Enum):
+ OAUTH2 = "OAUTH2"
+ BEARER_TOKEN = "BEARER_TOKEN"
+ API_KEY = "API_KEY"
+
+
+ class ComposioAppName(str, Enum):
+ """
+ Enum to store app names that we can connect via Composio as data pipelines or destination services.
+ """
+
+ SALESFORCE = "salesforce"
+ AIRTABLE = "airtable"
+ MAILCHIMP = "mailchimp"
+ HUBSPOT = "hubspot"
+ KLAVIYO = "klaviyo"
+ GOOGLESHEET = "googlesheets"
+ GMAIL = "gmail"
+ FACEBOOK = "facebook"
+ TWITTER = "twitter"
+ TWITTER_MEDIA = "twitter_media"
+ LINKEDIN = "linkedin"
+
+
+ composio_app_set = [
+ (ComposioAppName.SALESFORCE, ComposioAuthScheme.OAUTH2),
+ (ComposioAppName.AIRTABLE, ComposioAuthScheme.OAUTH2, ComposioAuthScheme.API_KEY, ComposioAuthScheme.BEARER_TOKEN),
+ (ComposioAppName.MAILCHIMP, ComposioAuthScheme.OAUTH2),
+ (ComposioAppName.HUBSPOT, ComposioAuthScheme.OAUTH2, ComposioAuthScheme.BEARER_TOKEN),
+ (ComposioAppName.KLAVIYO, ComposioAuthScheme.OAUTH2, ComposioAuthScheme.API_KEY),
+ (ComposioAppName.GOOGLESHEET, ComposioAuthScheme.OAUTH2),
+ (ComposioAppName.GMAIL, ComposioAuthScheme.OAUTH2, ComposioAuthScheme.BEARER_TOKEN),
+ (ComposioAppName.TWITTER, ComposioAuthScheme.OAUTH2),
+ (ComposioAppName.TWITTER_MEDIA, ComposioAuthScheme.OAUTH2),
+ (ComposioAppName.FACEBOOK, ComposioAuthScheme.OAUTH2),
+ (ComposioAppName.LINKEDIN, ComposioAuthScheme.OAUTH2),
+ ]
+
+ class ComposioStatus(str, Enum):
+ INITIATED = "INITIATED"
+ ACTIVE = "ACTIVE"
+ FAILED = "FAILED"
+
+
+
+
+ class ComposioAction(str, Enum):
+ """
+ Enum to store composio's action that can be called via `Actions.xxx`
+ """
+ # HUBSPOT_INITIATE_DATA_IMPORT_PROCESS = "hubspot_initate_date_import_process"
+ HUBSPOT_CREATE_PIPELINE_STAGE = "hubspot_create_pipeline_stage"
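
These enums and the `composio_app_set` registry are re-added unchanged under `versionhq/tool/composio_tool_vars.py`, the import path used by the hunk above. A small sketch of how they can be queried, based only on the definitions shown here:

    from versionhq.tool.composio_tool_vars import (
        ComposioAppName, ComposioAuthScheme, ComposioStatus, composio_app_set,
    )

    # auth schemes registered for HubSpot in composio_app_set
    hubspot_schemes = [
        scheme
        for entry in composio_app_set
        if entry[0] == ComposioAppName.HUBSPOT
        for scheme in entry[1:]
    ]
    # -> [ComposioAuthScheme.OAUTH2, ComposioAuthScheme.BEARER_TOKEN]

    # str-valued enum members compare equal to their string values
    assert ComposioAppName.GOOGLESHEET == "googlesheets"
    assert ComposioStatus.ACTIVE == "ACTIVE"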
@@ -10,12 +10,11 @@ def tool(*args):
  """

  def create_tool(tool_name: str) -> Callable:
-
- def _make_tool(f: Callable) -> Tool:
- if f.__doc__ is None:
+ def _make_tool(func: Callable) -> Tool:
+ if func.__doc__ is None:
  raise ValueError("Function must have a docstring")

- if f.__annotations__ is None:
+ if func.__annotations__ is None:
  raise ValueError("Function must have type annotations")

  class_name = "".join(tool_name.split()).title()
@@ -24,11 +23,11 @@ def tool(*args):
  (BaseModel,),
  {
  "__annotations__": {
- k: v for k, v in f.__annotations__.items() if k != "return"
+ k: v for k, v in func.__annotations__.items() if k != "return"
  },
  },
  )
- return Tool(name=tool_name, function=f, args_schema=args_schema)
+ return Tool(name=tool_name, func=func, args_schema=args_schema)

  return _make_tool

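These hunks rename the wrapped callable from `f` to `func` and construct the tool with `func=` instead of `function=`, matching the reworked `Tool` model in `versionhq/tool/model.py` below. A sketch of the decorator in use, assuming `tool("...")` ultimately applies the `create_tool` / `_make_tool` factory shown above (how `tool(*args)` dispatches its arguments is not shown in this diff, and the decorated function is illustrative):

    @tool("web search")          # assumption: tool("name") resolves to create_tool("name")'s _make_tool
    def search(query: str) -> str:
        """Search the web for the given query."""   # a docstring is required by _make_tool
        return f"results for {query}"

    # _make_tool wraps the function as:
    #   Tool(name="web search", func=search, args_schema=<generated BaseModel>)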
versionhq/tool/model.py CHANGED
@@ -1,10 +1,13 @@
  from abc import ABC, abstractmethod
  from inspect import signature
- from typing import Any, Dict, Callable, Type, Optional, get_args, get_origin
+ from typing import Any, Dict, Callable, Type, Optional, get_args, get_origin, get_type_hints
  from typing_extensions import Self
- from pydantic import InstanceOf, BaseModel, ConfigDict, Field, field_validator, model_validator
+ from pydantic import InstanceOf, BaseModel, ConfigDict, Field, field_validator, model_validator, PrivateAttr, create_model
+ from pydantic_core import PydanticCustomError

- from versionhq._utils.cache_handler import CacheHandler
+ from versionhq.llm.llm_vars import SchemaType
+ from versionhq.tool.cache_handler import CacheHandler
+ from versionhq._utils.logger import Logger


  class BaseTool(ABC, BaseModel):
@@ -12,16 +15,27 @@ class BaseTool(ABC, BaseModel):
  Abstract class for Tool class.
  """

- class _ArgsSchemaPlaceholder(BaseModel):
+ class ArgsSchemaPlaceholder(BaseModel):
  pass

- args_schema: Type[BaseModel] = Field(default_factory=_ArgsSchemaPlaceholder)
+ _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
+
+ object_type: str = Field(default="function")
+ name: str = Field(default=None)
+ description: str = Field(default=None)
+ properties: Dict[str, Any] = Field(default_factory=dict, description="for llm func calling")
+ args_schema: Type[BaseModel] = Field(default_factory=ArgsSchemaPlaceholder)
+
+ tool_handler: Optional[Dict[str, Any] | Any] = Field(default=None, description="store tool_handler to record the tool usage")
+ should_cache: bool = Field(default=True, description="whether the tool usage should be cached")
+ cache_function: Callable = lambda _args=None, _result=None: True
+ cache_handler: Optional[InstanceOf[CacheHandler]] = Field(default=None)


  @field_validator("args_schema", mode="before")
  @classmethod
  def _default_args_schema(cls, v: Type[BaseModel]) -> Type[BaseModel]:
- if not isinstance(v, cls._ArgsSchemaPlaceholder):
+ if not isinstance(v, cls.ArgsSchemaPlaceholder):
  return v

  return type(
@@ -31,21 +45,29 @@ class BaseTool(ABC, BaseModel):
  )


- @abstractmethod
- def _run(self, *args: Any, **kwargs: Any,) -> Any:
- """any handling"""
-
-
-
- class Tool(BaseTool):
- name: str = Field(default=None)
- goal: str = Field(default=None)
- function: Callable = Field(default=None)
- tool_handler: Optional[Dict[str, Any] | Any] = Field(default=None, description="store tool_handler to record the usage of this tool")
- should_cache: bool = Field(default=True, description="whether the tool usage should be cached")
- cache_function: Callable = lambda _args=None, _result=None: True
- cache_handler: Optional[InstanceOf[CacheHandler]] = Field(default=None)
-
+ @field_validator("properties", mode="before")
+ @classmethod
+ def _default_properties(cls, v: Dict[str, Any]) -> Dict[str, Any]:
+ p, r = dict(), list()
+ for k, v in cls._run.__annotations__.items():
+ if k != "return":
+ p.update({ k: { "type": SchemaType(type(v)).convert(), "name": k, }} )
+ r.append(k)
+
+ return {
+ "type": cls.object_type,
+ "function": {
+ "name": cls.name.replace(" ", "_"),
+ "description": cls.description,
+ "parameters": {
+ "type": "object",
+ "properties": p,
+ "required": r,
+ "additionalProperties": False
+ },
+ "strict": True
+ }
+ }

  @model_validator(mode="after")
  def set_up_tool_handler(self) -> Self:
@@ -59,13 +81,9 @@ class Tool(BaseTool):

  return self

-
- @model_validator(mode="after")
- def set_up_function(self) -> Self:
- if self.function is None:
- self.function = self._run
- self._set_args_schema_from_func()
- return self
+ @abstractmethod
+ def _run(self, *args: Any, **kwargs: Any,) -> Any:
+ """any handling"""


  @staticmethod
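
With this hunk the abstract `_run` lives on `BaseTool` alongside the shared fields, while the `function` field and the old `set_up_function` validator are dropped in favor of the `func` field on `Tool` further below. A sketch of a direct `BaseTool` subclass under that reading; it only has to implement `_run`. The class name and field values are illustrative and not verified against the parts of the class body this diff omits:

    class EchoTool(BaseTool):
        """A minimal concrete BaseTool: only _run must be implemented."""

        def _run(self, message: str) -> str:
            return message

    echo = EchoTool(name="echo", description="echo the given message back")
    print(echo._run("hello"))  # -> "hello"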
@@ -95,108 +113,236 @@ class Tool(BaseTool):
  import json
  raw_args = json.loads(raw_args)
  except json.JSONDecodeError as e:
- raise ValueError(f"Failed to parse arguments as JSON: {e}")
+ raise ValueError(f"Failed to parse arguments as JSON: {str(e)}")

  try:
  validated_args = self.args_schema.model_validate(raw_args)
  return validated_args.model_dump()

  except Exception as e:
- raise ValueError(f"Arguments validation failed: {e}")
+ raise ValueError(f"Arguments validation failed: {str(e)}")


- def _set_args_schema_from_func(self):
- class_name = f"{self.__class__.__name__}Schema"
- self.args_schema = type(
- class_name,
- (BaseModel,),
- { "__annotations__": {
- k: v for k, v in self._run.__annotations__.items() if k != "return"
- } },
- )
+ def _create_schema(self) -> type[BaseModel]:
+ """
+ Create a Pydantic schema from a function's signature
+ """
+ import inspect
+
+ sig = inspect.signature(self.func)
+ type_hints = get_type_hints(self.func)
+ fields = {}
+ for param_name, param in sig.parameters.items():
+ if param_name in ("self", "cls"):
+ continue
+
+ annotation = type_hints.get(param_name, Any)
+ default = ... if param.default == param.empty else param.default
+ fields[param_name] = (annotation, Field(default=default))
+
+ schema_name = f"{self.func.__name__.title()}Schema"
+ return create_model(schema_name, **fields)
+
+
+
+ class Tool(BaseTool):
+ func: Callable = Field(default=None)
+
+
+ @model_validator(mode="after")
+ def validate_func(self) -> Self:
+ if not self.func and not self._run:
+ self._logger.log(level="error", message=f"Tool must have a function", color="red")
+ raise PydanticCustomError("function_missing", f"Function is missing in the tool.", {})
+
+ elif self.func and not isinstance(self.func, Callable):
+ self._logger.log(level="error", message=f"The tool is missing a valid function", color="red")
+ raise PydanticCustomError("invalid_function", f"The value in the function field must be callable.", {})
+
+ else:
+ try:
+ self.args_schema = self._create_schema_from_function()
+ self._validate_function_signature()
+
+ except Exception as e:
+ self._logger.log(level="error", message=f"The tool is missing a valid function: {str(e)}", color="red")
+ raise PydanticCustomError("invalid_function", f"Invalid function: {str(e)}", {})
+
+ return self
+
+
+ @model_validator(mode="after")
+ def set_up_name(self) -> Self:
+ if not self.name:
+ self.name = self.func.__name__ if self.func else ""
+
+ return self
+
+
+ @model_validator(mode="after")
+ def set_up_description(self) -> Self:
+ if not self.description:
+ if not self.args_schema:
+ self.args_schema = self._default_args_schema(self)
+
+ args_schema = {
+ name: {
+ "description": field.description,
+ "type": self._get_arg_annotations(field.annotation),
+ }
+ for name, field in self.args_schema.model_fields.items()
+ }
+ self.description = f"Tool: {self.name}\nArgs: {args_schema}"
+
+ return self
+
+
+ @model_validator(mode="after")
+ def set_up_args_schema(self) -> Self:
+ """
+ Set up args schema based on the given function.
+ """
+ if self.func:
+ self.args_schema = self._create_schema_from_function()
+ return self
+
+
+ @model_validator(mode="after")
+ def set_up_func_calling_properties(self) -> Self:
+ """
+ Format function_calling params from args_schema.
+ """
+
+ p, r = dict(), list()
+ if self.args_schema:
+ for name, field in self.args_schema.model_fields.items():
+ if name != "kwargs" and name != "args":
+ p.update(
+ {
+ name: {
+ "description": field.description if field.description else "",
+ "type": SchemaType(self._get_arg_annotations(field.annotation)).convert(),
+ }
+ }
+ )
+ r.append(name)
+
+ properties = {
+ "type": self.object_type,
+ "function": {
+ "name": self.name.replace(" ", "_"),
+ "description": self.description if self.description else "a tool function to execute",
+ "parameters": {
+ "type": "object",
+ "properties": p,
+ "required": r,
+ "additionalProperties": False
+ },
+ "strict": True,
+ },
+ }
+ self.properties = properties
  return self


+ def _create_schema_from_function(self) -> type[BaseModel]:
+ """
+ Create a Pydantic schema from a function's signature
+ """
+ import inspect
+
+ sig = inspect.signature(self.func)
+ type_hints = get_type_hints(self.func)
+ fields = {}
+ for param_name, param in sig.parameters.items():
+ if param_name in ("self", "cls"):
+ continue
+
+ annotation = type_hints.get(param_name, Any)
+ default = ... if param.default == param.empty else param.default
+ fields[param_name] = (annotation, Field(default=default))
+
+ schema_name = f"{self.func.__name__.title()}Schema"
+ return create_model(schema_name, **fields)
+
+
+ def _validate_function_signature(self) -> None:
+ """
+ Validate that the function signature matches the args schema.
+ """
+
+ import inspect
+
+ sig = inspect.signature(self.func)
+ schema_fields = self.args_schema.model_fields
+
+ for param_name, param in sig.parameters.items():
+ if param_name in ("self", "cls"):
+ continue
+
+ if param.kind in (inspect.Parameter.VAR_KEYWORD, inspect.Parameter.VAR_POSITIONAL):
+ continue
+
+ if param.default == inspect.Parameter.empty:
+ if param_name not in schema_fields:
+ raise ValueError(f"Required function parameter '{param_name}' not found in args_schema")
+
+
  def _run(self, *args: Any, **kwargs: Any) -> Any:
- return self.run(*args, **kwargs)
+ return self.func(*args, **kwargs)


- def run(self, *args, **kwargs) -> Any:
+ def _handle_toolset(self, params: Dict[str, Any] = None) -> Any:
  """
- Use tool and record its usage if should_cache is True.
+ Read the cache from the ToolHandler instance or execute _run() method.
  """
- from versionhq.tool.tool_handler import ToolHandler

- result = None
- tool_set = ToolSet(tool=self, kwargs={})
+ from versionhq.tool.tool_handler import ToolHandler

- if self.function:
- result = self.function(*args, **kwargs)
+ if not self.args_schema:
+ self.args_schema = self._create_schema_from_function()

- else:
- acceptable_args = self.args_schema.model_json_schema()["properties"].keys()
- acceptable_kwargs = { k: v for k, v in kwargs.items() if k in acceptable_args }
- tool_set = ToolSet(tool=self, kwargs=acceptable_kwargs)
+ result = None
+ acceptable_args = self.args_schema.model_json_schema()["properties"].keys()
+ acceptable_kwargs = { k: v for k, v in params.items() if k in acceptable_args } if params else dict()
+ parsed_kwargs = self._parse_args(raw_args=acceptable_kwargs)
+ tool_set = ToolSet(tool=self, kwargs=acceptable_kwargs)

- if self.tool_handler:
- if self.tool_handler.has_called_before(tool_set):
- self.tool_handler.error = "Agent execution error"
+ if self.tool_handler and isinstance(self.tool_handler, ToolHandler):
+ if self.tool_handler.has_called_before(tool_set):
+ self.tool_handler.error = "Agent execution error"

- elif self.tool_handler.cache:
- result = self.tools_handler.cache.read(tool=tool_set.tool.name, input=tool_set.kwargs)
- if result is None:
- parsed_kwargs = self._parse_args(raw_args=acceptable_kwargs)
- result = self.function(**parsed_kwargs) if self.function else None
+ elif self.tool_handler.cache:
+ result = self.tool_handler.cache.read(tool_name=tool_set.tool.name, input=str(tool_set.kwargs))
+ if not result:
+ result = self.func(**parsed_kwargs)

  else:
- tool_handler = ToolHandler(last_used_tool=tool_set, cache_handler=self.cache_handler, should_cache=self.should_cache)
- self.tool_handler = tool_handler
- parsed_kwargs = self._parse_args(raw_args=acceptable_kwargs)
- result = self.function(**parsed_kwargs) if self.function else None
+ result = self.func(**parsed_kwargs)
+
+ else:
+ tool_handler = ToolHandler(last_used_tool=tool_set, cache_handler=self.cache_handler, should_cache=self.should_cache)
+ self.tool_handler = tool_handler
+ result = self.func(**parsed_kwargs)


  if self.should_cache is True:
- self.tool_handler.record_last_tool_used(tool_set, result, self.should_cache)
+ self.tool_handler.record_last_tool_used(last_used_tool=tool_set, output=result, should_cache=self.should_cache)

  return result


- @property
- def description(self) -> str:
- args_schema = {
- name: {
- "description": field.description,
- "type": self._get_arg_annotations(field.annotation),
- }
- for name, field in self.args_schema.model_fields.items()
- }
-
- return f"Tool Name: {self.name}\nTool Arguments: {args_schema}\nGoal: {self.goal}"
+ def run(self, params: Dict[str, Any] = None) -> Any:
+ """
+ Execute a tool using a toolset and cached tools
+ """
+ result = self._handle_toolset(params)
+ return result


  class ToolSet(BaseModel):
  """
- Store the tool called and any kwargs used.
+ Store the tool called and any kwargs used. (The tool name and kwargs will be stored in the cache.)
  """
- tool: InstanceOf[Tool] | Any = Field(..., description="store the tool instance to be called.")
- kwargs: Optional[Dict[str, Any]] = Field(..., description="kwargs passed to the tool")
-
-
- class InstructorToolSet(BaseModel):
- tool: InstanceOf[Tool] | Any = Field(..., description="store the tool instance to be called.")
+ tool: InstanceOf[Tool] | Type[Tool] = Field(..., description="store the tool instance to be called.")
  kwargs: Optional[Dict[str, Any]] = Field(..., description="kwargs passed to the tool")
-
-
- class CacheTool(BaseModel):
- """
- Default tools to hit the cache.
- """
-
- name: str = "Hit Cache"
- cache_handler: CacheHandler = Field(default_factory=CacheHandler)
-
- def hit_cache(self, key):
- split = key.split("tool:")
- tool = split[1].split("|input:")[0].strip()
- tool_input = split[1].split("|input:")[1].strip()
- return self.cache_handler.read(tool, tool_input)
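
The reworked `Tool` now wraps a plain callable in `func`; model validators derive `name`, `description`, `args_schema`, and the OpenAI-style function-calling `properties`, and execution goes through `run(params=...)` with optional caching via `ToolHandler`. A minimal end-to-end sketch based only on the fields and methods added in this hunk; the example function is illustrative and the runtime behavior of `ToolHandler` itself is not shown in this diff:

    from versionhq.tool.model import Tool

    def multiply(a: int, b: int = 2) -> int:
        return a * b

    tool = Tool(func=multiply)                   # name defaults to "multiply"; args_schema is built from the signature
    result = tool.run(params={"a": 3, "b": 4})   # params are filtered against args_schema, then passed to func -> 12

    # function-calling payload assembled by set_up_func_calling_properties
    assert tool.properties["function"]["name"] == "multiply"
    assert tool.properties["function"]["parameters"]["required"] == ["a", "b"]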