versionhq 1.1.9.14__py3-none-any.whl → 1.1.10.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +1 -1
- versionhq/_utils/logger.py +1 -6
- versionhq/_utils/process_config.py +9 -12
- versionhq/agent/TEMPLATES/Backstory.py +4 -3
- versionhq/agent/model.py +160 -108
- versionhq/clients/product/model.py +1 -1
- versionhq/clients/workflow/model.py +1 -1
- versionhq/llm/{llm_variables.py → llm_vars.py} +101 -39
- versionhq/llm/model.py +104 -76
- versionhq/task/model.py +291 -125
- versionhq/team/model.py +2 -5
- versionhq/team/team_planner.py +12 -13
- versionhq/tool/__init__.py +0 -56
- versionhq/tool/cache_handler.py +40 -0
- versionhq/tool/composio_tool.py +3 -2
- versionhq/tool/composio_tool_vars.py +56 -0
- versionhq/tool/decorator.py +5 -6
- versionhq/tool/model.py +243 -97
- versionhq/tool/tool_handler.py +11 -19
- {versionhq-1.1.9.14.dist-info → versionhq-1.1.10.2.dist-info}/LICENSE +0 -0
- {versionhq-1.1.9.14.dist-info → versionhq-1.1.10.2.dist-info}/METADATA +26 -25
- versionhq-1.1.10.2.dist-info/RECORD +44 -0
- versionhq/_utils/cache_handler.py +0 -13
- versionhq-1.1.9.14.dist-info/RECORD +0 -43
- {versionhq-1.1.9.14.dist-info → versionhq-1.1.10.2.dist-info}/WHEEL +0 -0
- {versionhq-1.1.9.14.dist-info → versionhq-1.1.10.2.dist-info}/top_level.txt +0 -0
versionhq/__init__.py
CHANGED
versionhq/_utils/logger.py
CHANGED
@@ -12,8 +12,6 @@ class Printer:
             self._print_red(content)
         elif color == "green":
             self._print_green(content)
-        elif color == "purple":
-            self._print_purple(content)
         elif color == "blue":
             self._print_blue(content)
         elif color == "yellow":
@@ -27,9 +25,6 @@ class Printer:
     def _print_green(self, content):
         print("\033[1m\033[92m {}\033[00m".format(content))

-    def _print_purple(self, content):
-        print("\033[95m {}\033[00m".format(content))
-
     def _print_red(self, content):
         print("\033[91m {}\033[00m".format(content))

@@ -47,4 +42,4 @@ class Logger(BaseModel):
     def log(self, level, message, color="yellow"):
         if self.verbose:
             timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-            self._printer.print(f"\n
+            self._printer.print(f"\n{timestamp} - versionHQ - {level.upper()}: {message}", color=color)
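
For orientation, a minimal usage sketch of the updated Logger.log, assuming Logger accepts the `verbose` flag referenced in the hunk above (the message text is illustrative):

    from versionhq._utils.logger import Logger

    # Prints a line shaped like "\n2025-01-01 12:00:00 - versionHQ - INFO: agent ready"
    # in blue; the "purple" color option was removed in this version.
    Logger(verbose=True).log(level="info", message="agent ready", color="blue")
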
versionhq/_utils/process_config.py
CHANGED
@@ -8,19 +8,16 @@ def process_config(values_to_update: Dict[str, Any], model_class: Type[BaseModel
     Refer to the Pydantic model class for field validation.
     """

-
-        config = values_to_update.pop("config", {})
-    else:
-        return values_to_update
+    config = values_to_update.pop("config", {})

+    if config:
+        for k, v in config.items():
+            if k not in model_class.model_fields or values_to_update.get(k) is not None:
+                continue

-
-
-
-
-            if isinstance(value, dict) and isinstance(values_to_update.get(key), dict):
-                values_to_update[key].update(value)
-            else:
-                values_to_update[key] = value
+            if isinstance(v, dict) and isinstance(values_to_update.get(k), dict):
+                values_to_update[k].update(v)
+            else:
+                values_to_update[k] = v

     return values_to_update
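
For reference, a self-contained sketch replicating the new merge behavior from the hunk above: keys from a popped `config` dict are applied only when the key is a model field and has not already been set explicitly (DummyAgent is a hypothetical model used purely for illustration):

    from typing import Any, Dict, Type
    from pydantic import BaseModel

    class DummyAgent(BaseModel):
        role: str = ""
        goal: str = ""

    def process_config(values_to_update: Dict[str, Any], model_class: Type[BaseModel]) -> Dict[str, Any]:
        # Mirror of the updated function body shown above.
        config = values_to_update.pop("config", {})
        if config:
            for k, v in config.items():
                if k not in model_class.model_fields or values_to_update.get(k) is not None:
                    continue
                if isinstance(v, dict) and isinstance(values_to_update.get(k), dict):
                    values_to_update[k].update(v)
                else:
                    values_to_update[k] = v
        return values_to_update

    values = {"role": "analyst", "config": {"role": "ignored", "goal": "ship v1"}}
    print(process_config(values, DummyAgent))   # -> {'role': 'analyst', 'goal': 'ship v1'}
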
versionhq/agent/TEMPLATES/Backstory.py
CHANGED
@@ -1,3 +1,4 @@
-
-
-
+BACKSTORY_FULL="""You are an expert {role} with deep understanding of {knowledge} and highly skilled in {skillsets}. You have abilities to call the RAG tools that can {rag_tool_overview}. Your primary goal is to identify competitive solutions by leveraging your knowledge and skillsets to achieve the following goal: {goal}."""
+
+
+BACKSTORY_SHORT="""You are an expert {role} with the right skillsets and knowledge. Your primary goal is to identify competitive solutions by leveraging your knowledge and skillsets to achieve the following goal: {goal}."""
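
A quick rendering sketch for the new templates (the role and goal values are placeholders):

    from versionhq.agent.TEMPLATES.Backstory import BACKSTORY_SHORT

    # BACKSTORY_FULL additionally expects knowledge, skillsets and rag_tool_overview.
    print(BACKSTORY_SHORT.format(role="data analyst", goal="summarize weekly sales"))
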
versionhq/agent/model.py
CHANGED
@@ -1,19 +1,20 @@
 import os
 import uuid
-from typing import Any, Dict, List, Optional, TypeVar, Callable
+from typing import Any, Dict, List, Optional, TypeVar, Callable, Type
 from typing_extensions import Self
 from dotenv import load_dotenv
-
+import litellm
+
+from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator, ConfigDict
 from pydantic_core import PydanticCustomError

+from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME
+from versionhq.tool.model import Tool, ToolSet
 from versionhq._utils.logger import Logger
 from versionhq._utils.rpm_controller import RPMController
 from versionhq._utils.usage_metrics import UsageMetrics
-from versionhq.
-
-from versionhq.task import TaskOutputFormat
-from versionhq.task.model import ResponseField
-from versionhq.tool.model import Tool, ToolSet
+from versionhq._utils.process_config import process_config
+

 load_dotenv(override=True)
 T = TypeVar("T", bound="Agent")
@@ -92,18 +93,22 @@ class Agent(BaseModel):
     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
     role: str = Field(description="role of the agent - used in summary and logs")
     goal: str = Field(description="concise goal of the agent (details are set in the Task instance)")
-    backstory: Optional[str] = Field(default=None, description="
+    backstory: Optional[str] = Field(default=None, description="developer prompt to the llm")
     knowledge: Optional[str] = Field(default=None, description="external knowledge fed to the agent")
     skillsets: Optional[List[str]] = Field(default_factory=list)
-    tools: Optional[List[Tool |
+    tools: Optional[List[Tool | ToolSet | Type[Tool]]] = Field(default_factory=list)

-    #
+    # prompting
+    use_developer_prompt: Optional[bool] = Field(default=True, description="Use developer prompt when calling the llm")
+    developer_propmt_template: Optional[str] = Field(default=None, description="ddeveloper prompt template")
+    user_prompt_template: Optional[str] = Field(default=None, description="user prompt template")
+
+    # task execution rules
     team: Optional[List[Any]] = Field(default=None, description="Team to which the agent belongs")
-    allow_delegation: bool = Field(default=False,description="
-
-
-
-    step_callback: Optional[Callable | Any] = Field(default=None, description="callback to be executed after each step of the agent execution")
+    allow_delegation: bool = Field(default=False,description="if the agent can delegate the task to another agent or ask some help")
+    max_retry_limit: int = Field(default=2 ,description="max. number of retry for the task execution when an error occurs")
+    maxit: Optional[int] = Field(default=25,description="max. number of total optimization loops conducted when an error occurs")
+    callbacks: Optional[List[Callable]] = Field(default_factory=list, description="callback functions to execute after any task execution")

     # llm settings cascaded to the LLM model
     llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
@@ -112,15 +117,10 @@ class Agent(BaseModel):
     max_tokens: Optional[int] = Field(default=None, description="max. number of tokens for the agent's execution")
     max_execution_time: Optional[int] = Field(default=None, description="max. execution time for an agent to execute a task")
     max_rpm: Optional[int] = Field(default=None, description="max. number of requests per minute for the agent execution")
-
-    # prompt rules
-    use_system_prompt: Optional[bool] = Field(default=True, description="Use system prompt for the agent")
-    system_template: Optional[str] = Field(default=None, description="System format for the agent.")
-    prompt_template: Optional[str] = Field(default=None, description="Prompt format for the agent.")
-    response_template: Optional[str] = Field(default=None, description="Response format for the agent.")
+    llm_config: Optional[Dict[str, Any]] = Field(default=None, description="other llm config cascaded to the model")

     # config, cache, error handling
-    formatting_errors: int = Field(default=0, description="
+    formatting_errors: int = Field(default=0, description="number of formatting errors.")
     agent_ops_agent_name: str = None
     agent_ops_agent_id: str = None

@@ -132,6 +132,21 @@ class Agent(BaseModel):
             raise PydanticCustomError("may_not_set_field", "This field is not to be set by the user.", {})


+    # @field_validator(mode="before")
+    # def set_up_from_config(cls) -> None:
+    # if cls.config is not None:
+    # try:
+    # for k, v in cls.config.items():
+    # setattr(cls, k, v)
+    # except:
+    # pass
+
+    @model_validator(mode="before")
+    @classmethod
+    def process_model_config(cls, values: Dict[str, Any]) -> None:
+        return process_config(values_to_update=values, model_class=cls)
+
+
     @model_validator(mode="after")
     def validate_required_fields(self) -> Self:
         required_fields = ["role", "goal"]
@@ -145,7 +160,7 @@ class Agent(BaseModel):
     def set_up_llm(self) -> Self:
        """
        Set up the base model and function calling model (if any) using the LLM class.
-       Pass the model config params: `llm`, `max_tokens`, `max_execution_time`, `
+       Pass the model config params: `llm`, `max_tokens`, `max_execution_time`, `callbacks`,`respect_context_window` to the LLM class.
        The base model is selected on the client app, else use the default model.
        """

@@ -231,6 +246,34 @@ class Agent(BaseModel):
         return self


+    def _set_llm_params(self, llm: LLM, config: Dict[str, Any] = None) -> LLM:
+        """
+        After setting up an LLM instance, add params to the instance.
+        Prioritize the agent's settings over the model's base setups.
+        """
+
+        llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
+        llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
+
+        # if self.callbacks:
+        # llm.callbacks = self.callbacks
+        # llm._set_callbacks(llm.callbacks)
+
+        if self.respect_context_window == False:
+            llm.context_window_size = DEFAULT_CONTEXT_WINDOW_SIZE
+
+        config = self.config.update(config) if self.config else config
+        if config:
+            valid_params = litellm.get_supported_openai_params(model=llm.model)
+            for k, v in config.items():
+                try:
+                    if k in valid_params and v is not None:
+                        setattr(llm, k, v)
+                except:
+                    pass
+        return llm
+
+
     @model_validator(mode="after")
     def set_up_tools(self) -> Self:
         """
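
The config filtering above relies on litellm's parameter introspection; a standalone sketch of that step (the model name and config values are arbitrary examples):

    import litellm

    config = {"temperature": 0.2, "top_p": 0.9, "not_a_real_param": 1}
    valid_params = litellm.get_supported_openai_params(model="gpt-4o")

    # Only keys the target model actually supports are applied to the LLM instance.
    filtered = {k: v for k, v in config.items() if k in valid_params and v is not None}
    print(filtered)   # e.g. {'temperature': 0.2, 'top_p': 0.9}
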
@@ -241,25 +284,21 @@ class Agent(BaseModel):

         else:
             tool_list = []
-            def empty_func():
-                return "empty function"

             for item in self.tools:
                 if isinstance(item, Tool):
                     tool_list.append(item)

-                elif isinstance(item, dict):
-                    if "function" not in item:
-                        setattr(item, "function", empty_func)
+                elif isinstance(item, dict) and "func" in item:
                     tool = Tool(**item)
                     tool_list.append(tool)

-                elif
-
-                    tool_list.append(tool)
+                elif type(item) is Tool and hasattr(item, "func"):
+                    tool_list.append(item)

                 else:
-
+                    self._logger.log(level="error", message=f"Tool {str(item)} is missing a function.", color="red")
+                    raise PydanticCustomError("invalid_tool", f"The tool {str(item)} is missing a function.", {})

         self.tools = tool_list

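
Per the updated validator above, a dict in `tools` is only accepted when it carries a "func" key; a hypothetical construction sketch (the "name" field is an assumption based on how tool names are read in the following hunk, and the Tool model may require other fields):

    from versionhq.agent.model import Agent

    def fetch_rates(currency: str) -> str:
        return f"latest rates for {currency}"

    # The dict form is converted into a Tool via Tool(**item); entries without
    # a callable now raise an "invalid_tool" error instead of being stubbed out.
    agent = Agent(
        role="fx analyst",
        goal="track exchange rates",
        tools=[{"name": "rate fetcher", "func": fetch_rates}],
    )
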
@@ -273,108 +312,116 @@ class Agent(BaseModel):
         """

         if self.backstory is None:
-            from versionhq.agent.TEMPLATES.Backstory import
-            backstory =
-
-
-
-
-
-
+            from versionhq.agent.TEMPLATES.Backstory import BACKSTORY_FULL, BACKSTORY_SHORT
+            backstory = ""
+
+            if self.tools or self.knowledge or self.skillsets:
+                backstory = BACKSTORY_FULL.format(
+                    role=self.role,
+                    goal=self.goal,
+                    knowledge=self.knowledge if isinstance(self.knowledge, str) else None,
+                    skillsets=", ".join([item for item in self.skillsets]),
+                    rag_tool_overview=", ".join([item.name for item in self.tools if hasattr(item, "name")]) if self.tools else "",
+                )
+            else:
+                backstory = BACKSTORY_SHORT.format(role=self.role, goal=self.goal)
+
         self.backstory = backstory

         return self


-    def
+    def invoke(
+        self,
+        prompts: str,
+        response_format: Optional[Dict[str, Any]] = None,
+        tools: Optional[List[Tool | ToolSet | Type[Tool]]] = None,
+        tool_res_as_final: bool = False
+    ) -> Dict[str, Any]:
         """
-
-
+        Create formatted prompts using the developer prompt and the agent's backstory, then call the base model.
+        - Execute the task up to `self.max_retry_limit` times in case of receiving an error or empty response.
+        - Pass the task_tools to the model to let them execute.
         """

-
-
-
-
-        llm.callbacks = [self.step_callback, ]
-        llm._set_callbacks(llm.callbacks)
-
-        if self.respect_context_window == False:
-            llm.context_window_size = DEFAULT_CONTEXT_WINDOW_SIZE
+        task_execution_counter = 0
+        iterations = 0
+        raw_response = None
+        messages = []

-
-
-
-
-        except:
-            pass
-        return llm
+        messages.append({"role": "user", "content": prompts})
+        if self.use_developer_prompt:
+            messages.append({"role": "system", "content": self.backstory})
+        self._logger.log(level="info", message=f"Messages sent to the model: {messages}", color="blue")

+        try:
+            if tool_res_as_final is True:
+                func_llm = self.function_calling_llm if self.function_calling_llm and self.function_calling_llm._supports_function_calling() else LLM(model=DEFAULT_MODEL_NAME)
+                raw_response = func_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
+            else:
+                raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)

-
-
-        Receive the system prompt in string and create formatted prompts using the system prompt and the agent's backstory.
-        Then call the base model.
-        When encountering errors, we try the task execution up to `self.max_retry_limit` times.
-        """
+            task_execution_counter += 1
+            self._logger.log(level="info", message=f"Agent response: {raw_response}", color="blue")

-
+            if raw_response and self.callbacks:
+                for item in self.callbacks:
+                    raw_response = item(raw_response)

-
-
-            messages.append({"role": "assistant", "content": self.backstory})
-        self._logger.log(level="info", message=f"Messages sent to the model: {messages}", color="blue")
+        except Exception as e:
+            self._logger.log(level="error", message=f"An error occured. The agent will retry: {str(e)}", color="red")

-
-
-
+            while not raw_response and task_execution_counter < self.max_retry_limit:
+                while not raw_response and iterations < self.maxit:
+                    raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)
+                    iterations += 1

-        if (raw_response is None or raw_response == "") and task_execution_counter < self.max_retry_limit:
-            while task_execution_counter <= self.max_retry_limit:
-                raw_response = self.llm.call(messages=messages, output_formats=output_formats, field_list=response_fields)
                 task_execution_counter += 1
-                self._logger.log(level="info", message=f"Agent
+                self._logger.log(level="info", message=f"Agent #{task_execution_counter} response: {raw_response}", color="blue")

-
-
-
+            if raw_response and self.callbacks:
+                for item in self.callbacks:
+                    raw_response = item(raw_response)
+
+        if not raw_response:
+            self._logger.log(level="error", message="Received None or empty response from the model", color="red")
+            raise ValueError("Invalid response from LLM call - None or empty.")

         return raw_response


-    def execute_task(self, task, context: Optional[str] = None) -> str:
+    def execute_task(self, task, context: Optional[str] = None, task_tools: Optional[List[Tool | ToolSet]] = None) -> str:
         """
         Execute the task and return the response in string.
         The agent utilizes the tools in task or their own tools if the task.can_use_agent_tools is True.
         The agent must consider the context to excute the task as well when it is given.
         """
+        from versionhq.task.model import Task

-
-
-        task_prompt += context
-
-        tool_results = []
-        if task.tools:
-            for item in task.tools:
-                if isinstance(item, ToolSet):
-                    tool_result = item.tool.run(**item.kwargs)
-                    tool_results.append(tool_result)
-                elif isinstance(item, Tool):
-                    tool_result = item.run()
-                    tool_results.append(tool_result)
-                else:
-                    try:
-                        item.run()
-                    except:
-                        pass
+        task: InstanceOf[Task] = task
+        tools: Optional[List[Tool | ToolSet | Type[Tool]]] = task_tools + self.tools if task.can_use_agent_tools else task_tools

-
-
-
-                        tool_results.append(tool_result)
+        task_prompt = task.prompt(model_provider=self.llm.provider)
+        if context is not task.prompt_context:
+            task_prompt += context

-
-
+        # if agent_tools_to_run_without_llm:
+        # tool_results = []
+        # for item in agent_tools_to_run_without_llm:
+        # if isinstance(item, ToolSet):
+        # tool_result = item.tool.run(**item.kwargs)
+        # tool_results.append(tool_result)
+        # elif isinstance(item, Tool):
+        # tool_result = item.run()
+        # tool_results.append(tool_result)
+        # else:
+        # try:
+        # item.run()
+        # except:
+        # pass
+
+        # if task.tool_res_as_final is True:
+        # return tool_results

         # if self.team and self.team._train:
         # task_prompt = self._training_handler(task_prompt=task_prompt)
@@ -382,17 +429,22 @@ class Agent(BaseModel):
         # task_prompt = self._use_trained_data(task_prompt=task_prompt)

         try:
+            self._times_executed += 1
             raw_response = self.invoke(
                 prompts=task_prompt,
-
-
+                response_format=task._structure_response_format(model_provider=self.llm.provider),
+                tools=tools,
+                tool_res_as_final=task.tool_res_as_final,
             )

         except Exception as e:
             self._times_executed += 1
+            self._logger.log(level="error", message=f"The agent failed to execute the task. Error: {str(e)}", color="red")
+            raw_response = self.execute_task(task, context, task_tools)
+
             if self._times_executed > self.max_retry_limit:
+                self._logger.log(level="error", message=f"Max retry limit has exceeded.", color="red")
                 raise e
-            raw_response = self.execute_task(task, context)

         if self.max_rpm and self._rpm_controller:
             self._rpm_controller.stop_rpm_counter()
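
Taken together, the agent changes replace the old prompt-template fields with developer-prompt flags and add retry controls; a hypothetical construction reflecting the new fields (exact defaults and any additional required setup depend on the rest of the Agent model):

    from versionhq.agent.model import Agent

    agent = Agent(
        role="market analyst",
        goal="identify competitive solutions",
        max_retry_limit=3,                          # retries when task execution errors out
        maxit=10,                                   # cap on optimization loops per retry
        config={"use_developer_prompt": False},     # merged into fields by process_model_config
    )
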
versionhq/clients/product/model.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Any, Dict, List, Callable, Type, Optional, get_args, get_orig
 from pydantic import UUID4, InstanceOf, BaseModel, ConfigDict, Field, create_model, field_validator, model_validator
 from pydantic_core import PydanticCustomError

-from versionhq.tool import ComposioAppName
+from versionhq.tool.composio_tool_vars import ComposioAppName


 class ProductProvider(ABC, BaseModel):
versionhq/clients/workflow/model.py
CHANGED
@@ -10,7 +10,7 @@ from versionhq.clients.product.model import Product
 from versionhq.clients.customer.model import Customer
 from versionhq.agent.model import Agent
 from versionhq.team.model import Team
-from versionhq.tool import ComposioAppName
+from versionhq.tool.composio_tool_vars import ComposioAppName


 class ScoreFormat:
|