versionhq 1.1.9.13__py3-none-any.whl → 1.1.10.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/__init__.py CHANGED
@@ -18,7 +18,7 @@ from versionhq.tool.model import Tool
 from versionhq.tool.composio_tool import ComposioHandler


-__version__ = "1.1.9.13"
+__version__ = "1.1.10.2"
 __all__ = [
     "Agent",
     "Customer",
versionhq/_utils/logger.py CHANGED
@@ -12,8 +12,6 @@ class Printer:
             self._print_red(content)
         elif color == "green":
             self._print_green(content)
-        elif color == "purple":
-            self._print_purple(content)
         elif color == "blue":
             self._print_blue(content)
         elif color == "yellow":
@@ -27,9 +25,6 @@ class Printer:
     def _print_green(self, content):
         print("\033[1m\033[92m {}\033[00m".format(content))

-    def _print_purple(self, content):
-        print("\033[95m {}\033[00m".format(content))
-
     def _print_red(self, content):
         print("\033[91m {}\033[00m".format(content))

@@ -47,4 +42,4 @@ class Logger(BaseModel):
     def log(self, level, message, color="yellow"):
         if self.verbose:
            timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-            self._printer.print(f"\n[{timestamp}][{level.upper()}]: {message}", color=color)
+            self._printer.print(f"\n{timestamp} - versionHQ - {level.upper()}: {message}", color=color)
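For reference, the new log-line shape can be reproduced with the standard library alone; this is a sketch of the output format only (the level and message are sample values), not the package's Logger implementation:

    from datetime import datetime

    # Mirrors the f-string introduced in the hunk above.
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    print(f"\n{timestamp} - versionHQ - INFO: agent created")
    # prints e.g. "2025-01-05 12:00:00 - versionHQ - INFO: agent created"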
versionhq/_utils/process_config.py CHANGED
@@ -8,19 +8,16 @@ def process_config(values_to_update: Dict[str, Any], model_class: Type[BaseModel
     Refer to the Pydantic model class for field validation.
     """

-    if hasattr(values_to_update, "config"):
-        config = values_to_update.pop("config", {})
-    else:
-        return values_to_update
+    config = values_to_update.pop("config", {})

+    if config:
+        for k, v in config.items():
+            if k not in model_class.model_fields or values_to_update.get(k) is not None:
+                continue

-    for key, value in config.items():
-        if key not in model_class.model_fields or values_to_update.get(key) is not None:
-            continue
-
-        if isinstance(value, dict) and isinstance(values_to_update.get(key), dict):
-            values_to_update[key].update(value)
-        else:
-            values_to_update[key] = value
+            if isinstance(v, dict) and isinstance(values_to_update.get(k), dict):
+                values_to_update[k].update(v)
+            else:
+                values_to_update[k] = v

     return values_to_update
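As a rough usage sketch of the refactored helper (the model class and field names below are hypothetical, not part of the package), the new logic pops `config` and back-fills only fields that exist on the model and were not set explicitly:

    from typing import Any, Dict, Optional, Type
    from pydantic import BaseModel

    class DemoModel(BaseModel):  # hypothetical model for illustration
        role: str = "agent"
        goal: Optional[str] = None

    def merge_config(values: Dict[str, Any], model_class: Type[BaseModel]) -> Dict[str, Any]:
        # Same shape as the new process_config: pop `config`, then fill unset model fields.
        config = values.pop("config", {})
        if config:
            for k, v in config.items():
                if k not in model_class.model_fields or values.get(k) is not None:
                    continue
                if isinstance(v, dict) and isinstance(values.get(k), dict):
                    values[k].update(v)
                else:
                    values[k] = v
        return values

    print(merge_config({"role": "writer", "config": {"goal": "draft a post", "role": "ignored"}}, DemoModel))
    # {'role': 'writer', 'goal': 'draft a post'}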
versionhq/agent/TEMPLATES/Backstory.py CHANGED
@@ -1,3 +1,4 @@
-BACKSTORY="""You are a {role} with deep understanding of {knowledge} and highly skilled in {skillsets}.
-You have access to call the RAG tools that can {rag_tool_overview}. By leveraging these tools, your knowledge, and skillsets, you can identify competitive strategies that have been proven effective to achieve the goal: {goal}. Take these into consideration, create innovative solutions.
-"""
+BACKSTORY_FULL="""You are an expert {role} with deep understanding of {knowledge} and highly skilled in {skillsets}. You have abilities to call the RAG tools that can {rag_tool_overview}. Your primary goal is to identify competitive solutions by leveraging your knowledge and skillsets to achieve the following goal: {goal}."""
+
+
+BACKSTORY_SHORT="""You are an expert {role} with the right skillsets and knowledge. Your primary goal is to identify competitive solutions by leveraging your knowledge and skillsets to achieve the following goal: {goal}."""
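A minimal sketch of how the new short template renders once an agent's role and goal are substituted (the role and goal strings here are sample inputs; the template text is copied from the hunk above):

    BACKSTORY_SHORT = "You are an expert {role} with the right skillsets and knowledge. Your primary goal is to identify competitive solutions by leveraging your knowledge and skillsets to achieve the following goal: {goal}."
    print(BACKSTORY_SHORT.format(role="data analyst", goal="summarize weekly sales"))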
versionhq/agent/model.py CHANGED
@@ -1,19 +1,20 @@
 import os
 import uuid
-from typing import Any, Dict, List, Optional, TypeVar
+from typing import Any, Dict, List, Optional, TypeVar, Callable, Type
 from typing_extensions import Self
 from dotenv import load_dotenv
-from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator
+import litellm
+
+from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator, ConfigDict
 from pydantic_core import PydanticCustomError

+from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME
+from versionhq.tool.model import Tool, ToolSet
 from versionhq._utils.logger import Logger
 from versionhq._utils.rpm_controller import RPMController
 from versionhq._utils.usage_metrics import UsageMetrics
-from versionhq.llm.llm_vars import LLM_VARS
-from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW
-from versionhq.task import TaskOutputFormat
-from versionhq.task.model import ResponseField
-from versionhq.tool.model import Tool, ToolSet
+from versionhq._utils.process_config import process_config
+

 load_dotenv(override=True)
 T = TypeVar("T", bound="Agent")
@@ -87,48 +88,43 @@ class Agent(BaseModel):
     _request_within_rpm_limit: Any = PrivateAttr(default=None)
     _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
     _times_executed: int = PrivateAttr(default=0)
+    config: Optional[Dict[str, Any]] = Field(default=None, exclude=True, description="values to add to the Agent class")

     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
     role: str = Field(description="role of the agent - used in summary and logs")
     goal: str = Field(description="concise goal of the agent (details are set in the Task instance)")
-    backstory: Optional[str] = Field(default=None, description="system context passed to the LLM")
+    backstory: Optional[str] = Field(default=None, description="developer prompt to the llm")
     knowledge: Optional[str] = Field(default=None, description="external knowledge fed to the agent")
     skillsets: Optional[List[str]] = Field(default_factory=list)
-    tools: Optional[List[Tool | Any]] = Field(default_factory=list)
+    tools: Optional[List[Tool | ToolSet | Type[Tool]]] = Field(default_factory=list)

-    # team, task execution rules
+    # prompting
+    use_developer_prompt: Optional[bool] = Field(default=True, description="Use developer prompt when calling the llm")
+    developer_propmt_template: Optional[str] = Field(default=None, description="ddeveloper prompt template")
+    user_prompt_template: Optional[str] = Field(default=None, description="user prompt template")
+
+    # task execution rules
     team: Optional[List[Any]] = Field(default=None, description="Team to which the agent belongs")
-    allow_delegation: bool = Field(default=False,description="Enable agent to delegate and ask questions among each other")
-    allow_code_execution: Optional[bool] = Field(default=False, description="Enable code execution for the agent.")
-    max_retry_limit: int = Field(default=2,description="max. number of retries for the task execution when an error occurs. cascaed to the `invoke` function")
-    max_iter: Optional[int] = Field(default=25,description="max. number of iterations for an agent to execute a task")
-    step_callback: Optional[Any] = Field(default=None,description="Callback to be executed after each step of the agent execution")
+    allow_delegation: bool = Field(default=False,description="if the agent can delegate the task to another agent or ask some help")
+    max_retry_limit: int = Field(default=2 ,description="max. number of retry for the task execution when an error occurs")
+    maxit: Optional[int] = Field(default=25,description="max. number of total optimization loops conducted when an error occurs")
+    callbacks: Optional[List[Callable]] = Field(default_factory=list, description="callback functions to execute after any task execution")

     # llm settings cascaded to the LLM model
-    llm: str | InstanceOf[LLM] | Any = Field(default=None)
-    function_calling_llm: str | InstanceOf[LLM] | Any = Field(default=None)
+    llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
+    function_calling_llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
     respect_context_window: bool = Field(default=True,description="Keep messages under the context window size by summarizing content")
     max_tokens: Optional[int] = Field(default=None, description="max. number of tokens for the agent's execution")
     max_execution_time: Optional[int] = Field(default=None, description="max. execution time for an agent to execute a task")
     max_rpm: Optional[int] = Field(default=None, description="max. number of requests per minute for the agent execution")
-
-    # prompt rules
-    use_system_prompt: Optional[bool] = Field(default=True, description="Use system prompt for the agent")
-    system_template: Optional[str] = Field(default=None, description="System format for the agent.")
-    prompt_template: Optional[str] = Field(default=None, description="Prompt format for the agent.")
-    response_template: Optional[str] = Field(default=None, description="Response format for the agent.")
+    llm_config: Optional[Dict[str, Any]] = Field(default=None, description="other llm config cascaded to the model")

     # config, cache, error handling
-    config: Optional[Dict[str, Any]] = Field(default=None, exclude=True, description="Configuration for the agent")
-    formatting_errors: int = Field(default=0, description="Number of formatting errors.")
+    formatting_errors: int = Field(default=0, description="number of formatting errors.")
     agent_ops_agent_name: str = None
     agent_ops_agent_id: str = None


-    def __repr__(self):
-        return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
-
-
     @field_validator("id", mode="before")
     @classmethod
     def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
@@ -136,12 +132,27 @@ class Agent(BaseModel):
             raise PydanticCustomError("may_not_set_field", "This field is not to be set by the user.", {})


+    # @field_validator(mode="before")
+    # def set_up_from_config(cls) -> None:
+    #     if cls.config is not None:
+    #         try:
+    #             for k, v in cls.config.items():
+    #                 setattr(cls, k, v)
+    #         except:
+    #             pass
+
+    @model_validator(mode="before")
+    @classmethod
+    def process_model_config(cls, values: Dict[str, Any]) -> None:
+        return process_config(values_to_update=values, model_class=cls)
+
+
     @model_validator(mode="after")
     def validate_required_fields(self) -> Self:
         required_fields = ["role", "goal"]
         for field in required_fields:
             if getattr(self, field) is None:
-                raise ValueError( f"{field} must be provided either directly or through config")
+                raise ValueError(f"{field} must be provided either directly or through config")
         return self


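The new `process_model_config` validator means constructor arguments can also arrive wrapped in a single `config` dict. A minimal standalone sketch of the same pattern, using a hypothetical pydantic model rather than the real Agent class:

    from typing import Any, Dict, Optional
    from pydantic import BaseModel, model_validator

    class MiniAgent(BaseModel):  # hypothetical stand-in for the Agent class
        role: str
        goal: Optional[str] = None
        config: Optional[Dict[str, Any]] = None

        @model_validator(mode="before")
        @classmethod
        def process_model_config(cls, values: Dict[str, Any]) -> Dict[str, Any]:
            # Same idea as the validator above: merge `config` into the raw values
            # before field validation runs.
            config = values.pop("config", {}) if isinstance(values, dict) else {}
            for k, v in (config or {}).items():
                if k in cls.model_fields and values.get(k) is None:
                    values[k] = v
            return values

    agent = MiniAgent(role="researcher", config={"goal": "collect market data"})
    print(agent.goal)  # collect market data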
@@ -149,117 +160,120 @@ class Agent(BaseModel):
     def set_up_llm(self) -> Self:
         """
         Set up the base model and function calling model (if any) using the LLM class.
-        Pass the model config params: `llm`, `max_tokens`, `max_execution_time`, `step_callback`,`respect_context_window` to the LLM class.
+        Pass the model config params: `llm`, `max_tokens`, `max_execution_time`, `callbacks`,`respect_context_window` to the LLM class.
         The base model is selected on the client app, else use the default model.
         """

         self.agent_ops_agent_name = self.role
-        unaccepted_attributes = ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"]
-        callbacks = ([self.step_callback,]if self.step_callback is not None else [])
+        # unaccepted_attributes = ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"]

         if isinstance(self.llm, LLM):
-            self.llm.timeout = self.max_execution_time
-            self.llm.max_tokens = self.max_tokens
-            self.llm.context_window_size = (self.llm.get_context_window_size() if self.respect_context_window == True else DEFAULT_CONTEXT_WINDOW)
-            self.llm.callbacks = callbacks
+            llm = self._set_llm_params(self.llm)
+            self.llm = llm

         elif isinstance(self.llm, str) or self.llm is None:
-            model_name = os.environ.get("LITELLM_MODEL_NAME", os.environ.get("MODEL", "gpt-3.5-turbo"))
-            llm_params = {
-                "model": model_name if self.llm is None else self.llm,
-                "timeout": self.max_execution_time,
-                "max_tokens": self.max_tokens,
-                "callbacks": callbacks,
-                "api_key": os.environ.get("LITELLM_API_KEY", None),
-                "base_url": os.environ.get("OPENAI_API_BASE", os.environ.get("OPENAI_BASE_URL", None))
-            }
-
-            set_provider = model_name.split("/")[0] if "/" in model_name else "openai" #! REFINEME
-            for provider, env_vars in LLM_VARS.items():
-                if provider == set_provider:
-                    for env_var in env_vars:
-                        key_name = env_var.get("key_name")
-
-                        if key_name and key_name not in unaccepted_attributes:
-                            env_value = os.environ.get(key_name)
-                            if env_value:
-                                key_name = ("api_key" if "API_KEY" in key_name else key_name)
-                                key_name = ("api_base" if "API_BASE" in key_name else key_name)
-                                key_name = ("api_version" if "API_VERSION" in key_name else key_name)
-                                llm_params[key_name] = env_value
-                        elif env_var.get("default", False):
-                            for key, value in env_var.items():
-                                if key not in ["prompt", "key_name", "default"]:
-                                    if key in os.environ:
-                                        llm_params[key] = value
-            self.llm = LLM(**llm_params)
-            context_window_size = (self.llm.get_context_window_size() if self.respect_context_window == True else DEFAULT_CONTEXT_WINDOW)
-            self.llm.context_window_size = context_window_size
+            model_name = self.llm if self.llm is not None else DEFAULT_MODEL_NAME
+            llm = LLM(model=model_name)
+            updated_llm = self._set_llm_params(llm)
+            self.llm = updated_llm

         else:
-            llm_params = {
-                "model": (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm)),
-                "max_tokens": (getattr(self.llm, "max_tokens") or self.max_tokens or 3000),
-                "timeout": getattr(self.llm, "timeout", self.max_execution_time),
-                "callbacks": getattr(self.llm, "callbacks") or callbacks,
-                "temperature": getattr(self.llm, "temperature", None),
-                "logprobs": getattr(self.llm, "logprobs", None),
-                "api_key": getattr(self.llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
-                "base_url": getattr(self.llm, "base_url", None),
-                "organization": getattr(self.llm, "organization", None),
-            }
-            llm_params = { k: v for k, v in llm_params.items() if v is not None }
-            self.llm = LLM(**llm_params)
+            if isinstance(self.llm, dict):
+                model_name = self.llm.pop("model_name", self.llm.pop("deployment_name", str(self.llm)))
+                llm = LLM(model=model_name if model_name is not None else DEFAULT_MODEL_NAME)
+                updated_llm = self._set_llm_params(llm, { k: v for k, v in self.llm.items() if v is not None })
+                self.llm = updated_llm
+
+            else:
+                model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
+                llm = LLM(model=model_name)
+                llm_params = {
+                    "max_tokens": (getattr(self.llm, "max_tokens") or self.max_tokens or 3000),
+                    "timeout": getattr(self.llm, "timeout", self.max_execution_time),
+                    "callbacks": getattr(self.llm, "callbacks", None),
+                    "temperature": getattr(self.llm, "temperature", None),
+                    "logprobs": getattr(self.llm, "logprobs", None),
+                    "api_key": getattr(self.llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
+                    "base_url": getattr(self.llm, "base_url", None),
+                }
+                updated_llm = self._set_llm_params(llm, llm_params)
+                self.llm = updated_llm
+

         """
-        Set up funcion_calling LLM as well. For the sake of convenience, use the same metrics as the base LLM settings.
+        Set up funcion_calling LLM as well.
+        Check if the model supports function calling, setup LLM instance accordingly, using the same params with the LLM.
         """
         if self.function_calling_llm:
             if isinstance(self.function_calling_llm, LLM):
-                self.function_calling_llm.timeout = self.max_execution_time
-                self.function_calling_llm.max_tokens = self.max_tokens
-                self.function_calling_llm.callbacks = callbacks
-                context_window_size = (
-                    self.function_calling_llm.get_context_window_size()
-                    if self.respect_context_window == True
-                    else DEFAULT_CONTEXT_WINDOW
-                )
-                self.function_calling_llm.context_window_size = context_window_size
+                if self.function_calling_llm._supports_function_calling() == False:
+                    self.function_calling_llm = LLM(model=DEFAULT_MODEL_NAME)
+
+                updated_llm = self._set_llm_params(self.function_calling_llm)
+                self.function_calling_llm = updated_llm

             elif isinstance(self.function_calling_llm, str):
-                self.function_calling_llm = LLM(
-                    model=self.function_calling_llm,
-                    timeout=self.max_execution_time,
-                    max_tokens=self.max_tokens,
-                    callbacks=callbacks,
-                )
-                context_window_size = (
-                    self.function_calling_llm.get_context_window_size()
-                    if self.respect_context_window == True
-                    else DEFAULT_CONTEXT_WINDOW
-                )
-                self.function_calling_llm.context_window_size = context_window_size
+                llm = LLM(model=self.function_calling_llm)
+
+                if llm._supports_function_calling() == False:
+                    llm = LLM(model=DEFAULT_MODEL_NAME)
+
+                updated_llm = self._set_llm_params(llm)
+                self.function_calling_llm = updated_llm

             else:
-                model_name = getattr(
-                    self.function_calling_llm,
-                    "model_name",
-                    getattr(
-                        self.function_calling_llm,
-                        "deployment_name",
-                        str(self.function_calling_llm),
-                    ),
-                )
-                if model_name is not None or model_name != "":
-                    self.function_calling_llm = LLM(
-                        model=model_name,
-                        timeout=self.max_execution_time,
-                        max_tokens=self.max_tokens,
-                        callbacks=callbacks,
-                    )
+                if isinstance(self.function_calling_llm, dict):
+                    model_name = self.function_calling_llm.pop("model_name", self.function_calling_llm.pop("deployment_name", str(self.function_calling_llm)))
+                    llm = LLM(model=model_name)
+                    updated_llm = self._set_llm_params(llm, { k: v for k, v in self.function_calling_llm.items() if v is not None })
+                    self.function_calling_llm = updated_llm
+
+                else:
+                    model_name = (getattr(self.function_calling_llm, "model_name") or getattr(self.function_calling_llm, "deployment_name") or str(self.function_calling_llm))
+                    llm = LLM(model=model_name)
+                    llm_params = {
+                        "max_tokens": (getattr(self.function_calling_llm, "max_tokens") or self.max_tokens or 3000),
+                        "timeout": getattr(self.function_calling_llm, "timeout", self.max_execution_time),
+                        "callbacks": getattr(self.function_calling_llm, "callbacks", None),
+                        "temperature": getattr(self.function_calling_llm, "temperature", None),
+                        "logprobs": getattr(self.function_calling_llm, "logprobs", None),
+                        "api_key": getattr(self.function_calling_llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
+                        "base_url": getattr(self.function_calling_llm, "base_url", None),
+                    }
+                    updated_llm = self._set_llm_params(llm, llm_params)
+                    self.function_calling_llm = updated_llm
+
         return self


+    def _set_llm_params(self, llm: LLM, config: Dict[str, Any] = None) -> LLM:
+        """
+        After setting up an LLM instance, add params to the instance.
+        Prioritize the agent's settings over the model's base setups.
+        """
+
+        llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
+        llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
+
+        # if self.callbacks:
+        #     llm.callbacks = self.callbacks
+        #     llm._set_callbacks(llm.callbacks)
+
+        if self.respect_context_window == False:
+            llm.context_window_size = DEFAULT_CONTEXT_WINDOW_SIZE
+
+        config = self.config.update(config) if self.config else config
+        if config:
+            valid_params = litellm.get_supported_openai_params(model=llm.model)
+            for k, v in config.items():
+                try:
+                    if k in valid_params and v is not None:
+                        setattr(llm, k, v)
+                except:
+                    pass
+        return llm
+
+
     @model_validator(mode="after")
     def set_up_tools(self) -> Self:
         """
@@ -270,25 +284,21 @@ class Agent(BaseModel):

         else:
             tool_list = []
-            def empty_func():
-                return "empty function"

             for item in self.tools:
                 if isinstance(item, Tool):
                     tool_list.append(item)

-                elif isinstance(item, dict):
-                    if "function" not in item:
-                        setattr(item, "function", empty_func)
+                elif isinstance(item, dict) and "func" in item:
                     tool = Tool(**item)
                     tool_list.append(tool)

-                elif isinstance(item, str):
-                    tool = Tool(name=item, function=empty_func)
-                    tool_list.append(tool)
+                elif type(item) is Tool and hasattr(item, "func"):
+                    tool_list.append(item)

                 else:
-                    tool_list.append(item) # address custom tool
+                    self._logger.log(level="error", message=f"Tool {str(item)} is missing a function.", color="red")
+                    raise PydanticCustomError("invalid_tool", f"The tool {str(item)} is missing a function.", {})

             self.tools = tool_list

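A notable piece of the `_set_llm_params` helper introduced earlier in this file is that arbitrary llm config is filtered through litellm before being applied. A small standalone sketch of that filtering idea (requires the litellm package; the config values and model name are sample inputs, not the package's defaults):

    import litellm

    config = {"temperature": 0.2, "top_p": 0.9, "not_a_real_param": 123}
    # Only keys litellm reports as supported OpenAI-style params for the model are kept.
    valid_params = litellm.get_supported_openai_params(model="gpt-4o")
    filtered = {k: v for k, v in config.items() if k in valid_params and v is not None}
    print(filtered)  # e.g. {'temperature': 0.2, 'top_p': 0.9}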
@@ -302,92 +312,116 @@ class Agent(BaseModel):
         """

         if self.backstory is None:
-            from versionhq.agent.TEMPLATES.Backstory import BACKSTORY
-            backstory = BACKSTORY.format(
-                role=self.role,
-                knowledge=self.knowledge if isinstance(self.knowledge, str) else None,
-                skillsets=", ".join([item for item in self.skillsets]),
-                rag_tool_overview=", ".join([item.name for item in self.tools if hasattr(item, "name")]) if self.tools else "",
-                goal=self.goal,
-            )
+            from versionhq.agent.TEMPLATES.Backstory import BACKSTORY_FULL, BACKSTORY_SHORT
+            backstory = ""
+
+            if self.tools or self.knowledge or self.skillsets:
+                backstory = BACKSTORY_FULL.format(
+                    role=self.role,
+                    goal=self.goal,
+                    knowledge=self.knowledge if isinstance(self.knowledge, str) else None,
+                    skillsets=", ".join([item for item in self.skillsets]),
+                    rag_tool_overview=", ".join([item.name for item in self.tools if hasattr(item, "name")]) if self.tools else "",
+                )
+            else:
+                backstory = BACKSTORY_SHORT.format(role=self.role, goal=self.goal)
+
             self.backstory = backstory

         return self


-    def invoke(self, prompts: str, output_formats: List[TaskOutputFormat], response_fields: List[ResponseField], **kwargs) -> Dict[str, Any]:
+    def invoke(
+        self,
+        prompts: str,
+        response_format: Optional[Dict[str, Any]] = None,
+        tools: Optional[List[Tool | ToolSet | Type[Tool]]] = None,
+        tool_res_as_final: bool = False
+    ) -> Dict[str, Any]:
         """
-        Receive the system prompt in string and create formatted prompts using the system prompt and the agent's backstory.
-        Then call the base model.
-        When encountering errors, we try the task execution up to `self.max_retry_limit` times.
+        Create formatted prompts using the developer prompt and the agent's backstory, then call the base model.
+        - Execute the task up to `self.max_retry_limit` times in case of receiving an error or empty response.
+        - Pass the task_tools to the model to let them execute.
         """

-        task_execution_counter, raw_response = 0, None
-
+        task_execution_counter = 0
+        iterations = 0
+        raw_response = None
         messages = []
-        messages.append({"role": "user", "content": prompts}) #! REFINEME
-        messages.append({"role": "assistant", "content": self.backstory})
+
+        messages.append({"role": "user", "content": prompts})
+        if self.use_developer_prompt:
+            messages.append({"role": "system", "content": self.backstory})
         self._logger.log(level="info", message=f"Messages sent to the model: {messages}", color="blue")

-        callbacks = kwargs.get("callbacks", None)
+        try:
+            if tool_res_as_final is True:
+                func_llm = self.function_calling_llm if self.function_calling_llm and self.function_calling_llm._supports_function_calling() else LLM(model=DEFAULT_MODEL_NAME)
+                raw_response = func_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
+            else:
+                raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)
+
+            task_execution_counter += 1
+            self._logger.log(level="info", message=f"Agent response: {raw_response}", color="blue")
+
+            if raw_response and self.callbacks:
+                for item in self.callbacks:
+                    raw_response = item(raw_response)
+
+        except Exception as e:
+            self._logger.log(level="error", message=f"An error occured. The agent will retry: {str(e)}", color="red")
+
+        while not raw_response and task_execution_counter < self.max_retry_limit:
+            while not raw_response and iterations < self.maxit:
+                raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)
+                iterations += 1

-        raw_response = self.llm.call(
-            messages=messages, output_formats=output_formats, field_list=response_fields, callbacks=callbacks
-        )
-        task_execution_counter += 1
-        self._logger.log(level="info", message=f"Agent's first response in {type(raw_response).__name__}: {raw_response}", color="blue")
-
-        if (raw_response is None or raw_response == "") and task_execution_counter < self.max_retry_limit:
-            while task_execution_counter <= self.max_retry_limit:
-                raw_response = self.llm.call(
-                    messages=messages,
-                    output_formats=output_formats,
-                    field_list=response_fields,
-                    callbacks=callbacks,
-                )
             task_execution_counter += 1
-                self._logger.log(level="info", message=f"Agent's next response in {type(raw_response).__name__}: {raw_response}", color="blue")
+            self._logger.log(level="info", message=f"Agent #{task_execution_counter} response: {raw_response}", color="blue")

-        elif raw_response is None or raw_response == "":
-            self._logger.log(level="error", message="Received None or empty response from the model", color="red")
-            raise ValueError("Invalid response from LLM call - None or empty.")
+            if raw_response and self.callbacks:
+                for item in self.callbacks:
+                    raw_response = item(raw_response)
+
+        if not raw_response:
+            self._logger.log(level="error", message="Received None or empty response from the model", color="red")
+            raise ValueError("Invalid response from LLM call - None or empty.")

         return raw_response


-    def execute_task(self, task, context: Optional[str] = None) -> str:
+    def execute_task(self, task, context: Optional[str] = None, task_tools: Optional[List[Tool | ToolSet]] = None) -> str:
         """
         Execute the task and return the response in string.
         The agent utilizes the tools in task or their own tools if the task.can_use_agent_tools is True.
         The agent must consider the context to excute the task as well when it is given.
         """
+        from versionhq.task.model import Task

-        task_prompt = task.prompt()
-        if context is not task.prompt_context: # as `task.prompt()` includes adding `task.prompt_context` to the prompt.
-            task_prompt += context
-
-        tool_results = []
-        if task.tools:
-            for item in task.tools:
-                if isinstance(item, ToolSet):
-                    tool_result = item.tool.run(**item.kwargs)
-                    tool_results.append(tool_result)
-                elif isinstance(item, Tool):
-                    tool_result = item.run()
-                    tool_results.append(tool_result)
-                else:
-                    try:
-                        item.run()
-                    except:
-                        pass
+        task: InstanceOf[Task] = task
+        tools: Optional[List[Tool | ToolSet | Type[Tool]]] = task_tools + self.tools if task.can_use_agent_tools else task_tools

-        if task.can_use_agent_tools is True and self.tools:
-            for tool in self.tools:
-                tool_result = tool.run()
-                tool_results.append(tool_result)
+        task_prompt = task.prompt(model_provider=self.llm.provider)
+        if context is not task.prompt_context:
+            task_prompt += context

-        if task.take_tool_res_as_final:
-            return tool_results
+        # if agent_tools_to_run_without_llm:
+        #     tool_results = []
+        #     for item in agent_tools_to_run_without_llm:
+        #         if isinstance(item, ToolSet):
+        #             tool_result = item.tool.run(**item.kwargs)
+        #             tool_results.append(tool_result)
+        #         elif isinstance(item, Tool):
+        #             tool_result = item.run()
+        #             tool_results.append(tool_result)
+        #         else:
+        #             try:
+        #                 item.run()
+        #             except:
+        #                 pass
+
+        # if task.tool_res_as_final is True:
+        #     return tool_results

         # if self.team and self.team._train:
         #     task_prompt = self._training_handler(task_prompt=task_prompt)
@@ -395,19 +429,28 @@ class Agent(BaseModel):
         #     task_prompt = self._use_trained_data(task_prompt=task_prompt)

         try:
+            self._times_executed += 1
             raw_response = self.invoke(
                 prompts=task_prompt,
-                output_formats=task.expected_output_formats,
-                response_fields=task.output_field_list,
+                response_format=task._structure_response_format(model_provider=self.llm.provider),
+                tools=tools,
+                tool_res_as_final=task.tool_res_as_final,
             )

         except Exception as e:
             self._times_executed += 1
+            self._logger.log(level="error", message=f"The agent failed to execute the task. Error: {str(e)}", color="red")
+            raw_response = self.execute_task(task, context, task_tools)
+
             if self._times_executed > self.max_retry_limit:
+                self._logger.log(level="error", message=f"Max retry limit has exceeded.", color="red")
                 raise e
-            raw_response = self.execute_task(task, context)

         if self.max_rpm and self._rpm_controller:
             self._rpm_controller.stop_rpm_counter()

         return raw_response
+
+
+    def __repr__(self):
+        return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
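The retry shape in the new `invoke` is an outer loop bounded by `max_retry_limit` and an inner loop bounded by `maxit`. A standalone sketch of that control flow with a stubbed model call (the stub and the limit values are illustrative, not the package's implementation):

    MAX_RETRY_LIMIT = 2   # stands in for self.max_retry_limit
    MAXIT = 25            # stands in for self.maxit
    attempts = {"n": 0}

    def call():
        # Stubbed LLM call that fails twice, then succeeds.
        attempts["n"] += 1
        return "ok" if attempts["n"] >= 3 else None

    raw_response = None
    task_execution_counter = 0
    iterations = 0

    while not raw_response and task_execution_counter < MAX_RETRY_LIMIT:
        while not raw_response and iterations < MAXIT:
            raw_response = call()
            iterations += 1
        task_execution_counter += 1

    print(raw_response, attempts["n"])  # ok 3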