versionhq 1.1.9.14__py3-none-any.whl → 1.1.10.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/__init__.py CHANGED
@@ -18,7 +18,7 @@ from versionhq.tool.model import Tool
  from versionhq.tool.composio_tool import ComposioHandler


- __version__ = "1.1.9.14"
+ __version__ = "1.1.10.3"
  __all__ = [
      "Agent",
      "Customer",
versionhq/_utils/logger.py CHANGED
@@ -12,8 +12,6 @@ class Printer:
              self._print_red(content)
          elif color == "green":
              self._print_green(content)
-         elif color == "purple":
-             self._print_purple(content)
          elif color == "blue":
              self._print_blue(content)
          elif color == "yellow":
@@ -27,9 +25,6 @@ class Printer:
      def _print_green(self, content):
          print("\033[1m\033[92m {}\033[00m".format(content))

-     def _print_purple(self, content):
-         print("\033[95m {}\033[00m".format(content))
-
      def _print_red(self, content):
          print("\033[91m {}\033[00m".format(content))

@@ -47,4 +42,4 @@ class Logger(BaseModel):
      def log(self, level, message, color="yellow"):
          if self.verbose:
              timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-             self._printer.print(f"\n[{timestamp}][{level.upper()}]: {message}", color=color)
+             self._printer.print(f"\n{timestamp} - versionHQ - {level.upper()}: {message}", color=color)
versionhq/_utils/process_config.py CHANGED
@@ -8,19 +8,16 @@ def process_config(values_to_update: Dict[str, Any], model_class: Type[BaseModel
      Refer to the Pydantic model class for field validation.
      """

- if hasattr(values_to_update, "config"):
-     config = values_to_update.pop("config", {})
- else:
-     return values_to_update
+ config = values_to_update.pop("config", {})

+ if config:
+     for k, v in config.items():
+         if k not in model_class.model_fields or values_to_update.get(k) is not None:
+             continue

- for key, value in config.items():
-     if key not in model_class.model_fields or values_to_update.get(key) is not None:
-         continue
-
-     if isinstance(value, dict) and isinstance(values_to_update.get(key), dict):
-         values_to_update[key].update(value)
-     else:
-         values_to_update[key] = value
+         if isinstance(v, dict) and isinstance(values_to_update.get(k), dict):
+             values_to_update[k].update(v)
+         else:
+             values_to_update[k] = v

      return values_to_update
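`process_config` no longer bails out early: the old `hasattr(values_to_update, "config")` check on a plain dict was always False, so the `config` key was never merged. It now always pops `config` and folds it in under the caller's explicit kwargs. A runnable sketch of the merge semantics against a toy Pydantic model (names are illustrative, not from the package):

```python
from typing import Any, Dict, Type
from pydantic import BaseModel

class Toy(BaseModel):
    role: str = "agent"
    llm: str = "gpt-4o"

def process_config(values_to_update: Dict[str, Any], model_class: Type[BaseModel]) -> Dict[str, Any]:
    # same logic as the new version above
    config = values_to_update.pop("config", {})
    if config:
        for k, v in config.items():
            if k not in model_class.model_fields or values_to_update.get(k) is not None:
                continue
            if isinstance(v, dict) and isinstance(values_to_update.get(k), dict):
                values_to_update[k].update(v)
            else:
                values_to_update[k] = v
    return values_to_update

values = {"role": "researcher", "config": {"role": "writer", "llm": "gpt-4o-mini"}}
print(process_config(values, Toy))
# {'role': 'researcher', 'llm': 'gpt-4o-mini'} - an explicitly passed 'role' wins over config
```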
versionhq/agent/TEMPLATES/Backstory.py CHANGED
@@ -1,3 +1,4 @@
- BACKSTORY="""You are a {role} with deep understanding of {knowledge} and highly skilled in {skillsets}.
- You have access to call the RAG tools that can {rag_tool_overview}. By leveraging these tools, your knowledge, and skillsets, you can identify competitive strategies that have been proven effective to achieve the goal: {goal}. Take these into consideration, create innovative solutions.
- """
+ BACKSTORY_FULL="""You are an expert {role} with deep understanding of {knowledge} and highly skilled in {skillsets}. You have abilities to call the RAG tools that can {rag_tool_overview}. Your primary goal is to identify competitive solutions by leveraging your knowledge and skillsets to achieve the following goal: {goal}."""


+ BACKSTORY_SHORT="""You are an expert {role} with the right skillsets and knowledge. Your primary goal is to identify competitive solutions by leveraging your knowledge and skillsets to achieve the following goal: {goal}."""
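The single `BACKSTORY` template splits into a full and a short variant; `set_up_backstory` in versionhq/agent/model.py (below) picks `BACKSTORY_FULL` only when tools, knowledge, or skillsets are present. A quick sketch of the short path:

```python
BACKSTORY_SHORT = """You are an expert {role} with the right skillsets and knowledge. Your primary goal is to identify competitive solutions by leveraging your knowledge and skillsets to achieve the following goal: {goal}."""

# Agents with no tools, knowledge, or skillsets get the short variant:
print(BACKSTORY_SHORT.format(role="marketing analyst", goal="increase trial signups"))
```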
versionhq/agent/model.py CHANGED
@@ -1,19 +1,20 @@
  import os
  import uuid
- from typing import Any, Dict, List, Optional, TypeVar, Callable
+ from typing import Any, Dict, List, Optional, TypeVar, Callable, Type
  from typing_extensions import Self
  from dotenv import load_dotenv
- from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator
+ import litellm
+
+ from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator, ConfigDict
  from pydantic_core import PydanticCustomError

+ from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME
+ from versionhq.tool.model import Tool, ToolSet
  from versionhq._utils.logger import Logger
  from versionhq._utils.rpm_controller import RPMController
  from versionhq._utils.usage_metrics import UsageMetrics
- from versionhq.llm.llm_variables import LLM_VARS
- from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME
- from versionhq.task import TaskOutputFormat
- from versionhq.task.model import ResponseField
- from versionhq.tool.model import Tool, ToolSet
+ from versionhq._utils.process_config import process_config
+

  load_dotenv(override=True)
  T = TypeVar("T", bound="Agent")
@@ -92,18 +93,22 @@ class Agent(BaseModel):
      id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
      role: str = Field(description="role of the agent - used in summary and logs")
      goal: str = Field(description="concise goal of the agent (details are set in the Task instance)")
-     backstory: Optional[str] = Field(default=None, description="system context passed to the LLM")
+     backstory: Optional[str] = Field(default=None, description="developer prompt to the llm")
      knowledge: Optional[str] = Field(default=None, description="external knowledge fed to the agent")
      skillsets: Optional[List[str]] = Field(default_factory=list)
-     tools: Optional[List[Tool | Any]] = Field(default_factory=list)
+     tools: Optional[List[Tool | ToolSet | Type[Tool]]] = Field(default_factory=list)

-     # team, task execution rules
+     # prompting
+     use_developer_prompt: Optional[bool] = Field(default=True, description="use developer prompt when calling the llm")
+     developer_propmt_template: Optional[str] = Field(default=None, description="developer prompt template")
+     user_prompt_template: Optional[str] = Field(default=None, description="user prompt template")
+
+     # task execution rules
      team: Optional[List[Any]] = Field(default=None, description="Team to which the agent belongs")
-     allow_delegation: bool = Field(default=False, description="Enable agent to delegate and ask questions among each other")
-     allow_code_execution: Optional[bool] = Field(default=False, description="Enable code execution for the agent.")
-     max_retry_limit: int = Field(default=2, description="max. number of retries for the task execution when an error occurs. cascaded to the `invoke` function")
-     max_iter: Optional[int] = Field(default=25, description="max. number of iterations for an agent to execute a task")
-     step_callback: Optional[Callable | Any] = Field(default=None, description="callback to be executed after each step of the agent execution")
+     allow_delegation: bool = Field(default=False, description="if the agent can delegate the task to another agent or ask some help")
+     max_retry_limit: int = Field(default=2, description="max. number of retries for the task execution when an error occurs")
+     maxit: Optional[int] = Field(default=25, description="max. number of total optimization loops conducted when an error occurs")
+     callbacks: Optional[List[Callable]] = Field(default_factory=list, description="callback functions to execute after any task execution")

      # llm settings cascaded to the LLM model
      llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
@@ -112,15 +117,10 @@ class Agent(BaseModel):
      max_tokens: Optional[int] = Field(default=None, description="max. number of tokens for the agent's execution")
      max_execution_time: Optional[int] = Field(default=None, description="max. execution time for an agent to execute a task")
      max_rpm: Optional[int] = Field(default=None, description="max. number of requests per minute for the agent execution")
-
-     # prompt rules
-     use_system_prompt: Optional[bool] = Field(default=True, description="Use system prompt for the agent")
-     system_template: Optional[str] = Field(default=None, description="System format for the agent.")
-     prompt_template: Optional[str] = Field(default=None, description="Prompt format for the agent.")
-     response_template: Optional[str] = Field(default=None, description="Response format for the agent.")
+     llm_config: Optional[Dict[str, Any]] = Field(default=None, description="other llm config cascaded to the model")

      # config, cache, error handling
-     formatting_errors: int = Field(default=0, description="Number of formatting errors.")
+     formatting_errors: int = Field(default=0, description="number of formatting errors.")
      agent_ops_agent_name: str = None
      agent_ops_agent_id: str = None

@@ -132,6 +132,21 @@ class Agent(BaseModel):
              raise PydanticCustomError("may_not_set_field", "This field is not to be set by the user.", {})


+     # @field_validator(mode="before")
+     # def set_up_from_config(cls) -> None:
+     #     if cls.config is not None:
+     #         try:
+     #             for k, v in cls.config.items():
+     #                 setattr(cls, k, v)
+     #         except:
+     #             pass
+
+     @model_validator(mode="before")
+     @classmethod
+     def process_model_config(cls, values: Dict[str, Any]) -> None:
+         return process_config(values_to_update=values, model_class=cls)
+
+
      @model_validator(mode="after")
      def validate_required_fields(self) -> Self:
          required_fields = ["role", "goal"]
@@ -145,7 +160,7 @@ class Agent(BaseModel):
      def set_up_llm(self) -> Self:
          """
          Set up the base model and function calling model (if any) using the LLM class.
-         Pass the model config params: `llm`, `max_tokens`, `max_execution_time`, `step_callback`, `respect_context_window` to the LLM class.
+         Pass the model config params: `llm`, `max_tokens`, `max_execution_time`, `callbacks`, `respect_context_window` to the LLM class.
          The base model is selected on the client app, else use the default model.
          """
@@ -231,6 +246,34 @@ class Agent(BaseModel):
          return self


+     def _set_llm_params(self, llm: LLM, config: Dict[str, Any] = None) -> LLM:
+         """
+         After setting up an LLM instance, add params to the instance.
+         Prioritize the agent's settings over the model's base setups.
+         """
+
+         llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
+         llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
+
+         # if self.callbacks:
+         #     llm.callbacks = self.callbacks
+         #     llm._set_callbacks(llm.callbacks)
+
+         if self.respect_context_window == False:
+             llm.context_window_size = DEFAULT_CONTEXT_WINDOW_SIZE
+
+         config = self.config.update(config) if self.config else config
+         if config:
+             valid_params = litellm.get_supported_openai_params(model=llm.model)
+             for k, v in config.items():
+                 try:
+                     if k in valid_params and v is not None:
+                         setattr(llm, k, v)
+                 except:
+                     pass
+         return llm
+
+
      @model_validator(mode="after")
      def set_up_tools(self) -> Self:
          """
@@ -241,25 +284,21 @@ class Agent(BaseModel):

          else:
              tool_list = []
-             def empty_func():
-                 return "empty function"

              for item in self.tools:
                  if isinstance(item, Tool):
                      tool_list.append(item)

-                 elif isinstance(item, dict):
-                     if "function" not in item:
-                         setattr(item, "function", empty_func)
+                 elif isinstance(item, dict) and "func" in item:
                      tool = Tool(**item)
                      tool_list.append(tool)

-                 elif isinstance(item, str):
-                     tool = Tool(name=item, function=empty_func)
-                     tool_list.append(tool)
+                 elif type(item) is Tool and hasattr(item, "func"):
+                     tool_list.append(item)

                  else:
-                     tool_list.append(item) # address custom tool
+                     self._logger.log(level="error", message=f"Tool {str(item)} is missing a function.", color="red")
+                     raise PydanticCustomError("invalid_tool", f"The tool {str(item)} is missing a function.", {})

          self.tools = tool_list

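The validator now rejects tools without a callable instead of wrapping them in a placeholder `empty_func`. A hedged sketch of what still passes, assuming `Tool` accepts `name` and `func` keyword arguments (implied by the dict branch above, not confirmed elsewhere in this diff):

```python
from versionhq.tool.model import Tool  # path as imported in agent/model.py above

def fetch_prices(symbol: str) -> str:
    return f"price data for {symbol}"

ok_instance = Tool(name="price_fetcher", func=fetch_prices)  # Tool instance -> appended as-is
ok_dict = {"name": "price_fetcher", "func": fetch_prices}    # dict with "func" -> Tool(**item)
bad = "price_fetcher"  # bare string -> PydanticCustomError("invalid_tool", ...) is now raised
```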
@@ -273,108 +312,98 @@ class Agent(BaseModel):
          """

          if self.backstory is None:
-             from versionhq.agent.TEMPLATES.Backstory import BACKSTORY
-             backstory = BACKSTORY.format(
-                 role=self.role,
-                 knowledge=self.knowledge if isinstance(self.knowledge, str) else None,
-                 skillsets=", ".join([item for item in self.skillsets]),
-                 rag_tool_overview=", ".join([item.name for item in self.tools if hasattr(item, "name")]) if self.tools else "",
-                 goal=self.goal,
-             )
+             from versionhq.agent.TEMPLATES.Backstory import BACKSTORY_FULL, BACKSTORY_SHORT
+             backstory = ""
+
+             if self.tools or self.knowledge or self.skillsets:
+                 backstory = BACKSTORY_FULL.format(
+                     role=self.role,
+                     goal=self.goal,
+                     knowledge=self.knowledge if isinstance(self.knowledge, str) else None,
+                     skillsets=", ".join([item for item in self.skillsets]),
+                     rag_tool_overview=", ".join([item.name for item in self.tools if hasattr(item, "name")]) if self.tools else "",
+                 )
+             else:
+                 backstory = BACKSTORY_SHORT.format(role=self.role, goal=self.goal)
+
          self.backstory = backstory

          return self


-     def _set_llm_params(self, llm: LLM, kwargs: Dict[str, Any] = None) -> LLM:
+     def invoke(
+         self,
+         prompts: str,
+         response_format: Optional[Dict[str, Any]] = None,
+         tools: Optional[List[Tool | ToolSet | Type[Tool]]] = None,
+         tool_res_as_final: bool = False
+     ) -> Dict[str, Any]:
          """
-         After setting up an LLM instance, add params to the instance.
-         Prioritize the agent's settings over the model's base setups.
+         Create formatted prompts using the developer prompt and the agent's backstory, then call the base model.
+         - Execute the task up to `self.max_retry_limit` times in case of receiving an error or empty response.
+         - Pass the task_tools to the model to let them execute.
          """

-         llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
-         llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
-
-         if self.step_callback is not None:
-             llm.callbacks = [self.step_callback, ]
-             llm._set_callbacks(llm.callbacks)
-
-         if self.respect_context_window == False:
-             llm.context_window_size = DEFAULT_CONTEXT_WINDOW_SIZE
+         task_execution_counter = 0
+         iterations = 0
+         raw_response = None
+         messages = []

-         if kwargs:
-             for k, v in kwargs.items():
-                 try:
-                     setattr(llm, k, v)
-                 except:
-                     pass
-         return llm
+         messages.append({"role": "user", "content": prompts})
+         if self.use_developer_prompt:
+             messages.append({"role": "system", "content": self.backstory})
+         self._logger.log(level="info", message=f"Messages sent to the model: {messages}", color="blue")

+         try:
+             if tool_res_as_final is True:
+                 func_llm = self.function_calling_llm if self.function_calling_llm and self.function_calling_llm._supports_function_calling() else LLM(model=DEFAULT_MODEL_NAME)
+                 raw_response = func_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
+             else:
+                 raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)

-     def invoke(self, prompts: str, output_formats: List[str | TaskOutputFormat], response_fields: List[ResponseField]) -> Dict[str, Any]:
-         """
-         Receive the system prompt in string and create formatted prompts using the system prompt and the agent's backstory.
-         Then call the base model.
-         When encountering errors, we try the task execution up to `self.max_retry_limit` times.
-         """
+             task_execution_counter += 1
+             self._logger.log(level="info", message=f"Agent response: {raw_response}", color="blue")

-         task_execution_counter, raw_response = 0, None
+             if raw_response and self.callbacks:
+                 for item in self.callbacks:
+                     raw_response = item(raw_response)

-         messages = []
-         messages.append({"role": "user", "content": prompts}) #! REFINEME
-         messages.append({"role": "assistant", "content": self.backstory})
-         self._logger.log(level="info", message=f"Messages sent to the model: {messages}", color="blue")
+         except Exception as e:
+             self._logger.log(level="error", message=f"An error occurred. The agent will retry: {str(e)}", color="red")

-         raw_response = self.llm.call(messages=messages, output_formats=output_formats, field_list=response_fields)
-         task_execution_counter += 1
-         self._logger.log(level="info", message=f"Agent's first response in {type(raw_response).__name__}: {raw_response}", color="blue")
+         while not raw_response and task_execution_counter < self.max_retry_limit:
+             while not raw_response and iterations < self.maxit:
+                 raw_response = self.llm.call(messages=messages, response_format=response_format, tools=tools)
+                 iterations += 1

-         if (raw_response is None or raw_response == "") and task_execution_counter < self.max_retry_limit:
-             while task_execution_counter <= self.max_retry_limit:
-                 raw_response = self.llm.call(messages=messages, output_formats=output_formats, field_list=response_fields)
              task_execution_counter += 1
-             self._logger.log(level="info", message=f"Agent's next response in {type(raw_response).__name__}: {raw_response}", color="blue")
+             self._logger.log(level="info", message=f"Agent #{task_execution_counter} response: {raw_response}", color="blue")

-         elif raw_response is None or raw_response == "":
-             self._logger.log(level="error", message="Received None or empty response from the model", color="red")
-             raise ValueError("Invalid response from LLM call - None or empty.")
+             if raw_response and self.callbacks:
+                 for item in self.callbacks:
+                     raw_response = item(raw_response)
+
+         if not raw_response:
+             self._logger.log(level="error", message="Received None or empty response from the model", color="red")
+             raise ValueError("Invalid response from LLM call - None or empty.")

          return raw_response


-     def execute_task(self, task, context: Optional[str] = None) -> str:
+     def execute_task(self, task, context: Optional[str] = None, task_tools: Optional[List[Tool | ToolSet]] = list()) -> str:
          """
          Execute the task and return the response in string.
          The agent utilizes the tools in task or their own tools if the task.can_use_agent_tools is True.
          The agent must consider the context to execute the task as well when it is given.
          """
+         from versionhq.task.model import Task

-         task_prompt = task.prompt()
-         if context is not task.prompt_context: # as `task.prompt()` includes adding `task.prompt_context` to the prompt.
-             task_prompt += context
-
-         tool_results = []
-         if task.tools:
-             for item in task.tools:
-                 if isinstance(item, ToolSet):
-                     tool_result = item.tool.run(**item.kwargs)
-                     tool_results.append(tool_result)
-                 elif isinstance(item, Tool):
-                     tool_result = item.run()
-                     tool_results.append(tool_result)
-                 else:
-                     try:
-                         item.run()
-                     except:
-                         pass
-
-         if task.can_use_agent_tools is True and self.tools:
-             for tool in self.tools:
-                 tool_result = tool.run()
-                 tool_results.append(tool_result)
+         task: InstanceOf[Task] = task
+         tools: Optional[List[Tool | ToolSet | Type[Tool]]] = task_tools + self.tools if task.can_use_agent_tools else task_tools

-         if task.take_tool_res_as_final:
-             return tool_results
+         task_prompt = task.prompt(model_provider=self.llm.provider)
+         if context is not task.prompt_context:
+             task_prompt += context

          # if self.team and self.team._train:
          #     task_prompt = self._training_handler(task_prompt=task_prompt)
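Retry handling in the new `invoke` is two nested loops: the inner loop re-calls the model up to `self.maxit` times, the outer loop counts whole attempts against `self.max_retry_limit`, and `iterations` is never reset between outer passes. A stripped-down, runnable sketch of that control flow (a stub `call()` stands in for the LLM):

```python
import random

MAXIT, MAX_RETRY_LIMIT = 25, 2

def call() -> str | None:
    # stub LLM call that fails most of the time
    return "ok" if random.random() < 0.2 else None

raw_response = None
iterations = 0
task_execution_counter = 1  # the initial try/except attempt already counted once

while not raw_response and task_execution_counter < MAX_RETRY_LIMIT:
    while not raw_response and iterations < MAXIT:  # iterations carries over between passes
        raw_response = call()
        iterations += 1
    task_execution_counter += 1

if not raw_response:
    raise ValueError("Invalid response from LLM call - None or empty.")
```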
@@ -382,17 +411,22 @@ class Agent(BaseModel):
          #     task_prompt = self._use_trained_data(task_prompt=task_prompt)

          try:
+             self._times_executed += 1
              raw_response = self.invoke(
                  prompts=task_prompt,
-                 output_formats=task.expected_output_formats,
-                 response_fields=task.output_field_list,
+                 response_format=task._structure_response_format(model_provider=self.llm.provider),
+                 tools=tools,
+                 tool_res_as_final=task.tool_res_as_final,
              )

          except Exception as e:
              self._times_executed += 1
+             self._logger.log(level="error", message=f"The agent failed to execute the task. Error: {str(e)}", color="red")
+             raw_response = self.execute_task(task, context, task_tools)
+
              if self._times_executed > self.max_retry_limit:
+                 self._logger.log(level="error", message=f"Max retry limit exceeded.", color="red")
                  raise e
-             raw_response = self.execute_task(task, context)

          if self.max_rpm and self._rpm_controller:
              self._rpm_controller.stop_rpm_counter()
versionhq/clients/product/model.py CHANGED
@@ -5,7 +5,7 @@ from typing import Any, Dict, List, Callable, Type, Optional, get_args, get_orig
  from pydantic import UUID4, InstanceOf, BaseModel, ConfigDict, Field, create_model, field_validator, model_validator
  from pydantic_core import PydanticCustomError

- from versionhq.tool import ComposioAppName
+ from versionhq.tool.composio_tool_vars import ComposioAppName


  class ProductProvider(ABC, BaseModel):
versionhq/clients/workflow/model.py CHANGED
@@ -10,7 +10,7 @@ from versionhq.clients.product.model import Product
  from versionhq.clients.customer.model import Customer
  from versionhq.agent.model import Agent
  from versionhq.team.model import Team
- from versionhq.tool import ComposioAppName
+ from versionhq.tool.composio_tool_vars import ComposioAppName


  class ScoreFormat:
versionhq/llm/llm_variables.py CHANGED
@@ -1,3 +1,6 @@
+ from enum import Enum
+ from typing import Type
+
  JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"


@@ -6,11 +9,14 @@ List of models available on the framework.
  Model names align with the LiteLLM's key names defined in the JSON URL.
  Provider names align with the custom provider or model provider names.
  -> model_key = custom_provider_name/model_name
+
+ Option
+ litellm.pick_cheapest_chat_models_from_llm_provider(custom_llm_provider: str, n=1)
  """

  MODELS = {
      "openai": [
-         "gpt-3.5-turbo",
+         # "gpt-3.5-turbo",
          "gpt-4",
          "gpt-4o",
          "gpt-4o-mini",
@@ -21,9 +27,11 @@ MODELS = {
          "gemini/gemini-1.5-flash",
          "gemini/gemini-1.5-pro",
          "gemini/gemini-2.0-flash-exp",
-         "gemini/gemini-gemma-2-9b-it",
-         "gemini/gemini-gemma-2-27b-it",
+         # "gemini/gemini-gemma-2-9b-it",
+         # "gemini/gemini-gemma-2-27b-it",
      ],
+     # "vetrex_ai": [
+     # ],
      "anthropic": [
          "claude-3-5-sonnet-20241022",
          "claude-3-5-sonnet-20240620",
@@ -31,10 +39,10 @@ MODELS = {
          "claude-3-opus-20240229",
          "claude-3-haiku-20240307",
      ],
-     "ollama": [
-         "ollama/llama3.1",
-         "ollama/mixtral",
-     ],
+     # "ollama": [
+     #     "ollama/llama3.1",
+     #     "ollama/mixtral",
+     # ],
      # "watson": [
      #     "watsonx/meta-llama/llama-3-1-70b-instruct",
      #     "watsonx/meta-llama/llama-3-1-8b-instruct",
@@ -135,8 +143,8 @@ LLM_API_KEY_NAMES = {

  LLM_BASE_URL_KEY_NAMES = {
      "openai": "OPENAI_API_BASE",
+     "gemini": "GEMINI_API_BASE",
      "anthropic": "ANTHROPIC_API_BASE",
-     "gemini": "GEMINI_API_BASE",
  }

  LLM_VARS = {
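These tables map a provider to the environment variable that overrides its API base URL; the reordering above is cosmetic. Resolution is a dict lookup plus `os.environ` (sketch):

```python
import os

LLM_BASE_URL_KEY_NAMES = {
    "openai": "OPENAI_API_BASE",
    "gemini": "GEMINI_API_BASE",
    "anthropic": "ANTHROPIC_API_BASE",
}

# returns None unless e.g. GEMINI_API_BASE is set in the environment
base_url = os.environ.get(LLM_BASE_URL_KEY_NAMES["gemini"])
```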
@@ -225,37 +233,91 @@


  """
- Params for litellm.completion() func
+ Params for litellm.completion() func. Address common/unique params to each provider.
  """

- LITELLM_COMPLETION_KEYS = [
-     "model",
-     "messages",
-     "timeout",
-     "temperature", "top_p",
-     "n",
-     "stream",
-     "stream_options",
-     "stop",
-     "max_completion_tokens",
-     "max_tokens",
-     "modalities",
-     "prediction",
-     "audio",
-     "presence_penalty",
-     "frequency_penalty",
-     "logit_bias",
-     "user",
-     "response_format",
-     "seed",
-     "tools",
-     "tool_choice",
-     "logprobs",
-     "top_logprobs",
-     "parallel_tool_calls",
-     "extra_headers",
-     "base_url",
-     "api_version",
-     "api_key",
-     "model_list"
- ]
+ PARAMS = {
+     "litellm": [
+         "api_base",
+         "api_version",
+         "num_retries",
+         "context_window_fallback_dict",
+         "fallbacks",
+         "metadata",
+     ],
+     "common": [
+         "model",
+         "messages",
+         "temperature",
+         "top_p",
+         "max_tokens",
+         "stream",
+         "tools",
+         "tool_choice",
+         "response_format",
+         "n",
+         "stop",
+         "base_url",
+         "api_key",
+     ],
+     "openai": [
+         "timeout",
+         # "temperature",
+         # "top_p",
+         # "n",
+         # "stream",
+         "stream_options",
+         # "stop",
+         "max_completion_tokens",
+         # "max_tokens",
+         "modalities",
+         "prediction",
+         "audio",
+         "presence_penalty",
+         "frequency_penalty",
+         "logit_bias",
+         "user",
+         # "response_format",
+         "seed",
+         # "tools",
+         # "tool_choice",
+         "logprobs",
+         "top_logprobs",
+         "parallel_tool_calls",
+         "extra_headers",
+         "model_list"
+     ],
+     "gemini": [
+         "topK",
+     ]
+ }
+
+
+ class SchemaType:
+     """
+     A class to store/convert a LLM-valid schema type from the Python Type object.
+     https://swagger.io/docs/specification/v3_0/data-models/data-types/
+     https://cloud.google.com/vertex-ai/docs/reference/rest/v1/Schema#Type
+     """
+
+     def __init__(self, type: Type):
+         self.type = type
+
+     def convert(self) -> str:
+         if self.type is None:
+             return "string"
+
+         if self.type is int:
+             return "integer"
+         elif self.type is float:
+             return "number"
+         elif self.type is str:
+             return "string"
+         elif self.type is dict:
+             return "object"
+         elif self.type is list:
+             return "array"
+         elif self.type is bool:
+             return "boolean"
+         else:
+             return "string"