versionhq 1.1.9.14__py3-none-any.whl → 1.1.10.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,3 +1,6 @@
+ from enum import Enum
+ from typing import Type
+
  JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
 
 
@@ -6,6 +9,9 @@ List of models available on the framework.
  Model names align with the LiteLLM's key names defined in the JSON URL.
  Provider names align with the custom provider or model provider names.
  -> model_key = custom_provider_name/model_name
+
+ Option
+ litellm.pick_cheapest_chat_models_from_llm_provider(custom_llm_provider: str, n=1)
  """
 
  MODELS = {
@@ -21,9 +27,11 @@ MODELS = {
  "gemini/gemini-1.5-flash",
  "gemini/gemini-1.5-pro",
  "gemini/gemini-2.0-flash-exp",
- "gemini/gemini-gemma-2-9b-it",
- "gemini/gemini-gemma-2-27b-it",
+ # "gemini/gemini-gemma-2-9b-it",
+ # "gemini/gemini-gemma-2-27b-it",
  ],
+ # "vetrex_ai": [
+ # ],
  "anthropic": [
  "claude-3-5-sonnet-20241022",
  "claude-3-5-sonnet-20240620",
@@ -31,10 +39,10 @@ MODELS = {
  "claude-3-opus-20240229",
  "claude-3-haiku-20240307",
  ],
- "ollama": [
- "ollama/llama3.1",
- "ollama/mixtral",
- ],
+ # "ollama": [
+ # "ollama/llama3.1",
+ # "ollama/mixtral",
+ # ],
  # "watson": [
  # "watsonx/meta-llama/llama-3-1-70b-instruct",
  # "watsonx/meta-llama/llama-3-1-8b-instruct",
@@ -135,8 +143,8 @@ LLM_API_KEY_NAMES = {
 
  LLM_BASE_URL_KEY_NAMES = {
  "openai": "OPENAI_API_BASE",
+ "gemini": "GEMINI_API_BASE",
  "anthropic": "ANTHROPIC_API_BASE",
- "gemini": "GEMINI_API_BASE",
  }
 
  LLM_VARS = {
@@ -225,37 +233,91 @@ LLM_VARS = {
 
 
  """
- Params for litellm.completion() func
+ Params for litellm.completion() func. Address common/unique params to each provider.
  """
 
- LITELLM_COMPLETION_KEYS = [
- "model",
- "messages",
- "timeout",
- "temperature", "top_p",
- "n",
- "stream"
- "stream_options"
- "stop",
- "max_compl,etion_tokens"
- "max_tokens",
- "modalities",
- "prediction",
- "audio",
- "presen,ce_penalty"
- "frequency_penalty,"
- "logit_bias",
- "user",
- "response_format",
- "seed",
- "tools,"
- "tool_choice"
- "logprobs",
- "top_logpr,obs"
- "parallel_tool_calls"
- "extra_headers",
- "base_url",
- "api_versi,on"
- "api_key",
- "model_list"
- ]
+ PARAMS = {
+ "litellm": [
+ "api_base",
+ "api_version,"
+ "num_retries",
+ "context_window_fallback_dict",
+ "fallbacks",
+ "metadata",
+ ],
+ "common": [
+ "model",
+ "messages",
+ "temperature",
+ "top_p",
+ "max_tokens",
+ "stream",
+ "tools",
+ "tool_choice",
+ "response_format",
+ "n",
+ "stop",
+ "base_url",
+ "api_key",
+ ],
+ "openai": [
+ "timeout",
+ # "temperature",
+ # "top_p",
+ # "n",
+ # "stream",
+ "stream_options",
+ # "stop",
+ "max_compl,etion_tokens",
+ # "max_tokens",
+ "modalities",
+ "prediction",
+ "audio",
+ "presence_penalty",
+ "frequency_penalty",
+ "logit_bias",
+ "user",
+ # "response_format",
+ "seed",
+ # "tools",
+ # "tool_choice",
+ "logprobs",
+ "top_logprobs",
+ "parallel_tool_calls",
+ "extra_headers",
+ "model_list"
+ ],
+ "gemini": [
+ "topK",
+ ]
+ }
+
+
+ class SchemaType:
+ """
+ A class to store/convert a LLM-valid schema type from the Python Type object.
+ https://swagger.io/docs/specification/v3_0/data-models/data-types/
+ https://cloud.google.com/vertex-ai/docs/reference/rest/v1/Schema#Type
+ """
+
+ def __init__(self, type: Type):
+ self.type = type
+
+ def convert(self) -> str:
+ if self.type is None:
+ return "string"
+
+ if self.type is int:
+ return "integer"
+ elif self.type is float:
+ return "number"
+ elif self.type is str:
+ return "string"
+ elif self.type is dict:
+ return "object"
+ elif self.type is list:
+ return "array"
+ elif self.type is bool:
+ return "boolean"
+ else:
+ return "string"
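As an aside, here is a minimal usage sketch of the new SchemaType helper added above. It is not part of the diff; it simply exercises the convert() branches shown in this hunk, assuming the module is importable as versionhq.llm.llm_vars (the import path used by model.py below).

```python
# Illustration only (not part of the package diff): exercising SchemaType.convert()
# as defined in the hunk above, assuming the module path versionhq.llm.llm_vars.
from versionhq.llm.llm_vars import SchemaType

assert SchemaType(int).convert() == "integer"
assert SchemaType(float).convert() == "number"
assert SchemaType(str).convert() == "string"
assert SchemaType(dict).convert() == "object"
assert SchemaType(list).convert() == "array"
assert SchemaType(bool).convert() == "boolean"
assert SchemaType(None).convert() == "string"  # unknown or missing types fall back to "string"
```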
versionhq/llm/model.py CHANGED
@@ -1,30 +1,40 @@
  import logging
+ import json
  import os
  import sys
  import threading
  import warnings
  import litellm
+ from litellm import JSONSchemaValidationError
  from abc import ABC
  from dotenv import load_dotenv
  from litellm import get_supported_openai_params
  from contextlib import contextmanager
- from typing import Any, Dict, List, Optional
+ from typing import Any, Dict, List, Optional, Type
  from typing_extensions import Self
 
  from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf, ConfigDict
  from pydantic_core import PydanticCustomError
 
- from versionhq.llm.llm_variables import LLM_CONTEXT_WINDOW_SIZES, LLM_API_KEY_NAMES, LLM_BASE_URL_KEY_NAMES, MODELS, LITELLM_COMPLETION_KEYS
+ from openai import OpenAI
+
+ from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, LLM_API_KEY_NAMES, LLM_BASE_URL_KEY_NAMES, MODELS, PARAMS, SchemaType
  from versionhq.task import TaskOutputFormat
- from versionhq.task.model import ResponseField
+ from versionhq.task.model import ResponseField, Task
+ from versionhq.tool.model import Tool, ToolSet
  from versionhq._utils.logger import Logger
 
 
  load_dotenv(override=True)
- API_KEY_LITELLM = os.environ.get("API_KEY_LITELLM")
+ LITELLM_API_KEY = os.environ.get("LITELLM_API_KEY")
+ LITELLM_API_BASE = os.environ.get("LITELLM_API_BASE")
  DEFAULT_CONTEXT_WINDOW_SIZE = int(8192 * 0.75)
  DEFAULT_MODEL_NAME = os.environ.get("DEFAULT_MODEL_NAME")
 
+ proxy_openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"), organization="versionhq", base_url=LITELLM_API_BASE)
+ openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
+
+
  class FilteredStream:
  def __init__(self, original_stream):
  self._original_stream = original_stream
@@ -62,49 +72,12 @@ def suppress_warnings():
  sys.stderr = old_stderr
 
 
- class LLMResponseSchema:
- """
- Use the response schema for LLM response.
- `field_list` contains the title, value type, bool if required of each field that needs to be returned.
- field_list: [{ title, type, required } ]
-
- i.e., reponse_schema
- response_type: "array" *options: "array", "dict"
- propeties: { "recipe_name": { "type": "string" }, },
- required: ["recipe_name"]
- """
-
- def __init__(self, response_type: str, field_list: List[ResponseField]):
- self.type = response_type
- self.field_list = field_list
-
- @property
- def schema(self):
- if len(self.field_list) == 0:
- return
-
- properties = [
- {
- field.title: {
- "type": field.type,
- }
- }
- for field in self.field_list
- ]
- required = [field.title for field in self.field_list if field.required == True]
- response_schema = {
- "type": self.type,
- "items": {"type": "object", "properties": {*properties}},
- "required": required,
- }
- return response_schema
-
-
  class LLM(BaseModel):
  """
  An LLM class to store params except for response formats which will be given in the task handling process.
  Use LiteLLM to connect with the model of choice.
  Some optional params are passed by the agent, else follow the default settings of the model provider.
+ Ref. https://docs.litellm.ai/docs/completion/input
  """
 
  _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
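For context, the LLM model described by this docstring is typically constructed with a model key and, optionally, a provider that is otherwise inferred from the MODELS table in llm_vars.py. A hedged construction sketch follows; field names come from the diff, but defaults and required environment variables depend on the installed version.

```python
# Hypothetical construction sketch; only fields visible in this diff are used.
from versionhq.llm.model import LLM

llm = LLM(
    model="gemini/gemini-1.5-flash",  # validated against MODELS in llm_vars.py
    provider="gemini",                # optional; inferred from MODELS when omitted
)
```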
@@ -113,9 +86,8 @@ class LLM(BaseModel):
 
  model: str = Field(default=DEFAULT_MODEL_NAME)
  provider: Optional[str] = Field(default=None, description="model provider or custom model provider")
- base_url: Optional[str] = Field(default=None, description="litellm's api base")
- api_key: Optional[str] = Field(default=None)
- api_version: Optional[str] = Field(default=None)
+ base_url: Optional[str] = Field(default=None, description="api base of the model provider")
+ api_key: Optional[str] = Field(default=None, description="api key of the model provider")
 
  # optional params
  timeout: Optional[float | int] = Field(default=None)
@@ -133,23 +105,28 @@ class LLM(BaseModel):
  seed: Optional[int] = Field(default=None)
  logprobs: Optional[bool] = Field(default=None)
  top_logprobs: Optional[int] = Field(default=None)
+ response_format: Optional[Any] = Field(default=None)
+ tools: Optional[List[Dict[str, Any]]] = Field(default_factory=list, description="store a list of tool properties")
+
+ # LiteLLM specific fields
+ api_base: Optional[str] = Field(default=None, description="litellm specific field - api base of the model provider")
+ api_version: Optional[str] = Field(default=None)
+ num_retries: Optional[int] = Field(default=2)
+ context_window_fallback_dict: Optional[Dict[str, Any]] = Field(default=None, description="A mapping of model to use if call fails due to context window error")
+ fallbacks: Optional[List[Any]]= Field(default=None, description="A list of model names + params to be used, in case the initial call fails")
+ metadata: Optional[Dict[str, Any]] = Field(default=None)
 
  litellm.drop_params = True
  litellm.set_verbose = True
  os.environ['LITELLM_LOG'] = 'DEBUG'
 
-
  @model_validator(mode="after")
  def validate_base_params(self) -> Self:
  """
- 1. Model name and provider
- Check the provided model name in the list and update it with the valid model key name.
- Then add the model provider if it is not provided.
- Assign a default model and provider when we cannot find a model key.
-
- 2. Set up other base parameters for the model and LiteLLM as below:
- 1. LiteLLM - drop_params, set_verbose, callbacks
- 2. Model setup - context_window_size, api_key, base_url
+ 1) Set up a valid model name with the provider name using the MODEL list.
+ * Assign a default model and provider based on the given information when no model key is found in the MODEL list.
+
+ 2) Set up other base parameters for the model and LiteLLM.
  """
 
  if self.model is None:
@@ -160,7 +137,7 @@ class LLM(BaseModel):
  self._init_model_name = self.model
  self.model = None
 
- if self.provider and MODELS.get(self.provider) is not None:
+ if self.provider and MODELS.get(self.provider):
  provider_model_list = MODELS.get(self.provider)
  for item in provider_model_list:
  if self.model is None:
@@ -172,7 +149,6 @@ class LLM(BaseModel):
  temp_model = provider_model_list[0]
  self._logger.log(level="info", message=f"The provided model: {self._init_model_name} is not in the list. We'll assign a model: {temp_model} from the selected model provider: {self.provider}.", color="yellow")
  self.model = temp_model
- # raise PydanticCustomError("invalid_model", "The provided model is not in the list.", {})
 
  else:
  for k, v in MODELS.items():
@@ -190,29 +166,32 @@ class LLM(BaseModel):
  self._logger.log(level="info", message=f"The provided model \'{self.model}\' is not in the list. We'll assign a default model.", color="yellow")
  self.model = DEFAULT_MODEL_NAME
  self.provider = "openai"
- # raise PydanticCustomError("invalid_model", "The provided model is not in the list.", {})
 
- if self.callbacks is not None:
+
+ if self.callbacks:
  self._set_callbacks(self.callbacks)
 
  self.context_window_size = self._get_context_window_size()
 
- api_key_name = LLM_API_KEY_NAMES.get(self.provider, "LITELLM_API_KEY")
- self.api_key = os.environ.get(api_key_name, None)
+ api_key_name = self.provider.upper() + "_API_KEY" if self.provider else None
+ if api_key_name:
+ self.api_key = os.environ.get(api_key_name, None)
 
- base_url_key_name = LLM_BASE_URL_KEY_NAMES.get(self.provider, "OPENAI_API_BASE")
- self.base_url = os.environ.get(base_url_key_name, None)
+ base_url_key_name = self.provider.upper() + "_API_BASE" if self.provider else None
+ if base_url_key_name:
+ self.base_url = os.environ.get(base_url_key_name)
+ self.api_base = self.base_url
 
  return self
 
 
  def call(
  self,
- output_formats: List[str | TaskOutputFormat],
- field_list: Optional[List[ResponseField]],
  messages: List[Dict[str, str]],
- **kwargs,
- # callbacks: List[Any] = [],
+ response_format: Optional[Dict[str, Any]] = None,
+ tools: Optional[List[Tool | ToolSet | Type[Tool]]] = None,
+ config: Optional[Dict[str, Any]] = {}, # any other conditions to pass on to the model.
+ tool_res_as_final: bool = False
  ) -> str:
  """
  Execute LLM based on the agent's params and model params.
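A note on the credential lookup changed in this hunk: instead of the LLM_API_KEY_NAMES / LLM_BASE_URL_KEY_NAMES tables, the validator now derives environment variable names from the provider string. Below is a rough standalone sketch of that convention; the helper is hypothetical and not part of the package, where the equivalent logic runs inside validate_base_params().

```python
# Hypothetical illustration of the naming convention used in validate_base_params():
# the provider name is upper-cased and suffixed with _API_KEY / _API_BASE.
import os
from typing import Optional, Tuple

def resolve_provider_env(provider: Optional[str]) -> Tuple[Optional[str], Optional[str]]:
    if not provider:
        return None, None
    api_key = os.environ.get(f"{provider.upper()}_API_KEY")    # e.g. GEMINI_API_KEY
    api_base = os.environ.get(f"{provider.upper()}_API_BASE")  # e.g. GEMINI_API_BASE
    return api_key, api_base
```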
@@ -223,23 +202,72 @@ class LLM(BaseModel):
  self._set_callbacks(self.callbacks)
 
  try:
- # response_format = None
- # #! REFINEME
- # if TaskOutputFormat.JSON in output_formats:
- # response_format = LLMResponseSchema(
- # response_type="json_object", field_list=field_list
- # )
+ if tools:
+ self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]
+
+ if response_format:
+ self.response_format = { "type": "json_object" } if self.model == "gpt-3.5-turbo" or tool_res_as_final else response_format
+
+ provider = self.provider if self.provider else "openai"
 
  params = {}
- for item in LITELLM_COMPLETION_KEYS:
- if hasattr(self, item) and getattr(self, item) is not None:
- params[item] = getattr(self, item)
+ valid_params = PARAMS.get("litellm") + PARAMS.get("common") + PARAMS.get(self.provider) if self.provider else PARAMS.get("litellm") + PARAMS.get("common")
+
+ for item in valid_params:
+ if item:
+ if hasattr(self, item) and getattr(self, item):
+ params[item] = getattr(self, item)
+ elif item in config:
+ params[item] = config[item]
+ else:
+ continue
+ else:
+ continue
 
  res = litellm.completion(messages=messages, stream=False, **params)
+
+ if self.tools:
+ tool_calls = res["choices"][0]["message"]["tool_calls"]
+ tool_res = ""
+
+ for item in tool_calls:
+ func_name = item.function.name
+ func_args = item.function.arguments
+
+ if not isinstance(func_args, dict):
+ func_args = json.loads(json.dumps(eval(str(func_args))))
+
+ for tool in tools:
+ if isinstance(tool, ToolSet) and (tool.tool.name.replace(" ", "_") == func_name or tool.tool.func.__name__ == func_name):
+ tool_instance = tool.tool
+ args = tool.kwargs
+ res = tool_instance.run(params=args)
+ tool_res += str(res)
+
+ elif (isinstance(tool, Tool) or type(tool) == Tool) and (tool.name.replace(" ", "_") == func_name or tool.func.__name__ == func_name):
+ res = tool.run(params=func_args)
+ tool_res += str(res)
+
+ if tool_res_as_final == True:
+ return tool_res
+ pass
+
+ else:
+ messages.append({ "role": "tool", "tool_call_id": tool_calls.id, "content": tool_res })
+ res = litellm.completion(messages=messages, stream=False, **params)
+
  return res["choices"][0]["message"]["content"]
 
+
+ except JSONSchemaValidationError as e:
+ self._logger.log(level="error", message="Raw Response: {}".format(e.raw_response), color="red")
+ return None
+
  except Exception as e:
- self._logger.log(level="error", message=f"LiteLLM call failed: {str(e)}", color="red")
+ self._logger.log(level="error", message=f"{self.model} failed to execute: {str(e)}", color="red")
+ if "litellm.RateLimitError" in str(e):
+ raise e
+
  return None
 
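Finally, a simplified stand-in for the parameter-selection loop added to LLM.call() above: the "litellm", "common", and provider-specific key lists from PARAMS are merged, and each key is filled from the model's own fields first, then from the per-call config dict. The PARAMS literal and helper below are trimmed illustrations, not the package's actual code.

```python
# Simplified stand-in for the param-selection loop in LLM.call(); PARAMS is trimmed.
from typing import Any, Dict, Optional

PARAMS = {
    "litellm": ["num_retries", "fallbacks", "metadata"],
    "common": ["model", "messages", "temperature", "max_tokens", "tools", "response_format"],
    "openai": ["seed", "logprobs", "parallel_tool_calls"],
}

def select_params(fields: Dict[str, Any], config: Dict[str, Any], provider: Optional[str]) -> Dict[str, Any]:
    valid = PARAMS["litellm"] + PARAMS["common"] + PARAMS.get(provider or "", [])
    params: Dict[str, Any] = {}
    for key in valid:
        if fields.get(key):          # value already set on the LLM instance wins
            params[key] = fields[key]
        elif key in config:          # otherwise fall back to the per-call config dict
            params[key] = config[key]
    return params

# Example: instance fields take precedence, config fills the gaps.
print(select_params({"model": "gpt-4o", "temperature": 0.2}, {"seed": 42}, "openai"))
# -> {'model': 'gpt-4o', 'temperature': 0.2, 'seed': 42}
```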