versionhq 1.1.9.14__py3-none-any.whl → 1.1.10.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
versionhq/llm/model.py CHANGED
@@ -1,30 +1,40 @@
  import logging
+ import json
  import os
  import sys
  import threading
  import warnings
  import litellm
+ from litellm import JSONSchemaValidationError
  from abc import ABC
  from dotenv import load_dotenv
  from litellm import get_supported_openai_params
  from contextlib import contextmanager
- from typing import Any, Dict, List, Optional
+ from typing import Any, Dict, List, Optional, Type
  from typing_extensions import Self

  from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf, ConfigDict
  from pydantic_core import PydanticCustomError

- from versionhq.llm.llm_variables import LLM_CONTEXT_WINDOW_SIZES, LLM_API_KEY_NAMES, LLM_BASE_URL_KEY_NAMES, MODELS, LITELLM_COMPLETION_KEYS
+ from openai import OpenAI
+
+ from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, LLM_API_KEY_NAMES, LLM_BASE_URL_KEY_NAMES, MODELS, PARAMS, SchemaType
  from versionhq.task import TaskOutputFormat
- from versionhq.task.model import ResponseField
+ from versionhq.task.model import ResponseField, Task
+ from versionhq.tool.model import Tool, ToolSet
  from versionhq._utils.logger import Logger


  load_dotenv(override=True)
- API_KEY_LITELLM = os.environ.get("API_KEY_LITELLM")
+ LITELLM_API_KEY = os.environ.get("LITELLM_API_KEY")
+ LITELLM_API_BASE = os.environ.get("LITELLM_API_BASE")
  DEFAULT_CONTEXT_WINDOW_SIZE = int(8192 * 0.75)
  DEFAULT_MODEL_NAME = os.environ.get("DEFAULT_MODEL_NAME")

+ proxy_openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"), organization="versionhq", base_url=LITELLM_API_BASE)
+ openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
+
+
  class FilteredStream:
      def __init__(self, original_stream):
          self._original_stream = original_stream
@@ -62,49 +72,12 @@ def suppress_warnings():
          sys.stderr = old_stderr


- class LLMResponseSchema:
-     """
-     Use the response schema for LLM response.
-     `field_list` contains the title, value type, bool if required of each field that needs to be returned.
-     field_list: [{ title, type, required } ]
-
-     i.e., reponse_schema
-     response_type: "array" *options: "array", "dict"
-     propeties: { "recipe_name": { "type": "string" }, },
-     required: ["recipe_name"]
-     """
-
-     def __init__(self, response_type: str, field_list: List[ResponseField]):
-         self.type = response_type
-         self.field_list = field_list
-
-     @property
-     def schema(self):
-         if len(self.field_list) == 0:
-             return
-
-         properties = [
-             {
-                 field.title: {
-                     "type": field.type,
-                 }
-             }
-             for field in self.field_list
-         ]
-         required = [field.title for field in self.field_list if field.required == True]
-         response_schema = {
-             "type": self.type,
-             "items": {"type": "object", "properties": {*properties}},
-             "required": required,
-         }
-         return response_schema
-
-
  class LLM(BaseModel):
      """
      An LLM class to store params except for response formats which will be given in the task handling process.
      Use LiteLLM to connect with the model of choice.
      Some optional params are passed by the agent, else follow the default settings of the model provider.
+     Ref. https://docs.litellm.ai/docs/completion/input
      """

      _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
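Note: the updated LLM class docstring now points to LiteLLM's completion input reference (https://docs.litellm.ai/docs/completion/input). As a rough, hedged sketch of what that wiring means in practice (not part of the package diff; the model name and parameter values below are illustrative assumptions), the params stored on an LLM instance end up as keyword arguments to litellm.completion:

    import litellm

    # Assumed example values; the class collects fields like these and forwards the non-empty ones.
    params = {"model": "gpt-4o-mini", "temperature": 0.3, "num_retries": 2}
    res = litellm.completion(messages=[{"role": "user", "content": "Hello"}], stream=False, **params)
    print(res["choices"][0]["message"]["content"])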
@@ -113,9 +86,8 @@ class LLM(BaseModel):

      model: str = Field(default=DEFAULT_MODEL_NAME)
      provider: Optional[str] = Field(default=None, description="model provider or custom model provider")
-     base_url: Optional[str] = Field(default=None, description="litellm's api base")
-     api_key: Optional[str] = Field(default=None)
-     api_version: Optional[str] = Field(default=None)
+     base_url: Optional[str] = Field(default=None, description="api base of the model provider")
+     api_key: Optional[str] = Field(default=None, description="api key of the model provider")

      # optional params
      timeout: Optional[float | int] = Field(default=None)
@@ -133,23 +105,28 @@ class LLM(BaseModel):
      seed: Optional[int] = Field(default=None)
      logprobs: Optional[bool] = Field(default=None)
      top_logprobs: Optional[int] = Field(default=None)
+     response_format: Optional[Any] = Field(default=None)
+     tools: Optional[List[Dict[str, Any]]] = Field(default_factory=list, description="store a list of tool properties")
+
+     # LiteLLM specific fields
+     api_base: Optional[str] = Field(default=None, description="litellm specific field - api base of the model provider")
+     api_version: Optional[str] = Field(default=None)
+     num_retries: Optional[int] = Field(default=2)
+     context_window_fallback_dict: Optional[Dict[str, Any]] = Field(default=None, description="A mapping of model to use if call fails due to context window error")
+     fallbacks: Optional[List[Any]]= Field(default=None, description="A list of model names + params to be used, in case the initial call fails")
+     metadata: Optional[Dict[str, Any]] = Field(default=None)

      litellm.drop_params = True
      litellm.set_verbose = True
      os.environ['LITELLM_LOG'] = 'DEBUG'

-
      @model_validator(mode="after")
      def validate_base_params(self) -> Self:
          """
-         1. Model name and provider
-             Check the provided model name in the list and update it with the valid model key name.
-             Then add the model provider if it is not provided.
-             Assign a default model and provider when we cannot find a model key.
-
-         2. Set up other base parameters for the model and LiteLLM as below:
-             1. LiteLLM - drop_params, set_verbose, callbacks
-             2. Model setup - context_window_size, api_key, base_url
+         1) Set up a valid model name with the provider name using the MODEL list.
+             * Assign a default model and provider based on the given information when no model key is found in the MODEL list.
+
+         2) Set up other base parameters for the model and LiteLLM.
          """

          if self.model is None:
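Note: the rewritten validate_base_params docstring above describes a lookup against a MODELS table. A minimal sketch of that resolution logic, assuming MODELS maps provider names to lists of accepted model keys (not part of the package diff; the entries below are placeholders, not the package's actual registry):

    # Hypothetical MODELS table and lookup; entries are placeholders.
    MODELS = {"openai": ["gpt-4o", "gpt-4o-mini"], "gemini": ["gemini/gemini-1.5-flash"]}
    DEFAULT_MODEL_NAME = "gpt-4o-mini"

    def resolve(model: str | None, provider: str | None) -> tuple[str, str]:
        if provider and MODELS.get(provider):
            candidates = MODELS[provider]
            # keep the requested model if the provider lists it, else fall back to the provider's first entry
            return (model if model in candidates else candidates[0]), provider
        for prov, keys in MODELS.items():
            if model in keys:
                return model, prov
        return DEFAULT_MODEL_NAME, "openai"  # default model and provider when no key is found

    print(resolve("gpt-4o", None))         # ('gpt-4o', 'openai')
    print(resolve("unknown-model", None))  # ('gpt-4o-mini', 'openai')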
@@ -160,7 +137,7 @@ class LLM(BaseModel):
              self._init_model_name = self.model
              self.model = None

-         if self.provider and MODELS.get(self.provider) is not None:
+         if self.provider and MODELS.get(self.provider):
              provider_model_list = MODELS.get(self.provider)
              for item in provider_model_list:
                  if self.model is None:
@@ -172,7 +149,6 @@ class LLM(BaseModel):
                  temp_model = provider_model_list[0]
                  self._logger.log(level="info", message=f"The provided model: {self._init_model_name} is not in the list. We'll assign a model: {temp_model} from the selected model provider: {self.provider}.", color="yellow")
                  self.model = temp_model
-                 # raise PydanticCustomError("invalid_model", "The provided model is not in the list.", {})

          else:
              for k, v in MODELS.items():
@@ -190,56 +166,116 @@ class LLM(BaseModel):
                  self._logger.log(level="info", message=f"The provided model \'{self.model}\' is not in the list. We'll assign a default model.", color="yellow")
                  self.model = DEFAULT_MODEL_NAME
                  self.provider = "openai"
-                 # raise PydanticCustomError("invalid_model", "The provided model is not in the list.", {})

-         if self.callbacks is not None:
+
+         if self.callbacks:
              self._set_callbacks(self.callbacks)

          self.context_window_size = self._get_context_window_size()

-         api_key_name = LLM_API_KEY_NAMES.get(self.provider, "LITELLM_API_KEY")
-         self.api_key = os.environ.get(api_key_name, None)
+         api_key_name = self.provider.upper() + "_API_KEY" if self.provider else None
+         if api_key_name:
+             self.api_key = os.environ.get(api_key_name, None)

-         base_url_key_name = LLM_BASE_URL_KEY_NAMES.get(self.provider, "OPENAI_API_BASE")
-         self.base_url = os.environ.get(base_url_key_name, None)
+         base_url_key_name = self.provider.upper() + "_API_BASE" if self.provider else None
+         if base_url_key_name:
+             self.base_url = os.environ.get(base_url_key_name)
+             self.api_base = self.base_url

          return self


      def call(
          self,
-         output_formats: List[str | TaskOutputFormat],
-         field_list: Optional[List[ResponseField]],
          messages: List[Dict[str, str]],
-         **kwargs,
-         # callbacks: List[Any] = [],
+         response_format: Optional[Dict[str, Any]] = None,
+         tools: Optional[List[Tool | ToolSet | Type[Tool]]] = None,
+         config: Optional[Dict[str, Any]] = {}, # any other conditions to pass on to the model.
+         tool_res_as_final: bool = False
      ) -> str:
          """
          Execute LLM based on the agent's params and model params.
          """
+         litellm.drop_params = True

          with suppress_warnings():
              if len(self.callbacks) > 0:
                  self._set_callbacks(self.callbacks)

              try:
-                 # response_format = None
-                 # #! REFINEME
-                 # if TaskOutputFormat.JSON in output_formats:
-                 #     response_format = LLMResponseSchema(
-                 #         response_type="json_object", field_list=field_list
-                 #     )
+                 if tools:
+                     self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]
+
+                 if response_format:
+                     self.response_format = { "type": "json_object" } if tool_res_as_final else response_format
+
+                 provider = self.provider if self.provider else "openai"

                  params = {}
-                 for item in LITELLM_COMPLETION_KEYS:
-                     if hasattr(self, item) and getattr(self, item) is not None:
-                         params[item] = getattr(self, item)
+                 valid_params = PARAMS.get("litellm") + PARAMS.get("common") + PARAMS.get(self.provider) if self.provider else PARAMS.get("litellm") + PARAMS.get("common")
+
+                 for item in valid_params:
+                     if item:
+                         if hasattr(self, item) and getattr(self, item):
+                             params[item] = getattr(self, item)
+                         elif item in config:
+                             params[item] = config[item]
+                         else:
+                             continue
+                     else:
+                         continue

                  res = litellm.completion(messages=messages, stream=False, **params)
+
+                 if self.tools:
+                     messages.append(res["choices"][0]["message"])
+                     tool_calls = res["choices"][0]["message"]["tool_calls"]
+                     tool_res = ""
+
+                     for item in tool_calls:
+                         func_name = item.function.name
+                         func_args = item.function.arguments
+
+                         if not isinstance(func_args, dict):
+                             func_args = json.loads(json.dumps(eval(str(func_args))))
+
+                         for tool in tools:
+                             if isinstance(tool, ToolSet) and (tool.tool.name.replace(" ", "_") == func_name or tool.tool.func.__name__ == func_name):
+                                 tool_instance = tool.tool
+                                 args = tool.kwargs
+                                 res = tool_instance.run(params=args)
+
+                                 if tool_res_as_final:
+                                     tool_res += str(res)
+                                 else:
+                                     messages.append({ "role": "tool", "tool_call_id": item.id, "content": str(res) })
+
+                             elif (isinstance(tool, Tool) or type(tool) == Tool) and (tool.name.replace(" ", "_") == func_name or tool.func.__name__ == func_name):
+                                 res = tool.run(params=func_args)
+                                 if tool_res_as_final:
+                                     tool_res += str(res)
+                                 else:
+                                     messages.append({ "role": "tool", "tool_call_id": item.id, "content": str(res) })
+
+                     if tool_res_as_final:
+                         return tool_res
+
+                     else:
+                         print(messages)
+                         res = litellm.completion(messages=messages, stream=False, **params)
+
                  return res["choices"][0]["message"]["content"]

+
+             except JSONSchemaValidationError as e:
+                 self._logger.log(level="error", message="Raw Response: {}".format(e.raw_response), color="red")
+                 return None
+
              except Exception as e:
-                 self._logger.log(level="error", message=f"LiteLLM call failed: {str(e)}", color="red")
+                 self._logger.log(level="error", message=f"{self.model} failed to execute: {str(e)}", color="red")
+                 if "litellm.RateLimitError" in str(e):
+                     raise e
+
                  return None

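Note: for orientation, a hedged usage sketch of the reworked call() signature above (not part of the package diff). The Tool constructor arguments are an assumption about versionhq.tool.model's interface — the diff only shows that a tool exposes name, func, properties, and run(params=...) — and the model name is illustrative:

    from versionhq.llm.model import LLM
    from versionhq.tool.model import Tool

    def get_weather(city: str) -> str:
        return f"Sunny in {city}"

    llm = LLM(model="gpt-4o-mini", provider="openai")
    res = llm.call(
        messages=[{"role": "user", "content": "What is the weather in Paris?"}],
        tools=[Tool(func=get_weather)],  # assumed constructor; the tool result feeds back into the chat or is returned directly
        tool_res_as_final=True,          # return the tool output as the final response
    )
    print(res)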