versionhq 1.1.9.13__py3-none-any.whl → 1.1.10.2__py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- versionhq/__init__.py +1 -1
- versionhq/_utils/logger.py +1 -6
- versionhq/_utils/process_config.py +9 -12
- versionhq/agent/TEMPLATES/Backstory.py +4 -3
- versionhq/agent/model.py +234 -191
- versionhq/clients/product/model.py +1 -1
- versionhq/clients/workflow/model.py +1 -1
- versionhq/llm/llm_vars.py +208 -58
- versionhq/llm/model.py +211 -148
- versionhq/task/__init__.py +1 -1
- versionhq/task/model.py +292 -126
- versionhq/team/model.py +2 -5
- versionhq/team/team_planner.py +13 -14
- versionhq/tool/__init__.py +0 -56
- versionhq/tool/cache_handler.py +40 -0
- versionhq/tool/composio_tool.py +3 -2
- versionhq/tool/composio_tool_vars.py +56 -0
- versionhq/tool/decorator.py +5 -6
- versionhq/tool/model.py +243 -97
- versionhq/tool/tool_handler.py +11 -19
- {versionhq-1.1.9.13.dist-info → versionhq-1.1.10.2.dist-info}/LICENSE +0 -0
- {versionhq-1.1.9.13.dist-info → versionhq-1.1.10.2.dist-info}/METADATA +26 -25
- versionhq-1.1.10.2.dist-info/RECORD +44 -0
- versionhq/_utils/cache_handler.py +0 -13
- versionhq-1.1.9.13.dist-info/RECORD +0 -43
- {versionhq-1.1.9.13.dist-info → versionhq-1.1.10.2.dist-info}/WHEEL +0 -0
- {versionhq-1.1.9.13.dist-info → versionhq-1.1.10.2.dist-info}/top_level.txt +0 -0
versionhq/llm/model.py
CHANGED
@@ -1,22 +1,38 @@
 import logging
+import json
 import os
 import sys
 import threading
 import warnings
 import litellm
+from litellm import JSONSchemaValidationError
+from abc import ABC
 from dotenv import load_dotenv
 from litellm import get_supported_openai_params
 from contextlib import contextmanager
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Type
+from typing_extensions import Self
 
-from
+from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf, ConfigDict
+from pydantic_core import PydanticCustomError
+
+from openai import OpenAI
+
+from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, LLM_API_KEY_NAMES, LLM_BASE_URL_KEY_NAMES, MODELS, PARAMS, SchemaType
 from versionhq.task import TaskOutputFormat
-from versionhq.task.model import ResponseField
+from versionhq.task.model import ResponseField, Task
+from versionhq.tool.model import Tool, ToolSet
+from versionhq._utils.logger import Logger
+
 
 load_dotenv(override=True)
-
-
-
+LITELLM_API_KEY = os.environ.get("LITELLM_API_KEY")
+LITELLM_API_BASE = os.environ.get("LITELLM_API_BASE")
+DEFAULT_CONTEXT_WINDOW_SIZE = int(8192 * 0.75)
+DEFAULT_MODEL_NAME = os.environ.get("DEFAULT_MODEL_NAME")
+
+proxy_openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"), organization="versionhq", base_url=LITELLM_API_BASE)
+openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
 
 
 class FilteredStream:
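The rewritten module now resolves its configuration at import time from environment variables. A minimal sketch of the environment this hunk reads; the variable names are taken from the lines above, while the values are placeholders rather than package defaults:

import os

# Environment read at import time by versionhq/llm/model.py (values illustrative).
os.environ["OPENAI_API_KEY"] = "sk-placeholder"           # consumed by both OpenAI clients
os.environ["LITELLM_API_BASE"] = "http://localhost:4000"  # becomes base_url of proxy_openai_client
os.environ["DEFAULT_MODEL_NAME"] = "gpt-4o-mini"          # fallback model name used by the LLM class

import versionhq.llm.model  # instantiates proxy_openai_client and openai_client on import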
@@ -56,184 +72,231 @@ def suppress_warnings():
         sys.stderr = old_stderr
 
 
-class
-    """
-    Use the response schema for LLM response.
-    `field_list` contains the title, value type, bool if required of each field that needs to be returned.
-    field_list: [{ title, type, required } ]
-
-    i.e., reponse_schema
-    response_type: "array" *options: "array", "dict"
-    propeties: { "recipe_name": { "type": "string" }, },
-    required: ["recipe_name"]
-    """
-
-    def __init__(self, response_type: str, field_list: List[ResponseField]):
-        self.type = response_type
-        self.field_list = field_list
-
-    @property
-    def schema(self):
-        if len(self.field_list) == 0:
-            return
-
-        properties = [
-            {
-                field.title: {
-                    "type": field.type,
-                }
-            }
-            for field in self.field_list
-        ]
-        required = [field.title for field in self.field_list if field.required == True]
-        response_schema = {
-            "type": self.type,
-            "items": {"type": "object", "properties": {*properties}},
-            "required": required,
-        }
-        return response_schema
-
-
-class LLM:
+class LLM(BaseModel):
     """
+    An LLM class to store params except for response formats which will be given in the task handling process.
     Use LiteLLM to connect with the model of choice.
-
+    Some optional params are passed by the agent, else follow the default settings of the model provider.
+    Ref. https://docs.litellm.ai/docs/completion/input
     """
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
+    _init_model_name: str = PrivateAttr(default=None)
+    model_config = ConfigDict(extra="allow")
+
+    model: str = Field(default=DEFAULT_MODEL_NAME)
+    provider: Optional[str] = Field(default=None, description="model provider or custom model provider")
+    base_url: Optional[str] = Field(default=None, description="api base of the model provider")
+    api_key: Optional[str] = Field(default=None, description="api key of the model provider")
+
+    # optional params
+    timeout: Optional[float | int] = Field(default=None)
+    max_tokens: Optional[int] = Field(default=None)
+    max_completion_tokens: Optional[int] = Field(default=None)
+    context_window_size: Optional[int] = Field(default=DEFAULT_CONTEXT_WINDOW_SIZE)
+    callbacks: List[Any] = Field(default_factory=list)
+    temperature: Optional[float] = Field(default=None)
+    top_p: Optional[float] = Field(default=None)
+    n: Optional[int] = Field(default=None)
+    stop: Optional[str | List[str]] = Field(default=None)
+    presence_penalty: Optional[float] = Field(default=None)
+    frequency_penalty: Optional[float] = Field(default=None)
+    logit_bias: Optional[Dict[int, float]] = Field(default=None)
+    seed: Optional[int] = Field(default=None)
+    logprobs: Optional[bool] = Field(default=None)
+    top_logprobs: Optional[int] = Field(default=None)
+    response_format: Optional[Any] = Field(default=None)
+    tools: Optional[List[Dict[str, Any]]] = Field(default_factory=list, description="store a list of tool properties")
+
+    # LiteLLM specific fields
+    api_base: Optional[str] = Field(default=None, description="litellm specific field - api base of the model provider")
+    api_version: Optional[str] = Field(default=None)
+    num_retries: Optional[int] = Field(default=2)
+    context_window_fallback_dict: Optional[Dict[str, Any]] = Field(default=None, description="A mapping of model to use if call fails due to context window error")
+    fallbacks: Optional[List[Any]]= Field(default=None, description="A list of model names + params to be used, in case the initial call fails")
+    metadata: Optional[Dict[str, Any]] = Field(default=None)
+
+    litellm.drop_params = True
+    litellm.set_verbose = True
+    os.environ['LITELLM_LOG'] = 'DEBUG'
+
+    @model_validator(mode="after")
+    def validate_base_params(self) -> Self:
+        """
+        1) Set up a valid model name with the provider name using the MODEL list.
+        * Assign a default model and provider based on the given information when no model key is found in the MODEL list.
+
+        2) Set up other base parameters for the model and LiteLLM.
+        """
+
+        if self.model is None:
+            self._logger.log(level="error", message="Model name is missing.", color="red")
+            raise PydanticCustomError("model_missing", "The model name must be provided.", {})
+
+
+        self._init_model_name = self.model
+        self.model = None
+
+        if self.provider and MODELS.get(self.provider):
+            provider_model_list = MODELS.get(self.provider)
+            for item in provider_model_list:
+                if self.model is None:
+                    if item == self._init_model_name:
+                        self.model = item
+                    elif self._init_model_name in item and self.model is None:
+                        self.model = item
+                    else:
+                        temp_model = provider_model_list[0]
+                        self._logger.log(level="info", message=f"The provided model: {self._init_model_name} is not in the list. We'll assign a model: {temp_model} from the selected model provider: {self.provider}.", color="yellow")
+                        self.model = temp_model
+
+        else:
+            for k, v in MODELS.items():
+                for item in v:
+                    if self.model is None:
+                        if self._init_model_name == item:
+                            self.model = item
+                            self.provider = k
+
+                        elif self.model is None and self._init_model_name in item:
+                            self.model = item
+                            self.provider = k
+
+            if self.model is None:
+                self._logger.log(level="info", message=f"The provided model '{self.model}' is not in the list. We'll assign a default model.", color="yellow")
+                self.model = DEFAULT_MODEL_NAME
+                self.provider = "openai"
+
+
+        if self.callbacks:
+            self._set_callbacks(self.callbacks)
+
+        self.context_window_size = self._get_context_window_size()
+
+        api_key_name = self.provider.upper() + "_API_KEY" if self.provider else None
+        if api_key_name:
+            self.api_key = os.environ.get(api_key_name, None)
+
+        base_url_key_name = self.provider.upper() + "_API_BASE" if self.provider else None
+        if base_url_key_name:
+            self.base_url = os.environ.get(base_url_key_name)
+            self.api_base = self.base_url
+
+        return self
+
 
     def call(
         self,
-        output_formats: List[TaskOutputFormat],
-        field_list: Optional[List[ResponseField]],
         messages: List[Dict[str, str]],
-
+        response_format: Optional[Dict[str, Any]] = None,
+        tools: Optional[List[Tool | ToolSet | Type[Tool]]] = None,
+        config: Optional[Dict[str, Any]] = {}, # any other conditions to pass on to the model.
+        tool_res_as_final: bool = False
     ) -> str:
         """
-        Execute LLM based on
+        Execute LLM based on the agent's params and model params.
         """
 
         with suppress_warnings():
-            if
-                self.
+            if len(self.callbacks) > 0:
+                self._set_callbacks(self.callbacks)
 
             try:
-
-
-
-                if
-                    response_format =
-
-
-
-                params = {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    "
-                    "
-
-
-
-
-
+                if tools:
+                    self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]
+
+                if response_format:
+                    self.response_format = { "type": "json_object" } if self.model == "gpt-3.5-turbo" or tool_res_as_final else response_format
+
+                provider = self.provider if self.provider else "openai"
+
+                params = {}
+                valid_params = PARAMS.get("litellm") + PARAMS.get("common") + PARAMS.get(self.provider) if self.provider else PARAMS.get("litellm") + PARAMS.get("common")
+
+                for item in valid_params:
+                    if item:
+                        if hasattr(self, item) and getattr(self, item):
+                            params[item] = getattr(self, item)
+                        elif item in config:
+                            params[item] = config[item]
+                        else:
+                            continue
+                    else:
+                        continue
+
+                res = litellm.completion(messages=messages, stream=False, **params)
+
+                if self.tools:
+                    tool_calls = res["choices"][0]["message"]["tool_calls"]
+                    tool_res = ""
+
+                    for item in tool_calls:
+                        func_name = item.function.name
+                        func_args = item.function.arguments
+
+                        if not isinstance(func_args, dict):
+                            func_args = json.loads(json.dumps(eval(str(func_args))))
+
+                        for tool in tools:
+                            if isinstance(tool, ToolSet) and (tool.tool.name.replace(" ", "_") == func_name or tool.tool.func.__name__ == func_name):
+                                tool_instance = tool.tool
+                                args = tool.kwargs
+                                res = tool_instance.run(params=args)
+                                tool_res += str(res)
+
+                            elif (isinstance(tool, Tool) or type(tool) == Tool) and (tool.name.replace(" ", "_") == func_name or tool.func.__name__ == func_name):
+                                res = tool.run(params=func_args)
+                                tool_res += str(res)
+
+                    if tool_res_as_final == True:
+                        return tool_res
+                        pass
+
+                    else:
+                        messages.append({ "role": "tool", "tool_call_id": tool_calls.id, "content": tool_res })
+                        res = litellm.completion(messages=messages, stream=False, **params)
+
                 return res["choices"][0]["message"]["content"]
 
+
+            except JSONSchemaValidationError as e:
+                self._logger.log(level="error", message="Raw Response: {}".format(e.raw_response), color="red")
+                return None
+
             except Exception as e:
-
+                self._logger.log(level="error", message=f"{self.model} failed to execute: {str(e)}", color="red")
+                if "litellm.RateLimitError" in str(e):
+                    raise e
+
                 return None
 
-
+
+    def _supports_function_calling(self) -> bool:
         try:
             params = get_supported_openai_params(model=self.model)
             return "response_format" in params
         except Exception as e:
-
+            self._logger.log(level="error", message=f"Failed to get supported params: {str(e)}", color="red")
             return False
 
-
+
+    def _supports_stop_words(self) -> bool:
         try:
             params = get_supported_openai_params(model=self.model)
             return "stop" in params
         except Exception as e:
-
+            self._logger.log(level="error", message=f"Failed to get supported params: {str(e)}", color="red")
             return False
 
-
+
+    def _get_context_window_size(self) -> int:
         """
         Only use 75% of the context window size to avoid cutting the message in the middle.
         """
-        return (
-
-            if hasattr(LLM_CONTEXT_WINDOW_SIZES, self.model)
-            else DEFAULT_CONTEXT_WINDOW
-        )
+        return int(LLM_CONTEXT_WINDOW_SIZES.get(self.model) * 0.75) if LLM_CONTEXT_WINDOW_SIZES.get(self.model) is not None else DEFAULT_CONTEXT_WINDOW_SIZE
+
 
-    def
+    def _set_callbacks(self, callbacks: List[Any]):
         callback_types = [type(callback) for callback in callbacks]
         for callback in litellm.success_callback[:]:
             if type(callback) in callback_types:
versionhq/task/__init__.py
CHANGED