versionhq 1.1.9.13__py3-none-any.whl → 1.1.10.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +1 -1
- versionhq/_utils/logger.py +1 -6
- versionhq/_utils/process_config.py +9 -12
- versionhq/agent/TEMPLATES/Backstory.py +4 -3
- versionhq/agent/model.py +234 -191
- versionhq/clients/product/model.py +1 -1
- versionhq/clients/workflow/model.py +1 -1
- versionhq/llm/llm_vars.py +208 -58
- versionhq/llm/model.py +211 -148
- versionhq/task/__init__.py +1 -1
- versionhq/task/model.py +292 -126
- versionhq/team/model.py +2 -5
- versionhq/team/team_planner.py +13 -14
- versionhq/tool/__init__.py +0 -56
- versionhq/tool/cache_handler.py +40 -0
- versionhq/tool/composio_tool.py +3 -2
- versionhq/tool/composio_tool_vars.py +56 -0
- versionhq/tool/decorator.py +5 -6
- versionhq/tool/model.py +243 -97
- versionhq/tool/tool_handler.py +11 -19
- {versionhq-1.1.9.13.dist-info → versionhq-1.1.10.2.dist-info}/LICENSE +0 -0
- {versionhq-1.1.9.13.dist-info → versionhq-1.1.10.2.dist-info}/METADATA +26 -25
- versionhq-1.1.10.2.dist-info/RECORD +44 -0
- versionhq/_utils/cache_handler.py +0 -13
- versionhq-1.1.9.13.dist-info/RECORD +0 -43
- {versionhq-1.1.9.13.dist-info → versionhq-1.1.10.2.dist-info}/WHEEL +0 -0
- {versionhq-1.1.9.13.dist-info → versionhq-1.1.10.2.dist-info}/top_level.txt +0 -0
versionhq/clients/product/model.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Any, Dict, List, Callable, Type, Optional, get_args, get_origin
 from pydantic import UUID4, InstanceOf, BaseModel, ConfigDict, Field, create_model, field_validator, model_validator
 from pydantic_core import PydanticCustomError
 
-from versionhq.tool import ComposioAppName
+from versionhq.tool.composio_tool_vars import ComposioAppName
 
 
 class ProductProvider(ABC, BaseModel):
versionhq/clients/workflow/model.py
CHANGED
@@ -10,7 +10,7 @@ from versionhq.clients.product.model import Product
 from versionhq.clients.customer.model import Customer
 from versionhq.agent.model import Agent
 from versionhq.team.model import Team
-from versionhq.tool import ComposioAppName
+from versionhq.tool.composio_tool_vars import ComposioAppName
 
 
 class ScoreFormat:
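Both hunks swap the same import: ComposioAppName now lives in the new versionhq/tool/composio_tool_vars.py module (+56 lines) rather than versionhq/tool/__init__.py (-56 lines). A minimal migration sketch for downstream code, assuming no compatibility re-export is left behind in the old location:

# Before (1.1.9.13): the enum was exposed from the tool package root.
# from versionhq.tool import ComposioAppName

# After (1.1.10.2): import from the dedicated vars module instead.
from versionhq.tool.composio_tool_vars import ComposioAppName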
versionhq/llm/llm_vars.py
CHANGED
@@ -1,3 +1,101 @@
+from enum import Enum
+from typing import Type
+
+JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
+
+
+"""
+List of models available on the framework.
+Model names align with the LiteLLM's key names defined in the JSON URL.
+Provider names align with the custom provider or model provider names.
+-> model_key = custom_provider_name/model_name
+
+Option
+litellm.pick_cheapest_chat_models_from_llm_provider(custom_llm_provider: str, n=1)
+"""
+
+MODELS = {
+    "openai": [
+        "gpt-3.5-turbo",
+        "gpt-4",
+        "gpt-4o",
+        "gpt-4o-mini",
+        "o1-mini",
+        "o1-preview",
+    ],
+    "gemini": [
+        "gemini/gemini-1.5-flash",
+        "gemini/gemini-1.5-pro",
+        "gemini/gemini-2.0-flash-exp",
+        # "gemini/gemini-gemma-2-9b-it",
+        # "gemini/gemini-gemma-2-27b-it",
+    ],
+    # "vetrex_ai": [
+    # ],
+    "anthropic": [
+        "claude-3-5-sonnet-20241022",
+        "claude-3-5-sonnet-20240620",
+        "claude-3-sonnet-20240229",
+        "claude-3-opus-20240229",
+        "claude-3-haiku-20240307",
+    ],
+    # "ollama": [
+    #     "ollama/llama3.1",
+    #     "ollama/mixtral",
+    # ],
+    # "watson": [
+    #     "watsonx/meta-llama/llama-3-1-70b-instruct",
+    #     "watsonx/meta-llama/llama-3-1-8b-instruct",
+    #     "watsonx/meta-llama/llama-3-2-11b-vision-instruct",
+    #     "watsonx/meta-llama/llama-3-2-1b-instruct",
+    #     "watsonx/meta-llama/llama-3-2-90b-vision-instruct",
+    #     "watsonx/meta-llama/llama-3-405b-instruct",
+    #     "watsonx/mistral/mistral-large",
+    #     "watsonx/ibm/granite-3-8b-instruct",
+    # ],
+    # "bedrock": [
+    #     "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
+    #     "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
+    #     "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
+    #     "bedrock/anthropic.claude-3-opus-20240229-v1:0",
+    #     "bedrock/anthropic.claude-v2:1",
+    #     "bedrock/anthropic.claude-v2",
+    #     "bedrock/anthropic.claude-instant-v1",
+    #     "bedrock/meta.llama3-1-405b-instruct-v1:0",
+    #     "bedrock/meta.llama3-1-70b-instruct-v1:0",
+    #     "bedrock/meta.llama3-1-8b-instruct-v1:0",
+    #     "bedrock/meta.llama3-70b-instruct-v1:0",
+    #     "bedrock/meta.llama3-8b-instruct-v1:0",
+    #     "bedrock/amazon.titan-text-lite-v1",
+    #     "bedrock/amazon.titan-text-express-v1",
+    #     "bedrock/cohere.command-text-v14",
+    #     "bedrock/ai21.j2-mid-v1",
+    #     "bedrock/ai21.j2-ultra-v1",
+    #     "bedrock/ai21.jamba-instruct-v1:0",
+    #     "bedrock/meta.llama2-13b-chat-v1",
+    #     "bedrock/meta.llama2-70b-chat-v1",
+    #     "bedrock/mistral.mistral-7b-instruct-v0:2",
+    #     "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
+    # ],
+}
+
+
+PROVIDERS = [
+    "openai",
+    "anthropic",
+    "gemini",
+    "ollama",
+    "watson",
+    "bedrock",
+    "azure",
+    "cerebras",
+    "llama",
+]
+
+
+"""
+Max input token size by the model.
+"""
 LLM_CONTEXT_WINDOW_SIZES = {
     "gpt-3.5-turbo": 8192,
     "gpt-4": 8192,
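For orientation: MODELS maps each provider name to the LiteLLM model keys it supports, and PROVIDERS is the allow-list of provider names. A minimal sketch of how the two tables compose (default_model_for is illustrative, not a package API):

from versionhq.llm.llm_vars import MODELS, PROVIDERS

def default_model_for(provider: str) -> str:
    # Guard on the provider allow-list, then take the first registered key.
    if provider not in PROVIDERS:
        raise ValueError(f"unsupported provider: {provider}")
    models = MODELS.get(provider, [])
    if not models:
        raise ValueError(f"no models registered for: {provider}")
    return models[0]

print(default_model_for("gemini"))  # gemini/gemini-1.5-flash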
@@ -6,6 +104,19 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "gpt-4-turbo": 128000,
     "o1-preview": 128000,
     "o1-mini": 128000,
+
+    "gemini/gemini-1.5-flash": 1048576,
+    "gemini/gemini-1.5-pro": 2097152,
+    "gemini/gemini-2.0-flash-exp": 1048576,
+    "gemini/gemini-gemma-2-9b-it": 8192,
+    "gemini/gemini-gemma-2-27b-it": 8192,
+
+    "claude-3-5-sonnet-20241022": 200000,
+    "claude-3-5-sonnet-20240620": 200000,
+    "claude-3-sonnet-20240229": 200000,
+    "claude-3-opus-20240229": 200000,
+    "claude-3-haiku-20240307": 200000,
+
     "deepseek-chat": 128000,
     "gemma2-9b-it": 8192,
     "gemma-7b-it": 8192,
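The context-window table now covers the Gemini and Claude 3.x families. A hypothetical lookup helper, assuming a conservative 8192-token floor for models missing from the table:

from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES

DEFAULT_CONTEXT_WINDOW = 8192  # assumed floor; not defined in the package

def context_window(model_key: str) -> int:
    # Fall back to the floor when a model key is not listed.
    return LLM_CONTEXT_WINDOW_SIZES.get(model_key, DEFAULT_CONTEXT_WINDOW)

assert context_window("gemini/gemini-1.5-pro") == 2097152
assert context_window("claude-3-haiku-20240307") == 200000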
@@ -20,9 +131,22 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "llama3-70b-8192": 8192,
     "llama3-8b-8192": 8192,
     "mixtral-8x7b-32768": 32768,
+    "claude-3-5-sonnet-2024102": 200000,
 }
 
 
+LLM_API_KEY_NAMES = {
+    "openai": "OPENAI_API_KEY",
+    "anthropic": "ANTHROPIC_API_KEY",
+    "gemini": "GEMINI_API_KEY",
+}
+
+LLM_BASE_URL_KEY_NAMES = {
+    "openai": "OPENAI_API_BASE",
+    "gemini": "GEMINI_API_BASE",
+    "anthropic": "ANTHROPIC_API_BASE",
+}
+
 LLM_VARS = {
     "openai": [
         {
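LLM_API_KEY_NAMES and LLM_BASE_URL_KEY_NAMES name the environment variables per provider. A minimal sketch of the env-var resolution they imply (resolve_credentials is illustrative, not a package API):

import os
from versionhq.llm.llm_vars import LLM_API_KEY_NAMES, LLM_BASE_URL_KEY_NAMES

def resolve_credentials(provider: str) -> tuple:
    # Read the provider's key/base-URL env vars; None when unset or unmapped.
    api_key = os.environ.get(LLM_API_KEY_NAMES.get(provider, ""))
    base_url = os.environ.get(LLM_BASE_URL_KEY_NAMES.get(provider, ""))
    return api_key, base_url

key, url = resolve_credentials("anthropic")  # reads ANTHROPIC_API_KEY / ANTHROPIC_API_BASE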
@@ -107,67 +231,93 @@ LLM_VARS = {
 }
 
 
-PROVIDERS = [
-    "openai",
-    "anthropic",
-    "gemini",
-    "ollama",
-    "watson",
-    "bedrock",
-    "azure",
-    "cerebras",
-    "llama",
-]
 
[13 removed lines are blank or truncated to bare quotes in this view; the last recoverable entry is "gemini/gemini-gemma-2-27b-it",]
+"""
+Params for litellm.completion() func. Address common/unique params to each provider.
+"""
+
+PARAMS = {
+    "litellm": [
+        "api_base",
+        "api_version",
+        "num_retries",
+        "context_window_fallback_dict",
+        "fallbacks",
+        "metadata",
     ],
[10 removed lines truncated to bare quotes in this view]
+    "common": [
+        "model",
+        "messages",
+        "temperature",
+        "top_p",
+        "max_tokens",
+        "stream",
+        "tools",
+        "tool_choice",
+        "response_format",
+        "n",
+        "stop",
+        "base_url",
+        "api_key",
     ],
[23 removed lines truncated to bare quotes in this view]
+    "openai": [
+        "timeout",
+        # "temperature",
+        # "top_p",
+        # "n",
+        # "stream",
+        "stream_options",
+        # "stop",
+        "max_completion_tokens",
+        # "max_tokens",
+        "modalities",
+        "prediction",
+        "audio",
+        "presence_penalty",
+        "frequency_penalty",
+        "logit_bias",
+        "user",
+        # "response_format",
+        "seed",
+        # "tools",
+        # "tool_choice",
+        "logprobs",
+        "top_logprobs",
+        "parallel_tool_calls",
+        "extra_headers",
+        "model_list"
     ],
+    "gemini": [
+        "topK",
+    ]
 }
 
-
+
+class SchemaType:
+    """
+    A class to store/convert a LLM-valid schema type from the Python Type object.
+    https://swagger.io/docs/specification/v3_0/data-models/data-types/
+    https://cloud.google.com/vertex-ai/docs/reference/rest/v1/Schema#Type
+    """
+
+    def __init__(self, type: Type):
+        self.type = type
+
+    def convert(self) -> str:
+        if self.type is None:
+            return "string"
+
+        if self.type is int:
+            return "integer"
+        elif self.type is float:
+            return "number"
+        elif self.type is str:
+            return "string"
+        elif self.type is dict:
+            return "object"
+        elif self.type is list:
+            return "array"
+        elif self.type is bool:
+            return "boolean"
+        else:
+            return "string"
|