versionhq 1.2.4.9__py3-none-any.whl → 1.2.4.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +4 -4
- versionhq/agent/model.py +0 -1
- versionhq/llm/llm_vars.py +109 -32
- versionhq/llm/model.py +36 -41
- versionhq/task/model.py +6 -4
- {versionhq-1.2.4.9.dist-info → versionhq-1.2.4.11.dist-info}/METADATA +1 -1
- {versionhq-1.2.4.9.dist-info → versionhq-1.2.4.11.dist-info}/RECORD +10 -10
- {versionhq-1.2.4.9.dist-info → versionhq-1.2.4.11.dist-info}/WHEEL +1 -1
- {versionhq-1.2.4.9.dist-info → versionhq-1.2.4.11.dist-info}/licenses/LICENSE +0 -0
- {versionhq-1.2.4.9.dist-info → versionhq-1.2.4.11.dist-info}/top_level.txt +0 -0
versionhq/__init__.py
CHANGED
@@ -10,7 +10,7 @@ load_dotenv(override=True)
 from versionhq.agent.model import Agent
 from versionhq.agent_network.model import AgentNetwork, Formation, Member, TaskHandlingProcess
 from versionhq.llm.model import LLM
-from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, …
+from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, MODEL_PARAMS, PROVIDERS, TEXT_MODELS
 from versionhq.clients.customer.model import Customer
 from versionhq.clients.product.model import Product, ProductProvider
 from versionhq.clients.workflow.model import MessagingWorkflow, MessagingComponent
@@ -35,7 +35,7 @@ from versionhq.agent_network.formation import form_agent_network
 from versionhq.task_graph.draft import workflow


-__version__ = "1.2.4.9"
+__version__ = "1.2.4.11"
 __all__ = [
     "Agent",
     …
@@ -46,9 +46,9 @@ __all__ = [
     …
     "LLM",
     "LLM_CONTEXT_WINDOW_SIZES",
-    "…",
+    "MODEL_PARAMS",
     "PROVIDERS",
-    "…",
+    "TEXT_MODELS",
     …
     "Customer",
     "Product",
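Note: the practical effect of this hunk is a rename of the public constants. The model table is now exported as TEXT_MODELS and the parameter table as MODEL_PARAMS, alongside the reshaped PROVIDERS mapping defined in versionhq/llm/llm_vars.py below. A minimal sketch of consuming the renamed exports (hypothetical caller code, not part of the package):

import versionhq as vhq

# The renamed public tables, as exported by versionhq 1.2.4.11.
first_openai_model = vhq.TEXT_MODELS["openai"][0]   # e.g. "gpt-4.5-preview-2025-02-27"
anthropic_cred_spec = vhq.PROVIDERS["anthropic"]    # {"api_key": "ANTHROPIC_API_KEY"}
litellm_params = vhq.MODEL_PARAMS["litellm"]        # ["num_retries", "fallbacks", ...]

print(first_openai_model, anthropic_cred_spec, litellm_params)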
versionhq/agent/model.py
CHANGED
@@ -356,7 +356,6 @@ class Agent(BaseModel):
         response_format: Optional[Dict[str, Any]] = None,
         tools: Optional[List[InstanceOf[Tool]| InstanceOf[ToolSet] | Type[Tool]]] = None,
         tool_res_as_final: bool = False,
-        # task: Any = None
     ) -> Tuple[str, UsageMetrics]:
         """
         Create formatted prompts using the developer prompt and the agent's backstory, then call the base model.
versionhq/llm/llm_vars.py
CHANGED
@@ -2,22 +2,52 @@ from typing import Type

 JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"

-PROVIDERS = [
-    "openai",
-    "gemini",
-    "openrouter",
-    "anthropic",
-    "bedrock"
-]
-
-
+PROVIDERS = {
+    "openai": { "api_key": "OPENAI_API_KEY" },
+    "gemini": { "api_key": "GEMINI_API_KEY" },
+    "openrouter": { "api_key": "OPENROUTER_API_KEY" },
+    "anthropic": { "api_key": "ANTHROPIC_API_KEY" },
+    "bedrock": {
+        "AWS_ACCESS_KEY_ID": "AWS_ACCESS_KEY_ID",
+        "AWS_SECRET_ACCESS_KEY": "AWS_SECRET_ACCESS_KEY",
+        "AWS_REGION_NAME": "AWS_REGION_NAME",
+    },
+    "bedrock/converse": {
+        "AWS_ACCESS_KEY_ID": "AWS_ACCESS_KEY_ID",
+        "AWS_SECRET_ACCESS_KEY": "AWS_SECRET_ACCESS_KEY",
+        "AWS_REGION_NAME": "AWS_REGION_NAME",
+    },
+    "sagemaker": {
+        "AWS_ACCESS_KEY_ID": "AWS_ACCESS_KEY_ID",
+        "AWS_SECRET_ACCESS_KEY": "AWS_SECRET_ACCESS_KEY",
+        "AWS_REGION_NAME": "AWS_REGION_NAME",
+    },
+    "huggingface": {
+        "api_key": "HUGGINGFACE_API_KEY",
+        "base_url": "HUGGINGFACE_API_BASE",
+        "HF_ENDPOINT": "HF_ENDPOINT",
+    },
+    "azure": {
+        "api_base": "AZURE_OPENAI_ENDPOINT",
+        "api_key": "AZURE_OPENAI_API_KEY",
+        "api_version": "AZURE_OPENAI_API_VERSION",
+    },
+    "azure_ai": {
+        "api_key": "AZURE_AI_API_KEY",
+        "base_url": "AZURE_AI_API_BASE",
+
+    }
+}

-…
+ENDPOINTS = [
+    "azure",  # endpoints must be aligned with the selected model.
+    "azure_ai",  # endpoints must be aligned with the selected model.
     "huggingface",
 ]


-MODELS = {
+# Resaoning, text gen,
+TEXT_MODELS = {
     "openai": [
         "gpt-4.5-preview-2025-02-27",
         "gpt-4",
@@ -63,23 +93,74 @@ MODELS = {
         "bedrock/cohere.command-text-v14",
         "bedrock/cohere.command-light-text-v14",
     ],
+    "azure": [
+        "azure/DeepSeek-V3",
+        "azure/DeepSeek-R1",
+        "azure/Llama-3.3-70B-Instruct",
+        "azure/Llama-3.2-11B-Vision-Instruct",
+        "azure/Meta-Llama-3.1-405B-Instruct",
+        "azure/Meta-Llama-3.1-8B-Instruct",
+        "azure/Llama-3.2-1B-Instruct",
+        "azure/Meta-Llama-3.1-70B",
+        "azure/Meta-Llama-3.1-8B",
+        "azure/Llama-3.2-3B-Instruct",
+        "azure/Meta-Llama-3-8B-Instruct",
+        "azure/Meta-Llama-3.1-70B-Instruct",
+        "azure/Llama-3.2-90B-Vision-Instruct",
+        "azure/Llama-3.2-3B",
+        "azure/Llama-3.2-1B",
+        "azure/mistral-large-latest",
+        "azure/mistral-large-2402",
+        "azure/command-r-plus",
+        "azure/o3-mini-2025-01-31",
+        "azure/o3-mini",
+        "azure/o1-mini",
+        "azure/Phi-4-mini-instruct",
+        "azure/Phi-4-multimodal-instruct",
+        "azure/Mistral-Large-2411",
+        "azure/Mistral-small"
+        "azure/mistral-small-2503",
+        "azure/Ministral-3B",
+        "azure/mistralai-Mixtral-8x22B-v0-1"
+        "azure/Cohere-rerank-v3.5",
+    ],
+    "azure_ai": [
+        "azure_ai/DeepSeek-V3",
+        "azure_ai/DeepSeek-R1",
+        "azure_ai/Llama-3.3-70B-Instruct",
+        "azure_ai/Llama-3.2-11B-Vision-Instruct",
+        "azure_ai/Meta-Llama-3.1-405B-Instruct",
+        "azure_ai/Meta-Llama-3.1-8B-Instruct",
+        "azure_ai/Llama-3.2-1B-Instruct",
+        "azure_ai/Meta-Llama-3.1-70B",
+        "azure_ai/Meta-Llama-3.1-8B",
+        "azure_ai/Llama-3.2-3B-Instruct",
+        "azure_ai/Meta-Llama-3-8B-Instruct",
+        "azure_ai/Meta-Llama-3.1-70B-Instruct",
+        "azure_ai/Llama-3.2-90B-Vision-Instruct",
+        "azure_ai/Llama-3.2-3B",
+        "azure_ai/Llama-3.2-1B",
+        "azure_ai/mistral-large-latest",
+        "azure_ai/mistral-large-2402",
+        "azure_ai/command-r-plus",
+        "azure_ai/o3-mini-2025-01-31",
+        "azure_ai/o3-mini",
+        "azure_ai/o1-mini",
+        "azure_ai/Phi-4-mini-instruct",
+        "azure_ai/Phi-4-multimodal-instruct",
+        "azure_ai/Mistral-Large-2411",
+        "azure_ai/Mistral-small"
+        "azure_ai/mistral-small-2503",
+        "azure_ai/Ministral-3B",
+        "azure_ai/mistralai-Mixtral-8x22B-v0-1"
+        "azure_ai/Cohere-rerank-v3.5",
+    ],
     "huggingface": [
         "huggingface/qwen/qwen2.5-VL-72B-Instruct",
     ],
 }


-ENV_VARS = {
-    "openai": ["OPENAI_API_KEY"],
-    "gemini": ["GEMINI_API_KEY"],
-    "anthropic": ["ANTHROPIC_API_KEY"],
-    "huggingface": ["HUGGINGFACE_API_KEY", ],
-    "bedrock": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
-    "sagemaker": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"],
-    "azure_ai": ["AZURE_AI_API_KEY", "AZURE_AI_API_BASE"],
-}
-
-
 """
 Max input token size by the model.
 """
@@ -128,31 +209,27 @@ LLM_CONTEXT_WINDOW_SIZES = {


 """
-…
+Model config related params for litellm.completion().
 """
-…
+MODEL_PARAMS = {
     "litellm": [
-        "api_base",
-        "api_version,",
         "num_retries",
        "context_window_fallback_dict",
         "fallbacks",
         "metadata",
-        "api_key",
     ],
     "common": [
-        "model",
-        "messages",
+        # "model",
+        # "messages",
+        # "stream",
+        "response_format",
         "temperature",
         "top_p",
         "max_tokens",
-        "stream",
         "tools",
         "tool_choice",
-        "response_format",
         "n",
         "stop",
-        # "base_url",
     ],
     "openai": [
         "timeout",
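Note: PROVIDERS changes shape here from a flat list of provider names to a mapping of litellm credential parameter to the environment variable that supplies it, replacing the removed ENV_VARS table. This is what lets the new _set_credentials() in versionhq/llm/model.py (next file) resolve keys generically. A standalone sketch of that resolution, assuming only the table shape shown above (resolve_credentials is a hypothetical helper, not the package's API):

import os
from typing import Dict

# Subset of the PROVIDERS mapping added in this version.
PROVIDERS: Dict[str, Dict[str, str]] = {
    "openai": {"api_key": "OPENAI_API_KEY"},
    "azure": {
        "api_base": "AZURE_OPENAI_ENDPOINT",
        "api_key": "AZURE_OPENAI_API_KEY",
        "api_version": "AZURE_OPENAI_API_VERSION",
    },
}

def resolve_credentials(provider: str) -> Dict[str, str]:
    # Keep only the credential kwargs whose environment variables are actually set.
    spec = PROVIDERS.get(provider, {})
    return {param: os.environ[env] for param, env in spec.items() if env in os.environ}

# e.g. litellm.completion(model="azure/o3-mini", messages=[...], **resolve_credentials("azure"))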
versionhq/llm/model.py
CHANGED
@@ -9,10 +9,10 @@ from typing import Any, Dict, List, Optional
 from typing_extensions import Self

 import litellm
-from litellm import JSONSchemaValidationError, get_supported_openai_params
+from litellm import JSONSchemaValidationError, get_supported_openai_params, supports_response_schema
 from pydantic import BaseModel, Field, PrivateAttr, model_validator, ConfigDict

-from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, …
+from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, TEXT_MODELS, MODEL_PARAMS, PROVIDERS, ENDPOINTS
 from versionhq.tool.model import Tool, ToolSet
 from versionhq._utils import Logger

@@ -85,8 +85,6 @@ class LLM(BaseModel):
     context_window_size: Optional[int] = Field(default=DEFAULT_CONTEXT_WINDOW_SIZE)

     # LiteLLM specific config
-    api_base: Optional[str] = Field(default=None, description="litellm specific field - api base of the model provider")
-    api_version: Optional[str] = Field(default=None)
     num_retries: Optional[int] = Field(default=1)
     context_window_fallback_dict: Optional[Dict[str, Any]] = Field(default=None, description="A mapping of model to use if call fails due to context window error")
     fallbacks: Optional[List[Any]]= Field(default=None, description="A list of model names + params to be used, in case the initial call fails")
@@ -117,7 +115,7 @@
             self.provider = DEFAULT_MODEL_PROVIDER_NAME

         else:
-            provider_model_list = MODELS.get(self.provider)
+            provider_model_list = TEXT_MODELS.get(self.provider)
             if provider_model_list:
                 self.model = provider_model_list[0]
                 self.provider = self.provider
@@ -129,29 +127,29 @@

         elif self.model and self.provider is None:
             model_match = [
                 item for item in [
-                    [val for val in v if val == self.model][0] for k, v in MODELS.items() if [val for val in v if val == self.model]
+                    [val for val in v if val == self.model][0] for k, v in TEXT_MODELS.items() if [val for val in v if val == self.model]
                 ] if item
             ]
             model_partial_match = [
                 item for item in [
-                    [val for val in v if val.find(self.model) != -1][0] for k, v in MODELS.items() if [val for val in v if val.find(self.model) != -1]
+                    [val for val in v if val.find(self.model) != -1][0] for k, v in TEXT_MODELS.items() if [val for val in v if val.find(self.model) != -1]
                 ] if item
             ]
-            provider_match = [k for k, v in MODELS.items() if k == self.model]
+            provider_match = [k for k, v in TEXT_MODELS.items() if k == self.model]

             if model_match:
                 self.model = model_match[0]
-                self.provider = [k for k, v in MODELS.items() if self.model in v][0]
+                self.provider = [k for k, v in TEXT_MODELS.items() if self.model in v][0]

             elif model_partial_match:
                 self.model = model_partial_match[0]
-                self.provider = [k for k, v in MODELS.items() if [item for item in v if item.find(self.model) != -1]][0]
+                self.provider = [k for k, v in TEXT_MODELS.items() if [item for item in v if item.find(self.model) != -1]][0]

             elif provider_match:
                 provider = provider_match[0]
-                if self.…
+                if self.TEXT_MODELS.get(provider):
                     self.provider = provider
-                    self.model = self.…
+                    self.model = self.TEXT_MODELS.get(provider)[0]
                 else:
                     self.provider = DEFAULT_MODEL_PROVIDER_NAME
                     self.model = DEFAULT_MODEL_NAME
@@ -161,14 +159,14 @@
             self.provider = DEFAULT_MODEL_PROVIDER_NAME

         else:
-            provider_model_list = MODELS.get(self.provider)
+            provider_model_list = TEXT_MODELS.get(self.provider)
             if self.model not in provider_model_list:
                 self._logger.log(level="warning", message=f"The provided model: {self._init_model_name} is not in the list. We will assign a default model.", color="yellow")
                 self.model = DEFAULT_MODEL_NAME
                 self.provider = DEFAULT_MODEL_PROVIDER_NAME

-        # trigger …
-        if self.provider in …
+        # trigger passing through litellm and use original endpoint.
+        if self.provider in ENDPOINTS:
             self.endpoint_provider = self.provider

         return self
@@ -181,16 +179,13 @@
         """
         litellm.drop_params = True

+        self._set_credentials()
+
         if self.callbacks:
             self._set_callbacks(self.callbacks)

         self.context_window_size = self._get_context_window_size()

-        base_url_key_name = self.endpoint_provider.upper() + "_API_BASE" if self.endpoint_provider else None
-        if base_url_key_name:
-            self.base_url = os.environ.get(base_url_key_name)
-            self.api_base = self.base_url
-
         if self.llm_config:
             self._create_valid_params(config=self.llm_config)

@@ -201,26 +196,24 @@
         """
         Returns valid params incl. model + litellm original params) from the given config dict.
         """
-        …
+        valid_keys = list()

         if self.model:
             valid_keys = get_supported_openai_params(model=self.model, custom_llm_provider=self.endpoint_provider, request_type="chat_completion")

         if not valid_keys:
-            valid_keys = …
+            valid_keys = MODEL_PARAMS.get("common")

-        valid_keys += …
+        valid_keys += MODEL_PARAMS.get("litellm")

+        valid_config = dict()
         for item in valid_keys:
             if hasattr(self, item) and (getattr(self, item) or getattr(self, item) == False):
                 valid_config[item] = getattr(self, item)
-
             elif item in self.llm_config and (self.llm_config[item] or self.llm_config[item]==False):
                 valid_config[item] = self.llm_config[item]
-
             elif item in config and (config[item] or config[item] == False):
                 valid_config[item] = config[item]
-
             else:
                 pass
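Note: with api_base, api_version, and api_key dropped from the "litellm" list (credentials now come from PROVIDERS via _set_credentials(), in the next hunk), _create_valid_params() builds its whitelist from litellm's get_supported_openai_params() and only falls back to the static MODEL_PARAMS["common"] list when litellm has no entry for the model. A condensed sketch of that filtering, assuming the MODEL_PARAMS shape from llm_vars.py (valid_params is a hypothetical standalone rewrite, not the method itself):

from typing import Any, Dict, List, Optional
from litellm import get_supported_openai_params

MODEL_PARAMS = {
    "common": ["response_format", "temperature", "top_p", "max_tokens", "tools", "tool_choice", "n", "stop"],
    "litellm": ["num_retries", "context_window_fallback_dict", "fallbacks", "metadata"],
}

def valid_params(model: str, config: Dict[str, Any], provider: Optional[str] = None) -> Dict[str, Any]:
    # Ask litellm which OpenAI-style params this model supports; fall back to the static list.
    keys: List[str] = get_supported_openai_params(
        model=model, custom_llm_provider=provider, request_type="chat_completion"
    ) or list(MODEL_PARAMS["common"])
    keys = keys + MODEL_PARAMS["litellm"]  # litellm-level knobs are always allowed
    # Keep explicitly set values, treating False as meaningful, like the loop above does.
    return {k: config[k] for k in keys if k in config and (config[k] or config[k] is False)}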
@@ -228,29 +221,29 @@

         if self.context_window_size and valid_config["max_tokens"] > self.context_window_size:
             valid_config["max_tokens"] = self.context_window_size

-        if "model" in valid_config:
-            self.model = valid_config.pop("model")
-
         self.llm_config = valid_config
         return valid_config


-    def …
-        if self.provider …
+    def _set_credentials(self) -> Dict[str, Any]:
+        cred = PROVIDERS.get(self.provider, None) if self.provider else None
+        if not cred:
             return {}

-
-
+        valid_cred = {}
+        for k, v in cred.items():
+            val = os.environ.get(v, None)
+            if val:
+                valid_cred[str(k)] = val

-
-        return {}
+        return valid_cred

-        for item in env_vars:
-            val = os.environ.get(item, None)
-            if val:
-                cred[str(item).lower()] = val

-
+    def _supports_response_schema(self) -> bool:
+        try:
+            return supports_response_schema(model=self.model, custom_llm_provider=self.endpoint_provider)
+        except:
+            return False


     def _supports_function_calling(self) -> bool:
@@ -310,10 +303,12 @@

         try:
             res, tool_res = None, ""
-            cred = self.…
+            cred = self._set_credentials()

             if self.provider == "gemini":
                 self.response_format = { "type": "json_object" } if not tools and self.model != "gemini/gemini-2.0-flash-thinking-exp" else None
+            elif response_format and "json_schema" in response_format:
+                self.response_format = response_format if self._supports_function_calling() else { "type": "json_object" }
             else:
                 self.response_format = response_format
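Note: the newly imported supports_response_schema backs the added _supports_response_schema() helper, while the new elif branch in the call path gates a json_schema response format on _supports_function_calling(). A sketch of the same downgrade decision written against the schema check instead (an editorial variant, not the package's logic; effective_response_format is an assumed helper name):

from typing import Any, Dict, Optional
from litellm import supports_response_schema

def effective_response_format(model: str, requested: Optional[Dict[str, Any]], provider: Optional[str] = None) -> Optional[Dict[str, Any]]:
    # Pass a json_schema format through only when the model can enforce it;
    # otherwise degrade to generic JSON mode, mirroring the fallback above.
    if requested and "json_schema" in requested:
        try:
            ok = supports_response_schema(model=model, custom_llm_provider=provider)
        except Exception:
            ok = False
        return requested if ok else {"type": "json_object"}
    return requested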
versionhq/task/model.py
CHANGED
@@ -376,8 +376,6 @@ class Task(BaseModel):
             match item:
                 case Tool() | ToolSet() | BaseTool() | RagTool() | GPTToolCUA() | GPTToolFileSearch() | GPTToolWebSearch():
                     tool_list.append(item)
-                case type(item, callable):
-                    tool_list.append(Tool(func=item))
                 case dict():
                     tool = None
                     try:
@@ -390,13 +388,17 @@
                     if tool:
                         tool_list.append(tool)
                 case _:
-                    …
+                    if type(item) == callable:
+                        tool_list.append(Tool(func=item))
+                    else:
+                        pass
+
         self.tools = tool_list
         return self


     def _structure_response_format(self, data_type: str = "object", model_provider: str = "gemini") -> Dict[str, Any] | None:
-        """Structures `response_schema` …
+        """Structures `response_schema` to the LLM response format."""

         from versionhq.task.structured_response import StructuredOutput
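Note: the removed case type(item, callable): was never a usable pattern (type accepts no positional sub-patterns in a class pattern, so that case raises TypeError when tried), and its replacement under case _: compares type(item) against the builtin callable function, which is never equal to any object's type, so plain functions still fall through silently. A check along these lines would catch them (an editorial sketch; wrap_if_callable and tool_cls are hypothetical names, not package API):

from typing import Any

def wrap_if_callable(item: Any, tool_cls: type) -> Any:
    # callable() is the idiomatic test; classes are excluded so the
    # Tool/ToolSet and dict cases above keep priority.
    if callable(item) and not isinstance(item, type):
        return tool_cls(func=item)
    return item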
{versionhq-1.2.4.9.dist-info → versionhq-1.2.4.11.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-versionhq/__init__.py,sha256=…
+versionhq/__init__.py,sha256=5dce8lm9xq8PjAQJ6N1jlSmSdhuq1GlDpDjnLKHKNOo,3356
 versionhq/_prompt/auto_feedback.py,sha256=bbj37yTa11lRHpx-sV_Wmpb4dVnDBB7_v8ageUobHXY,3780
 versionhq/_prompt/constants.py,sha256=DOwUFnVVObEFqgnaMCDnW8fnw1oPMgS8JAqOiTuqleI,932
 versionhq/_prompt/model.py,sha256=wJlDM9yzrqlXWxyw4HkYQzPii2MPfqkgTF3qhXoJN2M,8038
@@ -14,7 +14,7 @@ versionhq/_utils/usage_metrics.py,sha256=gDK6fZgT1njX4iPIPFapWxfxIiz-zZYv72p0u6M
 versionhq/_utils/vars.py,sha256=bZ5Dx_bFKlt3hi4-NNGXqdk7B23If_WaTIju2fiTyPQ,57
 versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/agent/inhouse_agents.py,sha256=D2WAiXCYsnQK3_Fe7CbbtvXsHWOaN6vde6m_QoW7fH4,2629
-versionhq/agent/model.py,sha256=…
+versionhq/agent/model.py,sha256=Cw9BdkDq45Ubzayq62A-nFqREBEIxMY0wfm_Xy8yP_w,26942
 versionhq/agent/parser.py,sha256=riG0dkdQCxH7uJ0AbdVdg7WvL0BXhUgJht0VtQvxJBc,4082
 versionhq/agent/rpm_controller.py,sha256=grezIxyBci_lDlwAlgWFRyR5KOocXeOhYkgN02dNFNE,2360
 versionhq/agent/TEMPLATES/Backstory.py,sha256=dkfuATUQ2g2WoUKkmgAIch-RB--bektGoQaUlsDOn0g,529
@@ -38,8 +38,8 @@ versionhq/knowledge/source.py,sha256=-hEUPtJUHHMx4rUKtiHl19J8xAMw-WVBw34zwa2jZ08
 versionhq/knowledge/source_docling.py,sha256=XpavmLvh4dLcuTikj8MCE9KG52oQMafy7_wBneliMK0,4994
 versionhq/knowledge/storage.py,sha256=Kd-4r6aWM5EDaoXrzKXbgi1hY6tysSQARPGXM95qMmU,8266
 versionhq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-versionhq/llm/llm_vars.py,sha256=…
-versionhq/llm/model.py,sha256=…
+versionhq/llm/llm_vars.py,sha256=jPa7m-321w6NK_FjGeVBLR7w9HfuG6FuoKtum-86yLk,8908
+versionhq/llm/model.py,sha256=BEjWyFk0IWSUibHNc9apdFp3QdbGeBMQv4ZfvdgRjgE,17305
 versionhq/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/memory/contextual_memory.py,sha256=QEMVvHuEXxY7M6-12S8HhyFKf108KfX8Zzt7paPW048,3882
 versionhq/memory/model.py,sha256=VQR1229t7GQPMItlGAHLtJrb6LrZfSoRA1DRW4z0SOU,8234
@@ -53,7 +53,7 @@ versionhq/storage/utils.py,sha256=r5ghA_ktdR2IuzlzKqZYCjsNxztEMzyhWLneA4cFuWY,74
 versionhq/task/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/task/evaluation.py,sha256=9jFOmjP-yy1vxRn781KmpdQ_d4J_ZA1UX_21Q3m-iuE,4122
 versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
-versionhq/task/model.py,sha256=-…
+versionhq/task/model.py,sha256=-dqCQVRYF918RDM9mK_J7r4lMRwFqZ2G9NSePAU7DJY,29613
 versionhq/task/structured_response.py,sha256=tqOHpch8CVmMj0aZXjdDWtPNcVmBW8DVZnBvPBwS4PM,5053
 versionhq/task/TEMPLATES/Description.py,sha256=hKhpbz0ztbkUMXz9KiL-P40fis9OB5ICOdL9jCtgAhU,864
 versionhq/task_graph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -74,8 +74,8 @@ versionhq/tool/gpt/_enum.py,sha256=VaONDFZJNVe30Wf3Pl9s0XvxP_Xxqv3RNFcnqyigGFk,5
 versionhq/tool/gpt/cua.py,sha256=5yrgz_fc3IH_uB70J51wmRBWkfH53Qx-a29nmwWyOcs,12078
 versionhq/tool/gpt/file_search.py,sha256=r5JVlf-epKB8DDXyrzlkezguHUMir0JW-77LUHoy-w8,5813
 versionhq/tool/gpt/web_search.py,sha256=bpqEQopbq9KtqQ_0W7QAAJ5TyoKGiVM94-SMp5oqNFE,3483
-versionhq-1.2.4.9.dist-info/…
-versionhq-1.2.4.9.dist-info/…
-versionhq-1.2.4.9.dist-info/…
-versionhq-1.2.4.9.dist-info/…
-versionhq-1.2.4.9.dist-info/…
+versionhq-1.2.4.11.dist-info/licenses/LICENSE,sha256=cRoGGdM73IiDs6nDWKqPlgSv7aR4n-qBXYnJlCMHCeE,1082
+versionhq-1.2.4.11.dist-info/METADATA,sha256=Q0GiGODsDeyH1lgzQvZGdslcxLzQKKgzFD1wt7pMZko,21349
+versionhq-1.2.4.11.dist-info/WHEEL,sha256=1tXe9gY0PYatrMPMDd6jXqjfpz_B-Wqm32CPfRC58XU,91
+versionhq-1.2.4.11.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+versionhq-1.2.4.11.dist-info/RECORD,,
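Note: RECORD hashes are urlsafe-base64-encoded SHA-256 digests with the "=" padding stripped (the standard wheel RECORD convention), so any entry above can be re-checked against an unpacked wheel:

import base64
import hashlib
import pathlib

def record_hash(path: str) -> str:
    # Wheel RECORD style hash: urlsafe base64 of the SHA-256 digest, no padding.
    digest = hashlib.sha256(pathlib.Path(path).read_bytes()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

# For the 1.2.4.11 wheel contents, record_hash("versionhq/llm/llm_vars.py")
# should print "jPa7m-321w6NK_FjGeVBLR7w9HfuG6FuoKtum-86yLk".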
{versionhq-1.2.4.9.dist-info → versionhq-1.2.4.11.dist-info}/licenses/LICENSE
File without changes
{versionhq-1.2.4.9.dist-info → versionhq-1.2.4.11.dist-info}/top_level.txt
File without changes