versionhq 1.1.11.6__py3-none-any.whl → 1.1.11.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- versionhq/__init__.py +1 -1
- versionhq/agent/model.py +78 -88
- versionhq/knowledge/source_docling.py +14 -13
- versionhq/llm/llm_vars.py +91 -58
- versionhq/llm/model.py +130 -104
- versionhq/task/model.py +4 -1
- versionhq/task/structured_response.py +4 -3
- {versionhq-1.1.11.6.dist-info → versionhq-1.1.11.8.dist-info}/METADATA +3 -1
- {versionhq-1.1.11.6.dist-info → versionhq-1.1.11.8.dist-info}/RECORD +12 -12
- {versionhq-1.1.11.6.dist-info → versionhq-1.1.11.8.dist-info}/LICENSE +0 -0
- {versionhq-1.1.11.6.dist-info → versionhq-1.1.11.8.dist-info}/WHEEL +0 -0
- {versionhq-1.1.11.6.dist-info → versionhq-1.1.11.8.dist-info}/top_level.txt +0 -0
versionhq/__init__.py
CHANGED
versionhq/agent/model.py
CHANGED
@@ -6,10 +6,10 @@ from typing_extensions import Self
 from dotenv import load_dotenv
 import litellm

-from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator
+from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator
 from pydantic_core import PydanticCustomError

-from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME
+from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME, PROVIDERS
 from versionhq.tool.model import Tool, ToolSet
 from versionhq.knowledge.model import BaseKnowledgeSource, Knowledge
 from versionhq.memory.contextual_memory import ContextualMemory
@@ -99,7 +99,7 @@ class Agent(BaseModel):
     tools: Optional[List[InstanceOf[Tool | ToolSet] | Type[Tool] | Any]] = Field(default_factory=list)

     # knowledge
-    knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(default=None)
+    knowledge_sources: Optional[List[BaseKnowledgeSource | Any]] = Field(default=None)
     _knowledge: Optional[Knowledge] = PrivateAttr(default=None)

     # memory
@@ -162,90 +162,44 @@ class Agent(BaseModel):
     @model_validator(mode="after")
     def set_up_llm(self) -> Self:
         """
-        Set up
-        Pass the model config params: `llm`, `max_tokens`, `max_execution_time`, `callbacks`,`respect_context_window` to the LLM class.
-        The base model is selected on the client app, else use the default model.
+        Set up `llm` and `function_calling_llm` as valid LLM objects using the given values.
         """
-
         self.agent_ops_agent_name = self.role
+        self.llm = self._set_llm(llm=self.llm)
+        function_calling_llm = self.function_calling_llm if self.function_calling_llm else self.llm if self.llm else None
+        self.function_calling_llm = self._set_llm(llm=function_calling_llm)
+        return self

-        if isinstance(self.llm, LLM):
-            llm = self._set_llm_params(self.llm)
-            self.llm = llm
-
-            llm = LLM(model=model_name)
-            updated_llm = self._set_llm_params(llm)
-            self.llm = updated_llm
+    def _set_llm(self, llm: Any | None) -> LLM:
+        llm = llm if llm is not None else DEFAULT_MODEL_NAME

-            llm = LLM(model=model_name if model_name is not None else DEFAULT_MODEL_NAME)
-            updated_llm = self._set_llm_params(llm, { k: v for k, v in self.llm.items() if v is not None })
-            self.llm = updated_llm
+        match llm:
+            case LLM():
+                return self._set_llm_params(llm=llm)
+
+            case str():
+                llm_obj = LLM(model=llm)
+                return self._set_llm_params(llm=llm_obj)
+
+            case dict():
+                model_name = llm.pop("model_name", llm.pop("deployment_name", str(llm)))
+                llm_obj = LLM(model=model_name if model_name else DEFAULT_MODEL_NAME)
+                return self._set_llm_params(llm_obj, { k: v for k, v in llm.items() if v is not None })
+
+            case _:
                 model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
+                llm_obj = LLM(model=model_name)
                 llm_params = {
-                    "max_tokens": (getattr(self.llm, "max_tokens") or self.max_tokens or 3000),
-                    "timeout": getattr(self.llm, "timeout", self.max_execution_time),
-                    "callbacks": getattr(self.llm, "callbacks", None),
-                    "temperature": getattr(self.llm, "temperature", None),
-                    "logprobs": getattr(self.llm, "logprobs", None),
-                    "api_key": getattr(self.llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
-                    "base_url": getattr(self.llm, "base_url", None),
+                    "max_tokens": (getattr(llm, "max_tokens") or self.max_tokens or 3000),
+                    "timeout": getattr(llm, "timeout", self.max_execution_time),
+                    "callbacks": getattr(llm, "callbacks", None),
+                    "temperature": getattr(llm, "temperature", None),
+                    "logprobs": getattr(llm, "logprobs", None),
+                    "api_key": getattr(llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
+                    "base_url": getattr(llm, "base_url", None),
                 }
-
-                self.llm = updated_llm
-
-                """
-                Set up funcion_calling LLM as well.
-                Check if the model supports function calling, setup LLM instance accordingly, using the same params with the LLM.
-                """
-                if self.function_calling_llm:
-                    if isinstance(self.function_calling_llm, LLM):
-                        if self.function_calling_llm._supports_function_calling() == False:
-                            self.function_calling_llm = LLM(model=DEFAULT_MODEL_NAME)
-
-                        updated_llm = self._set_llm_params(self.function_calling_llm)
-                        self.function_calling_llm = updated_llm
-
-                    elif isinstance(self.function_calling_llm, str):
-                        llm = LLM(model=self.function_calling_llm)
-
-                        if llm._supports_function_calling() == False:
-                            llm = LLM(model=DEFAULT_MODEL_NAME)
-
-                        updated_llm = self._set_llm_params(llm)
-                        self.function_calling_llm = updated_llm
-
-                    else:
-                        if isinstance(self.function_calling_llm, dict):
-                            model_name = self.function_calling_llm.pop("model_name", self.function_calling_llm.pop("deployment_name", str(self.function_calling_llm)))
-                            llm = LLM(model=model_name)
-                            updated_llm = self._set_llm_params(llm, { k: v for k, v in self.function_calling_llm.items() if v is not None })
-                            self.function_calling_llm = updated_llm
-
-                        else:
-                            model_name = (getattr(self.function_calling_llm, "model_name") or getattr(self.function_calling_llm, "deployment_name") or str(self.function_calling_llm))
-                            llm = LLM(model=model_name)
-                            llm_params = {
-                                "max_tokens": (getattr(self.function_calling_llm, "max_tokens") or self.max_tokens or 3000),
-                                "timeout": getattr(self.function_calling_llm, "timeout", self.max_execution_time),
-                                "callbacks": getattr(self.function_calling_llm, "callbacks", None),
-                                "temperature": getattr(self.function_calling_llm, "temperature", None),
-                                "logprobs": getattr(self.function_calling_llm, "logprobs", None),
-                                "api_key": getattr(self.function_calling_llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
-                                "base_url": getattr(self.function_calling_llm, "base_url", None),
-                            }
-                            updated_llm = self._set_llm_params(llm, llm_params)
-                            self.function_calling_llm = updated_llm
-
-        return self
+                return self._set_llm_params(llm=llm_obj, config=llm_params)


     def _set_llm_params(self, llm: LLM, config: Dict[str, Any] = None) -> LLM:
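The refactor collapses the duplicated setup paths for `llm` and `function_calling_llm` into one `_set_llm` helper that normalizes whatever the caller passed (an `LLM` instance, a model-name string, a config dict, or an arbitrary object) into an `LLM`. A minimal standalone sketch of the same dispatch pattern; `MyLLM`, `normalize_llm`, and `DEFAULT_MODEL` are illustrative stand-ins, not the package's API:

```python
from typing import Any

DEFAULT_MODEL = "gpt-4o-mini"  # stand-in for DEFAULT_MODEL_NAME

class MyLLM:
    """Illustrative stand-in for versionhq's LLM holder."""
    def __init__(self, model: str, **params: Any):
        self.model = model
        self.params = params

def normalize_llm(llm: Any | None) -> MyLLM:
    """One code path per input shape, mirroring the new _set_llm dispatch."""
    llm = llm if llm is not None else DEFAULT_MODEL
    match llm:
        case MyLLM():
            return llm                      # already a model object; reuse as-is
        case str():
            return MyLLM(model=llm)         # bare model name
        case dict():
            # dicts may carry a model name plus optional params
            model_name = llm.pop("model_name", llm.pop("deployment_name", str(llm)))
            return MyLLM(model=model_name or DEFAULT_MODEL,
                         **{k: v for k, v in llm.items() if v is not None})
        case _:
            # unknown object: fall back to its model-ish attributes
            model_name = getattr(llm, "model_name", None) or str(llm)
            return MyLLM(model=model_name)

print(normalize_llm("gpt-4o").model)                                       # gpt-4o
print(normalize_llm({"model_name": "gpt-4", "temperature": 0.2}).params)   # {'temperature': 0.2}
```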
@@ -257,6 +211,11 @@ class Agent(BaseModel):
         llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
         llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens

+        if llm.provider is None:
+            provider_name = llm.model.split("/")[0]
+            valid_provider = provider_name if provider_name in PROVIDERS else None
+            llm.provider = valid_provider
+
         if self.callbacks:
             llm.callbacks = self.callbacks
             llm._set_callbacks(llm.callbacks)
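The added guard infers a provider from the `provider/model` prefix convention used by LiteLLM model strings, and leaves the provider unset when the prefix is not a known provider. A self-contained sketch; the `PROVIDERS` list is copied from the new `llm_vars.py`, the helper name is illustrative:

```python
PROVIDERS = ["openai", "gemini", "sagemaker", "huggingface", "anthropic",
             "ollama", "watson", "bedrock", "azure", "cerebras", "llama"]

def infer_provider(model: str) -> str | None:
    """Return the prefix before the first '/' if it names a known provider."""
    prefix = model.split("/")[0]
    return prefix if prefix in PROVIDERS else None

assert infer_provider("gemini/gemini-1.5-pro") == "gemini"
assert infer_provider("gpt-4o") is None  # no prefix: provider stays unset
```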
@@ -344,14 +303,46 @@ class Agent(BaseModel):

     @model_validator(mode="after")
     def set_up_knowledge(self) -> Self:
-
-
+        from versionhq.knowledge.source import BaseKnowledgeSource, StringKnowledgeSource, TextFileKnowledgeSource, CSVKnowledgeSource, ExcelKnowledgeSource, JSONKnowledgeSource
+        from versionhq.knowledge.source_docling import DoclingSource

-
-
-
-
-
+        if self.knowledge_sources:
+            try:
+                collection_name = f"{self.role.replace(' ', '_')}"
+                knowledge_sources = []
+                docling_fp, txt_fp, json_fp, excel_fp, csv_fp, pdf_fp = [], [], [], [], [], []
+                str_cont = ""
+
+                for item in self.knowledge_sources:
+                    if isinstance(item, BaseKnowledgeSource):
+                        knowledge_sources.append(item)
+
+                    elif isinstance(item, str) and "http" in item:
+                        docling_fp.append(item)
+
+                    elif isinstance(item, str):
+                        match os.path.splitext(item)[1]:
+                            case ".txt": txt_fp.append(item)
+                            case ".json": json_fp.append(item)
+                            case ".xls" | ".xlsx": excel_fp.append(item)
+                            case ".pdf": pdf_fp.append(item)
+                            case ".csv": csv_fp.append(item)
+                            case _: str_cont += str(item)
+
+                    else:
+                        str_cont += str(item)
+
+                if docling_fp: knowledge_sources.append(DoclingSource(file_paths=docling_fp))
+                if str_cont: knowledge_sources.append(StringKnowledgeSource(content=str_cont))
+                if txt_fp: knowledge_sources.append(TextFileKnowledgeSource(file_paths=txt_fp))
+                if csv_fp: knowledge_sources.append(CSVKnowledgeSource(file_path=csv_fp))
+                if excel_fp: knowledge_sources.append(ExcelKnowledgeSource(file_path=excel_fp))
+                if json_fp: knowledge_sources.append(JSONKnowledgeSource(file_paths=json_fp))
+
+                self._knowledge = Knowledge(sources=knowledge_sources, embedder_config=self.embedder_config, collection_name=collection_name)
+
+            except:
+                self._logger.log(level="warning", message="We cannot find the format for the source. Add BaseKnowledgeSource objects instead.", color="yellow")

         return self
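`set_up_knowledge` now accepts raw strings and file paths alongside `BaseKnowledgeSource` objects, bucketing them by URL-ness and file extension before building typed sources. The routing step in isolation, with plain dict buckets standing in for the package's source classes:

```python
import os

def bucket_sources(items: list) -> dict:
    """Group mixed knowledge inputs the way the new validator does."""
    buckets = {"url": [], ".txt": [], ".json": [], ".xlsx": [], ".pdf": [], ".csv": [], "text": []}
    for item in items:
        if isinstance(item, str) and "http" in item:
            buckets["url"].append(item)            # URLs go to the docling converter
        elif isinstance(item, str):
            match os.path.splitext(item)[1]:
                case ".txt": buckets[".txt"].append(item)
                case ".json": buckets[".json"].append(item)
                case ".xls" | ".xlsx": buckets[".xlsx"].append(item)
                case ".pdf": buckets[".pdf"].append(item)
                case ".csv": buckets[".csv"].append(item)
                case _: buckets["text"].append(item)   # no known extension: treat as raw text
        else:
            buckets["text"].append(str(item))
    return buckets

print(bucket_sources(["https://example.com/doc", "notes.txt", "plain words"]))
```

Note that the hunk collects `pdf_fp` but never builds a source from it, so PDF paths are currently dropped after bucketing.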
@@ -414,7 +405,7 @@ class Agent(BaseModel):
         self._logger.log(level="info", message=f"Messages sent to the model: {messages}", color="blue")

         if tool_res_as_final:
-            func_llm = self.function_calling_llm if self.function_calling_llm and self.function_calling_llm._supports_function_calling() else LLM(model=DEFAULT_MODEL_NAME)
+            func_llm = self.function_calling_llm if self.function_calling_llm and self.function_calling_llm._supports_function_calling() else self.llm if self.llm and self.llm._supports_function_calling() else LLM(model=DEFAULT_MODEL_NAME)
             raw_response = func_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
             task.tokens = func_llm._tokens
         else:
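The change widens the fallback chain: prefer the dedicated `function_calling_llm`, then the agent's own `llm` if it supports function calling, then a fresh default. The chained ternary is easier to audit written as an explicit cascade; a hedged sketch, with `supports_fc` standing in for `_supports_function_calling()`:

```python
def pick_function_calling_llm(function_calling_llm, llm, make_default):
    """Explicit form of the fallback: dedicated FC model -> agent model -> default."""
    if function_calling_llm is not None and function_calling_llm.supports_fc():
        return function_calling_llm
    if llm is not None and llm.supports_fc():
        return llm
    return make_default()

class Stub:
    def __init__(self, name, fc): self.name, self.fc = name, fc
    def supports_fc(self): return self.fc

picked = pick_function_calling_llm(None, Stub("agent-llm", True), lambda: Stub("default", True))
print(picked.name)  # agent-llm
```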
@@ -458,7 +449,7 @@ class Agent(BaseModel):
         from versionhq.knowledge._utils import extract_knowledge_context

         task: InstanceOf[Task] = task
-        tools: Optional[List[InstanceOf[Tool
+        tools: Optional[List[InstanceOf[Tool | ToolSet] | Type[Tool]]] = task_tools + self.tools if task.can_use_agent_tools else task_tools

         if self.max_rpm and self._rpm_controller:
             self._rpm_controller._reset_request_count()
@@ -474,7 +465,6 @@ class Agent(BaseModel):
         if agent_knowledge_context:
             task_prompt += agent_knowledge_context

-
         if self.use_memory == True:
             contextual_memory = ContextualMemory(
                 memory_config=self.memory_config, stm=self.short_term_memory, ltm=self.long_term_memory, um=self.user_memory
versionhq/knowledge/source_docling.py
CHANGED
@@ -3,7 +3,6 @@ from typing import Iterator, List, Optional
 from urllib.parse import urlparse

 try:
-    import docling
     from docling.datamodel.base_models import InputFormat
     from docling.document_converter import DocumentConverter
     from docling.exceptions import ConversionError
@@ -12,19 +11,12 @@ try:
     DOCLING_AVAILABLE = True
 except ImportError:
     import envoy
-
-
-    import docling
-    from docling.datamodel.base_models import InputFormat
-    from docling.document_converter import DocumentConverter
-    from docling.exceptions import ConversionError
-    from docling_core.transforms.chunker.hierarchical_chunker import HierarchicalChunker
-    from docling_core.types.doc.document import DoclingDocument
+    envoy.run("uv add docling --optional docling")
     DOCLING_AVAILABLE = True
 except:
     DOCLING_AVAILABLE = False

-from pydantic import Field
+from pydantic import Field

 from versionhq.knowledge.source import BaseKnowledgeSource
 from versionhq.storage.utils import fetch_db_storage_path
@@ -54,11 +46,20 @@ class DoclingSource(BaseKnowledgeSource):
     ))

     def __init__(self, *args, **kwargs):
-        if
-
-
+        if DOCLING_AVAILABLE:
+            from docling.datamodel.base_models import InputFormat
+            from docling.document_converter import DocumentConverter
+            from docling.exceptions import ConversionError
+            from docling_core.transforms.chunker.hierarchical_chunker import HierarchicalChunker
+            from docling_core.types.doc.document import DoclingDocument

             super().__init__(*args, **kwargs)

+        else:
+            raise ImportError("The docling package is required. Please install the package using: $ uv add docling.")
+        # else:
+        #     super().__init__(*args, **kwargs)


     def _convert_source_to_docling_documents(self) -> List["DoclingDocument"]:
         conv_results_iter = self.document_converter.convert_all(self.valid_file_paths)
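The rewritten module defers the docling imports to `DoclingSource.__init__` and raises a clear `ImportError` when the optional extra is missing, while the `except ImportError` branch shells out to `uv` to auto-install it at import time; the nested bare `except` at least keeps a missing `envoy` or `uv` from breaking the import. The general optional-dependency pattern, as a standalone sketch (class name illustrative):

```python
try:
    import docling  # optional extra; absent in the base install
    DOCLING_AVAILABLE = True
except ImportError:
    DOCLING_AVAILABLE = False

class NeedsDocling:
    def __init__(self):
        if not DOCLING_AVAILABLE:
            # fail at construction time, with an actionable message
            raise ImportError("The docling package is required: uv add docling")
        # safe to import the submodules lazily here
        from docling.document_converter import DocumentConverter
        self.converter = DocumentConverter()
```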
versionhq/llm/llm_vars.py
CHANGED
@@ -3,6 +3,33 @@ from typing import Type

 JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"

+PROVIDERS = [
+    "openai",
+    "gemini",
+    "sagemaker",
+    "huggingface", # need api base
+    "anthropic",
+    "ollama",
+    "watson",
+    "bedrock",
+    "azure",
+    "cerebras",
+    "llama",
+]
+
+ENDPOINT_PROVIDERS = [
+    # "openai",
+    # "gemini",
+    # "sagemaker",
+    "huggingface",
+    # "anthropic",
+    # "ollama",
+    # "watson",
+    # "bedrock",
+    # "azure",
+    # "cerebras",
+    # "llama",
+]

 """
 List of models available on the framework.
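`PROVIDERS` gates which prefixes count as valid providers, while `ENDPOINT_PROVIDERS` (currently only `huggingface`) marks providers that route through a pass-through endpoint and therefore need a base URL. A short sketch of how `model.py` derives the environment variable for that base URL; the helper name is hypothetical:

```python
ENDPOINT_PROVIDERS = ["huggingface"]

def endpoint_env_key(provider: str | None) -> str | None:
    """Derive the env var holding the pass-through base URL, as model.py does."""
    if provider in ENDPOINT_PROVIDERS:
        return provider.upper() + "_API_BASE"  # e.g. HUGGINGFACE_API_BASE
    return None

assert endpoint_env_key("huggingface") == "HUGGINGFACE_API_BASE"
assert endpoint_env_key("openai") is None
```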
@@ -16,7 +43,6 @@ litellm.pick_cheapest_chat_models_from_llm_provider(custom_llm_provider: str, n=

 MODELS = {
     "openai": [
-        # "gpt-3.5-turbo",
         "gpt-4",
         "gpt-4o",
         "gpt-4o-mini",
@@ -27,11 +53,7 @@ MODELS = {
         "gemini/gemini-1.5-flash",
         "gemini/gemini-1.5-pro",
         "gemini/gemini-2.0-flash-exp",
-        # "gemini/gemini-gemma-2-9b-it",
-        # "gemini/gemini-gemma-2-27b-it",
     ],
-    # "vetrex_ai": [
-    # ],
     "anthropic": [
         "claude-3-5-sonnet-20241022",
         "claude-3-5-sonnet-20240620",
@@ -39,10 +61,28 @@ MODELS = {
         "claude-3-opus-20240229",
         "claude-3-haiku-20240307",
     ],
-
-
-
-    #
+    "huggingface": [
+        "huggingface/qwen/qwen2.5-VL-72B-Instruct",
+    ],
+    # "sagemaker": [
+    #     "sagemaker/huggingface-text2text-flan-t5-base",
+    #     "sagemaker/huggingface-llm-gemma-7b",
+    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-13b",
+    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-70b",
+    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-3-8b",
+    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-3-70b",
+    #     "sagemaker/huggingface-llm-mistral-7b"
+    # ], # https://docs.aws.amazon.com/sagemaker/latest/dg/jumpstart-foundation-models-latest.html
+    "ollama": [
+        "ollama/llama3.1",
+        "ollama/mixtral",
+        "ollama/mixtral-8x22B-Instruct-v0.1",
+    ],
+    "deepseek": [
+        "deepseek/deepseek-reasoner",
+    ],

     # "watson": [
     #     "watsonx/meta-llama/llama-3-1-70b-instruct",
     #     "watsonx/meta-llama/llama-3-1-8b-instruct",
@@ -53,44 +93,48 @@ MODELS = {
     #     "watsonx/mistral/mistral-large",
     #     "watsonx/ibm/granite-3-8b-instruct",
     # ],
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    "bedrock": [
+        "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
+        "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
+        "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
+        "bedrock/anthropic.claude-3-opus-20240229-v1:0",
+        # "bedrock/anthropic.claude-v2:1",
+        "bedrock/anthropic.claude-v2",
+        "bedrock/anthropic.claude-instant-v1",
+        "bedrock/meta.llama3-1-405b-instruct-v1:0",
+        "bedrock/meta.llama3-1-70b-instruct-v1:0",
+        "bedrock/meta.llama3-1-8b-instruct-v1:0",
+        "bedrock/meta.llama3-70b-instruct-v1:0",
+        "bedrock/meta.llama3-8b-instruct-v1:0",
+        "bedrock/amazon.titan-text-lite-v1",
+        "bedrock/amazon.titan-text-express-v1",
+        "bedrock/cohere.command-text-v14",
+        "bedrock/ai21.j2-mid-v1",
+        "bedrock/ai21.j2-ultra-v1",
+        "bedrock/ai21.jamba-instruct-v1:0",
+        "bedrock/meta.llama2-13b-chat-v1",
+        "bedrock/meta.llama2-70b-chat-v1",
+        "bedrock/mistral.mistral-7b-instruct-v0:2",
+        "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
+    ],
 }

-
-
-    "
-    "gemini",
-    "
-    "
-
-
-
-
+
+KEYS = {
+    "openai": ["OPENAI_API_KEY"],
+    "gemini": ["GEMINI_API_KEY"],
+    "sagemaker": ["AWS_ACCESS_KEY_ID", "ADW_SECURET_ACCESS_KEY", "AWS_REGION_NAME"],
+    "anthropic": ["ANTHROPIC_API_KEY"],
+}
+
+
+"""
+Use base_url to specify
+"""
+BASE_URLS = {
+    "deepseek": "https://api.deepseek.com"
+}


 """
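`KEYS` maps each provider to the environment variables it needs (the sagemaker entry's `ADW_SECURET_ACCESS_KEY`, carried over verbatim from the source, is presumably meant to be `AWS_SECRET_ACCESS_KEY`), and `BASE_URLS` pins non-default endpoints such as deepseek's. A sketch of checking that a provider's credentials are present before a call; the helper is hypothetical, not in the package:

```python
import os

KEYS = {
    "openai": ["OPENAI_API_KEY"],
    "gemini": ["GEMINI_API_KEY"],
    "sagemaker": ["AWS_ACCESS_KEY_ID", "ADW_SECURET_ACCESS_KEY", "AWS_REGION_NAME"],
    "anthropic": ["ANTHROPIC_API_KEY"],
}

def missing_credentials(provider: str) -> list[str]:
    """Return the env var names a provider needs that are not currently set."""
    return [k for k in KEYS.get(provider, []) if not os.environ.get(k)]

print(missing_credentials("openai"))  # [] when OPENAI_API_KEY is exported
```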
@@ -118,6 +162,8 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "claude-3-haiku-20240307": 200000,

     "deepseek-chat": 128000,
+    "deepseek/deepseek-reasoner": 8192,
+
     "gemma2-9b-it": 8192,
     "gemma-7b-it": 8192,
     "llama3-groq-70b-8192-tool-use-preview": 8192,
@@ -135,11 +181,7 @@ LLM_CONTEXT_WINDOW_SIZES = {
 }

-
-    "openai": "OPENAI_API_KEY",
-    "anthropic": "ANTHROPIC_API_KEY",
-    "gemini": "GEMINI_API_KEY",
-}
+

 LLM_BASE_URL_KEY_NAMES = {
     "openai": "OPENAI_API_BASE",
@@ -262,14 +304,8 @@ PARAMS = {
     ],
     "openai": [
         "timeout",
-        # "temperature",
-        # "top_p",
-        # "n",
-        # "stream",
         "stream_options",
-        # "stop",
         "max_completion_tokens",
-        # "max_tokens",
         "modalities",
         "prediction",
         "audio",
@@ -277,10 +313,7 @@ PARAMS = {
         "frequency_penalty",
         "logit_bias",
         "user",
-        # "response_format",
         "seed",
-        # "tools",
-        # "tool_choice",
         "logprobs",
         "top_logprobs",
         "parallel_tool_calls",
versionhq/llm/model.py
CHANGED
@@ -4,23 +4,16 @@ import os
 import sys
 import threading
 import warnings
-import litellm
-from litellm import JSONSchemaValidationError
-from abc import ABC
 from dotenv import load_dotenv
-
+import litellm
+from litellm import get_supported_openai_params, JSONSchemaValidationError
 from contextlib import contextmanager
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional
 from typing_extensions import Self
-
-from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf, ConfigDict
+from pydantic import BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf, ConfigDict
 from pydantic_core import PydanticCustomError

-from openai import OpenAI
-
-from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, LLM_API_KEY_NAMES, LLM_BASE_URL_KEY_NAMES, MODELS, PARAMS, SchemaType
-from versionhq.task import TaskOutputFormat
-from versionhq.task.model import ResponseField, Task
+from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, MODELS, PARAMS, PROVIDERS, ENDPOINT_PROVIDERS
 from versionhq.tool.model import Tool, ToolSet
 from versionhq._utils.logger import Logger
@@ -29,10 +22,11 @@ load_dotenv(override=True)
 LITELLM_API_KEY = os.environ.get("LITELLM_API_KEY")
 LITELLM_API_BASE = os.environ.get("LITELLM_API_BASE")
 DEFAULT_CONTEXT_WINDOW_SIZE = int(8192 * 0.75)
-DEFAULT_MODEL_NAME = os.environ.get("DEFAULT_MODEL_NAME")
+DEFAULT_MODEL_NAME = os.environ.get("DEFAULT_MODEL_NAME", "gpt-4o-mini")
+DEFAULT_MODEL_PROVIDER_NAME = os.environ.get("DEFAULT_MODEL_PROVIDER_NAME", "openai")

-proxy_openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"), organization="versionhq", base_url=LITELLM_API_BASE)
-openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
+# proxy_openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"), organization="versionhq", base_url=LITELLM_API_BASE)
+# openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))


 class FilteredStream:
@@ -74,10 +68,7 @@ def suppress_warnings():

 class LLM(BaseModel):
     """
-    An LLM class to store params
-    Use LiteLLM to connect with the model of choice.
-    Some optional params are passed by the agent, else follow the default settings of the model provider.
-    Ref. https://docs.litellm.ai/docs/completion/input
+    An LLM class to store params to send to the LLM. Use LiteLLM or custom providers for the endpoint.
     """

     _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=True))
@@ -85,10 +76,11 @@ class LLM(BaseModel):
     _tokens: int = PrivateAttr(default=0) # accumulate total tokens used for the call
     model_config = ConfigDict(extra="allow")

-    model: str = Field(default=
-    provider: Optional[str] = Field(default=None, description="model provider
-
-
+    model: str = Field(default=None)
+    provider: Optional[str] = Field(default=None, description="model provider")
+    endpoint_provider: Optional[str] = Field(default=None, description="custom endpoint provider for pass through llm call. must need base_url")
+    base_url: Optional[str] = Field(default=None, description="api base url for endpoint provider")
+    api_key: Optional[str] = Field(default=None, description="api key to access the model")

     # optional params
     timeout: Optional[float | int] = Field(default=None)
@@ -121,55 +113,90 @@ class LLM(BaseModel):
         litellm.set_verbose = True
         os.environ['LITELLM_LOG'] = 'DEBUG'

+
     @model_validator(mode="after")
-    def
+    def validate_model_providers(self) -> Self:
         """
-
-        * Assign a default model and provider based on the given information when no model key is found in the MODEL list.
-
-        2) Set up other base parameters for the model and LiteLLM.
+        Validate the given model, provider, interface provider.
         """
-
-        self._logger.log(level="error", message="Model name is missing.", color="red")
-        raise PydanticCustomError("model_missing", "The model name must be provided.", {})
+        self._init_model_name = self.model

+        if self.model is None and self.provider is None:
+            self.model = DEFAULT_MODEL_NAME
+            self.provider = DEFAULT_MODEL_PROVIDER_NAME

-        self.
-
-
+        elif self.model is None and self.provider:
+            if self.provider not in PROVIDERS:
+                self._logger.log(level="warning", message=f"Invalid model provider is provided. We will assign a default model.", color="yellow")
+                self.model = DEFAULT_MODEL_NAME
+                self.provider = DEFAULT_MODEL_PROVIDER_NAME

-
-
-
-
-
-
-
-
-
-
-
-
+            else:
+                provider_model_list = MODELS.get(self.provider)
+                if provider_model_list:
+                    self.model = provider_model_list[0]
+                    self.provider = self.provider
+                else:
+                    self._logger.log(level="warning", message=f"This provider has not models to be called. We will assign a default model.", color="yellow")
+                    self.model = DEFAULT_MODEL_NAME
+                    self.provider = DEFAULT_MODEL_PROVIDER_NAME
+
+        elif self.model and self.provider is None:
+            model_match = [
+                item for item in [
+                    [val for val in v if val == self.model][0] for k, v in MODELS.items() if [val for val in v if val == self.model]
+                ] if item
+            ]
+            model_partial_match = [
+                item for item in [
+                    [val for val in v if val.find(self.model) != -1][0] for k, v in MODELS.items() if [val for val in v if val.find(self.model) != -1]
+                ] if item
+            ]
+            provider_match = [k for k, v in MODELS.items() if k == self.model]
+
+            if model_match:
+                self.model = model_match[0]
+                self.provider = [k for k, v in MODELS.items() if self.model in v][0]
+
+            elif model_partial_match:
+                self.model = model_partial_match[0]
+                self.provider = [k for k, v in MODELS.items() if [item for item in v if item.find(self.model) != -1]][0]
+
+            elif provider_match:
+                provider = provider_match[0]
+                if self.MODELS.get(provider):
+                    self.provider = provider
+                    self.model = self.MODELS.get(provider)[0]
+                else:
+                    self.provider = DEFAULT_MODEL_PROVIDER_NAME
+                    self.model = DEFAULT_MODEL_NAME
+
+            else:
+                self.model = DEFAULT_MODEL_NAME
+                self.provider = DEFAULT_MODEL_PROVIDER_NAME

         else:
-
-
-
-            if self._init_model_name == item:
-                self.model = item
-                self.provider = k
-
-            elif self.model is None and self._init_model_name in item:
-                self.model = item
-                self.provider = k
-
-            if self.model is None:
-                self._logger.log(level="warning", message=f"The provided model \'{self.model}\' is not in the list. We'll assign a default model.", color="yellow")
+            provider_model_list = MODELS.get(self.provider)
+            if self.model not in provider_model_list:
+                self._logger.log(level="warning", message=f"The provided model: {self._init_model_name} is not in the list. We will assign a default model.", color="yellow")
                 self.model = DEFAULT_MODEL_NAME
-                self.provider =
+                self.provider = DEFAULT_MODEL_PROVIDER_NAME
+
+        # trigger pass-through custom endpoint.
+        if self.provider in ENDPOINT_PROVIDERS:
+            self.endpoint_provider = self.provider
+
+        return self


+    @model_validator(mode="after")
+    def validate_model_params(self) -> Self:
+        """
+        After setting up a valid model, provider, interface provider, add params to the model.
+        """
+        self._tokens = 0
+
         if self.callbacks:
             self._set_callbacks(self.callbacks)
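The validator resolves four cases: nothing given, provider only, model only (exact match, partial match, or a provider name passed in the model slot), and both given. The nested comprehensions for exact and partial matching amount to a flat lookup; a simplified, behavior-preserving sketch over a toy `MODELS` table. Note the original's `provider_match` branch references `self.MODELS`, which appears to be a bug: `MODELS` is module-level, so the attribute lookup will raise `AttributeError` when that branch runs.

```python
MODELS = {
    "openai": ["gpt-4", "gpt-4o", "gpt-4o-mini"],
    "gemini": ["gemini/gemini-1.5-flash", "gemini/gemini-1.5-pro"],
}
DEFAULT = ("openai", "gpt-4o-mini")

def resolve(model: str) -> tuple[str, str]:
    """Resolve (provider, model) from a bare model string, as the validator does."""
    for provider, names in MODELS.items():
        if model in names:                      # exact match
            return provider, model
    for provider, names in MODELS.items():
        for name in names:
            if model in name:                   # partial match, e.g. "gemini-1.5-pro"
                return provider, name
    if model in MODELS:                         # a provider name passed as the model
        return model, MODELS[model][0]
    return DEFAULT

assert resolve("gpt-4o") == ("openai", "gpt-4o")
assert resolve("gemini-1.5-pro") == ("gemini", "gemini/gemini-1.5-pro")
assert resolve("openai") == ("openai", "gpt-4")
assert resolve("unknown-model") == ("openai", "gpt-4o-mini")
```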
@@ -179,7 +206,9 @@ class LLM(BaseModel):
         if api_key_name:
             self.api_key = os.environ.get(api_key_name, None)

-
+
+        base_url_key_name = self.endpoint_provider.upper() + "_API_BASE" if self.endpoint_provider else None
+
         if base_url_key_name:
             self.base_url = os.environ.get(base_url_key_name)
             self.api_base = self.base_url
@@ -190,11 +219,8 @@ class LLM(BaseModel):
     def _create_valid_params(self, config: Dict[str, Any], provider: str = None) -> Dict[str, Any]:
         params = dict()
         valid_keys = list()
-
-        if
-            valid_keys = PARAMS.get("litellm") + PARAMS.get("common") + PARAMS.get(self.provider) if self.provider else PARAMS.get("litellm") + PARAMS.get("common")
-        else:
-            valid_keys = PARAMS.get("common") + PARAMS.get(self.provider)
+        provider = provider if provider else self.provider if self.provider else None
+        valid_keys = PARAMS.get("litellm") + PARAMS.get("common") + PARAMS.get(provider) if provider and PARAMS.get(provider) else PARAMS.get("litellm") + PARAMS.get("common")

         for item in valid_keys:
             if hasattr(self, item) and getattr(self, item):
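Parameter filtering now always starts from the LiteLLM and common whitelists and appends provider-specific keys only when they exist, removing the branch that could call `PARAMS.get(self.provider)` with no fallback. The merge in isolation, over a toy `PARAMS` table (the real keys live in `llm_vars.py`):

```python
PARAMS = {
    "litellm": ["api_key", "base_url"],
    "common": ["temperature", "max_tokens"],
    "openai": ["logprobs", "seed"],
}

def valid_keys(provider: str | None) -> list[str]:
    """LiteLLM + common keys, plus provider keys when the provider is known."""
    keys = PARAMS["litellm"] + PARAMS["common"]
    if provider and PARAMS.get(provider):
        keys += PARAMS[provider]
    return keys

assert "seed" in valid_keys("openai")
assert valid_keys("deepseek") == PARAMS["litellm"] + PARAMS["common"]  # unknown provider: no extras
```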
@@ -224,7 +250,7 @@ class LLM(BaseModel):
         self._set_callbacks(self.callbacks) # passed by agent

         try:
-            provider = self.provider if self.provider else
+            provider = self.provider if self.provider else DEFAULT_MODEL_PROVIDER_NAME
             self.response_format = { "type": "json_object" } if tool_res_as_final == True else response_format

             if not tools:
@@ -236,51 +262,51 @@ class LLM(BaseModel):
             else:
                 self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]

-                if provider == "openai":
-
-
-
-
-
-
-
+                # if provider == "openai":
+                params = self._create_valid_params(config=config, provider=provider)
+                res = litellm.completion(messages=messages, model=self.model, tools=self.tools)
+                tool_calls = res.choices[0].message.tool_calls
+                tool_res = ""
+
+                for item in tool_calls:
+                    func_name = item.function.name
+                    func_args = item.function.arguments
+
+                    if not isinstance(func_args, dict):
+                        try:
+                            func_args = json.loads(json.dumps(eval(str(func_args))))
+                        except:
+                            pass
+
+                    for tool in tools:
+                        if isinstance(tool, ToolSet) and (tool.tool.name == func_name or tool.tool.func.__name__ == func_name or func_name == "random_func"):
+                            tool_instance = tool.tool
+                            args = tool.kwargs
+                            tool_res_to_add = tool_instance.run(params=args)
+
+                            if tool_res_as_final:
+                                tool_res += str(tool_res_to_add)
+                            else:
+                                messages.append(res.choices[0].message)
+                                messages.append({ "role": "tool", "tool_call_id": item.id, "content": str(tool_res_to_add) })

-
+                        else:
                             try:
-
-                            except:
-                                pass
-
-                            for tool in tools:
-                                if isinstance(tool, ToolSet) and (tool.tool.name == func_name or tool.tool.func.__name__ == func_name or func_name == "random_func"):
-                                    tool_instance = tool.tool
-                                    args = tool.kwargs
-                                    tool_res_to_add = tool_instance.run(params=args)
-
+                                tool_res_to_add = tool.run(params=func_args)
                                 if tool_res_as_final:
                                     tool_res += str(tool_res_to_add)
                                 else:
                                     messages.append(res.choices[0].message)
                                     messages.append({ "role": "tool", "tool_call_id": item.id, "content": str(tool_res_to_add) })
+                            except:
+                                pass

-
-                            messages.append(res.choices[0].message)
-                            messages.append({ "role": "tool", "tool_call_id": item.id, "content": str(tool_res_to_add) })
-                        except:
-                            pass
-
-                if tool_res_as_final:
-                    return tool_res
-                else:
-                    res = openai_client.chat.completions.create(messages=messages, model=self.model, tools=self.tools)
-                    self._tokens += int(res["usage"]["total_tokens"])
-                    return res.choices[0].message.content
+                if tool_res_as_final:
+                    return tool_res
+                else:
+                    res = litellm.completion(messages=messages, model=self.model, tools=self.tools)
+                    self._tokens += int(res["usage"]["total_tokens"])
+                    return res.choices[0].message.content

             except JSONSchemaValidationError as e:
                 self._logger.log(level="error", message="Raw Response: {}".format(e.raw_response), color="red")
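The rewritten branch goes through `litellm.completion` for every provider (the removed code special-cased openai with a raw OpenAI client) and executes each returned tool call, either accumulating outputs as the final answer (`tool_res_as_final`) or appending tool messages for a follow-up turn. The message-threading skeleton, with tool dispatch stubbed out; this assumes the standard OpenAI-style `tool_calls` response shape that LiteLLM mirrors, and uses `json.loads` in place of the original's `eval` round-trip:

```python
import json

def run_tool_calls(res, tools: dict, messages: list, tool_res_as_final: bool):
    """Execute tool calls from a completion response, as the new branch does."""
    tool_res = ""
    for call in res.choices[0].message.tool_calls or []:
        func_name = call.function.name
        args = call.function.arguments
        if not isinstance(args, dict):
            args = json.loads(args)            # arguments arrive as a JSON string
        output = tools[func_name](**args)      # dispatch to the registered callable
        if tool_res_as_final:
            tool_res += str(output)            # tool output *is* the answer
        else:
            messages.append(res.choices[0].message)
            messages.append({"role": "tool", "tool_call_id": call.id,
                             "content": str(output)})  # feed back for a second turn
    return tool_res if tool_res_as_final else messages
```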
versionhq/task/model.py
CHANGED
@@ -412,6 +412,8 @@ Ref. Output image: {output_formats_to_follow}

         response_format: Dict[str, Any] = None

+        # match model_provider:
+        #     case "openai":
         if self.response_fields:
             properties, required_fields = {}, []
             for i, item in enumerate(self.response_fields):
@@ -439,6 +441,7 @@ Ref. Output image: {output_formats_to_follow}
         elif self.pydantic_output:
             response_format = StructuredOutput(response_format=self.pydantic_output)._format()

+        #     case "gemini":
         return response_format
@@ -636,7 +639,7 @@ Ref. Output image: {output_formats_to_follow}

         if self.tool_res_as_final == True:
             tool_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
-            task_output = TaskOutput(task_id=self.id, tool_output=tool_output)
+            task_output = TaskOutput(task_id=self.id, tool_output=tool_output, raw=str(tool_output) if tool_output else "")

         else:
             raw_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
versionhq/task/structured_response.py
CHANGED
@@ -82,12 +82,13 @@ class StructuredList:

         if nested_object_type == dict:
             props.update({
-                "nest": {
+                # "nest": {
                     "type": "object",
                     "properties": { "item": { "type": "string"} }, #! REFINEME - field title <>`item`
                     "required": ["item",],
                     "additionalProperties": False
-                }
+                # }
+
             })

         elif nested_object_type == list:
             props.update({
|
|
110
111
|
|
111
112
|
|
112
113
|
class StructuredOutput(BaseModel):
|
113
|
-
response_format: Any = None
|
114
|
+
response_format: Any = None # pydantic base model
|
114
115
|
provider: str = "openai"
|
115
116
|
applicable_models: List[InstanceOf[LLM] | str] = list()
|
116
117
|
name: str = ""
|
{versionhq-1.1.11.6.dist-info → versionhq-1.1.11.8.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.1.11.6
+Version: 1.1.11.8
 Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -319,8 +319,10 @@ src/
 pyenv install 3.12.8
 pyenv global 3.12.8 (optional: `pyenv global system` to get back to the system default ver.)
 uv python pin 3.12.8
+echo 3.12.8 > .python-version
 ```

+
 3. Set up environment variables:
 Create a `.env` file in the project root and add the following:
 ```
{versionhq-1.1.11.6.dist-info → versionhq-1.1.11.8.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-versionhq/__init__.py,sha256=
+versionhq/__init__.py,sha256=9QPw8-DjsW5Z2vOHQUBb-AMSyIR2RFcFkR42aXVbUFc,863
 versionhq/_utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/_utils/i18n.py,sha256=TwA_PnYfDLA6VqlUDPuybdV9lgi3Frh_ASsb_X8jJo8,1483
 versionhq/_utils/logger.py,sha256=U-MpeGueA6YS8Ptfy0VnU_ePsZP-8Pvkvi0tZ4s_UMg,1438
@@ -7,7 +7,7 @@ versionhq/_utils/usage_metrics.py,sha256=hhq1OCW8Z4V93vwW2O2j528EyjOlF8wlTsX5IL-
 versionhq/_utils/vars.py,sha256=bZ5Dx_bFKlt3hi4-NNGXqdk7B23If_WaTIju2fiTyPQ,57
 versionhq/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/agent/default_agents.py,sha256=Sea3xDswxxMccer1vVDhp1E5etXW3ddf2n20JTMHgqs,503
-versionhq/agent/model.py,sha256=
+versionhq/agent/model.py,sha256=U6kz8EE4tJYk9HBg4qeB2_-297ROEORxP_gE6C89aH0,22232
 versionhq/agent/parser.py,sha256=riG0dkdQCxH7uJ0AbdVdg7WvL0BXhUgJht0VtQvxJBc,4082
 versionhq/agent/rpm_controller.py,sha256=7AKIEPbWBq_ESOZCaiKVOGjfSPHd2qwg6-wbBlhqC0g,2367
 versionhq/agent/TEMPLATES/Backstory.py,sha256=IAhGnnt6VUMe3wO6IzeyZPDNu7XE7Uiu3VEXUreOcKs,532
@@ -25,11 +25,11 @@ versionhq/knowledge/_utils.py,sha256=YWRF8U533cfZes_gZqUvdj-K24MD2ri1R0gjc_aPYyc
 versionhq/knowledge/embedding.py,sha256=KfHc__1THxb5jrg1EMrF-v944RDuIr2hE0l-MtM3Bp0,6826
 versionhq/knowledge/model.py,sha256=n7kU4jQ24BUIxwosSVRK8tYhAFYhgc4yf7e4Q-bq4bk,1832
 versionhq/knowledge/source.py,sha256=WOARChmm_cNtBD-xGo4RoYmcuodzdalctXI-gDBCW6k,13610
-versionhq/knowledge/source_docling.py,sha256=
+versionhq/knowledge/source_docling.py,sha256=hhHn3rS4KVsFKEPWcfllM8VxSL86PckZdAHDZNQNOq8,5411
 versionhq/knowledge/storage.py,sha256=7oxCg3W9mFjYH1YmuH9kFtTbNxquzYFjuUjd_TlsB9E,8170
 versionhq/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-versionhq/llm/llm_vars.py,sha256=
-versionhq/llm/model.py,sha256=
+versionhq/llm/llm_vars.py,sha256=48IvN6w6h6QJUWqO0A44begGLoUbBaaS-HPO_wp7c84,9588
+versionhq/llm/model.py,sha256=0qe3oC5u42erVBHc76WRpDKH9PDTzXyaraIuDJ6bDAY,15426
 versionhq/memory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/memory/contextual_memory.py,sha256=tCsOOAUnfrOL7YiakqGoi3uShzzS870TmGnlGd3z_A4,3556
 versionhq/memory/model.py,sha256=6Sy-cnrhHNIx3ZN38uNO7d8YywIl_uo_OvDVzVM-w14,5755
@@ -44,8 +44,8 @@ versionhq/task/__init__.py,sha256=l2r_g01i91JAGlOoHZP_Gh2WCk6mo9D19lcqt7sKMpQ,18
 versionhq/task/evaluate.py,sha256=RCaFa9N4IibAYLWKUlTn6lWiQoI7t4f_XZVUvecjTxs,3486
 versionhq/task/formatter.py,sha256=N8Kmk9vtrMtBdgJ8J7RmlKNMdZWSmV8O1bDexmCWgU0,643
 versionhq/task/log_handler.py,sha256=KJRrcNZgFSKhlNzvtYFnvtp6xukaF1s7ifX9u4zWrN8,1683
-versionhq/task/model.py,sha256=
-versionhq/task/structured_response.py,sha256=
+versionhq/task/model.py,sha256=DCm2jS0RFm4iPkepzKbNgODUdxOtGCV0RCINMaI0c_Q,30162
+versionhq/task/structured_response.py,sha256=YxuWcDMHcZLzdxI1ihW99Y-i6nl8yXBQ5Q_dFQac8jw,4837
 versionhq/task/TEMPLATES/Description.py,sha256=bChflSWGGQo9JpnO6QX6Ng9pnONiTf-zwQ3ke4xQgSQ,357
 versionhq/team/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 versionhq/team/model.py,sha256=NzcRXWwP0adWL9vsnsmI-A5dOcE3199FGmGgemUB2VA,20043
@@ -57,8 +57,8 @@ versionhq/tool/composio_tool_vars.py,sha256=FvBuEXsOQUYnN7RTFxT20kAkiEYkxWKkiVtg
 versionhq/tool/decorator.py,sha256=C4ZM7Xi2gwtEMaSeRo-geo_g_MAkY77WkSLkAuY0AyI,1205
 versionhq/tool/model.py,sha256=7ccEnje_8LuxLVeog6pL38nToArXQXk4KY7A9hfprDo,12239
 versionhq/tool/tool_handler.py,sha256=2m41K8qo5bGCCbwMFferEjT-XZ-mE9F0mDUOBkgivOI,1416
-versionhq-1.1.11.6.dist-info/LICENSE,sha256=
-versionhq-1.1.11.6.dist-info/METADATA,sha256=
-versionhq-1.1.11.6.dist-info/WHEEL,sha256=
-versionhq-1.1.11.6.dist-info/top_level.txt,sha256=
-versionhq-1.1.11.6.dist-info/RECORD,,
+versionhq-1.1.11.8.dist-info/LICENSE,sha256=7CCXuMrAjPVsUvZrsBq9DsxI2rLDUSYXR_qj4yO_ZII,1077
+versionhq-1.1.11.8.dist-info/METADATA,sha256=VKscq6wLejKxh7fF2p6V9fbgRveRHbzAjVw5KcBwpTg,18672
+versionhq-1.1.11.8.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+versionhq-1.1.11.8.dist-info/top_level.txt,sha256=DClQwxDWqIUGeRJkA8vBlgeNsYZs4_nJWMonzFt5Wj0,10
+versionhq-1.1.11.8.dist-info/RECORD,,
{versionhq-1.1.11.6.dist-info → versionhq-1.1.11.8.dist-info}/LICENSE
File without changes
{versionhq-1.1.11.6.dist-info → versionhq-1.1.11.8.dist-info}/WHEEL
File without changes
{versionhq-1.1.11.6.dist-info → versionhq-1.1.11.8.dist-info}/top_level.txt
File without changes