versionhq 1.1.11.5__tar.gz → 1.1.11.7__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/.github/workflows/run_tests.yml +0 -2
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/PKG-INFO +3 -1
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/README.md +2 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/pyproject.toml +1 -1
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/__init__.py +1 -1
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/agent/model.py +42 -11
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/knowledge/source_docling.py +1 -3
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/knowledge/storage.py +12 -1
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/llm/llm_vars.py +76 -58
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/llm/model.py +44 -50
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/task/model.py +4 -1
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/task/structured_response.py +4 -3
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq.egg-info/PKG-INFO +3 -1
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/agent/agent_test.py +12 -3
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/llm/llm_test.py +7 -17
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/task/task_test.py +51 -1
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/uv.lock +2 -1
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/.github/workflows/publish.yml +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/.github/workflows/publish_testpypi.yml +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/.github/workflows/security_check.yml +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/.gitignore +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/.pre-commit-config.yaml +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/.python-version +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/LICENSE +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/SECURITY.md +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/db/preprocess.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/requirements-dev.txt +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/requirements.txt +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/runtime.txt +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/setup.cfg +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/_utils/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/_utils/i18n.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/_utils/logger.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/_utils/process_config.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/_utils/usage_metrics.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/_utils/vars.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/agent/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/agent/default_agents.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/agent/parser.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/agent/rpm_controller.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/cli/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/clients/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/clients/customer/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/clients/customer/model.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/clients/product/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/clients/product/model.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/clients/workflow/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/clients/workflow/model.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/knowledge/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/knowledge/_utils.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/knowledge/embedding.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/knowledge/model.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/knowledge/source.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/llm/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/memory/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/memory/contextual_memory.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/memory/model.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/storage/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/storage/base.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/storage/ltm_sqlite_storage.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/storage/mem0_storage.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/storage/rag_storage.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/storage/task_output_storage.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/storage/utils.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/task/TEMPLATES/Description.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/task/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/task/evaluate.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/task/formatter.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/task/log_handler.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/team/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/team/model.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/team/team_planner.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/tool/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/tool/cache_handler.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/tool/composio_tool.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/tool/composio_tool_vars.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/tool/decorator.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/tool/model.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq/tool/tool_handler.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq.egg-info/SOURCES.txt +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq.egg-info/dependency_links.txt +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq.egg-info/requires.txt +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/src/versionhq.egg-info/top_level.txt +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/agent/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/cli/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/clients/customer_test.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/clients/product_test.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/clients/workflow_test.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/conftest.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/knowledge/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/knowledge/knowledge_test.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/knowledge/mock_report_compressed.pdf +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/llm/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/memory/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/memory/memory_test.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/task/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/team/Prompts/Demo_test.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/team/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/team/team_test.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/tool/__init__.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/tool/composio_test.py +0 -0
- {versionhq-1.1.11.5 → versionhq-1.1.11.7}/tests/tool/tool_test.py +0 -0
.github/workflows/run_tests.yml

```diff
@@ -8,13 +8,11 @@ permissions:
 env:
   LITELLM_API_KEY: ${{ secrets.LITELLM_API_KEY }}
   OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-  OPENAI_API_BASE: ${{ secrets.OPENAI_API_BASE }}
   DEFAULT_REDIRECT_URL: ${{ secrets.DEFAULT_REDIRECT_URL }}
   DEFAULT_USER_ID: ${{ secrets.DEFAULT_USER_ID }}
   COMPOSIO_API_KEY: ${{ secrets.COMPOSIO_API_KEY }}
   DEFAULT_MODEL_NAME: ${{ secrets.DEFAULT_MODEL_NAME }}
   GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
-  GEMINI_API_BASE: ${{ secrets.GEMINI_API_BASE }}
   ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
   ANTHROPIC_API_BASE: ${{ secrets.ANTHROPIC_API_BASE }}
   MEM0_API_KEY: ${{ secrets.MEM0_API_KEY }}
```
PKG-INFO

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.1.11.5
+Version: 1.1.11.7
 Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -319,8 +319,10 @@ src/
 pyenv install 3.12.8
 pyenv global 3.12.8 (optional: `pyenv global system` to get back to the system default ver.)
 uv python pin 3.12.8
+echo 3.12.8 > .python-version
 ```
 
+
 3. Set up environment variables:
 Create a `.env` file in the project root and add the following:
 ```
````
README.md

````diff
@@ -243,8 +243,10 @@ src/
 pyenv install 3.12.8
 pyenv global 3.12.8 (optional: `pyenv global system` to get back to the system default ver.)
 uv python pin 3.12.8
+echo 3.12.8 > .python-version
 ```
 
+
 3. Set up environment variables:
 Create a `.env` file in the project root and add the following:
 ```
````
pyproject.toml

```diff
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__", "*.egg-info"]
 
 [project]
 name = "versionhq"
-version = "1.1.11.5"
+version = "1.1.11.7"
 authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
 description = "LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows"
 readme = "README.md"
```
src/versionhq/agent/model.py

```diff
@@ -99,7 +99,7 @@ class Agent(BaseModel):
     tools: Optional[List[InstanceOf[Tool | ToolSet] | Type[Tool] | Any]] = Field(default_factory=list)
 
     # knowledge
-    knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(default=None)
+    knowledge_sources: Optional[List[BaseKnowledgeSource | Any]] = Field(default=None)
     _knowledge: Optional[Knowledge] = PrivateAttr(default=None)
 
     # memory
@@ -344,14 +344,46 @@ class Agent(BaseModel):
 
     @model_validator(mode="after")
     def set_up_knowledge(self) -> Self:
-
-
+        from versionhq.knowledge.source import BaseKnowledgeSource, StringKnowledgeSource, TextFileKnowledgeSource, CSVKnowledgeSource, ExcelKnowledgeSource, JSONKnowledgeSource
+        from versionhq.knowledge.source_docling import DoclingSource
 
-
-
-
-
-
+        if self.knowledge_sources:
+            try:
+                collection_name = f"{self.role.replace(' ', '_')}"
+                knowledge_sources = []
+                docling_fp, txt_fp, json_fp, excel_fp, csv_fp, pdf_fp = [], [], [], [], [], []
+                str_cont = ""
+
+                for item in self.knowledge_sources:
+                    if isinstance(item, BaseKnowledgeSource):
+                        knowledge_sources.append(item)
+
+                    elif isinstance(item, str) and "http" in item:
+                        docling_fp.append(item)
+
+                    elif isinstance(item, str):
+                        match os.path.splitext(item)[1]:
+                            case ".txt": txt_fp.append(item)
+                            case ".json": json_fp.append(item)
+                            case ".xls" | ".xlsx": excel_fp.append(item)
+                            case ".pdf": pdf_fp.append(item)
+                            case ".csv": csv_fp.append(item)
+                            case _: str_cont += str(item)
+
+                    else:
+                        str_cont += str(item)
+
+                if docling_fp: knowledge_sources.append(DoclingSource(file_paths=docling_fp))
+                if str_cont: knowledge_sources.append(StringKnowledgeSource(content=str_cont))
+                if txt_fp: knowledge_sources.append(TextFileKnowledgeSource(file_paths=txt_fp))
+                if csv_fp: knowledge_sources.append(CSVKnowledgeSource(file_path=csv_fp))
+                if excel_fp: knowledge_sources.append(ExcelKnowledgeSource(file_path=excel_fp))
+                if json_fp: knowledge_sources.append(JSONKnowledgeSource(file_paths=json_fp))
+
+                self._knowledge = Knowledge(sources=knowledge_sources, embedder_config=self.embedder_config, collection_name=collection_name)
+
+            except:
+                self._logger.log(level="warning", message="We cannot find the format for the source. Add BaseKnowledgeSource objects instead.", color="yellow")
 
         return self
 
```
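The new validator accepts loosely typed sources and routes them by shape: prepared source objects pass through, `http` strings go to `DoclingSource`, known file extensions map to the matching file source, and leftover strings are merged into a single `StringKnowledgeSource`. A minimal usage sketch based on the hunk above (the URL and file names are placeholders):

```python
from versionhq.agent.model import Agent

agent = Agent(
    role="Information Agent",
    goal="Answer questions from the given sources",
    knowledge_sources=[
        "Kuriko's favorite color is gold.",  # plain text -> StringKnowledgeSource
        "https://example.com/post",          # URL -> DoclingSource (placeholder URL)
        "notes.txt",                         # .txt -> TextFileKnowledgeSource (placeholder path)
    ],
)

# The validator stores the combined store on agent._knowledge, with a
# collection name derived from the role.
assert agent._knowledge.collection_name == "Information_Agent"
```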
src/versionhq/agent/model.py

```diff
@@ -414,7 +446,7 @@ class Agent(BaseModel):
         self._logger.log(level="info", message=f"Messages sent to the model: {messages}", color="blue")
 
         if tool_res_as_final:
-            func_llm = self.function_calling_llm if self.function_calling_llm and self.function_calling_llm._supports_function_calling() else LLM(model=DEFAULT_MODEL_NAME)
+            func_llm = self.function_calling_llm if self.function_calling_llm and self.function_calling_llm._supports_function_calling() else self.llm if self.llm and self.llm._supports_function_calling() else LLM(model=DEFAULT_MODEL_NAME)
             raw_response = func_llm.call(messages=messages, tools=tools, tool_res_as_final=True)
             task.tokens = func_llm._tokens
         else:
```
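The updated fallback now tries the agent's own LLM before dropping to the default model. The nested conditional expression, unrolled for readability (equivalent logic, not the package's literal code; the helper name is illustrative):

```python
from versionhq.llm.model import LLM

def pick_function_calling_llm(agent, default_model: str):
    """Unrolled version of the one-line conditional in the hunk above."""
    if agent.function_calling_llm and agent.function_calling_llm._supports_function_calling():
        return agent.function_calling_llm  # dedicated function-calling model, if set
    if agent.llm and agent.llm._supports_function_calling():
        return agent.llm                   # new in 1.1.11.7: reuse the agent's own LLM
    return LLM(model=default_model)        # last resort: the default model
```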
src/versionhq/agent/model.py

```diff
@@ -458,7 +490,7 @@ class Agent(BaseModel):
         from versionhq.knowledge._utils import extract_knowledge_context
 
         task: InstanceOf[Task] = task
-        tools: Optional[List[InstanceOf[Tool
+        tools: Optional[List[InstanceOf[Tool | ToolSet] | Type[Tool]]] = task_tools + self.tools if task.can_use_agent_tools else task_tools
 
         if self.max_rpm and self._rpm_controller:
             self._rpm_controller._reset_request_count()
@@ -474,7 +506,6 @@ class Agent(BaseModel):
         if agent_knowledge_context:
             task_prompt += agent_knowledge_context
 
-
         if self.use_memory == True:
             contextual_memory = ContextualMemory(
                 memory_config=self.memory_config, stm=self.short_term_memory, ltm=self.long_term_memory, um=self.user_memory
```
src/versionhq/knowledge/source_docling.py

```diff
@@ -3,7 +3,6 @@ from typing import Iterator, List, Optional
 from urllib.parse import urlparse
 
 try:
-    import docling
     from docling.datamodel.base_models import InputFormat
     from docling.document_converter import DocumentConverter
     from docling.exceptions import ConversionError
@@ -12,9 +11,8 @@ try:
     DOCLING_AVAILABLE = True
 except ImportError:
     import envoy
-
+    envoy.run("uv add docling --optional docling")
 
-    import docling
     from docling.datamodel.base_models import InputFormat
     from docling.document_converter import DocumentConverter
     from docling.exceptions import ConversionError
```
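The `ImportError` branch now shells out to install docling and retries the imports. The same install-on-demand pattern in generic form (a sketch using `subprocess` in place of `envoy`; not the package's code):

```python
try:
    from docling.document_converter import DocumentConverter
except ImportError:
    # Install the optional dependency on first use, then retry the import.
    import subprocess
    import sys
    subprocess.run([sys.executable, "-m", "pip", "install", "docling"], check=True)
    from docling.document_converter import DocumentConverter
```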
src/versionhq/knowledge/storage.py

```diff
@@ -73,11 +73,21 @@ class KnowledgeStorage(BaseKnowledgeStorage):
 
 
     def __init__(self, embedder_config: Optional[Dict[str, Any]] = None, collection_name: Optional[str] = None):
-        self.collection_name = collection_name if collection_name else "knowledge"
+        self.collection_name = self._validate_collection_name(collection_name) if collection_name else "knowledge"
        self.embedder_config = embedder_config
         self.initialize_knowledge_storage()
 
 
+    def _validate_collection_name(self, collection_name: str = None) -> str:
+        """
+        Return a valid collection name from the given collection name.
+        Expected collection name (1) contains 3-63 characters, (2) starts and ends with an alphanumeric character, (3) otherwise contains only alphanumeric characters, underscores or hyphens (-), (4) contains no two consecutive periods (..) and (5) is not a valid IPv4 address.
+        """
+        collection_name = collection_name if collection_name else self.collection_name
+        valid_collection_name = collection_name.replace(' ', "-").replace("(", "-").replace(")", "").replace("..", "")
+        return valid_collection_name
+
+
     def _create_default_embedding_function(self) -> Any:
         from chromadb.utils.embedding_functions.openai_embedding_function import OpenAIEmbeddingFunction
 
```
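Note that the helper only normalizes a few characters; the Chroma-style constraints quoted in its docstring (length 3-63, alphanumeric ends, no IPv4) are not otherwise enforced. A standalone restatement of what the released code actually does:

```python
def validate_collection_name(name: str) -> str:
    # Mirrors KnowledgeStorage._validate_collection_name in 1.1.11.7:
    # spaces and "(" become hyphens, ")" and ".." are stripped.
    return name.replace(" ", "-").replace("(", "-").replace(")", "").replace("..", "")

print(validate_collection_name("Information Agent (v2)"))  # "Information-Agent--v2"
```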
src/versionhq/knowledge/storage.py

```diff
@@ -101,6 +111,7 @@ class KnowledgeStorage(BaseKnowledgeStorage):
         chroma_client = chromadb.PersistentClient(path=base_path, settings=Settings(allow_reset=True))
         self.app = chroma_client
         self._set_embedding_function(embedder_config=self.embedder_config)
+        self.collection_name = self.collection_name if self.collection_name else "knowledge"
 
         try:
             if self.app:
```
src/versionhq/llm/llm_vars.py

```diff
@@ -3,6 +3,20 @@ from typing import Type
 
 JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
 
+PROVIDERS = [
+    "openai",
+    "gemini",
+    "sagemaker",
+
+    "anthropic",
+    "ollama",
+    "watson",
+    "bedrock",
+    "azure",
+    "cerebras",
+    "llama",
+]
+
 
 """
 List of models available on the framework.
@@ -16,7 +30,6 @@ litellm.pick_cheapest_chat_models_from_llm_provider(custom_llm_provider: str, n=
 
 MODELS = {
     "openai": [
-        # "gpt-3.5-turbo",
         "gpt-4",
         "gpt-4o",
         "gpt-4o-mini",
@@ -27,11 +40,7 @@ MODELS = {
         "gemini/gemini-1.5-flash",
         "gemini/gemini-1.5-pro",
         "gemini/gemini-2.0-flash-exp",
-        # "gemini/gemini-gemma-2-9b-it",
-        # "gemini/gemini-gemma-2-27b-it",
     ],
-    # "vetrex_ai": [
-    # ],
     "anthropic": [
         "claude-3-5-sonnet-20241022",
         "claude-3-5-sonnet-20240620",
@@ -39,10 +48,26 @@ MODELS = {
         "claude-3-opus-20240229",
         "claude-3-haiku-20240307",
     ],
-    # "
-    #
-    #
-    #
+    # "sagemaker": [
+    #     "sagemaker/huggingface-text2text-flan-t5-base",
+    #     "sagemaker/huggingface-llm-gemma-7b",
+    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-13b",
+    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-2-70b",
+    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-3-8b",
+    #     "sagemaker/jumpstart-dft-meta-textgeneration-llama-3-70b",
+    #     "sagemaker/huggingface-llm-mistral-7b"
+    # ], #https://docs.aws.amazon.com/sagemaker/latest/dg/jumpstart-foundation-models-latest.html
+    "ollama": [
+        "ollama/llama3.1",
+        "ollama/mixtral",
+        "ollama/mixtral-8x22B-Instruct-v0.1",
+
+    ],
+    "deepseek": [
+        "deepseek/deepseek-reasoner",
+
+    ],
+
     # "watson": [
     #     "watsonx/meta-llama/llama-3-1-70b-instruct",
     #     "watsonx/meta-llama/llama-3-1-8b-instruct",
```
src/versionhq/llm/llm_vars.py

```diff
@@ -53,44 +78,48 @@ MODELS = {
     #     "watsonx/mistral/mistral-large",
     #     "watsonx/ibm/granite-3-8b-instruct",
     # ],
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    "bedrock": [
+        "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
+        "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
+        "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
+        "bedrock/anthropic.claude-3-opus-20240229-v1:0",
+        # "bedrock/anthropic.claude-v2:1",
+        "bedrock/anthropic.claude-v2",
+        "bedrock/anthropic.claude-instant-v1",
+        "bedrock/meta.llama3-1-405b-instruct-v1:0",
+        "bedrock/meta.llama3-1-70b-instruct-v1:0",
+        "bedrock/meta.llama3-1-8b-instruct-v1:0",
+        "bedrock/meta.llama3-70b-instruct-v1:0",
+        "bedrock/meta.llama3-8b-instruct-v1:0",
+        "bedrock/amazon.titan-text-lite-v1",
+        "bedrock/amazon.titan-text-express-v1",
+        "bedrock/cohere.command-text-v14",
+        "bedrock/ai21.j2-mid-v1",
+        "bedrock/ai21.j2-ultra-v1",
+        "bedrock/ai21.jamba-instruct-v1:0",
+        "bedrock/meta.llama2-13b-chat-v1",
+        "bedrock/meta.llama2-70b-chat-v1",
+        "bedrock/mistral.mistral-7b-instruct-v0:2",
+        "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
+    ],
 }
 
 
-
-
-    "
-    "gemini",
-    "
-    "
-
-
-
-
-
+
+KEYS = {
+    "openai": ["OPENAI_API_KEY"],
+    "gemini": ["GEMINI_API_KEY"],
+    "sagemaker": ["AWS_ACCESS_KEY_ID", "ADW_SECURET_ACCESS_KEY", "AWS_REGION_NAME"],
+    "anthropic": ["ANTHROPIC_API_KEY"],
+}
+
+
+"""
+Use base_url to specify
+"""
+BASE_URLS = {
+    "deepseek": "https://api.deepseek.com"
+}
 
 
 """
```
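The new `KEYS` table maps each provider to the environment variables it needs, and `BASE_URLS` pins non-default API endpoints. A hypothetical pre-flight check built on that table (not part of the package; env-var names copied verbatim from the hunk, including the `ADW_SECURET_ACCESS_KEY` spelling as released):

```python
import os

KEYS = {
    "openai": ["OPENAI_API_KEY"],
    "gemini": ["GEMINI_API_KEY"],
    "sagemaker": ["AWS_ACCESS_KEY_ID", "ADW_SECURET_ACCESS_KEY", "AWS_REGION_NAME"],
    "anthropic": ["ANTHROPIC_API_KEY"],
}

def missing_credentials(provider: str) -> list[str]:
    """Return the env vars a provider still needs before it can be called."""
    return [key for key in KEYS.get(provider, []) if not os.environ.get(key)]

# e.g. missing_credentials("anthropic") -> ["ANTHROPIC_API_KEY"] when unset
```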
src/versionhq/llm/llm_vars.py

```diff
@@ -118,6 +147,8 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "claude-3-haiku-20240307": 200000,
 
     "deepseek-chat": 128000,
+    "deepseek/deepseek-reasoner": 8192,
+
     "gemma2-9b-it": 8192,
     "gemma-7b-it": 8192,
     "llama3-groq-70b-8192-tool-use-preview": 8192,
@@ -135,11 +166,7 @@ LLM_CONTEXT_WINDOW_SIZES = {
 }
 
 
-LLM_API_KEY_NAMES = {
-    "openai": "OPENAI_API_KEY",
-    "anthropic": "ANTHROPIC_API_KEY",
-    "gemini": "GEMINI_API_KEY",
-}
+
 
 LLM_BASE_URL_KEY_NAMES = {
     "openai": "OPENAI_API_BASE",
@@ -262,14 +289,8 @@ PARAMS = {
     ],
     "openai": [
         "timeout",
-        # "temperature",
-        # "top_p",
-        # "n",
-        # "stream",
         "stream_options",
-        # "stop",
         "max_completion_tokens",
-        # "max_tokens",
         "modalities",
         "prediction",
         "audio",
@@ -277,10 +298,7 @@ PARAMS = {
         "frequency_penalty",
         "logit_bias",
         "user",
-        # "response_format",
         "seed",
-        # "tools",
-        # "tool_choice",
         "logprobs",
         "top_logprobs",
         "parallel_tool_calls",
```
src/versionhq/llm/model.py

```diff
@@ -4,23 +4,16 @@ import os
 import sys
 import threading
 import warnings
-import litellm
-from litellm import JSONSchemaValidationError
-from abc import ABC
 from dotenv import load_dotenv
-
+import litellm
+from litellm import get_supported_openai_params, JSONSchemaValidationError
 from contextlib import contextmanager
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional
 from typing_extensions import Self
-
 from pydantic import UUID4, BaseModel, Field, PrivateAttr, field_validator, model_validator, create_model, InstanceOf, ConfigDict
 from pydantic_core import PydanticCustomError
 
-from openai import OpenAI
-
-from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, LLM_API_KEY_NAMES, LLM_BASE_URL_KEY_NAMES, MODELS, PARAMS, SchemaType
-from versionhq.task import TaskOutputFormat
-from versionhq.task.model import ResponseField, Task
+from versionhq.llm.llm_vars import LLM_CONTEXT_WINDOW_SIZES, MODELS, PARAMS
 from versionhq.tool.model import Tool, ToolSet
 from versionhq._utils.logger import Logger
 
@@ -31,8 +24,8 @@ LITELLM_API_BASE = os.environ.get("LITELLM_API_BASE")
 DEFAULT_CONTEXT_WINDOW_SIZE = int(8192 * 0.75)
 DEFAULT_MODEL_NAME = os.environ.get("DEFAULT_MODEL_NAME")
 
-proxy_openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"), organization="versionhq", base_url=LITELLM_API_BASE)
-openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
+# proxy_openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"), organization="versionhq", base_url=LITELLM_API_BASE)
+# openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
 
 
 class FilteredStream:
@@ -179,6 +172,7 @@ class LLM(BaseModel):
         if api_key_name:
             self.api_key = os.environ.get(api_key_name, None)
 
+
         base_url_key_name = self.provider.upper() + "_API_BASE" if self.provider else None
         if base_url_key_name:
             self.base_url = os.environ.get(base_url_key_name)
```
src/versionhq/llm/model.py

```diff
@@ -236,51 +230,51 @@ class LLM(BaseModel):
             else:
                 self.tools = [item.tool.properties if isinstance(item, ToolSet) else item.properties for item in tools]
 
-            if provider == "openai":
-
-
-
-
-
-
-
-
+            # if provider == "openai":
+            params = self._create_valid_params(config=config, provider=provider)
+            res = litellm.completion(messages=messages, model=self.model, tools=self.tools)
+            tool_calls = res.choices[0].message.tool_calls
+            tool_res = ""
+
+            for item in tool_calls:
+                func_name = item.function.name
+                func_args = item.function.arguments
+
+                if not isinstance(func_args, dict):
+                    try:
+                        func_args = json.loads(json.dumps(eval(str(func_args))))
+                    except:
+                        pass
+
+                for tool in tools:
+                    if isinstance(tool, ToolSet) and (tool.tool.name == func_name or tool.tool.func.__name__ == func_name or func_name == "random_func"):
+                        tool_instance = tool.tool
+                        args = tool.kwargs
+                        tool_res_to_add = tool_instance.run(params=args)
+
+                        if tool_res_as_final:
+                            tool_res += str(tool_res_to_add)
+                        else:
+                            messages.append(res.choices[0].message)
+                            messages.append({ "role": "tool", "tool_call_id": item.id, "content": str(tool_res_to_add) })
 
-
+                    else:
                         try:
-
-                        except:
-                            pass
-
-            for tool in tools:
-                if isinstance(tool, ToolSet) and (tool.tool.name == func_name or tool.tool.func.__name__ == func_name or func_name == "random_func"):
-                    tool_instance = tool.tool
-                    args = tool.kwargs
-                    tool_res_to_add = tool_instance.run(params=args)
-
+                            tool_res_to_add = tool.run(params=func_args)
                             if tool_res_as_final:
                                 tool_res += str(tool_res_to_add)
                             else:
                                 messages.append(res.choices[0].message)
                                 messages.append({ "role": "tool", "tool_call_id": item.id, "content": str(tool_res_to_add) })
+                        except:
+                            pass
 
-
-
-
-
-
-
-            messages.append(res.choices[0].message)
-            messages.append({ "role": "tool", "tool_call_id": item.id, "content": str(tool_res_to_add) })
-        except:
-            pass
-
-        if tool_res_as_final:
-            return tool_res
-        else:
-            res = openai_client.chat.completions.create(messages=messages, model=self.model, tools=self.tools)
-            self._tokens += int(res["usage"]["total_tokens"])
-            return res.choices[0].message.content
+            if tool_res_as_final:
+                return tool_res
+            else:
+                res = litellm.completione(messages=messages, model=self.model, tools=self.tools)
+                self._tokens += int(res["usage"]["total_tokens"])
+                return res.choices[0].message.content
 
         except JSONSchemaValidationError as e:
             self._logger.log(level="error", message="Raw Response: {}".format(e.raw_response), color="red")
```
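The rewritten branch routes every request through `litellm.completion` instead of a direct OpenAI client, then matches returned tool calls against the local `ToolSet` objects. A condensed, self-contained sketch of that dispatch pattern (hypothetical helper; `registry` maps tool names to plain Python callables, and arguments are parsed with `json.loads` rather than `eval`):

```python
import json
import litellm

def run_tool_calls(messages: list, registry: dict, model: str) -> str:
    """Request tool calls from the model, then route each call to the local
    callable whose name matches the reported function name."""
    tools = [{
        "type": "function",
        "function": {
            "name": name,
            "parameters": {"type": "object", "properties": {}},
        },
    } for name in registry]

    res = litellm.completion(messages=messages, model=model, tools=tools)
    output = ""
    for call in res.choices[0].message.tool_calls or []:
        func = registry.get(call.function.name)
        if func:
            kwargs = json.loads(call.function.arguments or "{}")
            output += str(func(**kwargs))
    return output
```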
src/versionhq/task/model.py

```diff
@@ -412,6 +412,8 @@ Ref. Output image: {output_formats_to_follow}
 
         response_format: Dict[str, Any] = None
 
+        # match model_provider:
+        #     case "openai":
         if self.response_fields:
             properties, required_fields = {}, []
             for i, item in enumerate(self.response_fields):
@@ -439,6 +441,7 @@ Ref. Output image: {output_formats_to_follow}
         elif self.pydantic_output:
             response_format = StructuredOutput(response_format=self.pydantic_output)._format()
 
+        # case "gemini":
         return response_format
 
 
@@ -636,7 +639,7 @@ Ref. Output image: {output_formats_to_follow}
 
         if self.tool_res_as_final == True:
             tool_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
-            task_output = TaskOutput(task_id=self.id, tool_output=tool_output)
+            task_output = TaskOutput(task_id=self.id, tool_output=tool_output, raw=tool_output)
 
         else:
             raw_output = agent.execute_task(task=self, context=context, task_tools=task_tools)
```
src/versionhq/task/structured_response.py

```diff
@@ -82,12 +82,13 @@ class StructuredList:
 
         if nested_object_type == dict:
             props.update({
-                "nest": {
+                # "nest": {
                     "type": "object",
                     "properties": { "item": { "type": "string"} }, #! REFINEME - field title <>`item`
                     "required": ["item",],
                     "additionalProperties": False
-                }
+                # }
+            })
 
         elif nested_object_type == list:
             props.update({
@@ -110,7 +111,7 @@ class StructuredList:
 
 
 class StructuredOutput(BaseModel):
-    response_format: Any = None
+    response_format: Any = None # pydantic base model
     provider: str = "openai"
     applicable_models: List[InstanceOf[LLM] | str] = list()
     name: str = ""
```
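Commenting out the `"nest"` wrapper in the first hunk flattens the fragment that a dict-typed list item contributes to the JSON schema. The resulting value, written out for clarity (derived from the hunk; not a separate API):

```python
# Schema fragment produced for a dict-typed nested item after this change;
# previously the same keys were wrapped under a "nest" object.
props = {
    "type": "object",
    "properties": {"item": {"type": "string"}},  # field title hard-coded to "item" (see REFINEME note)
    "required": ["item"],
    "additionalProperties": False,
}
```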
src/versionhq.egg-info/PKG-INFO

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.1.11.5
+Version: 1.1.11.7
 Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -319,8 +319,10 @@ src/
 pyenv install 3.12.8
 pyenv global 3.12.8 (optional: `pyenv global system` to get back to the system default ver.)
 uv python pin 3.12.8
+echo 3.12.8 > .python-version
 ```
 
+
 3. Set up environment variables:
 Create a `.env` file in the project root and add the following:
 ```
````
tests/agent/agent_test.py

```diff
@@ -231,22 +231,26 @@ def test_agent_custom_max_iterations():
 
 def test_agent_with_knowledge_sources():
     from versionhq.knowledge.source import StringKnowledgeSource
+    from versionhq.knowledge.source_docling import DoclingSource
     from versionhq.task.model import Task
 
     content = "Kuriko's favorite color is gold, and she enjoy Japanese food."
     string_source = StringKnowledgeSource(content=content)
+    html = "https://github.blog/security/vulnerability-research/cybersecurity-researchers-digital-detectives-in-a-connected-world/"
+    knowledge_sources = [content, string_source, html,]
 
-    agent = Agent(role="Information Agent", goal="Provide information based on knowledge sources", knowledge_sources=
+    agent = Agent(role="Information Agent", goal="Provide information based on knowledge sources", knowledge_sources=knowledge_sources)
 
     assert agent._knowledge.collection_name == f"{agent.role.replace(' ', '_')}"
-    assert
+    assert [isinstance(item, StringKnowledgeSource | DoclingSource) for item in agent.knowledge_sources]
+    assert agent._knowledge.embedder_config == agent.embedder_config
     assert agent._knowledge.storage and agent._knowledge.storage.embedding_function and agent._knowledge.storage.app is not None and agent._knowledge.storage.collection_name is not None
 
     task = Task(description="Answer the following question: What is Kuriko's favorite color?")
 
     with patch("versionhq.knowledge.storage.KnowledgeStorage") as MockKnowledge:
         mock_knowledge_instance = MockKnowledge.return_value
-        mock_knowledge_instance.sources
+        isinstance(mock_knowledge_instance.sources[0], StringKnowledgeSource)
         mock_knowledge_instance.query.return_value = [{ "content": content }]
 
         res = task.execute_sync(agent=agent)
@@ -308,3 +312,8 @@ def test_agent_with_memory_config():
     assert agent_2.short_term_memory.memory_provider == "mem0" and agent_2.short_term_memory.storage.memory_type == "stm"
     assert agent_2.long_term_memory and isinstance(agent_2.long_term_memory.storage, LTMSQLiteStorage)
     assert agent_2.user_memory and agent_2.user_memory.storage and agent_2.user_memory.storage.memory_type == "user"
+
+
+
+if __name__ == "__main__":
+    test_agent_with_knowledge_sources()
```
tests/llm/llm_test.py

```diff
@@ -7,7 +7,7 @@ from versionhq.llm.llm_vars import MODELS, LLM_CONTEXT_WINDOW_SIZES
 from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME
 
 
-def
+def dummy_func() -> str:
     return "dummy"
 
 
@@ -18,49 +18,39 @@ def test_create_llm_from_valid_name():
 
     for k, v in MODELS.items():
         for model_name in v:
-            llm = LLM(model=model_name, callbacks=[
+            llm = LLM(model=model_name, callbacks=[dummy_func,])
 
             assert llm._init_model_name == model_name
             assert llm.model == model_name
             assert llm.provider == k
-            assert llm.api_key is not None
-            assert llm.base_url is not None
             assert llm.context_window_size == int(LLM_CONTEXT_WINDOW_SIZES.get(model_name) *0.75) if LLM_CONTEXT_WINDOW_SIZES.get(model_name) is not None else DEFAULT_CONTEXT_WINDOW_SIZE
             assert llm._supports_function_calling() is not None
             assert llm._supports_stop_words() is not None
-            assert litellm.callbacks == [
+            assert litellm.callbacks == [dummy_func,]
 
 
 def test_create_llm_from_invalid_name():
     """
     Test if all the params will be set properly with a givne invalid model name.
     """
-    llm = LLM(model="4o", callbacks=[
+    llm = LLM(model="4o", callbacks=[dummy_func,])
 
     assert llm._init_model_name == "4o"
     assert llm.model == "gpt-4o"
     assert llm.provider == "openai"
-    assert llm.api_key is not None
-    assert llm.base_url is not None
     assert llm.context_window_size == int(128000 * 0.75)
     assert llm._supports_function_calling() == True
     assert llm._supports_stop_words() == True
-    assert litellm.callbacks == [
+    assert litellm.callbacks == [dummy_func,]
 
 
 def test_create_llm_from_provider():
-    llm = LLM(provider="gemini", callbacks=[
+    llm = LLM(provider="gemini", callbacks=[dummy_func,])
 
     assert llm._init_model_name == DEFAULT_MODEL_NAME
     assert llm.model == "gemini/gemini-1.5-flash"
     assert llm.provider == "gemini"
-    assert llm.api_key is not None
-    assert llm.base_url is not None
     assert llm.context_window_size == int(LLM_CONTEXT_WINDOW_SIZES.get(llm.model) *0.75) if LLM_CONTEXT_WINDOW_SIZES.get(llm.model) is not None else DEFAULT_CONTEXT_WINDOW_SIZE
     assert llm._supports_function_calling() == True
     assert llm._supports_stop_words() == True
-    assert litellm.callbacks == [
-
-
-if __name__ == "__main__":
-    test_create_llm_from_valid_name()
+    assert litellm.callbacks == [dummy_func,]
```
tests/task/task_test.py

```diff
@@ -350,7 +350,6 @@ def test_evaluation():
     """
     See if the output will be evaluated accurately - when the task was given eval criteria
     """
-    from versionhq.task.model import Task
     from versionhq.task.evaluate import Evaluation, EvaluationItem
     from versionhq.agent.default_agents import task_evaluator
 
@@ -366,3 +365,54 @@ def test_evaluation():
     assert [isinstance(item, EvaluationItem) and item.criteria in task.eval_criteria for item in res.evaluation.items]
     assert res.evaluation.latency and res.evaluation.tokens and res.evaluation.responsible_agent == task_evaluator
     assert res.evaluation.aggregate_score is not None and res.evaluation.suggestion_summary
+
+
+
+def test_gemini_schema():
+    """
+    See if response schema and tools (func_calling) works.
+    """
+    from tests.task import DemoOutcome
+    agent = Agent(role="demo", goal="demo", llm="gemini/gemini-1.5-pro")
+    task = Task(
+        description="return random values strictly following the given response format.",
+        pydantic_output=DemoOutcome
+    )
+    res = task.execute_sync(agent=agent, context="We are running a test.")
+    assert [
+        getattr(res.pydantic, k) and type(getattr(res.pydantic, k)) == v for k, v in DemoOutcome.__annotations__.items()
+    ]
+
+
+def test_gemini_res_fields():
+    from tests.task import demo_response_fields
+    agent = Agent(role="demo", goal="demo", llm="gemini/gemini-1.5-pro")
+    task = Task(
+        description="return random values strictly following the given response format.",
+        response_fields=demo_response_fields
+    )
+    res = task.execute_sync(agent=agent, context="We are running a test.")
+    assert [k in item.title for item in demo_response_fields for k, v in res.json_dict.items()]
+
+
+def test_gemini_func():
+    from tests.task import demo_response_fields
+    from versionhq.tool.model import Tool
+
+    class DemoTool(Tool):
+        func: Callable[..., Any] = lambda x: "Gemini"
+
+    agent = Agent(role="demo", goal="demo", llm="gemini/gemini-1.5-pro")
+    task = Task(
+        description="Simply execute the given tools.",
+        tools=[DemoTool,],
+        tool_res_as_final=True
+    )
+    res = task.execute_sync(agent=agent, context="We are running a test.")
+    assert res.tool_output and res.raw
+
+
+if __name__ == "__main__":
+    from dotenv import load_dotenv
+    load_dotenv(override=True)
+    test_gemini_func()
```
uv.lock

```diff
@@ -1096,6 +1096,7 @@ dependencies = [
     { name = "hpack" },
     { name = "hyperframe" },
 ]
+sdist = { url = "https://files.pythonhosted.org/packages/1b/38/d7f80fd13e6582fb8e0df8c9a653dcc02b03ca34f4d72f34869298c5baf8/h2-4.2.0.tar.gz", hash = "sha256:c8a52129695e88b1a0578d8d2cc6842bbd79128ac685463b887ee278126ad01f", size = 2150682 }
 wheels = [
     { url = "https://files.pythonhosted.org/packages/d0/9e/984486f2d0a0bd2b024bf4bc1c62688fcafa9e61991f041fb0e2def4a982/h2-4.2.0-py3-none-any.whl", hash = "sha256:479a53ad425bb29af087f3458a61d30780bc818e4ebcf01f0b536ba916462ed0", size = 60957 },
 ]
@@ -4445,7 +4446,7 @@ wheels = [
 
 [[package]]
 name = "versionhq"
-version = "1.1.11.5"
+version = "1.1.11.7"
 source = { editable = "." }
 dependencies = [
     { name = "appdirs" },
```