versionhq 1.1.11.7__tar.gz → 1.1.12.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/.github/workflows/run_tests.yml +6 -5
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/LICENSE +1 -1
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/PKG-INFO +10 -9
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/README.md +8 -7
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/pyproject.toml +1 -1
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/__init__.py +5 -8
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/_utils/logger.py +6 -1
- versionhq-1.1.12.1/src/versionhq/agent/inhouse_agents.py +31 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/agent/model.py +42 -79
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/knowledge/source_docling.py +13 -10
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/llm/llm_vars.py +20 -115
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/llm/model.py +116 -86
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/task/evaluate.py +4 -6
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/task/model.py +38 -33
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/team/model.py +18 -18
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq.egg-info/PKG-INFO +10 -9
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq.egg-info/SOURCES.txt +2 -1
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/agent/agent_test.py +0 -5
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/llm/llm_test.py +1 -2
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/task/__init__.py +18 -12
- versionhq-1.1.12.1/tests/task/llm_connection_test.py +69 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/task/task_test.py +10 -62
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/uv.lock +41 -30
- versionhq-1.1.11.7/src/versionhq/agent/default_agents.py +0 -15
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/.github/workflows/publish.yml +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/.github/workflows/publish_testpypi.yml +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/.github/workflows/security_check.yml +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/.gitignore +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/.pre-commit-config.yaml +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/.python-version +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/SECURITY.md +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/db/preprocess.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/requirements-dev.txt +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/requirements.txt +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/runtime.txt +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/setup.cfg +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/_utils/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/_utils/i18n.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/_utils/process_config.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/_utils/usage_metrics.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/_utils/vars.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/agent/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/agent/parser.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/agent/rpm_controller.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/cli/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/clients/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/clients/customer/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/clients/customer/model.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/clients/product/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/clients/product/model.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/clients/workflow/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/clients/workflow/model.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/knowledge/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/knowledge/_utils.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/knowledge/embedding.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/knowledge/model.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/knowledge/source.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/knowledge/storage.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/llm/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/memory/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/memory/contextual_memory.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/memory/model.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/storage/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/storage/base.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/storage/ltm_sqlite_storage.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/storage/mem0_storage.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/storage/rag_storage.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/storage/task_output_storage.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/storage/utils.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/task/TEMPLATES/Description.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/task/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/task/formatter.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/task/log_handler.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/task/structured_response.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/team/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/team/team_planner.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/tool/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/tool/cache_handler.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/tool/composio_tool.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/tool/composio_tool_vars.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/tool/decorator.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/tool/model.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq/tool/tool_handler.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq.egg-info/dependency_links.txt +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq.egg-info/requires.txt +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/src/versionhq.egg-info/top_level.txt +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/agent/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/cli/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/clients/customer_test.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/clients/product_test.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/clients/workflow_test.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/conftest.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/knowledge/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/knowledge/knowledge_test.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/knowledge/mock_report_compressed.pdf +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/llm/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/memory/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/memory/memory_test.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/team/Prompts/Demo_test.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/team/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/team/team_test.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/tool/__init__.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/tool/composio_test.py +0 -0
- {versionhq-1.1.11.7 → versionhq-1.1.12.1}/tests/tool/tool_test.py +0 -0
**.github/workflows/run_tests.yml**

```diff
@@ -6,15 +6,16 @@ permissions:
   contents: write
 
 env:
+  DEFAULT_MODEL_NAME: ${{ secrets.DEFAULT_MODEL_NAME }}
+  DEFAULT_MODEL_PROVIDER_NAME: ${{ secrets.DEFAULT_MODEL_PROVIDER_NAME }}
   LITELLM_API_KEY: ${{ secrets.LITELLM_API_KEY }}
   OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-  DEFAULT_REDIRECT_URL: ${{ secrets.DEFAULT_REDIRECT_URL }}
-  DEFAULT_USER_ID: ${{ secrets.DEFAULT_USER_ID }}
-  COMPOSIO_API_KEY: ${{ secrets.COMPOSIO_API_KEY }}
-  DEFAULT_MODEL_NAME: ${{ secrets.DEFAULT_MODEL_NAME }}
   GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
   ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
-
+  OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
+  COMPOSIO_API_KEY: ${{ secrets.COMPOSIO_API_KEY }}
+  DEFAULT_REDIRECT_URL: ${{ secrets.DEFAULT_REDIRECT_URL }}
+  DEFAULT_USER_ID: ${{ secrets.DEFAULT_USER_ID }}
   MEM0_API_KEY: ${{ secrets.MEM0_API_KEY }}
 
 jobs:
```
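These workflow-level `env` entries surface repository secrets to every job as plain environment variables; the reordering groups the new model defaults first and adds `OPENROUTER_API_KEY` alongside the OpenRouter support introduced below. A minimal sketch of how a test might consume them (the fallback values here are hypothetical, not the package's):

```python
import os

# CI injects these via the workflow's `env:` block; locally they would
# come from a .env file or the shell. The fallbacks are illustrative only.
model_name = os.environ.get("DEFAULT_MODEL_NAME", "gpt-4o")
provider = os.environ.get("DEFAULT_MODEL_PROVIDER_NAME", "openai")
print(f"running tests against {provider}/{model_name}")
```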
**PKG-INFO**

```diff
@@ -1,11 +1,11 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.1.11.7
+Version: 1.1.12.1
 Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
 
-    Copyright (c) 2024 Version IO Sdn. Bhd.
+    Copyright (c) 2024-2025 Version IO Sdn. Bhd.
 
     Permission is hereby granted, free of charge, to any person obtaining a copy
     of this software and associated documentation files (the "Software"), to deal
```
```diff
@@ -78,12 +78,12 @@ Requires-Dist: numpy>=1.26.4; extra == "numpy"
 
 
 [](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
-![python ver](https://img.shields.io/badge/Python-3.12/3.13-purple.svg?logo=python&logoColor=white)
+![python ver](https://img.shields.io/badge/Python-3.11/3.12/3.13-purple.svg?logo=python&logoColor=white)
 
 
 
-LLM orchestration frameworks to deploy multi-agent systems with
+LLM orchestration frameworks to deploy multi-agent systems and automate complex tasks with network formations.
 
 **Visit:**
 
```
```diff
@@ -122,15 +122,16 @@ LLM orchestration frameworks to deploy multi-agent systems with task-based forma
 
 ## Key Features
 
-Generate
+Generate multi-agent systems based on the task complexity, execute tasks, and evaluate output based on the given criteria.
 
-
+Agents are model-agnostic, and can handle and share RAG tools, knowledge, memory, and callbacks among other agents. (self-learn)
 
 
 ### Agent formation
-Depending on the task complexity, agents can make a different formation.
 
-
+Agents adapt their formation based on task complexity.
+
+You can specify a desired formation or allow the agents to determine it autonomously (default).
 
 
 | | **Solo Agent** | **Supervising** | **Network** | **Random** |
```
**README.md**

```diff
@@ -2,12 +2,12 @@
 
 
 [](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
-![python ver](https://img.shields.io/badge/Python-3.12/3.13-purple.svg?logo=python&logoColor=white)
+![python ver](https://img.shields.io/badge/Python-3.11/3.12/3.13-purple.svg?logo=python&logoColor=white)
 
 
 
-LLM orchestration frameworks to deploy multi-agent systems with
+LLM orchestration frameworks to deploy multi-agent systems and automate complex tasks with network formations.
 
 **Visit:**
 
```
```diff
@@ -46,15 +46,16 @@ LLM orchestration frameworks to deploy multi-agent systems with task-based forma
 
 ## Key Features
 
-Generate
+Generate multi-agent systems based on the task complexity, execute tasks, and evaluate output based on the given criteria.
 
-
+Agents are model-agnostic, and can handle and share RAG tools, knowledge, memory, and callbacks among other agents. (self-learn)
 
 
 ### Agent formation
-Depending on the task complexity, agents can make a different formation.
 
-
+Agents adapt their formation based on task complexity.
+
+You can specify a desired formation or allow the agents to determine it autonomously (default).
 
 
 | | **Solo Agent** | **Supervising** | **Network** | **Random** |
```
**pyproject.toml**

```diff
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__", "*.egg-info"]
 
 [project]
 name = "versionhq"
-version = "1.1.11.7"
+version = "1.1.12.1"
 authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
 description = "LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows"
 readme = "README.md"
```
**src/versionhq/__init__.py**

```diff
@@ -1,11 +1,8 @@
+# silence some warnings
 import warnings
-
-warnings.filterwarnings(
-    action="ignore",
-    message="Pydantic serializer warnings:",
-    category=UserWarning,
-    module="pydantic.main",
-)
+warnings.filterwarnings(action="ignore", message="Pydantic serializer warnings:", category=UserWarning, module="pydantic.main")
+warnings.filterwarnings(action="ignore", category=UserWarning, module="pydantic._internal")
+warnings.filterwarnings(action="ignore", module="LiteLLM:utils")
 
 from versionhq.agent.model import Agent
 from versionhq.clients.customer.model import Customer
```
```diff
@@ -17,7 +14,7 @@ from versionhq.tool.model import Tool
 from versionhq.tool.composio_tool import ComposioHandler
 
 
-__version__ = "1.1.11.7"
+__version__ = "1.1.12.1"
 __all__ = [
     "Agent",
     "Customer",
```
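The consolidated `filterwarnings` calls register process-wide filters at import time, so any application importing `versionhq` gets the noisy Pydantic and LiteLLM warnings suppressed. A small sketch of the standard-library mechanism (not package code): the `module` argument is a regex matched against the module in which the warning is raised, so unrelated warnings still surface.

```python
import warnings

# Same shape as the filters above: ignore UserWarnings raised from
# modules matching the regex "pydantic._internal".
warnings.filterwarnings(action="ignore", category=UserWarning, module="pydantic._internal")

warnings.warn("raised from __main__", UserWarning)  # still shown: module does not match
```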
**src/versionhq/_utils/logger.py**

```diff
@@ -36,10 +36,15 @@ class Printer:
 
 
 class Logger(BaseModel):
+    """
+    Control CLI messages.
+    Color: red = error, yellow = warning, blue = info (from vhq), green = info (from third party)
+    """
+
     verbose: bool = Field(default=True)
     _printer: Printer = PrivateAttr(default_factory=Printer)
 
     def log(self, level, message, color="yellow"):
         if self.verbose:
             timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-            self._printer.print(f"\n{timestamp} - versionHQ
+            self._printer.print(f"\n{timestamp} - versionHQ [{level.upper()}]: {message}", color=color)
```
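A minimal usage sketch of the documented `Logger`, assuming it stays importable from `versionhq._utils.logger` as the file path suggests:

```python
from versionhq._utils.logger import Logger

logger = Logger(verbose=True)
logger.log(level="info", message="kicked off the task", color="blue")  # blue = info from vhq
logger.log(level="warning", message="retrying once")                   # defaults to yellow
# With verbose=False, log() silently returns without printing.
```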
**src/versionhq/agent/inhouse_agents.py (new file)**

```diff
@@ -0,0 +1,31 @@
+from versionhq.agent.model import Agent
+from versionhq.llm.model import DEFAULT_MODEL_NAME
+
+"""
+In-house agents to be called across the project.
+[Rules] agents' names and roles start with `vhq_`.
+"""
+
+vhq_client_manager = Agent(
+    role="vhq-Client Manager",
+    goal="Efficiently communicate with the client on the task progress",
+    llm=DEFAULT_MODEL_NAME
+)
+
+vhq_task_evaluator = Agent(
+    role="vhq-Task Evaluator",
+    goal="score the output according to the given evaluation criteria.",
+    llm=DEFAULT_MODEL_NAME,
+    llm_config=dict(top_p=0.8, top_k=30, max_tokens=5000, temperature=0.9),
+    maxit=1,
+    max_retry_limit=1
+)
+
+vhq_formation_planner = Agent(
+    role="vhq-Formation Planner",
+    goal="Plan a formation of agents based on the given task descirption.",
+    llm="gemini/gemini-2.0-flash-exp",
+    llm_config=dict(top_p=0.8, top_k=30, temperature=0.9),
+    maxit=1,
+    max_retry_limit=1
+)
```
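Call sites can now import these shared agents instead of rebuilding them (`default_agents.py`, removed in this release, served the same purpose). A hypothetical usage sketch, assuming the constructor kwargs above become plain attributes on `Agent`:

```python
from versionhq.agent.inhouse_agents import vhq_task_evaluator

# The evaluator is tuned for a single pass: maxit=1, max_retry_limit=1.
print(vhq_task_evaluator.role)  # "vhq-Task Evaluator"
```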
**src/versionhq/agent/model.py**

```diff
@@ -6,10 +6,10 @@ from typing_extensions import Self
 from dotenv import load_dotenv
 import litellm
 
-from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator
+from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator
 from pydantic_core import PydanticCustomError
 
-from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME
+from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME, PROVIDERS
 from versionhq.tool.model import Tool, ToolSet
 from versionhq.knowledge.model import BaseKnowledgeSource, Knowledge
 from versionhq.memory.contextual_memory import ContextualMemory
```
```diff
@@ -162,90 +162,47 @@ class Agent(BaseModel):
     @model_validator(mode="after")
     def set_up_llm(self) -> Self:
         """
-        Set up
-        Pass the model config params: `llm`, `max_tokens`, `max_execution_time`, `callbacks`,`respect_context_window` to the LLM class.
-        The base model is selected on the client app, else use the default model.
+        Set up `llm` and `function_calling_llm` as valid LLM objects using the given values.
         """
-
         self.agent_ops_agent_name = self.role
+        self.llm = self._convert_to_llm_class(llm=self.llm)
 
-        if
-
-
-
-
-            model_name = self.llm if self.llm is not None else DEFAULT_MODEL_NAME
-            llm = LLM(model=model_name)
-            updated_llm = self._set_llm_params(llm)
-            self.llm = updated_llm
-
-        else:
-            if isinstance(self.llm, dict):
-                model_name = self.llm.pop("model_name", self.llm.pop("deployment_name", str(self.llm)))
-                llm = LLM(model=model_name if model_name is not None else DEFAULT_MODEL_NAME)
-                updated_llm = self._set_llm_params(llm, { k: v for k, v in self.llm.items() if v is not None })
-                self.llm = updated_llm
-
-            else:
-                model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
-                llm = LLM(model=model_name)
-                llm_params = {
-                    "max_tokens": (getattr(self.llm, "max_tokens") or self.max_tokens or 3000),
-                    "timeout": getattr(self.llm, "timeout", self.max_execution_time),
-                    "callbacks": getattr(self.llm, "callbacks", None),
-                    "temperature": getattr(self.llm, "temperature", None),
-                    "logprobs": getattr(self.llm, "logprobs", None),
-                    "api_key": getattr(self.llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
-                    "base_url": getattr(self.llm, "base_url", None),
-                }
-                updated_llm = self._set_llm_params(llm, llm_params)
-                self.llm = updated_llm
-
-
-        """
-        Set up funcion_calling LLM as well.
-        Check if the model supports function calling, setup LLM instance accordingly, using the same params with the LLM.
-        """
-        if self.function_calling_llm:
-            if isinstance(self.function_calling_llm, LLM):
-                if self.function_calling_llm._supports_function_calling() == False:
-                    self.function_calling_llm = LLM(model=DEFAULT_MODEL_NAME)
-
-                updated_llm = self._set_llm_params(self.function_calling_llm)
-                self.function_calling_llm = updated_llm
+        function_calling_llm = self.function_calling_llm if self.function_calling_llm else self.llm if self.llm else None
+        function_calling_llm = self._convert_to_llm_class(llm=function_calling_llm)
+        if function_calling_llm._supports_function_calling():
+            self.function_calling_llm = function_calling_llm
+        return self
 
-            elif isinstance(self.function_calling_llm, str):
-                llm = LLM(model=self.function_calling_llm)
 
-
-
+    def _convert_to_llm_class(self, llm: Any | None) -> LLM:
+        llm = llm if llm is not None else DEFAULT_MODEL_NAME
 
-
-
+        match llm:
+            case LLM():
+                return self._set_llm_params(llm=llm)
 
-
-
-
-                llm = LLM(model=model_name)
-                updated_llm = self._set_llm_params(llm, { k: v for k, v in self.function_calling_llm.items() if v is not None })
-                self.function_calling_llm = updated_llm
+            case str():
+                llm_obj = LLM(model=llm)
+                return self._set_llm_params(llm=llm_obj)
 
-
-
-
-
-                "max_tokens": (getattr(self.function_calling_llm, "max_tokens") or self.max_tokens or 3000),
-                "timeout": getattr(self.function_calling_llm, "timeout", self.max_execution_time),
-                "callbacks": getattr(self.function_calling_llm, "callbacks", None),
-                "temperature": getattr(self.function_calling_llm, "temperature", None),
-                "logprobs": getattr(self.function_calling_llm, "logprobs", None),
-                "api_key": getattr(self.function_calling_llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
-                "base_url": getattr(self.function_calling_llm, "base_url", None),
-                }
-                updated_llm = self._set_llm_params(llm, llm_params)
-                self.function_calling_llm = updated_llm
+            case dict():
+                model_name = llm.pop("model_name", llm.pop("deployment_name", str(llm)))
+                llm_obj = LLM(model=model_name if model_name else DEFAULT_MODEL_NAME)
+                return self._set_llm_params(llm_obj, { k: v for k, v in llm.items() if v is not None })
 
-
+            case _:
+                model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
+                llm_obj = LLM(model=model_name)
+                llm_params = {
+                    "max_tokens": (getattr(llm, "max_tokens") or self.max_tokens or 3000),
+                    "timeout": getattr(llm, "timeout", self.max_execution_time),
+                    "callbacks": getattr(llm, "callbacks", None),
+                    "temperature": getattr(llm, "temperature", None),
+                    "logprobs": getattr(llm, "logprobs", None),
+                    "api_key": getattr(llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
+                    "base_url": getattr(llm, "base_url", None),
+                }
+                return self._set_llm_params(llm=llm_obj, config=llm_params)
 
 
     def _set_llm_params(self, llm: LLM, config: Dict[str, Any] = None) -> LLM:
```
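The rewrite collapses the old nested `if/elif` normalization into a single `_convert_to_llm_class` helper built on structural pattern matching (Python 3.10+). A self-contained sketch of the same dispatch idiom, with stand-in types rather than the package's `LLM` class:

```python
from dataclasses import dataclass

@dataclass
class FakeLLM:  # stand-in for versionhq's LLM
    model: str

def to_llm(llm: object | None) -> FakeLLM:
    llm = llm if llm is not None else "gpt-4o"  # stand-in default model
    match llm:
        case FakeLLM():          # already normalized: pass through
            return llm
        case str():              # bare model name
            return FakeLLM(model=llm)
        case dict():             # config mapping carrying a model_name key
            return FakeLLM(model=llm.get("model_name") or "gpt-4o")
        case _:                  # duck-typed object exposing .model
            return FakeLLM(model=getattr(llm, "model", str(llm)))

print(to_llm("gemini/gemini-2.0-flash-exp").model)
print(to_llm({"model_name": "claude-3-opus-20240229"}).model)
```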
```diff
@@ -257,6 +214,11 @@ class Agent(BaseModel):
         llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
         llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
 
+        if llm.provider is None:
+            provider_name = llm.model.split("/")[0]
+            valid_provider = provider_name if provider_name in PROVIDERS else None
+            llm.provider = valid_provider
+
         if self.callbacks:
             llm.callbacks = self.callbacks
             llm._set_callbacks(llm.callbacks)
```
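The added block infers the provider from the litellm-style `provider/model` prefix and keeps it only when it appears in the `PROVIDERS` list (defined in `llm_vars.py`, extended below). The same logic in isolation:

```python
# PROVIDERS as listed in src/versionhq/llm/llm_vars.py after this release
PROVIDERS = ["openai", "gemini", "openrouter", "huggingface", "anthropic",
             "sagemaker", "bedrock", "ollama", "watson", "azure", "cerebras", "llama"]

def infer_provider(model: str) -> str | None:
    provider_name = model.split("/")[0]
    return provider_name if provider_name in PROVIDERS else None

print(infer_provider("gemini/gemini-2.0-flash-exp"))  # "gemini"
print(infer_provider("gpt-4o"))                       # None: no provider prefix
```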
```diff
@@ -454,7 +416,7 @@ class Agent(BaseModel):
                 task.tokens = self.llm._tokens
 
                 task_execution_counter += 1
-                self._logger.log(level="info", message=f"Agent response: {raw_response}", color="
+                self._logger.log(level="info", message=f"Agent response: {raw_response}", color="green")
                 return raw_response
 
             except Exception as e:
```
```diff
@@ -470,7 +432,7 @@ class Agent(BaseModel):
                 iterations += 1
 
             task_execution_counter += 1
-            self._logger.log(level="info", message=f"Agent #{task_execution_counter} response: {raw_response}", color="
+            self._logger.log(level="info", message=f"Agent #{task_execution_counter} response: {raw_response}", color="green")
             return raw_response
 
         if not raw_response:
```
```diff
@@ -515,6 +477,7 @@ class Agent(BaseModel):
             task_prompt += memory.strip()
 
 
+        ## comment out for now
         # if self.team and self.team._train:
         #     task_prompt = self._training_handler(task_prompt=task_prompt)
         # else:
```
**src/versionhq/knowledge/source_docling.py**

```diff
@@ -12,17 +12,11 @@ try:
 except ImportError:
     import envoy
     envoy.run("uv add docling --optional docling")
-
-    from docling.datamodel.base_models import InputFormat
-    from docling.document_converter import DocumentConverter
-    from docling.exceptions import ConversionError
-    from docling_core.transforms.chunker.hierarchical_chunker import HierarchicalChunker
-    from docling_core.types.doc.document import DoclingDocument
     DOCLING_AVAILABLE = True
 except:
     DOCLING_AVAILABLE = False
 
-from pydantic import Field
+from pydantic import Field
 
 from versionhq.knowledge.source import BaseKnowledgeSource
 from versionhq.storage.utils import fetch_db_storage_path
```
```diff
@@ -52,11 +46,20 @@ class DoclingSource(BaseKnowledgeSource):
     ))
 
     def __init__(self, *args, **kwargs):
-        if
-
-
+        if DOCLING_AVAILABLE:
+            from docling.datamodel.base_models import InputFormat
+            from docling.document_converter import DocumentConverter
+            from docling.exceptions import ConversionError
+            from docling_core.transforms.chunker.hierarchical_chunker import HierarchicalChunker
+            from docling_core.types.doc.document import DoclingDocument
+
             super().__init__(*args, **kwargs)
 
+        else:
+            raise ImportError("The docling package is required. Please install the package using: $ uv add docling.")
+        # else:
+        #     super().__init__(*args, **kwargs)
+
 
     def _convert_source_to_docling_documents(self) -> List["DoclingDocument"]:
         conv_results_iter = self.document_converter.convert_all(self.valid_file_paths)
```
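The docling imports move from module scope into `DoclingSource.__init__`, so merely importing `versionhq.knowledge.source_docling` no longer requires docling; the cost is paid, and the error raised, only when a `DoclingSource` is actually constructed. The general shape of this deferred-import pattern:

```python
# Probe the optional dependency once at import time, then import for real
# inside the constructor and fail loudly if it is missing.
try:
    import docling  # noqa: F401
    DOCLING_AVAILABLE = True
except ImportError:
    DOCLING_AVAILABLE = False

class OptionalDoclingSource:  # hypothetical stand-in for DoclingSource
    def __init__(self):
        if DOCLING_AVAILABLE:
            from docling.document_converter import DocumentConverter
            self.document_converter = DocumentConverter()
        else:
            raise ImportError("The docling package is required. Install it with: uv add docling")
```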
**src/versionhq/llm/llm_vars.py**

```diff
@@ -6,17 +6,21 @@ JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_
 PROVIDERS = [
     "openai",
     "gemini",
-    "
-
+    "openrouter",
+    "huggingface",
     "anthropic",
+    "sagemaker",
+    "bedrock",
     "ollama",
     "watson",
-    "bedrock",
     "azure",
     "cerebras",
     "llama",
 ]
 
+ENDPOINT_PROVIDERS = [
+    "huggingface",
+]
 
 """
 List of models available on the framework.
```
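`ENDPOINT_PROVIDERS` appears to single out providers reached through a hosted inference endpoint rather than a plain completion API; only `huggingface` qualifies so far. A speculative sketch of how the two lists could be consulted (the helper is hypothetical):

```python
PROVIDERS = ["openai", "gemini", "openrouter", "huggingface", "anthropic",
             "sagemaker", "bedrock", "ollama", "watson", "azure", "cerebras", "llama"]
ENDPOINT_PROVIDERS = ["huggingface"]

def needs_endpoint(provider: str) -> bool:
    # hypothetical helper: these providers require a deployed endpoint URL
    return provider in PROVIDERS and provider in ENDPOINT_PROVIDERS

print(needs_endpoint("huggingface"))  # True
print(needs_endpoint("openai"))       # False
```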
```diff
@@ -44,10 +48,17 @@ MODELS = {
     "anthropic": [
         "claude-3-5-sonnet-20241022",
         "claude-3-5-sonnet-20240620",
-        "claude-3-
+        "claude-3-haiku-2024030",
         "claude-3-opus-20240229",
         "claude-3-haiku-20240307",
     ],
+    "openrouter": [
+        "openrouter/deepseek/deepseek-r1:free",
+        "openrouter/qwen/qwen-2.5-72b-instruct",
+    ],
+    "huggingface": [
+        "huggingface/qwen/qwen2.5-VL-72B-Instruct",
+    ],
     # "sagemaker": [
     #     "sagemaker/huggingface-text2text-flan-t5-base",
     #     "sagemaker/huggingface-llm-gemma-7b",
```
```diff
@@ -61,13 +72,7 @@ MODELS = {
         "ollama/llama3.1",
         "ollama/mixtral",
         "ollama/mixtral-8x22B-Instruct-v0.1",
-
-    ],
-    "deepseek": [
-        "deepseek/deepseek-reasoner",
-
     ],
-
     # "watson": [
     #     "watsonx/meta-llama/llama-3-1-70b-instruct",
     #     "watsonx/meta-llama/llama-3-1-8b-instruct",
```
```diff
@@ -83,7 +88,6 @@ MODELS = {
         "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
         "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
         "bedrock/anthropic.claude-3-opus-20240229-v1:0",
-        # "bedrock/anthropic.claude-v2:1",
         "bedrock/anthropic.claude-v2",
         "bedrock/anthropic.claude-instant-v1",
         "bedrock/meta.llama3-1-405b-instruct-v1:0",
```
```diff
@@ -109,24 +113,17 @@ MODELS = {
 KEYS = {
     "openai": ["OPENAI_API_KEY"],
     "gemini": ["GEMINI_API_KEY"],
-    "sagemaker": ["AWS_ACCESS_KEY_ID", "ADW_SECURET_ACCESS_KEY", "AWS_REGION_NAME"],
     "anthropic": ["ANTHROPIC_API_KEY"],
+    "huggingface": ["HUGGINGFACE_API_KEY", ],
+    "sagemaker": ["AWS_ACCESS_KEY_ID", "ADW_SECURET_ACCESS_KEY", "AWS_REGION_NAME"],
 }
 
 
-"""
-Use base_url to specify
-"""
-BASE_URLS = {
-    "deepseek": "https://api.deepseek.com"
-}
-
 
 """
 Max input token size by the model.
 """
 LLM_CONTEXT_WINDOW_SIZES = {
-    "gpt-3.5-turbo": 8192,
     "gpt-4": 8192,
     "gpt-4o": 128000,
     "gpt-4o-mini": 128000,
```
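With `BASE_URLS` (and its deepseek entry) gone, `KEYS` is the remaining provider-to-credentials map. A minimal sketch of the pre-flight check it enables (the helper is illustrative; the `ADW_SECURET_ACCESS_KEY` typo is in the package itself):

```python
import os

KEYS = {
    "openai": ["OPENAI_API_KEY"],
    "gemini": ["GEMINI_API_KEY"],
    "anthropic": ["ANTHROPIC_API_KEY"],
    "huggingface": ["HUGGINGFACE_API_KEY"],
    "sagemaker": ["AWS_ACCESS_KEY_ID", "ADW_SECURET_ACCESS_KEY", "AWS_REGION_NAME"],
}

def missing_keys(provider: str) -> list[str]:
    """Return the env var names required for `provider` that are unset."""
    return [k for k in KEYS.get(provider, []) if not os.environ.get(k)]

print(missing_keys("sagemaker"))  # e.g. ["AWS_ACCESS_KEY_ID", ...] when unset
```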
```diff
@@ -145,6 +142,7 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "claude-3-sonnet-20240229": 200000,
     "claude-3-opus-20240229": 200000,
     "claude-3-haiku-20240307": 200000,
+    "claude-3-5-sonnet-2024102": 200000,
 
     "deepseek-chat": 128000,
     "deepseek/deepseek-reasoner": 8192,
```
```diff
@@ -162,111 +160,18 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "llama3-70b-8192": 8192,
     "llama3-8b-8192": 8192,
     "mixtral-8x7b-32768": 32768,
-    "claude-3-5-sonnet-2024102": 200000,
-}
-
-
-
-
-LLM_BASE_URL_KEY_NAMES = {
-    "openai": "OPENAI_API_BASE",
-    "gemini": "GEMINI_API_BASE",
-    "anthropic": "ANTHROPIC_API_BASE",
-}
-
-LLM_VARS = {
-    "openai": [
-        {
-            "prompt": "Enter your OPENAI API key (press Enter to skip)",
-            "key_name": "OPENAI_API_KEY",
-        }
-    ],
-    "anthropic": [
-        {
-            "prompt": "Enter your ANTHROPIC API key (press Enter to skip)",
-            "key_name": "ANTHROPIC_API_KEY",
-        }
-    ],
-    "gemini": [
-        {
-            "prompt": "Enter your GEMINI API key (press Enter to skip)",
-            "key_name": "GEMINI_API_KEY",
-        }
-    ],
-    "watson": [
-        {
-            "prompt": "Enter your WATSONX URL (press Enter to skip)",
-            "key_name": "WATSONX_URL",
-        },
-        {
-            "prompt": "Enter your WATSONX API Key (press Enter to skip)",
-            "key_name": "WATSONX_APIKEY",
-        },
-        {
-            "prompt": "Enter your WATSONX Project Id (press Enter to skip)",
-            "key_name": "WATSONX_PROJECT_ID",
-        },
-    ],
-    "ollama": [
-        {
-            "default": True,
-            "API_BASE": "http://localhost:11434",
-        }
-    ],
-    "bedrock": [
-        {
-            "prompt": "Enter your AWS Access Key ID (press Enter to skip)",
-            "key_name": "AWS_ACCESS_KEY_ID",
-        },
-        {
-            "prompt": "Enter your AWS Secret Access Key (press Enter to skip)",
-            "key_name": "AWS_SECRET_ACCESS_KEY",
-        },
-        {
-            "prompt": "Enter your AWS Region Name (press Enter to skip)",
-            "key_name": "AWS_REGION_NAME",
-        },
-    ],
-    "azure": [
-        {
-            "prompt": "Enter your Azure deployment name (must start with 'azure/')",
-            "key_name": "model",
-        },
-        {
-            "prompt": "Enter your AZURE API key (press Enter to skip)",
-            "key_name": "AZURE_API_KEY",
-        },
-        {
-            "prompt": "Enter your AZURE API base URL (press Enter to skip)",
-            "key_name": "AZURE_API_BASE",
-        },
-        {
-            "prompt": "Enter your AZURE API version (press Enter to skip)",
-            "key_name": "AZURE_API_VERSION",
-        },
-    ],
-    "cerebras": [
-        {
-            "prompt": "Enter your Cerebras model name (must start with 'cerebras/')",
-            "key_name": "model",
-        },
-        {
-            "prompt": "Enter your Cerebras API version (press Enter to skip)",
-            "key_name": "CEREBRAS_API_KEY",
-        },
-    ],
 }
 
 
 """
-Params for litellm.completion()
+Params for litellm.completion().
 """
 
 PARAMS = {
     "litellm": [
         "api_base",
-        "api_version,"
+        "api_version,",
         "num_retries",
         "context_window_fallback_dict",
         "fallbacks",
```