versionhq 1.1.9.12__tar.gz → 1.1.9.14__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/.github/workflows/run_tests.yml +6 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/PKG-INFO +2 -2
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/README.md +1 -1
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/pyproject.toml +1 -1
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/__init__.py +1 -1
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/agent/model.py +103 -112
- versionhq-1.1.9.12/src/versionhq/llm/llm_vars.py → versionhq-1.1.9.14/src/versionhq/llm/llm_variables.py +150 -62
- versionhq-1.1.9.14/src/versionhq/llm/model.py +281 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/task/__init__.py +1 -1
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/task/model.py +8 -8
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/team/team_planner.py +1 -1
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq.egg-info/PKG-INFO +2 -2
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq.egg-info/SOURCES.txt +3 -1
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/tests/agent/agent_test.py +88 -2
- versionhq-1.1.9.14/tests/llm/llm_test.py +62 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/tests/task/task_test.py +3 -7
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/tests/team/team_test.py +1 -5
- versionhq-1.1.9.14/tests/tool/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/tests/tool/composio_test.py +1 -1
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/tests/tool/tool_test.py +0 -4
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/uv.lock +106 -103
- versionhq-1.1.9.12/src/versionhq/llm/model.py +0 -246
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/.github/workflows/publish.yml +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/.github/workflows/publish_testpypi.yml +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/.github/workflows/security_check.yml +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/.gitignore +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/.pre-commit-config.yaml +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/.python-version +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/LICENSE +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/SECURITY.md +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/db/preprocess.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/requirements-dev.txt +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/requirements.txt +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/runtime.txt +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/setup.cfg +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/_utils/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/_utils/cache_handler.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/_utils/i18n.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/_utils/logger.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/_utils/process_config.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/_utils/rpm_controller.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/_utils/usage_metrics.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/agent/TEMPLATES/Backstory.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/agent/TEMPLATES/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/agent/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/agent/parser.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/cli/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/clients/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/clients/customer/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/clients/customer/model.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/clients/product/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/clients/product/model.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/clients/workflow/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/clients/workflow/model.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/llm/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/storage/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/storage/task_output_storage.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/task/formatter.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/task/log_handler.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/team/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/team/model.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/tool/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/tool/composio_tool.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/tool/decorator.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/tool/model.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq/tool/tool_handler.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq.egg-info/dependency_links.txt +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq.egg-info/requires.txt +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/src/versionhq.egg-info/top_level.txt +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/tests/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/tests/agent/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/tests/cli/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/tests/clients/customer_test.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/tests/clients/product_test.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/tests/clients/workflow_test.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/tests/conftest.py +0 -0
- {versionhq-1.1.9.12/tests/task → versionhq-1.1.9.14/tests/llm}/__init__.py +0 -0
- {versionhq-1.1.9.12/tests/team → versionhq-1.1.9.14/tests/task}/__init__.py +0 -0
- {versionhq-1.1.9.12 → versionhq-1.1.9.14}/tests/team/Prompts/Demo_test.py +0 -0
- {versionhq-1.1.9.12/tests/tool → versionhq-1.1.9.14/tests/team}/__init__.py +0 -0
--- versionhq-1.1.9.12/.github/workflows/run_tests.yml
+++ versionhq-1.1.9.14/.github/workflows/run_tests.yml
@@ -8,9 +8,15 @@ permissions:
 env:
   LITELLM_API_KEY: ${{ secrets.LITELLM_API_KEY }}
   OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+  OPENAI_API_BASE: ${{ secrets.OPENAI_API_BASE }}
   DEFAULT_REDIRECT_URL: ${{ secrets.DEFAULT_REDIRECT_URL }}
   DEFAULT_USER_ID: ${{ secrets.DEFAULT_USER_ID }}
   COMPOSIO_API_KEY: ${{ secrets.COMPOSIO_API_KEY }}
+  DEFAULT_MODEL_NAME: ${{ secrets.DEFAULT_MODEL_NAME }}
+  GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
+  GEMINI_API_BASE: ${{ secrets.GEMINI_API_BASE }}
+  ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
+  ANTHROPIC_API_BASE: ${{ secrets.ANTHROPIC_API_BASE }}

 jobs:
   run_test:
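The six new `env` entries wire per-provider credentials and a default model into the test job, matching the multi-provider LLM support added in this release. A minimal sketch (not code from the package; the fallback value is an assumption) of reading them in a test:

```python
import os

# Provider credentials the workflow now injects as environment variables.
gemini_key = os.environ.get("GEMINI_API_KEY")
anthropic_key = os.environ.get("ANTHROPIC_API_KEY")
# DEFAULT_MODEL_NAME selects the model under test; "gpt-4o-mini" is an assumed fallback.
default_model = os.environ.get("DEFAULT_MODEL_NAME", "gpt-4o-mini")
```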
--- versionhq-1.1.9.12/PKG-INFO
+++ versionhq-1.1.9.14/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: versionhq
-Version: 1.1.9.12
+Version: 1.1.9.14
 Summary: LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows
 Author-email: Kuriko Iwai <kuriko@versi0n.io>
 License: MIT License
@@ -60,7 +60,7 @@ Requires-Dist: composio-langchain>=0.6.12

 [badge image]
 [badge image](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
-[badge image]
+[badge image (updated)]
 [badge image]
 [badge image]
--- versionhq-1.1.9.12/README.md
+++ versionhq-1.1.9.14/README.md
@@ -2,7 +2,7 @@

 [badge image]
 [badge image](https://github.com/versionHQ/multi-agent-system/actions/workflows/publish.yml)
-[badge image]
+[badge image (updated)]
 [badge image]
 [badge image]
--- versionhq-1.1.9.12/pyproject.toml
+++ versionhq-1.1.9.14/pyproject.toml
@@ -15,7 +15,7 @@ exclude = ["test*", "__pycache__"]

 [project]
 name = "versionhq"
-version = "1.1.9.12"
+version = "1.1.9.14"
 authors = [{ name = "Kuriko Iwai", email = "kuriko@versi0n.io" }]
 description = "LLM orchestration frameworks for model-agnostic AI agents that handle complex outbound workflows"
 readme = "README.md"
--- versionhq-1.1.9.12/src/versionhq/agent/model.py
+++ versionhq-1.1.9.14/src/versionhq/agent/model.py
@@ -1,6 +1,6 @@
 import os
 import uuid
-from typing import Any, Dict, List, Optional, TypeVar
+from typing import Any, Dict, List, Optional, TypeVar, Callable
 from typing_extensions import Self
 from dotenv import load_dotenv
 from pydantic import UUID4, BaseModel, Field, InstanceOf, PrivateAttr, model_validator, field_validator
@@ -9,8 +9,8 @@ from pydantic_core import PydanticCustomError
 from versionhq._utils.logger import Logger
 from versionhq._utils.rpm_controller import RPMController
 from versionhq._utils.usage_metrics import UsageMetrics
-from versionhq.llm.llm_vars import LLM_VARS
-from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW
+from versionhq.llm.llm_variables import LLM_VARS
+from versionhq.llm.model import LLM, DEFAULT_CONTEXT_WINDOW_SIZE, DEFAULT_MODEL_NAME
 from versionhq.task import TaskOutputFormat
 from versionhq.task.model import ResponseField
 from versionhq.tool.model import Tool, ToolSet
@@ -87,6 +87,7 @@ class Agent(BaseModel):
     _request_within_rpm_limit: Any = PrivateAttr(default=None)
     _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
     _times_executed: int = PrivateAttr(default=0)
+    config: Optional[Dict[str, Any]] = Field(default=None, exclude=True, description="values to add to the Agent class")

     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
     role: str = Field(description="role of the agent - used in summary and logs")
@@ -102,11 +103,11 @@ class Agent(BaseModel):
     allow_code_execution: Optional[bool] = Field(default=False, description="Enable code execution for the agent.")
     max_retry_limit: int = Field(default=2,description="max. number of retries for the task execution when an error occurs. cascaed to the `invoke` function")
     max_iter: Optional[int] = Field(default=25,description="max. number of iterations for an agent to execute a task")
-    step_callback: Optional[Any] = Field(default=None,description="…
+    step_callback: Optional[Callable | Any] = Field(default=None, description="callback to be executed after each step of the agent execution")

     # llm settings cascaded to the LLM model
-    llm: str | InstanceOf[LLM] | Any = Field(default=None)
-    function_calling_llm: str | InstanceOf[LLM] | Any = Field(default=None)
+    llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
+    function_calling_llm: str | InstanceOf[LLM] | Dict[str, Any] = Field(default=None)
     respect_context_window: bool = Field(default=True,description="Keep messages under the context window size by summarizing content")
     max_tokens: Optional[int] = Field(default=None, description="max. number of tokens for the agent's execution")
     max_execution_time: Optional[int] = Field(default=None, description="max. execution time for an agent to execute a task")
@@ -119,16 +120,11 @@ class Agent(BaseModel):
     response_template: Optional[str] = Field(default=None, description="Response format for the agent.")

     # config, cache, error handling
-    config: Optional[Dict[str, Any]] = Field(default=None, exclude=True, description="Configuration for the agent")
     formatting_errors: int = Field(default=0, description="Number of formatting errors.")
     agent_ops_agent_name: str = None
     agent_ops_agent_id: str = None


-    def __repr__(self):
-        return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
-
-
     @field_validator("id", mode="before")
     @classmethod
     def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
@@ -141,7 +137,7 @@ class Agent(BaseModel):
         required_fields = ["role", "goal"]
         for field in required_fields:
             if getattr(self, field) is None:
-                raise ValueError(…
+                raise ValueError(f"{field} must be provided either directly or through config")
         return self

@@ -154,109 +150,84 @@ class Agent(BaseModel):
         """

         self.agent_ops_agent_name = self.role
-        unaccepted_attributes = ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"]
-        callbacks = ([self.step_callback,] if self.step_callback is not None else [])
+        # unaccepted_attributes = ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION_NAME"]

         if isinstance(self.llm, LLM):
-            …
-            self.llm…
-            self.llm.context_window_size = (self.llm.get_context_window_size() if self.respect_context_window == True else DEFAULT_CONTEXT_WINDOW)
-            self.llm.callbacks = callbacks
+            llm = self._set_llm_params(self.llm)
+            self.llm = llm

         elif isinstance(self.llm, str) or self.llm is None:
-            model_name = …
-            llm_params = {
-                …
-                "max_tokens": self.max_tokens,
-                "callbacks": callbacks,
-                "api_key": os.environ.get("LITELLM_API_KEY", None),
-                "base_url": os.environ.get("OPENAI_API_BASE", os.environ.get("OPENAI_BASE_URL", None))
-            }
-
-            set_provider = model_name.split("/")[0] if "/" in model_name else "openai" #! REFINEME
-            for provider, env_vars in LLM_VARS.items():
-                if provider == set_provider:
-                    for env_var in env_vars:
-                        key_name = env_var.get("key_name")
-
-                        if key_name and key_name not in unaccepted_attributes:
-                            env_value = os.environ.get(key_name)
-                            if env_value:
-                                key_name = ("api_key" if "API_KEY" in key_name else key_name)
-                                key_name = ("api_base" if "API_BASE" in key_name else key_name)
-                                key_name = ("api_version" if "API_VERSION" in key_name else key_name)
-                                llm_params[key_name] = env_value
-                        elif env_var.get("default", False):
-                            for key, value in env_var.items():
-                                if key not in ["prompt", "key_name", "default"]:
-                                    if key in os.environ:
-                                        llm_params[key] = value
-            self.llm = LLM(**llm_params)
-            context_window_size = (self.llm.get_context_window_size() if self.respect_context_window == True else DEFAULT_CONTEXT_WINDOW)
-            self.llm.context_window_size = context_window_size
+            model_name = self.llm if self.llm is not None else DEFAULT_MODEL_NAME
+            llm = LLM(model=model_name)
+            updated_llm = self._set_llm_params(llm)
+            self.llm = updated_llm

         else:
-            …
+            if isinstance(self.llm, dict):
+                model_name = self.llm.pop("model_name", self.llm.pop("deployment_name", str(self.llm)))
+                llm = LLM(model=model_name if model_name is not None else DEFAULT_MODEL_NAME)
+                updated_llm = self._set_llm_params(llm, { k: v for k, v in self.llm.items() if v is not None })
+                self.llm = updated_llm
+
+            else:
+                model_name = (getattr(self.llm, "model_name") or getattr(self.llm, "deployment_name") or str(self.llm))
+                llm = LLM(model=model_name)
+                llm_params = {
+                    "max_tokens": (getattr(self.llm, "max_tokens") or self.max_tokens or 3000),
+                    "timeout": getattr(self.llm, "timeout", self.max_execution_time),
+                    "callbacks": getattr(self.llm, "callbacks", None),
+                    "temperature": getattr(self.llm, "temperature", None),
+                    "logprobs": getattr(self.llm, "logprobs", None),
+                    "api_key": getattr(self.llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
+                    "base_url": getattr(self.llm, "base_url", None),
+                }
+                updated_llm = self._set_llm_params(llm, llm_params)
+                self.llm = updated_llm


         """
-        Set up funcion_calling LLM as well.
+        Set up funcion_calling LLM as well.
+        Check if the model supports function calling, setup LLM instance accordingly, using the same params with the LLM.
         """
         if self.function_calling_llm:
             if isinstance(self.function_calling_llm, LLM):
-                self.function_calling_llm.…
-                context_window_size = (
-                    self.function_calling_llm.get_context_window_size()
-                    if self.respect_context_window == True
-                    else DEFAULT_CONTEXT_WINDOW
-                )
-                self.function_calling_llm.context_window_size = context_window_size
+                if self.function_calling_llm._supports_function_calling() == False:
+                    self.function_calling_llm = LLM(model=DEFAULT_MODEL_NAME)
+
+                updated_llm = self._set_llm_params(self.function_calling_llm)
+                self.function_calling_llm = updated_llm

             elif isinstance(self.function_calling_llm, str):
-                …
-                )
-                context_window_size = (
-                    self.function_calling_llm.get_context_window_size()
-                    if self.respect_context_window == True
-                    else DEFAULT_CONTEXT_WINDOW
-                )
-                self.function_calling_llm.context_window_size = context_window_size
+                llm = LLM(model=self.function_calling_llm)
+
+                if llm._supports_function_calling() == False:
+                    llm = LLM(model=DEFAULT_MODEL_NAME)
+
+                updated_llm = self._set_llm_params(llm)
+                self.function_calling_llm = updated_llm

             else:
-                …
-                    self.function_calling_llm,
-                    …
-                ),
-                …
+                if isinstance(self.function_calling_llm, dict):
+                    model_name = self.function_calling_llm.pop("model_name", self.function_calling_llm.pop("deployment_name", str(self.function_calling_llm)))
+                    llm = LLM(model=model_name)
+                    updated_llm = self._set_llm_params(llm, { k: v for k, v in self.function_calling_llm.items() if v is not None })
+                    self.function_calling_llm = updated_llm
+
+                else:
+                    model_name = (getattr(self.function_calling_llm, "model_name") or getattr(self.function_calling_llm, "deployment_name") or str(self.function_calling_llm))
+                    llm = LLM(model=model_name)
+                    llm_params = {
+                        "max_tokens": (getattr(self.function_calling_llm, "max_tokens") or self.max_tokens or 3000),
+                        "timeout": getattr(self.function_calling_llm, "timeout", self.max_execution_time),
+                        "callbacks": getattr(self.function_calling_llm, "callbacks", None),
+                        "temperature": getattr(self.function_calling_llm, "temperature", None),
+                        "logprobs": getattr(self.function_calling_llm, "logprobs", None),
+                        "api_key": getattr(self.function_calling_llm, "api_key", os.environ.get("LITELLM_API_KEY", None)),
+                        "base_url": getattr(self.function_calling_llm, "base_url", None),
+                    }
+                    updated_llm = self._set_llm_params(llm, llm_params)
+                    self.function_calling_llm = updated_llm

         return self

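The rewritten validator normalizes `llm` from three input shapes (an `LLM` instance, a model-name string or `None`, or a dict of params) into a configured `LLM` via `_set_llm_params`. A minimal sketch of the three forms, using only names visible in this diff; treat it as illustration, not a tested snippet:

```python
from versionhq.agent.model import Agent
from versionhq.llm.model import LLM

# 1. Model name as a plain string.
a1 = Agent(role="analyst", goal="score inbound leads", llm="gpt-4o")

# 2. Dict form: "model_name" (or "deployment_name") is popped for the model;
#    the remaining non-None keys are applied to the LLM in _set_llm_params().
a2 = Agent(
    role="analyst",
    goal="score inbound leads",
    llm={"model_name": "gemini/gemini-1.5-flash", "temperature": 0.2},
)

# 3. A pre-built LLM instance is passed straight to _set_llm_params().
a3 = Agent(role="analyst", goal="score inbound leads", llm=LLM(model="gpt-4o-mini"))
```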
@@ -315,7 +286,32 @@ class Agent(BaseModel):
         return self


-    def …
+    def _set_llm_params(self, llm: LLM, kwargs: Dict[str, Any] = None) -> LLM:
+        """
+        After setting up an LLM instance, add params to the instance.
+        Prioritize the agent's settings over the model's base setups.
+        """
+
+        llm.timeout = self.max_execution_time if llm.timeout is None else llm.timeout
+        llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
+
+        if self.step_callback is not None:
+            llm.callbacks = [self.step_callback, ]
+            llm._set_callbacks(llm.callbacks)
+
+        if self.respect_context_window == False:
+            llm.context_window_size = DEFAULT_CONTEXT_WINDOW_SIZE
+
+        if kwargs:
+            for k, v in kwargs.items():
+                try:
+                    setattr(llm, k, v)
+                except:
+                    pass
+        return llm
+
+
+    def invoke(self, prompts: str, output_formats: List[str | TaskOutputFormat], response_fields: List[ResponseField]) -> Dict[str, Any]:
         """
         Receive the system prompt in string and create formatted prompts using the system prompt and the agent's backstory.
         Then call the base model.
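`_set_llm_params` gives agent-level settings precedence over the instance's own values (`timeout`, `max_tokens`), attaches `step_callback` as an LLM callback, pins the context window to `DEFAULT_CONTEXT_WINDOW_SIZE` when `respect_context_window` is off, and applies any extra kwargs via `setattr`. A sketch of the resulting precedence (assumed construction, following the assignment shown in the diff):

```python
from versionhq.agent.model import Agent
from versionhq.llm.model import LLM

llm = LLM(model="gpt-4o")  # no max_tokens set on the instance itself
agent = Agent(role="analyst", goal="score inbound leads", llm=llm, max_tokens=1500)

# Per the diff: llm.max_tokens = self.max_tokens if self.max_tokens else llm.max_tokens
assert agent.llm.max_tokens == 1500
```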
@@ -329,22 +325,13 @@ class Agent(BaseModel):
         messages.append({"role": "assistant", "content": self.backstory})
         self._logger.log(level="info", message=f"Messages sent to the model: {messages}", color="blue")

-        …
-        raw_response = self.llm.call(
-            messages=messages, output_formats=output_formats, field_list=response_fields, callbacks=callbacks
-        )
+        raw_response = self.llm.call(messages=messages, output_formats=output_formats, field_list=response_fields)
         task_execution_counter += 1
         self._logger.log(level="info", message=f"Agent's first response in {type(raw_response).__name__}: {raw_response}", color="blue")

         if (raw_response is None or raw_response == "") and task_execution_counter < self.max_retry_limit:
             while task_execution_counter <= self.max_retry_limit:
-                raw_response = self.llm.call(
-                    messages=messages,
-                    output_formats=output_formats,
-                    field_list=response_fields,
-                    callbacks=callbacks,
-                )
+                raw_response = self.llm.call(messages=messages, output_formats=output_formats, field_list=response_fields)
                 task_execution_counter += 1
                 self._logger.log(level="info", message=f"Agent's next response in {type(raw_response).__name__}: {raw_response}", color="blue")
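Both call sites collapse to a one-line `self.llm.call(...)` with no separate `callbacks` argument, since callbacks now ride on the LLM instance via `_set_llm_params`. A hypothetical call matching the new `invoke` signature (the `response_fields` payload is left abstract because the `ResponseField` shape is not shown in this diff):

```python
# `agent` as constructed above; `response_fields` is a List[ResponseField]
# (ResponseField comes from versionhq.task.model; its fields are not shown here).
raw = agent.invoke(
    prompts="Draft a follow-up email for this lead.",
    output_formats=["json"],
    response_fields=response_fields,
)
```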
@@ -411,3 +398,7 @@ class Agent(BaseModel):
             self._rpm_controller.stop_rpm_counter()

         return raw_response
+
+
+    def __repr__(self):
+        return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
--- versionhq-1.1.9.12/src/versionhq/llm/llm_vars.py
+++ versionhq-1.1.9.14/src/versionhq/llm/llm_variables.py
@@ -1,3 +1,93 @@
+JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
+
+
+"""
+List of models available on the framework.
+Model names align with the LiteLLM's key names defined in the JSON URL.
+Provider names align with the custom provider or model provider names.
+-> model_key = custom_provider_name/model_name
+"""
+
+MODELS = {
+    "openai": [
+        "gpt-3.5-turbo",
+        "gpt-4",
+        "gpt-4o",
+        "gpt-4o-mini",
+        "o1-mini",
+        "o1-preview",
+    ],
+    "gemini": [
+        "gemini/gemini-1.5-flash",
+        "gemini/gemini-1.5-pro",
+        "gemini/gemini-2.0-flash-exp",
+        "gemini/gemini-gemma-2-9b-it",
+        "gemini/gemini-gemma-2-27b-it",
+    ],
+    "anthropic": [
+        "claude-3-5-sonnet-20241022",
+        "claude-3-5-sonnet-20240620",
+        "claude-3-sonnet-20240229",
+        "claude-3-opus-20240229",
+        "claude-3-haiku-20240307",
+    ],
+    "ollama": [
+        "ollama/llama3.1",
+        "ollama/mixtral",
+    ],
+    # "watson": [
+    #     "watsonx/meta-llama/llama-3-1-70b-instruct",
+    #     "watsonx/meta-llama/llama-3-1-8b-instruct",
+    #     "watsonx/meta-llama/llama-3-2-11b-vision-instruct",
+    #     "watsonx/meta-llama/llama-3-2-1b-instruct",
+    #     "watsonx/meta-llama/llama-3-2-90b-vision-instruct",
+    #     "watsonx/meta-llama/llama-3-405b-instruct",
+    #     "watsonx/mistral/mistral-large",
+    #     "watsonx/ibm/granite-3-8b-instruct",
+    # ],
+    # "bedrock": [
+    #     "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
+    #     "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
+    #     "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
+    #     "bedrock/anthropic.claude-3-opus-20240229-v1:0",
+    #     "bedrock/anthropic.claude-v2:1",
+    #     "bedrock/anthropic.claude-v2",
+    #     "bedrock/anthropic.claude-instant-v1",
+    #     "bedrock/meta.llama3-1-405b-instruct-v1:0",
+    #     "bedrock/meta.llama3-1-70b-instruct-v1:0",
+    #     "bedrock/meta.llama3-1-8b-instruct-v1:0",
+    #     "bedrock/meta.llama3-70b-instruct-v1:0",
+    #     "bedrock/meta.llama3-8b-instruct-v1:0",
+    #     "bedrock/amazon.titan-text-lite-v1",
+    #     "bedrock/amazon.titan-text-express-v1",
+    #     "bedrock/cohere.command-text-v14",
+    #     "bedrock/ai21.j2-mid-v1",
+    #     "bedrock/ai21.j2-ultra-v1",
+    #     "bedrock/ai21.jamba-instruct-v1:0",
+    #     "bedrock/meta.llama2-13b-chat-v1",
+    #     "bedrock/meta.llama2-70b-chat-v1",
+    #     "bedrock/mistral.mistral-7b-instruct-v0:2",
+    #     "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
+    # ],
+}
+
+
+PROVIDERS = [
+    "openai",
+    "anthropic",
+    "gemini",
+    "ollama",
+    "watson",
+    "bedrock",
+    "azure",
+    "cerebras",
+    "llama",
+]
+
+
+"""
+Max input token size by the model.
+"""
 LLM_CONTEXT_WINDOW_SIZES = {
     "gpt-3.5-turbo": 8192,
     "gpt-4": 8192,
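The module docstring's `model_key = custom_provider_name/model_name` convention can be exercised with a small helper. This is a hypothetical illustration, not a function in the package; the `"openai"` default mirrors the fallback the removed agent code used:

```python
def provider_of(model_key: str) -> str:
    # "gemini/gemini-1.5-pro" -> "gemini"; bare names default to "openai"
    return model_key.split("/")[0] if "/" in model_key else "openai"

assert provider_of("gemini/gemini-1.5-pro") == "gemini"
assert provider_of("gpt-4o") == "openai"
```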
@@ -6,6 +96,19 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "gpt-4-turbo": 128000,
     "o1-preview": 128000,
     "o1-mini": 128000,
+
+    "gemini/gemini-1.5-flash": 1048576,
+    "gemini/gemini-1.5-pro": 2097152,
+    "gemini/gemini-2.0-flash-exp": 1048576,
+    "gemini/gemini-gemma-2-9b-it": 8192,
+    "gemini/gemini-gemma-2-27b-it": 8192,
+
+    "claude-3-5-sonnet-20241022": 200000,
+    "claude-3-5-sonnet-20240620": 200000,
+    "claude-3-sonnet-20240229": 200000,
+    "claude-3-opus-20240229": 200000,
+    "claude-3-haiku-20240307": 200000,
+
     "deepseek-chat": 128000,
     "gemma2-9b-it": 8192,
     "gemma-7b-it": 8192,
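With the Gemini and Claude entries added, a context-window lookup can fall back to a default for unlisted models. A sketch, assuming `DEFAULT_CONTEXT_WINDOW_SIZE` is importable from `versionhq.llm.model` as the agent code does (its value is not shown in this diff):

```python
from versionhq.llm.llm_variables import LLM_CONTEXT_WINDOW_SIZES
from versionhq.llm.model import DEFAULT_CONTEXT_WINDOW_SIZE

def context_window_for(model: str) -> int:
    # Known model -> its max input tokens; unknown -> the package default.
    return LLM_CONTEXT_WINDOW_SIZES.get(model, DEFAULT_CONTEXT_WINDOW_SIZE)

print(context_window_for("gemini/gemini-1.5-pro"))  # 2097152
```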
@@ -20,8 +123,21 @@ LLM_CONTEXT_WINDOW_SIZES = {
     "llama3-70b-8192": 8192,
     "llama3-8b-8192": 8192,
     "mixtral-8x7b-32768": 32768,
+    "claude-3-5-sonnet-2024102": 200000,
+}
+
+
+LLM_API_KEY_NAMES = {
+    "openai": "OPENAI_API_KEY",
+    "anthropic": "ANTHROPIC_API_KEY",
+    "gemini": "GEMINI_API_KEY",
 }

+LLM_BASE_URL_KEY_NAMES = {
+    "openai": "OPENAI_API_BASE",
+    "anthropic": "ANTHROPIC_API_BASE",
+    "gemini": "GEMINI_API_BASE",
+}

 LLM_VARS = {
     "openai": [
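The two new tables map a provider to the environment-variable names holding its API key and base URL. A hypothetical lookup helper (not in the package) showing the intended use:

```python
import os
from versionhq.llm.llm_variables import LLM_API_KEY_NAMES, LLM_BASE_URL_KEY_NAMES

def resolve_credentials(provider: str) -> dict:
    # e.g. "gemini" -> reads GEMINI_API_KEY / GEMINI_API_BASE
    return {
        "api_key": os.environ.get(LLM_API_KEY_NAMES.get(provider, "")),
        "base_url": os.environ.get(LLM_BASE_URL_KEY_NAMES.get(provider, "")),
    }
```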
@@ -107,67 +223,39 @@ LLM_VARS = {
 }


-PROVIDERS = [
-    "openai",
-    "anthropic",
-    "gemini",
-    "ollama",
-    "watson",
-    "bedrock",
-    "azure",
-    "cerebras",
-    "llama",
-]

-…
-        "claude-3-5-sonnet-20240620",
-        "claude-3-sonnet-20240229",
-        "claude-3-opus-20240229",
-        "claude-3-haiku-20240307",
-    ],
-    "gemini": [
-        "gemini/gemini-1.5-flash",
-        "gemini/gemini-1.5-pro",
-        "gemini/gemini-gemma-2-9b-it",
-        "gemini/gemini-gemma-2-27b-it",
-    ],
-    "ollama": ["ollama/llama3.1", "ollama/mixtral"],
-    "watson": [
-        "watsonx/meta-llama/llama-3-1-70b-instruct",
-        "watsonx/meta-llama/llama-3-1-8b-instruct",
-        "watsonx/meta-llama/llama-3-2-11b-vision-instruct",
-        "watsonx/meta-llama/llama-3-2-1b-instruct",
-        "watsonx/meta-llama/llama-3-2-90b-vision-instruct",
-        "watsonx/meta-llama/llama-3-405b-instruct",
-        "watsonx/mistral/mistral-large",
-        "watsonx/ibm/granite-3-8b-instruct",
-    ],
-    "bedrock": [
-        "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
-        "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
-        "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
-        "bedrock/anthropic.claude-3-opus-20240229-v1:0",
-        "bedrock/anthropic.claude-v2:1",
-        "bedrock/anthropic.claude-v2",
-        "bedrock/anthropic.claude-instant-v1",
-        "bedrock/meta.llama3-1-405b-instruct-v1:0",
-        "bedrock/meta.llama3-1-70b-instruct-v1:0",
-        "bedrock/meta.llama3-1-8b-instruct-v1:0",
-        "bedrock/meta.llama3-70b-instruct-v1:0",
-        "bedrock/meta.llama3-8b-instruct-v1:0",
-        "bedrock/amazon.titan-text-lite-v1",
-        "bedrock/amazon.titan-text-express-v1",
-        "bedrock/cohere.command-text-v14",
-        "bedrock/ai21.j2-mid-v1",
-        "bedrock/ai21.j2-ultra-v1",
-        "bedrock/ai21.jamba-instruct-v1:0",
-        "bedrock/meta.llama2-13b-chat-v1",
-        "bedrock/meta.llama2-70b-chat-v1",
-        "bedrock/mistral.mistral-7b-instruct-v0:2",
-        "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
-    ],
-}
+"""
+Params for litellm.completion() func
+"""

+LITELLM_COMPLETION_KEYS = [
+    "model",
+    "messages",
+    "timeout",
+    "temperature", "top_p",
+    "n",
+    "stream",
+    "stream_options",
+    "stop",
+    "max_completion_tokens",
+    "max_tokens",
+    "modalities",
+    "prediction",
+    "audio",
+    "presence_penalty",
+    "frequency_penalty",
+    "logit_bias",
+    "user",
+    "response_format",
+    "seed",
+    "tools",
+    "tool_choice",
+    "logprobs",
+    "top_logprobs",
+    "parallel_tool_calls",
+    "extra_headers",
+    "base_url",
+    "api_version",
+    "api_key",
+    "model_list",
+]
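`LITELLM_COMPLETION_KEYS` whitelists the parameter names accepted by `litellm.completion()`. A sketch of the natural use (assumed; the new `llm/model.py` that would consume it is listed above but not shown): filter arbitrary kwargs down to the whitelist before forwarding a call:

```python
from versionhq.llm.llm_variables import LITELLM_COMPLETION_KEYS

def filter_completion_params(params: dict) -> dict:
    # Drop anything litellm.completion() would reject.
    return {k: v for k, v in params.items() if k in LITELLM_COMPLETION_KEYS}

safe = filter_completion_params({"model": "gpt-4o", "temperature": 0.3, "foo": 1})
# -> {"model": "gpt-4o", "temperature": 0.3}
```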