agno-2.2.4-py3-none-any.whl → agno-2.2.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +82 -19
- agno/culture/manager.py +3 -4
- agno/knowledge/chunking/agentic.py +6 -2
- agno/memory/manager.py +9 -4
- agno/models/anthropic/claude.py +1 -2
- agno/models/azure/ai_foundry.py +31 -14
- agno/models/azure/openai_chat.py +12 -4
- agno/models/base.py +44 -11
- agno/models/cerebras/cerebras.py +11 -6
- agno/models/groq/groq.py +7 -4
- agno/models/meta/llama.py +12 -6
- agno/models/meta/llama_openai.py +5 -1
- agno/models/openai/chat.py +20 -12
- agno/models/openai/responses.py +10 -5
- agno/models/utils.py +254 -8
- agno/models/vertexai/claude.py +9 -13
- agno/os/app.py +48 -21
- agno/os/routers/evals/evals.py +8 -8
- agno/os/routers/evals/utils.py +1 -0
- agno/os/schema.py +48 -33
- agno/os/utils.py +27 -0
- agno/run/agent.py +5 -0
- agno/run/team.py +2 -0
- agno/run/workflow.py +39 -0
- agno/session/summary.py +8 -2
- agno/session/workflow.py +4 -3
- agno/team/team.py +50 -14
- agno/tools/file.py +153 -25
- agno/tools/function.py +5 -1
- agno/tools/notion.py +201 -0
- agno/utils/events.py +2 -0
- agno/utils/print_response/workflow.py +115 -16
- agno/vectordb/milvus/milvus.py +5 -0
- agno/workflow/__init__.py +2 -0
- agno/workflow/agent.py +298 -0
- agno/workflow/workflow.py +929 -64
- {agno-2.2.4.dist-info → agno-2.2.6.dist-info}/METADATA +4 -1
- {agno-2.2.4.dist-info → agno-2.2.6.dist-info}/RECORD +41 -39
- {agno-2.2.4.dist-info → agno-2.2.6.dist-info}/WHEEL +0 -0
- {agno-2.2.4.dist-info → agno-2.2.6.dist-info}/licenses/LICENSE +0 -0
- {agno-2.2.4.dist-info → agno-2.2.6.dist-info}/top_level.txt +0 -0
agno/models/cerebras/cerebras.py
CHANGED
@@ -63,7 +63,7 @@ class Cerebras(Model):
     max_retries: Optional[int] = None
     default_headers: Optional[Any] = None
     default_query: Optional[Any] = None
-    http_client: Optional[httpx.Client] = None
+    http_client: Optional[Union[httpx.Client, httpx.AsyncClient]] = None
     client_params: Optional[Dict[str, Any]] = None
 
     # Cerebras clients
@@ -102,12 +102,15 @@ class Cerebras(Model):
         Returns:
             CerebrasClient: An instance of the Cerebras client.
         """
-        if self.client:
+        if self.client and not self.client.is_closed():
             return self.client
 
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client:
-            client_params["http_client"] = self.http_client
+        if self.http_client:
+            if isinstance(self.http_client, httpx.Client):
+                client_params["http_client"] = self.http_client
+            else:
+                log_debug("http_client is not an instance of httpx.Client.")
         self.client = CerebrasClient(**client_params)
         return self.client
 
@@ -118,13 +121,15 @@ class Cerebras(Model):
         Returns:
             AsyncCerebras: An instance of the asynchronous Cerebras client.
         """
-        if self.async_client:
+        if self.async_client and not self.async_client.is_closed():
             return self.async_client
 
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client:
+        if self.http_client and isinstance(self.http_client, httpx.AsyncClient):
             client_params["http_client"] = self.http_client
         else:
+            if self.http_client:
+                log_debug("The current http_client is not async. A default httpx.AsyncClient will be used instead.")
             # Create a new async HTTP client with custom limits
             client_params["http_client"] = httpx.AsyncClient(
                 limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
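
Taken together, these hunks make the cached client self-healing (it is rebuilt once `is_closed()` reports true) and validate the type of a user-supplied `http_client` per code path. A minimal sketch of the new surface; the model id is hypothetical, and only the `http_client` handling is taken from the diff above:

```python
import httpx

from agno.models.cerebras import Cerebras

model = Cerebras(
    id="llama-3.3-70b",  # hypothetical model id, for illustration only
    # http_client now accepts a sync or async client. The sync path only
    # uses an httpx.Client (otherwise it logs a debug message); the async
    # path only uses an httpx.AsyncClient and falls back to a fresh one.
    http_client=httpx.AsyncClient(
        limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
    ),
)
```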
agno/models/groq/groq.py
CHANGED
@@ -61,7 +61,7 @@ class Groq(Model):
     max_retries: Optional[int] = None
     default_headers: Optional[Any] = None
     default_query: Optional[Any] = None
-    http_client: Optional[httpx.Client] = None
+    http_client: Optional[Union[httpx.Client, httpx.AsyncClient]] = None
     client_params: Optional[Dict[str, Any]] = None
 
     # Groq clients
@@ -115,18 +115,21 @@ class Groq(Model):
         Returns:
             AsyncGroqClient: An instance of the asynchronous Groq client.
         """
-        if self.async_client:
+        if self.async_client and not self.async_client.is_closed():
             return self.async_client
 
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client:
+        if self.http_client and isinstance(self.http_client, httpx.AsyncClient):
             client_params["http_client"] = self.http_client
         else:
+            if self.http_client:
+                log_debug("The current http_client is not async. A default httpx.AsyncClient will be used instead.")
             # Create a new async HTTP client with custom limits
             client_params["http_client"] = httpx.AsyncClient(
                 limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
             )
-        return AsyncGroqClient(**client_params)
+        self.async_client = AsyncGroqClient(**client_params)
+        return self.async_client
 
     def get_request_params(
         self,
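
The same `http_client` validation applies here, plus a caching fix: `get_async_client()` previously returned a newly built client without storing it, so the `if self.async_client` guard never fired. A small sketch of the resulting behavior, with a hypothetical model id:

```python
from agno.models.groq import Groq

model = Groq(id="llama-3.3-70b-versatile")  # hypothetical model id

# A single AsyncGroqClient (and its connection pool) is now reused across
# calls; it is only rebuilt once the cached instance reports is_closed().
assert model.get_async_client() is model.get_async_client()
```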
agno/models/meta/llama.py
CHANGED
@@ -61,7 +61,7 @@ class Llama(Model):
     max_retries: Optional[int] = None
     default_headers: Optional[Any] = None
     default_query: Optional[Any] = None
-    http_client: Optional[httpx.Client] = None
+    http_client: Optional[Union[httpx.Client, httpx.AsyncClient]] = None
     client_params: Optional[Dict[str, Any]] = None
 
     # OpenAI clients
@@ -104,8 +104,11 @@ class Llama(Model):
             return self.client
 
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client:
-            client_params["http_client"] = self.http_client
+        if self.http_client:
+            if isinstance(self.http_client, httpx.Client):
+                client_params["http_client"] = self.http_client
+            else:
+                log_debug("http_client is not an instance of httpx.Client.")
         self.client = LlamaAPIClient(**client_params)
         return self.client
 
@@ -116,18 +119,21 @@ class Llama(Model):
         Returns:
             AsyncLlamaAPIClient: An instance of the asynchronous Llama client.
         """
-        if self.async_client:
+        if self.async_client and not self.async_client.is_closed():
             return self.async_client
 
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client:
+        if self.http_client and isinstance(self.http_client, httpx.AsyncClient):
             client_params["http_client"] = self.http_client
         else:
+            if self.http_client:
+                log_debug("The current http_client is not async. A default httpx.AsyncClient will be used instead.")
             # Create a new async HTTP client with custom limits
             client_params["http_client"] = httpx.AsyncClient(
                 limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
             )
-        return AsyncLlamaAPIClient(**client_params)
+        self.async_client = AsyncLlamaAPIClient(**client_params)
+        return self.async_client
 
     def get_request_params(
         self,
agno/models/meta/llama_openai.py
CHANGED
@@ -62,6 +62,9 @@ class LlamaOpenAI(OpenAILike):
 
     def get_async_client(self):
         """Override to provide custom httpx client that properly handles redirects"""
+        if self.async_client and not self.async_client.is_closed():
+            return self.async_client
+
         client_params = self._get_client_params()
 
         # Llama gives a 307 redirect error, so we need to set up a custom client to allow redirects
@@ -71,4 +74,5 @@ class LlamaOpenAI(OpenAILike):
             timeout=httpx.Timeout(30.0),
         )
 
-        return AsyncOpenAIClient(**client_params)
+        self.async_client = AsyncOpenAIClient(**client_params)
+        return self.async_client
agno/models/openai/chat.py
CHANGED
@@ -81,6 +81,10 @@ class OpenAIChat(Model):
     http_client: Optional[Union[httpx.Client, httpx.AsyncClient]] = None
     client_params: Optional[Dict[str, Any]] = None
 
+    # OpenAI clients
+    client: Optional[OpenAIClient] = None
+    async_client: Optional[AsyncOpenAIClient] = None
+
     # The role to map the message role to.
     default_role_map = {
         "system": "developer",
@@ -123,13 +127,18 @@ class OpenAIChat(Model):
         Returns:
             OpenAIClient: An instance of the OpenAI client.
         """
+        if self.client and not self.client.is_closed():
+            return self.client
+
         client_params: Dict[str, Any] = self._get_client_params()
         if self.http_client:
             if isinstance(self.http_client, httpx.Client):
                 client_params["http_client"] = self.http_client
             else:
-                log_warning("http_client is not an instance of httpx.Client. Using default httpx.Client.")
-        return OpenAIClient(**client_params)
+                log_debug("http_client is not an instance of httpx.Client.")
+
+        self.client = OpenAIClient(**client_params)
+        return self.client
 
     def get_async_client(self) -> AsyncOpenAIClient:
         """
@@ -138,22 +147,21 @@ class OpenAIChat(Model):
         Returns:
             AsyncOpenAIClient: An instance of the asynchronous OpenAI client.
         """
+        if self.async_client and not self.async_client.is_closed():
+            return self.async_client
+
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client:
-            if isinstance(self.http_client, httpx.AsyncClient):
-                client_params["http_client"] = self.http_client
-            else:
-                log_warning("http_client is not an instance of httpx.AsyncClient. Using default httpx.AsyncClient.")
-                # Create a new async HTTP client with custom limits
-                client_params["http_client"] = httpx.AsyncClient(
-                    limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
-                )
+        if self.http_client and isinstance(self.http_client, httpx.AsyncClient):
+            client_params["http_client"] = self.http_client
         else:
+            if self.http_client:
+                log_debug("The current http_client is not async. A default httpx.AsyncClient will be used instead.")
             # Create a new async HTTP client with custom limits
             client_params["http_client"] = httpx.AsyncClient(
                 limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
             )
-        return AsyncOpenAIClient(**client_params)
+        self.async_client = AsyncOpenAIClient(**client_params)
+        return self.async_client
 
     def get_request_params(
         self,
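
`OpenAIChat` now declares its `client`/`async_client` fields explicitly and rebuilds a cached client once it has been closed. A sketch of that lifecycle, assuming the OpenAI SDK's `close()`/`is_closed()` methods and an illustrative model id:

```python
from agno.models.openai import OpenAIChat

model = OpenAIChat(id="gpt-4o")  # illustrative model id

first = model.get_client()
assert model.get_client() is first  # memoized on the new `client` field

first.close()  # the OpenAI SDK client exposes close() and is_closed()
assert model.get_client() is not first  # closed clients are rebuilt
```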
agno/models/openai/responses.py
CHANGED
@@ -66,7 +66,7 @@ class OpenAIResponses(Model):
     max_retries: Optional[int] = None
     default_headers: Optional[Dict[str, str]] = None
     default_query: Optional[Dict[str, str]] = None
-    http_client: Optional[httpx.Client] = None
+    http_client: Optional[Union[httpx.Client, httpx.AsyncClient]] = None
     client_params: Optional[Dict[str, Any]] = None
 
     # Parameters affecting built-in tools
@@ -148,8 +148,11 @@ class OpenAIResponses(Model):
             return self.client
 
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client:
-            client_params["http_client"] = self.http_client
+        if self.http_client:
+            if isinstance(self.http_client, httpx.Client):
+                client_params["http_client"] = self.http_client
+            else:
+                log_debug("http_client is not an instance of httpx.Client.")
 
         self.client = OpenAI(**client_params)
         return self.client
@@ -161,13 +164,15 @@ class OpenAIResponses(Model):
         Returns:
             AsyncOpenAI: An instance of the asynchronous OpenAI client.
         """
-        if self.async_client:
+        if self.async_client and not self.async_client.is_closed():
             return self.async_client
 
         client_params: Dict[str, Any] = self._get_client_params()
-        if self.http_client:
+        if self.http_client and isinstance(self.http_client, httpx.AsyncClient):
             client_params["http_client"] = self.http_client
         else:
+            if self.http_client:
+                log_debug("The current http_client is not async. A default httpx.AsyncClient will be used instead.")
             # Create a new async HTTP client with custom limits
             client_params["http_client"] = httpx.AsyncClient(
                 limits=httpx.Limits(max_connections=1000, max_keepalive_connections=100)
agno/models/utils.py
CHANGED
@@ -1,20 +1,266 @@
+from typing import Optional, Union
+
 from agno.models.base import Model
 
 
-def get_model(model_id: str, model_provider: str) -> Model:
-    if model_provider == "openai":
-        from agno.models.openai import OpenAIChat
+def _get_model_class(model_id: str, model_provider: str) -> Model:
+    if model_provider == "aimlapi":
+        from agno.models.aimlapi import AIMLAPI
+
+        return AIMLAPI(id=model_id)
 
-        return OpenAIChat(id=model_id)
     elif model_provider == "anthropic":
         from agno.models.anthropic import Claude
 
         return Claude(id=model_id)
-    elif model_provider == "google":
+
+    elif model_provider == "aws-bedrock":
+        from agno.models.aws import AwsBedrock
+
+        return AwsBedrock(id=model_id)
+
+    elif model_provider == "aws-claude":
+        from agno.models.aws import Claude as AWSClaude
+
+        return AWSClaude(id=model_id)
+
+    elif model_provider == "azure-ai-foundry":
+        from agno.models.azure import AzureAIFoundry
+
+        return AzureAIFoundry(id=model_id)
+
+    elif model_provider == "azure-openai":
+        from agno.models.azure import AzureOpenAI
+
+        return AzureOpenAI(id=model_id)
+
+    elif model_provider == "cerebras":
+        from agno.models.cerebras import Cerebras
+
+        return Cerebras(id=model_id)
+
+    elif model_provider == "cerebras-openai":
+        from agno.models.cerebras import CerebrasOpenAI
+
+        return CerebrasOpenAI(id=model_id)
+
+    elif model_provider == "cohere":
+        from agno.models.cohere import Cohere
+
+        return Cohere(id=model_id)
+
+    elif model_provider == "cometapi":
+        from agno.models.cometapi import CometAPI
+
+        return CometAPI(id=model_id)
+
+    elif model_provider == "dashscope":
+        from agno.models.dashscope import DashScope
+
+        return DashScope(id=model_id)
+
+    elif model_provider == "deepinfra":
+        from agno.models.deepinfra import DeepInfra
+
+        return DeepInfra(id=model_id)
+
+    elif model_provider == "deepseek":
+        from agno.models.deepseek import DeepSeek
+
+        return DeepSeek(id=model_id)
+
+    elif model_provider == "fireworks":
+        from agno.models.fireworks import Fireworks
+
+        return Fireworks(id=model_id)
+
+    elif model_provider == "google":
         from agno.models.google import Gemini
 
         return Gemini(id=model_id)
+
+    elif model_provider == "groq":
+        from agno.models.groq import Groq
+
+        return Groq(id=model_id)
+
+    elif model_provider == "huggingface":
+        from agno.models.huggingface import HuggingFace
+
+        return HuggingFace(id=model_id)
+
+    elif model_provider == "ibm":
+        from agno.models.ibm import WatsonX
+
+        return WatsonX(id=model_id)
+
+    elif model_provider == "internlm":
+        from agno.models.internlm import InternLM
+
+        return InternLM(id=model_id)
+
+    elif model_provider == "langdb":
+        from agno.models.langdb import LangDB
+
+        return LangDB(id=model_id)
+
+    elif model_provider == "litellm":
+        from agno.models.litellm import LiteLLM
+
+        return LiteLLM(id=model_id)
+
+    elif model_provider == "litellm-openai":
+        from agno.models.litellm import LiteLLMOpenAI
+
+        return LiteLLMOpenAI(id=model_id)
+
+    elif model_provider == "llama-cpp":
+        from agno.models.llama_cpp import LlamaCpp
+
+        return LlamaCpp(id=model_id)
+
+    elif model_provider == "llama-openai":
+        from agno.models.meta import LlamaOpenAI
+
+        return LlamaOpenAI(id=model_id)
+
+    elif model_provider == "lmstudio":
+        from agno.models.lmstudio import LMStudio
+
+        return LMStudio(id=model_id)
+
+    elif model_provider == "meta":
+        from agno.models.meta import Llama
+
+        return Llama(id=model_id)
+
+    elif model_provider == "mistral":
+        from agno.models.mistral import MistralChat
+
+        return MistralChat(id=model_id)
+
+    elif model_provider == "nebius":
+        from agno.models.nebius import Nebius
+
+        return Nebius(id=model_id)
+
+    elif model_provider == "nexus":
+        from agno.models.nexus import Nexus
+
+        return Nexus(id=model_id)
+
+    elif model_provider == "nvidia":
+        from agno.models.nvidia import Nvidia
+
+        return Nvidia(id=model_id)
+
+    elif model_provider == "ollama":
+        from agno.models.ollama import Ollama
+
+        return Ollama(id=model_id)
+
+    elif model_provider == "openai":
+        from agno.models.openai import OpenAIChat
+
+        return OpenAIChat(id=model_id)
+
+    elif model_provider == "openai-responses":
+        from agno.models.openai import OpenAIResponses
+
+        return OpenAIResponses(id=model_id)
+
+    elif model_provider == "openrouter":
+        from agno.models.openrouter import OpenRouter
+
+        return OpenRouter(id=model_id)
+
+    elif model_provider == "perplexity":
+        from agno.models.perplexity import Perplexity
+
+        return Perplexity(id=model_id)
+
+    elif model_provider == "portkey":
+        from agno.models.portkey import Portkey
+
+        return Portkey(id=model_id)
+
+    elif model_provider == "requesty":
+        from agno.models.requesty import Requesty
+
+        return Requesty(id=model_id)
+
+    elif model_provider == "sambanova":
+        from agno.models.sambanova import Sambanova
+
+        return Sambanova(id=model_id)
+
+    elif model_provider == "siliconflow":
+        from agno.models.siliconflow import Siliconflow
+
+        return Siliconflow(id=model_id)
+
+    elif model_provider == "together":
+        from agno.models.together import Together
+
+        return Together(id=model_id)
+
+    elif model_provider == "vercel":
+        from agno.models.vercel import V0
+
+        return V0(id=model_id)
+
+    elif model_provider == "vertexai-claude":
+        from agno.models.vertexai.claude import Claude as VertexAIClaude
+
+        return VertexAIClaude(id=model_id)
+
+    elif model_provider == "vllm":
+        from agno.models.vllm import VLLM
+
+        return VLLM(id=model_id)
+
+    elif model_provider == "xai":
+        from agno.models.xai import xAI
+
+        return xAI(id=model_id)
+
+    else:
+        raise ValueError(f"Model provider '{model_provider}' is not supported.")
+
+
+def _parse_model_string(model_string: str) -> Model:
+    if not model_string or not isinstance(model_string, str):
+        raise ValueError(f"Model string must be a non-empty string, got: {model_string}")
+
+    if ":" not in model_string:
+        raise ValueError(
+            f"Invalid model string format: '{model_string}'. Model strings should be in format '<provider>:<model_id>' e.g. 'openai:gpt-4o'"
+        )
+
+    parts = model_string.split(":", 1)
+    if len(parts) != 2:
+        raise ValueError(
+            f"Invalid model string format: '{model_string}'. Model strings should be in format '<provider>:<model_id>' e.g. 'openai:gpt-4o'"
+        )
+
+    model_provider, model_id = parts
+    model_provider = model_provider.strip().lower()
+    model_id = model_id.strip()
+
+    if not model_provider or not model_id:
+        raise ValueError(
+            f"Invalid model string format: '{model_string}'. Model strings should be in format '<provider>:<model_id>' e.g. 'openai:gpt-4o'"
+        )
+
+    return _get_model_class(model_id, model_provider)
+
+
+def get_model(model: Union[Model, str, None]) -> Optional[Model]:
+    if model is None:
+        return None
+    elif isinstance(model, Model):
+        return model
+    elif isinstance(model, str):
+        return _parse_model_string(model)
     else:
-        raise ValueError(
+        raise ValueError("Model must be a Model instance, string, or None")
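
`get_model` is the new public entry point: it passes through `Model` instances and `None`, and parses `'<provider>:<model_id>'` strings via `_parse_model_string`. A quick illustration of the parsing rules above:

```python
from agno.models.utils import get_model

model = get_model("openai:gpt-4o")  # -> OpenAIChat(id="gpt-4o")
assert get_model(model) is model    # Model instances pass through unchanged
assert get_model(None) is None

# Strings without the '<provider>:' prefix are rejected:
get_model("gpt-4o")  # raises ValueError with the expected-format message
```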
agno/models/vertexai/claude.py
CHANGED
@@ -5,10 +5,7 @@ from typing import Any, Dict, Optional
 from agno.models.anthropic import Claude as AnthropicClaude
 
 try:
-    from anthropic import AnthropicVertex as AnthropicClient
-    from anthropic import (
-        AsyncAnthropicVertex as AsyncAnthropicClient,
-    )
+    from anthropic import AnthropicVertex, AsyncAnthropicVertex
 except ImportError as e:
     raise ImportError("`anthropic` not installed. Please install it with `pip install anthropic`") from e
 
@@ -25,15 +22,14 @@ class Claude(AnthropicClaude):
     name: str = "Claude"
     provider: str = "VertexAI"
 
+    client: Optional[AnthropicVertex] = None  # type: ignore
+    async_client: Optional[AsyncAnthropicVertex] = None  # type: ignore
+
     # Client parameters
     region: Optional[str] = None
     project_id: Optional[str] = None
     base_url: Optional[str] = None
 
-    # Anthropic clients
-    client: Optional[AnthropicClient] = None
-    async_client: Optional[AsyncAnthropicClient] = None
-
     def _get_client_params(self) -> Dict[str, Any]:
         client_params: Dict[str, Any] = {}
 
@@ -51,7 +47,7 @@ class Claude(AnthropicClaude):
         client_params["default_headers"] = self.default_headers
         return client_params
 
-    def get_client(self) -> AnthropicClient:
+    def get_client(self):
         """
         Returns an instance of the Anthropic client.
         """
@@ -59,16 +55,16 @@ class Claude(AnthropicClaude):
             return self.client
 
         _client_params = self._get_client_params()
-        self.client = AnthropicClient(**_client_params)
+        self.client = AnthropicVertex(**_client_params)
         return self.client
 
-    def get_async_client(self) -> AsyncAnthropicClient:
+    def get_async_client(self):
         """
         Returns an instance of the async Anthropic client.
         """
-        if self.async_client:
+        if self.async_client and not self.async_client.is_closed():
             return self.async_client
 
         _client_params = self._get_client_params()
-        self.async_client = AsyncAnthropicClient(**_client_params)
+        self.async_client = AsyncAnthropicVertex(**_client_params)
         return self.async_client
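
With the import aliases gone, the class annotates its clients as `AnthropicVertex`/`AsyncAnthropicVertex` directly and gains the same `is_closed()` rebuild guard on the async path. A minimal usage sketch; the model id, region, and project values are placeholders:

```python
from agno.models.vertexai.claude import Claude

model = Claude(
    id="claude-sonnet-4",         # placeholder model id
    region="us-east5",            # placeholder region
    project_id="my-gcp-project",  # placeholder project
)

client = model.get_client()              # AnthropicVertex, built lazily and cached
async_client = model.get_async_client()  # AsyncAnthropicVertex, rebuilt once closed
```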
agno/os/app.py
CHANGED
@@ -224,13 +224,59 @@ class AgentOS:
         except (ValueError, TypeError):
             return lifespan
 
-    def resync(self) -> None:
+    def resync(self, app: FastAPI) -> None:
         """Resync the AgentOS to discover, initialize and configure: agents, teams, workflows, databases and knowledge bases."""
         self._initialize_agents()
         self._initialize_teams()
         self._initialize_workflows()
         self._auto_discover_databases()
         self._auto_discover_knowledge_instances()
+        self._reprovision_routers(app=app)
+
+    def _reprovision_routers(self, app: FastAPI) -> None:
+        """Re-provision all routes for the AgentOS."""
+        updated_routers = [
+            get_session_router(dbs=self.dbs),
+            get_memory_router(dbs=self.dbs),
+            get_eval_router(dbs=self.dbs, agents=self.agents, teams=self.teams),
+            get_metrics_router(dbs=self.dbs),
+            get_knowledge_router(knowledge_instances=self.knowledge_instances),
+        ]
+
+        # Clear all previously existing routes
+        app.router.routes = []
+
+        # Add the updated routes
+        for router in updated_routers:
+            self._add_router(app, router)
+
+        # Add the built-in routes
+        self._add_built_in_routes(app=app)
+
+    def _add_built_in_routes(self, app: FastAPI) -> None:
+        """Add all AgentOS built-in routes to the given app."""
+        self._add_router(app, get_base_router(self, settings=self.settings))
+        self._add_router(app, get_websocket_router(self, settings=self.settings))
+        self._add_router(app, get_health_router())
+        self._add_router(app, get_home_router(self))
+
+        # Add A2A interface if relevant
+        has_a2a_interface = False
+        for interface in self.interfaces:
+            if not has_a2a_interface and interface.__class__.__name__ == "A2A":
+                has_a2a_interface = True
+                interface_router = interface.get_router()
+                self._add_router(app, interface_router)
+        if self.a2a_interface and not has_a2a_interface:
+            from agno.os.interfaces.a2a import A2A
+
+            a2a_interface = A2A(agents=self.agents, teams=self.teams, workflows=self.workflows)
+            self.interfaces.append(a2a_interface)
+            self._add_router(app, a2a_interface.get_router())
+
+        # Add the home router if MCP server is not enabled
+        if not self.enable_mcp_server:
+            self._add_router(app, get_home_router(self))
 
     def _make_app(self, lifespan: Optional[Any] = None) -> FastAPI:
         # Adjust the FastAPI app lifespan to handle MCP connections if relevant
@@ -381,26 +427,7 @@ class AgentOS:
 
         fastapi_app = self._make_app(lifespan=wrapped_user_lifespan)
 
-
-        self._add_router(fastapi_app, get_base_router(self, settings=self.settings))
-        self._add_router(fastapi_app, get_websocket_router(self, settings=self.settings))
-        self._add_router(fastapi_app, get_health_router())
-        self._add_router(fastapi_app, get_home_router(self))
-
-        has_a2a_interface = False
-        for interface in self.interfaces:
-            if not has_a2a_interface and interface.__class__.__name__ == "A2A":
-                has_a2a_interface = True
-                interface_router = interface.get_router()
-                self._add_router(fastapi_app, interface_router)
-
-        # Add A2A interface if requested and not provided in self.interfaces
-        if self.a2a_interface and not has_a2a_interface:
-            from agno.os.interfaces.a2a import A2A
-
-            a2a_interface = A2A(agents=self.agents, teams=self.teams, workflows=self.workflows)
-            self.interfaces.append(a2a_interface)
-            self._add_router(fastapi_app, a2a_interface.get_router())
+        self._add_built_in_routes(app=fastapi_app)
 
         self._auto_discover_databases()
         self._auto_discover_knowledge_instances()
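
`resync()` now requires the FastAPI app so it can rebuild the route table after rediscovery, and the built-in route wiring is shared between app creation and `_reprovision_routers()`. A sketch of resyncing at runtime; the `AgentOS(...)` setup and agent names are illustrative:

```python
from agno.agent import Agent
from agno.os import AgentOS

agent_os = AgentOS(agents=[Agent(name="support")])  # illustrative setup
app = agent_os.get_app()

# After mutating the OS (e.g. registering another agent), rebuild the
# routes on the same app. Note that _reprovision_routers clears every
# existing route before re-adding the updated and built-in ones.
agent_os.agents.append(Agent(name="billing"))
agent_os.resync(app=app)
```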