xpander-sdk 1.60.4__py3-none-any.whl → 2.0.155__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- xpander_sdk/__init__.py +76 -7793
- xpander_sdk/consts/__init__.py +0 -0
- xpander_sdk/consts/api_routes.py +63 -0
- xpander_sdk/core/__init__.py +0 -0
- xpander_sdk/core/module_base.py +164 -0
- xpander_sdk/core/state.py +10 -0
- xpander_sdk/core/xpander_api_client.py +119 -0
- xpander_sdk/exceptions/__init__.py +0 -0
- xpander_sdk/exceptions/module_exception.py +45 -0
- xpander_sdk/models/__init__.py +0 -0
- xpander_sdk/models/activity.py +65 -0
- xpander_sdk/models/configuration.py +92 -0
- xpander_sdk/models/events.py +70 -0
- xpander_sdk/models/frameworks.py +64 -0
- xpander_sdk/models/shared.py +102 -0
- xpander_sdk/models/user.py +21 -0
- xpander_sdk/modules/__init__.py +0 -0
- xpander_sdk/modules/agents/__init__.py +0 -0
- xpander_sdk/modules/agents/agents_module.py +164 -0
- xpander_sdk/modules/agents/models/__init__.py +0 -0
- xpander_sdk/modules/agents/models/agent.py +477 -0
- xpander_sdk/modules/agents/models/agent_list.py +107 -0
- xpander_sdk/modules/agents/models/knowledge_bases.py +33 -0
- xpander_sdk/modules/agents/sub_modules/__init__.py +0 -0
- xpander_sdk/modules/agents/sub_modules/agent.py +953 -0
- xpander_sdk/modules/agents/utils/__init__.py +0 -0
- xpander_sdk/modules/agents/utils/generic.py +2 -0
- xpander_sdk/modules/backend/__init__.py +0 -0
- xpander_sdk/modules/backend/backend_module.py +425 -0
- xpander_sdk/modules/backend/frameworks/__init__.py +0 -0
- xpander_sdk/modules/backend/frameworks/agno.py +627 -0
- xpander_sdk/modules/backend/frameworks/dispatch.py +36 -0
- xpander_sdk/modules/backend/utils/__init__.py +0 -0
- xpander_sdk/modules/backend/utils/mcp_oauth.py +95 -0
- xpander_sdk/modules/events/__init__.py +0 -0
- xpander_sdk/modules/events/decorators/__init__.py +0 -0
- xpander_sdk/modules/events/decorators/on_boot.py +94 -0
- xpander_sdk/modules/events/decorators/on_shutdown.py +94 -0
- xpander_sdk/modules/events/decorators/on_task.py +203 -0
- xpander_sdk/modules/events/events_module.py +629 -0
- xpander_sdk/modules/events/models/__init__.py +0 -0
- xpander_sdk/modules/events/models/deployments.py +25 -0
- xpander_sdk/modules/events/models/events.py +57 -0
- xpander_sdk/modules/events/utils/__init__.py +0 -0
- xpander_sdk/modules/events/utils/generic.py +56 -0
- xpander_sdk/modules/events/utils/git_init.py +32 -0
- xpander_sdk/modules/knowledge_bases/__init__.py +0 -0
- xpander_sdk/modules/knowledge_bases/knowledge_bases_module.py +217 -0
- xpander_sdk/modules/knowledge_bases/models/__init__.py +0 -0
- xpander_sdk/modules/knowledge_bases/models/knowledge_bases.py +11 -0
- xpander_sdk/modules/knowledge_bases/sub_modules/__init__.py +0 -0
- xpander_sdk/modules/knowledge_bases/sub_modules/knowledge_base.py +107 -0
- xpander_sdk/modules/knowledge_bases/sub_modules/knowledge_base_document_item.py +40 -0
- xpander_sdk/modules/knowledge_bases/utils/__init__.py +0 -0
- xpander_sdk/modules/tasks/__init__.py +0 -0
- xpander_sdk/modules/tasks/models/__init__.py +0 -0
- xpander_sdk/modules/tasks/models/task.py +153 -0
- xpander_sdk/modules/tasks/models/tasks_list.py +107 -0
- xpander_sdk/modules/tasks/sub_modules/__init__.py +0 -0
- xpander_sdk/modules/tasks/sub_modules/task.py +887 -0
- xpander_sdk/modules/tasks/tasks_module.py +492 -0
- xpander_sdk/modules/tasks/utils/__init__.py +0 -0
- xpander_sdk/modules/tasks/utils/files.py +114 -0
- xpander_sdk/modules/tools_repository/__init__.py +0 -0
- xpander_sdk/modules/tools_repository/decorators/__init__.py +0 -0
- xpander_sdk/modules/tools_repository/decorators/register_tool.py +108 -0
- xpander_sdk/modules/tools_repository/models/__init__.py +0 -0
- xpander_sdk/modules/tools_repository/models/mcp.py +68 -0
- xpander_sdk/modules/tools_repository/models/tool_invocation_result.py +14 -0
- xpander_sdk/modules/tools_repository/sub_modules/__init__.py +0 -0
- xpander_sdk/modules/tools_repository/sub_modules/tool.py +578 -0
- xpander_sdk/modules/tools_repository/tools_repository_module.py +259 -0
- xpander_sdk/modules/tools_repository/utils/__init__.py +0 -0
- xpander_sdk/modules/tools_repository/utils/generic.py +57 -0
- xpander_sdk/modules/tools_repository/utils/local_tools.py +52 -0
- xpander_sdk/modules/tools_repository/utils/schemas.py +308 -0
- xpander_sdk/utils/__init__.py +0 -0
- xpander_sdk/utils/env.py +44 -0
- xpander_sdk/utils/event_loop.py +67 -0
- xpander_sdk/utils/tools.py +32 -0
- xpander_sdk-2.0.155.dist-info/METADATA +538 -0
- xpander_sdk-2.0.155.dist-info/RECORD +85 -0
- {xpander_sdk-1.60.4.dist-info → xpander_sdk-2.0.155.dist-info}/WHEEL +1 -1
- {xpander_sdk-1.60.4.dist-info → xpander_sdk-2.0.155.dist-info/licenses}/LICENSE +0 -1
- xpander_sdk/_jsii/__init__.py +0 -39
- xpander_sdk/_jsii/xpander-sdk@1.60.4.jsii.tgz +0 -0
- xpander_sdk/py.typed +0 -1
- xpander_sdk-1.60.4.dist-info/METADATA +0 -368
- xpander_sdk-1.60.4.dist-info/RECORD +0 -9
- {xpander_sdk-1.60.4.dist-info → xpander_sdk-2.0.155.dist-info}/top_level.txt +0 -0
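
The file list shows the 2.x line replacing the jsii-generated bundle (xpander_sdk/_jsii) with native Python sub-modules. A minimal sketch of the new surface, using only the Configuration and Agents APIs that appear in the agno.py hunk below; every literal value is a placeholder, not a real key, organization id, or endpoint:

import asyncio

from xpander_sdk import Configuration
from xpander_sdk.modules.agents.agents_module import Agents


async def main() -> None:
    # Configuration(api_key=..., organization_id=..., base_url=...) and
    # Agents(...).aget(agent_id=...) are taken from the agno.py hunk below;
    # the literal values here are placeholders only.
    configuration = Configuration(
        api_key="<XPANDER_API_KEY>",
        organization_id="<ORGANIZATION_ID>",
        base_url="<XPANDER_BASE_URL>",
    )
    agent = await Agents(configuration=configuration).aget(agent_id="<AGENT_ID>")
    print(agent.name, agent.model_provider, agent.model_name)


asyncio.run(main())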
xpander_sdk/modules/backend/frameworks/agno.py
@@ -0,0 +1,627 @@
+import asyncio
+import shlex
+from os import getenv, environ
+from typing import Any, Callable, Dict, List, Optional
+
+from loguru import logger
+
+from xpander_sdk import Configuration
+from xpander_sdk.models.shared import OutputFormat, ThinkMode
+from xpander_sdk.modules.agents.agents_module import Agents
+from xpander_sdk.modules.agents.models.agent import AgentGraphItemType, LLMReasoningEffort
+from xpander_sdk.modules.agents.sub_modules.agent import Agent
+from xpander_sdk.modules.backend.utils.mcp_oauth import authenticate_mcp_server
+from xpander_sdk.modules.tasks.sub_modules.task import Task
+from xpander_sdk.modules.tools_repository.models.mcp import (
+    MCPOAuthGetTokenResponse,
+    MCPOAuthResponseType,
+    MCPServerAuthType,
+    MCPServerTransport,
+    MCPServerType,
+)
+from xpander_sdk.modules.tools_repository.sub_modules.tool import Tool
+from xpander_sdk.modules.tools_repository.utils.schemas import build_model_from_schema
+from agno.agent import Agent as AgnoAgent
+from agno.team import Team as AgnoTeam
+from agno.memory import MemoryManager
+from agno.guardrails import PIIDetectionGuardrail
+from agno.guardrails import PromptInjectionGuardrail
+from agno.guardrails import OpenAIModerationGuardrail
+
+async def build_agent_args(
+    xpander_agent: Agent,
+    task: Optional[Task] = None,
+    override: Optional[Dict[str, Any]] = None,
+    tools: Optional[List[Callable]] = None,
+    is_async: Optional[bool] = True,
+) -> Dict[str, Any]:
+    model = _load_llm_model(agent=xpander_agent, override=override)
+    args: Dict[str, Any] = {
+        "id": xpander_agent.id,
+        "store_events": True
+    }
+
+    _configure_output(args=args, agent=xpander_agent, task=task)
+    _configure_session_storage(args=args, agent=xpander_agent, task=task)
+    _configure_agentic_memory(args=args, agent=xpander_agent, task=task)
+    _configure_tool_calls_compression(args=args, agent=xpander_agent)
+    await _attach_async_dependencies(
+        args=args, agent=xpander_agent, task=task, model=model, is_async=is_async
+    )
+    _configure_knowledge_bases(args=args, agent=xpander_agent)
+    _configure_additional_context(args=args, agent=xpander_agent, task=task)
+    # Configure pre-hooks (guardrails, etc.)
+    _configure_pre_hooks(args=args, agent=xpander_agent, model=model)
+
+    args["tools"] = await _resolve_agent_tools(agent=xpander_agent, task=task)
+
+
+    if tools and len(tools) != 0:
+        args["tools"].extend(tools)
+
+    should_use_reasoning_tools = True if xpander_agent.agno_settings.reasoning_tools_enabled else False
+    if task and task.think_mode:
+        if task.think_mode == ThinkMode.Harder:
+            should_use_reasoning_tools = True
+
+    if not xpander_agent.is_a_team and should_use_reasoning_tools:
+        from agno.tools.reasoning import ReasoningTools
+        args["tools"].append(
+            ReasoningTools(
+                enable_think=True,
+                enable_analyze=True,
+                add_instructions=True,
+                add_few_shot=True,
+                instructions="use 'think' and 'analyze' ONLY when its not a simple task of 'hi', 'what can you do' and such low complexity tasks"
+            )
+        )
+
+    # team
+    if xpander_agent.is_a_team:
+        sub_agents = xpander_agent.graph.sub_agents
+
+        # load sub agents
+        sub_agents = await asyncio.gather(
+            *[
+                Agents(
+                    configuration=Configuration(
+                        api_key=xpander_agent.configuration.api_key,
+                        organization_id=xpander_agent.configuration.organization_id,
+                        base_url=xpander_agent.configuration.base_url
+                    )
+                ).aget(agent_id=sub_agent_id)
+                for sub_agent_id in sub_agents
+            ]
+        )
+        if sub_agents and len(sub_agents):
+            base_state = xpander_agent.configuration.state.model_copy()
+            for sub_agent in sub_agents:
+                sub_agent.configuration.state.task = base_state.task
+        # convert to members
+        members = await asyncio.gather(
+            *[
+                build_agent_args(xpander_agent=sub_agent, override=override, task=task)
+                for sub_agent in sub_agents
+            ]
+        )
+
+        args.update(
+            {
+                "members": [
+                    AgnoAgent(**member) if "id" in member else AgnoTeam(**member)
+                    for member in members
+                ],
+                "add_member_tools_to_context": True,
+                "share_member_interactions": True,
+                "show_members_responses": True,
+            }
+        )
+
+    args.update(
+        {
+            "name": xpander_agent.name,
+            "model": model,
+            "description": xpander_agent.instructions.description,
+            "instructions": xpander_agent.instructions.instructions,
+            "expected_output": (
+                task.expected_output
+                if task and task.expected_output
+                else xpander_agent.expected_output
+            ),
+            "add_datetime_to_context": True,
+        }
+    )
+
+    if xpander_agent.is_a_team and xpander_agent.expected_output and len(xpander_agent.expected_output) != 0:
+        args["instructions"] += f"""\n
+        <expected_output>
+        {xpander_agent.expected_output}
+        </expected_output>
+        """
+
+    if override:
+        args.update(override)
+
+    # append tools hooks
+    async def on_tool_call_hook(
+        function_name: str, function_call: Callable, arguments: Dict[str, Any]
+    ):
+        # preflight and monitoring + metrics
+        try:
+            matched_tool = (
+                (
+                    xpander_agent.tools.get_tool_by_id(tool_id=function_name)
+                    or xpander_agent.tools.get_tool_by_name(tool_name=function_name)
+                )
+                if xpander_agent.tools and len(xpander_agent.tools.list) != 0
+                else None
+            )
+        except Exception:
+            pass
+
+        error = None
+        try:
+            # Call the function
+            if asyncio.iscoroutinefunction(function_call):
+                result = await function_call(**arguments)
+            else:
+                result = function_call(**arguments)
+
+        except Exception as e:
+            error = str(e)
+            raise
+        finally:
+            try:
+                if not matched_tool and task: # agent / mcp tool
+                    tool_instance = Tool(
+                        configuration=xpander_agent.configuration,
+                        id=function_name,
+                        name=function_name,
+                        method="GET",
+                        path=f"/tools/{function_name}",
+                        should_add_to_graph=False,
+                        is_local=True,
+                        is_synced=True,
+                        description=function_name,
+                    )
+                    parsed_result = None
+                    try:
+                        parsed_result = dict(result)
+                    except Exception:
+                        parsed_result = result
+
+                    await tool_instance.agraph_preflight_check(
+                        agent_id=xpander_agent.id,
+                        configuration=tool_instance.configuration,
+                        task_id=task.id,
+                        payload={"input": arguments, "output": error or parsed_result} if isinstance(arguments, dict) else None
+                    )
+            except Exception:
+                pass
+
+        # Return the result
+        return result
+
+    if not "tool_hooks" in args:
+        args["tool_hooks"] = []
+
+    # disable hooks for NeMo due to issue with tool_hooks and NeMo
+    if xpander_agent.using_nemo == False:
+        args["tool_hooks"].append(on_tool_call_hook)
+
+    # fix gpt-5 temp
+    if args["model"] and args["model"].id and args["model"].id.startswith("gpt-5"):
+        del args["model"].temperature
+
+    return args
+
+
+def _load_llm_model(agent: Agent, override: Optional[Dict[str, Any]]) -> Any:
+    """
+    Load and configure the appropriate LLM model based on the agent's provider configuration.
+
+    This function supports multiple LLM providers including OpenAI, NVIDIA NIM, and Anthropic.
+    It handles API key resolution with proper precedence based on the deployment environment
+    (xpander.ai Cloud vs local deployment).
+
+    Args:
+        agent (Agent): The agent instance containing model configuration.
+        override (Optional[Dict[str, Any]]): Optional override parameters that can
+            include a pre-configured "model" to bypass the loading logic.
+
+    Returns:
+        Any: A configured LLM model instance (OpenAIChat, Nvidia, or Claude).
+
+    Raises:
+        NotImplementedError: If the specified provider is not supported.
+
+    Supported Providers:
+        - "openai": Uses OpenAIChat with fallback API key resolution
+        - "nim": Uses NVIDIA NIM models via Nvidia class
+        - "anthropic": Uses Claude models via Anthropic integration
+
+    API Key Resolution Logic:
+        - xpander.ai Cloud: Custom credentials take precedence, fallback to environment
+        - Local deployment: Environment variables take precedence, fallback to custom
+    """
+    if override and "model" in override:
+        return override["model"]
+
+    provider = agent.model_provider.lower()
+
+    is_xpander_cloud = getenv("IS_XPANDER_CLOUD", "false") == "true"
+    has_custom_llm_key = (
+        True if agent.llm_credentials and agent.llm_credentials.value else False
+    )
+
+    def get_llm_key(env_var_name: str) -> Optional[str]:
+        """
+        Resolve API key based on deployment environment and availability.
+
+        Args:
+            env_var_name (str): Name of the environment variable containing the API key.
+
+        Returns:
+            Optional[str]: The resolved API key or None if not available.
+        """
+        env_llm_key = getenv(env_var_name)
+
+        # If no custom key available, use environment variable
+        if not has_custom_llm_key:
+            return env_llm_key
+
+        # xpander.ai Cloud: prioritize custom credentials, fallback to environment
+        if is_xpander_cloud:
+            return agent.llm_credentials.value or env_llm_key
+        else:
+            # Local deployment: prioritize environment, fallback to custom
+            return env_llm_key or agent.llm_credentials.value
+
+    llm_args = {}
+
+    if agent.llm_reasoning_effort and agent.llm_reasoning_effort != LLMReasoningEffort.Medium and agent.model_name and "gpt-5" in agent.model_name.lower():
+        llm_args = { "reasoning_effort": agent.llm_reasoning_effort.value }
+
+    if agent.llm_api_base and len(agent.llm_api_base) != 0:
+        llm_args["base_url"] = agent.llm_api_base
+
+    # OpenAI Provider - supports GPT models with dual API key fallback
+    if provider == "openai":
+        from agno.models.openai import OpenAIChat
+
+        return OpenAIChat(
+            id=agent.model_name,
+            # Try xpander.ai-specific key first, fallback to standard OpenAI key
+            api_key=get_llm_key("AGENTS_OPENAI_API_KEY")
+            or get_llm_key("OPENAI_API_KEY"),
+            temperature=0.0,
+            **llm_args
+        )
+    # Helicone
+    elif provider == "helicone":
+        from agno.models.openai.like import OpenAILike
+
+        return OpenAILike(
+            id=agent.model_name,
+            # Try xpander.ai-specific key first, fallback to standard OpenAI key
+            api_key=get_llm_key("HELICONE_API_KEY"),
+            base_url="https://ai-gateway.helicone.ai/v1",
+            **llm_args
+        )
+    # Nebius
+    elif provider == "nebius":
+        from agno.models.nebius import Nebius
+
+        return Nebius(
+            id=agent.model_name,
+            # Try xpander.ai-specific key first, fallback to standard OpenAI key
+            api_key=get_llm_key("NEBIUS_API_KEY"),
+            **llm_args
+        )
+    # OpenRouter
+    elif provider == "open_router":
+        from agno.models.openrouter import OpenRouter
+
+        return OpenRouter(
+            id=agent.model_name,
+            # Try xpander.ai-specific key first, fallback to standard OpenAI key
+            api_key=get_llm_key("OPENROUTER_API_KEY"),
+            **llm_args
+        )
+    # Google AI Studio - supports gemini models
+    elif provider == "google_ai_studio":
+        from agno.models.google import Gemini
+
+        return Gemini(
+            id=agent.model_name,
+            # Try xpander.ai-specific key first, fallback to standard OpenAI key
+            api_key=get_llm_key("GOOGLE_API_KEY"),
+            **llm_args
+        )
+    # Fireworks AI Provider
+    elif provider == "fireworks":
+        from agno.models.fireworks import Fireworks
+
+        return Fireworks(
+            id=agent.model_name,
+            # Try xpander.ai-specific key first, fallback to standard OpenAI key
+            api_key=get_llm_key("FIREWORKS_API_KEY"),
+            **llm_args
+        )
+    # NVIDIA NIM Provider - supports NVIDIA's inference microservices
+    elif provider == "nim":
+        from agno.models.nvidia import Nvidia
+
+        return Nvidia(
+            id=agent.model_name,
+            api_key=get_llm_key("NVIDIA_API_KEY"),
+            temperature=0.0,
+            **llm_args
+        )
+    # Amazon Bedrock Provider
+    elif provider == "amazon_bedrock":
+        from agno.models.aws.bedrock import AwsBedrock
+        environ["AWS_BEARER_TOKEN_BEDROCK"] = get_llm_key("AWS_BEARER_TOKEN_BEDROCK") # set to env
+        return AwsBedrock(
+            id=agent.model_name,
+            temperature=0.0,
+            **llm_args
+        )
+
+    # Anthropic Provider - supports Claude models
+    elif provider == "anthropic":
+        from agno.models.anthropic import Claude
+
+        return Claude(
+            id=agent.model_name,
+            api_key=get_llm_key("ANTHROPIC_API_KEY"),
+            temperature=0.0,
+        )
+
+    raise NotImplementedError(
+        f"Provider '{provider}' is not supported for agno agents."
+    )
+
+
+def _configure_output(args: Dict[str, Any], agent: Agent, task: Optional[Task]) -> None:
+    if agent.output.use_json_mode:
+        args["use_json_mode"] = True
+        args["output_schema"] = agent.output.output_schema
+    elif agent.output.is_markdown:
+        args["markdown"] = True
+
+    if task and task.output_format != agent.output_format:
+        if task.output_format == OutputFormat.Json:
+            args["use_json_mode"] = True
+            args["markdown"] = False
+            args["output_schema"] = build_model_from_schema(
+                "StructuredOutput", task.output_schema
+            )
+        elif task.output_format == OutputFormat.Markdown:
+            args["markdown"] = True
+        else:
+            args["markdown"] = False
+
+
+def _configure_session_storage(
+    args: Dict[str, Any], agent: Agent, task: Optional[Task]
+) -> None:
+    if not agent.agno_settings.session_storage:
+        return
+
+    args["add_history_to_context"] = True
+    args["session_id"] = task.id if task else None
+    args["user_id"] = (
+        task.input.user.id if task and task.input and task.input.user else None
+    )
+
+    if agent.agno_settings.session_summaries:
+        args["enable_session_summaries"] = True
+    if agent.agno_settings.num_history_runs:
+        args["num_history_runs"] = agent.agno_settings.num_history_runs
+    if agent.agno_settings.max_tool_calls_from_history and agent.agno_settings.max_tool_calls_from_history >= 1:
+        args["max_tool_calls_from_history"] = agent.agno_settings.max_tool_calls_from_history
+
+
+def _configure_tool_calls_compression(
+    args: Dict[str, Any], agent: Agent
+) -> None:
+    if agent.agno_settings.tool_calls_compression and agent.agno_settings.tool_calls_compression.enabled:
+        from agno.compression.manager import CompressionManager
+        args["compression_manager"] = CompressionManager(
+            compress_tool_results=True,
+            compress_tool_results_limit=agent.agno_settings.tool_calls_compression.threshold,
+            compress_tool_call_instructions=agent.agno_settings.tool_calls_compression.instructions,
+        )
+
+def _configure_agentic_memory(
+    args: Dict[str, Any], agent: Agent, task: Optional[Task]
+) -> None:
+    user = task.input.user if task and task.input and task.input.user else None
+    user_memories_enabled = True if agent.agno_settings.user_memories and user and user.id else False
+    agent_memories_enabled = True if agent.agno_settings.agent_memories else False
+
+    if user_memories_enabled:
+        args["enable_user_memories"] = True
+        args["memory_manager"] = MemoryManager(delete_memories=True,clear_memories=True)
+        args["enable_agentic_memory"] = agent.agno_settings.agentic_memory
+
+    if agent_memories_enabled:
+        args["add_culture_to_context"] = True
+
+        if agent.agno_settings.agentic_culture:
+            args["enable_agentic_culture"] = True
+        else:
+            args["update_cultural_knowledge"] = True
+
+
+    if user: # add user details to the agent
+        args["additional_context"] = f"User details: {user.model_dump_json()}"
+
+
+async def _attach_async_dependencies(
+    args: Dict[str, Any], agent: Agent, task: Optional[Task], model: Any, is_async: Optional[bool] = True
+) -> None:
+    user = task.input.user if task and task.input and task.input.user else None
+    should_use_db = True if (agent.agno_settings.user_memories and user and user.id) or agent.agno_settings.agent_memories else False
+    if agent.agno_settings.session_storage or should_use_db:
+        args["db"] = await agent.aget_db(async_db=is_async)
+
+def _configure_knowledge_bases(args: Dict[str, Any], agent: Agent) -> None:
+    if agent.knowledge_bases:
+        args["knowledge_retriever"] = agent.knowledge_bases_retriever()
+        args["search_knowledge"] = True
+
+
+def _configure_additional_context(
+    args: Dict[str, Any], agent: Agent, task: Optional[Task]
+) -> None:
+    if task and task.additional_context:
+        existing = args.get("additional_context", "")
+        args["additional_context"] = (
+            f"{existing}\n{task.additional_context}"
+            if existing
+            else task.additional_context
+        )
+
+    if agent.agno_settings.tool_call_limit:
+        args["tool_call_limit"] = agent.agno_settings.tool_call_limit
+
+
+def _configure_pre_hooks(args: Dict[str, Any], agent: Agent, model: Any) -> None:
+    """
+    Configure pre-hooks (guardrails) for the agent based on settings.
+
+    Pre-hooks are executed before the agent processes input. This includes
+    guardrails like PII detection, prompt injection detection, and content
+    moderation that validate or transform input.
+
+    Args:
+        args (Dict[str, Any]): Agent configuration arguments to be updated.
+        agent (Agent): The agent instance containing pre-hook settings.
+    """
+    # Add PII detection guardrail with optional masking
+    if agent.agno_settings.pii_detection_enabled:
+        if "pre_hooks" not in args:
+            args["pre_hooks"] = []
+
+        pii_guardrail = PIIDetectionGuardrail(
+            mask_pii=agent.agno_settings.pii_detection_mask
+        )
+        args["pre_hooks"].append(pii_guardrail)
+
+    # Add prompt injection detection guardrail
+    if agent.agno_settings.prompt_injection_detection_enabled:
+        if "pre_hooks" not in args:
+            args["pre_hooks"] = []
+
+        prompt_injection_guardrail = PromptInjectionGuardrail()
+        args["pre_hooks"].append(prompt_injection_guardrail)
+
+    # Add OpenAI moderation guardrail
+    if agent.agno_settings.openai_moderation_enabled:
+        if "pre_hooks" not in args:
+            args["pre_hooks"] = []
+
+        moderation_kwargs = {}
+        try:
+            if model and model.provider == "OpenAI":
+                moderation_kwargs["api_key"] = model.api_key
+        except:
+            pass
+
+        if agent.agno_settings.openai_moderation_categories:
+            moderation_kwargs["raise_for_categories"] = agent.agno_settings.openai_moderation_categories
+
+        openai_moderation_guardrail = OpenAIModerationGuardrail(**moderation_kwargs)
+        args["pre_hooks"].append(openai_moderation_guardrail)
+
+
+async def _resolve_agent_tools(agent: Agent, task: Optional[Task] = None) -> List[Any]:
+    mcp_servers = agent.mcp_servers
+
+    # combine task mcps and agent mcps
+    if task and task.mcp_servers:
+        mcp_servers.extend(task.mcp_servers)
+
+    if not mcp_servers:
+        return agent.tools.functions
+
+    # Import MCP only if mcp_servers is present
+    from agno.tools.mcp import (
+        MCPTools,
+        SSEClientParams,
+        StreamableHTTPClientParams,
+    )
+    from mcp import StdioServerParameters
+
+    mcp_tools: List[MCPTools] = []
+    is_xpander_cloud = getenv("IS_XPANDER_CLOUD", "false") == "true"
+
+    for mcp in mcp_servers:
+        transport = mcp.transport.value.lower()
+        if mcp.type == MCPServerType.Local:
+
+            # protection for serverless xpander
+            is_aws_mcp = (
+                True if mcp.command and "aws-api-mcp-server" in mcp.command else False
+            )
+            if is_aws_mcp and is_xpander_cloud:
+                logger.warning(
+                    f"skipping aws mcp on agent {agent.id} due to xpander serverless"
+                )
+                continue

+            command_parts = shlex.split(mcp.command)
+            mcp_tools.append(
+                MCPTools(
+                    transport=transport,
+                    server_params=StdioServerParameters(
+                        command=command_parts[0],
+                        args=command_parts[1:],
+                        env=mcp.env_vars,
+                    ),
+                    include_tools=mcp.allowed_tools or None,
+                    timeout_seconds=120,
+                    tool_name_prefix="mcp_tool"
+                )
+            )
+        elif mcp.url:
+            params_cls = (
+                SSEClientParams
+                if mcp.transport == MCPServerTransport.SSE
+                else StreamableHTTPClientParams
+            )
+
+            # handle mcp auth
+            if mcp.auth_type == MCPServerAuthType.OAuth2:
+                if not task:
+                    raise ValueError("MCP server with OAuth authentication detected but task not sent")
+
+                if not task.input.user or not task.input.user.id:
+                    raise ValueError("MCP server with OAuth authentication detected but user id not set on the task (task.input.user.id)")
+
+                auth_result: MCPOAuthGetTokenResponse = await authenticate_mcp_server(mcp_server=mcp,task=task,user_id=task.input.user.id)
+                if not auth_result:
+                    raise ValueError("MCP Server authentication failed")
+                if auth_result.type != MCPOAuthResponseType.TOKEN_READY:
+                    raise ValueError("MCP Server authentication timeout")
+                mcp.api_key = auth_result.data.access_token
+
+            if mcp.api_key:
+                if not mcp.headers:
+                    mcp.headers = {}
+                mcp.headers["Authorization"] = f"Bearer {mcp.api_key}"
+            mcp_tools.append(
+                MCPTools(
+                    transport=transport,
+                    server_params=params_cls(url=mcp.url, headers=mcp.headers),
+                    include_tools=mcp.allowed_tools or None,
+                    timeout_seconds=120,
+                    tool_name_prefix="mcp_tool"
+                )
+            )
+
+    return agent.tools.functions + await asyncio.gather(
+        *[mcp.__aenter__() for mcp in mcp_tools]
+    )
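
How the returned argument dict is consumed is only partly visible in this diff: the team branch wraps each member dict with AgnoAgent(**member) or AgnoTeam(**member). A minimal sketch along the same lines, assuming an already-loaded xpander Agent and that a team-level args dict is fed to AgnoTeam in the same way:

from agno.agent import Agent as AgnoAgent
from agno.team import Team as AgnoTeam

from xpander_sdk.modules.backend.frameworks.agno import build_agent_args


async def to_agno(xpander_agent, task=None):
    # build_agent_args returns the kwargs dict that the Agno constructors consume.
    args = await build_agent_args(xpander_agent=xpander_agent, task=task)
    # Assumption: a team-flavored args dict maps to AgnoTeam, mirroring how the
    # "members" list in the hunk above wraps each member dict.
    return AgnoTeam(**args) if xpander_agent.is_a_team else AgnoAgent(**args)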
xpander_sdk/modules/backend/frameworks/dispatch.py
@@ -0,0 +1,36 @@
+from typing import Any, Callable, Dict, List, Optional
+
+from xpander_sdk.models.frameworks import Framework
+from xpander_sdk.modules.agents.sub_modules.agent import Agent
+from xpander_sdk.modules.tasks.sub_modules.task import Task
+
+async def dispatch_get_args(
+    agent: Agent,
+    task: Optional[Task] = None,
+    override: Optional[Dict[str, Any]] = None,
+    tools: Optional[List[Callable]] = None,
+    is_async: Optional[bool] = True,
+) -> Dict[str, Any]:
+    """
+    Dispatch to the correct framework-specific argument resolver.
+
+    Args:
+        agent (Agent): The agent to generate args for.
+        task (Optional[Task]): Optional runtime task.
+        override (Optional[Dict[str, Any]]): Dict of override values.
+        tools (Optional[List[Callable]]): Optional additional tools to be added to the agent arguments.
+        is_async (Optional[bool]): Is in Async Context?.
+
+    Returns:
+        Dict[str, Any]: Arguments for instantiating the framework agent.
+    """
+    agent.framework = Framework.Agno
+    match agent.framework:
+        case Framework.Agno:
+            from .agno import build_agent_args
+            return await build_agent_args(xpander_agent=agent, task=task, override=override, tools=tools, is_async=is_async)
+        # case Framework.Langchain: # PLACEHOLDER
+        #     from .langchain import build_agent_args
+        #     return await build_agent_args(xpander_agent=agent, task=task, override=override)
+        case _:
+            raise NotImplementedError(f"Framework '{agent.framework.value}' is not supported.")
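
dispatch_get_args is the framework-neutral entry point; it currently pins agent.framework to Framework.Agno before matching, so every call resolves to the Agno builder above and any other framework raises NotImplementedError. A short sketch of calling it, assuming the agent and task objects come from the Agents and Tasks modules listed at the top of this diff:

from xpander_sdk.modules.backend.frameworks.dispatch import dispatch_get_args


async def framework_args(agent, task=None, extra_tools=None):
    # Forwards to build_agent_args for Framework.Agno; the optional tools list
    # is appended to args["tools"] by the Agno builder shown above.
    return await dispatch_get_args(agent=agent, task=task, tools=extra_tools)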