realtimex-agent-a2a-agent 0.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,729 @@
+ from any_agent import AgentConfig, AnyAgent
+ from any_agent.config import MCPStdio
+ import json
+ import os
+ from typing import Callable, Dict, Literal, Optional, Union, Any
+
+ from .tools import send_email
+ from .callbacks.tool_execution import ShowToolCalling
+ from any_agent.callbacks import get_default_callbacks
+ from mem0 import Memory
+ from any_agent.tools import a2a_tool_async
+ from any_agent.serving import A2AServingConfig
+
+
+ def get_user_dir():
+     return os.path.join(os.path.expanduser("~"),".realtimex.ai")
+
+ def get_base_user_dir():
+     return os.path.join(os.path.expanduser("~"))
+
+ def get_uvx_executable():
+     unix_realtimex_uvx_path = os.path.join(get_user_dir(),"Resources","envs","bin","uvx")
+     if os.path.exists(unix_realtimex_uvx_path):
+         return unix_realtimex_uvx_path
+     win_realtimex_uvx_path = os.path.join(get_user_dir(),"Resources","envs","Scripts","uvx.exe")
+     if os.path.exists(win_realtimex_uvx_path):
+         return win_realtimex_uvx_path
+     return "uvx"
+
+ def get_nvm_dir():
+     path = os.path.join(get_base_user_dir(),".nvm")
+     if os.path.exists(path):
+         return path
+     path = os.path.join('c:', os.sep, "nvm")
+     if os.path.exists(path):
+         return path
+     return ""
+
+
+ def get_nvm_inc():
+     # e.g. /Users/phuongnguyen/.nvm/versions/node/v22.16.0/include/node
+     path = os.path.join(get_nvm_dir(),"versions","node","v22.16.0","include","node")
+     if os.path.exists(path):
+         return path
+     path = os.path.join('c:', os.sep, "nvm")
+     if os.path.exists(path):
+         return path
+     return ""
+
+ def get_nvm_bin():
+     # e.g. /Users/phuongnguyen/.nvm/versions/node/v22.16.0/bin
+     path = os.path.join(get_nvm_dir(),"versions","node","v22.16.0","bin")
+     if os.path.exists(path):
+         return path
+     path = os.path.join('c:', os.sep, "nvm")
+     if os.path.exists(path):
+         return path
+     return ""
+
+ def get_npx_executable():
+     unix_realtimex_npx_path = os.path.join(get_base_user_dir(),".nvm","versions","node","v22.16.0","bin","npx")
+     if os.path.exists(unix_realtimex_npx_path):
+         return unix_realtimex_npx_path
+     win_realtimex_npx_path = os.path.join('c:', os.sep, "nvm", "v22.16.0", "npx.cmd")
+     if os.path.exists(win_realtimex_npx_path):
+         return win_realtimex_npx_path
+     return "npx"
+
+
+ def get_deepagents_agent_path(scope, agent_id, workspace_slug=None):
+     """
+     Get the agent.md path for DeepAgents.
+
+     Args:
+         scope: 'global' or 'workspace'
+         agent_id: The agent identifier
+         workspace_slug: Required when scope is 'workspace'
+
+     Returns:
+         Path string if file exists, None otherwise
+     """
+     if scope == "global":
+         path = os.path.join(
+             get_user_dir(), "Resources", "agent-skills", "global", agent_id, "agent.md"
+         )
+     elif scope == "workspace" and workspace_slug:
+         path = os.path.join(
+             get_user_dir(), "Resources", "agent-skills", "workspaces", workspace_slug, agent_id, "agent.md"
+         )
+     else:
+         return None
+
+     return path if os.path.isfile(path) else None
+
+
+ def get_deepagents_skills_dir(scope, agent_id, workspace_slug=None):
+     """
+     Get the skills directory path for DeepAgents.
+
+     Args:
+         scope: 'global' or 'workspace'
+         agent_id: The agent identifier
+         workspace_slug: Required when scope is 'workspace'
+
+     Returns:
+         Path string if directory exists, None otherwise
+     """
+     if scope == "global":
+         path = os.path.join(
+             get_user_dir(), "Resources", "agent-skills", "global", agent_id, "skills"
+         )
+     elif scope == "workspace" and workspace_slug:
+         path = os.path.join(
+             get_user_dir(), "Resources", "agent-skills", "workspaces", workspace_slug, agent_id, "skills"
+         )
+     else:
+         return None
+
+     return path if os.path.isdir(path) else None
+
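For illustration only (not part of the packaged file), a minimal sketch of how the two DeepAgents path resolvers above might be called, assuming the agent-skills layout under ~/.realtimex.ai exists; "my-agent" and "my-workspace" are hypothetical identifiers. Both helpers return None when the expected file or directory is missing, so the result can be used directly as an opt-in flag.

# Illustrative usage sketch; the identifiers below are placeholders, not real agents.
agent_md = get_deepagents_agent_path("global", "my-agent")
skills_dir = get_deepagents_skills_dir("workspace", "my-agent", workspace_slug="my-workspace")

if agent_md:
    print(f"agent.md found at {agent_md}")        # only set when the file exists
if skills_dir:
    print(f"skills directory at {skills_dir}")    # only set when the directory exists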
+ class RealTimeXAgent():
+     def __init__(
+         self, current_session_id
+     ):
+         self.agent_id: Optional[str] = None
+         self.agent_data: Optional[Dict] = None
+         self.agent: Optional[AnyAgent] = None
+         self.agent_name: Optional[str] = None
+         self.system_prompt: Optional[str] = None
+         self.agent_framework: Optional[str] = None
+         self.agent_description: Optional[str] = None
+         self.default_model: Optional[str] = None
+         self.recommended_agent_flows = None
+         self.recommended_aci_mcp_apps = None
+         self.recommended_local_mcp_apps = None
+         self.recommended_team_members = None
+         self.memory: Optional[Memory] = None
+
+         self.current_session_id = current_session_id
+
+     async def prepare_llm(self, provider_name,model_name,api_base,api_key):
+         def get_provider_name(provider_name):
+             if provider_name == "realtimexai":
+                 return "openai"
+             elif provider_name == "litellm":
+                 return "openai"
+             elif provider_name == "openai":
+                 return "openai"
+             elif provider_name == "ollama":
+                 return "ollama"
+             elif provider_name == "gemini":
+                 return "google"
+             elif provider_name == "google":
+                 return "google"
+
+         def get_model_name(model_name):
+             return model_name
+
+         provider_name = get_provider_name(provider_name)
+         model_name = get_model_name(model_name)
+
+         # if provider_name == "openai":
+         #     os.environ['OPENAI_BASE_URL'] = api_base
+         #     os.environ['OPENAI_API_KEY'] = api_key
+
+         #     return {
+         #         "api_base": api_base,
+         #         "api_key": api_key,
+         #         "model_id": f"{provider_name}:{model_name}"
+         #     }
+
+         return {
+             "api_base": api_base,
+             "api_key": api_key,
+             "model_id": f"{provider_name}/{model_name}"
+         }
+
+         # return {
+         #     "api_base": api_base,
+         #     "api_key": api_key,
+         #     "model_id": model_name
+         # }
+
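To make the mapping above concrete, an illustrative trace of what prepare_llm returns (the instance, base URL and key are placeholders): "realtimexai" and "litellm" both normalize to "openai", and the model_id is composed as "<provider>/<model>". Providers not listed in get_provider_name fall through to None, which would yield a "None/<model>" model_id.

# Illustrative only; agent is an already-constructed RealTimeXAgent instance.
llm_config = await agent.prepare_llm(
    provider_name="realtimexai",
    model_name="gpt-4o-mini",
    api_base="http://localhost:4000",
    api_key="sk-placeholder",
)
# llm_config == {
#     "api_base": "http://localhost:4000",
#     "api_key": "sk-placeholder",
#     "model_id": "openai/gpt-4o-mini",
# }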
+     async def create_subagents(self,instructions=None,tools=[],llm_config=None):
+         from openai import OpenAI
+         client = OpenAI(
+             api_key=llm_config["api_key"],
+             base_url=llm_config["api_base"],
+         )
+
+         system_prompt = self.system_prompt
+         if instructions:
+             system_prompt = instructions
+
+         schema = {
+             "title": "subagents",
+             "description": "The list of subagents to do the task well and effectively.",
+             "required": [
+                 "subagents",
+             ],
+             "type": "object",
+             "properties": {
+                 "subagents":{
+                     "type": "array",
+                     "description": "The list of subagents to do the task well and effectively.",
+                     "items": {
+                         "type": "object",
+                         "properties": {
+                             "name": {
+                                 "type": "string",
+                                 "description": "The name of the sub-agent"
+                             },
+                             "description": {
+                                 "type": "string",
+                                 "description": "A description of the sub-agent"
+                             },
+                             "prompt": {
+                                 "type": "string",
+                                 "description": "The prompt used by the sub-agent"
+                             },
+                             # "tools": {
+                             #     "type": "array",
+                             #     "description": "Optional list of tools name the sub-agent can use",
+                             #     "items": {
+                             #         "type": "string"
+                             #     }
+                             # }
+                         },
+                         "required": ["name", "description", "prompt"],
+                         "additionalProperties": False
+                     }
+                 }
+             },
+             "additionalProperties": False
+         }
+
+         # print("schema", json.dumps(schema))
+
+         response_format = { "type": "json_schema", "json_schema": {"strict": True, "name": schema["title"], "schema": schema}}
+
+         # tools_str = ""
+         # for tool in tools:
+         #     tools_str = f"""{tools_str}
+         #     - {tool.__name__}: {tool.__doc__}"""
+
+         completion = client.beta.chat.completions.parse(
+             model=llm_config["model_id"],
+             messages=[{"role": "system", "content":
+ f"""You are tasked with designing a small team of specialized subagents to work together under the guidance of the main agent.
+
+ * The main agent’s role and purpose is defined by:
+ {system_prompt}
+
+ Your job is to create **no more than 5 subagents**. Each subagent must include:
+
+ 1. Name: lowercase, short, clear, and distinct, only alphabets and underscore allowed.
+ 2. Description: what this subagent is specialized at and how it contributes to the team.
+ 3. System Prompt: clear instructions that define the subagent’s behavior, style, and responsibilities.
+
+ Guidelines:
+
+ * Each subagent should have a well-defined, non-overlapping role.
+ * The team should collectively cover all major aspects required for the main agent’s purpose.
+ * Avoid redundancy—each subagent must bring unique value.
+ * Keep the team **small (≤5 subagents)** but **effective**.
+
+ Finally, return the result as a list of subagent objects in JSON format."""},
+             {"role": "user", "content": "Create a team of subagents to do task: {}"},
+             ],
+
+             response_format=response_format,
+         )
+
+
+
+         result = json.loads(completion.choices[0].message.content)
+
+         return result["subagents"]
+
+
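For reference, the structured-output schema above constrains the model to a payload of this shape; the example below is illustrative, not actual model output, and create_subagents returns the inner list (result["subagents"]).

# Illustrative value of result["subagents"]; names and prompts are made up.
[
    {
        "name": "research_assistant",
        "description": "Gathers and condenses background information for the main agent.",
        "prompt": "You are a focused research assistant. Collect relevant facts and summarize them clearly.",
    },
    {
        "name": "report_writer",
        "description": "Turns the gathered findings into a clear, well-structured deliverable.",
        "prompt": "You write concise, well-organized reports from the material you are given.",
    },
]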
+     async def prepare_realtimex_agent(self, agent_id, agent_data):
+         # directus_client = DirectusClient(server_url = directus_server_url,access_token = directus_access_token)
+
+         # d_agent = directus_client.get_directus_item_by_id("realtimex_agents",agent_id)
+
+         agent_name = agent_data["name"]
+         agent_description = agent_data["description"]
+         system_prompt = agent_data["instructions"]
+         agent_framework = agent_data["execution_config"]["framework"]
+         default_model = agent_data["execution_config"]["models"]["default_model"]
+
+         recommended_agent_flows = None
+         if "recommended_agent_flows" in agent_data:
+             recommended_agent_flows = agent_data["recommended_agent_flows"]
+
+         recommended_aci_mcp_apps = None
+         if "recommended_aci_mcp_apps" in agent_data:
+             recommended_aci_mcp_apps = agent_data["recommended_aci_mcp_apps"]
+
+         recommended_local_mcp_apps = None
+         if "recommended_local_mcp_apps" in agent_data:
+             recommended_local_mcp_apps = agent_data["recommended_local_mcp_apps"]
+
+         recommended_team_members = None
+         if "recommended_team_members" in agent_data:
+             recommended_team_members = agent_data["recommended_team_members"]
+
+         self.agent_name = agent_name
+         self.system_prompt = system_prompt
+         self.agent_framework = agent_framework
+         self.agent_description = agent_description
+         self.default_model = default_model
+         self.agent_id = agent_id
+         self.agent_data = agent_data
+         self.recommended_agent_flows = recommended_agent_flows
+         self.recommended_aci_mcp_apps = recommended_aci_mcp_apps
+         self.recommended_local_mcp_apps = recommended_local_mcp_apps
+         self.recommended_team_members = recommended_team_members
+
+     async def prepare_memory(self,memory_id, memory_path, litellm_base_url, litellm_api_key):
+         pass
+         # config_dict = {
+         #     "version": "v1.1",
+         #     "vector_store": {
+         #         "provider": "chroma",
+         #         "config": {
+         #             "collection_name": memory_id,
+         #             "path": memory_path,
+         #         }
+         #     },
+         #     "llm": {"provider": "openai", "config": {"api_key": litellm_api_key, "openai_base_url": litellm_base_url, "temperature": 0.2, "model": "gpt-4o-mini"}},
+         #     "embedder": {"provider": "openai", "config": {"api_key": litellm_api_key, "openai_base_url": litellm_base_url, "model": "text-embedding-3-small"}},
+         #     "history_db_path": "",
+         # }
+         # print("config_dict",config_dict)
+         # memory = Memory.from_config(config_dict=config_dict)
+         # self.memory = memory
+
+     async def load_default_callbacks(self):
+         return [ShowToolCalling(),*get_default_callbacks()]
+
+     async def load_default_tools(self):
+         return []
+
+     async def load_aci_mcp_tools(self, linked_account_owner_id, aci_api_key):
+         from any_agent.config import MCPStdio
+         mcp_apps = []
+         for app in self.recommended_aci_mcp_apps:
+             if "realtimex_mcp_server_id" in app:
+                 mcp_apps.append(app["realtimex_mcp_server_id"]["name"])
+             else:
+                 mcp_apps.append(app["name"])
+         # mcp_apps = [app["realtimex_mcp_server_id"]["name"] for app in self.recommended_aci_mcp_apps]
+         if len(mcp_apps)>0:
+             mcp = MCPStdio(
+                 # command=get_uvx_executable(),
+                 # args=["aci-mcp@latest", 'apps-server', '--apps', ','.join(mcp_apps) , '--linked-account-owner-id', linked_account_owner_id],
+                 command="aci-mcp",
+                 args=['apps-server', '--apps', ','.join(mcp_apps) , '--linked-account-owner-id', linked_account_owner_id],
+                 client_session_timeout_seconds=300,
+                 env={
+                     "ACI_SERVER_URL":"https://mcp.realtimex.ai/v1/",
+                     "ACI_API_KEY":aci_api_key
+                 }
+             )
+             return mcp
+         return None
+
+     async def load_local_mcp_tools(self, workspace_slug, thread_id):
+         from any_agent.config import MCPStdio
+         mcp_apps = [app["config"] for app in self.recommended_local_mcp_apps]
+         # mcp_apps = [
+         #     {"command":"uvx","args":["mcp-shell-server"],"env":{"ALLOW_COMMANDS":"ls,cat,pwd,grep,wc,touch,find"}}
+         # ]
+         mcps = []
+         default_env = {
+             'NVM_INC': get_nvm_inc(),
+             'NVM_CD_FLAGS': '-q',
+             'NVM_DIR': get_nvm_dir(),
+             'PATH': f'{os.environ.copy()["PATH"]}{os.pathsep}{get_nvm_bin()}',
+             'NVM_BIN': get_nvm_bin(),
+             "SESSION_ID": self.current_session_id,
+             "WORKSPACE_SLUG": workspace_slug,
+             "THREAD_ID": thread_id,
+             "AGENT_ID": self.agent_id,
+         }
+         for mcp_app in mcp_apps:
+             if mcp_app["command"] == "uvx":
+                 mcp_app["command"] = get_uvx_executable()
+             if mcp_app["command"] == "npx":
+                 mcp_app["command"] = get_npx_executable()
+             if "env" not in mcp_app:
+                 mcp_app["env"] = {}
+             mcp_app["env"] = {**mcp_app["env"],**default_env}
+             # print("mcp_app",mcp_app)
+             mcp = MCPStdio(
+                 **mcp_app,
+                 client_session_timeout_seconds=300,
+             )
+             mcps.append(mcp)
+         return mcps
+
+     async def load_mcp_agent_flow_tools(self, linked_account_owner_id, aci_api_key, litellm_base_url, litellm_api_key, realtimex_access_token, workspace_slug, thread_id):
+         from any_agent.config import MCPStdio
+         agent_flow_ids = [flow["realtimex_agent_flows_id"]["id"] for flow in self.recommended_agent_flows]
+         if len(agent_flow_ids)>0:
+             mcp = MCPStdio(
+                 # command=get_uvx_executable(),
+                 # args=["--from", "git+https://oauth2:5yTHSE9k34jbWgzXmsxQ@rtgit.rta.vn/rtlab/rtwebteam/mcp-servers/realtimex-ai-agent-flows@main","agent-flows-mcp-server",'--flows',','.join(agent_flow_ids)],
+                 command="agent-flows-mcp-server",
+                 args=['--flows',','.join(agent_flow_ids)],
+                 client_session_timeout_seconds=300,
+                 env={
+                     "AGENT_FLOWS_API_KEY": realtimex_access_token,
+                     "LITELLM_API_KEY": litellm_api_key,
+                     "LITELLM_API_BASE": litellm_base_url,
+                     "MCP_ACI_API_KEY": aci_api_key,
+                     "MCP_ACI_LINKED_ACCOUNT_OWNER_ID": linked_account_owner_id,
+                     "SESSION_ID": self.current_session_id,
+                     "WORKSPACE_SLUG": workspace_slug,
+                     "THREAD_ID": thread_id,
+                     "AGENT_ID": self.agent_id,
+                     # "AGENT_FLOWS_BASE_URL": "https://your-custom-instance.com"  # Optional
+                 }
+             )
+             return mcp
+         return None
+
+     async def load_a2a_agents_as_tools(self):
+         def get_free_port():
+             import socket
+             sock = socket.socket()
+             sock.bind(('', 0))
+             return sock.getsockname()[1]
+
+
+         tools = []
+         for a2a_agent in self.recommended_team_members:
+             agent_id = a2a_agent["agent_id"]
+             agent_data = a2a_agent["agent_data"]
+             a2a_port = get_free_port()
+             # Create agent
+             agent = RealTimeXAgent(self.current_session_id)
+
+             await agent.load_default_agent(agent_id, agent_data, payload=a2a_agent)
+
+             agent_server_url = await agent.serve_as_a2a(
+                 a2a_serving_config={"port":a2a_port,"stream_tool_usage":True},
+             )
+
+             # print(agent_server_url)
+
+             tools.append(
+                 await a2a_tool_async(
+                     agent_server_url,
+                     http_kwargs={
+                         "timeout": 300
+                     },  # This gives the team-member agent up to 300 seconds to respond to each request
+                 )
+             )
+
+
+         return tools
+
+     async def load_knowledges(self,query, user_id, workspace_slug, thread_id, knowledges=["thread"]):
+         memory_session_id = None
+         if "user" in knowledges:
+             memory_session_id = None
+         elif "workspace" in knowledges:
+             memory_session_id = workspace_slug
+         elif "thread" in knowledges:
+             memory_session_id = f"{workspace_slug}_{thread_id}"
+         # print("memory_session_id",memory_session_id)
+         history_memories = self.memory.search(query=query, user_id=user_id, run_id=memory_session_id,limit=5)
+         # history_memories = self.memory.get_all(user_id=user_id, run_id=memory_session_id,limit=20)
+         # print("history_memories",history_memories)
+
+         memories_str = "\n".join(f"- {entry['memory']}" for entry in history_memories["results"])
+
+         knowledges_str = ""
+         all_knowledge_memories = []
+         for knowledge_id in knowledges:
+             if knowledge_id in ["account","workspace","thread"]:
+                 continue
+             knowledge_memories = self.memory.search(query=query, user_id=user_id, run_id=knowledge_id, limit=5)
+             all_knowledge_memories = [*all_knowledge_memories,*knowledge_memories["results"]]
+
+         knowledges_str = "\n".join(f"- {entry['memory']}" for entry in all_knowledge_memories)
+         return memories_str, knowledges_str
+
+
+     async def create_agent(self, agent_framework="tinyagent",agent_config=None,tools=[],callbacks=[], memories_str=None, knowledges_str=None):
+         default_agent_config = {
+             "name": self.agent_name,
+             "model_id": self.default_model,
+             "description": self.agent_description,
+             "instructions": self.system_prompt,
+             "tools": tools,
+             "callbacks": callbacks
+         }
+
+         default_agent_framework = self.agent_framework
+
+         if agent_config:
+             default_agent_config.update(agent_config)
+
+         if agent_framework:
+             default_agent_framework = agent_framework
+
+         # print("default_agent_config", default_agent_config)
+         # print("agent_config", agent_config)
+
+         if memories_str:
+             default_agent_config["instructions"] = default_agent_config["instructions"].replace("##MEMORIES##",memories_str)
+         if knowledges_str:
+             default_agent_config["instructions"] = default_agent_config["instructions"].replace("##KNOWLEDGES##",knowledges_str)
+         # print(default_agent_framework)
+         # print(default_agent_config)
+
+         self.agent = await AnyAgent.create_async(
+             default_agent_framework,  # See all options in https://mozilla-ai.github.io/any-agent/
+             AgentConfig(
+                 **default_agent_config,
+                 current_session_id=self.current_session_id,
+                 # agent_args={
+                 #     "interrupt_after":["get_user_info"]
+                 # }
+             ),
+         )
+
+         return self.agent
+
+     async def serve_as_a2a(self, a2a_serving_config):
+         handle = await self.agent.serve_async(A2AServingConfig(**a2a_serving_config))
+         server_port = handle.port
+         server_url = f"http://localhost:{server_port}"
+
+         return server_url
+
+     async def load_default_agent(self, agent_id, agent_data, payload):
+         system_prompt = None
+         agent_framework = None
+         agent_description = None
+         agent_name = None
+         default_model = None
+         provider_name = None
+         llm_setting = None
+
+         user_id = payload["user_id"]
+         workspace_slug = payload["workspace_slug"]
+         thread_id = payload["thread_id"]
+         knowledges = payload["knowledges"]
+         memory_id = payload["memory_id"]
+         memory_path = payload["memory_path"]
+         execution_id = payload["session_id"]
+         aci_linked_account_owner_id = payload["aci_linked_account_owner_id"]
+         aci_agent_first_api_key = payload["aci_api_key"]
+         realtimex_access_token = payload["realtimex_access_token"]
+
+         agent_description = agent_data["description"]
+         agent_name = agent_data["name"]
+         agent_framework = agent_data["execution_config"]["framework"]
+
+         if "agent_description" in payload:
+             agent_description = payload["agent_description"]
+         if "agent_name" in payload:
+             agent_name = payload["agent_name"]
+         if "agent_framework" in payload:
+             agent_framework = payload["agent_framework"]
+         if "system_prompt" in payload:
+             system_prompt = payload["system_prompt"]
+         if "llm_setting" in payload:
+             llm_setting = payload["llm_setting"]
+
+
+         default_openai_base_url = payload["litellm_api_base"]
+         default_openai_api_key = payload["litellm_api_key"]
+
+         # Load MCP tools
+
+         # # Create agent
+         # agent = RealTimeXAgent()
+
+         # print("agent_data")
+
+         await self.prepare_realtimex_agent(
+             agent_id=agent_id,
+             agent_data=agent_data
+         )
+
+         # await self.prepare_memory(memory_id=memory_id, memory_path=memory_path, litellm_base_url=default_openai_base_url, litellm_api_key=default_openai_api_key)
+
+         default_callbacks = await self.load_default_callbacks()
+
+         default_tools = await self.load_default_tools()
+         all_tools = [*default_tools]
+
+         if self.recommended_aci_mcp_apps:
+             aci_mcp = await self.load_aci_mcp_tools(
+                 linked_account_owner_id=aci_linked_account_owner_id,
+                 aci_api_key=aci_agent_first_api_key
+             )
+             if aci_mcp:
+                 all_tools = [*all_tools,aci_mcp]
+
+         if self.recommended_local_mcp_apps:
+             local_mcps = await self.load_local_mcp_tools(
+                 workspace_slug=workspace_slug,
+                 thread_id=thread_id
+             )
+             # print("local_mcps",local_mcps)
+             all_tools = [*all_tools,*local_mcps]
+
+         if self.recommended_agent_flows:
+             aci_mcp_agent_flow = await self.load_mcp_agent_flow_tools(
+                 linked_account_owner_id=aci_linked_account_owner_id,
+                 aci_api_key=aci_agent_first_api_key,
+                 realtimex_access_token=realtimex_access_token,
+                 litellm_base_url=default_openai_base_url,
+                 litellm_api_key=default_openai_api_key,
+                 workspace_slug=workspace_slug,
+                 thread_id=thread_id
+             )
+             if aci_mcp_agent_flow:
+                 all_tools = [*all_tools,aci_mcp_agent_flow]
+
+         if self.recommended_team_members:
+
+             team_members = await self.load_a2a_agents_as_tools()
+             # print(team_members)
+             if team_members:
+                 all_tools = [*all_tools,*team_members]
+
+
+
+         agent_config = {
+             "api_base": default_openai_base_url,
+             "api_key": default_openai_api_key,
+         }
+
+         # print("agent_framework",agent_framework)
+
+         if agent_description:
+             agent_config["description"] = agent_description
+         if system_prompt:
+             agent_config["instructions"] = system_prompt
+
+
+         if llm_setting:
+             llm_config = await self.prepare_llm(**llm_setting["default"])
+             # print("llm_config",llm_config)
+             agent_config.update(llm_config)
+
+
+         memories_str = ""
+         knowledges_str = ""
+         # if knowledges:
+         #     memories_str, knowledges_str = await self.load_knowledges(message, user_id, workspace_slug, thread_id, knowledges)
+
+         if agent_framework == "deepagents":
+             from deepagents import create_realtimex_deep_agent
+             from deepagents.backends import CompositeBackend
+             from deepagents.backends.filesystem import FilesystemBackend
+             agent_framework = "langchain"
+             agent_config["agent_type"] = create_realtimex_deep_agent
+
+             # Backend is always required for deepagents
+             agent_config["agent_args"] = {
+                 "assistant_id": agent_id,
+                 "backend": CompositeBackend(
+                     default=FilesystemBackend(),
+                     routes={},
+                 )
+             }
+
+             # Resolve memory paths (global and workspace)
+             global_agent_path = get_deepagents_agent_path("global", agent_id)
+             workspace_agent_path = get_deepagents_agent_path("workspace", agent_id, workspace_slug)
+
+             # Enable memory if either global or workspace config exists
+             agent_config["agent_args"]["enable_memory"] = bool(global_agent_path or workspace_agent_path)
+             if global_agent_path:
+                 agent_config["agent_args"]["global_agent_path"] = global_agent_path
+             if workspace_agent_path:
+                 agent_config["agent_args"]["workspace_agent_path"] = workspace_agent_path
+
+             # Resolve skills paths (global and workspace)
+             global_skills_dir = get_deepagents_skills_dir("global", agent_id)
+             workspace_skills_dir = get_deepagents_skills_dir("workspace", agent_id, workspace_slug)
+
+             # Enable skills if either global or workspace config exists
+             agent_config["agent_args"]["enable_skills"] = bool(global_skills_dir or workspace_skills_dir)
+             if global_skills_dir:
+                 agent_config["agent_args"]["global_skills_dir"] = global_skills_dir
+             if workspace_skills_dir:
+                 agent_config["agent_args"]["workspace_skills_dir"] = workspace_skills_dir
+
+         if "subagents" in agent_data["execution_config"]:
+             if "_auto" in agent_data["execution_config"]["subagents"]:
+                 try:
+                     subagents = await self.create_subagents(
+                         instructions=system_prompt,
+                         tools=[],
+                         llm_config={
+                             "api_base": agent_config["api_base"],
+                             "api_key": agent_config["api_key"],
+                             "model_id": agent_config["model_id"]
+                         }
+                     )
+                     if subagents:
+                         # Merge subagents into existing agent_args if present
+                         if "agent_args" not in agent_config:
+                             agent_config["agent_args"] = {}
+                         agent_config["agent_args"]["subagents"] = subagents
+                 except Exception as e:
+                     print(e)
+
+         # print(agent_config["agent_args"])
+
+         await self.create_agent(
+             agent_framework=agent_framework,  # See all options in https://mozilla-ai.github.io/any-agent/
+             agent_config=agent_config,
+             tools=all_tools,
+             callbacks=[*default_callbacks],
+             memories_str=memories_str,
+             knowledges_str=knowledges_str
+         )
+
+         # return agent
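
Finally, a minimal end-to-end usage sketch (illustrative only, not shipped in the package): the payload keys mirror the ones read by load_default_agent, and every value shown is a placeholder.

import asyncio

async def main():
    agent = RealTimeXAgent(current_session_id="session-123")  # placeholder session id
    await agent.load_default_agent(
        agent_id="demo-agent",
        agent_data={
            "name": "Demo Agent",
            "description": "Example agent",
            "instructions": "You are a helpful assistant.",
            "execution_config": {
                "framework": "tinyagent",
                "models": {"default_model": "openai/gpt-4o-mini"},
            },
        },
        payload={
            "user_id": "user-1",
            "workspace_slug": "demo-workspace",
            "thread_id": "thread-1",
            "knowledges": ["thread"],
            "memory_id": "memory-1",
            "memory_path": "/tmp/memory",
            "session_id": "session-123",
            "aci_linked_account_owner_id": "owner-1",
            "aci_api_key": "aci-placeholder",
            "realtimex_access_token": "token-placeholder",
            "litellm_api_base": "http://localhost:4000",
            "litellm_api_key": "sk-placeholder",
        },
    )
    # Serve the loaded agent over A2A on an arbitrary local port and print its URL.
    server_url = await agent.serve_as_a2a({"port": 8123, "stream_tool_usage": True})
    print(server_url)

asyncio.run(main())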