realtimex-agent-a2a-agent 0.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,755 @@
1
+ from any_agent import AgentConfig, AnyAgent
2
+ from any_agent.config import MCPStdio
3
+ import json
4
+ import os
5
+ from typing import Callable, Dict, Literal, Optional, Union, Any
6
+
7
+ from .tools import send_email
8
+ from .callbacks.tool_execution import ShowToolCalling
9
+ from any_agent.callbacks import get_default_callbacks
10
+ from mem0 import Memory
11
+ from any_agent.tools import a2a_tool_async
12
+ from any_agent.serving import A2AServingConfig
13
+
14
+
15
+ MEMORIES_GLOBAL_VIRTUAL_ROOT = "/memories/global/"
16
+ MEMORIES_WORKSPACE_VIRTUAL_ROOT_PREFIX = "/memories/workspace/"
17
+ SKILLS_GLOBAL_VIRTUAL_ROOT = "/skills/global/"
18
+ SKILLS_WORKSPACE_VIRTUAL_ROOT_PREFIX = "/skills/workspace/"
19
+
20
+
21
def get_user_dir():
    """Return the RealTimeX application directory (``~/.realtimex.ai``)."""
    home = os.path.expanduser("~")
    return os.path.join(home, ".realtimex.ai")
23
+
24
def get_base_user_dir():
    """Return the current user's home directory.

    The previous single-argument ``os.path.join(...)`` wrapper was a no-op,
    so this now calls ``os.path.expanduser`` directly.
    """
    return os.path.expanduser("~")
26
+
27
def get_uvx_executable():
    """Locate the bundled ``uvx`` executable.

    Checks the Unix then Windows install locations under the RealTimeX
    resources directory; falls back to the bare command name so normal
    PATH resolution applies.
    """
    envs_dir = os.path.join(get_user_dir(), "Resources", "envs")
    candidates = (
        os.path.join(envs_dir, "bin", "uvx"),          # Unix layout
        os.path.join(envs_dir, "Scripts", "uvx.exe"),  # Windows layout
    )
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return "uvx"
35
+
36
def get_nvm_dir():
    """Return the nvm installation directory, or "" when nvm is absent.

    Looks in ``~/.nvm`` (Unix) and then ``C:\\nvm`` (Windows).
    """
    candidates = (
        os.path.join(get_base_user_dir(), ".nvm"),
        os.path.join('c:', os.sep, "nvm"),
    )
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return ""
44
+
45
+
46
def get_nvm_inc():
    """Return the node include directory for the pinned node version, or "".

    Example: ``~/.nvm/versions/node/v22.16.0/include/node``.
    """
    include_dir = os.path.join(get_nvm_dir(), "versions", "node", "v22.16.0", "include", "node")
    if os.path.exists(include_dir):
        return include_dir
    # NOTE(review): this fallback returns C:\nvm itself rather than an
    # include directory — looks copy-pasted from get_nvm_dir; confirm intent.
    win_fallback = os.path.join('c:', os.sep, "nvm")
    if os.path.exists(win_fallback):
        return win_fallback
    return ""
55
+
56
def get_nvm_bin():
    """Return the node bin directory for the pinned node version, or "".

    Example: ``~/.nvm/versions/node/v22.16.0/bin``.
    """
    bin_dir = os.path.join(get_nvm_dir(), "versions", "node", "v22.16.0", "bin")
    if os.path.exists(bin_dir):
        return bin_dir
    # NOTE(review): this fallback returns C:\nvm itself rather than a bin
    # directory — looks copy-pasted from get_nvm_dir; confirm intent.
    win_fallback = os.path.join('c:', os.sep, "nvm")
    if os.path.exists(win_fallback):
        return win_fallback
    return ""
65
+
66
def get_npx_executable():
    """Locate the ``npx`` executable for the pinned node version.

    Falls back to the bare command name so PATH resolution applies.
    """
    candidates = (
        os.path.join(get_base_user_dir(), ".nvm", "versions", "node", "v22.16.0", "bin", "npx"),
        os.path.join('c:', os.sep, "nvm", "v22.16.0", "npx.cmd"),
    )
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return "npx"
74
+
75
+
76
def get_deepagents_agent_path(scope, agent_id, workspace_slug=None):
    """
    Get the agent.md path for DeepAgents.

    Args:
        scope: 'global' or 'workspace'
        agent_id: The agent identifier
        workspace_slug: Required when scope is 'workspace'

    Returns:
        Path string if file exists, None otherwise
    """
    skills_base = os.path.join(get_user_dir(), "Resources", "agent-skills")
    if scope == "global":
        candidate = os.path.join(skills_base, "global", agent_id, "agent.md")
    elif scope == "workspace" and workspace_slug:
        candidate = os.path.join(skills_base, "workspaces", workspace_slug, agent_id, "agent.md")
    else:
        # Unknown scope, or workspace scope without a slug.
        return None
    return candidate if os.path.isfile(candidate) else None
100
+
101
+
102
def get_deepagents_skills_dir(scope, agent_id, workspace_slug=None):
    """
    Get the skills directory path for DeepAgents.

    Args:
        scope: 'global' or 'workspace'
        agent_id: The agent identifier
        workspace_slug: Required when scope is 'workspace'

    Returns:
        Path string if directory exists, None otherwise
    """
    skills_base = os.path.join(get_user_dir(), "Resources", "agent-skills")
    if scope == "global":
        candidate = os.path.join(skills_base, "global", agent_id, "skills")
    elif scope == "workspace" and workspace_slug:
        candidate = os.path.join(skills_base, "workspaces", workspace_slug, agent_id, "skills")
    else:
        # Unknown scope, or workspace scope without a slug.
        return None
    return candidate if os.path.isdir(candidate) else None
126
+
127
class RealTimeXAgent():
    """Builds, configures and serves a RealTimeX agent on top of any-agent.

    Instances start empty; ``prepare_realtimex_agent`` or
    ``load_default_agent`` populate the fields below before use.
    """

    def __init__(
        self, current_session_id
    ):
        """Create an empty agent shell bound to *current_session_id*."""
        # Identity / configuration loaded from the agent record.
        self.agent_id: Optional[str] = None
        self.agent_data: Optional[Dict] = None
        self.agent: Optional[AnyAgent] = None
        self.agent_name: Optional[str] = None
        self.system_prompt: Optional[str] = None
        self.agent_framework: Optional[str] = None
        self.agent_description: Optional[str] = None
        self.default_model: Optional[str] = None
        # Optional capability recommendations from the agent record
        # (flows, ACI MCP apps, local MCP apps, A2A team members).
        self.recommended_agent_flows = None
        self.recommended_aci_mcp_apps = None
        self.recommended_local_mcp_apps = None
        self.recommended_team_members = None
        # mem0 store; only set once prepare_memory is re-enabled.
        self.memory: Optional[Memory] = None

        # Session the agent executes under (propagated to MCP tool servers).
        self.current_session_id = current_session_id
146
+
147
+ async def prepare_llm(self, provider_name,model_name,api_base,api_key):
148
+ def get_provider_name(provider_name):
149
+ if provider_name == "realtimexai":
150
+ return "openai"
151
+ elif provider_name == "litellm":
152
+ return "openai"
153
+ elif provider_name == "openai":
154
+ return "openai"
155
+ elif provider_name == "ollama":
156
+ return "ollama"
157
+ elif provider_name == "gemini":
158
+ return "google"
159
+ elif provider_name == "google":
160
+ return "google"
161
+
162
+ def get_model_name(model_name):
163
+ return model_name
164
+
165
+ provider_name = get_provider_name(provider_name)
166
+ model_name = get_model_name(model_name)
167
+
168
+ # if provider_name == "openai":
169
+ # os.environ['OPENAI_BASE_URL'] = api_base
170
+ # os.environ['OPENAI_API_KEY'] = api_key
171
+
172
+ # return {
173
+ # "api_base": api_base,
174
+ # "api_key": api_key,
175
+ # "model_id": f"{provider_name}:{model_name}"
176
+ # }
177
+
178
+ return {
179
+ "api_base": api_base,
180
+ "api_key": api_key,
181
+ "model_id": f"{provider_name}/{model_name}"
182
+ }
183
+
184
+ # return {
185
+ # "api_base": api_base,
186
+ # "api_key": api_key,
187
+ # "model_id": model_name
188
+ # }
189
+
190
+ async def create_subagents(self,instructions=None,tools=[],llm_config=None):
191
+ from openai import OpenAI
192
+ client = OpenAI(
193
+ api_key=llm_config["api_key"],
194
+ base_url=llm_config["api_base"],
195
+ )
196
+
197
+ system_prompt = self.system_prompt
198
+ if not instructions:
199
+ system_prompt = instructions
200
+
201
+ schema = {
202
+ "title": "subagents",
203
+ "description": "The list of subagents to do the task well and effectively.",
204
+ "required": [
205
+ "subagents",
206
+ ],
207
+ "type": "object",
208
+ "properties": {
209
+ "subagents":{
210
+ "type": "array",
211
+ "description": "The list of subagents to do the task well and effectively.",
212
+ "items": {
213
+ "type": "object",
214
+ "properties": {
215
+ "name": {
216
+ "type": "string",
217
+ "description": "The name of the sub-agent"
218
+ },
219
+ "description": {
220
+ "type": "string",
221
+ "description": "A description of the sub-agent"
222
+ },
223
+ "prompt": {
224
+ "type": "string",
225
+ "description": "The prompt used by the sub-agent"
226
+ },
227
+ # "tools": {
228
+ # "type": "array",
229
+ # "description": "Optional list of tools name the sub-agent can use",
230
+ # "items": {
231
+ # "type": "string"
232
+ # }
233
+ # }
234
+ },
235
+ "required": ["name", "description", "prompt"],
236
+ "additionalProperties": False
237
+ }
238
+ }
239
+ },
240
+ "additionalProperties": False
241
+ }
242
+
243
+ # print("schema", json.dumps(schema))
244
+
245
+ response_format = { "type": "json_schema", "json_schema": {"strict": True, "name": schema["title"], "schema": schema}}
246
+
247
+ # tools_str = ""
248
+ # for tool in tools:
249
+ # tools_str = f"""{tools_str}
250
+ # - {tool.__name__}: {tool.__doc__}"""
251
+
252
+ completion = client.beta.chat.completions.parse(
253
+ model=llm_config["model_id"],
254
+ messages=[{"role": "system", "content":
255
+ f"""You are tasked with designing a small team of specialized subagents to work together under the guidance of the main agent.
256
+
257
+ * The main agent’s role and purpose is defined by:
258
+ {system_prompt}
259
+
260
+ Your job is to create **no more than 5 subagents**. Each subagent must include:
261
+
262
+ 1. Name: lowercase, short, clear, and distinct, only alphabets and underscore allowed.
263
+ 2. Description: what this subagent is specialized at and how it contributes to the team.
264
+ 3. System Prompt: clear instructions that define the subagent’s behavior, style, and responsibilities.
265
+
266
+ Guidelines:
267
+
268
+ * Each subagent should have a well-defined, non-overlapping role.
269
+ * The team should collectively cover all major aspects required for the main agent’s purpose.
270
+ * Avoid redundancy—each subagent must bring unique value.
271
+ * Keep the team **small (≤5 subagents)** but **effective**.
272
+
273
+ Finally, return the result as a list of subagent objects in JSON format."""},
274
+ {"role": "user", "content": "Create a team of subagents to do task: {}"},
275
+ ],
276
+
277
+ response_format=response_format,
278
+ )
279
+
280
+
281
+
282
+ result = json.loads(completion.choices[0].message.content)
283
+
284
+ return result["subagents"]
285
+
286
+
287
+ async def prepare_realtimex_agent(self, agent_id, agent_data):
288
+ # directus_client = DirectusClient(server_url = directus_server_url,access_token = directus_access_token)
289
+
290
+ # d_agent = directus_client.get_directus_item_by_id("realtimex_agents",agent_id)
291
+
292
+ agent_name = agent_data["name"]
293
+ agent_description = agent_data["description"]
294
+ system_prompt = agent_data["instructions"]
295
+ agent_framework = agent_data["execution_config"]["framework"]
296
+ default_model = agent_data["execution_config"]["models"]["default_model"]
297
+
298
+ recommended_agent_flows = None
299
+ if "recommended_agent_flows" in agent_data:
300
+ recommended_agent_flows = agent_data["recommended_agent_flows"]
301
+
302
+ recommended_aci_mcp_apps = None
303
+ if "recommended_aci_mcp_apps" in agent_data:
304
+ recommended_aci_mcp_apps = agent_data["recommended_aci_mcp_apps"]
305
+
306
+ recommended_local_mcp_apps = None
307
+ if "recommended_local_mcp_apps" in agent_data:
308
+ recommended_local_mcp_apps = agent_data["recommended_local_mcp_apps"]
309
+
310
+ recommended_team_members = None
311
+ if "recommended_team_members" in agent_data:
312
+ recommended_team_members = agent_data["recommended_team_members"]
313
+
314
+ self.agent_name = agent_name
315
+ self.system_prompt = system_prompt
316
+ self.agent_framework = agent_framework
317
+ self.agent_description = agent_description
318
+ self.default_model = default_model
319
+ self.agent_id = agent_id
320
+ self.agent_data = agent_data
321
+ self.recommended_agent_flows = recommended_agent_flows
322
+ self.recommended_aci_mcp_apps = recommended_aci_mcp_apps
323
+ self.recommended_local_mcp_apps = recommended_local_mcp_apps
324
+ self.recommended_team_members = recommended_team_members
325
+
326
    async def prepare_memory(self, memory_id, memory_path, litellm_base_url, litellm_api_key):
        """Initialize the mem0-backed memory store for this agent.

        Currently disabled (no-op); the chroma/mem0 configuration below is
        kept for reference. While disabled, ``self.memory`` remains None, so
        ``load_knowledges`` must not be called.
        """
        pass
        # config_dict = {
        #     "version": "v1.1",
        #     "vector_store": {
        #         "provider": "chroma",
        #         "config": {
        #             "collection_name": memory_id,
        #             "path": memory_path,
        #         }
        #     },
        #     "llm": {"provider": "openai", "config": {"api_key": litellm_api_key, "openai_base_url": litellm_base_url, "temperature": 0.2, "model": "gpt-4o-mini"}},
        #     "embedder": {"provider": "openai", "config": {"api_key": litellm_api_key, "openai_base_url": litellm_base_url, "model": "text-embedding-3-small"}},
        #     "history_db_path": "",
        # }
        # print("config_dict",config_dict)
        # memory = Memory.from_config(config_dict=config_dict)
        # self.memory = memory
344
+
345
+ async def load_default_callbacks(self):
346
+ return [ShowToolCalling(),*get_default_callbacks()]
347
+
348
+ async def load_default_tools(self):
349
+ return []
350
+
351
    async def load_aci_mcp_tools(self, linked_account_owner_id, aci_api_key):
        """Build one MCPStdio server exposing the recommended ACI apps.

        Args:
            linked_account_owner_id: Linked-account owner forwarded to aci-mcp.
            aci_api_key: Key for the RealTimeX-hosted ACI server.

        Returns:
            An MCPStdio config for the ``aci-mcp`` apps-server, or None when
            the recommendation list resolves to no app names.
        """
        from any_agent.config import MCPStdio
        mcp_apps = []
        # Entries either wrap the server record or are the record itself.
        for app in self.recommended_aci_mcp_apps:
            if "realtimex_mcp_server_id" in app:
                mcp_apps.append(app["realtimex_mcp_server_id"]["name"])
            else:
                mcp_apps.append(app["name"])
        # mcp_apps = [app["realtimex_mcp_server_id"]["name"] for app in self.recommended_aci_mcp_apps]
        if len(mcp_apps) > 0:
            mcp = MCPStdio(
                # command=get_uvx_executable(),
                # args=["aci-mcp@latest", 'apps-server', '--apps', ','.join(mcp_apps) , '--linked-account-owner-id', linked_account_owner_id],
                # Assumes aci-mcp is installed on PATH — TODO confirm.
                command="aci-mcp",
                args=['apps-server', '--apps', ','.join(mcp_apps) , '--linked-account-owner-id', linked_account_owner_id],
                client_session_timeout_seconds=300,
                env={
                    "ACI_SERVER_URL": "https://mcp.realtimex.ai/v1/",
                    "ACI_API_KEY": aci_api_key
                }
            )
            return mcp
        return None
374
+
375
+ async def load_local_mcp_tools(self, workspace_slug, thread_id):
376
+ from any_agent.config import MCPStdio
377
+ mcp_apps = [app["config"] for app in self.recommended_local_mcp_apps]
378
+ # mcp_apps = [
379
+ # {"command":"uvx","args":["mcp-shell-server"],"env":{"ALLOW_COMMANDS":"ls,cat,pwd,grep,wc,touch,find"}}
380
+ # ]
381
+ mcps = []
382
+ default_env = {
383
+ 'NVM_INC': get_nvm_inc(),
384
+ 'NVM_CD_FLAGS': '-q',
385
+ 'NVM_DIR': get_nvm_dir(),
386
+ 'PATH': f'{os.environ.copy()["PATH"]}{os.pathsep}{get_nvm_bin()}',
387
+ 'NVM_BIN': get_nvm_bin(),
388
+ "SESSION_ID": self.current_session_id,
389
+ "WORKSPACE_SLUG": workspace_slug,
390
+ "THREAD_ID": thread_id,
391
+ "AGENT_ID": self.agent_id,
392
+ }
393
+ for mcp_app in mcp_apps:
394
+ if mcp_app["command"] == "uvx":
395
+ mcp_app["command"] = get_uvx_executable()
396
+ if mcp_app["command"] == "npx":
397
+ mcp_app["command"] = get_npx_executable()
398
+ if "env" not in mcp_app:
399
+ mcp_app["env"] = {}
400
+ mcp_app["env"] = {**mcp_app["env"],**default_env}
401
+ # print("mcp_app",mcp_app)
402
+ mcp = MCPStdio(
403
+ **mcp_app,
404
+ client_session_timeout_seconds=300,
405
+ )
406
+ mcps.append(mcp)
407
+ return mcps
408
+
409
    async def load_mcp_agent_flow_tools(self, linked_account_owner_id, aci_api_key, litellm_base_url, litellm_api_key, realtimex_access_token, workspace_slug, thread_id):
        """Expose the recommended agent flows through an MCP stdio server.

        Returns:
            MCPStdio config for ``agent-flows-mcp-server``, or None when no
            flow ids are recommended.
        """
        from any_agent.config import MCPStdio
        agent_flow_ids = [flow["realtimex_agent_flows_id"]["id"] for flow in self.recommended_agent_flows]
        if len(agent_flow_ids) > 0:
            mcp = MCPStdio(
                # command=get_uvx_executable(),
                # args=["--from", "git+https://oauth2:5yTHSE9k34jbWgzXmsxQ@rtgit.rta.vn/rtlab/rtwebteam/mcp-servers/realtimex-ai-agent-flows@main","agent-flows-mcp-server",'--flows',','.join(agent_flow_ids)],
                # SECURITY(review): the commented URL above embeds an access
                # token — rotate it and scrub it from history.
                # Assumes agent-flows-mcp-server is on PATH — TODO confirm.
                command="agent-flows-mcp-server",
                args=['--flows', ','.join(agent_flow_ids)],
                client_session_timeout_seconds=300,
                # Credentials and session context the flow server forwards on.
                env={
                    "AGENT_FLOWS_API_KEY": realtimex_access_token,
                    "LITELLM_API_KEY": litellm_api_key,
                    "LITELLM_API_BASE": litellm_base_url,
                    "MCP_ACI_API_KEY": aci_api_key,
                    "MCP_ACI_LINKED_ACCOUNT_OWNER_ID": linked_account_owner_id,
                    "SESSION_ID": self.current_session_id,
                    "WORKSPACE_SLUG": workspace_slug,
                    "THREAD_ID": thread_id,
                    "AGENT_ID": self.agent_id,
                    # "AGENT_FLOWS_BASE_URL": "https://your-custom-instance.com" # Optional
                }
            )
            return mcp
        return None
434
+
435
+ async def load_a2a_agents_as_tools(self):
436
+ def get_free_port():
437
+ import socket
438
+ sock = socket.socket()
439
+ sock.bind(('', 0))
440
+ return sock.getsockname()[1]
441
+
442
+
443
+ tools = []
444
+ for a2a_agent in self.recommended_team_members:
445
+ agent_id = a2a_agent["agent_id"]
446
+ agent_data = a2a_agent["agent_data"]
447
+ a2a_port = get_free_port()
448
+ # Create agent
449
+ agent = RealTimeXAgent()
450
+
451
+ await agent.load_default_agent(agent_id, agent_data, payload=a2a_agent)
452
+
453
+ agent_server_url = await agent.serve_as_a2a(
454
+ a2a_serving_config={"port":a2a_port,"stream_tool_usage":True},
455
+ )
456
+
457
+ # print(agent_server_url)
458
+
459
+ tools.append(
460
+ await a2a_tool_async(
461
+ agent_server_url,
462
+ http_kwargs={
463
+ "timeout": 300
464
+ }, # This gives the time agent up to 30 seconds to respond to each request
465
+ )
466
+ )
467
+
468
+
469
+ return tools
470
+
471
    async def load_knowledges(self, query, user_id, workspace_slug, thread_id, knowledges=["thread"]):
        """Search mem0 for conversation memories and knowledge entries.

        Args:
            query: Search text.
            user_id: mem0 user scope.
            workspace_slug: Used to derive the memory run id.
            thread_id: Used to derive the thread-level run id.
            knowledges: Scope selectors ("user"/"workspace"/"thread") plus
                arbitrary knowledge-collection ids. Mutable default is safe
                here only because the list is never mutated.

        Returns:
            (memories_str, knowledges_str): bullet-list strings for prompt
            injection.

        NOTE(review): ``self.memory`` is only set by ``prepare_memory``,
        which is currently a no-op — calling this raises AttributeError
        until memory is re-enabled.
        """
        # Pick the widest matching scope: user-wide (no run id), then
        # workspace, then single thread.
        memory_session_id = None
        if "user" in knowledges:
            memory_session_id = None
        elif "workspace" in knowledges:
            memory_session_id = workspace_slug
        elif "thread" in knowledges:
            memory_session_id = f"{workspace_slug}_{thread_id}"
        # print("memory_session_id",memory_session_id)
        history_memories = self.memory.search(query=query, user_id=user_id, run_id=memory_session_id, limit=5)
        # history_memories = self.memory.get_all(user_id=user_id, run_id=memory_session_id,limit=20)
        # print("history_memories",history_memories)

        memories_str = "\n".join(f"- {entry['memory']}" for entry in history_memories["results"])

        knowledges_str = ""
        all_knowledge_memories = []
        for knowledge_id in knowledges:
            # Skip scope selectors; anything else is a knowledge collection.
            # NOTE(review): this list says "account" while the branch above
            # checks "user" — confirm which selector is canonical.
            if knowledge_id in ["account", "workspace", "thread"]:
                continue
            knowledge_memories = self.memory.search(query=query, user_id=user_id, run_id=knowledge_id, limit=5)
            all_knowledge_memories = [*all_knowledge_memories, *knowledge_memories["results"]]

        knowledges_str = "\n".join(f"- {entry['memory']}" for entry in all_knowledge_memories)
        return memories_str, knowledges_str
496
+
497
+
498
    async def create_agent(self, agent_framework="tinyagent", agent_config=None, tools=[], callbacks=[], memories_str=None, knowledges_str=None):
        """Instantiate the underlying AnyAgent and store it on ``self.agent``.

        Args:
            agent_framework: any-agent framework id; when truthy it overrides
                the framework loaded from the agent record. NOTE(review): the
                "tinyagent" default therefore always wins over
                ``self.agent_framework`` unless the caller passes a falsy
                value — confirm this is intended.
            agent_config: Overrides merged on top of the defaults built from
                this instance's fields.
            tools: Tool list passed through to AgentConfig (read-only here,
                so the mutable default is safe).
            callbacks: Callback list passed through to AgentConfig.
            memories_str: When given, substituted for the ##MEMORIES##
                placeholder in the instructions.
            knowledges_str: When given, substituted for the ##KNOWLEDGES##
                placeholder in the instructions.

        Returns:
            The created AnyAgent (also kept on ``self.agent``).
        """
        # Defaults come from the record loaded by prepare_realtimex_agent.
        default_agent_config = {
            "name": self.agent_name,
            "model_id": self.default_model,
            "description": self.agent_description,
            "instructions": self.system_prompt,
            "tools": tools,
            "callbacks": callbacks
        }

        default_agent_framework = self.agent_framework

        if agent_config:
            default_agent_config.update(agent_config)

        if agent_framework:
            default_agent_framework = agent_framework

        # print("default_agent_config", default_agent_config)
        # print("agent_config", agent_config)

        # Inject retrieved memories/knowledge into the prompt placeholders.
        if memories_str:
            default_agent_config["instructions"] = default_agent_config["instructions"].replace("##MEMORIES##", memories_str)
        if knowledges_str:
            default_agent_config["instructions"] = default_agent_config["instructions"].replace("##KNOWLEDGES##", knowledges_str)
        # print(default_agent_framework)
        # print(default_agent_config)

        self.agent = await AnyAgent.create_async(
            default_agent_framework,  # See all options in https://mozilla-ai.github.io/any-agent/
            AgentConfig(
                **default_agent_config,
                current_session_id=self.current_session_id,
                # agent_args={
                #     "interrupt_after":["get_user_info"]
                # }
            ),
        )

        return self.agent
538
+
539
+ async def serve_as_a2a(self, a2a_serving_config):
540
+ handle = await self.agent.serve_async(A2AServingConfig(**a2a_serving_config))
541
+ server_port = handle.port
542
+ server_url = f"http://localhost:{server_port}"
543
+
544
+ return server_url
545
+
546
    async def load_default_agent(self, agent_id, agent_data, payload):
        """End-to-end bootstrap: load the agent record, assemble tools and
        callbacks, then create the underlying any-agent on ``self.agent``.

        Args:
            agent_id: Identifier of the agent record.
            agent_data: Raw agent record (name/description/execution_config...).
            payload: Per-session context: ids, credentials, llm settings and
                optional overrides (agent_name, system_prompt, ...).
                Assumed keys: user_id, workspace_slug, thread_id, knowledges,
                memory_id, memory_path, session_id, aci_linked_account_owner_id,
                aci_api_key, realtimex_access_token, litellm_api_base,
                litellm_api_key — TODO confirm against callers.
        """
        system_prompt = None
        agent_framework = None
        agent_description = None
        agent_name = None
        default_model = None
        provider_name = None
        llm_setting = None

        # Session context. Several of these (user_id, memory_id, memory_path,
        # execution_id, knowledges) are currently unused because the memory
        # path below is disabled; retained for when it is re-enabled.
        user_id = payload["user_id"]
        workspace_slug = payload["workspace_slug"]
        thread_id = payload["thread_id"]
        knowledges = payload["knowledges"]
        memory_id = payload["memory_id"]
        memory_path = payload["memory_path"]
        execution_id = payload["session_id"]
        aci_linked_account_owner_id = payload["aci_linked_account_owner_id"]
        aci_agent_first_api_key = payload["aci_api_key"]
        realtimex_access_token = payload["realtimex_access_token"]

        # Base values from the stored agent record...
        agent_description = agent_data["description"]
        agent_name = agent_data["name"]
        agent_framework = agent_data["execution_config"]["framework"]

        # ...optionally overridden per request via the payload.
        if "agent_description" in payload:
            agent_description = payload["agent_description"]
        if "agent_name" in payload:
            agent_name = payload["agent_name"]
        if "agent_framework" in payload:
            agent_framework = payload["agent_framework"]
        if "system_prompt" in payload:
            system_prompt = payload["system_prompt"]
        if "llm_setting" in payload:
            llm_setting = payload["llm_setting"]

        default_openai_base_url = payload["litellm_api_base"]
        default_openai_api_key = payload["litellm_api_key"]

        # Load MCP tools

        # # Create agent
        # agent = RealTimeXAgent()

        # print("agent_data")

        # Populate self.* fields (name, prompt, recommendations, ...).
        await self.prepare_realtimex_agent(
            agent_id=agent_id,
            agent_data=agent_data
        )

        # Memory is currently disabled (prepare_memory is a no-op).
        # await self.prepare_memory(memory_id=memory_id, memory_path=memory_path, litellm_base_url=default_openai_base_url, litellm_api_key=default_openai_api_key)

        default_callbacks = await self.load_default_callbacks()

        default_tools = await self.load_default_tools()
        all_tools = [*default_tools]

        # Each recommendation category contributes tools when present.
        if self.recommended_aci_mcp_apps:
            aci_mcp = await self.load_aci_mcp_tools(
                linked_account_owner_id=aci_linked_account_owner_id,
                aci_api_key=aci_agent_first_api_key
            )
            if aci_mcp:
                all_tools = [*all_tools, aci_mcp]

        if self.recommended_local_mcp_apps:
            local_mcps = await self.load_local_mcp_tools(
                workspace_slug=workspace_slug,
                thread_id=thread_id
            )
            # print("local_mcps",local_mcps)
            all_tools = [*all_tools, *local_mcps]

        if self.recommended_agent_flows:
            aci_mcp_agent_flow = await self.load_mcp_agent_flow_tools(
                linked_account_owner_id=aci_linked_account_owner_id,
                aci_api_key=aci_agent_first_api_key,
                realtimex_access_token=realtimex_access_token,
                litellm_base_url=default_openai_base_url,
                litellm_api_key=default_openai_api_key,
                workspace_slug=workspace_slug,
                thread_id=thread_id
            )
            if aci_mcp_agent_flow:
                all_tools = [*all_tools, aci_mcp_agent_flow]

        if self.recommended_team_members:

            team_members = await self.load_a2a_agents_as_tools()
            # print(team_members)
            if team_members:
                all_tools = [*all_tools, *team_members]

        # Start from the LiteLLM-compatible endpoint credentials; an
        # explicit llm_setting below may replace them wholesale.
        agent_config = {
            "api_base": default_openai_base_url,
            "api_key": default_openai_api_key,
        }

        # print("agent_framework",agent_framework)

        if agent_description:
            agent_config["description"] = agent_description
        if system_prompt:
            agent_config["instructions"] = system_prompt

        if llm_setting:
            llm_config = await self.prepare_llm(**llm_setting["default"])
            # print("llm_config",llm_config)
            agent_config.update(llm_config)

        memories_str = ""
        knowledges_str = ""
        # if knowledges:
        #     memories_str, knowledges_str = await self.load_knowledges(message, user_id, workspace_slug, thread_id, knowledges)

        if agent_framework == "deepagents":
            # deepagents runs through the langchain adapter with a custom
            # agent factory and a virtual filesystem for memories/skills.
            from deepagents import create_realtimex_deep_agent
            from deepagents.backends import CompositeBackend
            from deepagents.backends.filesystem import FilesystemBackend
            agent_framework = "langchain"
            agent_config["agent_type"] = create_realtimex_deep_agent

            routes = {}
            agent_config["agent_args"] = {"assistant_id": agent_id}

            # Resolve memory paths (global and workspace)
            global_agent_path = get_deepagents_agent_path("global", agent_id)
            workspace_agent_path = get_deepagents_agent_path("workspace", agent_id, workspace_slug)

            # Enable memory if either global or workspace config exists
            agent_config["agent_args"]["enable_memory"] = bool(global_agent_path or workspace_agent_path)
            if global_agent_path:
                global_agent_root = os.path.dirname(global_agent_path)
                routes[MEMORIES_GLOBAL_VIRTUAL_ROOT] = FilesystemBackend(
                    root_dir=global_agent_root,
                    virtual_mode=True,
                )
                agent_config["agent_args"]["global_agent_path"] = f"{MEMORIES_GLOBAL_VIRTUAL_ROOT}agent.md"
            if workspace_agent_path:
                workspace_agent_root = os.path.dirname(workspace_agent_path)
                workspace_memories_root = f"{MEMORIES_WORKSPACE_VIRTUAL_ROOT_PREFIX}{workspace_slug}/"
                routes[workspace_memories_root] = FilesystemBackend(
                    root_dir=workspace_agent_root,
                    virtual_mode=True,
                )
                agent_config["agent_args"]["workspace_agent_path"] = f"{workspace_memories_root}agent.md"

            # Resolve skills paths (global and workspace)
            global_skills_dir = get_deepagents_skills_dir("global", agent_id)
            workspace_skills_dir = get_deepagents_skills_dir("workspace", agent_id, workspace_slug)

            # Enable skills if either global or workspace config exists
            agent_config["agent_args"]["enable_skills"] = bool(global_skills_dir or workspace_skills_dir)
            if global_skills_dir:
                routes[SKILLS_GLOBAL_VIRTUAL_ROOT] = FilesystemBackend(
                    root_dir=global_skills_dir,
                    virtual_mode=True,
                )
                agent_config["agent_args"]["global_skills_dir"] = SKILLS_GLOBAL_VIRTUAL_ROOT
            if workspace_skills_dir:
                workspace_skills_root = f"{SKILLS_WORKSPACE_VIRTUAL_ROOT_PREFIX}{workspace_slug}/"
                routes[workspace_skills_root] = FilesystemBackend(
                    root_dir=workspace_skills_dir,
                    virtual_mode=True,
                )
                agent_config["agent_args"]["workspace_skills_dir"] = workspace_skills_root

            # Backend is always required for deepagents
            agent_config["agent_args"]["backend"] = lambda runtime: CompositeBackend(
                default=FilesystemBackend(),
                routes=routes,
            )

        # NOTE(review): placed at method level because the
        # `"agent_args" not in agent_config` guard below only makes sense
        # outside the deepagents branch (which always sets agent_args);
        # the flattened source is ambiguous — confirm.
        if "subagents" in agent_data["execution_config"]:
            if "_auto" in agent_data["execution_config"]["subagents"]:
                try:
                    subagents = await self.create_subagents(
                        instructions=system_prompt,
                        tools=[],
                        llm_config={
                            "api_base": agent_config["api_base"],
                            "api_key": agent_config["api_key"],
                            # NOTE(review): "model_id" is only present when
                            # llm_setting was provided — KeyError otherwise.
                            "model_id": agent_config["model_id"]
                        }
                    )
                    if subagents:
                        # Merge subagents into existing agent_args if present
                        if "agent_args" not in agent_config:
                            agent_config["agent_args"] = {}
                        agent_config["agent_args"]["subagents"] = subagents
                except Exception as e:
                    print(e)

        # print(agent_config["agent_args"])

        await self.create_agent(
            agent_framework=agent_framework,  # See all options in https://mozilla-ai.github.io/any-agent/
            agent_config=agent_config,
            tools=all_tools,
            callbacks=[*default_callbacks],
            memories_str=memories_str,
            knowledges_str=knowledges_str
        )

        # return agent