realtimex-agent-a2a-agent 0.2.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- realtimex_agent_a2a_agent-0.2.0/PKG-INFO +20 -0
- realtimex_agent_a2a_agent-0.2.0/README.md +0 -0
- realtimex_agent_a2a_agent-0.2.0/pyproject.toml +33 -0
- realtimex_agent_a2a_agent-0.2.0/src/realtimex_agent_a2a_agent/__init__.py +5 -0
- realtimex_agent_a2a_agent-0.2.0/src/realtimex_agent_a2a_agent/agent.py +721 -0
- realtimex_agent_a2a_agent-0.2.0/src/realtimex_agent_a2a_agent/callbacks/tool_execution.py +249 -0
- realtimex_agent_a2a_agent-0.2.0/src/realtimex_agent_a2a_agent/cli.py +228 -0
- realtimex_agent_a2a_agent-0.2.0/src/realtimex_agent_a2a_agent/tools.py +14 -0
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
Metadata-Version: 2.3
|
|
2
|
+
Name: realtimex-agent-a2a-agent
|
|
3
|
+
Version: 0.2.0
|
|
4
|
+
Summary: Add your description here
|
|
5
|
+
Author: rta_phuongnguyen
|
|
6
|
+
Author-email: rta_phuongnguyen <phuongnguyen@rtanalytics.vn>
|
|
7
|
+
Requires-Dist: realtimex-any-agent
|
|
8
|
+
Requires-Dist: ollama
|
|
9
|
+
Requires-Dist: google-genai
|
|
10
|
+
Requires-Dist: realtimex-any-agent[a2a]
|
|
11
|
+
Requires-Dist: realtimex-any-agent[langchain]
|
|
12
|
+
Requires-Dist: realtimex-any-agent[deepagents]
|
|
13
|
+
Requires-Dist: nest-asyncio
|
|
14
|
+
Requires-Dist: openai
|
|
15
|
+
Requires-Dist: aci-mcp==1.0.0b13
|
|
16
|
+
Requires-Dist: mem0ai==0.1.116
|
|
17
|
+
Requires-Dist: redis
|
|
18
|
+
Requires-Python: >=3.11
|
|
19
|
+
Description-Content-Type: text/markdown
|
|
20
|
+
|
|
File without changes
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "realtimex-agent-a2a-agent"
|
|
3
|
+
version = "0.2.0"
|
|
4
|
+
description = "Add your description here"
|
|
5
|
+
readme = "README.md"
|
|
6
|
+
authors = [
|
|
7
|
+
{ name = "rta_phuongnguyen", email = "phuongnguyen@rtanalytics.vn" }
|
|
8
|
+
]
|
|
9
|
+
requires-python = ">=3.11"
|
|
10
|
+
dependencies = [
|
|
11
|
+
"realtimex-any-agent",
|
|
12
|
+
"ollama",
|
|
13
|
+
"google-genai",
|
|
14
|
+
"realtimex-any_agent[a2a]",
|
|
15
|
+
"realtimex-any-agent[langchain]",
|
|
16
|
+
"realtimex-any_agent[deepagents]",
|
|
17
|
+
"nest_asyncio",
|
|
18
|
+
"openai",
|
|
19
|
+
"aci-mcp==1.0.0b13",
|
|
20
|
+
"mem0ai==v0.1.116",
|
|
21
|
+
"redis",
|
|
22
|
+
]
|
|
23
|
+
|
|
24
|
+
# [tool.uv.sources]
|
|
25
|
+
# any_agent = { git = "https://github.com/therealtimex/any-agent", branch = "realtimex" }
|
|
26
|
+
# any_agent = { path = "/Users/phuongnguyen/Documents/projects/any-agent" }
|
|
27
|
+
|
|
28
|
+
[project.scripts]
|
|
29
|
+
realtimex-agent-a2a-agent = "realtimex_agent_a2a_agent.cli:main"
|
|
30
|
+
|
|
31
|
+
[build-system]
|
|
32
|
+
requires = ["uv_build>=0.8.2,<0.9.0"]
|
|
33
|
+
build-backend = "uv_build"
|
|
@@ -0,0 +1,721 @@
|
|
|
1
|
+
from any_agent import AgentConfig, AnyAgent
|
|
2
|
+
from any_agent.config import MCPStdio
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
from typing import Callable, Dict, Literal, Optional, Union, Any
|
|
6
|
+
|
|
7
|
+
from .tools import send_email
|
|
8
|
+
from .callbacks.tool_execution import ShowToolCalling
|
|
9
|
+
from any_agent.callbacks import get_default_callbacks
|
|
10
|
+
from mem0 import Memory
|
|
11
|
+
from any_agent.tools import a2a_tool_async
|
|
12
|
+
from any_agent.serving import A2AServingConfig
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def get_user_dir():
    """Return the RealTimeX user data directory (~/.realtimex.ai)."""
    home = os.path.expanduser("~")
    return os.path.join(home, ".realtimex.ai")
|
|
17
|
+
|
|
18
|
+
def get_base_user_dir():
    """Return the current user's home directory."""
    # os.path.join with a single argument returns it unchanged, so this is
    # exactly the expanded home path.
    return os.path.join(os.path.expanduser("~"))
|
|
20
|
+
|
|
21
|
+
def get_uvx_executable():
    """Locate the RealTimeX-managed uvx executable.

    Checks the bundled environment's Unix layout (bin/uvx) and then the
    Windows layout (Scripts/uvx.exe); falls back to plain "uvx" so the
    system PATH is used when no managed install exists.
    """
    env_root = os.path.join(get_user_dir(), "Resources", "envs")
    candidates = (
        os.path.join(env_root, "bin", "uvx"),          # Unix layout
        os.path.join(env_root, "Scripts", "uvx.exe"),  # Windows layout
    )
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return "uvx"
|
|
29
|
+
|
|
30
|
+
def get_nvm_dir():
    """Return the nvm installation directory, or "" when not installed.

    Tries the Unix location (~/.nvm) first, then the Windows convention
    (c:\\nvm).
    """
    candidates = (
        os.path.join(get_base_user_dir(), ".nvm"),
        os.path.join("c:", os.sep, "nvm"),
    )
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    return ""
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def get_nvm_inc():
    """Return the node include directory for the pinned node version, or "".

    Example resolved path: ~/.nvm/versions/node/v22.16.0/include/node

    NOTE(review): the node version (v22.16.0) is hard-coded, and the
    Windows fallback returns c:\\nvm itself (mirroring get_nvm_dir) rather
    than an include directory — confirm this is intentional.
    """
    unix_inc = os.path.join(
        get_nvm_dir(), "versions", "node", "v22.16.0", "include", "node"
    )
    if os.path.exists(unix_inc):
        return unix_inc
    win_fallback = os.path.join("c:", os.sep, "nvm")
    if os.path.exists(win_fallback):
        return win_fallback
    return ""
|
|
49
|
+
|
|
50
|
+
def get_nvm_bin():
    """Return the node bin directory for the pinned node version, or "".

    Example resolved path: ~/.nvm/versions/node/v22.16.0/bin

    NOTE(review): as in get_nvm_inc, the Windows fallback returns c:\\nvm
    itself rather than a bin directory — confirm this is intentional.
    """
    unix_bin = os.path.join(get_nvm_dir(), "versions", "node", "v22.16.0", "bin")
    if os.path.exists(unix_bin):
        return unix_bin
    win_fallback = os.path.join("c:", os.sep, "nvm")
    if os.path.exists(win_fallback):
        return win_fallback
    return ""
|
|
59
|
+
|
|
60
|
+
def get_npx_executable():
    """Locate the npx executable for the pinned node version (v22.16.0).

    Falls back to plain "npx" so the system PATH is used when no managed
    nvm install is present.
    """
    unix_npx = os.path.join(
        get_base_user_dir(), ".nvm", "versions", "node", "v22.16.0", "bin", "npx"
    )
    if os.path.exists(unix_npx):
        return unix_npx
    win_npx = os.path.join("c:", os.sep, "nvm", "v22.16.0", "npx.cmd")
    if os.path.exists(win_npx):
        return win_npx
    return "npx"
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
def get_deepagents_agent_path(scope, agent_id, workspace_slug=None):
    """
    Get the agent.md path for DeepAgents.

    Args:
        scope: 'global' or 'workspace'
        agent_id: The agent identifier
        workspace_slug: Required when scope is 'workspace'

    Returns:
        Path string if file exists, None otherwise
    """
    if scope == "global":
        candidate = os.path.join(
            get_user_dir(), "Resources", "agent-skills", "global", agent_id, "agent.md"
        )
    elif scope == "workspace" and workspace_slug:
        candidate = os.path.join(
            get_user_dir(), "Resources", "agent-skills", "workspaces", workspace_slug, agent_id, "agent.md"
        )
    else:
        # Unknown scope, or workspace scope without a slug.
        return None

    # Only report the path when the file actually exists.
    return candidate if os.path.isfile(candidate) else None
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def get_deepagents_skills_dir(scope, agent_id, workspace_slug=None):
    """
    Get the skills directory path for DeepAgents.

    Args:
        scope: 'global' or 'workspace'
        agent_id: The agent identifier
        workspace_slug: Required when scope is 'workspace'

    Returns:
        Path string if directory exists, None otherwise
    """
    if scope == "global":
        candidate = os.path.join(
            get_user_dir(), "Resources", "agent-skills", "global", agent_id, "skills"
        )
    elif scope == "workspace" and workspace_slug:
        candidate = os.path.join(
            get_user_dir(), "Resources", "agent-skills", "workspaces", workspace_slug, agent_id, "skills"
        )
    else:
        # Unknown scope, or workspace scope without a slug.
        return None

    # Only report the path when the directory actually exists.
    return candidate if os.path.isdir(candidate) else None
|
|
120
|
+
|
|
121
|
+
class RealTimeXAgent():
    """Builds, configures and serves a RealTimeX agent on top of any-agent.

    An instance is first populated from a RealTimeX agent record via
    prepare_realtimex_agent(), then the underlying AnyAgent is created by
    create_agent() (or the full pipeline in load_default_agent()).
    """
    def __init__(
        self, current_session_id
    ):
        """Initialize an empty agent shell.

        Args:
            current_session_id: Identifier of the chat/execution session this
                agent belongs to; forwarded to MCP servers and callbacks.
        """
        # Populated by prepare_realtimex_agent():
        self.agent_id: Optional[str] = None
        self.agent_data: Optional[Dict] = None
        # Populated by create_agent():
        self.agent: Optional[AnyAgent] = None
        self.agent_name: Optional[str] = None
        self.system_prompt: Optional[str] = None
        self.agent_framework: Optional[str] = None
        self.agent_description: Optional[str] = None
        self.default_model: Optional[str] = None
        # Optional recommendation blocks from the agent record.
        self.recommended_agent_flows = None
        self.recommended_aci_mcp_apps = None
        self.recommended_local_mcp_apps = None
        # mem0 Memory; NOTE(review): prepare_memory() is currently a no-op,
        # so this stays None.
        self.recommended_team_members = None
        self.memory: Optional[Memory] = None

        self.current_session_id = current_session_id
|
|
140
|
+
|
|
141
|
+
async def prepare_llm(self, provider_name,model_name,api_base,api_key):
|
|
142
|
+
def get_provider_name(provider_name):
|
|
143
|
+
if provider_name == "realtimexai":
|
|
144
|
+
return "openai"
|
|
145
|
+
elif provider_name == "litellm":
|
|
146
|
+
return "openai"
|
|
147
|
+
elif provider_name == "openai":
|
|
148
|
+
return "openai"
|
|
149
|
+
elif provider_name == "ollama":
|
|
150
|
+
return "ollama"
|
|
151
|
+
elif provider_name == "gemini":
|
|
152
|
+
return "google"
|
|
153
|
+
elif provider_name == "google":
|
|
154
|
+
return "google"
|
|
155
|
+
|
|
156
|
+
def get_model_name(model_name):
|
|
157
|
+
return model_name
|
|
158
|
+
|
|
159
|
+
provider_name = get_provider_name(provider_name)
|
|
160
|
+
model_name = get_model_name(model_name)
|
|
161
|
+
|
|
162
|
+
# if provider_name == "openai":
|
|
163
|
+
# os.environ['OPENAI_BASE_URL'] = api_base
|
|
164
|
+
# os.environ['OPENAI_API_KEY'] = api_key
|
|
165
|
+
|
|
166
|
+
# return {
|
|
167
|
+
# "api_base": api_base,
|
|
168
|
+
# "api_key": api_key,
|
|
169
|
+
# "model_id": f"{provider_name}:{model_name}"
|
|
170
|
+
# }
|
|
171
|
+
|
|
172
|
+
return {
|
|
173
|
+
"api_base": api_base,
|
|
174
|
+
"api_key": api_key,
|
|
175
|
+
"model_id": f"{provider_name}/{model_name}"
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
# return {
|
|
179
|
+
# "api_base": api_base,
|
|
180
|
+
# "api_key": api_key,
|
|
181
|
+
# "model_id": model_name
|
|
182
|
+
# }
|
|
183
|
+
|
|
184
|
+
async def create_subagents(self,instructions=None,tools=[],llm_config=None):
|
|
185
|
+
from openai import OpenAI
|
|
186
|
+
client = OpenAI(
|
|
187
|
+
api_key=llm_config["api_key"],
|
|
188
|
+
base_url=llm_config["api_base"],
|
|
189
|
+
)
|
|
190
|
+
|
|
191
|
+
system_prompt = self.system_prompt
|
|
192
|
+
if not instructions:
|
|
193
|
+
system_prompt = instructions
|
|
194
|
+
|
|
195
|
+
schema = {
|
|
196
|
+
"title": "subagents",
|
|
197
|
+
"description": "The list of subagents to do the task well and effectively.",
|
|
198
|
+
"required": [
|
|
199
|
+
"subagents",
|
|
200
|
+
],
|
|
201
|
+
"type": "object",
|
|
202
|
+
"properties": {
|
|
203
|
+
"subagents":{
|
|
204
|
+
"type": "array",
|
|
205
|
+
"description": "The list of subagents to do the task well and effectively.",
|
|
206
|
+
"items": {
|
|
207
|
+
"type": "object",
|
|
208
|
+
"properties": {
|
|
209
|
+
"name": {
|
|
210
|
+
"type": "string",
|
|
211
|
+
"description": "The name of the sub-agent"
|
|
212
|
+
},
|
|
213
|
+
"description": {
|
|
214
|
+
"type": "string",
|
|
215
|
+
"description": "A description of the sub-agent"
|
|
216
|
+
},
|
|
217
|
+
"prompt": {
|
|
218
|
+
"type": "string",
|
|
219
|
+
"description": "The prompt used by the sub-agent"
|
|
220
|
+
},
|
|
221
|
+
# "tools": {
|
|
222
|
+
# "type": "array",
|
|
223
|
+
# "description": "Optional list of tools name the sub-agent can use",
|
|
224
|
+
# "items": {
|
|
225
|
+
# "type": "string"
|
|
226
|
+
# }
|
|
227
|
+
# }
|
|
228
|
+
},
|
|
229
|
+
"required": ["name", "description", "prompt"],
|
|
230
|
+
"additionalProperties": False
|
|
231
|
+
}
|
|
232
|
+
}
|
|
233
|
+
},
|
|
234
|
+
"additionalProperties": False
|
|
235
|
+
}
|
|
236
|
+
|
|
237
|
+
# print("schema", json.dumps(schema))
|
|
238
|
+
|
|
239
|
+
response_format = { "type": "json_schema", "json_schema": {"strict": True, "name": schema["title"], "schema": schema}}
|
|
240
|
+
|
|
241
|
+
# tools_str = ""
|
|
242
|
+
# for tool in tools:
|
|
243
|
+
# tools_str = f"""{tools_str}
|
|
244
|
+
# - {tool.__name__}: {tool.__doc__}"""
|
|
245
|
+
|
|
246
|
+
completion = client.beta.chat.completions.parse(
|
|
247
|
+
model=llm_config["model_id"],
|
|
248
|
+
messages=[{"role": "system", "content":
|
|
249
|
+
f"""You are tasked with designing a small team of specialized subagents to work together under the guidance of the main agent.
|
|
250
|
+
|
|
251
|
+
* The main agent’s role and purpose is defined by:
|
|
252
|
+
{system_prompt}
|
|
253
|
+
|
|
254
|
+
Your job is to create **no more than 5 subagents**. Each subagent must include:
|
|
255
|
+
|
|
256
|
+
1. Name: lowercase, short, clear, and distinct, only alphabets and underscore allowed.
|
|
257
|
+
2. Description: what this subagent is specialized at and how it contributes to the team.
|
|
258
|
+
3. System Prompt: clear instructions that define the subagent’s behavior, style, and responsibilities.
|
|
259
|
+
|
|
260
|
+
Guidelines:
|
|
261
|
+
|
|
262
|
+
* Each subagent should have a well-defined, non-overlapping role.
|
|
263
|
+
* The team should collectively cover all major aspects required for the main agent’s purpose.
|
|
264
|
+
* Avoid redundancy—each subagent must bring unique value.
|
|
265
|
+
* Keep the team **small (≤5 subagents)** but **effective**.
|
|
266
|
+
|
|
267
|
+
Finally, return the result as a list of subagent objects in JSON format."""},
|
|
268
|
+
{"role": "user", "content": "Create a team of subagents to do task: {}"},
|
|
269
|
+
],
|
|
270
|
+
|
|
271
|
+
response_format=response_format,
|
|
272
|
+
)
|
|
273
|
+
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
result = json.loads(completion.choices[0].message.content)
|
|
277
|
+
|
|
278
|
+
return result["subagents"]
|
|
279
|
+
|
|
280
|
+
|
|
281
|
+
async def prepare_realtimex_agent(self, agent_id, agent_data):
|
|
282
|
+
# directus_client = DirectusClient(server_url = directus_server_url,access_token = directus_access_token)
|
|
283
|
+
|
|
284
|
+
# d_agent = directus_client.get_directus_item_by_id("realtimex_agents",agent_id)
|
|
285
|
+
|
|
286
|
+
agent_name = agent_data["name"]
|
|
287
|
+
agent_description = agent_data["description"]
|
|
288
|
+
system_prompt = agent_data["instructions"]
|
|
289
|
+
agent_framework = agent_data["execution_config"]["framework"]
|
|
290
|
+
default_model = agent_data["execution_config"]["models"]["default_model"]
|
|
291
|
+
|
|
292
|
+
recommended_agent_flows = None
|
|
293
|
+
if "recommended_agent_flows" in agent_data:
|
|
294
|
+
recommended_agent_flows = agent_data["recommended_agent_flows"]
|
|
295
|
+
|
|
296
|
+
recommended_aci_mcp_apps = None
|
|
297
|
+
if "recommended_aci_mcp_apps" in agent_data:
|
|
298
|
+
recommended_aci_mcp_apps = agent_data["recommended_aci_mcp_apps"]
|
|
299
|
+
|
|
300
|
+
recommended_local_mcp_apps = None
|
|
301
|
+
if "recommended_local_mcp_apps" in agent_data:
|
|
302
|
+
recommended_local_mcp_apps = agent_data["recommended_local_mcp_apps"]
|
|
303
|
+
|
|
304
|
+
recommended_team_members = None
|
|
305
|
+
if "recommended_team_members" in agent_data:
|
|
306
|
+
recommended_team_members = agent_data["recommended_team_members"]
|
|
307
|
+
|
|
308
|
+
self.agent_name = agent_name
|
|
309
|
+
self.system_prompt = system_prompt
|
|
310
|
+
self.agent_framework = agent_framework
|
|
311
|
+
self.agent_description = agent_description
|
|
312
|
+
self.default_model = default_model
|
|
313
|
+
self.agent_id = agent_id
|
|
314
|
+
self.agent_data = agent_data
|
|
315
|
+
self.recommended_agent_flows = recommended_agent_flows
|
|
316
|
+
self.recommended_aci_mcp_apps = recommended_aci_mcp_apps
|
|
317
|
+
self.recommended_local_mcp_apps = recommended_local_mcp_apps
|
|
318
|
+
self.recommended_team_members = recommended_team_members
|
|
319
|
+
|
|
320
|
+
    async def prepare_memory(self,memory_id, memory_path, litellm_base_url, litellm_api_key):
        """Initialize long-term memory for this agent.

        Currently a no-op: the mem0/Chroma setup below is disabled, so
        self.memory stays None. NOTE(review): load_knowledges() calls
        self.memory.search(...) and will raise AttributeError until this
        is re-enabled.

        Args:
            memory_id: Collection name for the vector store.
            memory_path: Filesystem path for the Chroma store.
            litellm_base_url: OpenAI-compatible endpoint for LLM/embedder.
            litellm_api_key: API key for that endpoint.
        """
        pass
        # config_dict = {
        #     "version": "v1.1",
        #     "vector_store": {
        #         "provider": "chroma",
        #         "config": {
        #             "collection_name": memory_id,
        #             "path": memory_path,
        #         }
        #     },
        #     "llm": {"provider": "openai", "config": {"api_key": litellm_api_key, "openai_base_url": litellm_base_url, "temperature": 0.2, "model": "gpt-4o-mini"}},
        #     "embedder": {"provider": "openai", "config": {"api_key": litellm_api_key, "openai_base_url": litellm_base_url, "model": "text-embedding-3-small"}},
        #     "history_db_path": "",
        # }
        # print("config_dict",config_dict)
        # memory = Memory.from_config(config_dict=config_dict)
        # self.memory = memory
|
|
338
|
+
|
|
339
|
+
async def load_default_callbacks(self):
|
|
340
|
+
return [ShowToolCalling(),*get_default_callbacks()]
|
|
341
|
+
|
|
342
|
+
async def load_default_tools(self):
|
|
343
|
+
return []
|
|
344
|
+
|
|
345
|
+
async def load_aci_mcp_tools(self, linked_account_owner_id, aci_api_key):
|
|
346
|
+
from any_agent.config import MCPStdio
|
|
347
|
+
mcp_apps = []
|
|
348
|
+
for app in self.recommended_aci_mcp_apps:
|
|
349
|
+
if "realtimex_mcp_server_id" in app:
|
|
350
|
+
mcp_apps.append(app["realtimex_mcp_server_id"]["name"])
|
|
351
|
+
else:
|
|
352
|
+
mcp_apps.append(app["name"])
|
|
353
|
+
# mcp_apps = [app["realtimex_mcp_server_id"]["name"] for app in self.recommended_aci_mcp_apps]
|
|
354
|
+
if len(mcp_apps)>0:
|
|
355
|
+
mcp = MCPStdio(
|
|
356
|
+
# command=get_uvx_executable(),
|
|
357
|
+
# args=["aci-mcp@latest", 'apps-server', '--apps', ','.join(mcp_apps) , '--linked-account-owner-id', linked_account_owner_id],
|
|
358
|
+
command="aci-mcp",
|
|
359
|
+
args=['apps-server', '--apps', ','.join(mcp_apps) , '--linked-account-owner-id', linked_account_owner_id],
|
|
360
|
+
client_session_timeout_seconds=300,
|
|
361
|
+
env={
|
|
362
|
+
"ACI_SERVER_URL":"https://mcp.realtimex.ai/v1/",
|
|
363
|
+
"ACI_API_KEY":aci_api_key
|
|
364
|
+
}
|
|
365
|
+
)
|
|
366
|
+
return mcp
|
|
367
|
+
return None
|
|
368
|
+
|
|
369
|
+
async def load_local_mcp_tools(self):
|
|
370
|
+
from any_agent.config import MCPStdio
|
|
371
|
+
mcp_apps = [app["config"] for app in self.recommended_local_mcp_apps]
|
|
372
|
+
# mcp_apps = [
|
|
373
|
+
# {"command":"uvx","args":["mcp-shell-server"],"env":{"ALLOW_COMMANDS":"ls,cat,pwd,grep,wc,touch,find"}}
|
|
374
|
+
# ]
|
|
375
|
+
mcps = []
|
|
376
|
+
default_env = {
|
|
377
|
+
'NVM_INC': get_nvm_inc(),
|
|
378
|
+
'NVM_CD_FLAGS': '-q',
|
|
379
|
+
'NVM_DIR': get_nvm_dir(),
|
|
380
|
+
'PATH': f'{os.environ.copy()["PATH"]}{os.pathsep}{get_nvm_bin()}',
|
|
381
|
+
'NVM_BIN': get_nvm_bin()
|
|
382
|
+
}
|
|
383
|
+
for mcp_app in mcp_apps:
|
|
384
|
+
if mcp_app["command"] == "uvx":
|
|
385
|
+
mcp_app["command"] = get_uvx_executable()
|
|
386
|
+
if mcp_app["command"] == "npx":
|
|
387
|
+
mcp_app["command"] = get_npx_executable()
|
|
388
|
+
if "env" not in mcp_app:
|
|
389
|
+
mcp_app["env"] = {}
|
|
390
|
+
mcp_app["env"] = {**mcp_app["env"],**default_env}
|
|
391
|
+
# print("mcp_app",mcp_app)
|
|
392
|
+
mcp = MCPStdio(
|
|
393
|
+
**mcp_app,
|
|
394
|
+
client_session_timeout_seconds=300,
|
|
395
|
+
)
|
|
396
|
+
mcps.append(mcp)
|
|
397
|
+
return mcps
|
|
398
|
+
|
|
399
|
+
async def load_mcp_agent_flow_tools(self, linked_account_owner_id, aci_api_key, litellm_base_url, litellm_api_key, realtimex_access_token, workspace_slug, thread_id):
|
|
400
|
+
from any_agent.config import MCPStdio
|
|
401
|
+
agent_flow_ids = [flow["realtimex_agent_flows_id"]["id"] for flow in self.recommended_agent_flows]
|
|
402
|
+
if len(agent_flow_ids)>0:
|
|
403
|
+
mcp = MCPStdio(
|
|
404
|
+
# command=get_uvx_executable(),
|
|
405
|
+
# args=["--from", "git+https://oauth2:5yTHSE9k34jbWgzXmsxQ@rtgit.rta.vn/rtlab/rtwebteam/mcp-servers/realtimex-ai-agent-flows@main","agent-flows-mcp-server",'--flows',','.join(agent_flow_ids)],
|
|
406
|
+
command="agent-flows-mcp-server",
|
|
407
|
+
args=['--flows',','.join(agent_flow_ids)],
|
|
408
|
+
client_session_timeout_seconds=300,
|
|
409
|
+
env={
|
|
410
|
+
"AGENT_FLOWS_API_KEY": realtimex_access_token,
|
|
411
|
+
"LITELLM_API_KEY": litellm_api_key,
|
|
412
|
+
"LITELLM_API_BASE": litellm_base_url,
|
|
413
|
+
"MCP_ACI_API_KEY": aci_api_key,
|
|
414
|
+
"MCP_ACI_LINKED_ACCOUNT_OWNER_ID": linked_account_owner_id,
|
|
415
|
+
"SESSION_ID": self.current_session_id,
|
|
416
|
+
"WORKSPACE_SLUG": workspace_slug,
|
|
417
|
+
"THREAD_ID": thread_id
|
|
418
|
+
# "AGENT_FLOWS_BASE_URL": "https://your-custom-instance.com" # Optional
|
|
419
|
+
}
|
|
420
|
+
)
|
|
421
|
+
return mcp
|
|
422
|
+
return None
|
|
423
|
+
|
|
424
|
+
async def load_a2a_agents_as_tools(self):
|
|
425
|
+
def get_free_port():
|
|
426
|
+
import socket
|
|
427
|
+
sock = socket.socket()
|
|
428
|
+
sock.bind(('', 0))
|
|
429
|
+
return sock.getsockname()[1]
|
|
430
|
+
|
|
431
|
+
|
|
432
|
+
tools = []
|
|
433
|
+
for a2a_agent in self.recommended_team_members:
|
|
434
|
+
agent_id = a2a_agent["agent_id"]
|
|
435
|
+
agent_data = a2a_agent["agent_data"]
|
|
436
|
+
a2a_port = get_free_port()
|
|
437
|
+
# Create agent
|
|
438
|
+
agent = RealTimeXAgent()
|
|
439
|
+
|
|
440
|
+
await agent.load_default_agent(agent_id, agent_data, payload=a2a_agent)
|
|
441
|
+
|
|
442
|
+
agent_server_url = await agent.serve_as_a2a(
|
|
443
|
+
a2a_serving_config={"port":a2a_port,"stream_tool_usage":True},
|
|
444
|
+
)
|
|
445
|
+
|
|
446
|
+
# print(agent_server_url)
|
|
447
|
+
|
|
448
|
+
tools.append(
|
|
449
|
+
await a2a_tool_async(
|
|
450
|
+
agent_server_url,
|
|
451
|
+
http_kwargs={
|
|
452
|
+
"timeout": 300
|
|
453
|
+
}, # This gives the time agent up to 30 seconds to respond to each request
|
|
454
|
+
)
|
|
455
|
+
)
|
|
456
|
+
|
|
457
|
+
|
|
458
|
+
return tools
|
|
459
|
+
|
|
460
|
+
    async def load_knowledges(self,query, user_id, workspace_slug, thread_id, knowledges=["thread"]):
        """Retrieve relevant memory and knowledge snippets for a query.

        NOTE(review): requires self.memory to be initialized, but
        prepare_memory() is currently a no-op — this raises AttributeError
        if called as-is.
        NOTE(review): `knowledges=["thread"]` is a mutable default argument —
        safe only while callers never mutate it.

        Args:
            query: Search text run against the mem0 store.
            user_id: Memory owner id.
            workspace_slug: Workspace scope for session-level memory.
            thread_id: Thread scope for session-level memory.
            knowledges: Scope selectors ("user"/"workspace"/"thread") plus
                any additional knowledge-base ids to search individually.

        Returns:
            Tuple (memories_str, knowledges_str): newline-joined "- item"
            bullet lists of session memories and knowledge-base hits.
        """
        # Pick the broadest requested scope: user-level memory uses no run_id,
        # workspace uses the slug, thread combines slug + thread id.
        memory_session_id = None
        if "user" in knowledges:
            memory_session_id = None
        elif "workspace" in knowledges:
            memory_session_id = workspace_slug
        elif "thread" in knowledges:
            memory_session_id = f"{workspace_slug}_{thread_id}"
        # print("memory_session_id",memory_session_id)
        history_memories = self.memory.search(query=query, user_id=user_id, run_id=memory_session_id,limit=5)
        # history_memories = self.memory.get_all(user_id=user_id, run_id=memory_session_id,limit=20)
        # print("history_memories",history_memories)

        memories_str = "\n".join(f"- {entry['memory']}" for entry in history_memories["results"])

        knowledges_str = ""
        all_knowledge_memories = []
        for knowledge_id in knowledges:
            # Scope selectors were handled above; only explicit knowledge-base
            # ids are searched here.
            if knowledge_id in ["account","workspace","thread"]:
                continue
            knowledge_memories = self.memory.search(query=query, user_id=user_id, run_id=knowledge_id, limit=5)
            all_knowledge_memories = [*all_knowledge_memories,*knowledge_memories["results"]]

        knowledges_str = "\n".join(f"- {entry['memory']}" for entry in all_knowledge_memories)
        return memories_str, knowledges_str
|
|
485
|
+
|
|
486
|
+
|
|
487
|
+
async def create_agent(self, agent_framework="tinyagent",agent_config=None,tools=[],callbacks=[], memories_str=None, knowledges_str=None):
|
|
488
|
+
default_agent_config = {
|
|
489
|
+
"name": self.agent_name,
|
|
490
|
+
"model_id": self.default_model,
|
|
491
|
+
"description": self.agent_description,
|
|
492
|
+
"instructions": self.system_prompt,
|
|
493
|
+
"tools": tools,
|
|
494
|
+
"callbacks": callbacks
|
|
495
|
+
}
|
|
496
|
+
|
|
497
|
+
default_agent_framework = self.agent_framework
|
|
498
|
+
|
|
499
|
+
if agent_config:
|
|
500
|
+
default_agent_config.update(agent_config)
|
|
501
|
+
|
|
502
|
+
if agent_framework:
|
|
503
|
+
default_agent_framework = agent_framework
|
|
504
|
+
|
|
505
|
+
# print("default_agent_config", default_agent_config)
|
|
506
|
+
# print("agent_config", agent_config)
|
|
507
|
+
|
|
508
|
+
if memories_str:
|
|
509
|
+
default_agent_config["instructions"] = default_agent_config["instructions"].replace("##MEMORIES##",memories_str)
|
|
510
|
+
if knowledges_str:
|
|
511
|
+
default_agent_config["instructions"] = default_agent_config["instructions"].replace("##KNOWLEDGES##",knowledges_str)
|
|
512
|
+
# print(default_agent_framework)
|
|
513
|
+
# print(default_agent_config)
|
|
514
|
+
|
|
515
|
+
self.agent = await AnyAgent.create_async(
|
|
516
|
+
default_agent_framework, # See all options in https://mozilla-ai.github.io/any-agent/
|
|
517
|
+
AgentConfig(
|
|
518
|
+
**default_agent_config,
|
|
519
|
+
current_session_id=self.current_session_id,
|
|
520
|
+
# agent_args={
|
|
521
|
+
# "interrupt_after":["get_user_info"]
|
|
522
|
+
# }
|
|
523
|
+
),
|
|
524
|
+
)
|
|
525
|
+
|
|
526
|
+
return self.agent
|
|
527
|
+
|
|
528
|
+
async def serve_as_a2a(self, a2a_serving_config):
|
|
529
|
+
handle = await self.agent.serve_async(A2AServingConfig(**a2a_serving_config))
|
|
530
|
+
server_port = handle.port
|
|
531
|
+
server_url = f"http://localhost:{server_port}"
|
|
532
|
+
|
|
533
|
+
return server_url
|
|
534
|
+
|
|
535
|
+
    async def load_default_agent(self, agent_id, agent_data, payload):
        """Full pipeline: populate metadata, assemble tools, and create the agent.

        Reads the agent record and a request payload, loads every recommended
        tool source (ACI MCP apps, local MCP apps, agent flows, A2A team
        members), applies payload overrides and LLM settings, handles the
        deepagents framework specially, then calls create_agent().

        Args:
            agent_id: Identifier of the agent record.
            agent_data: Agent record dict (name, description, instructions,
                execution_config, recommendations).
            payload: Request context; required keys include user_id,
                workspace_slug, thread_id, knowledges, memory_id, memory_path,
                session_id, aci_linked_account_owner_id, aci_api_key,
                realtimex_access_token, litellm_api_base, litellm_api_key.
                Optional overrides: agent_description, agent_name,
                agent_framework, system_prompt, llm_setting.
        """
        system_prompt = None
        agent_framework = None
        agent_description = None
        agent_name = None
        default_model = None
        provider_name = None
        llm_setting = None

        # Required payload fields.
        # NOTE(review): user_id, knowledges, memory_id, memory_path and
        # execution_id are currently unused below (memory loading disabled).
        user_id = payload["user_id"]
        workspace_slug = payload["workspace_slug"]
        thread_id = payload["thread_id"]
        knowledges = payload["knowledges"]
        memory_id = payload["memory_id"]
        memory_path = payload["memory_path"]
        execution_id = payload["session_id"]
        aci_linked_account_owner_id = payload["aci_linked_account_owner_id"]
        aci_agent_first_api_key = payload["aci_api_key"]
        realtimex_access_token = payload["realtimex_access_token"]

        # Defaults from the agent record...
        agent_description = agent_data["description"]
        agent_name = agent_data["name"]
        agent_framework = agent_data["execution_config"]["framework"]

        # ...overridden per-request by the payload when present.
        if "agent_description" in payload:
            agent_description = payload["agent_description"]
        if "agent_name" in payload:
            agent_name = payload["agent_name"]
        if "agent_framework" in payload:
            agent_framework = payload["agent_framework"]
        if "system_prompt" in payload:
            system_prompt = payload["system_prompt"]
        if "llm_setting" in payload:
            llm_setting = payload["llm_setting"]


        default_openai_base_url = payload["litellm_api_base"]
        default_openai_api_key = payload["litellm_api_key"]

        # Load MCP tools

        # # Create agent
        # agent = RealTimeXAgent()

        # print("agent_data")

        await self.prepare_realtimex_agent(
            agent_id=agent_id,
            agent_data=agent_data
        )

        # Memory loading is currently disabled (prepare_memory is a no-op).
        # await self.prepare_memory(memory_id=memory_id, memory_path=memory_path, litellm_base_url=default_openai_base_url, litellm_api_key=default_openai_api_key)

        default_callbacks = await self.load_default_callbacks()

        # Assemble the tool list from every recommended source.
        default_tools = await self.load_default_tools()
        all_tools = [*default_tools]

        if self.recommended_aci_mcp_apps:
            aci_mcp = await self.load_aci_mcp_tools(
                linked_account_owner_id=aci_linked_account_owner_id,
                aci_api_key=aci_agent_first_api_key
            )
            if aci_mcp:
                all_tools = [*all_tools,aci_mcp]

        if self.recommended_local_mcp_apps:
            local_mcps = await self.load_local_mcp_tools()
            # print("local_mcps",local_mcps)
            all_tools = [*all_tools,*local_mcps]

        if self.recommended_agent_flows:
            aci_mcp_agent_flow = await self.load_mcp_agent_flow_tools(
                linked_account_owner_id=aci_linked_account_owner_id,
                aci_api_key=aci_agent_first_api_key,
                realtimex_access_token=realtimex_access_token,
                litellm_base_url=default_openai_base_url,
                litellm_api_key=default_openai_api_key,
                workspace_slug=workspace_slug,
                thread_id=thread_id
            )
            if aci_mcp_agent_flow:
                all_tools = [*all_tools,aci_mcp_agent_flow]

        if self.recommended_team_members:

            team_members = await self.load_a2a_agents_as_tools()
            # print(team_members)
            if team_members:
                all_tools = [*all_tools,*team_members]



        # Base agent config; litellm endpoint doubles as the default provider.
        agent_config = {
            "api_base": default_openai_base_url,
            "api_key": default_openai_api_key,
        }

        # print("agent_framework",agent_framework)

        if agent_description:
            agent_config["description"] = agent_description
        if system_prompt:
            agent_config["instructions"] = system_prompt


        if llm_setting:
            llm_config = await self.prepare_llm(**llm_setting["default"])
            # print("llm_config",llm_config)
            agent_config.update(llm_config)


        memories_str = ""
        knowledges_str = ""
        # if knowledges:
        #     memories_str, knowledges_str = await self.load_knowledges(message, user_id, workspace_slug, thread_id, knowledges)

        # deepagents runs on top of langchain with a custom agent factory,
        # filesystem backend, and optional memory/skills directories.
        if agent_framework == "deepagents":
            from deepagents import create_realtimex_deep_agent
            from deepagents.backends import CompositeBackend
            from deepagents.backends.filesystem import FilesystemBackend
            agent_framework = "langchain"
            agent_config["agent_type"] = create_realtimex_deep_agent

            # Backend is always required for deepagents
            agent_config["agent_args"] = {
                "assistant_id": agent_id,
                "backend": CompositeBackend(
                    default=FilesystemBackend(),
                    routes={},
                )
            }

            # Resolve memory paths (global and workspace)
            global_agent_path = get_deepagents_agent_path("global", agent_id)
            workspace_agent_path = get_deepagents_agent_path("workspace", agent_id, workspace_slug)

            # Enable memory if either global or workspace config exists
            agent_config["agent_args"]["enable_memory"] = bool(global_agent_path or workspace_agent_path)
            if global_agent_path:
                agent_config["agent_args"]["global_agent_path"] = global_agent_path
            if workspace_agent_path:
                agent_config["agent_args"]["workspace_agent_path"] = workspace_agent_path

            # Resolve skills paths (global and workspace)
            global_skills_dir = get_deepagents_skills_dir("global", agent_id)
            workspace_skills_dir = get_deepagents_skills_dir("workspace", agent_id, workspace_slug)

            # Enable skills if either global or workspace config exists
            agent_config["agent_args"]["enable_skills"] = bool(global_skills_dir or workspace_skills_dir)
            if global_skills_dir:
                agent_config["agent_args"]["global_skills_dir"] = global_skills_dir
            if workspace_skills_dir:
                agent_config["agent_args"]["workspace_skills_dir"] = workspace_skills_dir

        # "_auto" asks the LLM to design the sub-agent team; best-effort only.
        if "subagents" in agent_data["execution_config"]:
            if "_auto" in agent_data["execution_config"]["subagents"]:
                try:
                    subagents = await self.create_subagents(
                        instructions=system_prompt,
                        tools=[],
                        llm_config={
                            "api_base": agent_config["api_base"],
                            "api_key": agent_config["api_key"],
                            "model_id": agent_config["model_id"]
                        }
                    )
                    if subagents:
                        # Merge subagents into existing agent_args if present
                        if "agent_args" not in agent_config:
                            agent_config["agent_args"] = {}
                        agent_config["agent_args"]["subagents"] = subagents
                except Exception as e:
                    # NOTE(review): failures here are logged and ignored so
                    # agent creation still proceeds without subagents.
                    print(e)

        # print(agent_config["agent_args"])

        await self.create_agent(
            agent_framework=agent_framework, # See all options in https://mozilla-ai.github.io/any-agent/
            agent_config=agent_config,
            tools=all_tools,
            callbacks=[*default_callbacks],
            memories_str=memories_str,
            knowledges_str=knowledges_str
        )

        # return agent
|
|
@@ -0,0 +1,249 @@
|
|
|
1
|
+
from uuid import uuid4
|
|
2
|
+
import json
|
|
3
|
+
import time
|
|
4
|
+
import os
|
|
5
|
+
import redis
|
|
6
|
+
import sys
|
|
7
|
+
|
|
8
|
+
from any_agent.callbacks import Callback, Context
|
|
9
|
+
from any_agent.tracing.attributes import GenAI
|
|
10
|
+
|
|
11
|
+
# Shared Redis connection pool used by ShowToolCalling to publish
# tool-execution progress messages over pub/sub.
# NOTE(review): host/port/db are hard-coded — assumes a local Redis on
# 127.0.0.1:6379 db 1; confirm this matches the deployment environment.
pool = redis.ConnectionPool(host='127.0.0.1', port=6379, db=1)
r = redis.Redis(connection_pool=pool)
|
|
13
|
+
|
|
14
|
+
class ShowToolCalling(Callback):
    """Publish tool-execution progress messages to a Redis pub/sub channel.

    Before a tool runs, a ``toolUse`` block with status ``processing`` is
    published; after it finishes, a matching block (same ``blockId``) with
    status ``completed``, the tool output and the measured duration is
    published. The channel name is the current session id found in
    ``context.shared['_current_session_id']``; when it is missing, publishing
    is skipped instead of raising inside the agent run.
    """

    # ------------------------------------------------------------------ #
    # internal helpers                                                    #
    # ------------------------------------------------------------------ #

    @staticmethod
    def _tool_span_info(span):
        """Extract (tool_name, tool_input, tool_call_id) from the span."""
        return (
            span.attributes.get(GenAI.TOOL_NAME, ""),
            span.attributes.get(GenAI.TOOL_ARGS, "{}"),
            span.attributes.get("gen_ai.tool.call.id", ""),
        )

    @staticmethod
    def _display_info(tool_name, phase):
        """Return (toolName, toolIcon, mainTask) for the UI, per phase.

        *phase* is ``"before"`` or ``"after"``; sub-agent calls (tools named
        ``call_*``) get a bot icon and sub-agent wording.
        """
        if tool_name.startswith("call_"):
            label = f"Call sub-agent {tool_name.replace('call_','')}"
            task = (
                "Sub-agent is working..."
                if phase == "before"
                else "Sub-agent has completed the task."
            )
            return label, "bot", task
        task = "Tool is executing..." if phase == "before" else "Tool executed completely."
        return tool_name, "pencil-ruler", task

    @staticmethod
    def _publish(channel, message):
        """Publish one UI message; no-op when no session channel is known."""
        if channel:
            r.publish(channel, f'.message {json.dumps(message)}')

    @staticmethod
    def _tool_use_message(toolName, toolIcon, mainTask, tool_input, content, meta):
        """Build the common ``toolUse`` message envelope."""
        return {
            "uuid": str(uuid4()),
            "type": "responseData",
            "dataType": "toolUse",
            "data": {
                "toolName": toolName,
                "toolIcon": toolIcon,
                "mainTask": mainTask,
                "input": [tool_input],
                "content": content,
                "meta": meta,
            },
        }

    # ------------------------------------------------------------------ #
    # callback hooks                                                      #
    # ------------------------------------------------------------------ #

    def before_tool_execution(self, context: Context, *args, **kwargs) -> Context:
        """Announce a starting tool as a ``processing`` UI block."""
        span = context.current_span
        if span.attributes.get(GenAI.OPERATION_NAME, "") != "execute_tool":
            return context

        tool_name, tool_input, tool_call_id = self._tool_span_info(span)

        # final_answer output is delivered through the normal response
        # channel; it gets no progress block.
        if tool_name == "final_answer":
            return context

        toolName, toolIcon, mainTask = self._display_info(tool_name, "before")

        # Remember block id and start time so after_tool_execution can emit
        # a "completed" update for the same UI block with a duration.
        block_id = str(uuid4())
        context.shared[f"show_tool_calling_block_id_{tool_call_id}"] = block_id
        context.shared[f"show_tool_calling_start_time_{tool_call_id}"] = round(time.time() * 1000)

        current_session_id = context.shared.get("_current_session_id")

        message_content = self._tool_use_message(
            toolName,
            toolIcon,
            mainTask,
            tool_input,
            "",
            {"blockId": block_id, "status": "processing", "durationMs": 0},
        )
        self._publish(current_session_id, message_content)
        return context

    def after_tool_execution(self, context: Context, *args, **kwargs) -> Context:
        """Publish the tool's result as a ``completed`` UI block.

        JSON outputs may carry a ``ui-components`` list which is extracted,
        stamped with a fresh uuid, removed from the embedded payload, and
        published as separate messages after the toolUse block.
        """
        span = context.current_span
        if span.attributes.get(GenAI.OPERATION_NAME, "") != "execute_tool":
            return context

        tool_name, tool_input, tool_call_id = self._tool_span_info(span)
        toolName, toolIcon, mainTask = self._display_info(tool_name, "after")

        # Pair with the "processing" block from before_tool_execution; fall
        # back to a fresh id / nominal duration when no record exists.
        block_key = f"show_tool_calling_block_id_{tool_call_id}"
        block_id = context.shared[block_key] if block_key in context.shared else str(uuid4())

        start_key = f"show_tool_calling_start_time_{tool_call_id}"
        if start_key in context.shared:
            execution_time = round(time.time() * 1000) - context.shared[start_key]
        else:
            execution_time = 1000

        current_session_id = context.shared.get("_current_session_id")

        ui_components = []

        output = span.attributes.get(GenAI.OUTPUT, None)
        if output:
            # final_answer is rendered by the response stream itself.
            if toolName == "final_answer":
                return context

            output_type = span.attributes.get(GenAI.OUTPUT_TYPE, "text")
            if output_type == "json":
                try:
                    output_data = json.loads(output)
                except (TypeError, ValueError):
                    # Malformed JSON from a tool: render the raw text instead
                    # of letting the callback crash the agent run.
                    content = {
                        "dataType": "markdown",
                        "data": {"content": output, "language": None},
                    }
                else:
                    if isinstance(output_data, dict) and "ui-components" in output_data:
                        if output_data["ui-components"]:
                            for ui_component in output_data["ui-components"]:
                                ui_component["uuid"] = str(uuid4())
                                ui_component["type"] = "responseData"
                                ui_components.append(ui_component)
                        # Drop the key so the components are not duplicated
                        # inside the embedded JSON content below.
                        del output_data["ui-components"]
                    content = {
                        "dataType": "json",
                        "data": {"content": output_data, "language": None},
                    }
            else:
                content = {
                    "dataType": "markdown",
                    "data": {"content": output, "language": None},
                }
        else:
            content = {
                "dataType": "markdown",
                "data": {"content": "No outputs.", "language": None},
            }

        message_content = self._tool_use_message(
            toolName,
            toolIcon,
            mainTask,
            tool_input,
            content,
            {"blockId": block_id, "status": "completed", "durationMs": execution_time},
        )
        self._publish(current_session_id, message_content)

        for ui_component in ui_components:
            self._publish(current_session_id, ui_component)

        return context
|
|
@@ -0,0 +1,228 @@
|
|
|
1
|
+
from datetime import datetime
def print_time(text):
    """Print *text* followed by the current wall-clock time (HH:MM:SS)."""
    stamp = datetime.now().strftime("%H:%M:%S")
    print(f"{text}: ", stamp)
|
|
9
|
+
|
|
10
|
+
print_time("begin")  # startup timing marker: module load begins
|
|
11
|
+
|
|
12
|
+
from .agent import RealTimeXAgent
|
|
13
|
+
|
|
14
|
+
import json
|
|
15
|
+
import os
|
|
16
|
+
import asyncio
|
|
17
|
+
import httpx
|
|
18
|
+
import sys
|
|
19
|
+
# from inputimeout import inputimeout, TimeoutOccurred
|
|
20
|
+
import threading
|
|
21
|
+
|
|
22
|
+
from any_agent.serving import A2AServingConfig
|
|
23
|
+
from any_agent import AgentConfig, AnyAgent
|
|
24
|
+
|
|
25
|
+
from a2a.client import A2ACardResolver, A2AClient
|
|
26
|
+
from a2a.types import AgentCard
|
|
27
|
+
|
|
28
|
+
from uuid import uuid4
|
|
29
|
+
|
|
30
|
+
from a2a.types import MessageSendParams, SendMessageRequest, TaskState
|
|
31
|
+
|
|
32
|
+
print_time("after import")  # startup timing marker: imports finished

# Create the httpx client
# NOTE(review): this module-level client is only referenced by the
# commented-out A2A driver code inside run(); the live health-check loop
# creates its own client, so this one is never closed — confirm it is
# still needed.
httpx_client = httpx.AsyncClient()

import nest_asyncio

# Patch asyncio so event loops can be re-entered (nest_asyncio's documented
# behavior); required because run() is driven via asyncio.run() while other
# code may already be running a loop.
nest_asyncio.apply()
|
|
40
|
+
|
|
41
|
+
async def run():
    """Start the RealTimeX agent, serve it over A2A, and keep it alive.

    Expects two CLI arguments:
        sys.argv[1] -- JSON payload describing the agent, session, models
                       and credentials
        sys.argv[2] -- port the A2A server should listen on

    After the server is up, loops forever probing the server URL every 30
    seconds and emitting ``<signal>server-listening</signal>`` /
    ``<signal>server-stopped</signal>`` markers for the supervising process.
    """
    print_time("begin run")

    payload = json.loads(sys.argv[1])
    # NOTE(review): kept as str, matching the original behavior — confirm the
    # A2A serving config coerces the port to int.
    a2a_port = sys.argv[2]

    # Required payload fields — direct indexing raises KeyError early when
    # any of them is missing (intentional fail-fast validation).
    agent_id = payload["agent_id"]
    agent_data = payload["agent_data"]
    user_id = payload["user_id"]
    workspace_slug = payload["workspace_slug"]
    thread_id = payload["thread_id"]
    knowledges = payload["knowledges"]
    memory_id = payload["memory_id"]
    memory_path = payload["memory_path"]
    execution_id = payload["session_id"]
    message = payload["query"]
    messages = payload["messages"]
    aci_linked_account_owner_id = payload["aci_linked_account_owner_id"]
    aci_agent_first_api_key = payload["aci_api_key"]
    realtimex_access_token = payload["realtimex_access_token"]

    # Optional overrides (None when absent). The agent itself consumes the
    # full payload via load_default_agent below.
    agent_description = payload.get("agent_description")
    agent_framework = payload.get("agent_framework")
    system_prompt = payload.get("system_prompt")
    llm_setting = payload.get("llm_setting")

    default_openai_base_url = payload["litellm_api_base"]
    default_openai_api_key = payload["litellm_api_key"]

    # Build the agent bound to this execution session and serve it over A2A.
    agent = RealTimeXAgent(current_session_id=execution_id)
    await agent.load_default_agent(agent_id, agent_data, payload)

    server_url = await agent.serve_as_a2a(
        a2a_serving_config={"port": a2a_port, "stream_tool_usage": True}
    )

    # The supervising process parses this marker to discover the server URL.
    print(f"<server-url>{server_url}</server-url>")

    # Health-check loop: probe the server, report its status, then wait 30 s
    # before the next probe (sleeping on success too avoids a hot loop).
    async with httpx.AsyncClient() as client:
        while True:
            try:
                await client.get(server_url, timeout=1.0)
                print(f"<signal>server-listening</signal>")
            except (httpx.RequestError, httpx.TimeoutException):
                print(f"<signal>server-stopped</signal>")
            await asyncio.sleep(30)
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
def main():
    """Console-script entry point: run the async agent server until it exits."""
    print_time("begin main")  # startup timing marker: entry point reached
    # nest_asyncio.apply() at module level allows this even when a loop exists.
    asyncio.run(run())
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
def send_email(body: str, to_email: str, from_email: str = "me") -> str:
    """Send Email to someone.

    NOTE(review): this is a stub -- no email is actually delivered; it only
    returns a confirmation string naming the receiver.

    Args:
        body (str): The body of email
        to_email (str): The receiver email
        from_email (str): The sender

    Returns:
        A confirmation message stating the email was sent to ``to_email``.
    """

    return f"Email has been sent to {to_email}"
|