letta-nightly 0.6.39.dev20250314104053__py3-none-any.whl → 0.6.40.dev20250314222759__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of letta-nightly might be problematic; see the advisory details below for more information.

Files changed (67) hide show
  1. letta/__init__.py +1 -1
  2. letta/agent.py +14 -4
  3. letta/agents/ephemeral_agent.py +2 -1
  4. letta/agents/low_latency_agent.py +8 -0
  5. letta/dynamic_multi_agent.py +274 -0
  6. letta/functions/function_sets/base.py +1 -0
  7. letta/functions/function_sets/extras.py +2 -1
  8. letta/functions/function_sets/multi_agent.py +17 -0
  9. letta/functions/helpers.py +41 -0
  10. letta/functions/mcp_client/__init__.py +0 -0
  11. letta/functions/mcp_client/base_client.py +61 -0
  12. letta/functions/mcp_client/sse_client.py +21 -0
  13. letta/functions/mcp_client/stdio_client.py +103 -0
  14. letta/functions/mcp_client/types.py +48 -0
  15. letta/functions/schema_generator.py +1 -1
  16. letta/helpers/converters.py +67 -0
  17. letta/llm_api/openai.py +1 -1
  18. letta/memory.py +2 -1
  19. letta/orm/__init__.py +2 -0
  20. letta/orm/agent.py +69 -20
  21. letta/orm/custom_columns.py +15 -0
  22. letta/orm/group.py +33 -0
  23. letta/orm/groups_agents.py +13 -0
  24. letta/orm/message.py +7 -4
  25. letta/orm/organization.py +1 -0
  26. letta/orm/sqlalchemy_base.py +3 -3
  27. letta/round_robin_multi_agent.py +152 -0
  28. letta/schemas/agent.py +3 -0
  29. letta/schemas/enums.py +0 -4
  30. letta/schemas/group.py +65 -0
  31. letta/schemas/letta_message.py +167 -106
  32. letta/schemas/letta_message_content.py +192 -0
  33. letta/schemas/message.py +28 -36
  34. letta/schemas/tool.py +1 -1
  35. letta/serialize_schemas/__init__.py +1 -1
  36. letta/serialize_schemas/marshmallow_agent.py +108 -0
  37. letta/serialize_schemas/{agent_environment_variable.py → marshmallow_agent_environment_variable.py} +1 -1
  38. letta/serialize_schemas/marshmallow_base.py +52 -0
  39. letta/serialize_schemas/{block.py → marshmallow_block.py} +1 -1
  40. letta/serialize_schemas/{custom_fields.py → marshmallow_custom_fields.py} +12 -0
  41. letta/serialize_schemas/marshmallow_message.py +42 -0
  42. letta/serialize_schemas/{tag.py → marshmallow_tag.py} +12 -2
  43. letta/serialize_schemas/{tool.py → marshmallow_tool.py} +1 -1
  44. letta/serialize_schemas/pydantic_agent_schema.py +111 -0
  45. letta/server/rest_api/app.py +15 -0
  46. letta/server/rest_api/routers/v1/__init__.py +2 -0
  47. letta/server/rest_api/routers/v1/agents.py +46 -40
  48. letta/server/rest_api/routers/v1/groups.py +233 -0
  49. letta/server/rest_api/routers/v1/tools.py +31 -3
  50. letta/server/rest_api/utils.py +1 -1
  51. letta/server/server.py +272 -22
  52. letta/services/agent_manager.py +65 -28
  53. letta/services/group_manager.py +147 -0
  54. letta/services/helpers/agent_manager_helper.py +151 -1
  55. letta/services/message_manager.py +11 -3
  56. letta/services/passage_manager.py +15 -0
  57. letta/settings.py +5 -0
  58. letta/supervisor_multi_agent.py +103 -0
  59. {letta_nightly-0.6.39.dev20250314104053.dist-info → letta_nightly-0.6.40.dev20250314222759.dist-info}/METADATA +1 -2
  60. {letta_nightly-0.6.39.dev20250314104053.dist-info → letta_nightly-0.6.40.dev20250314222759.dist-info}/RECORD +63 -49
  61. letta/helpers/mcp_helpers.py +0 -108
  62. letta/serialize_schemas/agent.py +0 -80
  63. letta/serialize_schemas/base.py +0 -64
  64. letta/serialize_schemas/message.py +0 -29
  65. {letta_nightly-0.6.39.dev20250314104053.dist-info → letta_nightly-0.6.40.dev20250314222759.dist-info}/LICENSE +0 -0
  66. {letta_nightly-0.6.39.dev20250314104053.dist-info → letta_nightly-0.6.40.dev20250314222759.dist-info}/WHEEL +0 -0
  67. {letta_nightly-0.6.39.dev20250314104053.dist-info → letta_nightly-0.6.40.dev20250314222759.dist-info}/entry_points.txt +0 -0
letta/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
- __version__ = "0.6.39"
1
+ __version__ = "0.6.40"
2
2
 
3
3
  # import clients
4
4
  from letta.client.client import LocalClient, RESTClient, create_client
letta/agent.py CHANGED
@@ -22,11 +22,11 @@ from letta.errors import ContextWindowExceededError
22
22
  from letta.functions.ast_parsers import coerce_dict_args_by_annotations, get_function_annotations_from_source
23
23
  from letta.functions.functions import get_function_from_module
24
24
  from letta.functions.helpers import execute_composio_action, generate_composio_action_from_func_name
25
+ from letta.functions.mcp_client.base_client import BaseMCPClient
25
26
  from letta.helpers import ToolRulesSolver
26
27
  from letta.helpers.composio_helpers import get_composio_api_key
27
28
  from letta.helpers.datetime_helpers import get_utc_time
28
29
  from letta.helpers.json_helpers import json_dumps, json_loads
29
- from letta.helpers.mcp_helpers import BaseMCPClient
30
30
  from letta.interface import AgentInterface
31
31
  from letta.llm_api.helpers import calculate_summarizer_cutoff, get_token_counts_for_messages, is_context_overflow_error
32
32
  from letta.llm_api.llm_api_tools import create
@@ -39,7 +39,8 @@ from letta.orm.enums import ToolType
39
39
  from letta.schemas.agent import AgentState, AgentStepResponse, UpdateAgent
40
40
  from letta.schemas.block import BlockUpdate
41
41
  from letta.schemas.embedding_config import EmbeddingConfig
42
- from letta.schemas.enums import MessageContentType, MessageRole
42
+ from letta.schemas.enums import MessageRole
43
+ from letta.schemas.letta_message_content import TextContent
43
44
  from letta.schemas.memory import ContextWindowOverview, Memory
44
45
  from letta.schemas.message import Message, ToolReturn
45
46
  from letta.schemas.openai.chat_completion_response import ChatCompletionResponse
@@ -95,6 +96,7 @@ class Agent(BaseAgent):
95
96
  first_message_verify_mono: bool = True, # TODO move to config?
96
97
  # MCP sessions, state held in-memory in the server
97
98
  mcp_clients: Optional[Dict[str, BaseMCPClient]] = None,
99
+ save_last_response: bool = False,
98
100
  ):
99
101
  assert isinstance(agent_state.memory, Memory), f"Memory object is not of type Memory: {type(agent_state.memory)}"
100
102
  # Hold a copy of the state that was used to init the agent
@@ -149,6 +151,10 @@ class Agent(BaseAgent):
149
151
  # Load last function response from message history
150
152
  self.last_function_response = self.load_last_function_response()
151
153
 
154
+ # Save last responses in memory
155
+ self.save_last_response = save_last_response
156
+ self.last_response_messages = []
157
+
152
158
  # Logger that the Agent specifically can use, will also report the agent_state ID with the logs
153
159
  self.logger = get_logger(agent_state.id)
154
160
 
@@ -160,7 +166,7 @@ class Agent(BaseAgent):
160
166
  in_context_messages = self.agent_manager.get_in_context_messages(agent_id=self.agent_state.id, actor=self.user)
161
167
  for i in range(len(in_context_messages) - 1, -1, -1):
162
168
  msg = in_context_messages[i]
163
- if msg.role == MessageRole.tool and msg.content and len(msg.content) == 1 and msg.content[0].type == MessageContentType.text:
169
+ if msg.role == MessageRole.tool and msg.content and len(msg.content) == 1 and isinstance(msg.content[0], TextContent):
164
170
  text_content = msg.content[0].text
165
171
  try:
166
172
  response_json = json.loads(text_content)
@@ -926,6 +932,9 @@ class Agent(BaseAgent):
926
932
  else:
927
933
  all_new_messages = all_response_messages
928
934
 
935
+ if self.save_last_response:
936
+ self.last_response_messages = all_response_messages
937
+
929
938
  # Check the memory pressure and potentially issue a memory pressure warning
930
939
  current_total_tokens = response.usage.total_tokens
931
940
  active_memory_warning = False
@@ -1052,6 +1061,7 @@ class Agent(BaseAgent):
1052
1061
 
1053
1062
  else:
1054
1063
  logger.error(f"step() failed with an unrecognized exception: '{str(e)}'")
1064
+ traceback.print_exc()
1055
1065
  raise e
1056
1066
 
1057
1067
  def step_user_message(self, user_message_str: str, **kwargs) -> AgentStepResponse:
@@ -1201,7 +1211,7 @@ class Agent(BaseAgent):
1201
1211
  and in_context_messages[1].role == MessageRole.user
1202
1212
  and in_context_messages[1].content
1203
1213
  and len(in_context_messages[1].content) == 1
1204
- and in_context_messages[1].content[0].type == MessageContentType.text
1214
+ and isinstance(in_context_messages[1].content[0], TextContent)
1205
1215
  # TODO remove hardcoding
1206
1216
  and "The following is a summary of the previous " in in_context_messages[1].content[0].text
1207
1217
  ):
@@ -5,7 +5,8 @@ import openai
5
5
  from letta.agents.base_agent import BaseAgent
6
6
  from letta.schemas.agent import AgentState
7
7
  from letta.schemas.enums import MessageRole
8
- from letta.schemas.letta_message import TextContent, UserMessage
8
+ from letta.schemas.letta_message import UserMessage
9
+ from letta.schemas.letta_message_content import TextContent
9
10
  from letta.schemas.message import Message
10
11
  from letta.schemas.openai.chat_completion_request import ChatCompletionRequest
11
12
  from letta.schemas.user import User
@@ -40,6 +40,7 @@ from letta.services.agent_manager import AgentManager
40
40
  from letta.services.block_manager import BlockManager
41
41
  from letta.services.helpers.agent_manager_helper import compile_system_message
42
42
  from letta.services.message_manager import MessageManager
43
+ from letta.services.passage_manager import PassageManager
43
44
  from letta.services.summarizer.enums import SummarizationMode
44
45
  from letta.services.summarizer.summarizer import Summarizer
45
46
  from letta.utils import united_diff
@@ -75,6 +76,7 @@ class LowLatencyAgent(BaseAgent):
75
76
  # TODO: Make this more general, factorable
76
77
  # Summarizer settings
77
78
  self.block_manager = block_manager
79
+ self.passage_manager = PassageManager() # TODO: pass this in
78
80
  # TODO: This is not guaranteed to exist!
79
81
  self.summary_block_label = "human"
80
82
  self.summarizer = Summarizer(
@@ -246,10 +248,16 @@ class LowLatencyAgent(BaseAgent):
246
248
  return in_context_messages
247
249
 
248
250
  memory_edit_timestamp = get_utc_time()
251
+
252
+ num_messages = self.message_manager.size(actor=actor, agent_id=agent_id)
253
+ num_archival_memories = self.passage_manager.size(actor=actor, agent_id=agent_id)
254
+
249
255
  new_system_message_str = compile_system_message(
250
256
  system_prompt=agent_state.system,
251
257
  in_context_memory=agent_state.memory,
252
258
  in_context_memory_last_edit=memory_edit_timestamp,
259
+ previous_message_count=num_messages,
260
+ archival_memory_size=num_archival_memories,
253
261
  )
254
262
 
255
263
  diff = united_diff(curr_system_message_text, new_system_message_str)
@@ -0,0 +1,274 @@
1
+ from typing import List, Optional
2
+
3
+ from letta.agent import Agent, AgentState
4
+ from letta.interface import AgentInterface
5
+ from letta.orm import User
6
+ from letta.schemas.block import Block
7
+ from letta.schemas.letta_message_content import TextContent
8
+ from letta.schemas.message import Message, MessageCreate
9
+ from letta.schemas.openai.chat_completion_response import UsageStatistics
10
+ from letta.schemas.usage import LettaUsageStatistics
11
+ from letta.services.tool_manager import ToolManager
12
+
13
+
14
+ class DynamicMultiAgent(Agent):
15
+ def __init__(
16
+ self,
17
+ interface: AgentInterface,
18
+ agent_state: AgentState,
19
+ user: User = None,
20
+ # custom
21
+ group_id: str = "",
22
+ agent_ids: List[str] = [],
23
+ description: str = "",
24
+ max_turns: Optional[int] = None,
25
+ termination_token: str = "DONE!",
26
+ ):
27
+ super().__init__(interface, agent_state, user)
28
+ self.group_id = group_id
29
+ self.agent_ids = agent_ids
30
+ self.description = description
31
+ self.max_turns = max_turns or len(agent_ids)
32
+ self.termination_token = termination_token
33
+
34
+ self.tool_manager = ToolManager()
35
+
36
+ def step(
37
+ self,
38
+ messages: List[MessageCreate],
39
+ chaining: bool = True,
40
+ max_chaining_steps: Optional[int] = None,
41
+ put_inner_thoughts_first: bool = True,
42
+ **kwargs,
43
+ ) -> LettaUsageStatistics:
44
+ total_usage = UsageStatistics()
45
+ step_count = 0
46
+
47
+ token_streaming = self.interface.streaming_mode if hasattr(self.interface, "streaming_mode") else False
48
+ metadata = self.interface.metadata if hasattr(self.interface, "metadata") else None
49
+
50
+ agents = {}
51
+ message_index = {self.agent_state.id: 0}
52
+ agents[self.agent_state.id] = self.load_manager_agent()
53
+ for agent_id in self.agent_ids:
54
+ agents[agent_id] = self.load_participant_agent(agent_id=agent_id)
55
+ message_index[agent_id] = 0
56
+
57
+ chat_history: List[Message] = []
58
+ new_messages = messages
59
+ speaker_id = None
60
+ try:
61
+ for _ in range(self.max_turns):
62
+ agent_id_options = [agent_id for agent_id in self.agent_ids if agent_id != speaker_id]
63
+ manager_message = self.ask_manager_to_choose_participant_message(new_messages, chat_history, agent_id_options)
64
+ manager_agent = agents[self.agent_state.id]
65
+ usage_stats = manager_agent.step(
66
+ messages=[manager_message],
67
+ chaining=chaining,
68
+ max_chaining_steps=max_chaining_steps,
69
+ stream=token_streaming,
70
+ skip_verify=True,
71
+ metadata=metadata,
72
+ put_inner_thoughts_first=put_inner_thoughts_first,
73
+ )
74
+ responses = Message.to_letta_messages_from_list(manager_agent.last_response_messages)
75
+ assistant_message = [response for response in responses if response.message_type == "assistant_message"][0]
76
+ for name, agent_id in [(agents[agent_id].agent_state.name, agent_id) for agent_id in agent_id_options]:
77
+ if name.lower() in assistant_message.content.lower():
78
+ speaker_id = agent_id
79
+
80
+ # sum usage
81
+ total_usage.prompt_tokens += usage_stats.prompt_tokens
82
+ total_usage.completion_tokens += usage_stats.completion_tokens
83
+ total_usage.total_tokens += usage_stats.total_tokens
84
+ step_count += 1
85
+
86
+ # initialize input messages
87
+ for message in chat_history[message_index[speaker_id] :]:
88
+ message.id = Message.generate_id()
89
+ message.agent_id = speaker_id
90
+
91
+ for message in new_messages:
92
+ chat_history.append(
93
+ Message(
94
+ agent_id=speaker_id,
95
+ role=message.role,
96
+ content=[TextContent(text=message.content)],
97
+ name=message.name,
98
+ model=None,
99
+ tool_calls=None,
100
+ tool_call_id=None,
101
+ group_id=self.group_id,
102
+ )
103
+ )
104
+
105
+ # load agent and perform step
106
+ participant_agent = agents[speaker_id]
107
+ usage_stats = participant_agent.step(
108
+ messages=chat_history[message_index[speaker_id] :],
109
+ chaining=chaining,
110
+ max_chaining_steps=max_chaining_steps,
111
+ stream=token_streaming,
112
+ skip_verify=True,
113
+ metadata=metadata,
114
+ put_inner_thoughts_first=put_inner_thoughts_first,
115
+ )
116
+
117
+ # parse new messages for next step
118
+ responses = Message.to_letta_messages_from_list(
119
+ participant_agent.last_response_messages,
120
+ )
121
+
122
+ assistant_messages = [response for response in responses if response.message_type == "assistant_message"]
123
+ new_messages = [
124
+ MessageCreate(
125
+ role="system",
126
+ content=message.content,
127
+ name=participant_agent.agent_state.name,
128
+ )
129
+ for message in assistant_messages
130
+ ]
131
+ message_index[agent_id] = len(chat_history) + len(new_messages)
132
+
133
+ # sum usage
134
+ total_usage.prompt_tokens += usage_stats.prompt_tokens
135
+ total_usage.completion_tokens += usage_stats.completion_tokens
136
+ total_usage.total_tokens += usage_stats.total_tokens
137
+ step_count += 1
138
+
139
+ # check for termination token
140
+ if any(self.termination_token in message.content for message in new_messages):
141
+ break
142
+
143
+ # persist remaining chat history
144
+ for message in new_messages:
145
+ chat_history.append(
146
+ Message(
147
+ agent_id=agent_id,
148
+ role=message.role,
149
+ content=[TextContent(text=message.content)],
150
+ name=message.name,
151
+ model=None,
152
+ tool_calls=None,
153
+ tool_call_id=None,
154
+ group_id=self.group_id,
155
+ )
156
+ )
157
+ for agent_id, index in message_index.items():
158
+ if agent_id == speaker_id:
159
+ continue
160
+ for message in chat_history[index:]:
161
+ message.id = Message.generate_id()
162
+ message.agent_id = agent_id
163
+ self.message_manager.create_many_messages(chat_history[index:], actor=self.user)
164
+
165
+ except Exception as e:
166
+ raise e
167
+ finally:
168
+ self.interface.step_yield()
169
+
170
+ self.interface.step_complete()
171
+
172
+ return LettaUsageStatistics(**total_usage.model_dump(), step_count=step_count)
173
+
174
+ def load_manager_agent(self) -> Agent:
175
+ for participant_agent_id in self.agent_ids:
176
+ participant_agent_state = self.agent_manager.get_agent_by_id(agent_id=participant_agent_id, actor=self.user)
177
+ participant_persona_block = participant_agent_state.memory.get_block(label="persona")
178
+ new_block = self.block_manager.create_or_update_block(
179
+ block=Block(
180
+ label=participant_agent_id,
181
+ value=participant_persona_block.value,
182
+ ),
183
+ actor=self.user,
184
+ )
185
+ self.agent_state = self.agent_manager.update_block_with_label(
186
+ agent_id=self.agent_state.id,
187
+ block_label=participant_agent_id,
188
+ new_block_id=new_block.id,
189
+ actor=self.user,
190
+ )
191
+
192
+ persona_block = self.agent_state.memory.get_block(label="persona")
193
+ group_chat_manager_persona = (
194
+ f"You are overseeing a group chat with {len(self.agent_ids) - 1} agents and "
195
+ f"one user. Description of the group: {self.description}\n"
196
+ "On each turn, you will be provided with the chat history and latest message. "
197
+ "Your task is to decide which participant should speak next in the chat based "
198
+ "on the chat history. Each agent has a memory block labeled with their ID which "
199
+ "holds info about them, and you should use this context to inform your decision."
200
+ )
201
+ self.agent_state.memory.update_block_value(label="persona", value=persona_block.value + group_chat_manager_persona)
202
+ return Agent(
203
+ agent_state=self.agent_state,
204
+ interface=self.interface,
205
+ user=self.user,
206
+ save_last_response=True,
207
+ )
208
+
209
+ def load_participant_agent(self, agent_id: str) -> Agent:
210
+ agent_state = self.agent_manager.get_agent_by_id(agent_id=agent_id, actor=self.user)
211
+ persona_block = agent_state.memory.get_block(label="persona")
212
+ group_chat_participant_persona = (
213
+ f"You are a participant in a group chat with {len(self.agent_ids) - 1} other "
214
+ "agents and one user. Respond to new messages in the group chat when prompted. "
215
+ f"Description of the group: {self.description}. About you: "
216
+ )
217
+ agent_state.memory.update_block_value(label="persona", value=group_chat_participant_persona + persona_block.value)
218
+ return Agent(
219
+ agent_state=agent_state,
220
+ interface=self.interface,
221
+ user=self.user,
222
+ save_last_response=True,
223
+ )
224
+
225
+ '''
226
+ def attach_choose_next_participant_tool(self) -> AgentState:
227
+ def choose_next_participant(next_speaker_agent_id: str) -> str:
228
+ """
229
+ Returns ID of the agent in the group chat that should reply to the latest message in the conversation. The agent ID will always be in the format: `agent-{UUID}`.
230
+ Args:
231
+ next_speaker_agent_id (str): The ID of the agent that is most suitable to be the next speaker.
232
+ Returns:
233
+ str: The ID of the agent that should be the next speaker.
234
+ """
235
+ return next_speaker_agent_id
236
+ source_code = parse_source_code(choose_next_participant)
237
+ tool = self.tool_manager.create_or_update_tool(
238
+ Tool(
239
+ source_type="python",
240
+ source_code=source_code,
241
+ name="choose_next_participant",
242
+ ),
243
+ actor=self.user,
244
+ )
245
+ return self.agent_manager.attach_tool(agent_id=self.agent_state.id, tool_id=tool.id, actor=self.user)
246
+ '''
247
+
248
+ def ask_manager_to_choose_participant_message(
249
+ self,
250
+ new_messages: List[MessageCreate],
251
+ chat_history: List[Message],
252
+ agent_id_options: List[str],
253
+ ) -> Message:
254
+ chat_history = [f"{message.name or 'user'}: {message.content[0].text}" for message in chat_history]
255
+ for message in new_messages:
256
+ chat_history.append(f"{message.name or 'user'}: {message.content}")
257
+ context_messages = "\n".join(chat_history)
258
+
259
+ message_text = (
260
+ "Choose the most suitable agent to reply to the latest message in the "
261
+ f"group chat from the following options: {agent_id_options}. Do not "
262
+ "respond to the messages yourself, your task is only to decide the "
263
+ f"next speaker, not to participate. \nChat history:\n{context_messages}"
264
+ )
265
+ return Message(
266
+ agent_id=self.agent_state.id,
267
+ role="user",
268
+ content=[TextContent(text=message_text)],
269
+ name=None,
270
+ model=None,
271
+ tool_calls=None,
272
+ tool_call_id=None,
273
+ group_id=self.group_id,
274
+ )
@@ -77,6 +77,7 @@ def archival_memory_insert(self: "Agent", content: str) -> Optional[str]:
77
77
  text=content,
78
78
  actor=self.user,
79
79
  )
80
+ self.agent_manager.rebuild_system_prompt(agent_id=self.agent_state.id, actor=self.user, force=True)
80
81
  return None
81
82
 
82
83
 
@@ -7,7 +7,8 @@ import requests
7
7
  from letta.constants import MESSAGE_CHATGPT_FUNCTION_MODEL, MESSAGE_CHATGPT_FUNCTION_SYSTEM_MESSAGE
8
8
  from letta.helpers.json_helpers import json_dumps, json_loads
9
9
  from letta.llm_api.llm_api_tools import create
10
- from letta.schemas.message import Message, TextContent
10
+ from letta.schemas.letta_message_content import TextContent
11
+ from letta.schemas.message import Message
11
12
 
12
13
 
13
14
  def message_chatgpt(self, message: str):
@@ -3,6 +3,7 @@ from typing import TYPE_CHECKING, List
3
3
 
4
4
  from letta.functions.helpers import (
5
5
  _send_message_to_agents_matching_tags_async,
6
+ _send_message_to_all_agents_in_group_async,
6
7
  execute_send_message_to_agent,
7
8
  fire_and_forget_send_to_agent,
8
9
  )
@@ -86,3 +87,19 @@ def send_message_to_agents_matching_tags(self: "Agent", message: str, match_all:
86
87
  """
87
88
 
88
89
  return asyncio.run(_send_message_to_agents_matching_tags_async(self, message, match_all, match_some))
90
+
91
+
92
+ def send_message_to_all_agents_in_group(self: "Agent", message: str) -> List[str]:
93
+ """
94
+ Sends a message to all agents within the same multi-agent group.
95
+
96
+ Args:
97
+ message (str): The content of the message to be sent to each matching agent.
98
+
99
+ Returns:
100
+ List[str]: A list of responses from the agents that matched the filtering criteria. Each
101
+ response corresponds to a single agent. Agents that do not respond will not have an entry
102
+ in the returned list.
103
+ """
104
+
105
+ return asyncio.run(_send_message_to_all_agents_in_group_async(self, message))
@@ -604,6 +604,47 @@ async def _send_message_to_agents_matching_tags_async(
604
604
  return final
605
605
 
606
606
 
607
async def _send_message_to_all_agents_in_group_async(sender_agent: "Agent", message: str) -> List[str]:
    """Broadcast *message* to every agent in the sender's multi-agent group.

    Sends run concurrently (bounded by a semaphore sized from settings), and a
    failed send is captured as its stringified exception instead of aborting
    the whole batch. Returns one entry per group agent, in group order.
    """
    server = get_letta_server()

    augmented_message = (
        f"[Incoming message from agent with ID '{sender_agent.agent_state.id}' - to reply to this message, "
        f"make sure to use the 'send_message' at the end, and the system will notify the sender of your response] "
        f"{message}"
    )

    group_agent_ids = sender_agent.agent_state.multi_agent_group.agent_ids
    group_agent_states = [server.agent_manager.get_agent_by_id(agent_id=agent_id, actor=sender_agent.user) for agent_id in group_agent_ids]

    # One system-role message, attributed to the sender by name.
    messages = [MessageCreate(role=MessageRole.system, content=augmented_message, name=sender_agent.agent_state.name)]

    # Bound concurrency so a large group does not overwhelm the server.
    semaphore = asyncio.Semaphore(settings.multi_agent_concurrent_sends)

    async def _send_single(agent_state):
        # Each send waits for a semaphore slot, then retries with a timeout.
        async with semaphore:
            return await async_send_message_with_retries(
                server=server,
                sender_agent=sender_agent,
                target_agent_id=agent_state.id,
                messages=messages,
                max_retries=3,
                timeout=settings.multi_agent_send_message_timeout,
            )

    pending = [asyncio.create_task(_send_single(agent_state)) for agent_state in group_agent_states]
    results = await asyncio.gather(*pending, return_exceptions=True)
    # Normalize: exceptions become their string form, successes pass through.
    return [str(outcome) if isinstance(outcome, Exception) else outcome for outcome in results]
646
+
647
+
607
648
  def generate_model_from_args_json_schema(schema: Dict[str, Any]) -> Type[BaseModel]:
608
649
  """Creates a Pydantic model from a JSON schema.
609
650
 
File without changes
@@ -0,0 +1,61 @@
1
+ import asyncio
2
+ from typing import List, Optional, Tuple
3
+
4
+ from mcp import ClientSession, Tool
5
+
6
+ from letta.functions.mcp_client.types import BaseServerConfig
7
+ from letta.log import get_logger
8
+
9
+ logger = get_logger(__name__)
10
+
11
+
12
class BaseMCPClient:
    """Synchronous facade over an async MCP ``ClientSession``.

    Owns a dedicated event loop so callers can use blocking methods
    (``list_tools``, ``execute_tool``) without touching asyncio directly.
    Transport-specific subclasses implement ``_initialize_connection`` and
    register their teardown callbacks in ``cleanup_funcs``.
    """

    def __init__(self):
        # Session/transport handles; populated by the subclass on connect.
        self.session: Optional[ClientSession] = None
        self.stdio = None
        self.write = None
        self.initialized = False
        # Private loop that drives every async session call synchronously.
        self.loop = asyncio.new_event_loop()
        # Teardown callbacks, executed in registration order by cleanup().
        self.cleanup_funcs = []

    def connect_to_server(self, server_config: BaseServerConfig):
        """Open the transport and complete the MCP handshake (blocking).

        Raises:
            RuntimeError: if the subclass fails to establish the connection.
        """
        asyncio.set_event_loop(self.loop)
        if not self._initialize_connection(server_config):
            raise RuntimeError(
                f"Connecting to MCP server failed. Please review your server config: {server_config.model_dump_json(indent=4)}"
            )
        self.loop.run_until_complete(self.session.initialize())
        self.initialized = True

    def _initialize_connection(self, server_config: BaseServerConfig) -> bool:
        """Transport-specific setup; must set self.session and return success."""
        raise NotImplementedError("Subclasses must implement _initialize_connection")

    def list_tools(self) -> List[Tool]:
        """Return the tools advertised by the connected server."""
        self._check_initialized()
        return self.loop.run_until_complete(self.session.list_tools()).tools

    def execute_tool(self, tool_name: str, tool_args: dict) -> Tuple[str, bool]:
        """Call *tool_name* with *tool_args*; return (stringified content, is-error flag)."""
        self._check_initialized()
        outcome = self.loop.run_until_complete(self.session.call_tool(tool_name, tool_args))
        return str(outcome.content), outcome.isError

    def _check_initialized(self):
        """Raise unless connect_to_server() has completed successfully."""
        if self.initialized:
            return
        logger.error("MCPClient has not been initialized")
        raise RuntimeError("MCPClient has not been initialized")

    def cleanup(self):
        """Run registered teardown callbacks and close the private event loop.

        Best-effort: any failure is logged as a warning rather than raised.
        """
        try:
            for teardown in self.cleanup_funcs:
                teardown()
            self.initialized = False
            if not self.loop.is_closed():
                self.loop.close()
        except Exception as e:
            logger.warning(e)
        finally:
            logger.info("Cleaned up MCP clients on shutdown.")
@@ -0,0 +1,21 @@
1
+ from mcp import ClientSession
2
+ from mcp.client.sse import sse_client
3
+
4
+ from letta.functions.mcp_client.base_client import BaseMCPClient
5
+ from letta.functions.mcp_client.types import SSEServerConfig
6
+
7
+ # see: https://modelcontextprotocol.io/quickstart/user
8
+ MCP_CONFIG_TOPLEVEL_KEY = "mcpServers"
9
+
10
+
11
class SSEMCPClient(BaseMCPClient):
    """MCP client that connects over Server-Sent Events (SSE)."""

    def _initialize_connection(self, server_config: SSEServerConfig) -> bool:
        # Enter the SSE transport's async context manager manually on the
        # client's private event loop; the matching __aexit__ is deferred to
        # cleanup() via the registered callback (order matters: transport is
        # registered first, so it is torn down before the session's callback
        # runs after it in the list).
        sse_cm = sse_client(url=server_config.server_url)
        sse_transport = self.loop.run_until_complete(sse_cm.__aenter__())
        self.stdio, self.write = sse_transport
        self.cleanup_funcs.append(lambda: self.loop.run_until_complete(sse_cm.__aexit__(None, None, None)))

        # Same manual-enter pattern for the MCP session over that transport.
        session_cm = ClientSession(self.stdio, self.write)
        self.session = self.loop.run_until_complete(session_cm.__aenter__())
        self.cleanup_funcs.append(lambda: self.loop.run_until_complete(session_cm.__aexit__(None, None, None)))
        # Always reports success; transport errors surface as exceptions above.
        return True