letta-nightly 0.6.39.dev20250314104053__py3-none-any.whl → 0.6.40.dev20250314173529__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- letta/agent.py +13 -3
- letta/agents/ephemeral_agent.py +2 -1
- letta/agents/low_latency_agent.py +8 -0
- letta/dynamic_multi_agent.py +274 -0
- letta/functions/function_sets/base.py +1 -0
- letta/functions/function_sets/extras.py +2 -1
- letta/functions/function_sets/multi_agent.py +17 -0
- letta/functions/helpers.py +41 -0
- letta/helpers/converters.py +67 -0
- letta/helpers/mcp_helpers.py +26 -5
- letta/llm_api/openai.py +1 -1
- letta/memory.py +2 -1
- letta/orm/__init__.py +2 -0
- letta/orm/agent.py +69 -20
- letta/orm/custom_columns.py +15 -0
- letta/orm/group.py +33 -0
- letta/orm/groups_agents.py +13 -0
- letta/orm/message.py +7 -4
- letta/orm/organization.py +1 -0
- letta/orm/sqlalchemy_base.py +3 -3
- letta/round_robin_multi_agent.py +152 -0
- letta/schemas/agent.py +3 -0
- letta/schemas/enums.py +0 -4
- letta/schemas/group.py +65 -0
- letta/schemas/letta_message.py +167 -106
- letta/schemas/letta_message_content.py +192 -0
- letta/schemas/message.py +28 -36
- letta/serialize_schemas/__init__.py +1 -1
- letta/serialize_schemas/marshmallow_agent.py +108 -0
- letta/serialize_schemas/{agent_environment_variable.py → marshmallow_agent_environment_variable.py} +1 -1
- letta/serialize_schemas/marshmallow_base.py +52 -0
- letta/serialize_schemas/{block.py → marshmallow_block.py} +1 -1
- letta/serialize_schemas/{custom_fields.py → marshmallow_custom_fields.py} +12 -0
- letta/serialize_schemas/marshmallow_message.py +42 -0
- letta/serialize_schemas/{tag.py → marshmallow_tag.py} +12 -2
- letta/serialize_schemas/{tool.py → marshmallow_tool.py} +1 -1
- letta/serialize_schemas/pydantic_agent_schema.py +111 -0
- letta/server/rest_api/app.py +15 -0
- letta/server/rest_api/routers/v1/__init__.py +2 -0
- letta/server/rest_api/routers/v1/agents.py +46 -40
- letta/server/rest_api/routers/v1/groups.py +233 -0
- letta/server/rest_api/routers/v1/tools.py +31 -3
- letta/server/rest_api/utils.py +1 -1
- letta/server/server.py +267 -12
- letta/services/agent_manager.py +65 -28
- letta/services/group_manager.py +147 -0
- letta/services/helpers/agent_manager_helper.py +151 -1
- letta/services/message_manager.py +11 -3
- letta/services/passage_manager.py +15 -0
- letta/settings.py +5 -0
- letta/supervisor_multi_agent.py +103 -0
- {letta_nightly-0.6.39.dev20250314104053.dist-info → letta_nightly-0.6.40.dev20250314173529.dist-info}/METADATA +1 -2
- {letta_nightly-0.6.39.dev20250314104053.dist-info → letta_nightly-0.6.40.dev20250314173529.dist-info}/RECORD +56 -46
- letta/serialize_schemas/agent.py +0 -80
- letta/serialize_schemas/base.py +0 -64
- letta/serialize_schemas/message.py +0 -29
- {letta_nightly-0.6.39.dev20250314104053.dist-info → letta_nightly-0.6.40.dev20250314173529.dist-info}/LICENSE +0 -0
- {letta_nightly-0.6.39.dev20250314104053.dist-info → letta_nightly-0.6.40.dev20250314173529.dist-info}/WHEEL +0 -0
- {letta_nightly-0.6.39.dev20250314104053.dist-info → letta_nightly-0.6.40.dev20250314173529.dist-info}/entry_points.txt +0 -0
letta/agent.py
CHANGED
@@ -39,7 +39,8 @@ from letta.orm.enums import ToolType
 from letta.schemas.agent import AgentState, AgentStepResponse, UpdateAgent
 from letta.schemas.block import BlockUpdate
 from letta.schemas.embedding_config import EmbeddingConfig
-from letta.schemas.enums import
+from letta.schemas.enums import MessageRole
+from letta.schemas.letta_message_content import TextContent
 from letta.schemas.memory import ContextWindowOverview, Memory
 from letta.schemas.message import Message, ToolReturn
 from letta.schemas.openai.chat_completion_response import ChatCompletionResponse
@@ -95,6 +96,7 @@ class Agent(BaseAgent):
         first_message_verify_mono: bool = True,  # TODO move to config?
         # MCP sessions, state held in-memory in the server
         mcp_clients: Optional[Dict[str, BaseMCPClient]] = None,
+        save_last_response: bool = False,
     ):
         assert isinstance(agent_state.memory, Memory), f"Memory object is not of type Memory: {type(agent_state.memory)}"
         # Hold a copy of the state that was used to init the agent
@@ -149,6 +151,10 @@ class Agent(BaseAgent):
         # Load last function response from message history
         self.last_function_response = self.load_last_function_response()
 
+        # Save last responses in memory
+        self.save_last_response = save_last_response
+        self.last_response_messages = []
+
         # Logger that the Agent specifically can use, will also report the agent_state ID with the logs
         self.logger = get_logger(agent_state.id)
 
@@ -160,7 +166,7 @@ class Agent(BaseAgent):
         in_context_messages = self.agent_manager.get_in_context_messages(agent_id=self.agent_state.id, actor=self.user)
         for i in range(len(in_context_messages) - 1, -1, -1):
             msg = in_context_messages[i]
-            if msg.role == MessageRole.tool and msg.content and len(msg.content) == 1 and msg.content[0]
+            if msg.role == MessageRole.tool and msg.content and len(msg.content) == 1 and isinstance(msg.content[0], TextContent):
                 text_content = msg.content[0].text
                 try:
                     response_json = json.loads(text_content)
@@ -926,6 +932,9 @@ class Agent(BaseAgent):
         else:
             all_new_messages = all_response_messages
 
+        if self.save_last_response:
+            self.last_response_messages = all_response_messages
+
         # Check the memory pressure and potentially issue a memory pressure warning
         current_total_tokens = response.usage.total_tokens
         active_memory_warning = False
@@ -1052,6 +1061,7 @@ class Agent(BaseAgent):
 
         else:
             logger.error(f"step() failed with an unrecognized exception: '{str(e)}'")
+            traceback.print_exc()
             raise e
 
     def step_user_message(self, user_message_str: str, **kwargs) -> AgentStepResponse:
@@ -1201,7 +1211,7 @@ class Agent(BaseAgent):
             and in_context_messages[1].role == MessageRole.user
             and in_context_messages[1].content
             and len(in_context_messages[1].content) == 1
-            and in_context_messages[1].content[0]
+            and isinstance(in_context_messages[1].content[0], TextContent)
             # TODO remove hardcoding
             and "The following is a summary of the previous " in in_context_messages[1].content[0].text
         ):
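The `save_last_response` flag is plumbing for the new multi-agent orchestrators added in this release: when enabled, the agent caches the raw `Message` objects from its most recent step so a coordinator can inspect them. A minimal sketch of the intended call pattern, assuming `interface`, `agent_state`, `user`, and `incoming_messages` are already constructed through the usual server-side loading path (not shown in this diff):

from letta.agent import Agent
from letta.schemas.message import Message

agent = Agent(
    interface=interface,
    agent_state=agent_state,
    user=user,
    save_last_response=True,  # new flag; defaults to False
)
agent.step(messages=incoming_messages)

# The orchestrators below convert the cached per-step output into
# client-facing messages:
responses = Message.to_letta_messages_from_list(agent.last_response_messages)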
letta/agents/ephemeral_agent.py
CHANGED
@@ -5,7 +5,8 @@ import openai
 from letta.agents.base_agent import BaseAgent
 from letta.schemas.agent import AgentState
 from letta.schemas.enums import MessageRole
-from letta.schemas.letta_message import
+from letta.schemas.letta_message import UserMessage
+from letta.schemas.letta_message_content import TextContent
 from letta.schemas.message import Message
 from letta.schemas.openai.chat_completion_request import ChatCompletionRequest
 from letta.schemas.user import User
letta/agents/low_latency_agent.py
CHANGED
@@ -40,6 +40,7 @@ from letta.services.agent_manager import AgentManager
 from letta.services.block_manager import BlockManager
 from letta.services.helpers.agent_manager_helper import compile_system_message
 from letta.services.message_manager import MessageManager
+from letta.services.passage_manager import PassageManager
 from letta.services.summarizer.enums import SummarizationMode
 from letta.services.summarizer.summarizer import Summarizer
 from letta.utils import united_diff
@@ -75,6 +76,7 @@ class LowLatencyAgent(BaseAgent):
         # TODO: Make this more general, factorable
         # Summarizer settings
         self.block_manager = block_manager
+        self.passage_manager = PassageManager()  # TODO: pass this in
         # TODO: This is not guaranteed to exist!
         self.summary_block_label = "human"
         self.summarizer = Summarizer(
@@ -246,10 +248,16 @@ class LowLatencyAgent(BaseAgent):
             return in_context_messages
 
         memory_edit_timestamp = get_utc_time()
+
+        num_messages = self.message_manager.size(actor=actor, agent_id=agent_id)
+        num_archival_memories = self.passage_manager.size(actor=actor, agent_id=agent_id)
+
         new_system_message_str = compile_system_message(
             system_prompt=agent_state.system,
             in_context_memory=agent_state.memory,
             in_context_memory_last_edit=memory_edit_timestamp,
+            previous_message_count=num_messages,
+            archival_memory_size=num_archival_memories,
         )
 
         diff = united_diff(curr_system_message_text, new_system_message_str)
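The two new `compile_system_message` keyword arguments surface memory statistics in the rebuilt system prompt. A hedged sketch of the counting step in isolation, assuming the `size()` signatures shown in the diff and an `actor`/`agent_id` pair from the surrounding request context:

from letta.services.message_manager import MessageManager
from letta.services.passage_manager import PassageManager

message_manager = MessageManager()
passage_manager = PassageManager()

# Both managers expose a size() scoped to one agent; the counts are then
# rendered into the compiled system prompt as recall/archival metadata.
num_messages = message_manager.size(actor=actor, agent_id=agent_id)
num_archival_memories = passage_manager.size(actor=actor, agent_id=agent_id)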
letta/dynamic_multi_agent.py
ADDED
@@ -0,0 +1,274 @@
+from typing import List, Optional
+
+from letta.agent import Agent, AgentState
+from letta.interface import AgentInterface
+from letta.orm import User
+from letta.schemas.block import Block
+from letta.schemas.letta_message_content import TextContent
+from letta.schemas.message import Message, MessageCreate
+from letta.schemas.openai.chat_completion_response import UsageStatistics
+from letta.schemas.usage import LettaUsageStatistics
+from letta.services.tool_manager import ToolManager
+
+
+class DynamicMultiAgent(Agent):
+    def __init__(
+        self,
+        interface: AgentInterface,
+        agent_state: AgentState,
+        user: User = None,
+        # custom
+        group_id: str = "",
+        agent_ids: List[str] = [],
+        description: str = "",
+        max_turns: Optional[int] = None,
+        termination_token: str = "DONE!",
+    ):
+        super().__init__(interface, agent_state, user)
+        self.group_id = group_id
+        self.agent_ids = agent_ids
+        self.description = description
+        self.max_turns = max_turns or len(agent_ids)
+        self.termination_token = termination_token
+
+        self.tool_manager = ToolManager()
+
+    def step(
+        self,
+        messages: List[MessageCreate],
+        chaining: bool = True,
+        max_chaining_steps: Optional[int] = None,
+        put_inner_thoughts_first: bool = True,
+        **kwargs,
+    ) -> LettaUsageStatistics:
+        total_usage = UsageStatistics()
+        step_count = 0
+
+        token_streaming = self.interface.streaming_mode if hasattr(self.interface, "streaming_mode") else False
+        metadata = self.interface.metadata if hasattr(self.interface, "metadata") else None
+
+        agents = {}
+        message_index = {self.agent_state.id: 0}
+        agents[self.agent_state.id] = self.load_manager_agent()
+        for agent_id in self.agent_ids:
+            agents[agent_id] = self.load_participant_agent(agent_id=agent_id)
+            message_index[agent_id] = 0
+
+        chat_history: List[Message] = []
+        new_messages = messages
+        speaker_id = None
+        try:
+            for _ in range(self.max_turns):
+                agent_id_options = [agent_id for agent_id in self.agent_ids if agent_id != speaker_id]
+                manager_message = self.ask_manager_to_choose_participant_message(new_messages, chat_history, agent_id_options)
+                manager_agent = agents[self.agent_state.id]
+                usage_stats = manager_agent.step(
+                    messages=[manager_message],
+                    chaining=chaining,
+                    max_chaining_steps=max_chaining_steps,
+                    stream=token_streaming,
+                    skip_verify=True,
+                    metadata=metadata,
+                    put_inner_thoughts_first=put_inner_thoughts_first,
+                )
+                responses = Message.to_letta_messages_from_list(manager_agent.last_response_messages)
+                assistant_message = [response for response in responses if response.message_type == "assistant_message"][0]
+                for name, agent_id in [(agents[agent_id].agent_state.name, agent_id) for agent_id in agent_id_options]:
+                    if name.lower() in assistant_message.content.lower():
+                        speaker_id = agent_id
+
+                # sum usage
+                total_usage.prompt_tokens += usage_stats.prompt_tokens
+                total_usage.completion_tokens += usage_stats.completion_tokens
+                total_usage.total_tokens += usage_stats.total_tokens
+                step_count += 1
+
+                # initialize input messages
+                for message in chat_history[message_index[speaker_id] :]:
+                    message.id = Message.generate_id()
+                    message.agent_id = speaker_id
+
+                for message in new_messages:
+                    chat_history.append(
+                        Message(
+                            agent_id=speaker_id,
+                            role=message.role,
+                            content=[TextContent(text=message.content)],
+                            name=message.name,
+                            model=None,
+                            tool_calls=None,
+                            tool_call_id=None,
+                            group_id=self.group_id,
+                        )
+                    )
+
+                # load agent and perform step
+                participant_agent = agents[speaker_id]
+                usage_stats = participant_agent.step(
+                    messages=chat_history[message_index[speaker_id] :],
+                    chaining=chaining,
+                    max_chaining_steps=max_chaining_steps,
+                    stream=token_streaming,
+                    skip_verify=True,
+                    metadata=metadata,
+                    put_inner_thoughts_first=put_inner_thoughts_first,
+                )
+
+                # parse new messages for next step
+                responses = Message.to_letta_messages_from_list(
+                    participant_agent.last_response_messages,
+                )
+
+                assistant_messages = [response for response in responses if response.message_type == "assistant_message"]
+                new_messages = [
+                    MessageCreate(
+                        role="system",
+                        content=message.content,
+                        name=participant_agent.agent_state.name,
+                    )
+                    for message in assistant_messages
+                ]
+                message_index[agent_id] = len(chat_history) + len(new_messages)
+
+                # sum usage
+                total_usage.prompt_tokens += usage_stats.prompt_tokens
+                total_usage.completion_tokens += usage_stats.completion_tokens
+                total_usage.total_tokens += usage_stats.total_tokens
+                step_count += 1
+
+                # check for termination token
+                if any(self.termination_token in message.content for message in new_messages):
+                    break
+
+            # persist remaining chat history
+            for message in new_messages:
+                chat_history.append(
+                    Message(
+                        agent_id=agent_id,
+                        role=message.role,
+                        content=[TextContent(text=message.content)],
+                        name=message.name,
+                        model=None,
+                        tool_calls=None,
+                        tool_call_id=None,
+                        group_id=self.group_id,
+                    )
+                )
+            for agent_id, index in message_index.items():
+                if agent_id == speaker_id:
+                    continue
+                for message in chat_history[index:]:
+                    message.id = Message.generate_id()
+                    message.agent_id = agent_id
+                self.message_manager.create_many_messages(chat_history[index:], actor=self.user)
+
+        except Exception as e:
+            raise e
+        finally:
+            self.interface.step_yield()
+
+        self.interface.step_complete()
+
+        return LettaUsageStatistics(**total_usage.model_dump(), step_count=step_count)
+
+    def load_manager_agent(self) -> Agent:
+        for participant_agent_id in self.agent_ids:
+            participant_agent_state = self.agent_manager.get_agent_by_id(agent_id=participant_agent_id, actor=self.user)
+            participant_persona_block = participant_agent_state.memory.get_block(label="persona")
+            new_block = self.block_manager.create_or_update_block(
+                block=Block(
+                    label=participant_agent_id,
+                    value=participant_persona_block.value,
+                ),
+                actor=self.user,
+            )
+            self.agent_state = self.agent_manager.update_block_with_label(
+                agent_id=self.agent_state.id,
+                block_label=participant_agent_id,
+                new_block_id=new_block.id,
+                actor=self.user,
+            )
+
+        persona_block = self.agent_state.memory.get_block(label="persona")
+        group_chat_manager_persona = (
+            f"You are overseeing a group chat with {len(self.agent_ids) - 1} agents and "
+            f"one user. Description of the group: {self.description}\n"
+            "On each turn, you will be provided with the chat history and latest message. "
+            "Your task is to decide which participant should speak next in the chat based "
+            "on the chat history. Each agent has a memory block labeled with their ID which "
+            "holds info about them, and you should use this context to inform your decision."
+        )
+        self.agent_state.memory.update_block_value(label="persona", value=persona_block.value + group_chat_manager_persona)
+        return Agent(
+            agent_state=self.agent_state,
+            interface=self.interface,
+            user=self.user,
+            save_last_response=True,
+        )
+
+    def load_participant_agent(self, agent_id: str) -> Agent:
+        agent_state = self.agent_manager.get_agent_by_id(agent_id=agent_id, actor=self.user)
+        persona_block = agent_state.memory.get_block(label="persona")
+        group_chat_participant_persona = (
+            f"You are a participant in a group chat with {len(self.agent_ids) - 1} other "
+            "agents and one user. Respond to new messages in the group chat when prompted. "
+            f"Description of the group: {self.description}. About you: "
+        )
+        agent_state.memory.update_block_value(label="persona", value=group_chat_participant_persona + persona_block.value)
+        return Agent(
+            agent_state=agent_state,
+            interface=self.interface,
+            user=self.user,
+            save_last_response=True,
+        )
+
+    '''
+    def attach_choose_next_participant_tool(self) -> AgentState:
+        def choose_next_participant(next_speaker_agent_id: str) -> str:
+            """
+            Returns ID of the agent in the group chat that should reply to the latest message in the conversation. The agent ID will always be in the format: `agent-{UUID}`.
+
+            Args:
+                next_speaker_agent_id (str): The ID of the agent that is most suitable to be the next speaker.
+
+            Returns:
+                str: The ID of the agent that should be the next speaker.
+            """
+            return next_speaker_agent_id
+        source_code = parse_source_code(choose_next_participant)
+        tool = self.tool_manager.create_or_update_tool(
+            Tool(
+                source_type="python",
+                source_code=source_code,
+                name="choose_next_participant",
+            ),
+            actor=self.user,
+        )
+        return self.agent_manager.attach_tool(agent_id=self.agent_state.id, tool_id=tool.id, actor=self.user)
+    '''
+
+    def ask_manager_to_choose_participant_message(
+        self,
+        new_messages: List[MessageCreate],
+        chat_history: List[Message],
+        agent_id_options: List[str],
+    ) -> Message:
+        chat_history = [f"{message.name or 'user'}: {message.content[0].text}" for message in chat_history]
+        for message in new_messages:
+            chat_history.append(f"{message.name or 'user'}: {message.content}")
+        context_messages = "\n".join(chat_history)
+
+        message_text = (
+            "Choose the most suitable agent to reply to the latest message in the "
+            f"group chat from the following options: {agent_id_options}. Do not "
+            "respond to the messages yourself, your task is only to decide the "
+            f"next speaker, not to participate. \nChat history:\n{context_messages}"
+        )
+        return Message(
+            agent_id=self.agent_state.id,
+            role="user",
+            content=[TextContent(text=message_text)],
+            name=None,
+            model=None,
+            tool_calls=None,
+            tool_call_id=None,
+            group_id=self.group_id,
+        )
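`DynamicMultiAgent` layers a manager-picks-the-speaker loop on top of ordinary `Agent.step` calls. A hedged usage sketch; the group, manager state, and participants are assumed to already exist server-side, and the constructor arguments simply mirror the `__init__` above rather than a documented public API:

from letta.dynamic_multi_agent import DynamicMultiAgent
from letta.schemas.message import MessageCreate

group_chat = DynamicMultiAgent(
    interface=interface,
    agent_state=manager_agent_state,
    user=user,
    group_id=group.id,
    agent_ids=[state.id for state in participant_states],
    description="Support triage room",
    max_turns=6,  # defaults to len(agent_ids)
    termination_token="DONE!",  # a participant can end the round early
)

usage = group_chat.step(
    messages=[MessageCreate(role="user", content="Who should take this ticket?")],
)
print(usage.step_count, usage.total_tokens)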
letta/functions/function_sets/extras.py
CHANGED
@@ -7,7 +7,8 @@ import requests
 from letta.constants import MESSAGE_CHATGPT_FUNCTION_MODEL, MESSAGE_CHATGPT_FUNCTION_SYSTEM_MESSAGE
 from letta.helpers.json_helpers import json_dumps, json_loads
 from letta.llm_api.llm_api_tools import create
-from letta.schemas.
+from letta.schemas.letta_message_content import TextContent
+from letta.schemas.message import Message
 
 
 def message_chatgpt(self, message: str):
letta/functions/function_sets/multi_agent.py
CHANGED
@@ -3,6 +3,7 @@ from typing import TYPE_CHECKING, List
 
 from letta.functions.helpers import (
     _send_message_to_agents_matching_tags_async,
+    _send_message_to_all_agents_in_group_async,
     execute_send_message_to_agent,
     fire_and_forget_send_to_agent,
 )
@@ -86,3 +87,19 @@ def send_message_to_agents_matching_tags(self: "Agent", message: str, match_all:
     """
 
     return asyncio.run(_send_message_to_agents_matching_tags_async(self, message, match_all, match_some))
+
+
+def send_message_to_all_agents_in_group(self: "Agent", message: str) -> List[str]:
+    """
+    Sends a message to all agents within the same multi-agent group.
+
+    Args:
+        message (str): The content of the message to be sent to each matching agent.
+
+    Returns:
+        List[str]: A list of responses from the agents that matched the filtering criteria. Each
+        response corresponds to a single agent. Agents that do not respond will not have an entry
+        in the returned list.
+    """
+
+    return asyncio.run(_send_message_to_all_agents_in_group_async(self, message))
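Like the existing `send_message_to_agents_matching_tags`, the new tool is a synchronous shim over an async fan-out, because tool bodies are invoked from non-async code. A self-contained illustration of that bridge pattern (names here are illustrative, not letta's):

import asyncio
from typing import List

async def _broadcast_async(message: str, targets: List[str]) -> List[str]:
    # Stand-in for the real concurrent per-agent sends.
    await asyncio.sleep(0)
    return [f"{target} received: {message}" for target in targets]

def broadcast(message: str, targets: List[str]) -> List[str]:
    # Same shape as send_message_to_all_agents_in_group: a sync wrapper
    # that runs the async helper to completion.
    return asyncio.run(_broadcast_async(message, targets))

print(broadcast("hello", ["agent-1", "agent-2"]))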
letta/functions/helpers.py
CHANGED
@@ -604,6 +604,47 @@ async def _send_message_to_agents_matching_tags_async(
     return final
 
 
+async def _send_message_to_all_agents_in_group_async(sender_agent: "Agent", message: str) -> List[str]:
+    server = get_letta_server()
+
+    augmented_message = (
+        f"[Incoming message from agent with ID '{sender_agent.agent_state.id}' - to reply to this message, "
+        f"make sure to use the 'send_message' at the end, and the system will notify the sender of your response] "
+        f"{message}"
+    )
+
+    worker_agents_ids = sender_agent.agent_state.multi_agent_group.agent_ids
+    worker_agents = [server.agent_manager.get_agent_by_id(agent_id=agent_id, actor=sender_agent.user) for agent_id in worker_agents_ids]
+
+    # Create a system message
+    messages = [MessageCreate(role=MessageRole.system, content=augmented_message, name=sender_agent.agent_state.name)]
+
+    # Possibly limit concurrency to avoid meltdown:
+    sem = asyncio.Semaphore(settings.multi_agent_concurrent_sends)
+
+    async def _send_single(agent_state):
+        async with sem:
+            return await async_send_message_with_retries(
+                server=server,
+                sender_agent=sender_agent,
+                target_agent_id=agent_state.id,
+                messages=messages,
+                max_retries=3,
+                timeout=settings.multi_agent_send_message_timeout,
+            )
+
+    tasks = [asyncio.create_task(_send_single(agent_state)) for agent_state in worker_agents]
+    results = await asyncio.gather(*tasks, return_exceptions=True)
+    final = []
+    for r in results:
+        if isinstance(r, Exception):
+            final.append(str(r))
+        else:
+            final.append(r)
+
+    return final
+
+
 def generate_model_from_args_json_schema(schema: Dict[str, Any]) -> Type[BaseModel]:
     """Creates a Pydantic model from a JSON schema.
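The helper's concurrency shape is worth calling out: a semaphore caps in-flight sends, and `return_exceptions=True` keeps one failed worker from cancelling the rest, with failures stringified into the result list. A self-contained illustration of the same pattern (illustrative names, not letta's):

import asyncio

async def send_one(target: str, sem: asyncio.Semaphore) -> str:
    async with sem:
        if target == "agent-2":
            raise RuntimeError("worker timed out")
        await asyncio.sleep(0.01)  # stand-in for the real network call
        return f"{target}: ok"

async def fan_out(targets: list, limit: int = 2) -> list:
    sem = asyncio.Semaphore(limit)  # cap concurrent sends, as above
    tasks = [asyncio.create_task(send_one(t, sem)) for t in targets]
    results = await asyncio.gather(*tasks, return_exceptions=True)
    return [str(r) if isinstance(r, Exception) else r for r in results]

print(asyncio.run(fan_out(["agent-1", "agent-2", "agent-3"])))
# ['agent-1: ok', 'worker timed out', 'agent-3: ok']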
|
letta/helpers/converters.py
CHANGED
@@ -8,6 +8,16 @@ from sqlalchemy import Dialect
 
 from letta.schemas.embedding_config import EmbeddingConfig
 from letta.schemas.enums import ToolRuleType
+from letta.schemas.letta_message_content import (
+    MessageContent,
+    MessageContentType,
+    OmittedReasoningContent,
+    ReasoningContent,
+    RedactedReasoningContent,
+    TextContent,
+    ToolCallContent,
+    ToolReturnContent,
+)
 from letta.schemas.llm_config import LLMConfig
 from letta.schemas.message import ToolReturn
 from letta.schemas.tool_rule import ChildToolRule, ConditionalToolRule, ContinueToolRule, InitToolRule, TerminalToolRule, ToolRule
@@ -80,10 +90,13 @@ def deserialize_tool_rule(data: Dict) -> Union[ChildToolRule, InitToolRule, Term
     rule_type = ToolRuleType(data.get("type"))
 
     if rule_type == ToolRuleType.run_first or rule_type == ToolRuleType.InitToolRule:
+        data["type"] = ToolRuleType.run_first
         return InitToolRule(**data)
     elif rule_type == ToolRuleType.exit_loop or rule_type == ToolRuleType.TerminalToolRule:
+        data["type"] = ToolRuleType.exit_loop
         return TerminalToolRule(**data)
     elif rule_type == ToolRuleType.constrain_child_tools or rule_type == ToolRuleType.ToolRule:
+        data["type"] = ToolRuleType.constrain_child_tools
         return ChildToolRule(**data)
     elif rule_type == ToolRuleType.conditional:
         return ConditionalToolRule(**data)
@@ -163,6 +176,60 @@ def deserialize_tool_returns(data: Optional[List[Dict]]) -> List[ToolReturn]:
     return tool_returns
 
 
+# ----------------------------
+# MessageContent Serialization
+# ----------------------------
+
+
+def serialize_message_content(message_content: Optional[List[Union[MessageContent, dict]]]) -> List[Dict]:
+    """Convert a list of MessageContent objects into JSON-serializable format."""
+    if not message_content:
+        return []
+
+    serialized_message_content = []
+    for content in message_content:
+        if isinstance(content, MessageContent):
+            serialized_message_content.append(content.model_dump())
+        elif isinstance(content, dict):
+            serialized_message_content.append(content)  # Already a dictionary, leave it as-is
+        else:
+            raise TypeError(f"Unexpected message content type: {type(content)}")
+
+    return serialized_message_content
+
+
+def deserialize_message_content(data: Optional[List[Dict]]) -> List[MessageContent]:
+    """Convert a JSON list back into MessageContent objects."""
+    if not data:
+        return []
+
+    message_content = []
+    for item in data:
+        if not item:
+            continue
+
+        content_type = item.get("type")
+        if content_type == MessageContentType.text:
+            content = TextContent(**item)
+        elif content_type == MessageContentType.tool_call:
+            content = ToolCallContent(**item)
+        elif content_type == MessageContentType.tool_return:
+            content = ToolReturnContent(**item)
+        elif content_type == MessageContentType.reasoning:
+            content = ReasoningContent(**item)
+        elif content_type == MessageContentType.redacted_reasoning:
+            content = RedactedReasoningContent(**item)
+        elif content_type == MessageContentType.omitted_reasoning:
+            content = OmittedReasoningContent(**item)
+        else:
+            # Skip invalid content
            continue
+
+        message_content.append(content)
+
+    return message_content
+
+
 # --------------------------
 # Vector Serialization
 # --------------------------
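These two functions are the custom-column glue for the new multi-part message content (see letta/orm/custom_columns.py in the file list). A round-trip sketch, assuming `TextContent` serializes with a "type": "text" discriminator as the deserializer's branching implies:

from letta.helpers.converters import deserialize_message_content, serialize_message_content
from letta.schemas.letta_message_content import TextContent

stored = serialize_message_content([TextContent(text="hello")])
# e.g. [{"type": "text", "text": "hello"}] -- JSON-safe, ready for the column

restored = deserialize_message_content(stored)
assert isinstance(restored[0], TextContent)
assert restored[0].text == "hello"

Note that unknown `type` values are silently skipped on deserialization, so adding new content types stays backward compatible with older stored rows.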
letta/helpers/mcp_helpers.py
CHANGED
@@ -11,6 +11,9 @@ from letta.log import get_logger
 
 logger = get_logger(__name__)
 
+# see: https://modelcontextprotocol.io/quickstart/user
+MCP_CONFIG_TOPLEVEL_KEY = "mcpServers"
+
 
 class MCPTool(Tool):
     """A simple wrapper around MCP's tool definition (to avoid conflict with our own)"""
@@ -18,7 +21,7 @@ class MCPTool(Tool):
 
 class MCPServerType(str, Enum):
     SSE = "sse"
-
+    STDIO = "stdio"
 
 
 class BaseServerConfig(BaseModel):
@@ -30,11 +33,29 @@ class SSEServerConfig(BaseServerConfig):
     type: MCPServerType = MCPServerType.SSE
     server_url: str = Field(..., description="The URL of the server (MCP SSE client will connect to this URL)")
 
+    def to_dict(self) -> dict:
+        values = {
+            "transport": "sse",
+            "url": self.server_url,
+        }
+        return values
+
 
-class
-    type: MCPServerType = MCPServerType.
+class StdioServerConfig(BaseServerConfig):
+    type: MCPServerType = MCPServerType.STDIO
     command: str = Field(..., description="The command to run (MCP 'local' client will run this command)")
     args: List[str] = Field(..., description="The arguments to pass to the command")
+    env: Optional[dict[str, str]] = Field(None, description="Environment variables to set")
+
+    def to_dict(self) -> dict:
+        values = {
+            "transport": "stdio",
+            "command": self.command,
+            "args": self.args,
+        }
+        if self.env is not None:
+            values["env"] = self.env
+        return values
 
 
 class BaseMCPClient:
@@ -83,8 +104,8 @@ class BaseMCPClient:
         logger.info("Cleaned up MCP clients on shutdown.")
 
 
-class
-    def _initialize_connection(self, server_config:
+class StdioMCPClient(BaseMCPClient):
+    def _initialize_connection(self, server_config: StdioServerConfig):
         server_params = StdioServerParameters(command=server_config.command, args=server_config.args)
         stdio_cm = stdio_client(server_params)
         stdio_transport = self.loop.run_until_complete(stdio_cm.__aenter__())
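The `to_dict()` methods emit the transport-keyed dictionaries that MCP client tooling conventionally consumes. A hedged sketch of both shapes; `BaseServerConfig`'s own fields (e.g. a server name) are not visible in this hunk, so only the fields shown above are passed:

from letta.helpers.mcp_helpers import SSEServerConfig, StdioServerConfig

sse = SSEServerConfig(server_url="http://localhost:8080/sse")
print(sse.to_dict())
# {'transport': 'sse', 'url': 'http://localhost:8080/sse'}

stdio = StdioServerConfig(
    command="npx",
    args=["-y", "@modelcontextprotocol/server-filesystem", "/tmp"],
    env={"DEBUG": "1"},
)
print(stdio.to_dict())
# {'transport': 'stdio', 'command': 'npx', 'args': [...], 'env': {'DEBUG': '1'}}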
letta/llm_api/openai.py
CHANGED
letta/memory.py
CHANGED
@@ -5,8 +5,9 @@ from letta.llm_api.llm_api_tools import create
 from letta.prompts.gpt_summarize import SYSTEM as SUMMARY_PROMPT_SYSTEM
 from letta.schemas.agent import AgentState
 from letta.schemas.enums import MessageRole
+from letta.schemas.letta_message_content import TextContent
 from letta.schemas.memory import Memory
-from letta.schemas.message import Message
+from letta.schemas.message import Message
 from letta.settings import summarizer_settings
 from letta.utils import count_tokens, printd
 
letta/orm/__init__.py
CHANGED
@@ -4,6 +4,8 @@ from letta.orm.base import Base
 from letta.orm.block import Block
 from letta.orm.blocks_agents import BlocksAgents
 from letta.orm.file import FileMetadata
+from letta.orm.group import Group
+from letta.orm.groups_agents import GroupsAgents
 from letta.orm.identities_agents import IdentitiesAgents
 from letta.orm.identities_blocks import IdentitiesBlocks
 from letta.orm.identity import Identity