letta-nightly 0.7.0.dev20250423003112__py3-none-any.whl → 0.7.2.dev20250423222439__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. letta/__init__.py +1 -1
  2. letta/agent.py +113 -81
  3. letta/agents/letta_agent.py +2 -2
  4. letta/agents/letta_agent_batch.py +38 -34
  5. letta/client/client.py +10 -2
  6. letta/constants.py +4 -3
  7. letta/functions/function_sets/multi_agent.py +1 -3
  8. letta/functions/helpers.py +3 -3
  9. letta/groups/dynamic_multi_agent.py +58 -59
  10. letta/groups/round_robin_multi_agent.py +43 -49
  11. letta/groups/sleeptime_multi_agent.py +28 -18
  12. letta/groups/supervisor_multi_agent.py +21 -20
  13. letta/helpers/composio_helpers.py +1 -1
  14. letta/helpers/converters.py +29 -0
  15. letta/helpers/datetime_helpers.py +9 -0
  16. letta/helpers/message_helper.py +1 -0
  17. letta/helpers/tool_execution_helper.py +3 -3
  18. letta/jobs/llm_batch_job_polling.py +2 -1
  19. letta/llm_api/anthropic.py +10 -6
  20. letta/llm_api/anthropic_client.py +2 -2
  21. letta/llm_api/cohere.py +2 -2
  22. letta/llm_api/google_ai_client.py +2 -2
  23. letta/llm_api/google_vertex_client.py +2 -2
  24. letta/llm_api/openai.py +11 -4
  25. letta/llm_api/openai_client.py +34 -2
  26. letta/local_llm/chat_completion_proxy.py +2 -2
  27. letta/orm/agent.py +8 -1
  28. letta/orm/custom_columns.py +15 -0
  29. letta/schemas/agent.py +6 -0
  30. letta/schemas/letta_message_content.py +2 -1
  31. letta/schemas/llm_config.py +12 -2
  32. letta/schemas/message.py +18 -0
  33. letta/schemas/openai/chat_completion_response.py +52 -3
  34. letta/schemas/response_format.py +78 -0
  35. letta/schemas/tool_execution_result.py +14 -0
  36. letta/server/rest_api/chat_completions_interface.py +2 -2
  37. letta/server/rest_api/interface.py +3 -2
  38. letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +1 -1
  39. letta/server/rest_api/routers/v1/agents.py +4 -4
  40. letta/server/rest_api/routers/v1/groups.py +2 -2
  41. letta/server/rest_api/routers/v1/messages.py +41 -19
  42. letta/server/server.py +24 -57
  43. letta/services/agent_manager.py +6 -1
  44. letta/services/llm_batch_manager.py +28 -26
  45. letta/services/tool_executor/tool_execution_manager.py +37 -28
  46. letta/services/tool_executor/tool_execution_sandbox.py +35 -16
  47. letta/services/tool_executor/tool_executor.py +299 -68
  48. letta/services/tool_sandbox/base.py +3 -2
  49. letta/services/tool_sandbox/e2b_sandbox.py +5 -4
  50. letta/services/tool_sandbox/local_sandbox.py +11 -6
  51. {letta_nightly-0.7.0.dev20250423003112.dist-info → letta_nightly-0.7.2.dev20250423222439.dist-info}/METADATA +1 -1
  52. {letta_nightly-0.7.0.dev20250423003112.dist-info → letta_nightly-0.7.2.dev20250423222439.dist-info}/RECORD +55 -53
  53. {letta_nightly-0.7.0.dev20250423003112.dist-info → letta_nightly-0.7.2.dev20250423222439.dist-info}/LICENSE +0 -0
  54. {letta_nightly-0.7.0.dev20250423003112.dist-info → letta_nightly-0.7.2.dev20250423222439.dist-info}/WHEEL +0 -0
  55. {letta_nightly-0.7.0.dev20250423003112.dist-info → letta_nightly-0.7.2.dev20250423222439.dist-info}/entry_points.txt +0 -0
letta/client/client.py CHANGED
@@ -32,6 +32,7 @@ from letta.schemas.message import Message, MessageCreate
 from letta.schemas.openai.chat_completion_response import UsageStatistics
 from letta.schemas.organization import Organization
 from letta.schemas.passage import Passage
+from letta.schemas.response_format import ResponseFormatUnion
 from letta.schemas.run import Run
 from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfig, SandboxConfigCreate, SandboxConfigUpdate
 from letta.schemas.source import Source, SourceCreate, SourceUpdate
@@ -100,6 +101,7 @@ class AbstractClient(object):
 message_ids: Optional[List[str]] = None,
 memory: Optional[Memory] = None,
 tags: Optional[List[str]] = None,
+response_format: Optional[ResponseFormatUnion] = None,
 ):
 raise NotImplementedError

@@ -553,6 +555,7 @@ class RESTClient(AbstractClient):
 initial_message_sequence: Optional[List[Message]] = None,
 tags: Optional[List[str]] = None,
 message_buffer_autoclear: bool = False,
+response_format: Optional[ResponseFormatUnion] = None,
 ) -> AgentState:
 """Create an agent

@@ -615,6 +618,7 @@ class RESTClient(AbstractClient):
 "include_base_tools": include_base_tools,
 "message_buffer_autoclear": message_buffer_autoclear,
 "include_multi_agent_tools": include_multi_agent_tools,
+"response_format": response_format,
 }

 # Only add name if it's not None
@@ -653,6 +657,7 @@ class RESTClient(AbstractClient):
 embedding_config: Optional[EmbeddingConfig] = None,
 message_ids: Optional[List[str]] = None,
 tags: Optional[List[str]] = None,
+response_format: Optional[ResponseFormatUnion] = None,
 ) -> AgentState:
 """
 Update an existing agent
@@ -682,6 +687,7 @@ class RESTClient(AbstractClient):
 llm_config=llm_config,
 embedding_config=embedding_config,
 message_ids=message_ids,
+response_format=response_format,
 )
 response = requests.patch(f"{self.base_url}/{self.api_prefix}/agents/{agent_id}", json=request.model_dump(), headers=self.headers)
 if response.status_code != 200:
@@ -2425,6 +2431,7 @@ class LocalClient(AbstractClient):
 llm_config: Optional[LLMConfig] = None,
 embedding_config: Optional[EmbeddingConfig] = None,
 message_ids: Optional[List[str]] = None,
+response_format: Optional[ResponseFormatUnion] = None,
 ):
 """
 Update an existing agent
@@ -2458,6 +2465,7 @@ class LocalClient(AbstractClient):
 llm_config=llm_config,
 embedding_config=embedding_config,
 message_ids=message_ids,
+response_format=response_format,
 ),
 actor=self.user,
 )
@@ -2661,7 +2669,7 @@ class LocalClient(AbstractClient):
 response (LettaResponse): Response from the agent
 """
 self.interface.clear()
-usage = self.server.send_messages(actor=self.user, agent_id=agent_id, messages=messages)
+usage = self.server.send_messages(actor=self.user, agent_id=agent_id, input_messages=messages)

 # format messages
 return LettaResponse(messages=messages, usage=usage)
@@ -2703,7 +2711,7 @@ class LocalClient(AbstractClient):
 usage = self.server.send_messages(
 actor=self.user,
 agent_id=agent_id,
-messages=[MessageCreate(role=MessageRole(role), content=message, name=name)],
+input_messages=[MessageCreate(role=MessageRole(role), content=message, name=name)],
 )

 ## TODO: need to make sure date/timestamp is propely passed
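Note: the hunks above add an optional response_format field to agent create/update calls and rename the messages= keyword to input_messages= when the local client forwards messages to the server. A minimal usage sketch follows; the create_client entry point and the None placeholder for the response format are assumptions, only the keyword names come from this diff.

# Hypothetical sketch of the updated client surface (not part of the diff).
from letta import create_client  # entry point assumed; adjust to your install

client = create_client()

# create_agent / update_agent now accept response_format (a ResponseFormatUnion);
# passing None keeps the previous free-form behavior.
agent = client.create_agent(name="demo-agent", response_format=None)
client.update_agent(agent_id=agent.id, response_format=None)

# Internally, LocalClient.send_message now calls
# server.send_messages(..., input_messages=[MessageCreate(...)]).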
letta/constants.py CHANGED
@@ -47,13 +47,14 @@ DEFAULT_PERSONA = "sam_pov"
 DEFAULT_HUMAN = "basic"
 DEFAULT_PRESET = "memgpt_chat"

+SEND_MESSAGE_TOOL_NAME = "send_message"
 # Base tools that cannot be edited, as they access agent state directly
 # Note that we don't include "conversation_search_date" for now
-BASE_TOOLS = ["send_message", "conversation_search", "archival_memory_insert", "archival_memory_search"]
+BASE_TOOLS = [SEND_MESSAGE_TOOL_NAME, "conversation_search", "archival_memory_insert", "archival_memory_search"]
 # Base memory tools CAN be edited, and are added by default by the server
 BASE_MEMORY_TOOLS = ["core_memory_append", "core_memory_replace"]
 # Base tools if the memgpt agent has enable_sleeptime on
-BASE_SLEEPTIME_CHAT_TOOLS = ["send_message", "conversation_search", "archival_memory_search"]
+BASE_SLEEPTIME_CHAT_TOOLS = [SEND_MESSAGE_TOOL_NAME, "conversation_search", "archival_memory_search"]
 # Base memory tools for sleeptime agent
 BASE_SLEEPTIME_TOOLS = [
 "memory_replace",
@@ -72,7 +73,7 @@ LETTA_TOOL_SET = set(BASE_TOOLS + BASE_MEMORY_TOOLS + MULTI_AGENT_TOOLS + BASE_S
 # The name of the tool used to send message to the user
 # May not be relevant in cases where the agent has multiple ways to message to user (send_imessage, send_discord_mesasge, ...)
 # or in cases where the agent has no concept of messaging a user (e.g. a workflow agent)
-DEFAULT_MESSAGE_TOOL = "send_message"
+DEFAULT_MESSAGE_TOOL = SEND_MESSAGE_TOOL_NAME
 DEFAULT_MESSAGE_TOOL_KWARG = "message"

 PRE_EXECUTION_MESSAGE_ARG = "pre_exec_msg"
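The new SEND_MESSAGE_TOOL_NAME constant replaces the hard-coded "send_message" literal in the tool lists above. A small illustrative sketch of referencing the constant from downstream code (the helper function is hypothetical):

# Illustrative only: prefer the named constant over the "send_message" literal.
from letta.constants import BASE_TOOLS, SEND_MESSAGE_TOOL_NAME

def is_send_message_tool(tool_name: str) -> bool:
    # Hypothetical helper comparing against the canonical tool name.
    return tool_name == SEND_MESSAGE_TOOL_NAME

assert SEND_MESSAGE_TOOL_NAME in BASE_TOOLS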
letta/functions/function_sets/multi_agent.py CHANGED
@@ -9,7 +9,6 @@ from letta.functions.helpers import (
 extract_send_message_from_steps_messages,
 fire_and_forget_send_to_agent,
 )
-from letta.helpers.message_helper import prepare_input_message_create
 from letta.schemas.enums import MessageRole
 from letta.schemas.message import MessageCreate
 from letta.server.rest_api.utils import get_letta_server
@@ -109,11 +108,10 @@ def send_message_to_agents_matching_tags(self: "Agent", message: str, match_all:

 # Prepare the message
 messages = [MessageCreate(role=MessageRole.system, content=augmented_message, name=self.agent_state.name)]
-input_messages = [prepare_input_message_create(m, agent_id) for m in messages]

 # Run .step() and return the response
 usage_stats = agent.step(
-messages=input_messages,
+input_messages=messages,
 chaining=True,
 max_chaining_steps=None,
 stream=False,
letta/functions/helpers.py CHANGED
@@ -352,7 +352,7 @@ async def send_message_to_agent_no_stream(
 server: "SyncServer",
 agent_id: str,
 actor: User,
-messages: Union[List[Message], List[MessageCreate]],
+messages: List[MessageCreate],
 metadata: Optional[dict] = None,
 ) -> LettaResponse:
 """
@@ -368,7 +368,7 @@ async def send_message_to_agent_no_stream(
 server.send_messages,
 actor=actor,
 agent_id=agent_id,
-messages=messages,
+input_messages=messages,
 interface=interface,
 metadata=metadata,
 )
@@ -478,7 +478,7 @@ def fire_and_forget_send_to_agent(
 await server.send_message_to_agent(
 agent_id=other_agent_id,
 actor=sender_agent.user,
-messages=messages,
+input_messages=messages,
 stream_steps=False,
 stream_tokens=False,
 use_assistant_message=True,
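Across these helpers only the keyword changes: step() and send_messages() now take input_messages= with a plain list of MessageCreate, and the prepare_input_message_create pass is gone. A hedged sketch of the new call shape, assuming an Agent instance is already constructed:

# Sketch of the renamed keyword; `agent` is assumed to be an existing letta Agent.
from letta.schemas.enums import MessageRole
from letta.schemas.message import MessageCreate


def run_one_step(agent):
    # MessageCreate objects are passed straight through via input_messages=
    # (previously messages= took pre-converted Message objects).
    messages = [MessageCreate(role=MessageRole.user, content="hello", name=None)]
    return agent.step(
        input_messages=messages,
        chaining=True,
        max_chaining_steps=None,
        stream=False,
    )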
letta/groups/dynamic_multi_agent.py CHANGED
@@ -35,7 +35,7 @@ class DynamicMultiAgent(Agent):

 def step(
 self,
-messages: List[MessageCreate],
+input_messages: List[MessageCreate],
 chaining: bool = True,
 max_chaining_steps: Optional[int] = None,
 put_inner_thoughts_first: bool = True,
@@ -43,27 +43,43 @@ class DynamicMultiAgent(Agent):
 ) -> LettaUsageStatistics:
 total_usage = UsageStatistics()
 step_count = 0
+speaker_id = None

+# Load settings
 token_streaming = self.interface.streaming_mode if hasattr(self.interface, "streaming_mode") else False
 metadata = self.interface.metadata if hasattr(self.interface, "metadata") else None

-agents = {}
+# Load agents and initialize chat history with indexing
+agents = {self.agent_state.id: self.load_manager_agent()}
 message_index = {self.agent_state.id: 0}
-agents[self.agent_state.id] = self.load_manager_agent()
+chat_history: List[MessageCreate] = []
 for agent_id in self.agent_ids:
 agents[agent_id] = self.load_participant_agent(agent_id=agent_id)
 message_index[agent_id] = 0

-chat_history: List[Message] = []
-new_messages = messages
-speaker_id = None
+# Prepare new messages
+new_messages = []
+for message in input_messages:
+if isinstance(message.content, str):
+message.content = [TextContent(text=message.content)]
+message.group_id = self.group_id
+new_messages.append(message)
+
 try:
 for _ in range(self.max_turns):
+# Prepare manager message
 agent_id_options = [agent_id for agent_id in self.agent_ids if agent_id != speaker_id]
-manager_message = self.ask_manager_to_choose_participant_message(new_messages, chat_history, agent_id_options)
+manager_message = self.ask_manager_to_choose_participant_message(
+manager_agent_id=self.agent_state.id,
+new_messages=new_messages,
+chat_history=chat_history,
+agent_id_options=agent_id_options,
+)
+
+# Perform manager step
 manager_agent = agents[self.agent_state.id]
 usage_stats = manager_agent.step(
-messages=[manager_message],
+input_messages=[manager_message],
 chaining=chaining,
 max_chaining_steps=max_chaining_steps,
 stream=token_streaming,
@@ -71,42 +87,27 @@ class DynamicMultiAgent(Agent):
 metadata=metadata,
 put_inner_thoughts_first=put_inner_thoughts_first,
 )
+
+# Parse manager response
 responses = Message.to_letta_messages_from_list(manager_agent.last_response_messages)
 assistant_message = [response for response in responses if response.message_type == "assistant_message"][0]
 for name, agent_id in [(agents[agent_id].agent_state.name, agent_id) for agent_id in agent_id_options]:
 if name.lower() in assistant_message.content.lower():
 speaker_id = agent_id

-# sum usage
+# Sum usage
 total_usage.prompt_tokens += usage_stats.prompt_tokens
 total_usage.completion_tokens += usage_stats.completion_tokens
 total_usage.total_tokens += usage_stats.total_tokens
 step_count += 1

-# initialize input messages
-for message in chat_history[message_index[speaker_id] :]:
-message.id = Message.generate_id()
-message.agent_id = speaker_id
+# Update chat history
+chat_history.extend(new_messages)

-for message in new_messages:
-chat_history.append(
-Message(
-agent_id=speaker_id,
-role=message.role,
-content=[TextContent(text=message.content)],
-name=message.name,
-model=None,
-tool_calls=None,
-tool_call_id=None,
-group_id=self.group_id,
-otid=message.otid,
-)
-)
-
-# load agent and perform step
+# Perform participant step
 participant_agent = agents[speaker_id]
 usage_stats = participant_agent.step(
-messages=chat_history[message_index[speaker_id] :],
+input_messages=chat_history[message_index[speaker_id] :],
 chaining=chaining,
 max_chaining_steps=max_chaining_steps,
 stream=token_streaming,
@@ -115,54 +116,54 @@ class DynamicMultiAgent(Agent):
 put_inner_thoughts_first=put_inner_thoughts_first,
 )

-# parse new messages for next step
+# Parse participant response
 responses = Message.to_letta_messages_from_list(
 participant_agent.last_response_messages,
 )
-
 assistant_messages = [response for response in responses if response.message_type == "assistant_message"]
 new_messages = [
 MessageCreate(
 role="system",
-content=message.content,
+content=[TextContent(text=message.content)] if isinstance(message.content, str) else message.content,
 name=participant_agent.agent_state.name,
 otid=message.otid,
+sender_id=participant_agent.agent_state.id,
+group_id=self.group_id,
 )
 for message in assistant_messages
 ]
+
+# Update message index
 message_index[speaker_id] = len(chat_history) + len(new_messages)

-# sum usage
+# Sum usage
 total_usage.prompt_tokens += usage_stats.prompt_tokens
 total_usage.completion_tokens += usage_stats.completion_tokens
 total_usage.total_tokens += usage_stats.total_tokens
 step_count += 1

-# check for termination token
+# Check for termination token
 if any(self.termination_token in message.content for message in new_messages):
 break

-# persist remaining chat history
-for message in new_messages:
-chat_history.append(
-Message(
-agent_id=agent_id,
-role=message.role,
-content=[TextContent(text=message.content)],
-name=message.name,
-model=None,
-tool_calls=None,
-tool_call_id=None,
-group_id=self.group_id,
-)
-)
+# Persist remaining chat history
+chat_history.extend(new_messages)
 for agent_id, index in message_index.items():
 if agent_id == speaker_id:
 continue
+messages_to_persist = []
 for message in chat_history[index:]:
-message.id = Message.generate_id()
-message.agent_id = agent_id
-self.message_manager.create_many_messages(chat_history[index:], actor=self.user)
+message_to_persist = Message(
+role=message.role,
+content=message.content,
+name=message.name,
+otid=message.otid,
+sender_id=message.sender_id,
+group_id=message.group_id,
+agent_id=agent_id,
+)
+messages_to_persist.append(message_to_persist)
+self.message_manager.create_many_messages(messages_to_persist, actor=self.user)

 except Exception as e:
 raise e
@@ -249,10 +250,11 @@

 def ask_manager_to_choose_participant_message(
 self,
+manager_agent_id: str,
 new_messages: List[MessageCreate],
 chat_history: List[Message],
 agent_id_options: List[str],
-) -> Message:
+) -> MessageCreate:
 text_chat_history = [f"{message.name or 'user'}: {message.content[0].text}" for message in chat_history]
 for message in new_messages:
 text_chat_history.append(f"{message.name or 'user'}: {message.content}")
@@ -264,14 +266,11 @@
 "respond to the messages yourself, your task is only to decide the "
 f"next speaker, not to participate. \nChat history:\n{context_messages}"
 )
-return Message(
-agent_id=self.agent_state.id,
+return MessageCreate(
 role="user",
 content=[TextContent(text=message_text)],
 name=None,
-model=None,
-tool_calls=None,
-tool_call_id=None,
-group_id=self.group_id,
 otid=Message.generate_otid(),
+sender_id=manager_agent_id,
+group_id=self.group_id,
 )
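The DynamicMultiAgent refactor keeps the shared chat history as MessageCreate objects and only materializes Message rows when persisting history for the agents that did not speak. A condensed sketch of that pattern, using only fields that appear in the hunks above (import paths and the placeholder agent id are assumptions):

# Condensed illustration of the new chat-history handling (not the full method).
from letta.schemas.letta_message_content import TextContent  # path assumed
from letta.schemas.message import Message, MessageCreate

chat_history: list[MessageCreate] = []

# Incoming content is normalized to TextContent parts before joining the history.
incoming = MessageCreate(role="user", content="status update", name=None)
if isinstance(incoming.content, str):
    incoming.content = [TextContent(text=incoming.content)]
chat_history.append(incoming)

# For each non-speaking agent, the MessageCreate history is copied into Message
# rows bound to that agent_id before create_many_messages persists them.
messages_to_persist = [
    Message(
        role=m.role,
        content=m.content,
        name=m.name,
        otid=m.otid,
        sender_id=m.sender_id,
        group_id=m.group_id,
        agent_id="agent-123",  # hypothetical placeholder id
    )
    for m in chat_history
]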
letta/groups/round_robin_multi_agent.py CHANGED
@@ -29,7 +29,7 @@ class RoundRobinMultiAgent(Agent):

 def step(
 self,
-messages: List[MessageCreate],
+input_messages: List[MessageCreate],
 chaining: bool = True,
 max_chaining_steps: Optional[int] = None,
 put_inner_thoughts_first: bool = True,
@@ -37,46 +37,39 @@ class RoundRobinMultiAgent(Agent):
 ) -> LettaUsageStatistics:
 total_usage = UsageStatistics()
 step_count = 0
+speaker_id = None

+# Load settings
 token_streaming = self.interface.streaming_mode if hasattr(self.interface, "streaming_mode") else False
 metadata = self.interface.metadata if hasattr(self.interface, "metadata") else None

-agents = {}
+# Load agents and initialize chat history with indexing
+agents, message_index = {}, {}
+chat_history: List[MessageCreate] = []
 for agent_id in self.agent_ids:
 agents[agent_id] = self.load_participant_agent(agent_id=agent_id)
+message_index[agent_id] = 0
+
+# Prepare new messages
+new_messages = []
+for message in input_messages:
+if isinstance(message.content, str):
+message.content = [TextContent(text=message.content)]
+message.group_id = self.group_id
+new_messages.append(message)

-message_index = {agent_id: 0 for agent_id in self.agent_ids}
-chat_history: List[Message] = []
-new_messages = messages
-speaker_id = None
 try:
 for i in range(self.max_turns):
+# Select speaker
 speaker_id = self.agent_ids[i % len(self.agent_ids)]
-# initialize input messages
-start_index = message_index[speaker_id] if speaker_id in message_index else 0
-for message in chat_history[start_index:]:
-message.id = Message.generate_id()
-message.agent_id = speaker_id
-
-for message in new_messages:
-chat_history.append(
-Message(
-agent_id=speaker_id,
-role=message.role,
-content=[TextContent(text=message.content)],
-name=message.name,
-model=None,
-tool_calls=None,
-tool_call_id=None,
-group_id=self.group_id,
-otid=message.otid,
-)
-)

-# load agent and perform step
+# Update chat history
+chat_history.extend(new_messages)
+
+# Perform participant step
 participant_agent = agents[speaker_id]
 usage_stats = participant_agent.step(
-messages=chat_history[start_index:],
+input_messages=chat_history[message_index[speaker_id] :],
 chaining=chaining,
 max_chaining_steps=max_chaining_steps,
 stream=token_streaming,
@@ -85,47 +78,48 @@ class RoundRobinMultiAgent(Agent):
 put_inner_thoughts_first=put_inner_thoughts_first,
 )

-# parse new messages for next step
+# Parse participant response
 responses = Message.to_letta_messages_from_list(participant_agent.last_response_messages)
 assistant_messages = [response for response in responses if response.message_type == "assistant_message"]
 new_messages = [
 MessageCreate(
 role="system",
-content=message.content,
-name=message.name,
+content=[TextContent(text=message.content)] if isinstance(message.content, str) else message.content,
+name=participant_agent.agent_state.name,
 otid=message.otid,
+sender_id=participant_agent.agent_state.id,
+group_id=self.group_id,
 )
 for message in assistant_messages
 ]
+
+# Update message index
 message_index[speaker_id] = len(chat_history) + len(new_messages)

-# sum usage
+# Sum usage
 total_usage.prompt_tokens += usage_stats.prompt_tokens
 total_usage.completion_tokens += usage_stats.completion_tokens
 total_usage.total_tokens += usage_stats.total_tokens
 step_count += 1

-# persist remaining chat history
-for message in new_messages:
-chat_history.append(
-Message(
-agent_id=agent_id,
-role=message.role,
-content=[TextContent(text=message.content)],
-name=message.name,
-model=None,
-tool_calls=None,
-tool_call_id=None,
-group_id=self.group_id,
-)
-)
+# Persist remaining chat history
+chat_history.extend(new_messages)
 for agent_id, index in message_index.items():
 if agent_id == speaker_id:
 continue
+messages_to_persist = []
 for message in chat_history[index:]:
-message.id = Message.generate_id()
-message.agent_id = agent_id
-self.message_manager.create_many_messages(chat_history[index:], actor=self.user)
+message_to_persist = Message(
+role=message.role,
+content=message.content,
+name=message.name,
+otid=message.otid,
+sender_id=message.sender_id,
+group_id=self.group_id,
+agent_id=agent_id,
+)
+messages_to_persist.append(message_to_persist)
+self.message_manager.create_many_messages(messages_to_persist, actor=self.user)

 except Exception as e:
 raise e
letta/groups/sleeptime_multi_agent.py CHANGED
@@ -143,8 +143,21 @@ class SleeptimeMultiAgent(Agent):
 group_id=self.group_id,
 )
 ]
+
+# Convert Message objects to MessageCreate objects
+message_creates = [
+MessageCreate(
+role=m.role,
+content=m.content[0].text if m.content and len(m.content) == 1 else m.content,
+name=m.name,
+otid=m.otid,
+sender_id=m.sender_id,
+)
+for m in participant_agent_messages
+]
+
 result = participant_agent.step(
-messages=participant_agent_messages,
+input_messages=message_creates,
 chaining=chaining,
 max_chaining_steps=max_chaining_steps,
 stream=token_streaming,
@@ -173,7 +186,7 @@ class SleeptimeMultiAgent(Agent):

 def step(
 self,
-messages: List[MessageCreate],
+input_messages: List[MessageCreate],
 chaining: bool = True,
 max_chaining_steps: Optional[int] = None,
 put_inner_thoughts_first: bool = True,
@@ -181,33 +194,28 @@ class SleeptimeMultiAgent(Agent):
 ) -> LettaUsageStatistics:
 run_ids = []

+# Load settings
 token_streaming = self.interface.streaming_mode if hasattr(self.interface, "streaming_mode") else False
 metadata = self.interface.metadata if hasattr(self.interface, "metadata") else None

-messages = [
-Message(
-id=Message.generate_id(),
-agent_id=self.agent_state.id,
-role=message.role,
-content=[TextContent(text=message.content)] if isinstance(message.content, str) else message.content,
-name=message.name,
-model=None,
-tool_calls=None,
-tool_call_id=None,
-group_id=self.group_id,
-otid=message.otid,
-)
-for message in messages
-]
+# Prepare new messages
+new_messages = []
+for message in input_messages:
+if isinstance(message.content, str):
+message.content = [TextContent(text=message.content)]
+message.group_id = self.group_id
+new_messages.append(message)

 try:
+# Load main agent
 main_agent = Agent(
 agent_state=self.agent_state,
 interface=self.interface,
 user=self.user,
 )
+# Perform main agent step
 usage_stats = main_agent.step(
-messages=messages,
+input_messages=new_messages,
 chaining=chaining,
 max_chaining_steps=max_chaining_steps,
 stream=token_streaming,
@@ -216,10 +224,12 @@ class SleeptimeMultiAgent(Agent):
 put_inner_thoughts_first=put_inner_thoughts_first,
 )

+# Update turns counter
 turns_counter = None
 if self.sleeptime_agent_frequency is not None and self.sleeptime_agent_frequency > 0:
 turns_counter = self.group_manager.bump_turns_counter(group_id=self.group_id, actor=self.user)

+# Perform participant steps
 if self.sleeptime_agent_frequency is None or (
 turns_counter is not None and turns_counter % self.sleeptime_agent_frequency == 0
 ):
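The SleeptimeMultiAgent hunks also show the inverse conversion: stored Message objects are folded back into MessageCreate before the participant step, collapsing a single TextContent part back to a plain string. A hedged sketch of that conversion as a standalone helper:

# Sketch of the Message -> MessageCreate conversion used before participant steps.
from typing import List

from letta.schemas.message import Message, MessageCreate


def to_message_creates(stored: List[Message]) -> List[MessageCreate]:
    return [
        MessageCreate(
            role=m.role,
            # Collapse a lone TextContent part back to a plain string, mirroring
            # the conversion added in the hunk above.
            content=m.content[0].text if m.content and len(m.content) == 1 else m.content,
            name=m.name,
            otid=m.otid,
            sender_id=m.sender_id,
        )
        for m in stored
    ]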