letta-nightly 0.7.8.dev20250501104226__py3-none-any.whl → 0.7.9.dev20250502222710__py3-none-any.whl

This diff shows the changes between two publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (41)
  1. letta/__init__.py +2 -2
  2. letta/agents/helpers.py +58 -1
  3. letta/agents/letta_agent.py +13 -3
  4. letta/agents/letta_agent_batch.py +33 -17
  5. letta/agents/voice_agent.py +1 -2
  6. letta/agents/voice_sleeptime_agent.py +75 -320
  7. letta/functions/function_sets/multi_agent.py +1 -1
  8. letta/functions/function_sets/voice.py +20 -32
  9. letta/functions/helpers.py +7 -7
  10. letta/helpers/datetime_helpers.py +6 -0
  11. letta/helpers/message_helper.py +19 -18
  12. letta/jobs/scheduler.py +233 -49
  13. letta/llm_api/google_ai_client.py +13 -4
  14. letta/llm_api/google_vertex_client.py +5 -1
  15. letta/llm_api/openai.py +10 -2
  16. letta/llm_api/openai_client.py +14 -2
  17. letta/orm/message.py +4 -0
  18. letta/prompts/system/voice_sleeptime.txt +2 -3
  19. letta/schemas/letta_message.py +1 -0
  20. letta/schemas/letta_request.py +8 -1
  21. letta/schemas/letta_response.py +5 -0
  22. letta/schemas/llm_batch_job.py +6 -4
  23. letta/schemas/llm_config.py +9 -0
  24. letta/schemas/message.py +23 -2
  25. letta/schemas/providers.py +3 -1
  26. letta/server/rest_api/app.py +15 -7
  27. letta/server/rest_api/routers/v1/agents.py +3 -0
  28. letta/server/rest_api/routers/v1/messages.py +46 -1
  29. letta/server/rest_api/routers/v1/steps.py +1 -1
  30. letta/server/rest_api/utils.py +25 -6
  31. letta/server/server.py +11 -3
  32. letta/services/llm_batch_manager.py +60 -1
  33. letta/services/message_manager.py +1 -0
  34. letta/services/summarizer/summarizer.py +42 -36
  35. letta/settings.py +1 -0
  36. letta/tracing.py +5 -0
  37. {letta_nightly-0.7.8.dev20250501104226.dist-info → letta_nightly-0.7.9.dev20250502222710.dist-info}/METADATA +2 -2
  38. {letta_nightly-0.7.8.dev20250501104226.dist-info → letta_nightly-0.7.9.dev20250502222710.dist-info}/RECORD +41 -41
  39. {letta_nightly-0.7.8.dev20250501104226.dist-info → letta_nightly-0.7.9.dev20250502222710.dist-info}/LICENSE +0 -0
  40. {letta_nightly-0.7.8.dev20250501104226.dist-info → letta_nightly-0.7.9.dev20250502222710.dist-info}/WHEEL +0 -0
  41. {letta_nightly-0.7.8.dev20250501104226.dist-info → letta_nightly-0.7.9.dev20250502222710.dist-info}/entry_points.txt +0 -0
letta/__init__.py CHANGED
@@ -1,9 +1,9 @@
-__version__ = "0.7.8"
+__version__ = "0.7.9"
 
 # import clients
 from letta.client.client import LocalClient, RESTClient, create_client
 
-# # imports for easier access
+# imports for easier access
 from letta.schemas.agent import AgentState
 from letta.schemas.block import Block
 from letta.schemas.embedding_config import EmbeddingConfig
letta/agents/helpers.py CHANGED
@@ -1,3 +1,4 @@
+import xml.etree.ElementTree as ET
 from typing import List, Tuple
 
 from letta.schemas.agent import AgentState
@@ -20,7 +21,10 @@ def _create_letta_response(new_in_context_messages: list[Message], use_assistant
 
 
 def _prepare_in_context_messages(
-    input_messages: List[MessageCreate], agent_state: AgentState, message_manager: MessageManager, actor: User
+    input_messages: List[MessageCreate],
+    agent_state: AgentState,
+    message_manager: MessageManager,
+    actor: User,
 ) -> Tuple[List[Message], List[Message]]:
     """
     Prepares in-context messages for an agent, based on the current state and a new user input.
@@ -50,3 +54,56 @@ def _prepare_in_context_messages(
     )
 
     return current_in_context_messages, new_in_context_messages
+
+
+def serialize_message_history(messages: List[str], context: str) -> str:
+    """
+    Produce an XML document like:
+
+    <memory>
+        <messages>
+            <message>…</message>
+            <message>…</message>
+
+        </messages>
+        <context>…</context>
+    </memory>
+    """
+    root = ET.Element("memory")
+
+    msgs_el = ET.SubElement(root, "messages")
+    for msg in messages:
+        m = ET.SubElement(msgs_el, "message")
+        m.text = msg
+
+    sum_el = ET.SubElement(root, "context")
+    sum_el.text = context
+
+    # ET.tostring will escape reserved chars for you
+    return ET.tostring(root, encoding="unicode")
+
+
+def deserialize_message_history(xml_str: str) -> Tuple[List[str], str]:
+    """
+    Parse the XML back into (messages, context). Raises ValueError if tags are missing.
+    """
+    try:
+        root = ET.fromstring(xml_str)
+    except ET.ParseError as e:
+        raise ValueError(f"Invalid XML: {e}")
+
+    msgs_el = root.find("messages")
+    if msgs_el is None:
+        raise ValueError("Missing <messages> section")
+
+    messages = []
+    for m in msgs_el.findall("message"):
+        # .text may be None if empty, so coerce to empty string
+        messages.append(m.text or "")
+
+    sum_el = root.find("context")
+    if sum_el is None:
+        raise ValueError("Missing <context> section")
+    context = sum_el.text or ""
+
+    return messages, context
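
For orientation, here is a minimal round-trip through the two helpers added above (an illustrative sketch, not part of the diff; it assumes only that the functions are importable from letta.agents.helpers as shown):

    from letta.agents.helpers import deserialize_message_history, serialize_message_history

    # Wrap a transcript plus free-form context in the <memory> XML envelope
    xml_doc = serialize_message_history(["user: hi", "assistant: hello there"], context="casual greeting")

    # Parsing recovers the original values; malformed XML or missing tags raise ValueError
    messages, context = deserialize_message_history(xml_doc)
    assert messages == ["user: hi", "assistant: hello there"]
    assert context == "casual greeting"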
letta/agents/letta_agent.py CHANGED
@@ -62,6 +62,14 @@ class LettaAgent(BaseAgent):
     @trace_method
     async def step(self, input_messages: List[MessageCreate], max_steps: int = 10) -> LettaResponse:
         agent_state = self.agent_manager.get_agent_by_id(self.agent_id, actor=self.actor)
+        current_in_context_messages, new_in_context_messages = await self._step(
+            agent_state=agent_state, input_messages=input_messages, max_steps=max_steps
+        )
+        return _create_letta_response(new_in_context_messages=new_in_context_messages, use_assistant_message=self.use_assistant_message)
+
+    async def _step(
+        self, agent_state: AgentState, input_messages: List[MessageCreate], max_steps: int = 10
+    ) -> Tuple[List[Message], List[Message]]:
         current_in_context_messages, new_in_context_messages = _prepare_in_context_messages(
             input_messages, agent_state, self.message_manager, self.actor
         )
@@ -72,7 +80,7 @@ class LettaAgent(BaseAgent):
             put_inner_thoughts_first=True,
             actor_id=self.actor.id,
         )
-        for step in range(max_steps):
+        for _ in range(max_steps):
             response = await self._get_ai_reply(
                 llm_client=llm_client,
                 in_context_messages=current_in_context_messages + new_in_context_messages,
@@ -83,6 +91,7 @@ class LettaAgent(BaseAgent):
             )
 
             tool_call = response.choices[0].message.tool_calls[0]
+
             persisted_messages, should_continue = await self._handle_ai_response(tool_call, agent_state, tool_rules_solver)
             self.response_messages.extend(persisted_messages)
             new_in_context_messages.extend(persisted_messages)
@@ -95,7 +104,7 @@ class LettaAgent(BaseAgent):
         message_ids = [m.id for m in (current_in_context_messages + new_in_context_messages)]
         self.agent_manager.set_in_context_messages(agent_id=self.agent_id, message_ids=message_ids, actor=self.actor)
 
-        return _create_letta_response(new_in_context_messages=new_in_context_messages, use_assistant_message=self.use_assistant_message)
+        return current_in_context_messages, new_in_context_messages
 
     @trace_method
     async def step_stream(
@@ -117,7 +126,7 @@ class LettaAgent(BaseAgent):
             actor_id=self.actor.id,
         )
 
-        for step in range(max_steps):
+        for _ in range(max_steps):
             stream = await self._get_ai_reply(
                 llm_client=llm_client,
                 in_context_messages=current_in_context_messages + new_in_context_messages,
@@ -181,6 +190,7 @@ class LettaAgent(BaseAgent):
                     ToolType.LETTA_MEMORY_CORE,
                     ToolType.LETTA_MULTI_AGENT_CORE,
                     ToolType.LETTA_SLEEPTIME_CORE,
+                    ToolType.LETTA_VOICE_SLEEPTIME_CORE,
                 }
                 or (t.tool_type == ToolType.LETTA_MULTI_AGENT_CORE and t.name == "send_message_to_agents_matching_tags")
                 or (t.tool_type == ToolType.EXTERNAL_COMPOSIO)
letta/agents/letta_agent_batch.py CHANGED
@@ -137,21 +137,37 @@ class LettaAgentBatch:
         log_event(name="load_and_prepare_agents")
         agent_messages_mapping: Dict[str, List[Message]] = {}
         agent_tools_mapping: Dict[str, List[dict]] = {}
+        # TODO: This isn't optimal, moving fast - prone to bugs because we pass around this half formed pydantic object
+        agent_batch_item_mapping: Dict[str, LLMBatchItem] = {}
         agent_states = []
         for batch_request in batch_requests:
             agent_id = batch_request.agent_id
             agent_state = self.agent_manager.get_agent_by_id(agent_id, actor=self.actor)
             agent_states.append(agent_state)
 
-            agent_messages_mapping[agent_id] = self._get_in_context_messages_per_agent(
-                agent_state=agent_state, input_messages=batch_request.messages
-            )
-
             if agent_id not in agent_step_state_mapping:
                 agent_step_state_mapping[agent_id] = AgentStepState(
                     step_number=0, tool_rules_solver=ToolRulesSolver(tool_rules=agent_state.tool_rules)
                 )
 
+            llm_batch_item = LLMBatchItem(
+                llm_batch_id="",  # TODO: This is hacky, it gets filled in later
+                agent_id=agent_state.id,
+                llm_config=agent_state.llm_config,
+                request_status=JobStatus.created,
+                step_status=AgentStepStatus.paused,
+                step_state=agent_step_state_mapping[agent_id],
+            )
+            agent_batch_item_mapping[agent_id] = llm_batch_item
+
+            # Fill in the batch_item_id for the message
+            for msg in batch_request.messages:
+                msg.batch_item_id = llm_batch_item.id
+
+            agent_messages_mapping[agent_id] = self._prepare_in_context_messages_per_agent(
+                agent_state=agent_state, input_messages=batch_request.messages
+            )
+
             agent_tools_mapping[agent_id] = self._prepare_tools_per_agent(agent_state, agent_step_state_mapping[agent_id].tool_rules_solver)
 
         log_event(name="init_llm_client")
@@ -182,21 +198,14 @@ class LettaAgentBatch:
         log_event(name="prepare_batch_items")
         batch_items = []
         for state in agent_states:
-            step_state = agent_step_state_mapping[state.id]
-            batch_items.append(
-                LLMBatchItem(
-                    llm_batch_id=llm_batch_job.id,
-                    agent_id=state.id,
-                    llm_config=state.llm_config,
-                    request_status=JobStatus.created,
-                    step_status=AgentStepStatus.paused,
-                    step_state=step_state,
-                )
-            )
+            llm_batch_item = agent_batch_item_mapping[state.id]
+            # TODO This is hacky
+            llm_batch_item.llm_batch_id = llm_batch_job.id
+            batch_items.append(llm_batch_item)
 
         if batch_items:
             log_event(name="bulk_create_batch_items")
-            self.batch_manager.create_llm_batch_items_bulk(batch_items, actor=self.actor)
+            batch_items_persisted = self.batch_manager.create_llm_batch_items_bulk(batch_items, actor=self.actor)
 
         log_event(name="return_batch_response")
         return LettaBatchResponse(
@@ -335,9 +344,14 @@ class LettaAgentBatch:
         exec_results: Sequence[Tuple[str, Tuple[str, bool]]],
         ctx: _ResumeContext,
     ) -> Dict[str, List[Message]]:
+        # TODO: This is redundant, we should have this ready on the ctx
+        # TODO: I am doing it quick and dirty for now
+        agent_item_map: Dict[str, LLMBatchItem] = {item.agent_id: item for item in ctx.batch_items}
+
         msg_map: Dict[str, List[Message]] = {}
         for aid, (tool_res, success) in exec_results:
             msgs = self._create_tool_call_messages(
+                llm_batch_item_id=agent_item_map[aid].id,
                 agent_state=ctx.agent_state_map[aid],
                 tool_call_name=ctx.tool_call_name_map[aid],
                 tool_call_args=ctx.tool_call_args_map[aid],
@@ -399,6 +413,7 @@ class LettaAgentBatch:
 
     def _create_tool_call_messages(
         self,
+        llm_batch_item_id: str,
         agent_state: AgentState,
         tool_call_name: str,
         tool_call_args: Dict[str, Any],
@@ -421,6 +436,7 @@ class LettaAgentBatch:
             reasoning_content=reasoning_content,
             pre_computed_assistant_message_id=None,
             pre_computed_tool_message_id=None,
+            llm_batch_item_id=llm_batch_item_id,
         )
 
         return tool_call_messages
@@ -477,7 +493,7 @@ class LettaAgentBatch:
         valid_tool_names = tool_rules_solver.get_allowed_tool_names(available_tools=set([t.name for t in tools]))
         return [enable_strict_mode(t.json_schema) for t in tools if t.name in set(valid_tool_names)]
 
-    def _get_in_context_messages_per_agent(self, agent_state: AgentState, input_messages: List[MessageCreate]) -> List[Message]:
+    def _prepare_in_context_messages_per_agent(self, agent_state: AgentState, input_messages: List[MessageCreate]) -> List[Message]:
         current_in_context_messages, new_in_context_messages = _prepare_in_context_messages(
             input_messages, agent_state, self.message_manager, self.actor
         )
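
The batch refactor above hinges on each LLMBatchItem carrying a usable id before it is persisted, so messages can be stamped with batch_item_id up front and llm_batch_id backfilled once the batch job exists. A rough sketch of that two-pass pattern, with simplified stand-in types rather than Letta's actual schemas, and assuming the item id is generated client-side (which the diff implies, since llm_batch_item.id is read before the bulk create):

    import uuid
    from dataclasses import dataclass, field
    from typing import Dict, List


    @dataclass
    class BatchItem:
        # id is generated locally so other objects can reference it pre-persistence
        agent_id: str
        llm_batch_id: str = ""  # unknown until the provider batch job is created
        id: str = field(default_factory=lambda: f"batch_item-{uuid.uuid4()}")


    @dataclass
    class PendingMessage:
        text: str
        batch_item_id: str = ""


    def prepare(messages_by_agent: Dict[str, List[PendingMessage]]) -> Dict[str, BatchItem]:
        """First pass: build one item per agent and stamp its id onto that agent's messages."""
        items: Dict[str, BatchItem] = {}
        for agent_id, msgs in messages_by_agent.items():
            item = BatchItem(agent_id=agent_id)
            items[agent_id] = item
            for msg in msgs:
                msg.batch_item_id = item.id
        return items


    def attach_batch_job(items: Dict[str, BatchItem], llm_batch_id: str) -> None:
        """Second pass: once the batch job exists, backfill its id on every item."""
        for item in items.values():
            item.llm_batch_id = llm_batch_id


    msgs = {"agent-1": [PendingMessage("hello")], "agent-2": [PendingMessage("hi")]}
    items = prepare(msgs)
    attach_batch_job(items, "batch-xyz")
    assert all(m.batch_item_id for agent_msgs in msgs.values() for m in agent_msgs)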
letta/agents/voice_agent.py CHANGED
@@ -97,13 +97,12 @@ class VoiceAgent(BaseAgent):
             summarizer_agent=VoiceSleeptimeAgent(
                 agent_id=voice_sleeptime_agent_id,
                 convo_agent_state=agent_state,
-                openai_client=self.openai_client,
                 message_manager=self.message_manager,
                 agent_manager=self.agent_manager,
                 actor=self.actor,
                 block_manager=self.block_manager,
+                passage_manager=self.passage_manager,
                 target_block_label=self.summary_block_label,
-                message_transcripts=[],
             ),
             message_buffer_limit=agent_state.multi_agent_group.max_message_buffer_length,
             message_buffer_min=agent_state.multi_agent_group.min_message_buffer_length,