letta-nightly 0.6.45.dev20250329104117__py3-none-any.whl → 0.6.46.dev20250330104049__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of letta-nightly might be problematic.

Files changed (48)
  1. letta/__init__.py +1 -1
  2. letta/agent.py +25 -8
  3. letta/agents/base_agent.py +6 -5
  4. letta/agents/letta_agent.py +323 -0
  5. letta/agents/voice_agent.py +4 -3
  6. letta/client/client.py +2 -0
  7. letta/dynamic_multi_agent.py +5 -5
  8. letta/errors.py +20 -0
  9. letta/helpers/tool_execution_helper.py +1 -1
  10. letta/helpers/tool_rule_solver.py +1 -1
  11. letta/llm_api/anthropic.py +2 -0
  12. letta/llm_api/anthropic_client.py +153 -167
  13. letta/llm_api/google_ai_client.py +112 -29
  14. letta/llm_api/llm_api_tools.py +5 -0
  15. letta/llm_api/llm_client.py +6 -7
  16. letta/llm_api/llm_client_base.py +38 -17
  17. letta/llm_api/openai.py +2 -0
  18. letta/orm/group.py +2 -5
  19. letta/round_robin_multi_agent.py +18 -7
  20. letta/schemas/group.py +6 -0
  21. letta/schemas/message.py +23 -14
  22. letta/schemas/openai/chat_completion_request.py +6 -1
  23. letta/schemas/providers.py +3 -3
  24. letta/serialize_schemas/marshmallow_agent.py +34 -10
  25. letta/serialize_schemas/pydantic_agent_schema.py +23 -3
  26. letta/server/rest_api/app.py +9 -0
  27. letta/server/rest_api/interface.py +25 -2
  28. letta/server/rest_api/optimistic_json_parser.py +1 -1
  29. letta/server/rest_api/routers/v1/agents.py +57 -23
  30. letta/server/rest_api/routers/v1/groups.py +72 -49
  31. letta/server/rest_api/routers/v1/sources.py +1 -0
  32. letta/server/rest_api/utils.py +0 -1
  33. letta/server/server.py +73 -80
  34. letta/server/startup.sh +1 -1
  35. letta/services/agent_manager.py +7 -0
  36. letta/services/group_manager.py +87 -29
  37. letta/services/message_manager.py +5 -0
  38. letta/services/tool_executor/async_tool_execution_sandbox.py +397 -0
  39. letta/services/tool_executor/tool_execution_manager.py +27 -0
  40. letta/services/{tool_execution_sandbox.py → tool_executor/tool_execution_sandbox.py} +40 -12
  41. letta/services/tool_executor/tool_executor.py +23 -6
  42. letta/settings.py +17 -1
  43. letta/supervisor_multi_agent.py +3 -1
  44. {letta_nightly-0.6.45.dev20250329104117.dist-info → letta_nightly-0.6.46.dev20250330104049.dist-info}/METADATA +1 -1
  45. {letta_nightly-0.6.45.dev20250329104117.dist-info → letta_nightly-0.6.46.dev20250330104049.dist-info}/RECORD +48 -46
  46. {letta_nightly-0.6.45.dev20250329104117.dist-info → letta_nightly-0.6.46.dev20250330104049.dist-info}/LICENSE +0 -0
  47. {letta_nightly-0.6.45.dev20250329104117.dist-info → letta_nightly-0.6.46.dev20250330104049.dist-info}/WHEEL +0 -0
  48. {letta_nightly-0.6.45.dev20250329104117.dist-info → letta_nightly-0.6.46.dev20250330104049.dist-info}/entry_points.txt +0 -0
letta/llm_api/llm_client_base.py CHANGED
@@ -18,17 +18,13 @@ class LLMClientBase:
 
     def __init__(
         self,
-        agent_id: str,
         llm_config: LLMConfig,
         put_inner_thoughts_first: Optional[bool] = True,
         use_structured_output: Optional[bool] = True,
         use_tool_naming: bool = True,
-        actor_id: Optional[str] = None,
     ):
-        self.agent_id = agent_id
        self.llm_config = llm_config
        self.put_inner_thoughts_first = put_inner_thoughts_first
-        self.actor_id = actor_id
        self.use_tool_naming = use_tool_naming
 
    def send_llm_request(
@@ -46,13 +42,18 @@ class LLMClientBase:
         Otherwise returns a ChatCompletionResponse.
         """
         request_data = self.build_request_data(messages, tools, tool_call)
-        log_event(name="llm_request_sent", attributes=request_data)
-        if stream:
-            return self.stream(request_data)
-        else:
-            response_data = self.request(request_data)
+
+        try:
+            log_event(name="llm_request_sent", attributes=request_data)
+            if stream:
+                return self.stream(request_data)
+            else:
+                response_data = self.request(request_data)
             log_event(name="llm_response_received", attributes=response_data)
-        return self.convert_response_to_chat_completion(response_data, messages)
+        except Exception as e:
+            raise self.handle_llm_error(e)
+
+        return self.convert_response_to_chat_completion(response_data, messages)
 
     async def send_llm_request_async(
         self,
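Note: with the request and streaming paths now wrapped in try/except, callers of send_llm_request deal with a single translated error type instead of per-SDK exceptions. A hedged call-site sketch (client, messages, and tools are assumed to exist in the caller's scope; LLMError is assumed to be the base error in letta/errors.py, which this release extends):

    from letta.errors import LLMError

    try:
        response = client.send_llm_request(messages=messages, tools=tools, stream=False)
    except LLMError as e:
        # Whatever the provider SDK raised was translated by handle_llm_error()
        print(f"LLM request failed: {e}")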
@@ -68,14 +69,20 @@ class LLMClientBase:
         If stream=True, returns an AsyncStream[ChatCompletionChunk] that can be async iterated over.
         Otherwise returns a ChatCompletionResponse.
         """
-        request_data = self.build_request_data(messages, tools, tool_call)
-        log_event(name="llm_request_sent", attributes=request_data)
-        if stream:
-            return await self.stream_async(request_data)
-        else:
-            response_data = await self.request_async(request_data)
+        request_data = self.build_request_data(messages, tools, tool_call, force_tool_call)
+        response_data = {}
+
+        try:
+            log_event(name="llm_request_sent", attributes=request_data)
+            if stream:
+                return await self.stream_async(request_data)
+            else:
+                response_data = await self.request_async(request_data)
             log_event(name="llm_response_received", attributes=response_data)
-        return self.convert_response_to_chat_completion(response_data, messages)
+        except Exception as e:
+            raise self.handle_llm_error(e)
+
+        return self.convert_response_to_chat_completion(response_data, messages)
 
     @abstractmethod
     def build_request_data(
@@ -129,3 +136,17 @@ class LLMClientBase:
         Performs underlying streaming request to llm and returns raw response.
         """
         raise NotImplementedError(f"Streaming is not supported for {self.llm_config.model_endpoint_type}")
+
+    @abstractmethod
+    def handle_llm_error(self, e: Exception) -> Exception:
+        """
+        Maps provider-specific errors to common LLMError types.
+        Each LLM provider should implement this to translate their specific errors.
+
+        Args:
+            e: The original provider-specific exception
+
+        Returns:
+            An LLMError subclass that represents the error in a provider-agnostic way
+        """
+        return LLMError(f"Unhandled LLM error: {str(e)}")
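Note: handle_llm_error gives each provider client one place to translate SDK-specific exceptions into letta's common error type before they escape send_llm_request. A minimal sketch of an override (the subclass is hypothetical and omits the other abstract methods; it is not the actual anthropic_client.py implementation, which this release also rewrites):

    import anthropic

    from letta.errors import LLMError
    from letta.llm_api.llm_client_base import LLMClientBase


    class SketchAnthropicClient(LLMClientBase):
        def handle_llm_error(self, e: Exception) -> Exception:
            # Map SDK-specific failures to the provider-agnostic LLMError;
            # send_llm_request raises whatever this method returns.
            if isinstance(e, anthropic.RateLimitError):
                return LLMError(f"Rate limited by Anthropic: {e}")
            if isinstance(e, anthropic.APIConnectionError):
                return LLMError(f"Could not reach Anthropic: {e}")
            return super().handle_llm_error(e)  # generic fallback wrapper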
letta/llm_api/openai.py CHANGED
@@ -185,6 +185,7 @@ def openai_chat_completions_process_stream(
     # however, we don't necessarily want to put these
     # expect_reasoning_content: bool = False,
     expect_reasoning_content: bool = True,
+    name: Optional[str] = None,
 ) -> ChatCompletionResponse:
     """Process a streaming completion response, and return a ChatCompletionRequest at the end.
 
@@ -272,6 +273,7 @@ def openai_chat_completions_process_stream(
                 message_id=chat_completion_response.id if create_message_id else chat_completion_chunk.id,
                 message_date=chat_completion_response.created if create_message_datetime else chat_completion_chunk.created,
                 expect_reasoning_content=expect_reasoning_content,
+                name=name,
             )
         elif isinstance(stream_interface, AgentRefreshStreamingInterface):
             stream_interface.process_refresh(chat_completion_response)
letta/orm/group.py CHANGED
@@ -1,7 +1,7 @@
 import uuid
 from typing import List, Optional
 
-from sqlalchemy import ForeignKey, String
+from sqlalchemy import JSON, ForeignKey, String
 from sqlalchemy.orm import Mapped, mapped_column, relationship
 
 from letta.orm.mixins import OrganizationMixin
@@ -23,11 +23,8 @@ class Group(SqlalchemyBase, OrganizationMixin):
 
     # relationships
     organization: Mapped["Organization"] = relationship("Organization", back_populates="groups")
+    agent_ids: Mapped[List[str]] = mapped_column(JSON, nullable=False, doc="Ordered list of agent IDs in this group")
     agents: Mapped[List["Agent"]] = relationship(
         "Agent", secondary="groups_agents", lazy="selectin", passive_deletes=True, back_populates="groups"
     )
     manager_agent: Mapped["Agent"] = relationship("Agent", lazy="joined", back_populates="multi_agent_group")
-
-    @property
-    def agent_ids(self) -> List[str]:
-        return [agent.id for agent in self.agents]
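Note: agent_ids moves from a property derived from the agents relationship to a JSON column, so the stored list keeps an explicit order (rows fetched through the groups_agents association table carry no guaranteed ordering). A self-contained sketch of the idea, using toy models rather than letta's:

    from sqlalchemy import JSON, create_engine
    from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column


    class Base(DeclarativeBase):
        pass


    class ToyGroup(Base):
        __tablename__ = "toy_groups"
        id: Mapped[int] = mapped_column(primary_key=True)
        agent_ids: Mapped[list] = mapped_column(JSON, nullable=False)


    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add(ToyGroup(id=1, agent_ids=["agent-b", "agent-a", "agent-c"]))
        session.commit()
        # The JSON list round-trips verbatim, speaker order intact.
        assert session.get(ToyGroup, 1).agent_ids == ["agent-b", "agent-a", "agent-c"]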
letta/round_robin_multi_agent.py CHANGED
@@ -14,7 +14,7 @@ class RoundRobinMultiAgent(Agent):
         self,
         interface: AgentInterface,
         agent_state: AgentState,
-        user: User = None,
+        user: User,
         # custom
         group_id: str = "",
         agent_ids: List[str] = [],
@@ -45,7 +45,7 @@ class RoundRobinMultiAgent(Agent):
         for agent_id in self.agent_ids:
             agents[agent_id] = self.load_participant_agent(agent_id=agent_id)
 
-        message_index = {}
+        message_index = {agent_id: 0 for agent_id in self.agent_ids}
         chat_history: List[Message] = []
         new_messages = messages
         speaker_id = None
@@ -91,7 +91,7 @@ class RoundRobinMultiAgent(Agent):
                 MessageCreate(
                     role="system",
                     content=message.content,
-                    name=participant_agent.agent_state.name,
+                    name=message.name,
                 )
                 for message in assistant_messages
             ]
@@ -138,10 +138,21 @@ class RoundRobinMultiAgent(Agent):
         agent_state = self.agent_manager.get_agent_by_id(agent_id=agent_id, actor=self.user)
         persona_block = agent_state.memory.get_block(label="persona")
         group_chat_participant_persona = (
-            "\n\n====Group Chat Contex===="
-            f"\nYou are speaking in a group chat with {len(self.agent_ids) - 1} other "
-            "agents and one user. Respond to new messages in the group chat when prompted. "
-            f"Description of the group: {self.description}"
+            f"%%% GROUP CHAT CONTEXT %%% "
+            f"You are speaking in a group chat with {len(self.agent_ids)} other participants. "
+            f"Group Description: {self.description} "
+            "INTERACTION GUIDELINES:\n"
+            "1. Be aware that others can see your messages - communicate as if in a real group conversation\n"
+            "2. Acknowledge and build upon others' contributions when relevant\n"
+            "3. Stay on topic while adding your unique perspective based on your role and personality\n"
+            "4. Be concise but engaging - give others space to contribute\n"
+            "5. Maintain your character's personality while being collaborative\n"
+            "6. Feel free to ask questions to other participants to encourage discussion\n"
+            "7. If someone addresses you directly, acknowledge their message\n"
+            "8. Share relevant experiences or knowledge that adds value to the conversation\n\n"
+            "Remember: This is a natural group conversation. Interact as you would in a real group setting, "
+            "staying true to your character while fostering meaningful dialogue. "
+            "%%% END GROUP CHAT CONTEXT %%%"
         )
         agent_state.memory.update_block_value(label="persona", value=persona_block.value + group_chat_participant_persona)
         return Agent(
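Note on the message_index change above: pre-seeding an entry per participant means the first per-agent lookup can no longer miss, which the old empty dict presumably allowed. A toy illustration (not letta code):

    agent_ids = ["agent-a", "agent-b"]
    message_index = {agent_id: 0 for agent_id in agent_ids}
    assert message_index["agent-a"] == 0  # an empty {} would raise KeyError here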
letta/schemas/group.py CHANGED
@@ -62,4 +62,10 @@ ManagerConfigUnion = Annotated[
 class GroupCreate(BaseModel):
     agent_ids: List[str] = Field(..., description="")
     description: str = Field(..., description="")
+    manager_config: ManagerConfigUnion = Field(RoundRobinManager(), description="")
+
+
+class GroupUpdate(BaseModel):
+    agent_ids: Optional[List[str]] = Field(None, description="")
+    description: Optional[str] = Field(None, description="")
     manager_config: Optional[ManagerConfigUnion] = Field(None, description="")
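Note: GroupCreate now defaults manager_config to RoundRobinManager(), and the new GroupUpdate schema makes every field optional so clients can patch a group partially. A usage sketch (imports assume the classes live in the diffed module):

    from letta.schemas.group import GroupCreate, GroupUpdate

    # Create: manager_config may be omitted and falls back to round-robin.
    create = GroupCreate(agent_ids=["agent-a", "agent-b"], description="support triage")

    # Update: supply only the fields being changed; the rest stay None.
    update = GroupUpdate(agent_ids=["agent-a", "agent-b", "agent-c"])
    assert update.description is None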
letta/schemas/message.py CHANGED
@@ -226,6 +226,7 @@ class Message(BaseMessage):
                     id=self.id,
                     date=self.created_at,
                     reasoning=self.content[0].text,
+                    name=self.name,
                 )
             )
         # Otherwise, we may have a list of multiple types
@@ -239,6 +240,7 @@ class Message(BaseMessage):
                             id=self.id,
                             date=self.created_at,
                             reasoning=content_part.text,
+                            name=self.name,
                         )
                     )
                 elif isinstance(content_part, ReasoningContent):
@@ -250,6 +252,7 @@ class Message(BaseMessage):
                             reasoning=content_part.reasoning,
                             source="reasoner_model",  # TODO do we want to tag like this?
                             signature=content_part.signature,
+                            name=self.name,
                         )
                     )
                 elif isinstance(content_part, RedactedReasoningContent):
@@ -260,6 +263,7 @@ class Message(BaseMessage):
                             date=self.created_at,
                             state="redacted",
                             hidden_reasoning=content_part.data,
+                            name=self.name,
                         )
                     )
                 else:
@@ -282,6 +286,7 @@ class Message(BaseMessage):
                         id=self.id,
                         date=self.created_at,
                         content=message_string,
+                        name=self.name,
                     )
                 )
             else:
@@ -294,6 +299,7 @@ class Message(BaseMessage):
                             arguments=tool_call.function.arguments,
                             tool_call_id=tool_call.id,
                         ),
+                        name=self.name,
                     )
                 )
         elif self.role == MessageRole.tool:
@@ -334,6 +340,7 @@ class Message(BaseMessage):
                     tool_call_id=self.tool_call_id,
                     stdout=self.tool_returns[0].stdout if self.tool_returns else None,
                     stderr=self.tool_returns[0].stderr if self.tool_returns else None,
+                    name=self.name,
                 )
             )
         elif self.role == MessageRole.user:
@@ -349,6 +356,7 @@ class Message(BaseMessage):
                     id=self.id,
                     date=self.created_at,
                     content=message_str or text_content,
+                    name=self.name,
                 )
             )
         elif self.role == MessageRole.system:
@@ -363,6 +371,7 @@ class Message(BaseMessage):
                     id=self.id,
                     date=self.created_at,
                     content=text_content,
+                    name=self.name,
                 )
             )
         else:
@@ -379,6 +388,8 @@ class Message(BaseMessage):
         allow_functions_style: bool = False,  # allow deprecated functions style?
         created_at: Optional[datetime] = None,
         id: Optional[str] = None,
+        name: Optional[str] = None,
+        group_id: Optional[str] = None,
         tool_returns: Optional[List[ToolReturn]] = None,
     ):
         """Convert a ChatCompletion message object into a Message object (synced to DB)"""
@@ -426,12 +437,13 @@ class Message(BaseMessage):
                 # standard fields expected in an OpenAI ChatCompletion message object
                 role=MessageRole.tool,  # NOTE
                 content=content,
-                name=openai_message_dict["name"] if "name" in openai_message_dict else None,
+                name=name,
                 tool_calls=openai_message_dict["tool_calls"] if "tool_calls" in openai_message_dict else None,
                 tool_call_id=openai_message_dict["tool_call_id"] if "tool_call_id" in openai_message_dict else None,
                 created_at=created_at,
                 id=str(id),
                 tool_returns=tool_returns,
+                group_id=group_id,
             )
         else:
             return Message(
@@ -440,11 +452,12 @@ class Message(BaseMessage):
                 # standard fields expected in an OpenAI ChatCompletion message object
                 role=MessageRole.tool,  # NOTE
                 content=content,
-                name=openai_message_dict["name"] if "name" in openai_message_dict else None,
+                name=name,
                 tool_calls=openai_message_dict["tool_calls"] if "tool_calls" in openai_message_dict else None,
                 tool_call_id=openai_message_dict["tool_call_id"] if "tool_call_id" in openai_message_dict else None,
                 created_at=created_at,
                 tool_returns=tool_returns,
+                group_id=group_id,
             )
 
         elif "function_call" in openai_message_dict and openai_message_dict["function_call"] is not None:
@@ -473,12 +486,13 @@ class Message(BaseMessage):
                 # standard fields expected in an OpenAI ChatCompletion message object
                 role=MessageRole(openai_message_dict["role"]),
                 content=content,
-                name=openai_message_dict["name"] if "name" in openai_message_dict else None,
+                name=name,
                 tool_calls=tool_calls,
                 tool_call_id=None,  # NOTE: None, since this field is only non-null for role=='tool'
                 created_at=created_at,
                 id=str(id),
                 tool_returns=tool_returns,
+                group_id=group_id,
             )
         else:
             return Message(
@@ -492,6 +506,7 @@ class Message(BaseMessage):
                 tool_call_id=None,  # NOTE: None, since this field is only non-null for role=='tool'
                 created_at=created_at,
                 tool_returns=tool_returns,
+                group_id=group_id,
             )
 
         else:
@@ -520,12 +535,13 @@ class Message(BaseMessage):
                 # standard fields expected in an OpenAI ChatCompletion message object
                 role=MessageRole(openai_message_dict["role"]),
                 content=content,
-                name=openai_message_dict["name"] if "name" in openai_message_dict else None,
+                name=name,
                 tool_calls=tool_calls,
                 tool_call_id=openai_message_dict["tool_call_id"] if "tool_call_id" in openai_message_dict else None,
                 created_at=created_at,
                 id=str(id),
                 tool_returns=tool_returns,
+                group_id=group_id,
             )
         else:
             return Message(
@@ -534,11 +550,12 @@ class Message(BaseMessage):
                 # standard fields expected in an OpenAI ChatCompletion message object
                 role=MessageRole(openai_message_dict["role"]),
                 content=content,
-                name=openai_message_dict["name"] if "name" in openai_message_dict else None,
+                name=name,
                 tool_calls=tool_calls,
                 tool_call_id=openai_message_dict["tool_call_id"] if "tool_call_id" in openai_message_dict else None,
                 created_at=created_at,
                 tool_returns=tool_returns,
+                group_id=group_id,
             )
 
     def to_openai_dict_search_results(self, max_tool_id_length: int = TOOL_CALL_ID_MAX_LEN) -> dict:
@@ -579,9 +596,6 @@ class Message(BaseMessage):
                 "content": text_content,
                 "role": self.role,
             }
-            # Optional field, do not include if null
-            if self.name is not None:
-                openai_message["name"] = self.name
 
         elif self.role == "user":
             assert all([v is not None for v in [text_content, self.role]]), vars(self)
@@ -589,9 +603,6 @@ class Message(BaseMessage):
                 "content": text_content,
                 "role": self.role,
             }
-            # Optional field, do not include if null
-            if self.name is not None:
-                openai_message["name"] = self.name
 
         elif self.role == "assistant":
             assert self.tool_calls is not None or text_content is not None
@@ -599,9 +610,7 @@ class Message(BaseMessage):
                 "content": None if put_inner_thoughts_in_kwargs else text_content,
                 "role": self.role,
             }
-            # Optional fields, do not include if null
-            if self.name is not None:
-                openai_message["name"] = self.name
+
             if self.tool_calls is not None:
                 if put_inner_thoughts_in_kwargs:
                     # put the inner thoughts inside the tool call before casting to a dict
letta/schemas/openai/chat_completion_request.py CHANGED
@@ -1,6 +1,6 @@
 from typing import Any, Dict, List, Literal, Optional, Union
 
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, field_validator
 
 
 class SystemMessage(BaseModel):
@@ -140,3 +140,8 @@ class ChatCompletionRequest(BaseModel):
     # deprecated scheme
     functions: Optional[List[FunctionSchema]] = None
     function_call: Optional[FunctionCallChoice] = None
+
+    @field_validator("messages", mode="before")
+    @classmethod
+    def cast_all_messages(cls, v):
+        return [cast_message_to_subtype(m) if isinstance(m, dict) else m for m in v]
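Note: because the validator runs in "before" mode, raw dicts in messages are cast to the module's typed message models (via cast_message_to_subtype) before normal field validation. A small sketch (UserMessage is assumed to be one of the message models defined alongside SystemMessage in this module):

    from letta.schemas.openai.chat_completion_request import ChatCompletionRequest, UserMessage

    req = ChatCompletionRequest(
        model="gpt-4o-mini",  # illustrative model name
        messages=[{"role": "user", "content": "hello"}],  # raw dict, not a model
    )
    assert isinstance(req.messages[0], UserMessage)  # cast by the validator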
letta/schemas/providers.py CHANGED
@@ -1120,7 +1120,7 @@ class GoogleAIProvider(Provider):
     base_url: str = "https://generativelanguage.googleapis.com"
 
     def list_llm_models(self):
-        from letta.llm_api.google_ai import google_ai_get_model_list
+        from letta.llm_api.google_ai_client import google_ai_get_model_list
 
         model_options = google_ai_get_model_list(base_url=self.base_url, api_key=self.api_key)
         # filter by 'generateContent' models
@@ -1149,7 +1149,7 @@ class GoogleAIProvider(Provider):
         return configs
 
     def list_embedding_models(self):
-        from letta.llm_api.google_ai import google_ai_get_model_list
+        from letta.llm_api.google_ai_client import google_ai_get_model_list
 
         # TODO: use base_url instead
         model_options = google_ai_get_model_list(base_url=self.base_url, api_key=self.api_key)
@@ -1173,7 +1173,7 @@ class GoogleAIProvider(Provider):
         return configs
 
     def get_model_context_window(self, model_name: str) -> Optional[int]:
-        from letta.llm_api.google_ai import google_ai_get_model_context_window
+        from letta.llm_api.google_ai_client import google_ai_get_model_context_window
 
         return google_ai_get_model_context_window(self.base_url, self.api_key, model_name)
 
letta/serialize_schemas/marshmallow_agent.py CHANGED
@@ -27,11 +27,12 @@ class MarshmallowAgentSchema(BaseSchema):
     FIELD_VERSION = "version"
     FIELD_MESSAGES = "messages"
     FIELD_MESSAGE_IDS = "message_ids"
-    FIELD_IN_CONTEXT = "in_context"
+    FIELD_IN_CONTEXT_INDICES = "in_context_message_indices"
     FIELD_ID = "id"
 
     llm_config = LLMConfigField()
     embedding_config = EmbeddingConfigField()
+
     tool_rules = ToolRulesField()
 
     messages = fields.List(fields.Nested(SerializedMessageSchema))
@@ -58,20 +59,39 @@ class MarshmallowAgentSchema(BaseSchema):
         """
         - Removes `message_ids`
         - Adds versioning
-        - Marks messages as in-context
+        - Marks messages as in-context, preserving the order of the original `message_ids`
         - Removes individual message `id` fields
         """
         data = super().sanitize_ids(data, **kwargs)
         data[self.FIELD_VERSION] = letta.__version__
 
-        message_ids = set(data.pop(self.FIELD_MESSAGE_IDS, []))  # Store and remove message_ids
+        original_message_ids = data.pop(self.FIELD_MESSAGE_IDS, [])
+        messages = data.get(self.FIELD_MESSAGES, [])
+
+        # Build a mapping from message id to its first occurrence index and remove the id in one pass
+        id_to_index = {}
+        for idx, message in enumerate(messages):
+            msg_id = message.pop(self.FIELD_ID, None)
+            if msg_id is not None and msg_id not in id_to_index:
+                id_to_index[msg_id] = idx
+
+        # Build in-context indices in the same order as the original message_ids
+        in_context_indices = [id_to_index[msg_id] for msg_id in original_message_ids if msg_id in id_to_index]
 
-        for message in data.get(self.FIELD_MESSAGES, []):
-            message[self.FIELD_IN_CONTEXT] = message[self.FIELD_ID] in message_ids  # Mark messages as in-context
-            message.pop(self.FIELD_ID, None)  # Remove the id field
+        data[self.FIELD_IN_CONTEXT_INDICES] = in_context_indices
+        data[self.FIELD_MESSAGES] = messages
 
         return data
 
+    @post_dump
+    def hide_tool_exec_environment_variables(self, data: Dict, **kwargs):
+        """Hide the value of tool_exec_environment_variables"""
+
+        for env_var in data.get("tool_exec_environment_variables", []):
+            # need to be re-set at load time
+            env_var["value"] = ""
+        return data
+
     @pre_load
     def check_version(self, data, **kwargs):
         """Check version and remove it from the schema"""
@@ -87,13 +107,17 @@ class MarshmallowAgentSchema(BaseSchema):
         Restores `message_ids` by collecting message IDs where `in_context` is True,
         generates new IDs for all messages, and removes `in_context` from all messages.
         """
-        message_ids = []
-        for msg in data.get(self.FIELD_MESSAGES, []):
+        messages = data.get(self.FIELD_MESSAGES, [])
+        for msg in messages:
             msg[self.FIELD_ID] = SerializedMessageSchema.generate_id()  # Generate new ID
-            if msg.pop(self.FIELD_IN_CONTEXT, False):  # If it was in-context, track its new ID
-                message_ids.append(msg[self.FIELD_ID])
+
+        message_ids = []
+        in_context_message_indices = data.pop(self.FIELD_IN_CONTEXT_INDICES)
+        for idx in in_context_message_indices:
+            message_ids.append(messages[idx][self.FIELD_ID])
 
         data[self.FIELD_MESSAGE_IDS] = message_ids
+
         return data
 
     class Meta(BaseSchema.Meta):
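Note: the dump/load pair above replaces the per-message in_context flag with a list of positions, which survives the ID regeneration done at load time because positions, not IDs, are persisted. A standalone sketch of the invariant:

    # Dump side: record positions of in-context messages, in original order.
    messages = [{"id": "m-1"}, {"id": "m-2"}, {"id": "m-3"}]
    message_ids = ["m-3", "m-1"]  # in-context, order is meaningful
    id_to_index = {m["id"]: i for i, m in enumerate(messages)}
    indices = [id_to_index[mid] for mid in message_ids if mid in id_to_index]  # [2, 0]

    # Load side: every message gets a fresh ID, yet positions still resolve.
    for i, m in enumerate(messages):
        m["id"] = f"fresh-{i}"
    assert [messages[i]["id"] for i in indices] == ["fresh-2", "fresh-0"]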
letta/serialize_schemas/pydantic_agent_schema.py CHANGED
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Union
 
 from pydantic import BaseModel, Field
 
@@ -22,7 +22,6 @@ class CoreMemoryBlockSchema(BaseModel):
 class MessageSchema(BaseModel):
     created_at: str
     group_id: Optional[str]
-    in_context: bool
     model: Optional[str]
     name: Optional[str]
     role: str
@@ -45,11 +44,31 @@ class ToolEnvVarSchema(BaseModel):
     value: str
 
 
-class ToolRuleSchema(BaseModel):
+# Tool rules
+
+
+class BaseToolRuleSchema(BaseModel):
     tool_name: str
     type: str
 
 
+class ChildToolRuleSchema(BaseToolRuleSchema):
+    children: List[str]
+
+
+class MaxCountPerStepToolRuleSchema(BaseToolRuleSchema):
+    max_count_limit: int
+
+
+class ConditionalToolRuleSchema(BaseToolRuleSchema):
+    default_child: Optional[str]
+    child_output_mapping: Dict[Any, str]
+    require_output_mapping: bool
+
+
+ToolRuleSchema = Union[BaseToolRuleSchema, ChildToolRuleSchema, MaxCountPerStepToolRuleSchema, ConditionalToolRuleSchema]
+
+
 class ParameterProperties(BaseModel):
     type: str
     description: Optional[str] = None
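Note: splitting the single ToolRuleSchema into per-type models lets each rule variant declare its own required fields, with the old name re-exported as a Union. A construction sketch (the type strings are illustrative, not values enforced by these schemas):

    rule = MaxCountPerStepToolRuleSchema(
        tool_name="send_message",
        type="max_count_per_step",
        max_count_limit=3,
    )
    conditional = ConditionalToolRuleSchema(
        tool_name="route_request",
        type="conditional",
        default_child=None,
        child_output_mapping={"yes": "escalate", "no": "resolve"},
        require_output_mapping=False,
    )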
@@ -92,6 +111,7 @@ class AgentSchema(BaseModel):
     embedding_config: EmbeddingConfig
     llm_config: LLMConfig
     message_buffer_autoclear: bool
+    in_context_message_indices: List[int]
     messages: List[MessageSchema]
     metadata_: Optional[Dict] = None
     multi_agent_group: Optional[Any]
letta/server/rest_api/app.py CHANGED
@@ -1,3 +1,5 @@
+import asyncio
+import concurrent.futures
 import json
 import logging
 import os
@@ -135,6 +137,13 @@ def create_application() -> "FastAPI":
135
137
  debug=debug_mode, # if True, the stack trace will be printed in the response
136
138
  )
137
139
 
140
+ @app.on_event("startup")
141
+ async def configure_executor():
142
+ print(f"Configured event loop executor with {settings.event_loop_threadpool_max_workers} workers.")
143
+ loop = asyncio.get_running_loop()
144
+ executor = concurrent.futures.ThreadPoolExecutor(max_workers=settings.event_loop_threadpool_max_workers)
145
+ loop.set_default_executor(executor)
146
+
138
147
  @app.on_event("shutdown")
139
148
  def shutdown_mcp_clients():
140
149
  global server
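Note: the startup hook swaps the event loop's default ThreadPoolExecutor (which Python otherwise sizes at min(32, os.cpu_count() + 4)) for one sized by the new event_loop_threadpool_max_workers setting (see the letta/settings.py change in this release). It affects any code that offloads blocking work via the default executor, roughly:

    import asyncio

    def blocking_call() -> str:
        return "done"  # stand-in for blocking DB/file/network work

    async def handler() -> str:
        loop = asyncio.get_running_loop()
        # Passing None selects the default executor -- the pool configured at startup
        return await loop.run_in_executor(None, blocking_call)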