letta-nightly 0.6.43.dev20250324104208__py3-none-any.whl → 0.6.44.dev20250325104221__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of letta-nightly might be problematic.

Files changed (42)
  1. letta/__init__.py +1 -1
  2. letta/agent.py +106 -104
  3. letta/agents/voice_agent.py +1 -1
  4. letta/client/streaming.py +3 -1
  5. letta/functions/function_sets/base.py +2 -1
  6. letta/functions/function_sets/multi_agent.py +51 -40
  7. letta/functions/helpers.py +26 -22
  8. letta/helpers/message_helper.py +41 -0
  9. letta/llm_api/anthropic.py +150 -44
  10. letta/llm_api/aws_bedrock.py +5 -3
  11. letta/llm_api/azure_openai.py +0 -1
  12. letta/llm_api/llm_api_tools.py +4 -0
  13. letta/orm/organization.py +1 -0
  14. letta/orm/sqlalchemy_base.py +2 -4
  15. letta/schemas/agent.py +8 -0
  16. letta/schemas/letta_message.py +8 -4
  17. letta/schemas/llm_config.py +6 -0
  18. letta/schemas/message.py +143 -24
  19. letta/schemas/openai/chat_completion_response.py +5 -0
  20. letta/schemas/organization.py +7 -0
  21. letta/schemas/providers.py +17 -0
  22. letta/schemas/tool.py +5 -1
  23. letta/schemas/usage.py +5 -1
  24. letta/serialize_schemas/pydantic_agent_schema.py +1 -1
  25. letta/server/rest_api/interface.py +44 -7
  26. letta/server/rest_api/routers/v1/agents.py +13 -2
  27. letta/server/rest_api/routers/v1/organizations.py +19 -1
  28. letta/server/rest_api/utils.py +1 -1
  29. letta/server/server.py +49 -70
  30. letta/services/agent_manager.py +6 -2
  31. letta/services/helpers/agent_manager_helper.py +24 -38
  32. letta/services/message_manager.py +7 -6
  33. letta/services/organization_manager.py +13 -0
  34. letta/services/tool_execution_sandbox.py +5 -1
  35. letta/services/tool_executor/__init__.py +0 -0
  36. letta/services/tool_executor/tool_execution_manager.py +74 -0
  37. letta/services/tool_executor/tool_executor.py +380 -0
  38. {letta_nightly-0.6.43.dev20250324104208.dist-info → letta_nightly-0.6.44.dev20250325104221.dist-info}/METADATA +2 -3
  39. {letta_nightly-0.6.43.dev20250324104208.dist-info → letta_nightly-0.6.44.dev20250325104221.dist-info}/RECORD +42 -38
  40. {letta_nightly-0.6.43.dev20250324104208.dist-info → letta_nightly-0.6.44.dev20250325104221.dist-info}/LICENSE +0 -0
  41. {letta_nightly-0.6.43.dev20250324104208.dist-info → letta_nightly-0.6.44.dev20250325104221.dist-info}/WHEEL +0 -0
  42. {letta_nightly-0.6.43.dev20250324104208.dist-info → letta_nightly-0.6.44.dev20250325104221.dist-info}/entry_points.txt +0 -0
letta/server/server.py CHANGED
@@ -26,6 +26,7 @@ from letta.functions.mcp_client.stdio_client import StdioMCPClient
  from letta.functions.mcp_client.types import MCPServerType, MCPTool, SSEServerConfig, StdioServerConfig
  from letta.helpers.datetime_helpers import get_utc_time
  from letta.helpers.json_helpers import json_dumps, json_loads
+ from letta.helpers.message_helper import prepare_input_message_create

  # TODO use custom interface
  from letta.interface import AgentInterface  # abstract
@@ -48,7 +49,7 @@ from letta.schemas.letta_message_content import TextContent
  from letta.schemas.letta_response import LettaResponse
  from letta.schemas.llm_config import LLMConfig
  from letta.schemas.memory import ArchivalMemorySummary, ContextWindowOverview, Memory, RecallMemorySummary
- from letta.schemas.message import Message, MessageCreate, MessageRole, MessageUpdate
+ from letta.schemas.message import Message, MessageCreate, MessageUpdate
  from letta.schemas.organization import Organization
  from letta.schemas.passage import Passage, PassageUpdate
  from letta.schemas.providers import (
@@ -85,7 +86,6 @@ from letta.services.job_manager import JobManager
  from letta.services.message_manager import MessageManager
  from letta.services.organization_manager import OrganizationManager
  from letta.services.passage_manager import PassageManager
- from letta.services.per_agent_lock_manager import PerAgentLockManager
  from letta.services.provider_manager import ProviderManager
  from letta.services.sandbox_config_manager import SandboxConfigManager
  from letta.services.source_manager import SourceManager
@@ -210,9 +210,6 @@ class SyncServer(Server):
          self.identity_manager = IdentityManager()
          self.group_manager = GroupManager()

-         # Managers that interface with parallelism
-         self.per_agent_lock_manager = PerAgentLockManager()
-
          # Make default user and org
          if init_with_default_org_and_user:
              self.default_org = self.organization_manager.create_default_organization()
@@ -353,21 +350,19 @@ class SyncServer(Server):

      def load_agent(self, agent_id: str, actor: User, interface: Union[AgentInterface, None] = None) -> Agent:
          """Updated method to load agents from persisted storage"""
-         agent_lock = self.per_agent_lock_manager.get_lock(agent_id)
-         with agent_lock:
-             agent_state = self.agent_manager.get_agent_by_id(agent_id=agent_id, actor=actor)
-             if agent_state.multi_agent_group:
-                 return self.load_multi_agent(agent_state.multi_agent_group, actor, interface, agent_state)
-
-             interface = interface or self.default_interface_factory()
-             if agent_state.agent_type == AgentType.memgpt_agent:
-                 agent = Agent(agent_state=agent_state, interface=interface, user=actor, mcp_clients=self.mcp_clients)
-             elif agent_state.agent_type == AgentType.offline_memory_agent:
-                 agent = OfflineMemoryAgent(agent_state=agent_state, interface=interface, user=actor)
-             else:
-                 raise ValueError(f"Invalid agent type {agent_state.agent_type}")
+         agent_state = self.agent_manager.get_agent_by_id(agent_id=agent_id, actor=actor)
+         if agent_state.multi_agent_group:
+             return self.load_multi_agent(agent_state.multi_agent_group, actor, interface, agent_state)
+
+         interface = interface or self.default_interface_factory()
+         if agent_state.agent_type == AgentType.memgpt_agent:
+             agent = Agent(agent_state=agent_state, interface=interface, user=actor, mcp_clients=self.mcp_clients)
+         elif agent_state.agent_type == AgentType.offline_memory_agent:
+             agent = OfflineMemoryAgent(agent_state=agent_state, interface=interface, user=actor)
+         else:
+             raise ValueError(f"Invalid agent type {agent_state.agent_type}")

-             return agent
+         return agent

      def load_multi_agent(
          self, group: Group, actor: User, interface: Union[AgentInterface, None] = None, agent_state: Optional[AgentState] = None
@@ -702,63 +697,22 @@
          actor: User,
          agent_id: str,
          messages: Union[List[MessageCreate], List[Message]],
-         # whether or not to wrap user and system message as MemGPT-style stringified JSON
          wrap_user_message: bool = True,
          wrap_system_message: bool = True,
-         interface: Union[AgentInterface, ChatCompletionsStreamingInterface, None] = None,  # needed to getting responses
+         interface: Union[AgentInterface, ChatCompletionsStreamingInterface, None] = None,  # needed for responses
          metadata: Optional[dict] = None,  # Pass through metadata to interface
          put_inner_thoughts_first: bool = True,
      ) -> LettaUsageStatistics:
-         """Send a list of messages to the agent
+         """Send a list of messages to the agent.

-         If the messages are of type MessageCreate, we need to turn them into
-         Message objects first before sending them through step.
-
-         Otherwise, we can pass them in directly.
+         If messages are of type MessageCreate, convert them to Message objects before sending.
          """
-         message_objects: List[Message] = []
-
          if all(isinstance(m, MessageCreate) for m in messages):
-             for message in messages:
-                 assert isinstance(message, MessageCreate)
-
-                 # If wrapping is enabled, wrap with metadata before placing content inside the Message object
-                 if isinstance(message.content, str):
-                     message_content = message.content
-                 elif message.content and len(message.content) > 0 and isinstance(message.content[0], TextContent):
-                     message_content = message.content[0].text
-                 else:
-                     assert message_content is not None, "Message content is empty"
-
-                 if message.role == MessageRole.user and wrap_user_message:
-                     message_content = system.package_user_message(user_message=message_content)
-                 elif message.role == MessageRole.system and wrap_system_message:
-                     message_content = system.package_system_message(system_message=message_content)
-                 else:
-                     raise ValueError(f"Invalid message role: {message.role}")
-
-                 # Create the Message object
-                 message_objects.append(
-                     Message(
-                         agent_id=agent_id,
-                         role=message.role,
-                         content=[TextContent(text=message_content)] if message_content else [],
-                         name=message.name,
-                         # assigned later?
-                         model=None,
-                         # irrelevant
-                         tool_calls=None,
-                         tool_call_id=None,
-                     )
-                 )
-
+             message_objects = [prepare_input_message_create(m, agent_id, wrap_user_message, wrap_system_message) for m in messages]
          elif all(isinstance(m, Message) for m in messages):
-             for message in messages:
-                 assert isinstance(message, Message)
-                 message_objects.append(message)
-
+             message_objects = messages
          else:
-             raise ValueError(f"All messages must be of type Message or MessageCreate, got {[type(message) for message in messages]}")
+             raise ValueError(f"All messages must be of type Message or MessageCreate, got {[type(m) for m in messages]}")

          # Store metadata in interface if provided
          if metadata and hasattr(interface, "metadata"):
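The new helper lives in letta/helpers/message_helper.py (+41 lines), which this diff does not show. Based on the call site above and the inlined logic it replaces, a rough sketch of what prepare_input_message_create likely looks like (import paths and the behavior for non-wrapped roles are assumptions, not taken from the package):

# Hypothetical reconstruction of letta/helpers/message_helper.py, inferred from the removed code above.
from letta import system
from letta.schemas.letta_message_content import TextContent
from letta.schemas.message import Message, MessageCreate, MessageRole


def prepare_input_message_create(
    message: MessageCreate,
    agent_id: str,
    wrap_user_message: bool = True,
    wrap_system_message: bool = True,
) -> Message:
    """Convert a MessageCreate into a persistable Message, optionally wrapping its text."""
    # Extract raw text from either a plain string or a TextContent list
    if isinstance(message.content, str):
        message_content = message.content
    elif message.content and isinstance(message.content[0], TextContent):
        message_content = message.content[0].text
    else:
        raise ValueError("Message content is empty")

    # Wrap user/system text in the MemGPT-style stringified JSON envelope when requested
    if message.role == MessageRole.user and wrap_user_message:
        message_content = system.package_user_message(user_message=message_content)
    elif message.role == MessageRole.system and wrap_system_message:
        message_content = system.package_system_message(system_message=message_content)

    return Message(
        agent_id=agent_id,
        role=message.role,
        content=[TextContent(text=message_content)] if message_content else [],
        name=message.name,
        model=None,  # assigned later
        tool_calls=None,  # not applicable to input messages
        tool_call_id=None,
    )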
@@ -792,7 +746,13 @@
          if request.llm_config is None:
              if request.model is None:
                  raise ValueError("Must specify either model or llm_config in request")
-             request.llm_config = self.get_llm_config_from_handle(handle=request.model, context_window_limit=request.context_window_limit)
+             request.llm_config = self.get_llm_config_from_handle(
+                 handle=request.model,
+                 context_window_limit=request.context_window_limit,
+                 max_tokens=request.max_tokens,
+                 max_reasoning_tokens=request.max_reasoning_tokens,
+                 enable_reasoner=request.enable_reasoner,
+             )

          if request.embedding_config is None:
              if request.embedding is None:
@@ -830,6 +790,8 @@
          limit: Optional[int] = 100,
          order_by: Optional[str] = "created_at",
          reverse: Optional[bool] = False,
+         query_text: Optional[str] = None,
+         ascending: Optional[bool] = True,
      ) -> List[Passage]:
          # TODO: Thread actor directly through this function, since the top level caller most likely already retrieved the user
          actor = self.user_manager.get_user_or_default(user_id=user_id)
@@ -839,9 +801,10 @@
              actor=actor,
              agent_id=agent_id,
              after=after,
+             query_text=query_text,
              before=before,
+             ascending=ascending,
              limit=limit,
-             ascending=not reverse,
          )
          return records

@@ -1099,7 +1062,14 @@
          # Merge the two dictionaries, keeping the values from providers_from_db where conflicts occur
          return {**providers_from_env, **providers_from_db}.values()

-     def get_llm_config_from_handle(self, handle: str, context_window_limit: Optional[int] = None) -> LLMConfig:
+     def get_llm_config_from_handle(
+         self,
+         handle: str,
+         context_window_limit: Optional[int] = None,
+         max_tokens: Optional[int] = None,
+         max_reasoning_tokens: Optional[int] = None,
+         enable_reasoner: Optional[bool] = None,
+     ) -> LLMConfig:
          try:
              provider_name, model_name = handle.split("/", 1)
              provider = self.get_provider_from_name(provider_name)
@@ -1121,13 +1091,22 @@
          else:
              llm_config = llm_configs[0]

-         if context_window_limit:
+         if context_window_limit is not None:
              if context_window_limit > llm_config.context_window:
                  raise ValueError(f"Context window limit ({context_window_limit}) is greater than maximum of ({llm_config.context_window})")
              llm_config.context_window = context_window_limit
          else:
              llm_config.context_window = min(llm_config.context_window, model_settings.global_max_context_window_limit)

+         if max_tokens is not None:
+             llm_config.max_tokens = max_tokens
+         if max_reasoning_tokens is not None:
+             if not max_tokens or max_reasoning_tokens > max_tokens:
+                 raise ValueError(f"Max reasoning tokens ({max_reasoning_tokens}) must be less than max tokens ({max_tokens})")
+             llm_config.max_reasoning_tokens = max_reasoning_tokens
+         if enable_reasoner is not None:
+             llm_config.enable_reasoner = enable_reasoner
+
          return llm_config

      def get_embedding_config_from_handle(
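A hedged usage sketch of the expanded handle resolution; the server instance and the handle string below are illustrative:

# Illustrative only: resolve an LLMConfig from a "provider/model" handle with the
# new reasoning-related overrides. `server` is an existing SyncServer instance.
llm_config = server.get_llm_config_from_handle(
    handle="anthropic/claude-3-7-sonnet-20250219",  # placeholder handle
    context_window_limit=32_000,     # must not exceed the model's maximum context window
    max_tokens=8_192,                # copied onto llm_config.max_tokens
    max_reasoning_tokens=4_096,      # requires max_tokens and must not exceed it
    enable_reasoner=True,            # sets llm_config.enable_reasoner
)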
letta/services/agent_manager.py CHANGED
@@ -349,6 +349,7 @@ class AgentManager:
          identity_id: Optional[str] = None,
          identifier_keys: Optional[List[str]] = None,
          include_relationships: Optional[List[str]] = None,
+         ascending: bool = True,
      ) -> List[PydanticAgentState]:
          """
          Retrieves agents with optimized filtering and optional field selection.
@@ -368,6 +369,7 @@
              identity_id (Optional[str]): Filter by identifier ID.
              identifier_keys (Optional[List[str]]): Search agents by identifier keys.
              include_relationships (Optional[List[str]]): List of fields to load for performance optimization.
+             ascending

          Returns:
              List[PydanticAgentState]: The filtered list of matching agents.
@@ -380,7 +382,7 @@
          query = _apply_filters(query, name, query_text, project_id, template_id, base_template_id)
          query = _apply_identity_filters(query, identity_id, identifier_keys)
          query = _apply_tag_filter(query, tags, match_all_tags)
-         query = _apply_pagination(query, before, after, session)
+         query = _apply_pagination(query, before, after, session, ascending=ascending)

          query = query.limit(limit)

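The listing method's name is truncated in this hunk; assuming it is the AgentManager agent-listing entry point (e.g. list_agents), the new flag would be used roughly as follows:

# Illustrative only: page through agents newest-first; method and parameter names other
# than `ascending` are assumed from context rather than shown in this diff.
recent_agents = agent_manager.list_agents(
    actor=actor,
    limit=10,
    ascending=False,  # descending (created_at, id) order; pagination cursors follow the same direction
)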
@@ -639,7 +641,7 @@

          diff = united_diff(curr_system_message_openai["content"], new_system_message_str)
          if len(diff) > 0:  # there was a diff
-             logger.info(f"Rebuilding system with new memory...\nDiff:\n{diff}")
+             logger.debug(f"Rebuilding system with new memory...\nDiff:\n{diff}")

              # Swap the system message out (only if there is a diff)
              message = PydanticMessage.dict_to_message(
@@ -742,6 +744,8 @@
          Update internal memory object and system prompt if there have been modifications.

          Args:
+             actor:
+             agent_id:
              new_memory (Memory): the new memory object to compare to the current memory object

          Returns:
letta/services/helpers/agent_manager_helper.py CHANGED
@@ -1,7 +1,7 @@
  import datetime
  from typing import List, Literal, Optional

- from sqlalchemy import and_, func, literal, or_, select
+ from sqlalchemy import and_, asc, desc, func, literal, or_, select

  from letta import system
  from letta.constants import IN_CONTEXT_MEMORY_KEYWORD, STRUCTURED_OUTPUT_MODELS
@@ -299,55 +299,41 @@ def check_supports_structured_output(model: str, tool_rules: List[ToolRule]) ->
      return True


- def _apply_pagination(query, before: Optional[str], after: Optional[str], session) -> any:
+ def _cursor_filter(created_at_col, id_col, ref_created_at, ref_id, forward: bool):
      """
-     Apply cursor-based pagination filters using the agent's created_at timestamp with id as a tie-breaker.
+     Returns a SQLAlchemy filter expression for cursor-based pagination.

-     Instead of relying on the UUID ordering, this function uses the agent's creation time
-     (and id for tie-breaking) to paginate the results. It performs a minimal lookup to fetch
-     only the created_at and id for the agent corresponding to the provided cursor.
+     If `forward` is True, returns records after the reference.
+     If `forward` is False, returns records before the reference.
+     """
+     if forward:
+         return or_(
+             created_at_col > ref_created_at,
+             and_(created_at_col == ref_created_at, id_col > ref_id),
+         )
+     else:
+         return or_(
+             created_at_col < ref_created_at,
+             and_(created_at_col == ref_created_at, id_col < ref_id),
+         )

-     Args:
-         query: The SQLAlchemy query object to modify.
-         before (Optional[str]): Cursor (agent id) to return agents created before this agent.
-         after (Optional[str]): Cursor (agent id) to return agents created after this agent.
-         session: The active database session used to execute the minimal lookup.

-     Returns:
-         The modified query with pagination filters applied and ordered by created_at and id.
-     """
+ def _apply_pagination(query, before: Optional[str], after: Optional[str], session, ascending: bool = True) -> any:
      if after:
-         # Retrieve only the created_at and id for the agent corresponding to the 'after' cursor.
          result = session.execute(select(AgentModel.created_at, AgentModel.id).where(AgentModel.id == after)).first()
          if result:
              after_created_at, after_id = result
-             # Filter: include agents created after the reference, or at the same time but with a greater id.
-             query = query.where(
-                 or_(
-                     AgentModel.created_at > after_created_at,
-                     and_(
-                         AgentModel.created_at == after_created_at,
-                         AgentModel.id > after_id,
-                     ),
-                 )
-             )
+             query = query.where(_cursor_filter(AgentModel.created_at, AgentModel.id, after_created_at, after_id, forward=ascending))
+
      if before:
-         # Retrieve only the created_at and id for the agent corresponding to the 'before' cursor.
          result = session.execute(select(AgentModel.created_at, AgentModel.id).where(AgentModel.id == before)).first()
          if result:
              before_created_at, before_id = result
-             # Filter: include agents created before the reference, or at the same time but with a smaller id.
-             query = query.where(
-                 or_(
-                     AgentModel.created_at < before_created_at,
-                     and_(
-                         AgentModel.created_at == before_created_at,
-                         AgentModel.id < before_id,
-                     ),
-                 )
-             )
-     # Enforce a deterministic ordering: first by created_at, then by id.
-     query = query.order_by(AgentModel.created_at.asc(), AgentModel.id.asc())
+             query = query.where(_cursor_filter(AgentModel.created_at, AgentModel.id, before_created_at, before_id, forward=not ascending))
+
+     # Apply ordering
+     order_fn = asc if ascending else desc
+     query = query.order_by(order_fn(AgentModel.created_at), order_fn(AgentModel.id))
      return query

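For context, a minimal sketch of how the refactored helper composes; the query construction, cursor id, and session are illustrative rather than lifted from the package:

# Illustrative only: paging backwards (newest first) from a cursor agent id.
query = select(AgentModel)
query = _apply_pagination(query, before=None, after="agent-123", session=session, ascending=False)
# With ascending=False, the `after` cursor keeps rows *older* than the reference row
# (forward=ascending is False), and ordering becomes (created_at DESC, id DESC).
query = query.limit(10)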
letta/services/message_manager.py CHANGED
@@ -1,5 +1,5 @@
  import json
- from typing import List, Optional
+ from typing import List, Optional, Sequence

  from sqlalchemy import and_, exists, func, or_, select, text

@@ -248,7 +248,7 @@ class MessageManager:
              after=after,
              before=before,
              query_text=query_text,
-             role=MessageRole.user,
+             roles=[MessageRole.user],
              limit=limit,
              ascending=ascending,
          )
@@ -261,7 +261,7 @@
          after: Optional[str] = None,
          before: Optional[str] = None,
          query_text: Optional[str] = None,
-         role: Optional[MessageRole] = None,  # New parameter for filtering by role
+         roles: Optional[Sequence[MessageRole]] = None,
          limit: Optional[int] = 50,
          ascending: bool = True,
      ) -> List[PydanticMessage]:
@@ -279,7 +279,7 @@
              after: A message ID; if provided, only messages *after* this message (per sort order) are returned.
              before: A message ID; if provided, only messages *before* this message are returned.
              query_text: Optional string to partially match the message text content.
-             role: Optional MessageRole to filter messages by role.
+             roles: Optional MessageRole to filter messages by role.
              limit: Maximum number of messages to return.
              ascending: If True, sort by (created_at, id) ascending; if False, sort descending.

@@ -309,8 +309,9 @@
          )

          # If role is provided, filter messages by role.
-         if role:
-             query = query.filter(MessageModel.role == role.value)  # Enum.value ensures comparison is against the string value
+         if roles:
+             role_values = [r.value for r in roles]
+             query = query.filter(MessageModel.role.in_(role_values))

          # Apply 'after' pagination if specified.
          if after:
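A hedged usage sketch of the widened filter; the listing method's name, its other parameters, and the import path are assumptions based on the call site in the first hunk rather than shown here:

# Illustrative only: fetch both user and assistant messages in one query.
from letta.schemas.enums import MessageRole  # import path assumed

messages = message_manager.list_messages_for_agent(
    agent_id=agent_id,
    actor=actor,
    roles=[MessageRole.user, MessageRole.assistant],  # translated to role.in_([...]) above
    limit=50,
    ascending=False,
)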
letta/services/organization_manager.py CHANGED
@@ -3,6 +3,7 @@ from typing import List, Optional
  from letta.orm.errors import NoResultFound
  from letta.orm.organization import Organization as OrganizationModel
  from letta.schemas.organization import Organization as PydanticOrganization
+ from letta.schemas.organization import OrganizationUpdate
  from letta.utils import enforce_types

@@ -63,6 +64,18 @@ class OrganizationManager:
              org.update(session)
              return org.to_pydantic()

+     @enforce_types
+     def update_organization(self, org_id: str, org_update: OrganizationUpdate) -> PydanticOrganization:
+         """Update an organization."""
+         with self.session_maker() as session:
+             org = OrganizationModel.read(db_session=session, identifier=org_id)
+             if org_update.name:
+                 org.name = org_update.name
+             if org_update.privileged_tools:
+                 org.privileged_tools = org_update.privileged_tools
+             org.update(session)
+             return org.to_pydantic()
+
      @enforce_types
      def delete_organization_by_id(self, org_id: str):
          """Delete an organization by marking it as deleted."""
letta/services/tool_execution_sandbox.py CHANGED
@@ -23,6 +23,7 @@ from letta.services.helpers.tool_execution_helper import (
      find_python_executable,
      install_pip_requirements_for_sandbox,
  )
+ from letta.services.organization_manager import OrganizationManager
  from letta.services.sandbox_config_manager import SandboxConfigManager
  from letta.services.tool_manager import ToolManager
  from letta.settings import tool_settings
@@ -50,6 +51,9 @@ class ToolExecutionSandbox:
          self.tool_name = tool_name
          self.args = args
          self.user = user
+         # get organization
+         self.organization = OrganizationManager().get_organization_by_id(self.user.organization_id)
+         self.privileged_tools = self.organization.privileged_tools

          # If a tool object is provided, we use it directly, otherwise pull via name
          if tool_object is not None:
@@ -79,7 +83,7 @@ class ToolExecutionSandbox:
          Returns:
              Tuple[Any, Optional[AgentState]]: Tuple containing (tool_result, agent_state)
          """
-         if tool_settings.e2b_api_key:
+         if tool_settings.e2b_api_key and not self.privileged_tools:
              logger.debug(f"Using e2b sandbox to execute {self.tool_name}")
              result = self.run_e2b_sandbox(agent_state=agent_state, additional_env_vars=additional_env_vars)
          else:
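The practical effect is that organizations flagged with privileged_tools bypass the e2b sandbox even when an e2b API key is configured; a rough sketch of the decision, with the local fallback method name assumed from context rather than shown in this hunk:

# Illustrative decision mirroring the hunk above; the else-branch method name is an assumption.
if tool_settings.e2b_api_key and not self.privileged_tools:
    result = self.run_e2b_sandbox(agent_state=agent_state, additional_env_vars=additional_env_vars)
else:
    result = self.run_local_dir_sandbox(agent_state=agent_state, additional_env_vars=additional_env_vars)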
letta/services/tool_executor/__init__.py ADDED
File without changes
letta/services/tool_executor/tool_execution_manager.py ADDED
@@ -0,0 +1,74 @@
+ from typing import Any, Dict, Optional, Tuple, Type
+
+ from letta.log import get_logger
+ from letta.orm.enums import ToolType
+ from letta.schemas.agent import AgentState
+ from letta.schemas.sandbox_config import SandboxRunResult
+ from letta.schemas.tool import Tool
+ from letta.schemas.user import User
+ from letta.services.tool_executor.tool_executor import (
+     ExternalComposioToolExecutor,
+     ExternalMCPToolExecutor,
+     LettaCoreToolExecutor,
+     LettaMemoryToolExecutor,
+     LettaMultiAgentToolExecutor,
+     SandboxToolExecutor,
+     ToolExecutor,
+ )
+ from letta.utils import get_friendly_error_msg
+
+
+ class ToolExecutorFactory:
+     """Factory for creating appropriate tool executors based on tool type."""
+
+     _executor_map: Dict[ToolType, Type[ToolExecutor]] = {
+         ToolType.LETTA_CORE: LettaCoreToolExecutor,
+         ToolType.LETTA_MULTI_AGENT_CORE: LettaMultiAgentToolExecutor,
+         ToolType.LETTA_MEMORY_CORE: LettaMemoryToolExecutor,
+         ToolType.EXTERNAL_COMPOSIO: ExternalComposioToolExecutor,
+         ToolType.EXTERNAL_MCP: ExternalMCPToolExecutor,
+     }
+
+     @classmethod
+     def get_executor(cls, tool_type: ToolType) -> ToolExecutor:
+         """Get the appropriate executor for the given tool type."""
+         executor_class = cls._executor_map.get(tool_type)
+
+         if executor_class:
+             return executor_class()
+
+         # Default to sandbox executor for unknown types
+         return SandboxToolExecutor()
+
+
+ class ToolExecutionManager:
+     """Manager class for tool execution operations."""
+
+     def __init__(self, agent_state: AgentState, actor: User):
+         self.agent_state = agent_state
+         self.logger = get_logger(__name__)
+         self.actor = actor
+
+     def execute_tool(self, function_name: str, function_args: dict, tool: Tool) -> Tuple[Any, Optional[SandboxRunResult]]:
+         """
+         Execute a tool and persist any state changes.
+
+         Args:
+             function_name: Name of the function to execute
+             function_args: Arguments to pass to the function
+             tool: Tool object containing metadata about the tool
+
+         Returns:
+             Tuple containing the function response and sandbox run result (if applicable)
+         """
+         try:
+             # Get the appropriate executor for this tool type
+             executor = ToolExecutorFactory.get_executor(tool.tool_type)
+
+             # Execute the tool
+             return executor.execute(function_name, function_args, self.agent_state, tool, self.actor)
+
+         except Exception as e:
+             self.logger.error(f"Error executing tool {function_name}: {str(e)}")
+             error_message = get_friendly_error_msg(function_name=function_name, exception_name=type(e).__name__, exception_message=str(e))
+             return error_message, SandboxRunResult(status="error")
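A hedged usage sketch of the new manager; agent_state, actor, and tool stand in for already-loaded Letta objects, and the executor classes come from the companion tool_executor.py added in the same release (not shown here):

# Illustrative only: dispatch a tool call through the new ToolExecutionManager.
from letta.services.tool_executor.tool_execution_manager import ToolExecutionManager

manager = ToolExecutionManager(agent_state=agent_state, actor=actor)
function_response, sandbox_result = manager.execute_tool(
    function_name=tool.name,
    function_args={"message": "hello"},  # placeholder arguments
    tool=tool,
)
# On failure, execute_tool returns a friendly error string plus SandboxRunResult(status="error")
# instead of raising, so the caller can surface the error to the agent loop.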