letta-nightly 0.7.0.dev20250423003112__py3-none-any.whl → 0.7.1.dev20250423104245__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- letta/__init__.py +1 -1
- letta/agent.py +113 -81
- letta/agents/letta_agent.py +2 -2
- letta/agents/letta_agent_batch.py +38 -34
- letta/client/client.py +10 -2
- letta/constants.py +4 -3
- letta/functions/function_sets/multi_agent.py +1 -3
- letta/functions/helpers.py +3 -3
- letta/groups/dynamic_multi_agent.py +58 -59
- letta/groups/round_robin_multi_agent.py +43 -49
- letta/groups/sleeptime_multi_agent.py +28 -18
- letta/groups/supervisor_multi_agent.py +21 -20
- letta/helpers/converters.py +29 -0
- letta/helpers/message_helper.py +1 -0
- letta/helpers/tool_execution_helper.py +3 -3
- letta/orm/agent.py +8 -1
- letta/orm/custom_columns.py +15 -0
- letta/schemas/agent.py +6 -0
- letta/schemas/message.py +1 -0
- letta/schemas/response_format.py +78 -0
- letta/schemas/tool_execution_result.py +14 -0
- letta/server/rest_api/interface.py +2 -1
- letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +1 -1
- letta/server/rest_api/routers/v1/agents.py +4 -4
- letta/server/rest_api/routers/v1/groups.py +2 -2
- letta/server/rest_api/routers/v1/messages.py +32 -18
- letta/server/server.py +24 -57
- letta/services/agent_manager.py +1 -0
- letta/services/llm_batch_manager.py +28 -26
- letta/services/tool_executor/tool_execution_manager.py +37 -28
- letta/services/tool_executor/tool_execution_sandbox.py +35 -16
- letta/services/tool_executor/tool_executor.py +299 -68
- letta/services/tool_sandbox/base.py +3 -2
- letta/services/tool_sandbox/e2b_sandbox.py +5 -4
- letta/services/tool_sandbox/local_sandbox.py +11 -6
- {letta_nightly-0.7.0.dev20250423003112.dist-info → letta_nightly-0.7.1.dev20250423104245.dist-info}/METADATA +1 -1
- {letta_nightly-0.7.0.dev20250423003112.dist-info → letta_nightly-0.7.1.dev20250423104245.dist-info}/RECORD +40 -38
- {letta_nightly-0.7.0.dev20250423003112.dist-info → letta_nightly-0.7.1.dev20250423104245.dist-info}/LICENSE +0 -0
- {letta_nightly-0.7.0.dev20250423003112.dist-info → letta_nightly-0.7.1.dev20250423104245.dist-info}/WHEEL +0 -0
- {letta_nightly-0.7.0.dev20250423003112.dist-info → letta_nightly-0.7.1.dev20250423104245.dist-info}/entry_points.txt +0 -0
letta/server/server.py
CHANGED
@@ -28,7 +28,6 @@ from letta.functions.mcp_client.types import MCPServerType, MCPTool, SSEServerCo
 from letta.groups.helpers import load_multi_agent
 from letta.helpers.datetime_helpers import get_utc_time
 from letta.helpers.json_helpers import json_dumps, json_loads
-from letta.helpers.message_helper import prepare_input_message_create
 
 # TODO use custom interface
 from letta.interface import AgentInterface # abstract
@@ -148,7 +147,7 @@ class Server(object):
 raise NotImplementedError
 
 @abstractmethod
-def send_messages(self, user_id: str, agent_id: str,
+def send_messages(self, user_id: str, agent_id: str, input_messages: List[MessageCreate]) -> None:
 """Send a list of messages to the agent"""
 raise NotImplementedError
 
@@ -372,19 +371,13 @@ class SyncServer(Server):
 self,
 actor: User,
 agent_id: str,
-input_messages:
+input_messages: List[MessageCreate],
 interface: Union[AgentInterface, None] = None, # needed to getting responses
 put_inner_thoughts_first: bool = True,
 # timestamp: Optional[datetime],
 ) -> LettaUsageStatistics:
 """Send the input message through the agent"""
 # TODO: Thread actor directly through this function, since the top level caller most likely already retrieved the user
-# Input validation
-if isinstance(input_messages, Message):
-input_messages = [input_messages]
-if not all(isinstance(m, Message) for m in input_messages):
-raise ValueError(f"messages should be a Message or a list of Message, got {type(input_messages)}")
-
 logger.debug(f"Got input messages: {input_messages}")
 letta_agent = None
 try:
@@ -400,8 +393,9 @@ class SyncServer(Server):
 metadata = interface.metadata if hasattr(interface, "metadata") else None
 else:
 metadata = None
+
 usage_stats = letta_agent.step(
-
+input_messages=input_messages,
 chaining=self.chaining,
 max_chaining_steps=self.max_chaining_steps,
 stream=token_streaming,
@@ -572,23 +566,14 @@ class SyncServer(Server):
 )
 
 # NOTE: eventually deprecate and only allow passing Message types
-
-
-
-
-
-content=[TextContent(text=packaged_user_message)],
-created_at=timestamp,
-)
-else:
-message = Message(
-agent_id=agent_id,
-role="user",
-content=[TextContent(text=packaged_user_message)],
-)
+message = MessageCreate(
+agent_id=agent_id,
+role="user",
+content=[TextContent(text=packaged_user_message)],
+)
 
 # Run the agent state forward
-usage = self._step(actor=actor, agent_id=agent_id, input_messages=message)
+usage = self._step(actor=actor, agent_id=agent_id, input_messages=[message])
 return usage
 
 def system_message(
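The server now builds `MessageCreate` objects and passes them to `_step` as a list. A minimal sketch of that calling pattern, assuming this wheel's letta package is installed and that `agent_id`, `actor`, `packaged_user_message`, and a `server` instance are already in scope; the `TextContent` import path is an assumption, it is not shown in this diff.

```python
# Sketch of the new input path: wrap the user text in MessageCreate and pass a list.
from letta.schemas.message import MessageCreate                # class used in the hunk above
from letta.schemas.letta_message_content import TextContent    # assumed import path

message = MessageCreate(
    agent_id=agent_id,                                         # placeholder: an existing agent id
    role="user",
    content=[TextContent(text=packaged_user_message)],
)
# _step now expects a list of MessageCreate objects.
usage = server._step(actor=actor, agent_id=agent_id, input_messages=[message])
```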
@@ -660,23 +645,14 @@ class SyncServer(Server):
 self,
 actor: User,
 agent_id: str,
-
+input_messages: List[MessageCreate],
 wrap_user_message: bool = True,
 wrap_system_message: bool = True,
 interface: Union[AgentInterface, ChatCompletionsStreamingInterface, None] = None, # needed for responses
 metadata: Optional[dict] = None, # Pass through metadata to interface
 put_inner_thoughts_first: bool = True,
 ) -> LettaUsageStatistics:
-"""Send a list of messages to the agent.
-
-If messages are of type MessageCreate, convert them to Message objects before sending.
-"""
-if all(isinstance(m, MessageCreate) for m in messages):
-message_objects = [prepare_input_message_create(m, agent_id, wrap_user_message, wrap_system_message) for m in messages]
-elif all(isinstance(m, Message) for m in messages):
-message_objects = messages
-else:
-raise ValueError(f"All messages must be of type Message or MessageCreate, got {[type(m) for m in messages]}")
+"""Send a list of messages to the agent."""
 
 # Store metadata in interface if provided
 if metadata and hasattr(interface, "metadata"):
@@ -686,7 +662,7 @@ class SyncServer(Server):
 return self._step(
 actor=actor,
 agent_id=agent_id,
-input_messages=
+input_messages=input_messages,
 interface=interface,
 put_inner_thoughts_first=put_inner_thoughts_first,
 )
@@ -703,8 +679,6 @@ class SyncServer(Server):
 @trace_method
 def get_cached_llm_config(self, **kwargs):
 key = make_key(**kwargs)
-print(self._llm_config_cache)
-print("KEY", key)
 if key not in self._llm_config_cache:
 self._llm_config_cache[key] = self.get_llm_config_from_handle(**kwargs)
 return self._llm_config_cache[key]
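With the debug prints gone, `get_cached_llm_config` is a plain keyword-argument memo cache. A generic sketch of that pattern; `ConfigCache` is illustrative, and this `make_key` is a guess at the behavior rather than letta's implementation.

```python
def make_key(**kwargs):
    # Build a stable, hashable cache key from keyword arguments.
    return tuple(sorted(kwargs.items()))

class ConfigCache:
    def __init__(self, loader):
        self._loader = loader   # e.g. a get_llm_config_from_handle-style callable
        self._cache = {}

    def get(self, **kwargs):
        key = make_key(**kwargs)
        if key not in self._cache:
            self._cache[key] = self._loader(**kwargs)
        return self._cache[key]
```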
@@ -1019,12 +993,8 @@ class SyncServer(Server):
 agent = self.load_agent(agent_id=sleeptime_agent.id, actor=actor)
 for passage in self.list_data_source_passages(source_id=source.id, user_id=actor.id):
 agent.step(
-
-
-role="user",
-content=[TextContent(text=passage.text)],
-agent_id=sleeptime_agent.id,
-),
+input_messages=[
+MessageCreate(role="user", content=passage.text),
 ]
 )
 self.agent_manager.delete_agent(agent_id=sleeptime_agent.id, actor=actor)
@@ -1182,7 +1152,6 @@ class SyncServer(Server):
 provider = self.get_provider_from_name(provider_name)
 
 llm_configs = [config for config in provider.list_llm_models() if config.handle == handle]
-print("LLM CONFIGS", llm_configs)
 if not llm_configs:
 llm_configs = [config for config in provider.list_llm_models() if config.model == model_name]
 if not llm_configs:
@@ -1195,8 +1164,6 @@ class SyncServer(Server):
 if not llm_configs:
 raise e
 
-print("CONFIGS", llm_configs)
-
 if len(llm_configs) == 1:
 llm_config = llm_configs[0]
 elif len(llm_configs) > 1:
@@ -1343,17 +1310,17 @@ class SyncServer(Server):
 
 # Next, attempt to run the tool with the sandbox
 try:
-
+tool_execution_result = ToolExecutionSandbox(tool.name, tool_args, actor, tool_object=tool).run(
 agent_state=agent_state, additional_env_vars=tool_env_vars
 )
 return ToolReturnMessage(
 id="null",
 tool_call_id="null",
 date=get_utc_time(),
-status=
-tool_return=str(
-stdout=
-stderr=
+status=tool_execution_result.status,
+tool_return=str(tool_execution_result.func_return),
+stdout=tool_execution_result.stdout,
+stderr=tool_execution_result.stderr,
 )
 
 except Exception as e:
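`ToolExecutionSandbox.run` now hands back a single result object whose fields map directly onto `ToolReturnMessage`. A consumption sketch, assuming `tool`, `tool_args`, `actor`, `agent_state`, and `tool_env_vars` are in scope as in the surrounding code; the field names come from the hunk above.

```python
# Sketch: reading the ToolExecutionResult returned by the sandbox run.
result = ToolExecutionSandbox(tool.name, tool_args, actor, tool_object=tool).run(
    agent_state=agent_state, additional_env_vars=tool_env_vars
)
print(result.status)       # "success" or "error"
print(result.func_return)  # the tool's return value, or a friendly error message
print(result.stdout, result.stderr)
```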
@@ -1567,7 +1534,7 @@ class SyncServer(Server):
 agent_id: str,
 actor: User,
 # role: MessageRole,
-
+input_messages: List[MessageCreate],
 stream_steps: bool,
 stream_tokens: bool,
 # related to whether or not we return `LettaMessage`s or `Message`s
@@ -1647,7 +1614,7 @@ class SyncServer(Server):
 self.send_messages,
 actor=actor,
 agent_id=agent_id,
-
+input_messages=input_messages,
 interface=streaming_interface,
 metadata=metadata,
 )
@@ -1701,7 +1668,7 @@ class SyncServer(Server):
 self,
 group_id: str,
 actor: User,
-
+input_messages: Union[List[Message], List[MessageCreate]],
 stream_steps: bool,
 stream_tokens: bool,
 chat_completion_mode: bool = False,
@@ -1751,7 +1718,7 @@ class SyncServer(Server):
 task = asyncio.create_task(
 asyncio.to_thread(
 letta_multi_agent.step,
-
+input_messages=input_messages,
 chaining=self.chaining,
 max_chaining_steps=self.max_chaining_steps,
 )
letta/services/agent_manager.py
CHANGED
@@ -364,6 +364,7 @@ class AgentManager:
 "base_template_id": agent_update.base_template_id,
 "message_buffer_autoclear": agent_update.message_buffer_autoclear,
 "enable_sleeptime": agent_update.enable_sleeptime,
+"response_format": agent_update.response_format,
 }
 for col, val in scalar_updates.items():
 if val is not None:
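`response_format` joins the scalar columns copied from an `AgentUpdate` only when non-None. A tiny runnable sketch of that skip-None update pattern; the stand-in row object and the example value are hypothetical.

```python
from types import SimpleNamespace

agent_row = SimpleNamespace(enable_sleeptime=True, response_format=None)  # stand-in ORM row
scalar_updates = {
    "enable_sleeptime": None,                      # None: left untouched
    "response_format": {"type": "json_object"},    # hypothetical value: applied
}
for col, val in scalar_updates.items():
    if val is not None:
        setattr(agent_row, col, val)
print(agent_row.response_format)                   # {'type': 'json_object'}
```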
letta/services/llm_batch_manager.py
CHANGED
@@ -291,9 +291,7 @@ class LLMBatchManager:
 return [item.to_pydantic() for item in results]
 
 def bulk_update_llm_batch_items(
-self,
-llm_batch_id_agent_id_pairs: List[Tuple[str, str]],
-field_updates: List[Dict[str, Any]],
+self, llm_batch_id_agent_id_pairs: List[Tuple[str, str]], field_updates: List[Dict[str, Any]], strict: bool = True
 ) -> None:
 """
 Efficiently update multiple LLMBatchItem rows by (llm_batch_id, agent_id) pairs.
@@ -301,30 +299,43 @@ class LLMBatchManager:
 Args:
 llm_batch_id_agent_id_pairs: List of (llm_batch_id, agent_id) tuples identifying items to update
 field_updates: List of dictionaries containing the fields to update for each item
+strict: Whether to error if any of the requested keys don't exist (default True).
+If False, missing pairs are skipped.
 """
 if not llm_batch_id_agent_id_pairs or not field_updates:
 return
 
 if len(llm_batch_id_agent_id_pairs) != len(field_updates):
-raise ValueError("
+raise ValueError("llm_batch_id_agent_id_pairs and field_updates must have the same length")
 
 with self.session_maker() as session:
-# Lookup primary keys
+# Lookup primary keys for all requested (batch_id, agent_id) pairs
 items = (
 session.query(LLMBatchItem.id, LLMBatchItem.llm_batch_id, LLMBatchItem.agent_id)
 .filter(tuple_(LLMBatchItem.llm_batch_id, LLMBatchItem.agent_id).in_(llm_batch_id_agent_id_pairs))
 .all()
 )
-pair_to_pk = {(
-
+pair_to_pk = {(batch_id, agent_id): pk for pk, batch_id, agent_id in items}
+
+if strict:
+requested = set(llm_batch_id_agent_id_pairs)
+found = set(pair_to_pk.keys())
+missing = requested - found
+if missing:
+raise ValueError(
+f"Cannot bulk-update batch items: no records for the following " f"(llm_batch_id, agent_id) pairs: {missing}"
+)
+
+# Build mappings, skipping any missing when strict=False
 mappings = []
-for (
-
-if
+for (batch_id, agent_id), fields in zip(llm_batch_id_agent_id_pairs, field_updates):
+pk = pair_to_pk.get((batch_id, agent_id))
+if pk is None:
+# skip missing in non-strict mode
 continue
 
 update_fields = fields.copy()
-update_fields["id"] =
+update_fields["id"] = pk
 mappings.append(update_fields)
 
 if mappings:
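The new `strict` flag turns missing `(llm_batch_id, agent_id)` pairs into a hard error, while `strict=False` silently skips them. A pure-Python sketch of that matching logic with the database layer removed; the function and variable names are illustrative, not letta APIs.

```python
def build_mappings(pairs, field_updates, pair_to_pk, strict=True):
    # pairs: requested (llm_batch_id, agent_id) tuples; pair_to_pk: what the lookup found.
    if len(pairs) != len(field_updates):
        raise ValueError("pairs and field_updates must have the same length")
    if strict:
        missing = set(pairs) - set(pair_to_pk)
        if missing:
            raise ValueError(f"no records for pairs: {missing}")
    mappings = []
    for pair, fields in zip(pairs, field_updates):
        pk = pair_to_pk.get(pair)
        if pk is None:
            continue                  # skipped silently when strict=False
        row = dict(fields, id=pk)     # attach the primary key the bulk update needs
        mappings.append(row)
    return mappings
```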
@@ -332,10 +343,7 @@ class LLMBatchManager:
 session.commit()
 
 @enforce_types
-def bulk_update_batch_llm_items_results_by_agent(
-self,
-updates: List[ItemUpdateInfo],
-) -> None:
+def bulk_update_batch_llm_items_results_by_agent(self, updates: List[ItemUpdateInfo], strict: bool = True) -> None:
 """Update request status and batch results for multiple batch items."""
 batch_id_agent_id_pairs = [(update.llm_batch_id, update.agent_id) for update in updates]
 field_updates = [
@@ -346,29 +354,23 @@ class LLMBatchManager:
 for update in updates
 ]
 
-self.bulk_update_llm_batch_items(batch_id_agent_id_pairs, field_updates)
+self.bulk_update_llm_batch_items(batch_id_agent_id_pairs, field_updates, strict=strict)
 
 @enforce_types
-def bulk_update_llm_batch_items_step_status_by_agent(
-self,
-updates: List[StepStatusUpdateInfo],
-) -> None:
+def bulk_update_llm_batch_items_step_status_by_agent(self, updates: List[StepStatusUpdateInfo], strict: bool = True) -> None:
 """Update step status for multiple batch items."""
 batch_id_agent_id_pairs = [(update.llm_batch_id, update.agent_id) for update in updates]
 field_updates = [{"step_status": update.step_status} for update in updates]
 
-self.bulk_update_llm_batch_items(batch_id_agent_id_pairs, field_updates)
+self.bulk_update_llm_batch_items(batch_id_agent_id_pairs, field_updates, strict=strict)
 
 @enforce_types
-def bulk_update_llm_batch_items_request_status_by_agent(
-self,
-updates: List[RequestStatusUpdateInfo],
-) -> None:
+def bulk_update_llm_batch_items_request_status_by_agent(self, updates: List[RequestStatusUpdateInfo], strict: bool = True) -> None:
 """Update request status for multiple batch items."""
 batch_id_agent_id_pairs = [(update.llm_batch_id, update.agent_id) for update in updates]
 field_updates = [{"request_status": update.request_status} for update in updates]
 
-self.bulk_update_llm_batch_items(batch_id_agent_id_pairs, field_updates)
+self.bulk_update_llm_batch_items(batch_id_agent_id_pairs, field_updates, strict=strict)
 
 @enforce_types
 def delete_llm_batch_item(self, item_id: str, actor: PydanticUser) -> None:
letta/services/tool_executor/tool_execution_manager.py
CHANGED
@@ -1,16 +1,17 @@
-
+import traceback
+from typing import Any, Dict, Optional, Type
 
 from letta.log import get_logger
 from letta.orm.enums import ToolType
 from letta.schemas.agent import AgentState
-from letta.schemas.sandbox_config import SandboxConfig
+from letta.schemas.sandbox_config import SandboxConfig
 from letta.schemas.tool import Tool
+from letta.schemas.tool_execution_result import ToolExecutionResult
 from letta.schemas.user import User
 from letta.services.tool_executor.tool_executor import (
 ExternalComposioToolExecutor,
 ExternalMCPToolExecutor,
 LettaCoreToolExecutor,
-LettaMemoryToolExecutor,
 LettaMultiAgentToolExecutor,
 SandboxToolExecutor,
 ToolExecutor,
@@ -24,8 +25,9 @@ class ToolExecutorFactory:
 
 _executor_map: Dict[ToolType, Type[ToolExecutor]] = {
 ToolType.LETTA_CORE: LettaCoreToolExecutor,
+ToolType.LETTA_MEMORY_CORE: LettaCoreToolExecutor,
+ToolType.LETTA_SLEEPTIME_CORE: LettaCoreToolExecutor,
 ToolType.LETTA_MULTI_AGENT_CORE: LettaMultiAgentToolExecutor,
-ToolType.LETTA_MEMORY_CORE: LettaMemoryToolExecutor,
 ToolType.EXTERNAL_COMPOSIO: ExternalComposioToolExecutor,
 ToolType.EXTERNAL_MCP: ExternalMCPToolExecutor,
 }
@@ -33,13 +35,8 @@ class ToolExecutorFactory:
 @classmethod
 def get_executor(cls, tool_type: ToolType) -> ToolExecutor:
 """Get the appropriate executor for the given tool type."""
-executor_class = cls._executor_map.get(tool_type)
-
-if executor_class:
-return executor_class()
-
-# Default to sandbox executor for unknown types
-return SandboxToolExecutor()
+executor_class = cls._executor_map.get(tool_type, SandboxToolExecutor)
+return executor_class()
 
 
 class ToolExecutionManager:
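`get_executor` now collapses to a single dictionary lookup with `SandboxToolExecutor` as the fallback for unknown tool types. A minimal runnable sketch of that dispatch pattern with stand-in classes.

```python
class SandboxExecutor: ...
class CoreExecutor: ...

_executor_map = {"letta_core": CoreExecutor}   # stand-in for the ToolType-keyed map above

def get_executor(tool_type: str):
    # Unknown tool types fall back to the sandbox executor.
    executor_class = _executor_map.get(tool_type, SandboxExecutor)
    return executor_class()

assert isinstance(get_executor("letta_core"), CoreExecutor)
assert isinstance(get_executor("something_else"), SandboxExecutor)
```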
@@ -58,7 +55,7 @@ class ToolExecutionManager:
 self.sandbox_config = sandbox_config
 self.sandbox_env_vars = sandbox_env_vars
 
-def execute_tool(self, function_name: str, function_args: dict, tool: Tool) ->
+def execute_tool(self, function_name: str, function_args: dict, tool: Tool) -> ToolExecutionResult:
 """
 Execute a tool and persist any state changes.
 
@@ -71,35 +68,43 @@ class ToolExecutionManager:
 Tuple containing the function response and sandbox run result (if applicable)
 """
 try:
-# Get the appropriate executor for this tool type
 executor = ToolExecutorFactory.get_executor(tool.tool_type)
-
-# Execute the tool
 return executor.execute(
-function_name,
+function_name,
+function_args,
+self.agent_state,
+tool,
+self.actor,
+self.sandbox_config,
+self.sandbox_env_vars,
 )
 
 except Exception as e:
 self.logger.error(f"Error executing tool {function_name}: {str(e)}")
-error_message = get_friendly_error_msg(
-
+error_message = get_friendly_error_msg(
+function_name=function_name,
+exception_name=type(e).__name__,
+exception_message=str(e),
+)
+return ToolExecutionResult(
+status="error",
+func_return=error_message,
+stderr=[traceback.format_exc()],
+)
 
 @trace_method
-async def execute_tool_async(self, function_name: str, function_args: dict, tool: Tool) ->
+async def execute_tool_async(self, function_name: str, function_args: dict, tool: Tool) -> ToolExecutionResult:
 """
 Execute a tool asynchronously and persist any state changes.
 """
 try:
-
+executor = ToolExecutorFactory.get_executor(tool.tool_type)
 # TODO: Extend this async model to composio
-
-
-executor = SandboxToolExecutor()
-result_tuple = await executor.execute(function_name, function_args, self.agent_state, tool, self.actor)
+if isinstance(executor, SandboxToolExecutor):
+result = await executor.execute(function_name, function_args, self.agent_state, tool, self.actor)
 else:
-
-
-return result_tuple
+result = executor.execute(function_name, function_args, self.agent_state, tool, self.actor)
+return result
 
 except Exception as e:
 self.logger.error(f"Error executing tool {function_name}: {str(e)}")
@@ -108,4 +113,8 @@ class ToolExecutionManager:
 exception_name=type(e).__name__,
 exception_message=str(e),
 )
-return
+return ToolExecutionResult(
+status="error",
+func_return=error_message,
+stderr=[traceback.format_exc()],
+)
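Both the sync and async paths now wrap failures in a `ToolExecutionResult` instead of falling through with a bare return. A sketch of that wrapping with a stand-in dataclass so it runs without letta installed; the real class comes from letta.schemas.tool_execution_result per the import hunk above, and its exact fields may differ.

```python
import traceback
from dataclasses import dataclass, field
from typing import Any, List

@dataclass
class ToolExecutionResultSketch:   # stand-in for letta's ToolExecutionResult
    status: str
    func_return: Any = None
    stdout: List[str] = field(default_factory=list)
    stderr: List[str] = field(default_factory=list)

def run_tool(fn, *args, **kwargs):
    try:
        return ToolExecutionResultSketch(status="success", func_return=fn(*args, **kwargs))
    except Exception as e:
        # Mirror the error-wrapping pattern: friendly message plus full traceback in stderr.
        return ToolExecutionResultSketch(
            status="error",
            func_return=f"{type(e).__name__}: {e}",
            stderr=[traceback.format_exc()],
        )
```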
letta/services/tool_executor/tool_execution_sandbox.py
CHANGED
@@ -13,8 +13,9 @@ from typing import Any, Dict, Optional
 from letta.functions.helpers import generate_model_from_args_json_schema
 from letta.log import get_logger
 from letta.schemas.agent import AgentState
-from letta.schemas.sandbox_config import SandboxConfig,
+from letta.schemas.sandbox_config import SandboxConfig, SandboxType
 from letta.schemas.tool import Tool
+from letta.schemas.tool_execution_result import ToolExecutionResult
 from letta.schemas.user import User
 from letta.services.helpers.tool_execution_helper import (
 add_imports_and_pydantic_schemas_for_args,
@@ -72,7 +73,11 @@ class ToolExecutionSandbox:
 self.force_recreate = force_recreate
 self.force_recreate_venv = force_recreate_venv
 
-def run(
+def run(
+self,
+agent_state: Optional[AgentState] = None,
+additional_env_vars: Optional[Dict] = None,
+) -> ToolExecutionResult:
 """
 Run the tool in a sandbox environment.
 
@@ -81,7 +86,7 @@ class ToolExecutionSandbox:
 additional_env_vars (Optional[Dict]): Environment variables to inject into the sandbox
 
 Returns:
-
+ToolExecutionResult: Object containing tool execution outcome (e.g. status, response)
 """
 if tool_settings.e2b_api_key and not self.privileged_tools:
 logger.debug(f"Using e2b sandbox to execute {self.tool_name}")
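With every `run_*` entry point advertising `ToolExecutionResult`, callers branch on `result.status` instead of unpacking tuples. A consumption sketch, assuming a sandbox instance constructed as in the server.py hunk earlier; `handle` and `log_error` are placeholder callables, not letta APIs.

```python
result = ToolExecutionSandbox(tool_name, tool_args, user).run(agent_state=agent_state)
if result.status == "success":
    handle(result.func_return, result.agent_state)   # agent_state may carry persisted changes
else:
    log_error(result.func_return, result.stderr)
```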
@@ -115,7 +120,7 @@ class ToolExecutionSandbox:
 @trace_method
 def run_local_dir_sandbox(
 self, agent_state: Optional[AgentState] = None, additional_env_vars: Optional[Dict] = None
-) ->
+) -> ToolExecutionResult:
 sbx_config = self.sandbox_config_manager.get_or_create_default_sandbox_config(sandbox_type=SandboxType.LOCAL, actor=self.user)
 local_configs = sbx_config.get_local_config()
 
@@ -162,7 +167,12 @@ class ToolExecutionSandbox:
 os.remove(temp_file_path)
 
 @trace_method
-def run_local_dir_sandbox_venv(
+def run_local_dir_sandbox_venv(
+self,
+sbx_config: SandboxConfig,
+env: Dict[str, str],
+temp_file_path: str,
+) -> ToolExecutionResult:
 local_configs = sbx_config.get_local_config()
 sandbox_dir = os.path.expanduser(local_configs.sandbox_dir) # Expand tilde
 venv_path = os.path.join(sandbox_dir, local_configs.venv_name)
@@ -205,12 +215,12 @@ class ToolExecutionSandbox:
 func_result, stdout = self.parse_out_function_results_markers(result.stdout)
 func_return, agent_state = self.parse_best_effort(func_result)
 
-return
+return ToolExecutionResult(
+status="success",
 func_return=func_return,
 agent_state=agent_state,
 stdout=[stdout] if stdout else [],
 stderr=[result.stderr] if result.stderr else [],
-status="success",
 sandbox_config_fingerprint=sbx_config.fingerprint(),
 )
 
@@ -221,12 +231,12 @@ class ToolExecutionSandbox:
 exception_name=type(e).__name__,
 exception_message=str(e),
 )
-return
+return ToolExecutionResult(
+status="error",
 func_return=func_return,
 agent_state=None,
 stdout=[e.stdout] if e.stdout else [],
 stderr=[e.stderr] if e.stderr else [],
-status="error",
 sandbox_config_fingerprint=sbx_config.fingerprint(),
 )
 
@@ -238,7 +248,12 @@ class ToolExecutionSandbox:
 raise e
 
 @trace_method
-def run_local_dir_sandbox_directly(
+def run_local_dir_sandbox_directly(
+self,
+sbx_config: SandboxConfig,
+env: Dict[str, str],
+temp_file_path: str,
+) -> ToolExecutionResult:
 status = "success"
 func_return, agent_state, stderr = None, None, None
 
@@ -288,12 +303,12 @@ class ToolExecutionSandbox:
 stdout_output = [captured_stdout.getvalue()] if captured_stdout.getvalue() else []
 stderr_output = [captured_stderr.getvalue()] if captured_stderr.getvalue() else []
 
-return
+return ToolExecutionResult(
+status=status,
 func_return=func_return,
 agent_state=agent_state,
 stdout=stdout_output,
 stderr=stderr_output,
-status=status,
 sandbox_config_fingerprint=sbx_config.fingerprint(),
 )
 
@@ -307,7 +322,11 @@ class ToolExecutionSandbox:
 
 # e2b sandbox specific functions
 
-def run_e2b_sandbox(
+def run_e2b_sandbox(
+self,
+agent_state: Optional[AgentState] = None,
+additional_env_vars: Optional[Dict] = None,
+) -> ToolExecutionResult:
 sbx_config = self.sandbox_config_manager.get_or_create_default_sandbox_config(sandbox_type=SandboxType.E2B, actor=self.user)
 sbx = self.get_running_e2b_sandbox_with_same_state(sbx_config)
 if not sbx or self.force_recreate:
@@ -348,12 +367,12 @@ class ToolExecutionSandbox:
 else:
 raise ValueError(f"Tool {self.tool_name} returned execution with None")
 
-return
+return ToolExecutionResult(
+status="error" if execution.error else "success",
 func_return=func_return,
 agent_state=agent_state,
 stdout=execution.logs.stdout,
 stderr=execution.logs.stderr,
-status="error" if execution.error else "success",
 sandbox_config_fingerprint=sbx_config.fingerprint(),
 )
 
@@ -535,7 +554,7 @@ class ToolExecutionSandbox:
 Generate the code string to call the function.
 
 Args:
-inject_agent_state (bool): Whether to inject the
+inject_agent_state (bool): Whether to inject the agent's state as an input into the tool
 
 Returns:
 str: Generated code string for calling the tool