letta-nightly 0.6.5.dev20241218213641__py3-none-any.whl → 0.6.5.dev20241220104040__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of letta-nightly might be problematic. Click here for more details.

Files changed (30)
  1. letta/agent.py +37 -6
  2. letta/client/client.py +2 -2
  3. letta/client/streaming.py +9 -9
  4. letta/errors.py +60 -25
  5. letta/functions/function_sets/base.py +0 -54
  6. letta/helpers/tool_rule_solver.py +82 -51
  7. letta/llm_api/llm_api_tools.py +2 -2
  8. letta/orm/custom_columns.py +5 -2
  9. letta/orm/message.py +2 -1
  10. letta/orm/passage.py +14 -15
  11. letta/providers.py +2 -1
  12. letta/schemas/enums.py +1 -0
  13. letta/schemas/letta_message.py +76 -40
  14. letta/schemas/letta_response.py +9 -1
  15. letta/schemas/message.py +13 -13
  16. letta/schemas/tool_rule.py +12 -2
  17. letta/server/rest_api/interface.py +48 -48
  18. letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +2 -2
  19. letta/server/rest_api/routers/v1/agents.py +3 -0
  20. letta/server/rest_api/routers/v1/tools.py +5 -20
  21. letta/server/rest_api/utils.py +23 -22
  22. letta/server/server.py +12 -18
  23. letta/services/agent_manager.py +32 -46
  24. letta/services/message_manager.py +1 -0
  25. letta/services/tool_manager.py +3 -3
  26. {letta_nightly-0.6.5.dev20241218213641.dist-info → letta_nightly-0.6.5.dev20241220104040.dist-info}/METADATA +1 -1
  27. {letta_nightly-0.6.5.dev20241218213641.dist-info → letta_nightly-0.6.5.dev20241220104040.dist-info}/RECORD +30 -30
  28. {letta_nightly-0.6.5.dev20241218213641.dist-info → letta_nightly-0.6.5.dev20241220104040.dist-info}/LICENSE +0 -0
  29. {letta_nightly-0.6.5.dev20241218213641.dist-info → letta_nightly-0.6.5.dev20241220104040.dist-info}/WHEEL +0 -0
  30. {letta_nightly-0.6.5.dev20241218213641.dist-info → letta_nightly-0.6.5.dev20241220104040.dist-info}/entry_points.txt +0 -0
@@ -12,11 +12,11 @@ from letta.local_llm.constants import INNER_THOUGHTS_KWARG
12
12
  from letta.schemas.enums import MessageStreamStatus
13
13
  from letta.schemas.letta_message import (
14
14
  AssistantMessage,
15
- FunctionCall,
16
- FunctionCallDelta,
17
- FunctionCallMessage,
18
- FunctionReturn,
19
- InternalMonologue,
15
+ ToolCall,
16
+ ToolCallDelta,
17
+ ToolCallMessage,
18
+ ToolReturnMessage,
19
+ ReasoningMessage,
20
20
  LegacyFunctionCallMessage,
21
21
  LegacyLettaMessage,
22
22
  LettaMessage,
@@ -411,7 +411,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
411
411
 
412
412
  def _process_chunk_to_letta_style(
413
413
  self, chunk: ChatCompletionChunkResponse, message_id: str, message_date: datetime
414
- ) -> Optional[Union[InternalMonologue, FunctionCallMessage, AssistantMessage]]:
414
+ ) -> Optional[Union[ReasoningMessage, ToolCallMessage, AssistantMessage]]:
415
415
  """
416
416
  Example data from non-streaming response looks like:
417
417
 
@@ -426,10 +426,10 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
426
426
 
427
427
  # inner thoughts
428
428
  if message_delta.content is not None:
429
- processed_chunk = InternalMonologue(
429
+ processed_chunk = ReasoningMessage(
430
430
  id=message_id,
431
431
  date=message_date,
432
- internal_monologue=message_delta.content,
432
+ reasoning=message_delta.content,
433
433
  )
434
434
 
435
435
  # tool calls
@@ -442,7 +442,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
442
442
  if self.inner_thoughts_in_kwargs:
443
443
  raise NotImplementedError("inner_thoughts_in_kwargs with use_assistant_message not yet supported")
444
444
 
445
- # If we just received a chunk with the message in it, we either enter "send_message" mode, or we do standard FunctionCallMessage passthrough mode
445
+ # If we just received a chunk with the message in it, we either enter "send_message" mode, or we do standard ToolCallMessage passthrough mode
446
446
 
447
447
  # Track the function name while streaming
448
448
  # If we were previously on a 'send_message', we need to 'toggle' into 'content' mode
@@ -474,7 +474,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
474
474
  assistant_message=cleaned_func_args,
475
475
  )
476
476
 
477
- # otherwise we just do a regular passthrough of a FunctionCallDelta via a FunctionCallMessage
477
+ # otherwise we just do a regular passthrough of a ToolCallDelta via a ToolCallMessage
478
478
  else:
479
479
  tool_call_delta = {}
480
480
  if tool_call.id:
@@ -485,13 +485,13 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
485
485
  if tool_call.function.name:
486
486
  tool_call_delta["name"] = tool_call.function.name
487
487
 
488
- processed_chunk = FunctionCallMessage(
488
+ processed_chunk = ToolCallMessage(
489
489
  id=message_id,
490
490
  date=message_date,
491
- function_call=FunctionCallDelta(
491
+ tool_call=ToolCallDelta(
492
492
  name=tool_call_delta.get("name"),
493
493
  arguments=tool_call_delta.get("arguments"),
494
- function_call_id=tool_call_delta.get("id"),
494
+ tool_call_id=tool_call_delta.get("id"),
495
495
  ),
496
496
  )
497
497
 
@@ -518,10 +518,10 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
518
518
 
519
519
  # If we have inner thoughts, we should output them as a chunk
520
520
  if updates_inner_thoughts:
521
- processed_chunk = InternalMonologue(
521
+ processed_chunk = ReasoningMessage(
522
522
  id=message_id,
523
523
  date=message_date,
524
- internal_monologue=updates_inner_thoughts,
524
+ reasoning=updates_inner_thoughts,
525
525
  )
526
526
  # Additionally inner thoughts may stream back with a chunk of main JSON
527
527
  # In that case, since we can only return a chunk at a time, we should buffer it
@@ -531,7 +531,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
531
531
  else:
532
532
  self.function_args_buffer += updates_main_json
533
533
 
534
- # If we have main_json, we should output a FunctionCallMessage
534
+ # If we have main_json, we should output a ToolCallMessage
535
535
  elif updates_main_json:
536
536
 
537
537
  # If there's something in the function_name buffer, we should release it first
@@ -539,13 +539,13 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
539
539
  # however the frontend may expect name first, then args, so to be
540
540
  # safe we'll output name first in a separate chunk
541
541
  if self.function_name_buffer:
542
- processed_chunk = FunctionCallMessage(
542
+ processed_chunk = ToolCallMessage(
543
543
  id=message_id,
544
544
  date=message_date,
545
- function_call=FunctionCallDelta(
545
+ tool_call=ToolCallDelta(
546
546
  name=self.function_name_buffer,
547
547
  arguments=None,
548
- function_call_id=self.function_id_buffer,
548
+ tool_call_id=self.function_id_buffer,
549
549
  ),
550
550
  )
551
551
  # Clear the buffer
@@ -561,20 +561,20 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
561
561
  self.function_args_buffer += updates_main_json
562
562
 
563
563
  # If there was nothing in the name buffer, we can proceed to
564
- # output the arguments chunk as a FunctionCallMessage
564
+ # output the arguments chunk as a ToolCallMessage
565
565
  else:
566
566
  # There may be a buffer from a previous chunk, for example
567
567
  # if the previous chunk had arguments but we needed to flush name
568
568
  if self.function_args_buffer:
569
569
  # In this case, we should release the buffer + new data at once
570
570
  combined_chunk = self.function_args_buffer + updates_main_json
571
- processed_chunk = FunctionCallMessage(
571
+ processed_chunk = ToolCallMessage(
572
572
  id=message_id,
573
573
  date=message_date,
574
- function_call=FunctionCallDelta(
574
+ tool_call=ToolCallDelta(
575
575
  name=None,
576
576
  arguments=combined_chunk,
577
- function_call_id=self.function_id_buffer,
577
+ tool_call_id=self.function_id_buffer,
578
578
  ),
579
579
  )
580
580
  # clear buffer
@@ -582,13 +582,13 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
582
582
  self.function_id_buffer = None
583
583
  else:
584
584
  # If there's no buffer to clear, just output a new chunk with new data
585
- processed_chunk = FunctionCallMessage(
585
+ processed_chunk = ToolCallMessage(
586
586
  id=message_id,
587
587
  date=message_date,
588
- function_call=FunctionCallDelta(
588
+ tool_call=ToolCallDelta(
589
589
  name=None,
590
590
  arguments=updates_main_json,
591
- function_call_id=self.function_id_buffer,
591
+ tool_call_id=self.function_id_buffer,
592
592
  ),
593
593
  )
594
594
  self.function_id_buffer = None
@@ -608,10 +608,10 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
608
608
  # # if tool_call.function.name:
609
609
  # # tool_call_delta["name"] = tool_call.function.name
610
610
 
611
- # processed_chunk = FunctionCallMessage(
611
+ # processed_chunk = ToolCallMessage(
612
612
  # id=message_id,
613
613
  # date=message_date,
614
- # function_call=FunctionCallDelta(name=tool_call_delta.get("name"), arguments=tool_call_delta.get("arguments")),
614
+ # tool_call=ToolCallDelta(name=tool_call_delta.get("name"), arguments=tool_call_delta.get("arguments")),
615
615
  # )
616
616
 
617
617
  else:
@@ -642,10 +642,10 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
642
642
  # if tool_call.function.name:
643
643
  # tool_call_delta["name"] = tool_call.function.name
644
644
 
645
- # processed_chunk = FunctionCallMessage(
645
+ # processed_chunk = ToolCallMessage(
646
646
  # id=message_id,
647
647
  # date=message_date,
648
- # function_call=FunctionCallDelta(name=tool_call_delta.get("name"), arguments=tool_call_delta.get("arguments")),
648
+ # tool_call=ToolCallDelta(name=tool_call_delta.get("name"), arguments=tool_call_delta.get("arguments")),
649
649
  # )
650
650
 
651
651
  # elif False and self.inner_thoughts_in_kwargs and tool_call.function:
@@ -680,15 +680,15 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
680
680
  # Once we get a complete key, check if the key matches
681
681
 
682
682
  # If it does match, start processing the value (stringified-JSON string
683
- # And with each new chunk, output it as a chunk of type InternalMonologue
683
+ # And with each new chunk, output it as a chunk of type ReasoningMessage
684
684
 
685
- # If the key doesn't match, then flush the buffer as a single FunctionCallMessage chunk
685
+ # If the key doesn't match, then flush the buffer as a single ToolCallMessage chunk
686
686
 
687
687
  # If we're reading a value
688
688
 
689
- # If we're reading the inner thoughts value, we output chunks of type InternalMonologue
689
+ # If we're reading the inner thoughts value, we output chunks of type ReasoningMessage
690
690
 
691
- # Otherwise, do simple chunks of FunctionCallMessage
691
+ # Otherwise, do simple chunks of ToolCallMessage
692
692
 
693
693
  else:
694
694
 
@@ -701,13 +701,13 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
701
701
  if tool_call.function.name:
702
702
  tool_call_delta["name"] = tool_call.function.name
703
703
 
704
- processed_chunk = FunctionCallMessage(
704
+ processed_chunk = ToolCallMessage(
705
705
  id=message_id,
706
706
  date=message_date,
707
- function_call=FunctionCallDelta(
707
+ tool_call=ToolCallDelta(
708
708
  name=tool_call_delta.get("name"),
709
709
  arguments=tool_call_delta.get("arguments"),
710
- function_call_id=tool_call_delta.get("id"),
710
+ tool_call_id=tool_call_delta.get("id"),
711
711
  ),
712
712
  )
713
713
 
@@ -823,10 +823,10 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
823
823
  # "id": str(msg_obj.id) if msg_obj is not None else None,
824
824
  # }
825
825
  assert msg_obj is not None, "Internal monologue requires msg_obj references for metadata"
826
- processed_chunk = InternalMonologue(
826
+ processed_chunk = ReasoningMessage(
827
827
  id=msg_obj.id,
828
828
  date=msg_obj.created_at,
829
- internal_monologue=msg,
829
+ reasoning=msg,
830
830
  )
831
831
 
832
832
  self._push_to_buffer(processed_chunk)
@@ -911,13 +911,13 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
911
911
  assistant_message=func_args[self.assistant_message_tool_kwarg],
912
912
  )
913
913
  else:
914
- processed_chunk = FunctionCallMessage(
914
+ processed_chunk = ToolCallMessage(
915
915
  id=msg_obj.id,
916
916
  date=msg_obj.created_at,
917
- function_call=FunctionCall(
917
+ tool_call=ToolCall(
918
918
  name=function_call.function.name,
919
919
  arguments=function_call.function.arguments,
920
- function_call_id=function_call.id,
920
+ tool_call_id=function_call.id,
921
921
  ),
922
922
  )
923
923
 
@@ -942,24 +942,24 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
942
942
  msg = msg.replace("Success: ", "")
943
943
  # new_message = {"function_return": msg, "status": "success"}
944
944
  assert msg_obj.tool_call_id is not None
945
- new_message = FunctionReturn(
945
+ new_message = ToolReturnMessage(
946
946
  id=msg_obj.id,
947
947
  date=msg_obj.created_at,
948
- function_return=msg,
948
+ tool_return=msg,
949
949
  status="success",
950
- function_call_id=msg_obj.tool_call_id,
950
+ tool_call_id=msg_obj.tool_call_id,
951
951
  )
952
952
 
953
953
  elif msg.startswith("Error: "):
954
954
  msg = msg.replace("Error: ", "")
955
955
  # new_message = {"function_return": msg, "status": "error"}
956
956
  assert msg_obj.tool_call_id is not None
957
- new_message = FunctionReturn(
957
+ new_message = ToolReturnMessage(
958
958
  id=msg_obj.id,
959
959
  date=msg_obj.created_at,
960
- function_return=msg,
960
+ tool_return=msg,
961
961
  status="error",
962
- function_call_id=msg_obj.tool_call_id,
962
+ tool_call_id=msg_obj.tool_call_id,
963
963
  )
964
964
 
965
965
  else:
@@ -4,7 +4,7 @@ from typing import TYPE_CHECKING, Optional
4
4
  from fastapi import APIRouter, Body, Depends, Header, HTTPException
5
5
 
6
6
  from letta.schemas.enums import MessageRole
7
- from letta.schemas.letta_message import FunctionCall, LettaMessage
7
+ from letta.schemas.letta_message import ToolCall, LettaMessage
8
8
  from letta.schemas.openai.chat_completion_request import ChatCompletionRequest
9
9
  from letta.schemas.openai.chat_completion_response import (
10
10
  ChatCompletionResponse,
@@ -94,7 +94,7 @@ async def create_chat_completion(
94
94
  created_at = None
95
95
  for letta_msg in response_messages.messages:
96
96
  assert isinstance(letta_msg, LettaMessage)
97
- if isinstance(letta_msg, FunctionCall):
97
+ if isinstance(letta_msg, ToolCall):
98
98
  if letta_msg.name and letta_msg.name == "send_message":
99
99
  try:
100
100
  letta_function_call_args = json.loads(letta_msg.arguments)
@@ -17,6 +17,7 @@ from fastapi.responses import JSONResponse, StreamingResponse
17
17
  from pydantic import Field
18
18
 
19
19
  from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG
20
+ from letta.log import get_logger
20
21
  from letta.orm.errors import NoResultFound
21
22
  from letta.schemas.agent import AgentState, CreateAgent, UpdateAgent
22
23
  from letta.schemas.block import ( # , BlockLabelUpdate, BlockLimitUpdate
@@ -54,6 +55,8 @@ from letta.server.server import SyncServer
54
55
 
55
56
  router = APIRouter(prefix="/agents", tags=["agents"])
56
57
 
58
+ logger = get_logger(__name__)
59
+
57
60
 
58
61
  # TODO: This should be paginated
59
62
  @router.get("/", response_model=List[AgentState], operation_id="list_agents")
@@ -7,7 +7,7 @@ from fastapi import APIRouter, Body, Depends, Header, HTTPException
7
7
 
8
8
  from letta.errors import LettaToolCreateError
9
9
  from letta.orm.errors import UniqueConstraintViolationError
10
- from letta.schemas.letta_message import FunctionReturn
10
+ from letta.schemas.letta_message import ToolReturnMessage
11
11
  from letta.schemas.tool import Tool, ToolCreate, ToolRunFromSource, ToolUpdate
12
12
  from letta.schemas.user import User
13
13
  from letta.server.rest_api.utils import get_letta_server
@@ -152,33 +152,18 @@ def update_tool(
152
152
 
153
153
 
154
154
  @router.post("/add-base-tools", response_model=List[Tool], operation_id="add_base_tools")
155
- def add_base_tools(
155
+ def upsert_base_tools(
156
156
  server: SyncServer = Depends(get_letta_server),
157
157
  user_id: Optional[str] = Header(None, alias="user_id"), # Extract user_id from header, default to None if not present
158
158
  ):
159
159
  """
160
- Add base tools
160
+ Upsert base tools
161
161
  """
162
162
  actor = server.user_manager.get_user_or_default(user_id=user_id)
163
- return server.tool_manager.add_base_tools(actor=actor)
163
+ return server.tool_manager.upsert_base_tools(actor=actor)
164
164
 
165
165
 
166
- # NOTE: can re-enable if needed
167
- # @router.post("/{tool_id}/run", response_model=FunctionReturn, operation_id="run_tool")
168
- # def run_tool(
169
- # server: SyncServer = Depends(get_letta_server),
170
- # request: ToolRun = Body(...),
171
- # user_id: Optional[str] = Header(None, alias="user_id"), # Extract user_id from header, default to None if not present
172
- # ):
173
- # """
174
- # Run an existing tool on provided arguments
175
- # """
176
- # actor = server.user_manager.get_user_or_default(user_id=user_id)
177
-
178
- # return server.run_tool(tool_id=request.tool_id, tool_args=request.tool_args, user_id=actor.id)
179
-
180
-
181
- @router.post("/run", response_model=FunctionReturn, operation_id="run_tool_from_source")
166
+ @router.post("/run", response_model=ToolReturnMessage, operation_id="run_tool_from_source")
182
167
  def run_tool_from_source(
183
168
  server: SyncServer = Depends(get_letta_server),
184
169
  request: ToolRunFromSource = Body(...),
@@ -8,6 +8,7 @@ from typing import AsyncGenerator, Optional, Union
8
8
  from fastapi import Header
9
9
  from pydantic import BaseModel
10
10
 
11
+ from letta.errors import ContextWindowExceededError, RateLimitExceededError
11
12
  from letta.schemas.usage import LettaUsageStatistics
12
13
  from letta.server.rest_api.interface import StreamingServerInterface
13
14
  from letta.server.server import SyncServer
@@ -61,34 +62,21 @@ async def sse_async_generator(
61
62
  if not isinstance(usage, LettaUsageStatistics):
62
63
  raise ValueError(f"Expected LettaUsageStatistics, got {type(usage)}")
63
64
  yield sse_formatter({"usage": usage.model_dump()})
64
- except Exception as e:
65
- import traceback
66
-
67
- traceback.print_exc()
68
- warnings.warn(f"SSE stream generator failed: {e}")
69
65
 
70
- # Log the error, since the exception handler upstack (in FastAPI) won't catch it, because this may be a 200 response
71
- # Print the stack trace
72
- if (os.getenv("SENTRY_DSN") is not None) and (os.getenv("SENTRY_DSN") != ""):
73
- import sentry_sdk
66
+ except ContextWindowExceededError as e:
67
+ log_error_to_sentry(e)
68
+ yield sse_formatter({"error": f"Stream failed: {e}", "code": str(e.code.value) if e.code else None})
74
69
 
75
- sentry_sdk.capture_exception(e)
70
+ except RateLimitExceededError as e:
71
+ log_error_to_sentry(e)
72
+ yield sse_formatter({"error": f"Stream failed: {e}", "code": str(e.code.value) if e.code else None})
76
73
 
74
+ except Exception as e:
75
+ log_error_to_sentry(e)
77
76
  yield sse_formatter({"error": f"Stream failed (internal error occured)"})
78
77
 
79
78
  except Exception as e:
80
- import traceback
81
-
82
- traceback.print_exc()
83
- warnings.warn(f"SSE stream generator failed: {e}")
84
-
85
- # Log the error, since the exception handler upstack (in FastAPI) won't catch it, because this may be a 200 response
86
- # Print the stack trace
87
- if (os.getenv("SENTRY_DSN") is not None) and (os.getenv("SENTRY_DSN") != ""):
88
- import sentry_sdk
89
-
90
- sentry_sdk.capture_exception(e)
91
-
79
+ log_error_to_sentry(e)
92
80
  yield sse_formatter({"error": "Stream failed (decoder encountered an error)"})
93
81
 
94
82
  finally:
@@ -113,3 +101,16 @@ def get_user_id(user_id: Optional[str] = Header(None, alias="user_id")) -> Optio
113
101
 
114
102
  def get_current_interface() -> StreamingServerInterface:
115
103
  return StreamingServerInterface
104
+
105
+ def log_error_to_sentry(e):
106
+ import traceback
107
+
108
+ traceback.print_exc()
109
+ warnings.warn(f"SSE stream generator failed: {e}")
110
+
111
+ # Log the error, since the exception handler upstack (in FastAPI) won't catch it, because this may be a 200 response
112
+ # Print the stack trace
113
+ if (os.getenv("SENTRY_DSN") is not None) and (os.getenv("SENTRY_DSN") != ""):
114
+ import sentry_sdk
115
+
116
+ sentry_sdk.capture_exception(e)
letta/server/server.py CHANGED
@@ -47,7 +47,7 @@ from letta.schemas.embedding_config import EmbeddingConfig
47
47
  # openai schemas
48
48
  from letta.schemas.enums import JobStatus
49
49
  from letta.schemas.job import Job, JobUpdate
50
- from letta.schemas.letta_message import FunctionReturn, LettaMessage
50
+ from letta.schemas.letta_message import ToolReturnMessage, LettaMessage
51
51
  from letta.schemas.llm_config import LLMConfig
52
52
  from letta.schemas.memory import (
53
53
  ArchivalMemorySummary,
@@ -301,7 +301,7 @@ class SyncServer(Server):
301
301
  self.default_org = self.organization_manager.create_default_organization()
302
302
  self.default_user = self.user_manager.create_default_user()
303
303
  self.block_manager.add_default_blocks(actor=self.default_user)
304
- self.tool_manager.add_base_tools(actor=self.default_user)
304
+ self.tool_manager.upsert_base_tools(actor=self.default_user)
305
305
 
306
306
  # If there is a default org/user
307
307
  # This logic may have to change in the future
@@ -981,15 +981,12 @@ class SyncServer(Server):
981
981
  assistant_message_tool_kwarg: str = constants.DEFAULT_MESSAGE_TOOL_KWARG,
982
982
  ) -> Union[List[Message], List[LettaMessage]]:
983
983
  # TODO: Thread actor directly through this function, since the top level caller most likely already retrieved the user
984
- actor = self.user_manager.get_user_or_default(user_id=user_id)
985
-
986
- # Get the agent object (loaded in memory)
987
- letta_agent = self.load_agent(agent_id=agent_id, actor=actor)
988
984
 
989
- # iterate over records
985
+ actor = self.user_manager.get_user_or_default(user_id=user_id)
990
986
  start_date = self.message_manager.get_message_by_id(after, actor=actor).created_at if after else None
991
987
  end_date = self.message_manager.get_message_by_id(before, actor=actor).created_at if before else None
992
- records = letta_agent.message_manager.list_messages_for_agent(
988
+
989
+ records = self.message_manager.list_messages_for_agent(
993
990
  agent_id=agent_id,
994
991
  actor=actor,
995
992
  start_date=start_date,
@@ -998,10 +995,7 @@ class SyncServer(Server):
998
995
  ascending=not reverse,
999
996
  )
1000
997
 
1001
- assert all(isinstance(m, Message) for m in records)
1002
-
1003
998
  if not return_message_object:
1004
- # If we're GETing messages in reverse, we need to reverse the inner list (generated by to_letta_message)
1005
999
  records = [
1006
1000
  msg
1007
1001
  for m in records
@@ -1356,7 +1350,7 @@ class SyncServer(Server):
1356
1350
  tool_source: str,
1357
1351
  tool_source_type: Optional[str] = None,
1358
1352
  tool_name: Optional[str] = None,
1359
- ) -> FunctionReturn:
1353
+ ) -> ToolReturnMessage:
1360
1354
  """Run a tool from source code"""
1361
1355
 
1362
1356
  try:
@@ -1380,24 +1374,24 @@ class SyncServer(Server):
1380
1374
  # Next, attempt to run the tool with the sandbox
1381
1375
  try:
1382
1376
  sandbox_run_result = ToolExecutionSandbox(tool.name, tool_args_dict, actor, tool_object=tool).run(agent_state=agent_state)
1383
- return FunctionReturn(
1377
+ return ToolReturnMessage(
1384
1378
  id="null",
1385
- function_call_id="null",
1379
+ tool_call_id="null",
1386
1380
  date=get_utc_time(),
1387
1381
  status=sandbox_run_result.status,
1388
- function_return=str(sandbox_run_result.func_return),
1382
+ tool_return=str(sandbox_run_result.func_return),
1389
1383
  stdout=sandbox_run_result.stdout,
1390
1384
  stderr=sandbox_run_result.stderr,
1391
1385
  )
1392
1386
 
1393
1387
  except Exception as e:
1394
1388
  func_return = get_friendly_error_msg(function_name=tool.name, exception_name=type(e).__name__, exception_message=str(e))
1395
- return FunctionReturn(
1389
+ return ToolReturnMessage(
1396
1390
  id="null",
1397
- function_call_id="null",
1391
+ tool_call_id="null",
1398
1392
  date=get_utc_time(),
1399
1393
  status="error",
1400
- function_return=func_return,
1394
+ tool_return=func_return,
1401
1395
  stdout=[],
1402
1396
  stderr=[traceback.format_exc()],
1403
1397
  )