letta-nightly 0.6.45.dev20250328104141__py3-none-any.whl → 0.6.46.dev20250330050944__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of letta-nightly might be problematic.

Files changed (48)
  1. letta/__init__.py +1 -1
  2. letta/agent.py +25 -8
  3. letta/agents/base_agent.py +6 -5
  4. letta/agents/letta_agent.py +323 -0
  5. letta/agents/voice_agent.py +4 -3
  6. letta/client/client.py +2 -0
  7. letta/dynamic_multi_agent.py +5 -5
  8. letta/errors.py +20 -0
  9. letta/helpers/tool_execution_helper.py +1 -1
  10. letta/helpers/tool_rule_solver.py +1 -1
  11. letta/llm_api/anthropic.py +2 -0
  12. letta/llm_api/anthropic_client.py +153 -167
  13. letta/llm_api/google_ai_client.py +112 -29
  14. letta/llm_api/llm_api_tools.py +5 -0
  15. letta/llm_api/llm_client.py +6 -7
  16. letta/llm_api/llm_client_base.py +38 -17
  17. letta/llm_api/openai.py +2 -0
  18. letta/orm/group.py +2 -5
  19. letta/round_robin_multi_agent.py +18 -7
  20. letta/schemas/group.py +6 -0
  21. letta/schemas/message.py +23 -14
  22. letta/schemas/openai/chat_completion_request.py +6 -1
  23. letta/schemas/providers.py +3 -3
  24. letta/serialize_schemas/marshmallow_agent.py +34 -10
  25. letta/serialize_schemas/pydantic_agent_schema.py +23 -3
  26. letta/server/rest_api/app.py +9 -0
  27. letta/server/rest_api/interface.py +25 -2
  28. letta/server/rest_api/optimistic_json_parser.py +1 -1
  29. letta/server/rest_api/routers/v1/agents.py +57 -23
  30. letta/server/rest_api/routers/v1/groups.py +72 -49
  31. letta/server/rest_api/routers/v1/sources.py +1 -0
  32. letta/server/rest_api/utils.py +0 -1
  33. letta/server/server.py +73 -80
  34. letta/server/startup.sh +1 -1
  35. letta/services/agent_manager.py +7 -0
  36. letta/services/group_manager.py +87 -29
  37. letta/services/message_manager.py +5 -0
  38. letta/services/tool_executor/async_tool_execution_sandbox.py +397 -0
  39. letta/services/tool_executor/tool_execution_manager.py +27 -0
  40. letta/services/{tool_execution_sandbox.py → tool_executor/tool_execution_sandbox.py} +40 -12
  41. letta/services/tool_executor/tool_executor.py +23 -6
  42. letta/settings.py +17 -1
  43. letta/supervisor_multi_agent.py +3 -1
  44. {letta_nightly-0.6.45.dev20250328104141.dist-info → letta_nightly-0.6.46.dev20250330050944.dist-info}/METADATA +1 -1
  45. {letta_nightly-0.6.45.dev20250328104141.dist-info → letta_nightly-0.6.46.dev20250330050944.dist-info}/RECORD +48 -46
  46. {letta_nightly-0.6.45.dev20250328104141.dist-info → letta_nightly-0.6.46.dev20250330050944.dist-info}/LICENSE +0 -0
  47. {letta_nightly-0.6.45.dev20250328104141.dist-info → letta_nightly-0.6.46.dev20250330050944.dist-info}/WHEEL +0 -0
  48. {letta_nightly-0.6.45.dev20250328104141.dist-info → letta_nightly-0.6.46.dev20250330050944.dist-info}/entry_points.txt +0 -0
letta/server/rest_api/interface.py

@@ -465,6 +465,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
         # if we expect `reasoning_content``, then that's what gets mapped to ReasoningMessage
         # and `content` needs to be handled outside the interface
         expect_reasoning_content: bool = False,
+        name: Optional[str] = None,
     ) -> Optional[Union[ReasoningMessage, ToolCallMessage, AssistantMessage]]:
         """
         Example data from non-streaming response looks like:
@@ -497,6 +498,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                 reasoning=message_delta.reasoning_content,
                 signature=message_delta.reasoning_content_signature,
                 source="reasoner_model" if message_delta.reasoning_content_signature else "non_reasoner_model",
+                name=name,
             )
         elif expect_reasoning_content and message_delta.redacted_reasoning_content is not None:
             processed_chunk = HiddenReasoningMessage(
@@ -504,6 +506,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                 date=message_date,
                 hidden_reasoning=message_delta.redacted_reasoning_content,
                 state="redacted",
+                name=name,
             )
         elif expect_reasoning_content and message_delta.content is not None:
             # "ignore" content if we expect reasoning content
@@ -530,6 +533,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                     arguments=json.dumps(json_reasoning_content.get("arguments")),
                     tool_call_id=None,
                 ),
+                name=name,
             )

         except json.JSONDecodeError as e:
@@ -559,6 +563,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                 id=message_id,
                 date=message_date,
                 reasoning=message_delta.content,
+                name=name,
             )

         # tool calls
@@ -607,7 +612,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                     # TODO: Assumes consistent state and that prev_content is subset of new_content
                     diff = new_content.replace(prev_content, "", 1)
                     self.current_json_parse_result = parsed_args
-                    processed_chunk = AssistantMessage(id=message_id, date=message_date, content=diff)
+                    processed_chunk = AssistantMessage(id=message_id, date=message_date, content=diff, name=name)
                 else:
                     return None

@@ -639,6 +644,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                     arguments=tool_call_delta.get("arguments"),
                     tool_call_id=tool_call_delta.get("id"),
                 ),
+                name=name,
             )

         elif self.inner_thoughts_in_kwargs and tool_call.function:
@@ -674,6 +680,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                     id=message_id,
                     date=message_date,
                     reasoning=updates_inner_thoughts,
+                    name=name,
                 )
                 # Additionally inner thoughts may stream back with a chunk of main JSON
                 # In that case, since we can only return a chunk at a time, we should buffer it
@@ -709,6 +716,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                         arguments=None,
                         tool_call_id=self.function_id_buffer,
                     ),
+                    name=name,
                 )

                 # Record what the last function name we flushed was
@@ -765,6 +773,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                         id=message_id,
                         date=message_date,
                         content=combined_chunk,
+                        name=name,
                     )
                     # Store the ID of the tool call so allow skipping the corresponding response
                     if self.function_id_buffer:
@@ -789,7 +798,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                         # TODO: Assumes consistent state and that prev_content is subset of new_content
                         diff = new_content.replace(prev_content, "", 1)
                         self.current_json_parse_result = parsed_args
-                        processed_chunk = AssistantMessage(id=message_id, date=message_date, content=diff)
+                        processed_chunk = AssistantMessage(id=message_id, date=message_date, content=diff, name=name)
                     else:
                         return None

@@ -813,6 +822,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                             arguments=combined_chunk,
                             tool_call_id=self.function_id_buffer,
                         ),
+                        name=name,
                     )
                     # clear buffer
                     self.function_args_buffer = None
@@ -827,6 +837,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                             arguments=updates_main_json,
                             tool_call_id=self.function_id_buffer,
                         ),
+                        name=name,
                     )
                     self.function_id_buffer = None

@@ -955,6 +966,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                     arguments=tool_call_delta.get("arguments"),
                     tool_call_id=tool_call_delta.get("id"),
                 ),
+                name=name,
             )

         elif choice.finish_reason is not None:
@@ -1035,6 +1047,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
         message_id: str,
         message_date: datetime,
         expect_reasoning_content: bool = False,
+        name: Optional[str] = None,
     ):
         """Process a streaming chunk from an OpenAI-compatible server.

@@ -1060,6 +1073,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
             message_id=message_id,
             message_date=message_date,
             expect_reasoning_content=expect_reasoning_content,
+            name=name,
         )

         if processed_chunk is None:
@@ -1087,6 +1101,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                 id=msg_obj.id,
                 date=msg_obj.created_at,
                 reasoning=msg,
+                name=msg_obj.name,
             )

             self._push_to_buffer(processed_chunk)
@@ -1097,6 +1112,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                     id=msg_obj.id,
                     date=msg_obj.created_at,
                     reasoning=content.text,
+                    name=msg_obj.name,
                 )
             elif isinstance(content, ReasoningContent):
                 processed_chunk = ReasoningMessage(
@@ -1105,6 +1121,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                     source="reasoner_model",
                     reasoning=content.reasoning,
                     signature=content.signature,
+                    name=msg_obj.name,
                 )
             elif isinstance(content, RedactedReasoningContent):
                 processed_chunk = HiddenReasoningMessage(
@@ -1112,6 +1129,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                     date=msg_obj.created_at,
                     state="redacted",
                     hidden_reasoning=content.data,
+                    name=msg_obj.name,
                 )

             self._push_to_buffer(processed_chunk)
@@ -1172,6 +1190,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                     id=msg_obj.id,
                     date=msg_obj.created_at,
                     content=func_args["message"],
+                    name=msg_obj.name,
                 )
                 self._push_to_buffer(processed_chunk)
             except Exception as e:
@@ -1194,6 +1213,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                     id=msg_obj.id,
                     date=msg_obj.created_at,
                     content=func_args[self.assistant_message_tool_kwarg],
+                    name=msg_obj.name,
                 )
                 # Store the ID of the tool call so allow skipping the corresponding response
                 self.prev_assistant_message_id = function_call.id
@@ -1206,6 +1226,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                     arguments=function_call.function.arguments,
                     tool_call_id=function_call.id,
                 ),
+                name=msg_obj.name,
            )

            # processed_chunk = {
@@ -1245,6 +1266,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                 tool_call_id=msg_obj.tool_call_id,
                 stdout=msg_obj.tool_returns[0].stdout if msg_obj.tool_returns else None,
                 stderr=msg_obj.tool_returns[0].stderr if msg_obj.tool_returns else None,
+                name=msg_obj.name,
             )

         elif msg.startswith("Error: "):
@@ -1259,6 +1281,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
                 tool_call_id=msg_obj.tool_call_id,
                 stdout=msg_obj.tool_returns[0].stdout if msg_obj.tool_returns else None,
                 stderr=msg_obj.tool_returns[0].stderr if msg_obj.tool_returns else None,
+                name=msg_obj.name,
             )

         else:
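
Taken together, the interface.py hunks above make one repeated change: every streamed message type (ReasoningMessage, HiddenReasoningMessage, ToolCallMessage, AssistantMessage, ToolReturnMessage) gains an optional sender name, so output from a multi-agent group can be attributed to the agent that produced it. A minimal sketch of the effect, assuming the schema classes referenced in the diff and that name defaults to None as the new keyword arguments do:

    from datetime import datetime, timezone

    from letta.schemas.letta_message import ReasoningMessage

    now = datetime.now(timezone.utc)

    # Before this change, streamed chunks carried no sender attribution.
    chunk = ReasoningMessage(id="message-123", date=now, reasoning="thinking...")
    assert chunk.name is None  # assumption: mirrors the new `name: Optional[str] = None` kwarg

    # After it, the interface can stamp each chunk with the originating agent,
    # as the `name=name` and `name=msg_obj.name` call sites above now do.
    chunk = ReasoningMessage(id="message-123", date=now, reasoning="thinking...", name="supervisor")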
letta/server/rest_api/optimistic_json_parser.py

@@ -10,7 +10,7 @@ class OptimisticJSONParser:
     tries to tolerate incomplete strings and incomplete numbers.
     """

-    def __init__(self, strict=True):
+    def __init__(self, strict=False):
        self.strict = strict
        self.parsers = {
            " ": self.parse_space,
letta/server/rest_api/routers/v1/agents.py

@@ -9,6 +9,7 @@ from marshmallow import ValidationError
 from pydantic import Field
 from sqlalchemy.exc import IntegrityError, OperationalError

+from letta.agents.letta_agent import LettaAgent
 from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG
 from letta.log import get_logger
 from letta.orm.errors import NoResultFound
@@ -19,6 +20,8 @@ from letta.schemas.letta_message import LettaMessageUnion, LettaMessageUpdateUnion
 from letta.schemas.letta_request import LettaRequest, LettaStreamingRequest
 from letta.schemas.letta_response import LettaResponse
 from letta.schemas.memory import ContextWindowOverview, CreateArchivalMemory, Memory
+from letta.schemas.message import MessageCreate
+from letta.schemas.openai.chat_completion_request import UserMessage
 from letta.schemas.passage import Passage, PassageUpdate
 from letta.schemas.run import Run
 from letta.schemas.source import Source
@@ -27,6 +30,7 @@ from letta.schemas.user import User
 from letta.serialize_schemas.pydantic_agent_schema import AgentSchema
 from letta.server.rest_api.utils import get_letta_server
 from letta.server.server import SyncServer
+from letta.settings import settings

 # These can be forward refs, but because Fastapi needs them at runtime the must be imported normally

@@ -523,6 +527,7 @@ def list_messages(
     after: Optional[str] = Query(None, description="Message after which to retrieve the returned messages."),
     before: Optional[str] = Query(None, description="Message before which to retrieve the returned messages."),
     limit: int = Query(10, description="Maximum number of messages to retrieve."),
+    group_id: Optional[str] = Query(None, description="Group ID to filter messages by."),
     use_assistant_message: bool = Query(True, description="Whether to use assistant messages"),
     assistant_message_tool_name: str = Query(DEFAULT_MESSAGE_TOOL, description="The name of the designated message tool."),
     assistant_message_tool_kwarg: str = Query(DEFAULT_MESSAGE_TOOL_KWARG, description="The name of the message argument."),
@@ -539,6 +544,7 @@ def list_messages(
         after=after,
         before=before,
         limit=limit,
+        group_id=group_id,
         reverse=True,
         return_message_object=False,
         use_assistant_message=use_assistant_message,
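
The new group_id query parameter lets a client scope an agent's message history to a single group. A hypothetical request; the base URL, port, and path prefix are assumptions, while the user_id header follows the alias used throughout these routers:

    import requests

    # assumption: default local server on port 8283 with the v1 prefix
    resp = requests.get(
        "http://localhost:8283/v1/agents/agent-123/messages",
        params={"group_id": "group-456", "limit": 10},
        headers={"user_id": "user-789"},
    )
    messages = resp.json()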
@@ -579,17 +585,32 @@ async def send_message(
     This endpoint accepts a message from a user and processes it through the agent.
     """
     actor = server.user_manager.get_user_or_default(user_id=actor_id)
-    result = await server.send_message_to_agent(
-        agent_id=agent_id,
-        actor=actor,
-        messages=request.messages,
-        stream_steps=False,
-        stream_tokens=False,
-        # Support for AssistantMessage
-        use_assistant_message=request.use_assistant_message,
-        assistant_message_tool_name=request.assistant_message_tool_name,
-        assistant_message_tool_kwarg=request.assistant_message_tool_kwarg,
-    )
+    if settings.use_experimental:
+        logger.warning("USING EXPERIMENTAL!")
+        experimental_agent = LettaAgent(
+            agent_id=agent_id,
+            message_manager=server.message_manager,
+            agent_manager=server.agent_manager,
+            block_manager=server.block_manager,
+            passage_manager=server.passage_manager,
+            actor=actor,
+        )
+
+        messages = request.messages
+        content = messages[0].content[0].text if messages and not isinstance(messages[0].content, str) else messages[0].content
+        result = await experimental_agent.step(UserMessage(content=content), max_steps=10)
+    else:
+        result = await server.send_message_to_agent(
+            agent_id=agent_id,
+            actor=actor,
+            messages=request.messages,
+            stream_steps=False,
+            stream_tokens=False,
+            # Support for AssistantMessage
+            use_assistant_message=request.use_assistant_message,
+            assistant_message_tool_name=request.assistant_message_tool_name,
+            assistant_message_tool_kwarg=request.assistant_message_tool_kwarg,
+        )
     return result


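The new branch is gated on settings.use_experimental and swaps server.send_message_to_agent for an in-process LettaAgent driven by step(). A sketch of the toggle; the flag name comes from this diff, but how it is normally set (for example through an environment variable read by letta/settings.py, which grows by 17 lines in this release) is an assumption:

    from letta.settings import settings

    # assumption: `use_experimental` is a plain boolean field on the settings
    # object; with pydantic-settings it would usually be driven by an
    # environment variable before the server starts.
    settings.use_experimental = True
    # From here on, send_message builds a LettaAgent and awaits
    # experimental_agent.step(UserMessage(content=...), max_steps=10).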
@@ -637,7 +658,7 @@ async def process_message_background(
     server: SyncServer,
     actor: User,
     agent_id: str,
-    messages: list,
+    messages: List[MessageCreate],
     use_assistant_message: bool,
     assistant_message_tool_name: str,
     assistant_message_tool_kwarg: str,
@@ -645,17 +666,30 @@
     """Background task to process the message and update job status."""
     try:
         # TODO(matt) we should probably make this stream_steps and log each step as it progresses, so the job update GET can see the total steps so far + partial usage?
-        result = await server.send_message_to_agent(
-            agent_id=agent_id,
-            actor=actor,
-            messages=messages,
-            stream_steps=False,  # NOTE(matt)
-            stream_tokens=False,
-            use_assistant_message=use_assistant_message,
-            assistant_message_tool_name=assistant_message_tool_name,
-            assistant_message_tool_kwarg=assistant_message_tool_kwarg,
-            metadata={"job_id": job_id},  # Pass job_id through metadata
-        )
+        if settings.use_experimental:
+            logger.warning("USING EXPERIMENTAL!")
+            experimental_agent = LettaAgent(
+                agent_id=agent_id,
+                message_manager=server.message_manager,
+                agent_manager=server.agent_manager,
+                block_manager=server.block_manager,
+                passage_manager=server.passage_manager,
+                actor=actor,
+            )
+            content = messages[0].content[0].text if messages and not isinstance(messages[0].content, str) else messages[0].content
+            result = await experimental_agent.step(UserMessage(content=content), max_steps=10)
+        else:
+            result = await server.send_message_to_agent(
+                agent_id=agent_id,
+                actor=actor,
+                messages=messages,
+                stream_steps=False,  # NOTE(matt)
+                stream_tokens=False,
+                use_assistant_message=use_assistant_message,
+                assistant_message_tool_name=assistant_message_tool_name,
+                assistant_message_tool_kwarg=assistant_message_tool_kwarg,
+                metadata={"job_id": job_id},  # Pass job_id through metadata
+            )

         # Update job status to completed
         job_update = JobUpdate(
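
Both endpoints normalize the first message with the same one-line ternary before handing it to the experimental agent. Unrolled, the logic reads as below; note that the inline form evaluates messages[0].content in its else branch, so an empty messages list raises an IndexError there (the early return here is an illustrative guard, not part of the diff):

    from typing import List, Optional

    def first_message_text(messages: List) -> Optional[str]:
        # Equivalent to:
        # messages[0].content[0].text if messages and not isinstance(messages[0].content, str) else messages[0].content
        if not messages:
            return None  # illustrative guard; the inline form would raise here
        content = messages[0].content
        if isinstance(content, str):
            return content
        return content[0].text  # structured content: take the first text part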
letta/server/rest_api/routers/v1/groups.py

@@ -1,11 +1,13 @@
 from typing import Annotated, List, Optional

-from fastapi import APIRouter, Body, Depends, Header, Query
+from fastapi import APIRouter, Body, Depends, Header, HTTPException, Query, status
+from fastapi.responses import JSONResponse
 from pydantic import Field

 from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG
-from letta.schemas.group import Group, GroupCreate, ManagerType
-from letta.schemas.letta_message import LettaMessageUnion
+from letta.orm.errors import NoResultFound
+from letta.schemas.group import Group, GroupCreate, GroupUpdate, ManagerType
+from letta.schemas.letta_message import LettaMessageUnion, LettaMessageUpdateUnion
 from letta.schemas.letta_request import LettaRequest, LettaStreamingRequest
 from letta.schemas.letta_response import LettaResponse
 from letta.server.rest_api.utils import get_letta_server
@@ -14,21 +16,6 @@ from letta.server.server import SyncServer
 router = APIRouter(prefix="/groups", tags=["groups"])


-@router.post("/", response_model=Group, operation_id="create_group")
-async def create_group(
-    server: SyncServer = Depends(get_letta_server),
-    request: GroupCreate = Body(...),
-    actor_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
-):
-    """
-    Create a multi-agent group with a specified management pattern. When no
-    management config is specified, this endpoint will use round robin for
-    speaker selection.
-    """
-    actor = server.user_manager.get_user_or_default(user_id=actor_id)
-    return server.group_manager.create_group(request, actor=actor)
-
-
 @router.get("/", response_model=List[Group], operation_id="list_groups")
 def list_groups(
     server: "SyncServer" = Depends(get_letta_server),
@@ -53,6 +40,23 @@
     )


+@router.get("/{group_id}", response_model=Group, operation_id="retrieve_group")
+def retrieve_group(
+    group_id: str,
+    server: "SyncServer" = Depends(get_letta_server),
+    actor_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
+):
+    """
+    Retrieve the group by id.
+    """
+    actor = server.user_manager.get_user_or_default(user_id=actor_id)
+
+    try:
+        return server.group_manager.retrieve_group(group_id=group_id, actor=actor)
+    except NoResultFound as e:
+        raise HTTPException(status_code=404, detail=str(e))
+
+
 @router.post("/", response_model=Group, operation_id="create_group")
 def create_group(
     group: GroupCreate = Body(...),
@@ -70,9 +74,10 @@
         raise HTTPException(status_code=500, detail=str(e))


-@router.put("/", response_model=Group, operation_id="upsert_group")
-def upsert_group(
-    group: GroupCreate = Body(...),
+@router.put("/{group_id}", response_model=Group, operation_id="modify_group")
+def modify_group(
+    group_id: str,
+    group: GroupUpdate = Body(...),
     server: "SyncServer" = Depends(get_letta_server),
     actor_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
     x_project: Optional[str] = Header(None, alias="X-Project"),  # Only handled by next js middleware
@@ -82,7 +87,7 @@
     """
     try:
         actor = server.user_manager.get_user_or_default(user_id=actor_id)
-        return server.group_manager.create_group(group, actor=actor)
+        return server.group_manager.modify_group(group_id=group_id, group_update=group, actor=actor)
     except Exception as e:
         raise HTTPException(status_code=500, detail=str(e))

@@ -110,7 +115,7 @@ def delete_group(
     operation_id="send_group_message",
 )
 async def send_group_message(
-    agent_id: str,
+    group_id: str,
     server: SyncServer = Depends(get_letta_server),
     request: LettaRequest = Body(...),
     actor_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
@@ -178,6 +183,22 @@ GroupMessagesResponse = Annotated[
 ]


+@router.patch("/{group_id}/messages/{message_id}", response_model=LettaMessageUnion, operation_id="modify_group_message")
+def modify_group_message(
+    group_id: str,
+    message_id: str,
+    request: LettaMessageUpdateUnion = Body(...),
+    server: "SyncServer" = Depends(get_letta_server),
+    actor_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
+):
+    """
+    Update the details of a message associated with an agent.
+    """
+    # TODO: support modifying tool calls/returns
+    actor = server.user_manager.get_user_or_default(user_id=actor_id)
+    return server.message_manager.update_message_by_letta_message(message_id=message_id, letta_message_update=request, actor=actor)
+
+
 @router.get("/{group_id}/messages", response_model=GroupMessagesResponse, operation_id="list_group_messages")
 def list_group_messages(
     group_id: str,
@@ -194,40 +215,42 @@ def list_group_messages(
     Retrieve message history for an agent.
     """
     actor = server.user_manager.get_user_or_default(user_id=actor_id)
-
-    return server.group_manager.list_group_messages(
-        group_id=group_id,
-        before=before,
-        after=after,
-        limit=limit,
-        actor=actor,
-        use_assistant_message=use_assistant_message,
-        assistant_message_tool_name=assistant_message_tool_name,
-        assistant_message_tool_kwarg=assistant_message_tool_kwarg,
-    )
+    group = server.group_manager.retrieve_group(group_id=group_id, actor=actor)
+    if group.manager_agent_id:
+        return server.get_agent_recall(
+            user_id=actor.id,
+            agent_id=group.manager_agent_id,
+            after=after,
+            before=before,
+            limit=limit,
+            group_id=group_id,
+            reverse=True,
+            return_message_object=False,
+            use_assistant_message=use_assistant_message,
+            assistant_message_tool_name=assistant_message_tool_name,
+            assistant_message_tool_kwarg=assistant_message_tool_kwarg,
+        )
+    else:
+        return server.group_manager.list_group_messages(
+            group_id=group_id,
+            after=after,
+            before=before,
+            limit=limit,
+            actor=actor,
+            use_assistant_message=use_assistant_message,
+            assistant_message_tool_name=assistant_message_tool_name,
+            assistant_message_tool_kwarg=assistant_message_tool_kwarg,
+        )


-'''
 @router.patch("/{group_id}/reset-messages", response_model=None, operation_id="reset_group_messages")
 def reset_group_messages(
     group_id: str,
-    add_default_initial_messages: bool = Query(default=False, description="If true, adds the default initial messages after resetting."),
     server: "SyncServer" = Depends(get_letta_server),
     actor_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
 ):
     """
-    Resets the messages for all agents that are part of the multi-agent group.
-    TODO: only delete group messages not all messages!
+    Delete the group messages for all agents that are part of the multi-agent group.
     """
     actor = server.user_manager.get_user_or_default(user_id=actor_id)
-    group = server.group_manager.retrieve_group(group_id=group_id, actor=actor)
-    agent_ids = group.agent_ids
-    if group.manager_agent_id:
-        agent_ids.append(group.manager_agent_id)
-    for agent_id in agent_ids:
-        server.agent_manager.reset_messages(
-            agent_id=agent_id,
-            actor=actor,
-            add_default_initial_messages=add_default_initial_messages,
-        )
-'''
+    server.group_manager.reset_messages(group_id=group_id, actor=actor)
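
With these changes the groups router exposes retrieve, modify, per-message update, and reset endpoints alongside create, list, delete, and send. A hypothetical client session against a local server; the base URL, port, and request bodies are placeholders rather than documented API examples:

    import requests

    BASE = "http://localhost:8283/v1/groups"  # assumption: v1 prefix, default port
    HEADERS = {"user_id": "user-789"}

    # GET /groups/{group_id} returns 404 (via NoResultFound) when the group is missing
    group = requests.get(f"{BASE}/group-123", headers=HEADERS).json()

    # PUT /groups/{group_id} is now a targeted modify taking a GroupUpdate body,
    # replacing the old upsert that reused GroupCreate
    requests.put(f"{BASE}/group-123", json={"description": "updated"}, headers=HEADERS)

    # PATCH /groups/{group_id}/messages/{message_id} edits a single message
    requests.patch(
        f"{BASE}/group-123/messages/message-456",
        json={"message_type": "user_message", "content": "edited"},
        headers=HEADERS,
    )

    # PATCH /groups/{group_id}/reset-messages now delegates to
    # group_manager.reset_messages instead of wiping every member agent's history
    requests.patch(f"{BASE}/group-123/reset-messages", headers=HEADERS)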
letta/server/rest_api/routers/v1/sources.py

@@ -4,6 +4,7 @@ from typing import List, Optional

 from fastapi import APIRouter, BackgroundTasks, Depends, Header, HTTPException, Query, UploadFile

+import letta.constants as constants
 from letta.schemas.file import FileMetadata
 from letta.schemas.job import Job
 from letta.schemas.passage import Passage
letta/server/rest_api/utils.py

@@ -211,7 +211,6 @@ def create_tool_call_messages_from_openai_response(
         tool_calls=[],
         tool_call_id=tool_call_id,
         created_at=get_utc_time(),
-        name=function_name,
     )
     messages.append(tool_message)