letta-nightly 0.6.5.dev20241220104040__py3-none-any.whl → 0.6.6.dev20241221104005__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of letta-nightly has been flagged as potentially problematic.

letta/chat_only_agent.py CHANGED
@@ -63,8 +63,8 @@ class ChatOnlyAgent(Agent):
         conversation_persona_block_new = Block(
             name="chat_agent_persona_new", label="chat_agent_persona_new", value=conversation_persona_block.value, limit=2000
         )
-
-        recent_convo = "".join([str(message) for message in self.messages[3:]])[-self.recent_convo_limit :]
+        in_context_messages = self.agent_manager.get_in_context_messages(agent_id=self.agent_state.id, actor=self.user)
+        recent_convo = "".join([str(message) for message in in_context_messages[3:]])[-self.recent_convo_limit :]
         conversation_messages_block = Block(
             name="conversation_block", label="conversation_block", value=recent_convo, limit=self.recent_convo_limit
         )
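
The refactor above swaps direct access to self.messages for the new agent_manager service layer, but the windowing logic itself is unchanged. A minimal, self-contained sketch of that logic, using toy strings as stand-ins for real Message objects (an assumption about how str(message) renders):

    # Toy stand-ins for the agent's in-context messages; the first three are
    # typically system/boot messages, which the [3:] slice skips.
    messages = [f"[message {i}] hello\n" for i in range(10)]
    recent_convo_limit = 80

    # Concatenate everything after the first three messages, then keep only the
    # last recent_convo_limit characters so the block never exceeds its limit.
    recent_convo = "".join(str(m) for m in messages[3:])[-recent_convo_limit:]
    assert len(recent_convo) <= recent_convo_limit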
letta/client/client.py CHANGED
@@ -2234,7 +2234,7 @@ class LocalClient(AbstractClient):
         """
         # TODO: add the ability to reset linked block_ids
         self.interface.clear()
-        agent_state = self.server.update_agent(
+        agent_state = self.server.agent_manager.update_agent(
             agent_id,
             UpdateAgent(
                 name=name,
@@ -2262,7 +2262,7 @@ class LocalClient(AbstractClient):
             List[Tool]: A list of Tool objs
         """
         self.interface.clear()
-        return self.server.get_tools_from_agent(agent_id=agent_id, user_id=self.user_id)
+        return self.server.agent_manager.get_agent_by_id(agent_id=agent_id, actor=self.user).tools

     def add_tool_to_agent(self, agent_id: str, tool_id: str):
         """
@@ -2276,7 +2276,7 @@ class LocalClient(AbstractClient):
             agent_state (AgentState): State of the updated agent
         """
         self.interface.clear()
-        agent_state = self.server.add_tool_to_agent(agent_id=agent_id, tool_id=tool_id, user_id=self.user_id)
+        agent_state = self.server.agent_manager.attach_tool(agent_id=agent_id, tool_id=tool_id, actor=self.user)
         return agent_state

     def remove_tool_from_agent(self, agent_id: str, tool_id: str):
@@ -2291,7 +2291,7 @@ class LocalClient(AbstractClient):
             agent_state (AgentState): State of the updated agent
         """
         self.interface.clear()
-        agent_state = self.server.remove_tool_from_agent(agent_id=agent_id, tool_id=tool_id, user_id=self.user_id)
+        agent_state = self.server.agent_manager.detach_tool(agent_id=agent_id, tool_id=tool_id, actor=self.user)
         return agent_state

     def rename_agent(self, agent_id: str, new_name: str):
@@ -2426,7 +2426,7 @@ class LocalClient(AbstractClient):
         Returns:
             messages (List[Message]): List of in-context messages
         """
-        return self.server.get_in_context_messages(agent_id=agent_id, actor=self.user)
+        return self.server.agent_manager.get_in_context_messages(agent_id=agent_id, actor=self.user)

     # agent interactions

@@ -2909,7 +2909,7 @@ class LocalClient(AbstractClient):
         job = self.server.job_manager.create_job(pydantic_job=job, actor=self.user)

         # TODO: implement blocking vs. non-blocking
-        self.server.load_file_to_source(source_id=source_id, file_path=filename, job_id=job.id)
+        self.server.load_file_to_source(source_id=source_id, file_path=filename, job_id=job.id, actor=self.user)
         return job

     def delete_file_from_source(self, source_id: str, file_id: str):
@@ -2987,7 +2987,11 @@ class LocalClient(AbstractClient):
             source_id (str): ID of the source
             source_name (str): Name of the source
         """
-        self.server.attach_source_to_agent(source_id=source_id, source_name=source_name, agent_id=agent_id, user_id=self.user_id)
+        if source_name:
+            source = self.server.source_manager.get_source_by_id(source_id=source_id, actor=self.user)
+            source_id = source.id
+
+        self.server.agent_manager.attach_source(source_id=source_id, agent_id=agent_id, actor=self.user)

     def detach_source_from_agent(self, agent_id: str, source_id: Optional[str] = None, source_name: Optional[str] = None):
         """
@@ -2999,7 +3003,10 @@ class LocalClient(AbstractClient):
         Returns:
             source (Source): Detached source
         """
-        return self.server.detach_source_from_agent(source_id=source_id, source_name=source_name, agent_id=agent_id, user_id=self.user_id)
+        if source_name:
+            source = self.server.source_manager.get_source_by_id(source_id=source_id, actor=self.user)
+            source_id = source.id
+        return self.server.agent_manager.detach_source(agent_id=agent_id, source_id=source_id, actor=self.user)

     def list_sources(self) -> List[Source]:
         """
@@ -3075,7 +3082,7 @@ class LocalClient(AbstractClient):
             agent_id (str): ID of the agent
             memory_id (str): ID of the memory
         """
-        self.server.delete_archival_memory(agent_id=agent_id, memory_id=memory_id, actor=self.user)
+        self.server.delete_archival_memory(memory_id=memory_id, actor=self.user)

     def get_archival_memory(
         self, agent_id: str, before: Optional[str] = None, after: Optional[str] = None, limit: Optional[int] = 1000
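
The LocalClient surface stays the same while the calls are re-routed through the new manager objects. A hedged usage sketch; client and agent construction are simplified and may differ by setup:

    from letta import create_client  # assumes the standard letta client factory

    client = create_client()
    agent = client.create_agent(name="demo-agent")

    # Same public API as before; internally this now resolves through
    # server.agent_manager.get_agent_by_id(...).tools
    tools = client.get_tools_from_agent(agent_id=agent.id)
    print([tool.name for tool in tools])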
letta/client/streaming.py CHANGED
@@ -59,8 +59,8 @@ def _sse_post(url: str, data: dict, headers: dict) -> Generator[LettaStreamingRe
                 yield ToolCallMessage(**chunk_data)
             elif "tool_return" in chunk_data:
                 yield ToolReturnMessage(**chunk_data)
-            elif "usage" in chunk_data:
-                yield LettaUsageStatistics(**chunk_data["usage"])
+            elif "step_count" in chunk_data:
+                yield LettaUsageStatistics(**chunk_data)
             else:
                 raise ValueError(f"Unknown message type in chunk_data: {chunk_data}")

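The dispatch change reflects the new wire format for usage chunks: the statistics now arrive flattened in the chunk itself (detectable by its step_count key) instead of nested under a usage key. A small sketch of both shapes:

    from letta.schemas.usage import LettaUsageStatistics

    old_chunk = {"usage": {"completion_tokens": 10, "prompt_tokens": 90, "total_tokens": 100, "step_count": 1}}
    new_chunk = {"message_type": "usage_statistics", "completion_tokens": 10, "prompt_tokens": 90, "total_tokens": 100, "step_count": 1}

    # Old dispatch: LettaUsageStatistics(**old_chunk["usage"])
    # New dispatch keys off "step_count" and passes the chunk through directly:
    if "step_count" in new_chunk:
        usage = LettaUsageStatistics(**new_chunk)
        assert usage.step_count == 1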
letta/constants.py CHANGED
@@ -69,6 +69,8 @@ INITIAL_BOOT_MESSAGE_SEND_MESSAGE_FIRST_MSG = STARTUP_QUOTES[2]

 CLI_WARNING_PREFIX = "Warning: "

+ERROR_MESSAGE_PREFIX = "Error"
+
 NON_USER_MSG_PREFIX = "[This is an automated system message hidden from the user] "

 # Constants to do with summarization / conversation length window
letta/main.py CHANGED
@@ -194,46 +194,6 @@ def run_agent_loop(
             print(f"Current model: {letta_agent.agent_state.llm_config.model}")
             continue

-        elif user_input.lower() == "/pop" or user_input.lower().startswith("/pop "):
-            # Check if there's an additional argument that's an integer
-            command = user_input.strip().split()
-            pop_amount = int(command[1]) if len(command) > 1 and command[1].isdigit() else 3
-            try:
-                popped_messages = letta_agent.pop_message(count=pop_amount)
-            except ValueError as e:
-                print(f"Error popping messages: {e}")
-            continue
-
-        elif user_input.lower() == "/retry":
-            print(f"Retrying for another answer...")
-            try:
-                letta_agent.retry_message()
-            except Exception as e:
-                print(f"Error retrying message: {e}")
-            continue
-
-        elif user_input.lower() == "/rethink" or user_input.lower().startswith("/rethink "):
-            if len(user_input) < len("/rethink "):
-                print("Missing text after the command")
-                continue
-            try:
-                letta_agent.rethink_message(new_thought=user_input[len("/rethink ") :].strip())
-            except Exception as e:
-                print(f"Error rethinking message: {e}")
-            continue
-
-        elif user_input.lower() == "/rewrite" or user_input.lower().startswith("/rewrite "):
-            if len(user_input) < len("/rewrite "):
-                print("Missing text after the command")
-                continue
-
-            text = user_input[len("/rewrite ") :].strip()
-            try:
-                letta_agent.rewrite_message(new_text=text)
-            except Exception as e:
-                print(f"Error rewriting message: {e}")
-            continue
-
         elif user_input.lower() == "/summarize":
             try:
                 letta_agent.summarize_messages_inplace()
@@ -319,42 +279,6 @@ def run_agent_loop(
                 questionary.print(cmd, "bold")
                 questionary.print(f" {desc}")
             continue
-
-        elif user_input.lower().startswith("/systemswap"):
-            if len(user_input) < len("/systemswap "):
-                print("Missing new system prompt after the command")
-                continue
-            old_system_prompt = letta_agent.system
-            new_system_prompt = user_input[len("/systemswap ") :].strip()
-
-            # Show warning and prompts to user
-            typer.secho(
-                "\nWARNING: You are about to change the system prompt.",
-                # fg=typer.colors.BRIGHT_YELLOW,
-                bold=True,
-            )
-            typer.secho(
-                f"\nOld system prompt:\n{old_system_prompt}",
-                fg=typer.colors.RED,
-                bold=True,
-            )
-            typer.secho(
-                f"\nNew system prompt:\n{new_system_prompt}",
-                fg=typer.colors.GREEN,
-                bold=True,
-            )
-
-            # Ask for confirmation
-            confirm = questionary.confirm("Do you want to proceed with the swap?").ask()
-
-            if confirm:
-                letta_agent.update_system_prompt(new_system_prompt=new_system_prompt)
-                print("System prompt updated successfully.")
-            else:
-                print("System prompt swap cancelled.")
-
-            continue
-
         else:
             print(f"Unrecognized command: {user_input}")
             continue
letta/offline_memory_agent.py CHANGED
@@ -129,9 +129,8 @@ class OfflineMemoryAgent(Agent):
         # extras
         first_message_verify_mono: bool = False,
         max_memory_rethinks: int = 10,
-        initial_message_sequence: Optional[List[Message]] = None,
     ):
-        super().__init__(interface, agent_state, user, initial_message_sequence=initial_message_sequence)
+        super().__init__(interface, agent_state, user)
         self.first_message_verify_mono = first_message_verify_mono
         self.max_memory_rethinks = max_memory_rethinks

letta/orm/source.py CHANGED
@@ -11,10 +11,10 @@ from letta.schemas.embedding_config import EmbeddingConfig
 from letta.schemas.source import Source as PydanticSource

 if TYPE_CHECKING:
-    from letta.orm.organization import Organization
+    from letta.orm.agent import Agent
     from letta.orm.file import FileMetadata
+    from letta.orm.organization import Organization
     from letta.orm.passage import SourcePassage
-    from letta.orm.agent import Agent


 class Source(SqlalchemyBase, OrganizationMixin):
@@ -32,4 +32,11 @@ class Source(SqlalchemyBase, OrganizationMixin):
     organization: Mapped["Organization"] = relationship("Organization", back_populates="sources")
     files: Mapped[List["FileMetadata"]] = relationship("FileMetadata", back_populates="source", cascade="all, delete-orphan")
     passages: Mapped[List["SourcePassage"]] = relationship("SourcePassage", back_populates="source", cascade="all, delete-orphan")
-    agents: Mapped[List["Agent"]] = relationship("Agent", secondary="sources_agents", back_populates="sources")
+    agents: Mapped[List["Agent"]] = relationship(
+        "Agent",
+        secondary="sources_agents",
+        back_populates="sources",
+        lazy="selectin",
+        cascade="all, delete",  # Ensures rows in sources_agents are deleted when the source is deleted
+        passive_deletes=True,  # Allows the database to handle deletion of orphaned rows
+    )
letta/orm/sources_agents.py CHANGED
@@ -9,5 +9,5 @@ class SourcesAgents(Base):

     __tablename__ = "sources_agents"

-    agent_id: Mapped[String] = mapped_column(String, ForeignKey("agents.id"), primary_key=True)
-    source_id: Mapped[String] = mapped_column(String, ForeignKey("sources.id"), primary_key=True)
+    agent_id: Mapped[String] = mapped_column(String, ForeignKey("agents.id", ondelete="CASCADE"), primary_key=True)
+    source_id: Mapped[String] = mapped_column(String, ForeignKey("sources.id", ondelete="CASCADE"), primary_key=True)
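
Two mechanisms cooperate in these ORM changes: ondelete="CASCADE" on the association table's foreign keys lets the database remove sources_agents rows itself, and passive_deletes=True tells SQLAlchemy not to issue its own DELETEs for them. A standalone sketch of the pattern with simplified models (not letta's actual ORM; the diff additionally sets lazy="selectin" and an ORM-level cascade, which the sketch omits):

    from typing import List

    from sqlalchemy import Column, ForeignKey, String, Table, create_engine, event
    from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column, relationship

    class Base(DeclarativeBase):
        pass

    sources_agents = Table(
        "sources_agents",
        Base.metadata,
        Column("agent_id", String, ForeignKey("agents.id", ondelete="CASCADE"), primary_key=True),
        Column("source_id", String, ForeignKey("sources.id", ondelete="CASCADE"), primary_key=True),
    )

    class Agent(Base):
        __tablename__ = "agents"
        id: Mapped[str] = mapped_column(String, primary_key=True)
        sources: Mapped[List["Source"]] = relationship(secondary=sources_agents, back_populates="agents")

    class Source(Base):
        __tablename__ = "sources"
        id: Mapped[str] = mapped_column(String, primary_key=True)
        agents: Mapped[List[Agent]] = relationship(
            secondary=sources_agents,
            back_populates="sources",
            passive_deletes=True,  # defer association-row cleanup to ON DELETE CASCADE
        )

    engine = create_engine("sqlite://")

    @event.listens_for(engine, "connect")
    def _enable_fks(dbapi_conn, _record):
        dbapi_conn.execute("PRAGMA foreign_keys=ON")  # SQLite enforces FKs only when asked

    Base.metadata.create_all(engine)

    with Session(engine) as session:
        agent, source = Agent(id="agent-1"), Source(id="source-1")
        source.agents.append(agent)
        session.add_all([agent, source])
        session.commit()

        session.delete(source)  # the DB cascades away the sources_agents row
        session.commit()
        assert session.get(Agent, "agent-1") is not None  # the agent itself survives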
letta/providers.py CHANGED
@@ -27,6 +27,10 @@ class Provider(BaseModel):
     def provider_tag(self) -> str:
         """String representation of the provider for display purposes"""
         raise NotImplementedError
+
+    def get_handle(self, model_name: str) -> str:
+        return f"{self.name}/{model_name}"
+


 class LettaProvider(Provider):
@@ -40,6 +44,7 @@ class LettaProvider(Provider):
                 model_endpoint_type="openai",
                 model_endpoint="https://inference.memgpt.ai",
                 context_window=16384,
+                handle=self.get_handle("letta-free"),
             )
         ]

@@ -51,6 +56,7 @@ class LettaProvider(Provider):
                 embedding_endpoint="https://embeddings.memgpt.ai",
                 embedding_dim=1024,
                 embedding_chunk_size=300,
+                handle=self.get_handle("letta-free"),
             )
         ]

@@ -115,7 +121,7 @@ class OpenAIProvider(Provider):
                 # continue
             configs.append(
-                LLMConfig(model=model_name, model_endpoint_type="openai", model_endpoint=self.base_url, context_window=context_window_size)
+                LLMConfig(model=model_name, model_endpoint_type="openai", model_endpoint=self.base_url, context_window=context_window_size, handle=self.get_handle(model_name))
             )

         # for OpenAI, sort in reverse order
@@ -135,6 +141,7 @@ class OpenAIProvider(Provider):
                 embedding_endpoint="https://api.openai.com/v1",
                 embedding_dim=1536,
                 embedding_chunk_size=300,
+                handle=self.get_handle("text-embedding-ada-002"),
             )
         ]

@@ -163,6 +170,7 @@ class AnthropicProvider(Provider):
                     model_endpoint_type="anthropic",
                     model_endpoint=self.base_url,
                     context_window=model["context_window"],
+                    handle=self.get_handle(model["name"]),
                 )
             )
         return configs
@@ -195,6 +203,7 @@ class MistralProvider(Provider):
                         model_endpoint_type="openai",
                         model_endpoint=self.base_url,
                         context_window=model["max_context_length"],
+                        handle=self.get_handle(model["id"]),
                     )
                 )

@@ -250,6 +259,7 @@ class OllamaProvider(OpenAIProvider):
                     model_endpoint=self.base_url,
                     model_wrapper=self.default_prompt_formatter,
                     context_window=context_window,
+                    handle=self.get_handle(model["name"]),
                 )
             )
         return configs
@@ -325,6 +335,7 @@ class OllamaProvider(OpenAIProvider):
                     embedding_endpoint=self.base_url,
                     embedding_dim=embedding_dim,
                     embedding_chunk_size=300,
+                    handle=self.get_handle(model["name"]),
                 )
             )
         return configs
@@ -345,7 +356,7 @@ class GroqProvider(OpenAIProvider):
                 continue
             configs.append(
                 LLMConfig(
-                    model=model["id"], model_endpoint_type="groq", model_endpoint=self.base_url, context_window=model["context_window"]
+                    model=model["id"], model_endpoint_type="groq", model_endpoint=self.base_url, context_window=model["context_window"], handle=self.get_handle(model["id"])
                 )
             )
         return configs
@@ -413,6 +424,7 @@ class TogetherProvider(OpenAIProvider):
                     model_endpoint=self.base_url,
                     model_wrapper=self.default_prompt_formatter,
                     context_window=context_window_size,
+                    handle=self.get_handle(model_name),
                 )
             )

@@ -493,6 +505,7 @@ class GoogleAIProvider(Provider):
                     model_endpoint_type="google_ai",
                     model_endpoint=self.base_url,
                     context_window=self.get_model_context_window(model),
+                    handle=self.get_handle(model),
                 )
             )
         return configs
@@ -516,6 +529,7 @@ class GoogleAIProvider(Provider):
                     embedding_endpoint=self.base_url,
                     embedding_dim=768,
                     embedding_chunk_size=300,  # NOTE: max is 2048
+                    handle=self.get_handle(model),
                 )
             )
         return configs
@@ -556,7 +570,7 @@ class AzureProvider(Provider):
             context_window_size = self.get_model_context_window(model_name)
             model_endpoint = get_azure_chat_completions_endpoint(self.base_url, model_name, self.api_version)
             configs.append(
-                LLMConfig(model=model_name, model_endpoint_type="azure", model_endpoint=model_endpoint, context_window=context_window_size)
+                LLMConfig(model=model_name, model_endpoint_type="azure", model_endpoint=model_endpoint, context_window=context_window_size, handle=self.get_handle(model_name))
             )
         return configs

@@ -577,6 +591,7 @@ class AzureProvider(Provider):
                     embedding_endpoint=model_endpoint,
                     embedding_dim=768,
                     embedding_chunk_size=300,  # NOTE: max is 2048
+                    handle=self.get_handle(model_name),
                 )
             )
         return configs
@@ -610,6 +625,7 @@ class VLLMChatCompletionsProvider(Provider):
                     model_endpoint_type="openai",
                     model_endpoint=self.base_url,
                     context_window=model["max_model_len"],
+                    handle=self.get_handle(model["id"]),
                 )
             )
         return configs
@@ -642,6 +658,7 @@ class VLLMCompletionsProvider(Provider):
                     model_endpoint=self.base_url,
                     model_wrapper=self.default_prompt_formatter,
                     context_window=model["max_model_len"],
+                    handle=self.get_handle(model["id"]),
                 )
             )
         return configs
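
The new get_handle helper on the Provider base class gives every synthesized config a stable "provider/model-name" identifier. A hedged sketch of the behavior (constructor arguments are illustrative; required fields vary by provider):

    from letta.providers import OpenAIProvider  # assumes this import path

    provider = OpenAIProvider(api_key="sk-dummy")  # hypothetical minimal construction
    print(provider.get_handle("gpt-4o-mini"))      # -> "openai/gpt-4o-mini"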
letta/schemas/agent.py CHANGED
@@ -118,6 +118,7 @@ class CreateAgent(BaseModel, validate_assignment=True):  #
     )
     context_window_limit: Optional[int] = Field(None, description="The context window limit used by the agent.")
     embedding_chunk_size: Optional[int] = Field(DEFAULT_EMBEDDING_CHUNK_SIZE, description="The embedding chunk size used by the agent.")
+    from_template: Optional[str] = Field(None, description="The template id used to configure the agent")

     @field_validator("name")
     @classmethod
letta/schemas/embedding_config.py CHANGED
@@ -43,6 +43,7 @@ class EmbeddingConfig(BaseModel):
     embedding_model: str = Field(..., description="The model for the embedding.")
     embedding_dim: int = Field(..., description="The dimension of the embedding.")
     embedding_chunk_size: Optional[int] = Field(300, description="The chunk size of the embedding.")
+    handle: Optional[str] = Field(None, description="The handle for this config, in the format provider/model-name.")

     # azure only
     azure_endpoint: Optional[str] = Field(None, description="The Azure endpoint for the model.")
letta/schemas/llm_config.py CHANGED
@@ -44,6 +44,7 @@ class LLMConfig(BaseModel):
         True,
         description="Puts 'inner_thoughts' as a kwarg in the function call if this is set to True. This helps with function calling performance and also the generation of inner thoughts.",
     )
+    handle: Optional[str] = Field(None, description="The handle for this config, in the format provider/model-name.")

     # FIXME hack to silence pydantic protected namespace warning
     model_config = ConfigDict(protected_namespaces=())
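
A hedged sketch of the new optional field on LLMConfig (EmbeddingConfig gains the same field); the import path is assumed and the model values are illustrative:

    from letta.schemas.llm_config import LLMConfig  # assumes this import path

    config = LLMConfig(
        model="gpt-4o-mini",
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",
        context_window=128000,
        handle="openai/gpt-4o-mini",  # provider/model-name, typically set via Provider.get_handle
    )
    print(config.handle)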
letta/schemas/usage.py CHANGED
@@ -1,3 +1,4 @@
+from typing import Literal

 from pydantic import BaseModel, Field

@@ -11,7 +12,7 @@ class LettaUsageStatistics(BaseModel):
         total_tokens (int): The total number of tokens processed by the agent.
         step_count (int): The number of steps taken by the agent.
     """
-
+    message_type: Literal["usage_statistics"] = "usage_statistics"
     completion_tokens: int = Field(0, description="The number of tokens generated by the agent.")
     prompt_tokens: int = Field(0, description="The number of tokens in the prompt.")
     total_tokens: int = Field(0, description="The total number of tokens processed by the agent.")
letta/server/rest_api/interface.py CHANGED
@@ -238,7 +238,7 @@ class QueuingInterface(AgentInterface):
             new_message = {"function_return": msg, "status": "success"}

         elif msg.startswith("Error: "):
-            msg = msg.replace("Error: ", "")
+            msg = msg.replace("Error: ", "", 1)
             new_message = {"function_return": msg, "status": "error"}

         else:
@@ -951,7 +951,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
             )

         elif msg.startswith("Error: "):
-            msg = msg.replace("Error: ", "")
+            msg = msg.replace("Error: ", "", 1)
             # new_message = {"function_return": msg, "status": "error"}
             assert msg_obj.tool_call_id is not None
             new_message = ToolReturnMessage(
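
The count argument matters here because a tool's error text can itself contain the prefix; only the leading occurrence should be stripped:

    msg = "Error: upstream call failed with Error: timeout"

    print(msg.replace("Error: ", ""))     # "upstream call failed with timeout"         (both stripped)
    print(msg.replace("Error: ", "", 1))  # "upstream call failed with Error: timeout"  (prefix only)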
letta/server/rest_api/routers/v1/agents.py CHANGED
@@ -104,7 +104,7 @@ def get_agent_context_window(
     """
     actor = server.user_manager.get_user_or_default(user_id=user_id)

-    return server.get_agent_context_window(user_id=actor.id, agent_id=agent_id)
+    return server.get_agent_context_window(agent_id=agent_id, actor=actor)


 class CreateAgentRequest(CreateAgent):
@@ -138,7 +138,7 @@ def update_agent(
 ):
     """Update an existing agent"""
     actor = server.user_manager.get_user_or_default(user_id=user_id)
-    return server.update_agent(agent_id, update_agent, actor=actor)
+    return server.agent_manager.update_agent(agent_id=agent_id, agent_update=update_agent, actor=actor)


 @router.get("/{agent_id}/tools", response_model=List[Tool], operation_id="get_tools_from_agent")
@@ -149,7 +149,7 @@ def get_tools_from_agent(
 ):
     """Get tools from an existing agent"""
     actor = server.user_manager.get_user_or_default(user_id=user_id)
-    return server.get_tools_from_agent(agent_id=agent_id, user_id=actor.id)
+    return server.agent_manager.get_agent_by_id(agent_id=agent_id, actor=actor).tools


 @router.patch("/{agent_id}/add-tool/{tool_id}", response_model=AgentState, operation_id="add_tool_to_agent")
@@ -161,7 +161,7 @@ def add_tool_to_agent(
 ):
     """Add tools to an existing agent"""
     actor = server.user_manager.get_user_or_default(user_id=user_id)
-    return server.add_tool_to_agent(agent_id=agent_id, tool_id=tool_id, user_id=actor.id)
+    return server.agent_manager.attach_tool(agent_id=agent_id, tool_id=tool_id, actor=actor)


 @router.patch("/{agent_id}/remove-tool/{tool_id}", response_model=AgentState, operation_id="remove_tool_from_agent")
@@ -173,7 +173,7 @@ def remove_tool_from_agent(
 ):
     """Remove tools from an existing agent"""
     actor = server.user_manager.get_user_or_default(user_id=user_id)
-    return server.remove_tool_from_agent(agent_id=agent_id, tool_id=tool_id, user_id=actor.id)
+    return server.agent_manager.detach_tool(agent_id=agent_id, tool_id=tool_id, actor=actor)


 @router.get("/{agent_id}", response_model=AgentState, operation_id="get_agent")
@@ -232,7 +232,7 @@ def get_agent_in_context_messages(
     Retrieve the messages in the context of a specific agent.
     """
     actor = server.user_manager.get_user_or_default(user_id=user_id)
-    return server.get_in_context_messages(agent_id=agent_id, actor=actor)
+    return server.agent_manager.get_in_context_messages(agent_id=agent_id, actor=actor)


 # TODO: remove? can also get with agent blocks
@@ -429,7 +429,7 @@ def delete_agent_archival_memory(
     """
     actor = server.user_manager.get_user_or_default(user_id=user_id)

-    server.delete_archival_memory(agent_id=agent_id, memory_id=memory_id, actor=actor)
+    server.delete_archival_memory(memory_id=memory_id, actor=actor)
     return JSONResponse(status_code=status.HTTP_200_OK, content={"message": f"Memory id={memory_id} successfully deleted"})


@@ -479,8 +479,9 @@ def update_message(
     """
     Update the details of a message associated with an agent.
     """
+    # TODO: Get rid of agent_id here, it's not really relevant
     actor = server.user_manager.get_user_or_default(user_id=user_id)
-    return server.update_agent_message(agent_id=agent_id, message_id=message_id, request=request, actor=actor)
+    return server.message_manager.update_message_by_id(message_id=message_id, message_update=request, actor=actor)


 @router.post(
letta/server/rest_api/routers/v1/sources.py CHANGED
@@ -130,11 +130,8 @@ def attach_source_to_agent(
     Attach a data source to an existing agent.
     """
     actor = server.user_manager.get_user_or_default(user_id=user_id)
-
-    source = server.source_manager.get_source_by_id(source_id=source_id, actor=actor)
-    assert source is not None, f"Source with id={source_id} not found."
-    source = server.attach_source_to_agent(source_id=source.id, agent_id=agent_id, user_id=actor.id)
-    return source
+    server.agent_manager.attach_source(source_id=source_id, agent_id=agent_id, actor=actor)
+    return server.source_manager.get_source_by_id(source_id=source_id, actor=actor)


 @router.post("/{source_id}/detach", response_model=Source, operation_id="detach_agent_from_source")
@@ -148,8 +145,8 @@ def detach_source_from_agent(
     Detach a data source from an existing agent.
     """
     actor = server.user_manager.get_user_or_default(user_id=user_id)
-
-    return server.detach_source_from_agent(source_id=source_id, agent_id=agent_id, user_id=actor.id)
+    server.agent_manager.detach_source(agent_id=agent_id, source_id=source_id, actor=actor)
+    return server.source_manager.get_source_by_id(source_id=source_id, actor=actor)


 @router.post("/{source_id}/upload", response_model=Job, operation_id="upload_file_to_source")
letta/server/rest_api/routers/v1/tools.py CHANGED
@@ -243,6 +243,7 @@ def add_composio_tool(
         raise HTTPException(
             status_code=400,  # Bad Request
             detail={
+                "code": "EnumStringNotFound",
                 "message": f"Cannot find composio action with name `{composio_action_name}`.",
                 "composio_action_name": composio_action_name,
             },
@@ -251,6 +252,7 @@ def add_composio_tool(
         raise HTTPException(
             status_code=400,  # Bad Request
             detail={
+                "code": "ComposioSDKError",
                 "message": f"No connected account found for tool `{composio_action_name}`. You need to connect the relevant app in Composio in order to use the tool.",
                 "composio_action_name": composio_action_name,
             },
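
With the added "code" field, API clients can branch on a stable identifier instead of parsing the human-readable message. A hedged sketch of handling the 400 response; the route and action name are illustrative, not taken from this diff:

    import requests

    resp = requests.post("http://localhost:8283/v1/tools/composio/SOME_ACTION")  # hypothetical route
    if resp.status_code == 400:
        detail = resp.json()["detail"]
        if detail.get("code") == "EnumStringNotFound":
            print(f"Unknown composio action: {detail['composio_action_name']}")
        elif detail.get("code") == "ComposioSDKError":
            print("Connect the relevant app in Composio first.")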
letta/server/rest_api/utils.py CHANGED
@@ -61,7 +61,7 @@ async def sse_async_generator(
         # Double-check the type
         if not isinstance(usage, LettaUsageStatistics):
             raise ValueError(f"Expected LettaUsageStatistics, got {type(usage)}")
-        yield sse_formatter({"usage": usage.model_dump()})
+        yield sse_formatter(usage.model_dump())

     except ContextWindowExceededError as e:
         log_error_to_sentry(e)
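
Taken together with the streaming-client and usage-schema changes above, the final SSE event is now the flattened statistics object. A sketch of the resulting wire format; sse_formatter's real implementation is assumed to be a standard data-line encoder:

    import json

    def sse_formatter(data: dict) -> str:
        # assumed behavior: one SSE "data:" line per event
        return f"data: {json.dumps(data)}\n\n"

    usage = {"message_type": "usage_statistics", "completion_tokens": 10, "prompt_tokens": 90, "total_tokens": 100, "step_count": 1}
    print(sse_formatter(usage))
    # Previously the same event was wrapped as {"usage": {...}}; consumers now
    # key off "step_count" / "message_type" instead.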