letta-nightly 0.6.50.dev20250411104155__py3-none-any.whl → 0.6.52.dev20250412051016__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. letta/__init__.py +1 -1
  2. letta/agent.py +23 -32
  3. letta/agents/base_agent.py +17 -6
  4. letta/agents/ephemeral_agent.py +5 -6
  5. letta/agents/ephemeral_memory_agent.py +8 -10
  6. letta/agents/helpers.py +6 -6
  7. letta/agents/letta_agent.py +9 -10
  8. letta/agents/letta_agent_batch.py +164 -0
  9. letta/agents/voice_agent.py +8 -8
  10. letta/functions/function_sets/base.py +1 -1
  11. letta/helpers/converters.py +5 -2
  12. letta/helpers/tool_rule_solver.py +12 -2
  13. letta/jobs/scheduler.py +13 -11
  14. letta/llm_api/anthropic.py +0 -1
  15. letta/llm_api/anthropic_client.py +61 -23
  16. letta/llm_api/cohere.py +1 -1
  17. letta/llm_api/google_ai_client.py +48 -13
  18. letta/llm_api/google_vertex_client.py +19 -1
  19. letta/llm_api/llm_client_base.py +13 -5
  20. letta/llm_api/openai.py +4 -3
  21. letta/llm_api/openai_client.py +18 -10
  22. letta/orm/organization.py +4 -2
  23. letta/orm/sqlalchemy_base.py +3 -0
  24. letta/schemas/enums.py +1 -0
  25. letta/schemas/group.py +30 -1
  26. letta/schemas/identity.py +10 -0
  27. letta/schemas/letta_request.py +4 -0
  28. letta/schemas/letta_response.py +9 -1
  29. letta/schemas/llm_config.py +10 -0
  30. letta/schemas/message.py +21 -12
  31. letta/schemas/openai/chat_completion_request.py +1 -0
  32. letta/schemas/tool_rule.py +14 -1
  33. letta/server/rest_api/interface.py +5 -4
  34. letta/server/rest_api/routers/v1/agents.py +20 -13
  35. letta/server/rest_api/routers/v1/groups.py +1 -1
  36. letta/server/rest_api/routers/v1/identities.py +23 -2
  37. letta/server/rest_api/utils.py +20 -22
  38. letta/server/server.py +34 -21
  39. letta/services/agent_manager.py +13 -9
  40. letta/services/block_manager.py +2 -4
  41. letta/services/identity_manager.py +21 -5
  42. letta/services/llm_batch_manager.py +21 -1
  43. letta/services/summarizer/summarizer.py +11 -4
  44. letta/services/tool_manager.py +1 -1
  45. letta/settings.py +1 -0
  46. letta/utils.py +2 -2
  47. {letta_nightly-0.6.50.dev20250411104155.dist-info → letta_nightly-0.6.52.dev20250412051016.dist-info}/METADATA +3 -3
  48. {letta_nightly-0.6.50.dev20250411104155.dist-info → letta_nightly-0.6.52.dev20250412051016.dist-info}/RECORD +51 -50
  49. {letta_nightly-0.6.50.dev20250411104155.dist-info → letta_nightly-0.6.52.dev20250412051016.dist-info}/LICENSE +0 -0
  50. {letta_nightly-0.6.50.dev20250411104155.dist-info → letta_nightly-0.6.52.dev20250412051016.dist-info}/WHEEL +0 -0
  51. {letta_nightly-0.6.50.dev20250411104155.dist-info → letta_nightly-0.6.52.dev20250412051016.dist-info}/entry_points.txt +0 -0
letta/server/server.py CHANGED
@@ -95,7 +95,7 @@ from letta.services.tool_manager import ToolManager
95
95
  from letta.services.user_manager import UserManager
96
96
  from letta.settings import model_settings, settings, tool_settings
97
97
  from letta.sleeptime_agent import SleeptimeAgent
98
- from letta.tracing import trace_method
98
+ from letta.tracing import log_event, trace_method
99
99
  from letta.utils import get_friendly_error_msg, make_key
100
100
 
101
101
  config = LettaConfig.load()
@@ -706,18 +706,21 @@ class SyncServer(Server):
706
706
  command = command[1:] # strip the prefix
707
707
  return self._command(user_id=user_id, agent_id=agent_id, command=command)
708
708
 
709
+ @trace_method
709
710
  def get_cached_llm_config(self, **kwargs):
710
711
  key = make_key(**kwargs)
711
712
  if key not in self._llm_config_cache:
712
713
  self._llm_config_cache[key] = self.get_llm_config_from_handle(**kwargs)
713
714
  return self._llm_config_cache[key]
714
715
 
716
+ @trace_method
715
717
  def get_cached_embedding_config(self, **kwargs):
716
718
  key = make_key(**kwargs)
717
719
  if key not in self._embedding_config_cache:
718
720
  self._embedding_config_cache[key] = self.get_embedding_config_from_handle(**kwargs)
719
721
  return self._embedding_config_cache[key]
720
722
 
723
+ @trace_method
721
724
  def create_agent(
722
725
  self,
723
726
  request: CreateAgent,
@@ -728,26 +731,34 @@ class SyncServer(Server):
728
731
  if request.llm_config is None:
729
732
  if request.model is None:
730
733
  raise ValueError("Must specify either model or llm_config in request")
731
- request.llm_config = self.get_cached_llm_config(
732
- handle=request.model,
733
- context_window_limit=request.context_window_limit,
734
- max_tokens=request.max_tokens,
735
- max_reasoning_tokens=request.max_reasoning_tokens,
736
- enable_reasoner=request.enable_reasoner,
737
- )
734
+ config_params = {
735
+ "handle": request.model,
736
+ "context_window_limit": request.context_window_limit,
737
+ "max_tokens": request.max_tokens,
738
+ "max_reasoning_tokens": request.max_reasoning_tokens,
739
+ "enable_reasoner": request.enable_reasoner,
740
+ }
741
+ log_event(name="start get_cached_llm_config", attributes=config_params)
742
+ request.llm_config = self.get_cached_llm_config(**config_params)
743
+ log_event(name="end get_cached_llm_config", attributes=config_params)
738
744
 
739
745
  if request.embedding_config is None:
740
746
  if request.embedding is None:
741
747
  raise ValueError("Must specify either embedding or embedding_config in request")
742
- request.embedding_config = self.get_cached_embedding_config(
743
- handle=request.embedding,
744
- embedding_chunk_size=request.embedding_chunk_size or constants.DEFAULT_EMBEDDING_CHUNK_SIZE,
745
- )
746
-
748
+ embedding_config_params = {
749
+ "handle": request.embedding,
750
+ "embedding_chunk_size": request.embedding_chunk_size or constants.DEFAULT_EMBEDDING_CHUNK_SIZE,
751
+ }
752
+ log_event(name="start get_cached_embedding_config", attributes=embedding_config_params)
753
+ request.embedding_config = self.get_cached_embedding_config(**embedding_config_params)
754
+ log_event(name="end get_cached_embedding_config", attributes=embedding_config_params)
755
+
756
+ log_event(name="start create_agent db")
747
757
  main_agent = self.agent_manager.create_agent(
748
758
  agent_create=request,
749
759
  actor=actor,
750
760
  )
761
+ log_event(name="end create_agent db")
751
762
 
752
763
  if request.enable_sleeptime:
753
764
  main_agent = self.create_sleeptime_agent(main_agent=main_agent, actor=actor)
@@ -1108,6 +1119,7 @@ class SyncServer(Server):
1108
1119
  # Merge the two dictionaries, keeping the values from providers_from_db where conflicts occur
1109
1120
  return {**providers_from_env, **providers_from_db}.values()
1110
1121
 
1122
+ @trace_method
1111
1123
  def get_llm_config_from_handle(
1112
1124
  self,
1113
1125
  handle: str,
@@ -1157,6 +1169,7 @@ class SyncServer(Server):
1157
1169
 
1158
1170
  return llm_config
1159
1171
 
1172
+ @trace_method
1160
1173
  def get_embedding_config_from_handle(
1161
1174
  self, handle: str, embedding_chunk_size: int = constants.DEFAULT_EMBEDDING_CHUNK_SIZE
1162
1175
  ) -> EmbeddingConfig:
@@ -1259,16 +1272,16 @@ class SyncServer(Server):
1259
1272
  if tool_source_type is not None and tool_source_type != "python":
1260
1273
  raise ValueError("Only Python source code is supported at this time")
1261
1274
 
1262
- # NOTE: we're creating a floating Tool object and NOT persisting to DB
1263
- tool = Tool(
1264
- name=tool_name,
1265
- source_code=tool_source,
1266
- args_json_schema=tool_args_json_schema,
1267
- )
1268
-
1269
1275
  # If tools_json_schema is explicitly passed in, override it on the created Tool object
1270
1276
  if tool_json_schema:
1271
- tool.json_schema = tool_json_schema
1277
+ tool = Tool(name=tool_name, source_code=tool_source, json_schema=tool_json_schema)
1278
+ else:
1279
+ # NOTE: we're creating a floating Tool object and NOT persisting to DB
1280
+ tool = Tool(
1281
+ name=tool_name,
1282
+ source_code=tool_source,
1283
+ args_json_schema=tool_args_json_schema,
1284
+ )
1272
1285
 
1273
1286
  assert tool.name is not None, "Failed to create tool object"
1274
1287
 
@@ -42,8 +42,8 @@ from letta.schemas.message import MessageCreate, MessageUpdate
42
42
  from letta.schemas.passage import Passage as PydanticPassage
43
43
  from letta.schemas.source import Source as PydanticSource
44
44
  from letta.schemas.tool import Tool as PydanticTool
45
- from letta.schemas.tool_rule import ChildToolRule as PydanticChildToolRule
46
45
  from letta.schemas.tool_rule import ContinueToolRule as PydanticContinueToolRule
46
+ from letta.schemas.tool_rule import ParentToolRule as PydanticParentToolRule
47
47
  from letta.schemas.tool_rule import TerminalToolRule as PydanticTerminalToolRule
48
48
  from letta.schemas.tool_rule import ToolRule as PydanticToolRule
49
49
  from letta.schemas.user import User as PydanticUser
@@ -70,6 +70,7 @@ from letta.services.passage_manager import PassageManager
70
70
  from letta.services.source_manager import SourceManager
71
71
  from letta.services.tool_manager import ToolManager
72
72
  from letta.settings import settings
73
+ from letta.tracing import trace_method
73
74
  from letta.utils import enforce_types, united_diff
74
75
 
75
76
  logger = get_logger(__name__)
@@ -93,6 +94,7 @@ class AgentManager:
93
94
  # ======================================================================================================================
94
95
  # Basic CRUD operations
95
96
  # ======================================================================================================================
97
+ @trace_method
96
98
  @enforce_types
97
99
  def create_agent(
98
100
  self,
@@ -162,7 +164,7 @@ class AgentManager:
162
164
  tool_rules.append(PydanticContinueToolRule(tool_name=tool_name))
163
165
 
164
166
  if agent_create.agent_type == AgentType.sleeptime_agent:
165
- tool_rules.append(PydanticChildToolRule(tool_name="view_core_memory_with_line_numbers", children=["core_memory_insert"]))
167
+ tool_rules.append(PydanticParentToolRule(tool_name="view_core_memory_with_line_numbers", children=["core_memory_insert"]))
166
168
 
167
169
  # if custom rules, check tool rules are valid
168
170
  if agent_create.tool_rules:
@@ -213,7 +215,6 @@ class AgentManager:
213
215
  # We always need the system prompt up front
214
216
  system_message_obj = PydanticMessage.dict_to_message(
215
217
  agent_id=agent_state.id,
216
- user_id=agent_state.created_by_id,
217
218
  model=agent_state.llm_config.model,
218
219
  openai_message_dict=init_messages[0],
219
220
  )
@@ -224,9 +225,7 @@ class AgentManager:
224
225
  )
225
226
  else:
226
227
  init_messages = [
227
- PydanticMessage.dict_to_message(
228
- agent_id=agent_state.id, user_id=agent_state.created_by_id, model=agent_state.llm_config.model, openai_message_dict=msg
229
- )
228
+ PydanticMessage.dict_to_message(agent_id=agent_state.id, model=agent_state.llm_config.model, openai_message_dict=msg)
230
229
  for msg in init_messages
231
230
  ]
232
231
 
@@ -661,6 +660,9 @@ class AgentManager:
661
660
  message_ids = self.get_agent_by_id(agent_id=agent_id, actor=actor).message_ids
662
661
  return self.message_manager.get_message_by_id(message_id=message_ids[0], actor=actor)
663
662
 
663
+ # TODO: This is duplicated below
664
+ # TODO: This is legacy code and should be cleaned up
665
+ # TODO: A lot of the memory "compilation" should be offset to a separate class
664
666
  @enforce_types
665
667
  def rebuild_system_prompt(self, agent_id: str, actor: PydanticUser, force=False, update_timestamp=True) -> PydanticAgentState:
666
668
  """Rebuilds the system message with the latest memory object and any shared memory block updates
@@ -714,7 +716,6 @@ class AgentManager:
714
716
  # Swap the system message out (only if there is a diff)
715
717
  message = PydanticMessage.dict_to_message(
716
718
  agent_id=agent_id,
717
- user_id=actor.id,
718
719
  model=agent_state.llm_config.model,
719
720
  openai_message_dict={"role": "system", "content": new_system_message_str},
720
721
  )
@@ -801,7 +802,6 @@ class AgentManager:
801
802
  )
802
803
  system_message = PydanticMessage.dict_to_message(
803
804
  agent_id=agent_state.id,
804
- user_id=agent_state.created_by_id,
805
805
  model=agent_state.llm_config.model,
806
806
  openai_message_dict=init_messages[0],
807
807
  )
@@ -845,6 +845,10 @@ class AgentManager:
845
845
 
846
846
  @enforce_types
847
847
  def refresh_memory(self, agent_state: PydanticAgentState, actor: PydanticUser) -> PydanticAgentState:
848
+ block_ids = [b.id for b in agent_state.memory.blocks]
849
+ if not block_ids:
850
+ return agent_state
851
+
848
852
  agent_state.memory.blocks = self.block_manager.get_all_blocks_by_ids(
849
853
  block_ids=[b.id for b in agent_state.memory.blocks], actor=actor
850
854
  )
@@ -903,7 +907,7 @@ class AgentManager:
903
907
  # get the agent
904
908
  agent = self.get_agent_by_id(agent_id=agent_id, actor=actor)
905
909
  message = PydanticMessage.dict_to_message(
906
- agent_id=agent.id, user_id=actor.id, model=agent.llm_config.model, openai_message_dict={"role": "system", "content": content}
910
+ agent_id=agent.id, model=agent.llm_config.model, openai_message_dict={"role": "system", "content": content}
907
911
  )
908
912
 
909
913
  # update agent in-context message IDs
@@ -109,11 +109,9 @@ class BlockManager:
109
109
 
110
110
  @enforce_types
111
111
  def get_all_blocks_by_ids(self, block_ids: List[str], actor: Optional[PydanticUser] = None) -> List[PydanticBlock]:
112
- """Retrieve blocks by their names."""
112
+ """Retrieve blocks by their ids."""
113
113
  with self.session_maker() as session:
114
- blocks = list(
115
- map(lambda obj: obj.to_pydantic(), BlockModel.read_multiple(db_session=session, identifiers=block_ids, actor=actor))
116
- )
114
+ blocks = [block.to_pydantic() for block in BlockModel.read_multiple(db_session=session, identifiers=block_ids, actor=actor)]
117
115
  # backwards compatibility. previous implementation added None for every block not found.
118
116
  blocks.extend([None for _ in range(len(block_ids) - len(blocks))])
119
117
  return blocks
@@ -8,7 +8,7 @@ from letta.orm.agent import Agent as AgentModel
8
8
  from letta.orm.block import Block as BlockModel
9
9
  from letta.orm.identity import Identity as IdentityModel
10
10
  from letta.schemas.identity import Identity as PydanticIdentity
11
- from letta.schemas.identity import IdentityCreate, IdentityType, IdentityUpdate
11
+ from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityType, IdentityUpdate, IdentityUpsert
12
12
  from letta.schemas.user import User as PydanticUser
13
13
  from letta.utils import enforce_types
14
14
 
@@ -81,7 +81,7 @@ class IdentityManager:
81
81
  return new_identity.to_pydantic()
82
82
 
83
83
  @enforce_types
84
- def upsert_identity(self, identity: IdentityCreate, actor: PydanticUser) -> PydanticIdentity:
84
+ def upsert_identity(self, identity: IdentityUpsert, actor: PydanticUser) -> PydanticIdentity:
85
85
  with self.session_maker() as session:
86
86
  existing_identity = IdentityModel.read(
87
87
  db_session=session,
@@ -92,7 +92,7 @@ class IdentityManager:
92
92
  )
93
93
 
94
94
  if existing_identity is None:
95
- return self.create_identity(identity=identity, actor=actor)
95
+ return self.create_identity(identity=IdentityCreate(**identity.model_dump()), actor=actor)
96
96
  else:
97
97
  identity_update = IdentityUpdate(
98
98
  name=identity.name,
@@ -137,8 +137,10 @@ class IdentityManager:
137
137
  if replace:
138
138
  existing_identity.properties = [prop.model_dump() for prop in identity.properties]
139
139
  else:
140
- new_properties = existing_identity.properties + [prop.model_dump() for prop in identity.properties]
141
- existing_identity.properties = new_properties
140
+ new_properties = {old_prop["key"]: old_prop for old_prop in existing_identity.properties} | {
141
+ new_prop.key: new_prop.model_dump() for new_prop in identity.properties
142
+ }
143
+ existing_identity.properties = list(new_properties.values())
142
144
 
143
145
  if identity.agent_ids is not None:
144
146
  self._process_relationship(
@@ -163,6 +165,20 @@ class IdentityManager:
163
165
  existing_identity.update(session, actor=actor)
164
166
  return existing_identity.to_pydantic()
165
167
 
168
+ @enforce_types
169
+ def upsert_identity_properties(self, identity_id: str, properties: List[IdentityProperty], actor: PydanticUser) -> PydanticIdentity:
170
+ with self.session_maker() as session:
171
+ existing_identity = IdentityModel.read(db_session=session, identifier=identity_id, actor=actor)
172
+ if existing_identity is None:
173
+ raise HTTPException(status_code=404, detail="Identity not found")
174
+ return self._update_identity(
175
+ session=session,
176
+ existing_identity=existing_identity,
177
+ identity=IdentityUpdate(properties=properties),
178
+ actor=actor,
179
+ replace=True,
180
+ )
181
+
166
182
  @enforce_types
167
183
  def delete_identity(self, identity_id: str, actor: PydanticUser) -> None:
168
184
  with self.session_maker() as session:
@@ -28,7 +28,7 @@ class LLMBatchManager:
28
28
  self.session_maker = db_context
29
29
 
30
30
  @enforce_types
31
- def create_batch_request(
31
+ def create_batch_job(
32
32
  self,
33
33
  llm_provider: ProviderType,
34
34
  create_batch_response: BetaMessageBatch,
@@ -172,6 +172,26 @@ class LLMBatchManager:
172
172
 
173
173
  return item.update(db_session=session, actor=actor).to_pydantic()
174
174
 
175
+ @enforce_types
176
+ def list_batch_items(
177
+ self,
178
+ batch_id: str,
179
+ limit: Optional[int] = None,
180
+ actor: Optional[PydanticUser] = None,
181
+ ) -> List[PydanticLLMBatchItem]:
182
+ """List all batch items for a given batch_id, optionally filtered by organization and limited in count."""
183
+ with self.session_maker() as session:
184
+ query = session.query(LLMBatchItem).filter(LLMBatchItem.batch_id == batch_id)
185
+
186
+ if actor is not None:
187
+ query = query.filter(LLMBatchItem.organization_id == actor.organization_id)
188
+
189
+ if limit:
190
+ query = query.limit(limit)
191
+
192
+ results = query.all()
193
+ return [item.to_pydantic() for item in results]
194
+
175
195
  def bulk_update_batch_items_by_agent(
176
196
  self,
177
197
  updates: List[ItemUpdateInfo],
@@ -4,8 +4,8 @@ from typing import List, Tuple
4
4
 
5
5
  from letta.agents.base_agent import BaseAgent
6
6
  from letta.schemas.enums import MessageRole
7
- from letta.schemas.message import Message
8
- from letta.schemas.openai.chat_completion_request import UserMessage
7
+ from letta.schemas.letta_message_content import TextContent
8
+ from letta.schemas.message import Message, MessageCreate
9
9
  from letta.services.summarizer.enums import SummarizationMode
10
10
 
11
11
 
@@ -95,8 +95,15 @@ class Summarizer:
95
95
  "It should be in note-taking format in natural English. You are to return the new, updated memory only."
96
96
  )
97
97
 
98
- messages = await self.summarizer_agent.step(UserMessage(content=summary_request_text))
99
- current_summary = "\n".join([m.content[0].text for m in messages])
98
+ response = await self.summarizer_agent.step(
99
+ input_messages=[
100
+ MessageCreate(
101
+ role=MessageRole.user,
102
+ content=[TextContent(text=summary_request_text)],
103
+ ),
104
+ ],
105
+ )
106
+ current_summary = "\n".join([m.content[0].text for m in response.messages if m.message_type == "assistant_message"])
100
107
  current_summary = f"{self.summary_prefix}{current_summary}"
101
108
 
102
109
  return updated_in_context_messages, current_summary, True
@@ -54,7 +54,7 @@ class ToolManager:
54
54
 
55
55
  # If there's anything to update
56
56
  if update_data:
57
- self.update_tool_by_id(tool.id, ToolUpdate(**update_data), actor)
57
+ tool = self.update_tool_by_id(tool.id, ToolUpdate(**update_data), actor)
58
58
  else:
59
59
  printd(
60
60
  f"`create_or_update_tool` was called with user_id={actor.id}, organization_id={actor.organization_id}, name={pydantic_tool.name}, but found existing tool with nothing to update."
letta/settings.py CHANGED
@@ -205,6 +205,7 @@ class Settings(BaseSettings):
205
205
  httpx_keepalive_expiry: float = 120.0
206
206
 
207
207
  # cron job parameters
208
+ enable_batch_job_polling: bool = False
208
209
  poll_running_llm_batches_interval_seconds: int = 5 * 60
209
210
 
210
211
  @property
letta/utils.py CHANGED
@@ -828,7 +828,7 @@ def parse_json(string) -> dict:
828
828
  raise ValueError(f"JSON from string input ({string}) is not a dictionary (type {type(result)}): {result}")
829
829
  return result
830
830
  except Exception as e:
831
- print(f"Error parsing json with json package: {e}")
831
+ print(f"Error parsing json with json package, falling back to demjson: {e}")
832
832
 
833
833
  try:
834
834
  result = demjson.decode(string)
@@ -836,7 +836,7 @@ def parse_json(string) -> dict:
836
836
  raise ValueError(f"JSON from string input ({string}) is not a dictionary (type {type(result)}): {result}")
837
837
  return result
838
838
  except demjson.JSONDecodeError as e:
839
- print(f"Error parsing json with demjson package: {e}")
839
+ print(f"Error parsing json with demjson package (fatal): {e}")
840
840
  raise e
841
841
 
842
842
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: letta-nightly
3
- Version: 0.6.50.dev20250411104155
3
+ Version: 0.6.52.dev20250412051016
4
4
  Summary: Create LLM agents with long-term memory and custom tools
5
5
  License: Apache License
6
6
  Author: Letta Team
@@ -50,7 +50,7 @@ Requires-Dist: isort (>=5.13.2,<6.0.0) ; extra == "dev" or extra == "all"
50
50
  Requires-Dist: jinja2 (>=3.1.5,<4.0.0)
51
51
  Requires-Dist: langchain (>=0.3.7,<0.4.0) ; extra == "external-tools" or extra == "desktop" or extra == "all"
52
52
  Requires-Dist: langchain-community (>=0.3.7,<0.4.0) ; extra == "external-tools" or extra == "desktop" or extra == "all"
53
- Requires-Dist: letta_client (>=0.1.97,<0.2.0)
53
+ Requires-Dist: letta_client (>=0.1.104,<0.2.0)
54
54
  Requires-Dist: llama-index (>=0.12.2,<0.13.0)
55
55
  Requires-Dist: llama-index-embeddings-openai (>=0.3.1,<0.4.0)
56
56
  Requires-Dist: locust (>=2.31.5,<3.0.0) ; extra == "dev" or extra == "desktop" or extra == "all"
@@ -255,7 +255,7 @@ No, the data in your Letta server database stays on your machine. The Letta ADE
255
255
 
256
256
  > _"Do I have to use your ADE? Can I build my own?"_
257
257
 
258
- The ADE is built on top of the (fully open source) Letta server and Letta Agents API. You can build your own application like the ADE on top of the REST API (view the documention [here](https://docs.letta.com/api-reference)).
258
+ The ADE is built on top of the (fully open source) Letta server and Letta Agents API. You can build your own application like the ADE on top of the REST API (view the documentation [here](https://docs.letta.com/api-reference)).
259
259
 
260
260
  > _"Can I interact with Letta agents via the CLI?"_
261
261