letta-nightly 0.5.0.dev20241015014828__py3-none-any.whl → 0.5.0.dev20241016104103__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of letta-nightly might be problematic. Click here for more details.

letta/client/utils.py CHANGED
@@ -1,6 +1,9 @@
1
+ import re
1
2
  from datetime import datetime
3
+ from typing import Optional
2
4
 
3
5
  from IPython.display import HTML, display
6
+ from sqlalchemy.testing.plugin.plugin_base import warnings
4
7
 
5
8
  from letta.local_llm.constants import (
6
9
  ASSISTANT_MESSAGE_CLI_SYMBOL,
@@ -64,3 +67,15 @@ def pprint(messages):
64
67
  html_content += "</div>"
65
68
 
66
69
  display(HTML(html_content))
70
+
71
+
72
+ def derive_function_name_regex(function_string: str) -> Optional[str]:
73
+ # Regular expression to match the function name
74
+ match = re.search(r"def\s+([a-zA-Z_]\w*)\s*\(", function_string)
75
+
76
+ if match:
77
+ function_name = match.group(1)
78
+ return function_name
79
+ else:
80
+ warnings.warn("No function name found.")
81
+ return None
letta/constants.py CHANGED
@@ -39,7 +39,7 @@ DEFAULT_PRESET = "memgpt_chat"
39
39
  # Tools
40
40
  BASE_TOOLS = [
41
41
  "send_message",
42
- "pause_heartbeats",
42
+ # "pause_heartbeats",
43
43
  "conversation_search",
44
44
  "conversation_search_date",
45
45
  "archival_memory_insert",
@@ -27,7 +27,7 @@ def load_function_set(module: ModuleType) -> dict:
27
27
  if attr_name in function_dict:
28
28
  raise ValueError(f"Found a duplicate of function name '{attr_name}'")
29
29
 
30
- generated_schema = generate_schema(attr)
30
+ generated_schema = generate_schema(attr, terminal=False)
31
31
  function_dict[attr_name] = {
32
32
  "module": inspect.getsource(module),
33
33
  "python_function": attr,
@@ -74,7 +74,7 @@ def pydantic_model_to_open_ai(model):
74
74
  }
75
75
 
76
76
 
77
- def generate_schema(function, name: Optional[str] = None, description: Optional[str] = None):
77
+ def generate_schema(function, terminal: Optional[bool], name: Optional[str] = None, description: Optional[str] = None):
78
78
  # Get the signature of the function
79
79
  sig = inspect.signature(function)
80
80
 
@@ -127,7 +127,8 @@ def generate_schema(function, name: Optional[str] = None, description: Optional[
127
127
  schema["parameters"]["required"].append(param.name)
128
128
 
129
129
  # append the heartbeat
130
- if function.__name__ not in ["send_message", "pause_heartbeats"]:
130
+ # TODO: don't hard-code
131
+ if function.__name__ not in ["send_message", "pause_heartbeats"] and not terminal:
131
132
  schema["parameters"]["properties"]["request_heartbeat"] = {
132
133
  "type": "boolean",
133
134
  "description": "Request an immediate heartbeat after function execution. Set to `True` if you want to send a follow-up message or run a follow-up function.",
letta/main.py CHANGED
@@ -361,8 +361,10 @@ def run_agent_loop(
361
361
  skip_next_user_input = False
362
362
 
363
363
  def process_agent_step(user_message, no_verify):
364
+ # TODO(charles): update to use agent.step() instead of inner_step()
365
+
364
366
  if user_message is None:
365
- step_response = letta_agent.step(
367
+ step_response = letta_agent.inner_step(
366
368
  messages=[],
367
369
  first_message=False,
368
370
  skip_verify=no_verify,
@@ -402,15 +404,15 @@ def run_agent_loop(
402
404
  while True:
403
405
  try:
404
406
  if strip_ui:
405
- new_messages, user_message, skip_next_user_input = process_agent_step(user_message, no_verify)
407
+ _, user_message, skip_next_user_input = process_agent_step(user_message, no_verify)
406
408
  break
407
409
  else:
408
410
  if stream:
409
411
  # Don't display the "Thinking..." if streaming
410
- new_messages, user_message, skip_next_user_input = process_agent_step(user_message, no_verify)
412
+ _, user_message, skip_next_user_input = process_agent_step(user_message, no_verify)
411
413
  else:
412
414
  with console.status("[bold cyan]Thinking...") as status:
413
- new_messages, user_message, skip_next_user_input = process_agent_step(user_message, no_verify)
415
+ _, user_message, skip_next_user_input = process_agent_step(user_message, no_verify)
414
416
  break
415
417
  except KeyboardInterrupt:
416
418
  print("User interrupt occurred.")
letta/metadata.py CHANGED
@@ -577,7 +577,7 @@ class MetadataStore:
577
577
  @enforce_types
578
578
  def create_tool(self, tool: Tool):
579
579
  with self.session_maker() as session:
580
- if self.get_tool(tool_name=tool.name, user_id=tool.user_id) is not None:
580
+ if self.get_tool(tool_id=tool.id, tool_name=tool.name, user_id=tool.user_id) is not None:
581
581
  raise ValueError(f"Tool with name {tool.name} already exists")
582
582
  session.add(ToolModel(**vars(tool)))
583
583
  session.commit()
@@ -620,9 +620,9 @@ class MetadataStore:
620
620
  session.commit()
621
621
 
622
622
  @enforce_types
623
- def update_tool(self, tool: Tool):
623
+ def update_tool(self, tool_id: str, tool: Tool):
624
624
  with self.session_maker() as session:
625
- session.query(ToolModel).filter(ToolModel.id == tool.id).update(vars(tool))
625
+ session.query(ToolModel).filter(ToolModel.id == tool_id).update(vars(tool))
626
626
  session.commit()
627
627
 
628
628
  @enforce_types
@@ -631,6 +631,21 @@ class MetadataStore:
631
631
  session.query(ToolModel).filter(ToolModel.id == tool_id).delete()
632
632
  session.commit()
633
633
 
634
+ @enforce_types
635
+ def delete_file_from_source(self, source_id: str, file_id: str, user_id: Optional[str]):
636
+ with self.session_maker() as session:
637
+ file_metadata = (
638
+ session.query(FileMetadataModel)
639
+ .filter(FileMetadataModel.source_id == source_id, FileMetadataModel.id == file_id, FileMetadataModel.user_id == user_id)
640
+ .first()
641
+ )
642
+
643
+ if file_metadata:
644
+ session.delete(file_metadata)
645
+ session.commit()
646
+
647
+ return file_metadata
648
+
634
649
  @enforce_types
635
650
  def delete_block(self, block_id: str):
636
651
  with self.session_maker() as session:
@@ -800,6 +815,15 @@ class MetadataStore:
800
815
  results = session.query(ToolModel).filter(ToolModel.name == tool_name).filter(ToolModel.user_id == None).all()
801
816
  if user_id:
802
817
  results += session.query(ToolModel).filter(ToolModel.name == tool_name).filter(ToolModel.user_id == user_id).all()
818
+ if len(results) == 0:
819
+ return None
820
+ # assert len(results) == 1, f"Expected 1 result, got {len(results)}"
821
+ return results[0].to_record()
822
+
823
+ @enforce_types
824
+ def get_tool_with_name_and_user_id(self, tool_name: Optional[str] = None, user_id: Optional[str] = None) -> Optional[ToolModel]:
825
+ with self.session_maker() as session:
826
+ results = session.query(ToolModel).filter(ToolModel.name == tool_name).filter(ToolModel.user_id == user_id).all()
803
827
  if len(results) == 0:
804
828
  return None
805
829
  assert len(results) == 1, f"Expected 1 result, got {len(results)}"
letta/schemas/agent.py CHANGED
@@ -1,7 +1,7 @@
1
1
  import uuid
2
2
  from datetime import datetime
3
3
  from enum import Enum
4
- from typing import Dict, List, Optional, Union
4
+ from typing import Dict, List, Optional
5
5
 
6
6
  from pydantic import BaseModel, Field, field_validator
7
7
 
@@ -121,11 +121,15 @@ class UpdateAgentState(BaseAgent):
121
121
 
122
122
 
123
123
  class AgentStepResponse(BaseModel):
124
- # TODO remove support for list of dicts
125
- messages: Union[List[Message], List[dict]] = Field(..., description="The messages generated during the agent's step.")
124
+ messages: List[Message] = Field(..., description="The messages generated during the agent's step.")
126
125
  heartbeat_request: bool = Field(..., description="Whether the agent requested a heartbeat (i.e. follow-up execution).")
127
126
  function_failed: bool = Field(..., description="Whether the agent step ended because a function call failed.")
128
127
  in_context_memory_warning: bool = Field(
129
128
  ..., description="Whether the agent step ended because the in-context memory is near its limit."
130
129
  )
131
130
  usage: UsageStatistics = Field(..., description="Usage statistics of the LLM call during the agent's step.")
131
+
132
+
133
+ class RemoveToolsFromAgent(BaseModel):
134
+ agent_id: str = Field(..., description="The id of the agent.")
135
+ tool_ids: Optional[List[str]] = Field(None, description="The tools to be removed from the agent.")
letta/schemas/memory.py CHANGED
@@ -8,6 +8,43 @@ if TYPE_CHECKING:
8
8
  from letta.agent import Agent
9
9
 
10
10
  from letta.schemas.block import Block
11
+ from letta.schemas.message import Message
12
+
13
+
14
+ class ContextWindowOverview(BaseModel):
15
+ """
16
+ Overview of the context window, including the number of messages and tokens.
17
+ """
18
+
19
+ # top-level information
20
+ context_window_size_max: int = Field(..., description="The maximum amount of tokens the context window can hold.")
21
+ context_window_size_current: int = Field(..., description="The current number of tokens in the context window.")
22
+
23
+ # context window breakdown (in messages)
24
+ # (technically not in the context window, but useful to know)
25
+ num_messages: int = Field(..., description="The number of messages in the context window.")
26
+ num_archival_memory: int = Field(..., description="The number of messages in the archival memory.")
27
+ num_recall_memory: int = Field(..., description="The number of messages in the recall memory.")
28
+ num_tokens_external_memory_summary: int = Field(
29
+ ..., description="The number of tokens in the external memory summary (archival + recall metadata)."
30
+ )
31
+
32
+ # context window breakdown (in tokens)
33
+ # this should all add up to context_window_size_current
34
+
35
+ num_tokens_system: int = Field(..., description="The number of tokens in the system prompt.")
36
+ system_prompt: str = Field(..., description="The content of the system prompt.")
37
+
38
+ num_tokens_core_memory: int = Field(..., description="The number of tokens in the core memory.")
39
+ core_memory: str = Field(..., description="The content of the core memory.")
40
+
41
+ num_tokens_summary_memory: int = Field(..., description="The number of tokens in the summary memory.")
42
+ summary_memory: Optional[str] = Field(None, description="The content of the summary memory.")
43
+
44
+ num_tokens_messages: int = Field(..., description="The number of tokens in the messages list.")
45
+ # TODO make list of messages?
46
+ # messages: List[dict] = Field(..., description="The messages in the context window.")
47
+ messages: List[Message] = Field(..., description="The messages in the context window.")
11
48
 
12
49
 
13
50
  class Memory(BaseModel, validate_assignment=True):
letta/schemas/tool.py CHANGED
@@ -176,16 +176,20 @@ class Tool(BaseTool):
176
176
 
177
177
 
178
178
  class ToolCreate(BaseTool):
179
+ id: Optional[str] = Field(None, description="The unique identifier of the tool. If this is not provided, it will be autogenerated.")
179
180
  name: Optional[str] = Field(None, description="The name of the function (auto-generated from source_code if not provided).")
181
+ description: Optional[str] = Field(None, description="The description of the tool.")
180
182
  tags: List[str] = Field([], description="Metadata tags.")
181
183
  source_code: str = Field(..., description="The source code of the function.")
182
184
  json_schema: Optional[Dict] = Field(
183
185
  None, description="The JSON schema of the function (auto-generated from source_code if not provided)"
184
186
  )
187
+ terminal: Optional[bool] = Field(None, description="Whether the tool is a terminal tool (allow requesting heartbeats).")
185
188
 
186
189
 
187
190
  class ToolUpdate(ToolCreate):
188
191
  id: str = Field(..., description="The unique identifier of the tool.")
192
+ description: Optional[str] = Field(None, description="The description of the tool.")
189
193
  name: Optional[str] = Field(None, description="The name of the function.")
190
194
  tags: Optional[List[str]] = Field(None, description="Metadata tags.")
191
195
  source_code: Optional[str] = Field(None, description="The source code of the function.")
@@ -248,7 +248,7 @@ def create_run(
248
248
  agent_id = thread_id
249
249
  # TODO: override preset of agent with request.assistant_id
250
250
  agent = server._get_or_load_agent(agent_id=agent_id)
251
- agent.step(user_message=None) # already has messages added
251
+ agent.inner_step(messages=[]) # already has messages added
252
252
  run_id = str(uuid.uuid4())
253
253
  create_time = int(get_utc_time().timestamp())
254
254
  return OpenAIRun(
@@ -19,6 +19,7 @@ from letta.schemas.letta_response import LettaResponse
19
19
  from letta.schemas.memory import (
20
20
  ArchivalMemorySummary,
21
21
  BasicBlockMemory,
22
+ ContextWindowOverview,
22
23
  CreateArchivalMemory,
23
24
  Memory,
24
25
  RecallMemorySummary,
@@ -51,6 +52,20 @@ def list_agents(
51
52
  return server.list_agents(user_id=actor.id)
52
53
 
53
54
 
55
+ @router.get("/{agent_id}/context", response_model=ContextWindowOverview, operation_id="get_agent_context_window")
56
+ def get_agent_context_window(
57
+ agent_id: str,
58
+ server: "SyncServer" = Depends(get_letta_server),
59
+ user_id: Optional[str] = Header(None, alias="user_id"), # Extract user_id from header, default to None if not present
60
+ ):
61
+ """
62
+ Retrieve the context window of a specific agent.
63
+ """
64
+ actor = server.get_user_or_default(user_id=user_id)
65
+
66
+ return server.get_agent_context_window(user_id=actor.id, agent_id=agent_id)
67
+
68
+
54
69
  @router.post("/", response_model=AgentState, operation_id="create_agent")
55
70
  def create_agent(
56
71
  agent: CreateAgent = Body(...),
@@ -85,6 +100,34 @@ def update_agent(
85
100
  return server.update_agent(update_agent, user_id=actor.id)
86
101
 
87
102
 
103
+ @router.patch("/{agent_id}/add-tool/{tool_id}", response_model=AgentState, operation_id="add_tool_to_agent")
104
+ def add_tool_to_agent(
105
+ agent_id: str,
106
+ tool_id: str,
107
+ server: "SyncServer" = Depends(get_letta_server),
108
+ user_id: Optional[str] = Header(None, alias="user_id"), # Extract user_id from header, default to None if not present
109
+ ):
110
+ """Add tools to an exsiting agent"""
111
+ actor = server.get_user_or_default(user_id=user_id)
112
+
113
+ update_agent.id = agent_id
114
+ return server.add_tool_to_agent(agent_id=agent_id, tool_id=tool_id, user_id=actor.id)
115
+
116
+
117
+ @router.patch("/{agent_id}/remove-tool/{tool_id}", response_model=AgentState, operation_id="remove_tool_from_agent")
118
+ def remove_tool_from_agent(
119
+ agent_id: str,
120
+ tool_id: str,
121
+ server: "SyncServer" = Depends(get_letta_server),
122
+ user_id: Optional[str] = Header(None, alias="user_id"), # Extract user_id from header, default to None if not present
123
+ ):
124
+ """Add tools to an exsiting agent"""
125
+ actor = server.get_user_or_default(user_id=user_id)
126
+
127
+ update_agent.id = agent_id
128
+ return server.remove_tool_from_agent(agent_id=agent_id, tool_id=tool_id, user_id=actor.id)
129
+
130
+
88
131
  @router.get("/{agent_id}", response_model=AgentState, operation_id="get_agent")
89
132
  def get_agent_state(
90
133
  agent_id: str,
@@ -2,7 +2,15 @@ import os
2
2
  import tempfile
3
3
  from typing import List, Optional
4
4
 
5
- from fastapi import APIRouter, BackgroundTasks, Depends, Header, Query, UploadFile
5
+ from fastapi import (
6
+ APIRouter,
7
+ BackgroundTasks,
8
+ Depends,
9
+ Header,
10
+ HTTPException,
11
+ Query,
12
+ UploadFile,
13
+ )
6
14
 
7
15
  from letta.schemas.file import FileMetadata
8
16
  from letta.schemas.job import Job
@@ -199,6 +207,25 @@ def list_files_from_source(
199
207
  return server.list_files_from_source(source_id=source_id, limit=limit, cursor=cursor)
200
208
 
201
209
 
210
+ # it's redundant to include /delete in the URL path. The HTTP verb DELETE already implies that action.
211
+ # it's still good practice to return a status indicating the success or failure of the deletion
212
+ @router.delete("/{source_id}/{file_id}", status_code=204, operation_id="delete_file_from_source")
213
+ def delete_file_from_source(
214
+ source_id: str,
215
+ file_id: str,
216
+ server: "SyncServer" = Depends(get_letta_server),
217
+ user_id: Optional[str] = Header(None, alias="user_id"), # Extract user_id from header, default to None if not present
218
+ ):
219
+ """
220
+ Delete a file from a data source.
221
+ """
222
+ actor = server.get_user_or_default(user_id=user_id)
223
+
224
+ deleted_file = server.delete_file_from_source(source_id=source_id, file_id=file_id, user_id=actor.id)
225
+ if deleted_file is None:
226
+ raise HTTPException(status_code=404, detail=f"File with id={file_id} not found.")
227
+
228
+
202
229
  def load_file_to_source_async(server: SyncServer, source_id: str, job_id: str, file: UploadFile, bytes: bytes):
203
230
  # write the file to a temporary directory (deleted after the context manager exits)
204
231
  with tempfile.TemporaryDirectory() as tmpdirname:
@@ -105,4 +105,4 @@ def update_tool(
105
105
  """
106
106
  assert tool_id == request.id, "Tool ID in path must match tool ID in request body"
107
107
  # actor = server.get_user_or_default(user_id=user_id)
108
- return server.update_tool(request)
108
+ return server.update_tool(request, user_id)