letta-nightly 0.6.14.dev20250123041709__py3-none-any.whl → 0.6.15.dev20250124054224__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of letta-nightly might be problematic. Click here for more details.

Files changed (59) hide show
  1. letta/__init__.py +1 -1
  2. letta/client/client.py +144 -68
  3. letta/client/streaming.py +1 -1
  4. letta/functions/function_sets/extras.py +8 -3
  5. letta/functions/function_sets/multi_agent.py +1 -1
  6. letta/functions/helpers.py +2 -2
  7. letta/llm_api/llm_api_tools.py +2 -2
  8. letta/llm_api/openai.py +30 -138
  9. letta/memory.py +4 -4
  10. letta/offline_memory_agent.py +10 -10
  11. letta/orm/agent.py +10 -2
  12. letta/orm/block.py +14 -3
  13. letta/orm/job.py +2 -1
  14. letta/orm/message.py +12 -1
  15. letta/orm/passage.py +6 -2
  16. letta/orm/source.py +6 -1
  17. letta/orm/sqlalchemy_base.py +80 -32
  18. letta/orm/tool.py +5 -2
  19. letta/schemas/embedding_config_overrides.py +3 -0
  20. letta/schemas/enums.py +4 -0
  21. letta/schemas/job.py +1 -1
  22. letta/schemas/letta_message.py +22 -5
  23. letta/schemas/llm_config.py +5 -0
  24. letta/schemas/llm_config_overrides.py +38 -0
  25. letta/schemas/message.py +61 -15
  26. letta/schemas/openai/chat_completions.py +1 -1
  27. letta/schemas/passage.py +1 -1
  28. letta/schemas/providers.py +24 -8
  29. letta/schemas/source.py +1 -1
  30. letta/server/rest_api/app.py +12 -3
  31. letta/server/rest_api/interface.py +5 -7
  32. letta/server/rest_api/routers/v1/agents.py +7 -12
  33. letta/server/rest_api/routers/v1/blocks.py +19 -0
  34. letta/server/rest_api/routers/v1/organizations.py +2 -2
  35. letta/server/rest_api/routers/v1/providers.py +2 -2
  36. letta/server/rest_api/routers/v1/runs.py +15 -7
  37. letta/server/rest_api/routers/v1/sandbox_configs.py +4 -4
  38. letta/server/rest_api/routers/v1/sources.py +2 -2
  39. letta/server/rest_api/routers/v1/tags.py +2 -2
  40. letta/server/rest_api/routers/v1/tools.py +2 -2
  41. letta/server/rest_api/routers/v1/users.py +2 -2
  42. letta/server/server.py +62 -34
  43. letta/services/agent_manager.py +80 -33
  44. letta/services/block_manager.py +15 -2
  45. letta/services/helpers/agent_manager_helper.py +11 -4
  46. letta/services/job_manager.py +19 -9
  47. letta/services/message_manager.py +14 -8
  48. letta/services/organization_manager.py +8 -4
  49. letta/services/provider_manager.py +8 -4
  50. letta/services/sandbox_config_manager.py +16 -8
  51. letta/services/source_manager.py +4 -4
  52. letta/services/tool_manager.py +3 -3
  53. letta/services/user_manager.py +9 -5
  54. {letta_nightly-0.6.14.dev20250123041709.dist-info → letta_nightly-0.6.15.dev20250124054224.dist-info}/METADATA +2 -1
  55. {letta_nightly-0.6.14.dev20250123041709.dist-info → letta_nightly-0.6.15.dev20250124054224.dist-info}/RECORD +58 -57
  56. letta/orm/job_usage_statistics.py +0 -30
  57. {letta_nightly-0.6.14.dev20250123041709.dist-info → letta_nightly-0.6.15.dev20250124054224.dist-info}/LICENSE +0 -0
  58. {letta_nightly-0.6.14.dev20250123041709.dist-info → letta_nightly-0.6.15.dev20250124054224.dist-info}/WHEEL +0 -0
  59. {letta_nightly-0.6.14.dev20250123041709.dist-info → letta_nightly-0.6.15.dev20250124054224.dist-info}/entry_points.txt +0 -0
@@ -7,8 +7,10 @@ from letta.constants import LLM_MAX_TOKENS, MIN_CONTEXT_WINDOW
7
7
  from letta.llm_api.azure_openai import get_azure_chat_completions_endpoint, get_azure_embeddings_endpoint
8
8
  from letta.llm_api.azure_openai_constants import AZURE_MODEL_TO_CONTEXT_LENGTH
9
9
  from letta.schemas.embedding_config import EmbeddingConfig
10
+ from letta.schemas.embedding_config_overrides import EMBEDDING_HANDLE_OVERRIDES
10
11
  from letta.schemas.letta_base import LettaBase
11
12
  from letta.schemas.llm_config import LLMConfig
13
+ from letta.schemas.llm_config_overrides import LLM_HANDLE_OVERRIDES
12
14
 
13
15
 
14
16
  class ProviderBase(LettaBase):
@@ -39,7 +41,21 @@ class Provider(ProviderBase):
39
41
  """String representation of the provider for display purposes"""
40
42
  raise NotImplementedError
41
43
 
42
- def get_handle(self, model_name: str) -> str:
44
+ def get_handle(self, model_name: str, is_embedding: bool = False) -> str:
45
+ """
46
+ Get the handle for a model, with support for custom overrides.
47
+
48
+ Args:
49
+ model_name (str): The name of the model.
50
+ is_embedding (bool, optional): Whether the handle is for an embedding model. Defaults to False.
51
+
52
+ Returns:
53
+ str: The handle for the model.
54
+ """
55
+ overrides = EMBEDDING_HANDLE_OVERRIDES if is_embedding else LLM_HANDLE_OVERRIDES
56
+ if self.name in overrides and model_name in overrides[self.name]:
57
+ model_name = overrides[self.name][model_name]
58
+
43
59
  return f"{self.name}/{model_name}"
44
60
 
45
61
 
@@ -76,7 +92,7 @@ class LettaProvider(Provider):
76
92
  embedding_endpoint="https://embeddings.memgpt.ai",
77
93
  embedding_dim=1024,
78
94
  embedding_chunk_size=300,
79
- handle=self.get_handle("letta-free"),
95
+ handle=self.get_handle("letta-free", is_embedding=True),
80
96
  )
81
97
  ]
82
98
 
@@ -167,7 +183,7 @@ class OpenAIProvider(Provider):
167
183
  embedding_endpoint="https://api.openai.com/v1",
168
184
  embedding_dim=1536,
169
185
  embedding_chunk_size=300,
170
- handle=self.get_handle("text-embedding-ada-002"),
186
+ handle=self.get_handle("text-embedding-ada-002", is_embedding=True),
171
187
  ),
172
188
  EmbeddingConfig(
173
189
  embedding_model="text-embedding-3-small",
@@ -175,7 +191,7 @@ class OpenAIProvider(Provider):
175
191
  embedding_endpoint="https://api.openai.com/v1",
176
192
  embedding_dim=2000,
177
193
  embedding_chunk_size=300,
178
- handle=self.get_handle("text-embedding-3-small"),
194
+ handle=self.get_handle("text-embedding-3-small", is_embedding=True),
179
195
  ),
180
196
  EmbeddingConfig(
181
197
  embedding_model="text-embedding-3-large",
@@ -183,7 +199,7 @@ class OpenAIProvider(Provider):
183
199
  embedding_endpoint="https://api.openai.com/v1",
184
200
  embedding_dim=2000,
185
201
  embedding_chunk_size=300,
186
- handle=self.get_handle("text-embedding-3-large"),
202
+ handle=self.get_handle("text-embedding-3-large", is_embedding=True),
187
203
  ),
188
204
  ]
189
205
 
@@ -377,7 +393,7 @@ class OllamaProvider(OpenAIProvider):
377
393
  embedding_endpoint=self.base_url,
378
394
  embedding_dim=embedding_dim,
379
395
  embedding_chunk_size=300,
380
- handle=self.get_handle(model["name"]),
396
+ handle=self.get_handle(model["name"], is_embedding=True),
381
397
  )
382
398
  )
383
399
  return configs
@@ -575,7 +591,7 @@ class GoogleAIProvider(Provider):
575
591
  embedding_endpoint=self.base_url,
576
592
  embedding_dim=768,
577
593
  embedding_chunk_size=300, # NOTE: max is 2048
578
- handle=self.get_handle(model),
594
+ handle=self.get_handle(model, is_embedding=True),
579
595
  )
580
596
  )
581
597
  return configs
@@ -641,7 +657,7 @@ class AzureProvider(Provider):
641
657
  embedding_endpoint=model_endpoint,
642
658
  embedding_dim=768,
643
659
  embedding_chunk_size=300, # NOTE: max is 2048
644
- handle=self.get_handle(model_name),
660
+ handle=self.get_handle(model_name, is_embedding=True),
645
661
  )
646
662
  )
647
663
  return configs
letta/schemas/source.py CHANGED
@@ -33,7 +33,7 @@ class Source(BaseSource):
33
33
  description: Optional[str] = Field(None, description="The description of the source.")
34
34
  embedding_config: EmbeddingConfig = Field(..., description="The embedding configuration used by the source.")
35
35
  organization_id: Optional[str] = Field(None, description="The ID of the organization that created the source.")
36
- metadata: Optional[dict] = Field(None, description="Metadata associated with the source.")
36
+ metadata: Optional[dict] = Field(None, validation_alias="metadata_", description="Metadata associated with the source.")
37
37
 
38
38
  # metadata fields
39
39
  created_by_id: Optional[str] = Field(None, description="The id of the user that made this Tool.")
@@ -12,7 +12,7 @@ from starlette.middleware.base import BaseHTTPMiddleware
12
12
  from starlette.middleware.cors import CORSMiddleware
13
13
 
14
14
  from letta.__init__ import __version__
15
- from letta.constants import ADMIN_PREFIX, API_PREFIX, OPENAI_API_PREFIX
15
+ from letta.constants import ADMIN_PREFIX, API_PREFIX
16
16
  from letta.errors import BedrockPermissionError, LettaAgentNotFoundError, LettaUserNotFoundError
17
17
  from letta.log import get_logger
18
18
  from letta.orm.errors import DatabaseTimeoutError, ForeignKeyConstraintViolationError, NoResultFound, UniqueConstraintViolationError
@@ -49,9 +49,12 @@ password = None
49
49
  # #typer.secho(f"Generated admin server password for this session: {password}", fg=typer.colors.GREEN)
50
50
 
51
51
  import logging
52
+ import platform
52
53
 
53
54
  from fastapi import FastAPI
54
55
 
56
+ is_windows = platform.system() == "Windows"
57
+
55
58
  log = logging.getLogger("uvicorn")
56
59
 
57
60
 
@@ -285,8 +288,14 @@ def start_server(
285
288
  ssl_certfile="certs/localhost.pem",
286
289
  )
287
290
  else:
288
- print(f"▶ Server running at: http://{host or 'localhost'}:{port or REST_DEFAULT_PORT}")
289
- print(f"▶ View using ADE at: https://app.letta.com/development-servers/local/dashboard\n")
291
+ if is_windows:
292
+ # Windows doesn't support the fancy unicode characters
293
+ print(f"Server running at: http://{host or 'localhost'}:{port or REST_DEFAULT_PORT}")
294
+ print(f"View using ADE at: https://app.letta.com/development-servers/local/dashboard\n")
295
+ else:
296
+ print(f"▶ Server running at: http://{host or 'localhost'}:{port or REST_DEFAULT_PORT}")
297
+ print(f"▶ View using ADE at: https://app.letta.com/development-servers/local/dashboard\n")
298
+
290
299
  uvicorn.run(
291
300
  app,
292
301
  host=host or "localhost",
@@ -472,7 +472,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
472
472
  processed_chunk = AssistantMessage(
473
473
  id=message_id,
474
474
  date=message_date,
475
- assistant_message=cleaned_func_args,
475
+ content=cleaned_func_args,
476
476
  )
477
477
 
478
478
  # otherwise we just do a regular passthrough of a ToolCallDelta via a ToolCallMessage
@@ -613,7 +613,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
613
613
  processed_chunk = AssistantMessage(
614
614
  id=message_id,
615
615
  date=message_date,
616
- assistant_message=combined_chunk,
616
+ content=combined_chunk,
617
617
  )
618
618
  # Store the ID of the tool call so allow skipping the corresponding response
619
619
  if self.function_id_buffer:
@@ -627,7 +627,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
627
627
  processed_chunk = AssistantMessage(
628
628
  id=message_id,
629
629
  date=message_date,
630
- assistant_message=updates_main_json,
630
+ content=updates_main_json,
631
631
  )
632
632
  # Store the ID of the tool call so allow skipping the corresponding response
633
633
  if self.function_id_buffer:
@@ -959,7 +959,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
959
959
  processed_chunk = AssistantMessage(
960
960
  id=msg_obj.id,
961
961
  date=msg_obj.created_at,
962
- assistant_message=func_args["message"],
962
+ content=func_args["message"],
963
963
  )
964
964
  self._push_to_buffer(processed_chunk)
965
965
  except Exception as e:
@@ -981,7 +981,7 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
981
981
  processed_chunk = AssistantMessage(
982
982
  id=msg_obj.id,
983
983
  date=msg_obj.created_at,
984
- assistant_message=func_args[self.assistant_message_tool_kwarg],
984
+ content=func_args[self.assistant_message_tool_kwarg],
985
985
  )
986
986
  # Store the ID of the tool call so allow skipping the corresponding response
987
987
  self.prev_assistant_message_id = function_call.id
@@ -1018,8 +1018,6 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
1018
1018
  # new_message = {"function_return": msg, "status": "success"}
1019
1019
  assert msg_obj.tool_call_id is not None
1020
1020
 
1021
- print(f"YYY printing the function call - {msg_obj.tool_call_id} == {self.prev_assistant_message_id} ???")
1022
-
1023
1021
  # Skip this if use_assistant_message is on
1024
1022
  if self.use_assistant_message and msg_obj.tool_call_id == self.prev_assistant_message_id:
1025
1023
  # Wipe the cache
@@ -43,7 +43,8 @@ def list_agents(
43
43
  ),
44
44
  server: "SyncServer" = Depends(get_letta_server),
45
45
  user_id: Optional[str] = Header(None, alias="user_id"),
46
- cursor: Optional[str] = Query(None, description="Cursor for pagination"),
46
+ before: Optional[str] = Query(None, description="Cursor for pagination"),
47
+ after: Optional[str] = Query(None, description="Cursor for pagination"),
47
48
  limit: Optional[int] = Query(None, description="Limit for pagination"),
48
49
  query_text: Optional[str] = Query(None, description="Search agents by name"),
49
50
  ):
@@ -66,7 +67,7 @@ def list_agents(
66
67
  }
67
68
 
68
69
  # Call list_agents with the dynamic kwargs
69
- agents = server.agent_manager.list_agents(actor=actor, cursor=cursor, limit=limit, **kwargs)
70
+ agents = server.agent_manager.list_agents(actor=actor, before=before, after=after, limit=limit, **kwargs)
70
71
  return agents
71
72
 
72
73
 
@@ -347,14 +348,11 @@ def list_archival_memory(
347
348
  """
348
349
  actor = server.user_manager.get_user_or_default(user_id=user_id)
349
350
 
350
- # TODO need to add support for non-postgres here
351
- # chroma will throw:
352
- # raise ValueError("Cannot run get_all_cursor with chroma")
353
-
354
- return server.get_agent_archival_cursor(
351
+ return server.get_agent_archival(
355
352
  user_id=actor.id,
356
353
  agent_id=agent_id,
357
- cursor=after, # TODO: deleting before, after. is this expected?
354
+ after=after,
355
+ before=before,
358
356
  limit=limit,
359
357
  )
360
358
 
@@ -429,7 +427,7 @@ def list_messages(
429
427
  """
430
428
  actor = server.user_manager.get_user_or_default(user_id=user_id)
431
429
 
432
- return server.get_agent_recall_cursor(
430
+ return server.get_agent_recall(
433
431
  user_id=actor.id,
434
432
  agent_id=agent_id,
435
433
  before=before,
@@ -560,9 +558,6 @@ async def process_message_background(
560
558
  )
561
559
  server.job_manager.update_job_by_id(job_id=job_id, job_update=job_update, actor=actor)
562
560
 
563
- # Add job usage statistics
564
- server.job_manager.add_job_usage(job_id=job_id, usage=result.usage, actor=actor)
565
-
566
561
  except Exception as e:
567
562
  # Update job status to failed
568
563
  job_update = JobUpdate(
@@ -3,6 +3,7 @@ from typing import TYPE_CHECKING, List, Optional
3
3
  from fastapi import APIRouter, Body, Depends, Header, HTTPException, Query
4
4
 
5
5
  from letta.orm.errors import NoResultFound
6
+ from letta.schemas.agent import AgentState
6
7
  from letta.schemas.block import Block, BlockUpdate, CreateBlock
7
8
  from letta.server.rest_api.utils import get_letta_server
8
9
  from letta.server.server import SyncServer
@@ -73,3 +74,21 @@ def retrieve_block(
73
74
  return block
74
75
  except NoResultFound:
75
76
  raise HTTPException(status_code=404, detail="Block not found")
77
+
78
+
79
+ @router.get("/{block_id}/agents", response_model=List[AgentState], operation_id="list_agents_for_block")
80
+ def list_agents_for_block(
81
+ block_id: str,
82
+ server: SyncServer = Depends(get_letta_server),
83
+ user_id: Optional[str] = Header(None, alias="user_id"),
84
+ ):
85
+ """
86
+ Retrieves all agents associated with the specified block.
87
+ Raises a 404 if the block does not exist.
88
+ """
89
+ actor = server.user_manager.get_user_or_default(user_id=user_id)
90
+ try:
91
+ agents = server.block_manager.get_agents_for_block(block_id=block_id, actor=actor)
92
+ return agents
93
+ except NoResultFound:
94
+ raise HTTPException(status_code=404, detail=f"Block with id={block_id} not found")
@@ -14,7 +14,7 @@ router = APIRouter(prefix="/orgs", tags=["organization", "admin"])
14
14
 
15
15
  @router.get("/", tags=["admin"], response_model=List[Organization], operation_id="list_orgs")
16
16
  def get_all_orgs(
17
- cursor: Optional[str] = Query(None),
17
+ after: Optional[str] = Query(None),
18
18
  limit: Optional[int] = Query(50),
19
19
  server: "SyncServer" = Depends(get_letta_server),
20
20
  ):
@@ -22,7 +22,7 @@ def get_all_orgs(
22
22
  Get a list of all orgs in the database
23
23
  """
24
24
  try:
25
- orgs = server.organization_manager.list_organizations(cursor=cursor, limit=limit)
25
+ orgs = server.organization_manager.list_organizations(after=after, limit=limit)
26
26
  except HTTPException:
27
27
  raise
28
28
  except Exception as e:
@@ -13,7 +13,7 @@ router = APIRouter(prefix="/providers", tags=["providers"])
13
13
 
14
14
  @router.get("/", tags=["providers"], response_model=List[Provider], operation_id="list_providers")
15
15
  def list_providers(
16
- cursor: Optional[str] = Query(None),
16
+ after: Optional[str] = Query(None),
17
17
  limit: Optional[int] = Query(50),
18
18
  server: "SyncServer" = Depends(get_letta_server),
19
19
  ):
@@ -21,7 +21,7 @@ def list_providers(
21
21
  Get a list of all custom providers in the database
22
22
  """
23
23
  try:
24
- providers = server.provider_manager.list_providers(cursor=cursor, limit=limit)
24
+ providers = server.provider_manager.list_providers(after=after, limit=limit)
25
25
  except HTTPException:
26
26
  raise
27
27
  except Exception as e:
@@ -75,9 +75,12 @@ async def list_run_messages(
75
75
  run_id: str,
76
76
  server: "SyncServer" = Depends(get_letta_server),
77
77
  user_id: Optional[str] = Header(None, alias="user_id"),
78
- cursor: Optional[str] = Query(None, description="Cursor for pagination"),
78
+ before: Optional[str] = Query(None, description="Cursor for pagination"),
79
+ after: Optional[str] = Query(None, description="Cursor for pagination"),
79
80
  limit: Optional[int] = Query(100, description="Maximum number of messages to return"),
80
- ascending: bool = Query(True, description="Sort order by creation time"),
81
+ order: str = Query(
82
+ "desc", description="Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order."
83
+ ),
81
84
  role: Optional[MessageRole] = Query(None, description="Filter by role"),
82
85
  ):
83
86
  """
@@ -85,9 +88,10 @@ async def list_run_messages(
85
88
 
86
89
  Args:
87
90
  run_id: ID of the run
88
- cursor: Cursor for pagination
91
+ before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.
92
+ after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.
89
93
  limit: Maximum number of messages to return
90
- ascending: Sort order by creation time
94
+ order: Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order.
91
95
  role: Filter by role (user/assistant/system/tool)
92
96
  return_message_object: Whether to return Message objects or LettaMessage objects
93
97
  user_id: ID of the user making the request
@@ -95,15 +99,19 @@ async def list_run_messages(
95
99
  Returns:
96
100
  A list of messages associated with the run. Default is List[LettaMessage].
97
101
  """
102
+ if order not in ["asc", "desc"]:
103
+ raise HTTPException(status_code=400, detail="Order must be 'asc' or 'desc'")
104
+
98
105
  actor = server.user_manager.get_user_or_default(user_id=user_id)
99
106
 
100
107
  try:
101
- messages = server.job_manager.get_run_messages_cursor(
108
+ messages = server.job_manager.get_run_messages(
102
109
  run_id=run_id,
103
110
  actor=actor,
104
111
  limit=limit,
105
- cursor=cursor,
106
- ascending=ascending,
112
+ before=before,
113
+ after=after,
114
+ ascending=(order == "asc"),
107
115
  role=role,
108
116
  )
109
117
  return messages
@@ -68,13 +68,13 @@ def delete_sandbox_config(
68
68
  @router.get("/", response_model=List[PydanticSandboxConfig])
69
69
  def list_sandbox_configs(
70
70
  limit: int = Query(1000, description="Number of results to return"),
71
- cursor: Optional[str] = Query(None, description="Pagination cursor to fetch the next set of results"),
71
+ after: Optional[str] = Query(None, description="Pagination cursor to fetch the next set of results"),
72
72
  sandbox_type: Optional[SandboxType] = Query(None, description="Filter for this specific sandbox type"),
73
73
  server: SyncServer = Depends(get_letta_server),
74
74
  user_id: str = Depends(get_user_id),
75
75
  ):
76
76
  actor = server.user_manager.get_user_or_default(user_id=user_id)
77
- return server.sandbox_config_manager.list_sandbox_configs(actor, limit=limit, cursor=cursor, sandbox_type=sandbox_type)
77
+ return server.sandbox_config_manager.list_sandbox_configs(actor, limit=limit, after=after, sandbox_type=sandbox_type)
78
78
 
79
79
 
80
80
  ### Sandbox Environment Variable Routes
@@ -116,9 +116,9 @@ def delete_sandbox_env_var(
116
116
  def list_sandbox_env_vars(
117
117
  sandbox_config_id: str,
118
118
  limit: int = Query(1000, description="Number of results to return"),
119
- cursor: Optional[str] = Query(None, description="Pagination cursor to fetch the next set of results"),
119
+ after: Optional[str] = Query(None, description="Pagination cursor to fetch the next set of results"),
120
120
  server: SyncServer = Depends(get_letta_server),
121
121
  user_id: str = Depends(get_user_id),
122
122
  ):
123
123
  actor = server.user_manager.get_user_or_default(user_id=user_id)
124
- return server.sandbox_config_manager.list_sandbox_env_vars(sandbox_config_id, actor, limit=limit, cursor=cursor)
124
+ return server.sandbox_config_manager.list_sandbox_env_vars(sandbox_config_id, actor, limit=limit, after=after)
@@ -165,7 +165,7 @@ def list_source_passages(
165
165
  def list_source_files(
166
166
  source_id: str,
167
167
  limit: int = Query(1000, description="Number of files to return"),
168
- cursor: Optional[str] = Query(None, description="Pagination cursor to fetch the next set of results"),
168
+ after: Optional[str] = Query(None, description="Pagination cursor to fetch the next set of results"),
169
169
  server: "SyncServer" = Depends(get_letta_server),
170
170
  user_id: Optional[str] = Header(None, alias="user_id"), # Extract user_id from header, default to None if not present
171
171
  ):
@@ -173,7 +173,7 @@ def list_source_files(
173
173
  List paginated files associated with a data source.
174
174
  """
175
175
  actor = server.user_manager.get_user_or_default(user_id=user_id)
176
- return server.source_manager.list_files(source_id=source_id, limit=limit, cursor=cursor, actor=actor)
176
+ return server.source_manager.list_files(source_id=source_id, limit=limit, after=after, actor=actor)
177
177
 
178
178
 
179
179
  # it's redundant to include /delete in the URL path. The HTTP verb DELETE already implies that action.
@@ -13,7 +13,7 @@ router = APIRouter(prefix="/tags", tags=["tag", "admin"])
13
13
 
14
14
  @router.get("/", tags=["admin"], response_model=List[str], operation_id="list_tags")
15
15
  def list_tags(
16
- cursor: Optional[str] = Query(None),
16
+ after: Optional[str] = Query(None),
17
17
  limit: Optional[int] = Query(50),
18
18
  server: "SyncServer" = Depends(get_letta_server),
19
19
  query_text: Optional[str] = Query(None),
@@ -23,5 +23,5 @@ def list_tags(
23
23
  Get a list of all tags in the database
24
24
  """
25
25
  actor = server.user_manager.get_user_or_default(user_id=user_id)
26
- tags = server.agent_manager.list_tags(actor=actor, cursor=cursor, limit=limit, query_text=query_text)
26
+ tags = server.agent_manager.list_tags(actor=actor, after=after, limit=limit, query_text=query_text)
27
27
  return tags
@@ -50,7 +50,7 @@ def retrieve_tool(
50
50
 
51
51
  @router.get("/", response_model=List[Tool], operation_id="list_tools")
52
52
  def list_tools(
53
- cursor: Optional[str] = None,
53
+ after: Optional[str] = None,
54
54
  limit: Optional[int] = 50,
55
55
  server: SyncServer = Depends(get_letta_server),
56
56
  user_id: Optional[str] = Header(None, alias="user_id"), # Extract user_id from header, default to None if not present
@@ -60,7 +60,7 @@ def list_tools(
60
60
  """
61
61
  try:
62
62
  actor = server.user_manager.get_user_or_default(user_id=user_id)
63
- return server.tool_manager.list_tools(actor=actor, cursor=cursor, limit=limit)
63
+ return server.tool_manager.list_tools(actor=actor, after=after, limit=limit)
64
64
  except Exception as e:
65
65
  # Log or print the full exception here for debugging
66
66
  print(f"Error occurred: {e}")
@@ -15,7 +15,7 @@ router = APIRouter(prefix="/users", tags=["users", "admin"])
15
15
 
16
16
  @router.get("/", tags=["admin"], response_model=List[User], operation_id="list_users")
17
17
  def list_users(
18
- cursor: Optional[str] = Query(None),
18
+ after: Optional[str] = Query(None),
19
19
  limit: Optional[int] = Query(50),
20
20
  server: "SyncServer" = Depends(get_letta_server),
21
21
  ):
@@ -23,7 +23,7 @@ def list_users(
23
23
  Get a list of all users in the database
24
24
  """
25
25
  try:
26
- next_cursor, users = server.user_manager.list_users(cursor=cursor, limit=limit)
26
+ users = server.user_manager.list_users(after=after, limit=limit)
27
27
  except HTTPException:
28
28
  raise
29
29
  except Exception as e: