letta-nightly 0.11.7.dev20250915104130__py3-none-any.whl → 0.11.7.dev20250917104122__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- letta/__init__.py +10 -2
- letta/adapters/letta_llm_request_adapter.py +0 -1
- letta/adapters/letta_llm_stream_adapter.py +0 -1
- letta/agent.py +1 -1
- letta/agents/letta_agent.py +1 -4
- letta/agents/letta_agent_v2.py +2 -1
- letta/agents/voice_agent.py +1 -1
- letta/functions/function_sets/multi_agent.py +1 -1
- letta/functions/helpers.py +1 -1
- letta/helpers/converters.py +8 -2
- letta/helpers/crypto_utils.py +144 -0
- letta/llm_api/llm_api_tools.py +0 -1
- letta/llm_api/llm_client_base.py +0 -2
- letta/orm/__init__.py +1 -0
- letta/orm/agent.py +5 -1
- letta/orm/job.py +3 -1
- letta/orm/mcp_oauth.py +6 -0
- letta/orm/mcp_server.py +7 -1
- letta/orm/sqlalchemy_base.py +2 -1
- letta/prompts/gpt_system.py +13 -15
- letta/prompts/system_prompts/__init__.py +27 -0
- letta/prompts/{system/memgpt_chat.txt → system_prompts/memgpt_chat.py} +2 -0
- letta/prompts/{system/memgpt_generate_tool.txt → system_prompts/memgpt_generate_tool.py} +4 -2
- letta/prompts/{system/memgpt_v2_chat.txt → system_prompts/memgpt_v2_chat.py} +2 -0
- letta/prompts/{system/react.txt → system_prompts/react.py} +2 -0
- letta/prompts/{system/sleeptime_doc_ingest.txt → system_prompts/sleeptime_doc_ingest.py} +2 -0
- letta/prompts/{system/sleeptime_v2.txt → system_prompts/sleeptime_v2.py} +2 -0
- letta/prompts/{system/summary_system_prompt.txt → system_prompts/summary_system_prompt.py} +2 -0
- letta/prompts/{system/voice_chat.txt → system_prompts/voice_chat.py} +2 -0
- letta/prompts/{system/voice_sleeptime.txt → system_prompts/voice_sleeptime.py} +2 -0
- letta/prompts/{system/workflow.txt → system_prompts/workflow.py} +2 -0
- letta/schemas/agent.py +10 -7
- letta/schemas/job.py +10 -0
- letta/schemas/mcp.py +146 -6
- letta/schemas/provider_trace.py +0 -2
- letta/schemas/run.py +2 -0
- letta/schemas/secret.py +378 -0
- letta/serialize_schemas/marshmallow_agent.py +4 -0
- letta/server/rest_api/dependencies.py +37 -0
- letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +4 -3
- letta/server/rest_api/routers/v1/__init__.py +2 -0
- letta/server/rest_api/routers/v1/agents.py +115 -107
- letta/server/rest_api/routers/v1/archives.py +113 -0
- letta/server/rest_api/routers/v1/blocks.py +44 -20
- letta/server/rest_api/routers/v1/embeddings.py +3 -3
- letta/server/rest_api/routers/v1/folders.py +107 -47
- letta/server/rest_api/routers/v1/groups.py +52 -32
- letta/server/rest_api/routers/v1/identities.py +110 -21
- letta/server/rest_api/routers/v1/internal_templates.py +28 -13
- letta/server/rest_api/routers/v1/jobs.py +19 -14
- letta/server/rest_api/routers/v1/llms.py +6 -8
- letta/server/rest_api/routers/v1/messages.py +14 -14
- letta/server/rest_api/routers/v1/organizations.py +1 -1
- letta/server/rest_api/routers/v1/providers.py +40 -16
- letta/server/rest_api/routers/v1/runs.py +28 -20
- letta/server/rest_api/routers/v1/sandbox_configs.py +25 -25
- letta/server/rest_api/routers/v1/sources.py +44 -45
- letta/server/rest_api/routers/v1/steps.py +27 -25
- letta/server/rest_api/routers/v1/tags.py +11 -7
- letta/server/rest_api/routers/v1/telemetry.py +11 -6
- letta/server/rest_api/routers/v1/tools.py +78 -80
- letta/server/rest_api/routers/v1/users.py +1 -1
- letta/server/rest_api/routers/v1/voice.py +6 -5
- letta/server/rest_api/utils.py +1 -18
- letta/services/agent_manager.py +17 -9
- letta/services/agent_serialization_manager.py +11 -3
- letta/services/archive_manager.py +73 -0
- letta/services/file_manager.py +6 -0
- letta/services/group_manager.py +2 -1
- letta/services/helpers/agent_manager_helper.py +6 -1
- letta/services/identity_manager.py +67 -0
- letta/services/job_manager.py +18 -2
- letta/services/mcp_manager.py +198 -82
- letta/services/provider_manager.py +14 -1
- letta/services/source_manager.py +11 -1
- letta/services/telemetry_manager.py +2 -0
- letta/services/tool_executor/composio_tool_executor.py +1 -1
- letta/services/tool_manager.py +46 -9
- letta/services/tool_sandbox/base.py +2 -3
- letta/utils.py +4 -2
- {letta_nightly-0.11.7.dev20250915104130.dist-info → letta_nightly-0.11.7.dev20250917104122.dist-info}/METADATA +5 -2
- {letta_nightly-0.11.7.dev20250915104130.dist-info → letta_nightly-0.11.7.dev20250917104122.dist-info}/RECORD +85 -94
- letta/prompts/system/memgpt_base.txt +0 -54
- letta/prompts/system/memgpt_chat_compressed.txt +0 -13
- letta/prompts/system/memgpt_chat_fstring.txt +0 -51
- letta/prompts/system/memgpt_convo_only.txt +0 -12
- letta/prompts/system/memgpt_doc.txt +0 -50
- letta/prompts/system/memgpt_gpt35_extralong.txt +0 -53
- letta/prompts/system/memgpt_intuitive_knowledge.txt +0 -31
- letta/prompts/system/memgpt_memory_only.txt +0 -29
- letta/prompts/system/memgpt_modified_chat.txt +0 -23
- letta/prompts/system/memgpt_modified_o1.txt +0 -31
- letta/prompts/system/memgpt_offline_memory.txt +0 -23
- letta/prompts/system/memgpt_offline_memory_chat.txt +0 -35
- letta/prompts/system/memgpt_sleeptime_chat.txt +0 -52
- letta/prompts/system/sleeptime.txt +0 -37
- {letta_nightly-0.11.7.dev20250915104130.dist-info → letta_nightly-0.11.7.dev20250917104122.dist-info}/WHEEL +0 -0
- {letta_nightly-0.11.7.dev20250915104130.dist-info → letta_nightly-0.11.7.dev20250917104122.dist-info}/entry_points.txt +0 -0
- {letta_nightly-0.11.7.dev20250915104130.dist-info → letta_nightly-0.11.7.dev20250917104122.dist-info}/licenses/LICENSE +0 -0

letta/server/rest_api/routers/v1/jobs.py

@@ -1,11 +1,11 @@
 from typing import List, Optional

-from fastapi import APIRouter, Depends,
+from fastapi import APIRouter, Depends, HTTPException, Query

 from letta.orm.errors import NoResultFound
 from letta.schemas.enums import JobStatus
 from letta.schemas.job import Job
-from letta.server.rest_api.
+from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
 from letta.server.server import SyncServer
 from letta.settings import settings

@@ -19,18 +19,23 @@ async def list_jobs(
     before: Optional[str] = Query(None, description="Cursor for pagination"),
     after: Optional[str] = Query(None, description="Cursor for pagination"),
     limit: Optional[int] = Query(50, description="Limit for pagination"),
+    active: bool = Query(False, description="Filter for active jobs."),
     ascending: bool = Query(True, description="Whether to sort jobs oldest to newest (True, default) or newest to oldest (False)"),
-
+    headers: HeaderParams = Depends(get_headers),
 ):
     """
     List all jobs.
-    TODO (cliandy): implementation for pagination
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
+
+    statuses = None
+    if active:
+        statuses = [JobStatus.created, JobStatus.running]

     # TODO: add filtering by status
     return await server.job_manager.list_jobs_async(
         actor=actor,
+        statuses=statuses,
         source_id=source_id,
         before=before,
         after=after,
@@ -39,10 +44,10 @@ async def list_jobs(
     )


-@router.get("/active", response_model=List[Job], operation_id="list_active_jobs")
+@router.get("/active", response_model=List[Job], operation_id="list_active_jobs", deprecated=True)
 async def list_active_jobs(
     server: "SyncServer" = Depends(get_letta_server),
-
+    headers: HeaderParams = Depends(get_headers),
     source_id: Optional[str] = Query(None, description="Only list jobs associated with the source."),
     before: Optional[str] = Query(None, description="Cursor for pagination"),
     after: Optional[str] = Query(None, description="Cursor for pagination"),
@@ -52,7 +57,7 @@ async def list_active_jobs(
     """
     List all active jobs.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
     return await server.job_manager.list_jobs_async(
         actor=actor,
         statuses=[JobStatus.created, JobStatus.running],
@@ -67,13 +72,13 @@ async def list_active_jobs(
 @router.get("/{job_id}", response_model=Job, operation_id="retrieve_job")
 async def retrieve_job(
     job_id: str,
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Get the status of a job.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)

     try:
         return await server.job_manager.get_job_by_id_async(job_id=job_id, actor=actor)
@@ -84,7 +89,7 @@ async def retrieve_job(
 @router.patch("/{job_id}/cancel", response_model=Job, operation_id="cancel_job")
 async def cancel_job(
     job_id: str,
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
@@ -93,7 +98,7 @@ async def cancel_job(
     This endpoint marks a job as cancelled, which will cause any associated
     agent execution to terminate as soon as possible.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
     if not settings.track_agent_run:
         raise HTTPException(status_code=400, detail="Agent run tracking is disabled")

@@ -113,13 +118,13 @@ async def cancel_job(
 @router.delete("/{job_id}", response_model=Job, operation_id="delete_job")
 async def delete_job(
     job_id: str,
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Delete a job by its job_id.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)

     try:
         job = await server.job_manager.delete_job_by_id_async(job_id=job_id, actor=actor)

letta/server/rest_api/routers/v1/llms.py

@@ -1,11 +1,11 @@
 from typing import TYPE_CHECKING, List, Optional

-from fastapi import APIRouter, Depends,
+from fastapi import APIRouter, Depends, Query

 from letta.schemas.embedding_config import EmbeddingConfig
 from letta.schemas.enums import ProviderCategory, ProviderType
 from letta.schemas.llm_config import LLMConfig
-from letta.server.rest_api.
+from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server

 if TYPE_CHECKING:
     from letta.server.server import SyncServer
@@ -19,11 +19,10 @@ async def list_llm_models(
     provider_name: Optional[str] = Query(None),
     provider_type: Optional[ProviderType] = Query(None),
     server: "SyncServer" = Depends(get_letta_server),
-
-    # Extract user_id from header, default to None if not present
+    headers: HeaderParams = Depends(get_headers),
 ):
     """List available LLM models using the asynchronous implementation for improved performance"""
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)

     models = await server.list_llm_models_async(
         provider_category=provider_category,
@@ -38,11 +37,10 @@ async def list_llm_models(
 @router.get("/embedding", response_model=List[EmbeddingConfig], operation_id="list_embedding_models")
 async def list_embedding_models(
     server: "SyncServer" = Depends(get_letta_server),
-
-    # Extract user_id from header, default to None if not present
+    headers: HeaderParams = Depends(get_headers),
 ):
     """List available embedding models using the asynchronous implementation for improved performance"""
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
     models = await server.list_embedding_models_async(actor=actor)

     return models

letta/server/rest_api/routers/v1/messages.py

@@ -1,6 +1,6 @@
 from typing import List, Literal, Optional

-from fastapi import APIRouter, Body, Depends,
+from fastapi import APIRouter, Body, Depends, Query
 from fastapi.exceptions import HTTPException
 from starlette.requests import Request

@@ -10,7 +10,7 @@ from letta.orm.errors import NoResultFound
 from letta.schemas.job import BatchJob, JobStatus, JobType, JobUpdate
 from letta.schemas.letta_request import CreateBatch
 from letta.schemas.letta_response import LettaBatchMessages
-from letta.server.rest_api.
+from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
 from letta.server.server import SyncServer
 from letta.settings import settings

@@ -28,7 +28,7 @@ async def create_batch(
     request: Request,
     payload: CreateBatch = Body(..., description="Messages and config for all agents"),
     server: SyncServer = Depends(get_letta_server),
-
+    headers: HeaderParams = Depends(get_headers),
 ):
     """
     Submit a batch of agent runs for asynchronous processing.
@@ -47,7 +47,7 @@ async def create_batch(
     if not settings.enable_batch_job_polling:
         logger.warning("Batch job polling is disabled. Enable batch processing by setting LETTA_ENABLE_BATCH_JOB_POLLING to True.")

-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
     batch_job = BatchJob(
         user_id=actor.id,
         status=JobStatus.running,
@@ -86,13 +86,13 @@ async def create_batch(
 @router.get("/batches/{batch_id}", response_model=BatchJob, operation_id="retrieve_batch")
 async def retrieve_batch(
     batch_id: str,
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Retrieve the status and details of a batch run.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)

     try:
         job = await server.job_manager.get_job_by_id_async(job_id=batch_id, actor=actor)
@@ -114,13 +114,13 @@ async def list_batches(
         "desc", description="Sort order for jobs by creation time. 'asc' for oldest first, 'desc' for newest first"
     ),
     order_by: Literal["created_at"] = Query("created_at", description="Field to sort by"),
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     List all batch runs.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)

     jobs = server.job_manager.list_jobs(
         actor=actor,
@@ -137,9 +137,9 @@ async def list_batches(
 @router.get(
     "/batches/{batch_id}/messages",
     response_model=LettaBatchMessages,
-    operation_id="
+    operation_id="list_messages_for_batch",
 )
-async def
+async def list_messages_for_batch(
     batch_id: str,
     before: Optional[str] = Query(
         None, description="Message ID cursor for pagination. Returns messages that come before this message ID in the specified sort order"
@@ -153,13 +153,13 @@ async def list_batch_messages(
     ),
     order_by: Literal["created_at"] = Query("created_at", description="Field to sort by"),
     agent_id: Optional[str] = Query(None, description="Filter messages by agent ID"),
-
+    headers: HeaderParams = Depends(get_headers),
     server: SyncServer = Depends(get_letta_server),
 ):
     """
     Get response messages for a specific batch job.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)

     # Verify the batch job exists and the user has access to it
     try:
@@ -180,12 +180,12 @@ async def list_batch_messages(
 async def cancel_batch(
     batch_id: str,
     server: "SyncServer" = Depends(get_letta_server),
-
+    headers: HeaderParams = Depends(get_headers),
 ):
     """
     Cancel a batch run.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)

     try:
         job = await server.job_manager.get_job_by_id_async(job_id=batch_id, actor=actor)

letta/server/rest_api/routers/v1/organizations.py

@@ -3,7 +3,7 @@ from typing import TYPE_CHECKING, List, Optional
 from fastapi import APIRouter, Body, Depends, HTTPException, Query

 from letta.schemas.organization import Organization, OrganizationCreate, OrganizationUpdate
-from letta.server.rest_api.
+from letta.server.rest_api.dependencies import get_letta_server

 if TYPE_CHECKING:
     from letta.server.server import SyncServer

letta/server/rest_api/routers/v1/providers.py

@@ -1,13 +1,13 @@
-from typing import TYPE_CHECKING, List, Optional
+from typing import TYPE_CHECKING, List, Literal, Optional

-from fastapi import APIRouter, Body, Depends,
+from fastapi import APIRouter, Body, Depends, HTTPException, Query, status
 from fastapi.responses import JSONResponse

 from letta.errors import LLMAuthenticationError
 from letta.orm.errors import NoResultFound
 from letta.schemas.enums import ProviderType
 from letta.schemas.providers import Provider, ProviderCheck, ProviderCreate, ProviderUpdate
-from letta.server.rest_api.
+from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server

 if TYPE_CHECKING:
     from letta.server.server import SyncServer
@@ -17,20 +17,31 @@ router = APIRouter(prefix="/providers", tags=["providers"])

 @router.get("/", response_model=List[Provider], operation_id="list_providers")
 async def list_providers(
-
-
-
-
-
+    before: Optional[str] = Query(
+        None,
+        description="Provider ID cursor for pagination. Returns providers that come before this provider ID in the specified sort order",
+    ),
+    after: Optional[str] = Query(
+        None,
+        description="Provider ID cursor for pagination. Returns providers that come after this provider ID in the specified sort order",
+    ),
+    limit: Optional[int] = Query(50, description="Maximum number of providers to return"),
+    order: Literal["asc", "desc"] = Query(
+        "desc", description="Sort order for providers by creation time. 'asc' for oldest first, 'desc' for newest first"
+    ),
+    order_by: Literal["created_at"] = Query("created_at", description="Field to sort by"),
+    name: Optional[str] = Query(None, description="Filter providers by name"),
+    provider_type: Optional[ProviderType] = Query(None, description="Filter providers by type"),
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Get a list of all custom providers.
     """
     try:
-        actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+        actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
         providers = await server.provider_manager.list_providers_async(
-            after=after, limit=limit, actor=actor, name=name, provider_type=provider_type
+            before=before, after=after, limit=limit, actor=actor, name=name, provider_type=provider_type, ascending=(order == "asc")
         )
     except HTTPException:
         raise
@@ -39,16 +50,29 @@ async def list_providers(
     return providers


+@router.get("/{provider_id}", response_model=Provider, operation_id="retrieve_provider")
+async def retrieve_provider(
+    provider_id: str,
+    headers: HeaderParams = Depends(get_headers),
+    server: "SyncServer" = Depends(get_letta_server),
+):
+    """
+    Get a provider by ID.
+    """
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
+    return await server.provider_manager.get_provider_async(provider_id=provider_id, actor=actor)
+
+
 @router.post("/", response_model=Provider, operation_id="create_provider")
 async def create_provider(
     request: ProviderCreate = Body(...),
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Create a new custom provider.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
     for field_name in request.model_fields:
         value = getattr(request, field_name, None)
         if isinstance(value, str) and value == "":
@@ -64,13 +88,13 @@ async def create_provider(
 async def modify_provider(
     provider_id: str,
     request: ProviderUpdate = Body(...),
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Update an existing custom provider.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
     return await server.provider_manager.update_provider_async(provider_id=provider_id, provider_update=request, actor=actor)


@@ -99,14 +123,14 @@ async def check_provider(
 @router.delete("/{provider_id}", response_model=None, operation_id="delete_provider")
 async def delete_provider(
     provider_id: str,
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Delete an existing custom provider.
     """
     try:
-        actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+        actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
         await server.provider_manager.delete_provider_by_id_async(provider_id=provider_id, actor=actor)
         return JSONResponse(status_code=status.HTTP_200_OK, content={"message": f"Provider id={provider_id} successfully deleted"})
     except NoResultFound:

letta/server/rest_api/routers/v1/runs.py

@@ -1,25 +1,26 @@
 from datetime import timedelta
 from typing import Annotated, List, Literal, Optional

-from fastapi import APIRouter, Body, Depends,
+from fastapi import APIRouter, Body, Depends, HTTPException, Query
 from pydantic import Field

 from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client
 from letta.helpers.datetime_helpers import get_utc_time
 from letta.orm.errors import NoResultFound
-from letta.schemas.enums import JobStatus, JobType
+from letta.schemas.enums import JobStatus, JobType
 from letta.schemas.letta_message import LettaMessageUnion
 from letta.schemas.letta_request import RetrieveStreamRequest
+from letta.schemas.letta_stop_reason import StopReasonType
 from letta.schemas.openai.chat_completion_response import UsageStatistics
 from letta.schemas.run import Run
 from letta.schemas.step import Step
+from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
 from letta.server.rest_api.redis_stream_manager import redis_sse_stream_generator
 from letta.server.rest_api.streaming_response import (
     StreamingResponseWithStatusCode,
     add_keepalive_to_stream,
     cancellation_aware_stream_wrapper,
 )
-from letta.server.rest_api.utils import get_letta_server
 from letta.server.server import SyncServer
 from letta.settings import settings

@@ -31,29 +32,36 @@ def list_runs(
     server: "SyncServer" = Depends(get_letta_server),
     agent_ids: Optional[List[str]] = Query(None, description="The unique identifier of the agent associated with the run."),
     background: Optional[bool] = Query(None, description="If True, filters for runs that were created in background mode."),
+    stop_reason: Optional[StopReasonType] = Query(None, description="Filter runs by stop reason."),
     after: Optional[str] = Query(None, description="Cursor for pagination"),
     before: Optional[str] = Query(None, description="Cursor for pagination"),
     limit: Optional[int] = Query(50, description="Maximum number of runs to return"),
+    active: bool = Query(False, description="Filter for active runs."),
     ascending: bool = Query(
         False,
         description="Whether to sort agents oldest to newest (True) or newest to oldest (False, default)",
     ),
-
+    headers: HeaderParams = Depends(get_headers),
 ):
     """
     List all runs.
     """
-    actor = server.user_manager.get_user_or_default(user_id=actor_id)
+    actor = server.user_manager.get_user_or_default(user_id=headers.actor_id)
+    statuses = None
+    if active:
+        statuses = [JobStatus.created, JobStatus.running]

     runs = [
         Run.from_job(job)
         for job in server.job_manager.list_jobs(
             actor=actor,
+            statuses=statuses,
             job_type=JobType.RUN,
             limit=limit,
             before=before,
             after=after,
             ascending=False,
+            stop_reason=stop_reason,
         )
     ]
     if agent_ids:
@@ -63,17 +71,17 @@ def list_runs(
     return runs


-@router.get("/active", response_model=List[Run], operation_id="list_active_runs")
+@router.get("/active", response_model=List[Run], operation_id="list_active_runs", deprecated=True)
 def list_active_runs(
     server: "SyncServer" = Depends(get_letta_server),
     agent_ids: Optional[List[str]] = Query(None, description="The unique identifier of the agent associated with the run."),
     background: Optional[bool] = Query(None, description="If True, filters for runs that were created in background mode."),
-
+    headers: HeaderParams = Depends(get_headers),
 ):
     """
     List all active runs.
     """
-    actor = server.user_manager.get_user_or_default(user_id=actor_id)
+    actor = server.user_manager.get_user_or_default(user_id=headers.actor_id)

     active_runs = server.job_manager.list_jobs(actor=actor, statuses=[JobStatus.created, JobStatus.running], job_type=JobType.RUN)
     active_runs = [Run.from_job(job) for job in active_runs]
@@ -90,13 +98,13 @@ def list_active_runs(
 @router.get("/{run_id}", response_model=Run, operation_id="retrieve_run")
 def retrieve_run(
     run_id: str,
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Get the status of a run.
     """
-    actor = server.user_manager.get_user_or_default(user_id=actor_id)
+    actor = server.user_manager.get_user_or_default(user_id=headers.actor_id)

     try:
         job = server.job_manager.get_job_by_id(job_id=run_id, actor=actor)
@@ -118,7 +126,7 @@ RunMessagesResponse = Annotated[
 async def list_run_messages(
     run_id: str,
     server: "SyncServer" = Depends(get_letta_server),
-
+    headers: HeaderParams = Depends(get_headers),
     before: Optional[str] = Query(
         None, description="Message ID cursor for pagination. Returns messages that come before this message ID in the specified sort order"
     ),
@@ -131,7 +139,7 @@ async def list_run_messages(
     ),
 ):
     """Get response messages associated with a run."""
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)

     try:
         messages = server.job_manager.get_run_messages(
@@ -150,13 +158,13 @@ async def list_run_messages(
 @router.get("/{run_id}/usage", response_model=UsageStatistics, operation_id="retrieve_run_usage")
 def retrieve_run_usage(
     run_id: str,
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Get usage statistics for a run.
     """
-    actor = server.user_manager.get_user_or_default(user_id=actor_id)
+    actor = server.user_manager.get_user_or_default(user_id=headers.actor_id)

     try:
         usage = server.job_manager.get_job_usage(job_id=run_id, actor=actor)
@@ -173,7 +181,7 @@ def retrieve_run_usage(
 async def list_run_steps(
     run_id: str,
     server: "SyncServer" = Depends(get_letta_server),
-
+    headers: HeaderParams = Depends(get_headers),
     before: Optional[str] = Query(None, description="Cursor for pagination"),
     after: Optional[str] = Query(None, description="Cursor for pagination"),
     limit: Optional[int] = Query(100, description="Maximum number of messages to return"),
@@ -197,7 +205,7 @@ async def list_run_steps(
     if order not in ["asc", "desc"]:
         raise HTTPException(status_code=400, detail="Order must be 'asc' or 'desc'")

-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)

     try:
         steps = server.job_manager.get_job_steps(
@@ -216,13 +224,13 @@ async def list_run_steps(
 @router.delete("/{run_id}", response_model=Run, operation_id="delete_run")
 async def delete_run(
     run_id: str,
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Delete a run by its run_id.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)

     try:
         job = await server.job_manager.delete_job_by_id_async(job_id=run_id, actor=actor)
@@ -266,10 +274,10 @@ async def delete_run(
 async def retrieve_stream(
     run_id: str,
     request: RetrieveStreamRequest = Body(None),
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
     try:
         job = server.job_manager.get_job_by_id(job_id=run_id, actor=actor)
     except NoResultFound: