letta-nightly 0.11.7.dev20250915104130__py3-none-any.whl → 0.11.7.dev20250916104104__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
- letta/functions/function_sets/multi_agent.py +1 -1
- letta/functions/helpers.py +1 -1
- letta/prompts/gpt_system.py +13 -15
- letta/prompts/system_prompts/__init__.py +27 -0
- letta/prompts/{system/memgpt_chat.txt → system_prompts/memgpt_chat.py} +2 -0
- letta/prompts/{system/memgpt_generate_tool.txt → system_prompts/memgpt_generate_tool.py} +4 -2
- letta/prompts/{system/memgpt_v2_chat.txt → system_prompts/memgpt_v2_chat.py} +2 -0
- letta/prompts/{system/react.txt → system_prompts/react.py} +2 -0
- letta/prompts/{system/sleeptime_doc_ingest.txt → system_prompts/sleeptime_doc_ingest.py} +2 -0
- letta/prompts/{system/sleeptime_v2.txt → system_prompts/sleeptime_v2.py} +2 -0
- letta/prompts/{system/summary_system_prompt.txt → system_prompts/summary_system_prompt.py} +2 -0
- letta/prompts/{system/voice_chat.txt → system_prompts/voice_chat.py} +2 -0
- letta/prompts/{system/voice_sleeptime.txt → system_prompts/voice_sleeptime.py} +2 -0
- letta/prompts/{system/workflow.txt → system_prompts/workflow.py} +2 -0
- letta/server/rest_api/dependencies.py +37 -0
- letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +4 -3
- letta/server/rest_api/routers/v1/agents.py +106 -103
- letta/server/rest_api/routers/v1/blocks.py +44 -20
- letta/server/rest_api/routers/v1/embeddings.py +3 -3
- letta/server/rest_api/routers/v1/folders.py +107 -47
- letta/server/rest_api/routers/v1/groups.py +52 -32
- letta/server/rest_api/routers/v1/identities.py +110 -21
- letta/server/rest_api/routers/v1/internal_templates.py +28 -13
- letta/server/rest_api/routers/v1/jobs.py +12 -12
- letta/server/rest_api/routers/v1/llms.py +6 -8
- letta/server/rest_api/routers/v1/messages.py +14 -14
- letta/server/rest_api/routers/v1/organizations.py +1 -1
- letta/server/rest_api/routers/v1/providers.py +40 -16
- letta/server/rest_api/routers/v1/runs.py +19 -19
- letta/server/rest_api/routers/v1/sandbox_configs.py +25 -25
- letta/server/rest_api/routers/v1/sources.py +44 -45
- letta/server/rest_api/routers/v1/steps.py +27 -25
- letta/server/rest_api/routers/v1/tags.py +11 -7
- letta/server/rest_api/routers/v1/telemetry.py +11 -6
- letta/server/rest_api/routers/v1/tools.py +71 -54
- letta/server/rest_api/routers/v1/users.py +1 -1
- letta/server/rest_api/routers/v1/voice.py +6 -5
- letta/server/rest_api/utils.py +1 -18
- letta/services/file_manager.py +6 -0
- letta/services/group_manager.py +2 -1
- letta/services/identity_manager.py +67 -0
- letta/services/provider_manager.py +14 -1
- letta/services/source_manager.py +11 -1
- letta/services/tool_manager.py +46 -9
- letta/utils.py +4 -2
- {letta_nightly-0.11.7.dev20250915104130.dist-info → letta_nightly-0.11.7.dev20250916104104.dist-info}/METADATA +1 -1
- {letta_nightly-0.11.7.dev20250915104130.dist-info → letta_nightly-0.11.7.dev20250916104104.dist-info}/RECORD +50 -62
- letta/prompts/system/memgpt_base.txt +0 -54
- letta/prompts/system/memgpt_chat_compressed.txt +0 -13
- letta/prompts/system/memgpt_chat_fstring.txt +0 -51
- letta/prompts/system/memgpt_convo_only.txt +0 -12
- letta/prompts/system/memgpt_doc.txt +0 -50
- letta/prompts/system/memgpt_gpt35_extralong.txt +0 -53
- letta/prompts/system/memgpt_intuitive_knowledge.txt +0 -31
- letta/prompts/system/memgpt_memory_only.txt +0 -29
- letta/prompts/system/memgpt_modified_chat.txt +0 -23
- letta/prompts/system/memgpt_modified_o1.txt +0 -31
- letta/prompts/system/memgpt_offline_memory.txt +0 -23
- letta/prompts/system/memgpt_offline_memory_chat.txt +0 -35
- letta/prompts/system/memgpt_sleeptime_chat.txt +0 -52
- letta/prompts/system/sleeptime.txt +0 -37
- {letta_nightly-0.11.7.dev20250915104130.dist-info → letta_nightly-0.11.7.dev20250916104104.dist-info}/WHEEL +0 -0
- {letta_nightly-0.11.7.dev20250915104130.dist-info → letta_nightly-0.11.7.dev20250916104104.dist-info}/entry_points.txt +0 -0
- {letta_nightly-0.11.7.dev20250915104130.dist-info → letta_nightly-0.11.7.dev20250916104104.dist-info}/licenses/LICENSE +0 -0
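
Nearly every router change below follows the same pattern: the per-route header parameter is replaced by a shared `headers: HeaderParams = Depends(get_headers)` dependency from the new `letta/server/rest_api/dependencies.py` module, and handlers read `headers.actor_id` instead of a loose `actor_id` argument; `get_letta_server` is now imported from the same module. The diff only shows the call sites, so the following is a minimal sketch of what such a dependency could look like, not the actual module contents; the header name is assumed from the "Extract user_id from header" comment removed in `llms.py`.

```python
# Hypothetical sketch of letta/server/rest_api/dependencies.py, inferred from its
# call sites in this diff. Only HeaderParams, get_headers, get_letta_server, and
# the headers.actor_id attribute are confirmed by the diff; everything else here
# is an assumption.
from dataclasses import dataclass
from typing import Optional

from fastapi import Header


@dataclass
class HeaderParams:
    # Read by route handlers as headers.actor_id
    actor_id: Optional[str] = None


async def get_headers(
    # Assumed: the actor identity still arrives in the user_id header, per the
    # comment removed from llms.py ("Extract user_id from header ...").
    user_id: Optional[str] = Header(None, alias="user_id"),
) -> HeaderParams:
    return HeaderParams(actor_id=user_id)
```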
letta/server/rest_api/routers/v1/jobs.py

```diff
@@ -1,11 +1,11 @@
 from typing import List, Optional
 
-from fastapi import APIRouter, Depends,
+from fastapi import APIRouter, Depends, HTTPException, Query
 
 from letta.orm.errors import NoResultFound
 from letta.schemas.enums import JobStatus
 from letta.schemas.job import Job
-from letta.server.rest_api.
+from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
 from letta.server.server import SyncServer
 from letta.settings import settings
 
@@ -20,13 +20,13 @@ async def list_jobs(
     after: Optional[str] = Query(None, description="Cursor for pagination"),
     limit: Optional[int] = Query(50, description="Limit for pagination"),
     ascending: bool = Query(True, description="Whether to sort jobs oldest to newest (True, default) or newest to oldest (False)"),
-
+    headers: HeaderParams = Depends(get_headers),
 ):
     """
     List all jobs.
     TODO (cliandy): implementation for pagination
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
 
     # TODO: add filtering by status
     return await server.job_manager.list_jobs_async(
@@ -42,7 +42,7 @@ async def list_jobs(
 @router.get("/active", response_model=List[Job], operation_id="list_active_jobs")
 async def list_active_jobs(
     server: "SyncServer" = Depends(get_letta_server),
-
+    headers: HeaderParams = Depends(get_headers),
     source_id: Optional[str] = Query(None, description="Only list jobs associated with the source."),
     before: Optional[str] = Query(None, description="Cursor for pagination"),
     after: Optional[str] = Query(None, description="Cursor for pagination"),
@@ -52,7 +52,7 @@ async def list_active_jobs(
     """
     List all active jobs.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
     return await server.job_manager.list_jobs_async(
         actor=actor,
         statuses=[JobStatus.created, JobStatus.running],
@@ -67,13 +67,13 @@ async def list_active_jobs(
 @router.get("/{job_id}", response_model=Job, operation_id="retrieve_job")
 async def retrieve_job(
     job_id: str,
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Get the status of a job.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
 
     try:
         return await server.job_manager.get_job_by_id_async(job_id=job_id, actor=actor)
@@ -84,7 +84,7 @@ async def retrieve_job(
 @router.patch("/{job_id}/cancel", response_model=Job, operation_id="cancel_job")
 async def cancel_job(
     job_id: str,
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
@@ -93,7 +93,7 @@ async def cancel_job(
     This endpoint marks a job as cancelled, which will cause any associated
     agent execution to terminate as soon as possible.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
     if not settings.track_agent_run:
         raise HTTPException(status_code=400, detail="Agent run tracking is disabled")
 
@@ -113,13 +113,13 @@ async def cancel_job(
 @router.delete("/{job_id}", response_model=Job, operation_id="delete_job")
 async def delete_job(
     job_id: str,
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Delete a job by its job_id.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
 
     try:
         job = await server.job_manager.delete_job_by_id_async(job_id=job_id, actor=actor)
```
letta/server/rest_api/routers/v1/llms.py

```diff
@@ -1,11 +1,11 @@
 from typing import TYPE_CHECKING, List, Optional
 
-from fastapi import APIRouter, Depends,
+from fastapi import APIRouter, Depends, Query
 
 from letta.schemas.embedding_config import EmbeddingConfig
 from letta.schemas.enums import ProviderCategory, ProviderType
 from letta.schemas.llm_config import LLMConfig
-from letta.server.rest_api.
+from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
 
 if TYPE_CHECKING:
     from letta.server.server import SyncServer
@@ -19,11 +19,10 @@ async def list_llm_models(
     provider_name: Optional[str] = Query(None),
     provider_type: Optional[ProviderType] = Query(None),
     server: "SyncServer" = Depends(get_letta_server),
-
-    # Extract user_id from header, default to None if not present
+    headers: HeaderParams = Depends(get_headers),
 ):
     """List available LLM models using the asynchronous implementation for improved performance"""
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
 
     models = await server.list_llm_models_async(
         provider_category=provider_category,
@@ -38,11 +37,10 @@ async def list_llm_models(
 @router.get("/embedding", response_model=List[EmbeddingConfig], operation_id="list_embedding_models")
 async def list_embedding_models(
     server: "SyncServer" = Depends(get_letta_server),
-
-    # Extract user_id from header, default to None if not present
+    headers: HeaderParams = Depends(get_headers),
 ):
     """List available embedding models using the asynchronous implementation for improved performance"""
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
     models = await server.list_embedding_models_async(actor=actor)
 
     return models
```
letta/server/rest_api/routers/v1/messages.py

```diff
@@ -1,6 +1,6 @@
 from typing import List, Literal, Optional
 
-from fastapi import APIRouter, Body, Depends,
+from fastapi import APIRouter, Body, Depends, Query
 from fastapi.exceptions import HTTPException
 from starlette.requests import Request
 
@@ -10,7 +10,7 @@ from letta.orm.errors import NoResultFound
 from letta.schemas.job import BatchJob, JobStatus, JobType, JobUpdate
 from letta.schemas.letta_request import CreateBatch
 from letta.schemas.letta_response import LettaBatchMessages
-from letta.server.rest_api.
+from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
 from letta.server.server import SyncServer
 from letta.settings import settings
 
@@ -28,7 +28,7 @@ async def create_batch(
     request: Request,
     payload: CreateBatch = Body(..., description="Messages and config for all agents"),
     server: SyncServer = Depends(get_letta_server),
-
+    headers: HeaderParams = Depends(get_headers),
 ):
     """
     Submit a batch of agent runs for asynchronous processing.
@@ -47,7 +47,7 @@ async def create_batch(
     if not settings.enable_batch_job_polling:
         logger.warning("Batch job polling is disabled. Enable batch processing by setting LETTA_ENABLE_BATCH_JOB_POLLING to True.")
 
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
     batch_job = BatchJob(
         user_id=actor.id,
         status=JobStatus.running,
@@ -86,13 +86,13 @@ async def create_batch(
 @router.get("/batches/{batch_id}", response_model=BatchJob, operation_id="retrieve_batch")
 async def retrieve_batch(
     batch_id: str,
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Retrieve the status and details of a batch run.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
 
     try:
         job = await server.job_manager.get_job_by_id_async(job_id=batch_id, actor=actor)
@@ -114,13 +114,13 @@ async def list_batches(
         "desc", description="Sort order for jobs by creation time. 'asc' for oldest first, 'desc' for newest first"
     ),
     order_by: Literal["created_at"] = Query("created_at", description="Field to sort by"),
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     List all batch runs.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
 
     jobs = server.job_manager.list_jobs(
         actor=actor,
@@ -137,9 +137,9 @@ async def list_batches(
 @router.get(
     "/batches/{batch_id}/messages",
     response_model=LettaBatchMessages,
-    operation_id="
+    operation_id="list_messages_for_batch",
 )
-async def list_batch_messages(
+async def list_messages_for_batch(
     batch_id: str,
     before: Optional[str] = Query(
         None, description="Message ID cursor for pagination. Returns messages that come before this message ID in the specified sort order"
@@ -153,13 +153,13 @@ async def list_batch_messages(
     ),
     order_by: Literal["created_at"] = Query("created_at", description="Field to sort by"),
     agent_id: Optional[str] = Query(None, description="Filter messages by agent ID"),
-
+    headers: HeaderParams = Depends(get_headers),
     server: SyncServer = Depends(get_letta_server),
 ):
     """
     Get response messages for a specific batch job.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
 
     # Verify the batch job exists and the user has access to it
     try:
@@ -180,12 +180,12 @@ async def list_batch_messages(
 async def cancel_batch(
     batch_id: str,
     server: "SyncServer" = Depends(get_letta_server),
-
+    headers: HeaderParams = Depends(get_headers),
 ):
     """
     Cancel a batch run.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
 
     try:
         job = await server.job_manager.get_job_by_id_async(job_id=batch_id, actor=actor)
```
letta/server/rest_api/routers/v1/organizations.py

```diff
@@ -3,7 +3,7 @@ from typing import TYPE_CHECKING, List, Optional
 from fastapi import APIRouter, Body, Depends, HTTPException, Query
 
 from letta.schemas.organization import Organization, OrganizationCreate, OrganizationUpdate
-from letta.server.rest_api.
+from letta.server.rest_api.dependencies import get_letta_server
 
 if TYPE_CHECKING:
     from letta.server.server import SyncServer
```
letta/server/rest_api/routers/v1/providers.py

```diff
@@ -1,13 +1,13 @@
-from typing import TYPE_CHECKING, List, Optional
+from typing import TYPE_CHECKING, List, Literal, Optional
 
-from fastapi import APIRouter, Body, Depends,
+from fastapi import APIRouter, Body, Depends, HTTPException, Query, status
 from fastapi.responses import JSONResponse
 
 from letta.errors import LLMAuthenticationError
 from letta.orm.errors import NoResultFound
 from letta.schemas.enums import ProviderType
 from letta.schemas.providers import Provider, ProviderCheck, ProviderCreate, ProviderUpdate
-from letta.server.rest_api.
+from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
 
 if TYPE_CHECKING:
     from letta.server.server import SyncServer
@@ -17,20 +17,31 @@ router = APIRouter(prefix="/providers", tags=["providers"])
 
 @router.get("/", response_model=List[Provider], operation_id="list_providers")
 async def list_providers(
-
-
-
-
-
+    before: Optional[str] = Query(
+        None,
+        description="Provider ID cursor for pagination. Returns providers that come before this provider ID in the specified sort order",
+    ),
+    after: Optional[str] = Query(
+        None,
+        description="Provider ID cursor for pagination. Returns providers that come after this provider ID in the specified sort order",
+    ),
+    limit: Optional[int] = Query(50, description="Maximum number of providers to return"),
+    order: Literal["asc", "desc"] = Query(
+        "desc", description="Sort order for providers by creation time. 'asc' for oldest first, 'desc' for newest first"
+    ),
+    order_by: Literal["created_at"] = Query("created_at", description="Field to sort by"),
+    name: Optional[str] = Query(None, description="Filter providers by name"),
+    provider_type: Optional[ProviderType] = Query(None, description="Filter providers by type"),
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Get a list of all custom providers.
     """
     try:
-        actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+        actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
         providers = await server.provider_manager.list_providers_async(
-            after=after, limit=limit, actor=actor, name=name, provider_type=provider_type
+            before=before, after=after, limit=limit, actor=actor, name=name, provider_type=provider_type, ascending=(order == "asc")
         )
     except HTTPException:
         raise
@@ -39,16 +50,29 @@ async def list_providers(
     return providers
 
 
+@router.get("/{provider_id}", response_model=Provider, operation_id="retrieve_provider")
+async def retrieve_provider(
+    provider_id: str,
+    headers: HeaderParams = Depends(get_headers),
+    server: "SyncServer" = Depends(get_letta_server),
+):
+    """
+    Get a provider by ID.
+    """
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
+    return await server.provider_manager.get_provider_async(provider_id=provider_id, actor=actor)
+
+
 @router.post("/", response_model=Provider, operation_id="create_provider")
 async def create_provider(
     request: ProviderCreate = Body(...),
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Create a new custom provider.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
     for field_name in request.model_fields:
         value = getattr(request, field_name, None)
         if isinstance(value, str) and value == "":
@@ -64,13 +88,13 @@ async def create_provider(
 async def modify_provider(
     provider_id: str,
     request: ProviderUpdate = Body(...),
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Update an existing custom provider.
     """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
     return await server.provider_manager.update_provider_async(provider_id=provider_id, provider_update=request, actor=actor)
 
 
@@ -99,14 +123,14 @@ async def check_provider(
 @router.delete("/{provider_id}", response_model=None, operation_id="delete_provider")
 async def delete_provider(
     provider_id: str,
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Delete an existing custom provider.
     """
     try:
-        actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+        actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
         await server.provider_manager.delete_provider_by_id_async(provider_id=provider_id, actor=actor)
         return JSONResponse(status_code=status.HTTP_200_OK, content={"message": f"Provider id={provider_id} successfully deleted"})
     except NoResultFound:
```
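
Beyond the header refactor, the providers router gains cursor-based pagination and filtering on `list_providers` plus a new `retrieve_provider` endpoint. Below is a hypothetical client call, assuming the v1 router is mounted under `/v1` and the server runs locally; the base URL, port, `user_id` header value, and `provider_type` value are placeholders, not taken from the diff.

```python
import httpx

# Placeholder values; adjust to your deployment.
BASE_URL = "http://localhost:8283"  # assumed local Letta server address

with httpx.Client(base_url=BASE_URL, headers={"user_id": "user-123"}) as client:
    # New query parameters on GET /v1/providers/: before/after cursors, limit,
    # order, order_by, name, and provider_type.
    resp = client.get(
        "/v1/providers/",
        params={"limit": 10, "order": "asc", "provider_type": "openai"},
    )
    resp.raise_for_status()
    providers = resp.json()

    # New endpoint: GET /v1/providers/{provider_id}
    if providers:
        detail = client.get(f"/v1/providers/{providers[0]['id']}")
        print(detail.json())
```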
letta/server/rest_api/routers/v1/runs.py

```diff
@@ -1,25 +1,25 @@
 from datetime import timedelta
 from typing import Annotated, List, Literal, Optional
 
-from fastapi import APIRouter, Body, Depends,
+from fastapi import APIRouter, Body, Depends, HTTPException, Query
 from pydantic import Field
 
 from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client
 from letta.helpers.datetime_helpers import get_utc_time
 from letta.orm.errors import NoResultFound
-from letta.schemas.enums import JobStatus, JobType
+from letta.schemas.enums import JobStatus, JobType
 from letta.schemas.letta_message import LettaMessageUnion
 from letta.schemas.letta_request import RetrieveStreamRequest
 from letta.schemas.openai.chat_completion_response import UsageStatistics
 from letta.schemas.run import Run
 from letta.schemas.step import Step
+from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
 from letta.server.rest_api.redis_stream_manager import redis_sse_stream_generator
 from letta.server.rest_api.streaming_response import (
     StreamingResponseWithStatusCode,
     add_keepalive_to_stream,
     cancellation_aware_stream_wrapper,
 )
-from letta.server.rest_api.utils import get_letta_server
 from letta.server.server import SyncServer
 from letta.settings import settings
 
@@ -38,12 +38,12 @@ def list_runs(
         False,
         description="Whether to sort agents oldest to newest (True) or newest to oldest (False, default)",
     ),
-
+    headers: HeaderParams = Depends(get_headers),
 ):
     """
     List all runs.
     """
-    actor = server.user_manager.get_user_or_default(user_id=actor_id)
+    actor = server.user_manager.get_user_or_default(user_id=headers.actor_id)
 
     runs = [
         Run.from_job(job)
@@ -68,12 +68,12 @@ def list_active_runs(
     server: "SyncServer" = Depends(get_letta_server),
     agent_ids: Optional[List[str]] = Query(None, description="The unique identifier of the agent associated with the run."),
     background: Optional[bool] = Query(None, description="If True, filters for runs that were created in background mode."),
-
+    headers: HeaderParams = Depends(get_headers),
 ):
     """
     List all active runs.
     """
-    actor = server.user_manager.get_user_or_default(user_id=actor_id)
+    actor = server.user_manager.get_user_or_default(user_id=headers.actor_id)
 
     active_runs = server.job_manager.list_jobs(actor=actor, statuses=[JobStatus.created, JobStatus.running], job_type=JobType.RUN)
     active_runs = [Run.from_job(job) for job in active_runs]
@@ -90,13 +90,13 @@ def list_active_runs(
 @router.get("/{run_id}", response_model=Run, operation_id="retrieve_run")
 def retrieve_run(
     run_id: str,
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Get the status of a run.
     """
-    actor = server.user_manager.get_user_or_default(user_id=actor_id)
+    actor = server.user_manager.get_user_or_default(user_id=headers.actor_id)
 
     try:
         job = server.job_manager.get_job_by_id(job_id=run_id, actor=actor)
@@ -118,7 +118,7 @@ RunMessagesResponse = Annotated[
 async def list_run_messages(
     run_id: str,
     server: "SyncServer" = Depends(get_letta_server),
-
+    headers: HeaderParams = Depends(get_headers),
     before: Optional[str] = Query(
         None, description="Message ID cursor for pagination. Returns messages that come before this message ID in the specified sort order"
     ),
@@ -131,7 +131,7 @@ async def list_run_messages(
     ),
 ):
     """Get response messages associated with a run."""
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
 
     try:
         messages = server.job_manager.get_run_messages(
@@ -150,13 +150,13 @@ async def list_run_messages(
 @router.get("/{run_id}/usage", response_model=UsageStatistics, operation_id="retrieve_run_usage")
 def retrieve_run_usage(
     run_id: str,
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Get usage statistics for a run.
     """
-    actor = server.user_manager.get_user_or_default(user_id=actor_id)
+    actor = server.user_manager.get_user_or_default(user_id=headers.actor_id)
 
     try:
         usage = server.job_manager.get_job_usage(job_id=run_id, actor=actor)
@@ -173,7 +173,7 @@ def retrieve_run_usage(
 async def list_run_steps(
     run_id: str,
     server: "SyncServer" = Depends(get_letta_server),
-
+    headers: HeaderParams = Depends(get_headers),
     before: Optional[str] = Query(None, description="Cursor for pagination"),
     after: Optional[str] = Query(None, description="Cursor for pagination"),
     limit: Optional[int] = Query(100, description="Maximum number of messages to return"),
@@ -197,7 +197,7 @@ async def list_run_steps(
     if order not in ["asc", "desc"]:
         raise HTTPException(status_code=400, detail="Order must be 'asc' or 'desc'")
 
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
 
     try:
         steps = server.job_manager.get_job_steps(
@@ -216,13 +216,13 @@ async def list_run_steps(
 @router.delete("/{run_id}", response_model=Run, operation_id="delete_run")
 async def delete_run(
     run_id: str,
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     """
     Delete a run by its run_id.
    """
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
 
     try:
         job = await server.job_manager.delete_job_by_id_async(job_id=run_id, actor=actor)
@@ -266,10 +266,10 @@ async def delete_run(
 async def retrieve_stream(
     run_id: str,
     request: RetrieveStreamRequest = Body(None),
-
+    headers: HeaderParams = Depends(get_headers),
     server: "SyncServer" = Depends(get_letta_server),
 ):
-    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
     try:
         job = server.job_manager.get_job_by_id(job_id=run_id, actor=actor)
     except NoResultFound:
```