letta-nightly 0.11.3.dev20250820104219__py3-none-any.whl → 0.11.4.dev20250820213507__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- letta/__init__.py +1 -1
- letta/agents/helpers.py +4 -0
- letta/agents/letta_agent.py +142 -5
- letta/constants.py +10 -7
- letta/data_sources/connectors.py +70 -53
- letta/embeddings.py +3 -240
- letta/errors.py +28 -0
- letta/functions/function_sets/base.py +4 -4
- letta/functions/functions.py +287 -32
- letta/functions/mcp_client/types.py +11 -0
- letta/functions/schema_validator.py +187 -0
- letta/functions/typescript_parser.py +196 -0
- letta/helpers/datetime_helpers.py +8 -4
- letta/helpers/tool_execution_helper.py +25 -2
- letta/llm_api/anthropic_client.py +23 -18
- letta/llm_api/azure_client.py +73 -0
- letta/llm_api/bedrock_client.py +8 -4
- letta/llm_api/google_vertex_client.py +14 -5
- letta/llm_api/llm_api_tools.py +2 -217
- letta/llm_api/llm_client.py +15 -1
- letta/llm_api/llm_client_base.py +32 -1
- letta/llm_api/openai.py +1 -0
- letta/llm_api/openai_client.py +18 -28
- letta/llm_api/together_client.py +55 -0
- letta/orm/provider.py +1 -0
- letta/orm/step_metrics.py +40 -1
- letta/otel/db_pool_monitoring.py +1 -1
- letta/schemas/agent.py +3 -4
- letta/schemas/agent_file.py +2 -0
- letta/schemas/block.py +11 -5
- letta/schemas/embedding_config.py +4 -5
- letta/schemas/enums.py +1 -1
- letta/schemas/job.py +2 -3
- letta/schemas/llm_config.py +79 -7
- letta/schemas/mcp.py +0 -24
- letta/schemas/message.py +0 -108
- letta/schemas/openai/chat_completion_request.py +1 -0
- letta/schemas/providers/__init__.py +0 -2
- letta/schemas/providers/anthropic.py +106 -8
- letta/schemas/providers/azure.py +102 -8
- letta/schemas/providers/base.py +10 -3
- letta/schemas/providers/bedrock.py +28 -16
- letta/schemas/providers/letta.py +3 -3
- letta/schemas/providers/ollama.py +2 -12
- letta/schemas/providers/openai.py +4 -4
- letta/schemas/providers/together.py +14 -2
- letta/schemas/sandbox_config.py +2 -1
- letta/schemas/tool.py +46 -22
- letta/server/rest_api/routers/v1/agents.py +179 -38
- letta/server/rest_api/routers/v1/folders.py +13 -8
- letta/server/rest_api/routers/v1/providers.py +10 -3
- letta/server/rest_api/routers/v1/sources.py +14 -8
- letta/server/rest_api/routers/v1/steps.py +17 -1
- letta/server/rest_api/routers/v1/tools.py +96 -5
- letta/server/rest_api/streaming_response.py +91 -45
- letta/server/server.py +27 -38
- letta/services/agent_manager.py +92 -20
- letta/services/agent_serialization_manager.py +11 -7
- letta/services/context_window_calculator/context_window_calculator.py +40 -2
- letta/services/helpers/agent_manager_helper.py +73 -12
- letta/services/mcp_manager.py +109 -15
- letta/services/passage_manager.py +28 -109
- letta/services/provider_manager.py +24 -0
- letta/services/step_manager.py +68 -0
- letta/services/summarizer/summarizer.py +1 -4
- letta/services/tool_executor/core_tool_executor.py +1 -1
- letta/services/tool_executor/sandbox_tool_executor.py +26 -9
- letta/services/tool_manager.py +82 -5
- letta/services/tool_sandbox/base.py +3 -11
- letta/services/tool_sandbox/modal_constants.py +17 -0
- letta/services/tool_sandbox/modal_deployment_manager.py +242 -0
- letta/services/tool_sandbox/modal_sandbox.py +218 -3
- letta/services/tool_sandbox/modal_sandbox_v2.py +429 -0
- letta/services/tool_sandbox/modal_version_manager.py +273 -0
- letta/services/tool_sandbox/safe_pickle.py +193 -0
- letta/settings.py +5 -3
- letta/templates/sandbox_code_file.py.j2 +2 -4
- letta/templates/sandbox_code_file_async.py.j2 +2 -4
- letta/utils.py +1 -1
- {letta_nightly-0.11.3.dev20250820104219.dist-info → letta_nightly-0.11.4.dev20250820213507.dist-info}/METADATA +2 -2
- {letta_nightly-0.11.3.dev20250820104219.dist-info → letta_nightly-0.11.4.dev20250820213507.dist-info}/RECORD +84 -81
- letta/llm_api/anthropic.py +0 -1206
- letta/llm_api/aws_bedrock.py +0 -104
- letta/llm_api/azure_openai.py +0 -118
- letta/llm_api/azure_openai_constants.py +0 -11
- letta/llm_api/cohere.py +0 -391
- letta/schemas/providers/cohere.py +0 -18
- {letta_nightly-0.11.3.dev20250820104219.dist-info → letta_nightly-0.11.4.dev20250820213507.dist-info}/LICENSE +0 -0
- {letta_nightly-0.11.3.dev20250820104219.dist-info → letta_nightly-0.11.4.dev20250820213507.dist-info}/WHEEL +0 -0
- {letta_nightly-0.11.3.dev20250820104219.dist-info → letta_nightly-0.11.4.dev20250820213507.dist-info}/entry_points.txt +0 -0
letta/server/rest_api/routers/v1/agents.py

@@ -8,13 +8,14 @@ from fastapi import APIRouter, Body, Depends, File, Form, Header, HTTPException,
 from fastapi.responses import JSONResponse
 from marshmallow import ValidationError
 from orjson import orjson
-from pydantic import Field
+from pydantic import BaseModel, Field
 from sqlalchemy.exc import IntegrityError, OperationalError
 from starlette.responses import Response, StreamingResponse
 
 from letta.agents.letta_agent import LettaAgent
-from letta.constants import DEFAULT_MAX_STEPS, DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG, REDIS_RUN_ID_PREFIX
+from letta.constants import AGENT_ID_PATTERN, DEFAULT_MAX_STEPS, DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG, REDIS_RUN_ID_PREFIX
 from letta.data_sources.redis_client import get_redis_client
+from letta.errors import AgentExportIdMappingError, AgentExportProcessingError, AgentFileImportError, AgentNotFoundForExportError
 from letta.groups.sleeptime_multi_agent_v2 import SleeptimeMultiAgentV2
 from letta.helpers.datetime_helpers import get_utc_timestamp_ns
 from letta.log import get_logger
@@ -22,7 +23,9 @@ from letta.orm.errors import NoResultFound
 from letta.otel.context import get_ctx_attributes
 from letta.otel.metric_registry import MetricRegistry
 from letta.schemas.agent import AgentState, AgentType, CreateAgent, UpdateAgent
+from letta.schemas.agent_file import AgentFileSchema
 from letta.schemas.block import Block, BlockUpdate
+from letta.schemas.enums import JobType
 from letta.schemas.group import Group
 from letta.schemas.job import JobStatus, JobUpdate, LettaRequestConfig
 from letta.schemas.letta_message import LettaMessageUnion, LettaMessageUpdateUnion, MessageType
@@ -144,29 +147,143 @@ class IndentedORJSONResponse(Response):
 
 
 @router.get("/{agent_id}/export", response_class=IndentedORJSONResponse, operation_id="export_agent_serialized")
-def export_agent_serialized(
+async def export_agent_serialized(
     agent_id: str,
     max_steps: int = 100,
     server: "SyncServer" = Depends(get_letta_server),
     actor_id: str | None = Header(None, alias="user_id"),
+    use_legacy_format: bool = Query(
+        True,
+        description="If true, exports using the legacy single-agent format. If false, exports using the new multi-entity format.",
+    ),
     # do not remove, used to autogeneration of spec
-    # TODO: Think of a better way to export
-    spec:
+    # TODO: Think of a better way to export AgentFileSchema
+    spec: AgentFileSchema | None = None,
+    legacy_spec: AgentSchema | None = None,
 ) -> JSONResponse:
     """
     Export the serialized JSON representation of an agent, formatted with indentation.
+
+    Supports two export formats:
+    - Legacy format (use_legacy_format=true): Single agent with inline tools/blocks
+    - New format (default): Multi-entity format with separate agents, tools, blocks, files, etc.
     """
     actor = server.user_manager.get_user_or_default(user_id=actor_id)
 
+    if use_legacy_format:
+        # Use the legacy serialization method
+        try:
+            agent = server.agent_manager.serialize(agent_id=agent_id, actor=actor, max_steps=max_steps)
+            return agent.model_dump()
+        except NoResultFound:
+            raise HTTPException(status_code=404, detail=f"Agent with id={agent_id} not found for user_id={actor.id}.")
+    else:
+        # Use the new multi-entity export format
+        try:
+            agent_file_schema = await server.agent_serialization_manager.export(agent_ids=[agent_id], actor=actor)
+            return agent_file_schema.model_dump()
+        except AgentNotFoundForExportError:
+            raise HTTPException(status_code=404, detail=f"Agent with id={agent_id} not found for user_id={actor.id}.")
+        except AgentExportIdMappingError as e:
+            raise HTTPException(
+                status_code=500, detail=f"Internal error during export: ID mapping failed for {e.entity_type} ID '{e.db_id}'"
+            )
+        except AgentExportProcessingError as e:
+            raise HTTPException(status_code=500, detail=f"Export processing failed: {str(e.original_error)}")
+
+
+class ImportedAgentsResponse(BaseModel):
+    """Response model for imported agents"""
+
+    agent_ids: List[str] = Field(..., description="List of IDs of the imported agents")
+
+
+def import_agent_legacy(
+    agent_json: dict,
+    server: "SyncServer",
+    actor: User,
+    append_copy_suffix: bool = True,
+    override_existing_tools: bool = True,
+    project_id: str | None = None,
+    strip_messages: bool = False,
+    env_vars: Optional[dict[str, Any]] = None,
+) -> List[str]:
+    """
+    Import an agent using the legacy AgentSchema format.
+    """
     try:
-        agent = server.agent_manager.serialize(agent_id=agent_id, actor=actor, max_steps=max_steps)
-        return agent.model_dump()
-    except NoResultFound:
-        raise HTTPException(status_code=404, detail=f"Agent with id={agent_id} not found for user_id={actor.id}.")
+        # Validate the JSON against AgentSchema before passing it to deserialize
+        agent_schema = AgentSchema.model_validate(agent_json)
+
+        new_agent = server.agent_manager.deserialize(
+            serialized_agent=agent_schema,  # Ensure we're passing a validated AgentSchema
+            actor=actor,
+            append_copy_suffix=append_copy_suffix,
+            override_existing_tools=override_existing_tools,
+            project_id=project_id,
+            strip_messages=strip_messages,
+            env_vars=env_vars,
+        )
+        return [new_agent.id]
+
+    except ValidationError as e:
+        raise HTTPException(status_code=422, detail=f"Invalid agent schema: {e!s}")
+
+    except IntegrityError as e:
+        raise HTTPException(status_code=409, detail=f"Database integrity error: {e!s}")
+
+    except OperationalError as e:
+        raise HTTPException(status_code=503, detail=f"Database connection error. Please try again later: {e!s}")
+
+    except Exception as e:
+        traceback.print_exc()
+        raise HTTPException(status_code=500, detail=f"An unexpected error occurred while uploading the agent: {e!s}")
+
+
+async def import_agent(
+    agent_file_json: dict,
+    server: "SyncServer",
+    actor: User,
+    # TODO: Support these fields for new agent file
+    append_copy_suffix: bool = True,
+    override_existing_tools: bool = True,
+    project_id: str | None = None,
+    strip_messages: bool = False,
+) -> List[str]:
+    """
+    Import an agent using the new AgentFileSchema format.
+    """
+    try:
+        agent_schema = AgentFileSchema.model_validate(agent_file_json)
+    except ValidationError as e:
+        raise HTTPException(status_code=422, detail=f"Invalid agent file schema: {e!s}")
+
+    try:
+        import_result = await server.agent_serialization_manager.import_file(schema=agent_schema, actor=actor)
+
+        if not import_result.success:
+            raise HTTPException(
+                status_code=500, detail=f"Import failed: {import_result.message}. Errors: {', '.join(import_result.errors)}"
+            )
 
+        return import_result.imported_agent_ids
 
-@router.post("/import", response_model=AgentState, operation_id="import_agent_serialized")
-def import_agent_serialized(
+    except AgentFileImportError as e:
+        raise HTTPException(status_code=400, detail=f"Agent file import error: {str(e)}")
+
+    except IntegrityError as e:
+        raise HTTPException(status_code=409, detail=f"Database integrity error: {e!s}")
+
+    except OperationalError as e:
+        raise HTTPException(status_code=503, detail=f"Database connection error. Please try again later: {e!s}")
+
+    except Exception as e:
+        traceback.print_exc()
+        raise HTTPException(status_code=500, detail=f"An unexpected error occurred while importing agents: {e!s}")
+
+
+@router.post("/import", response_model=ImportedAgentsResponse, operation_id="import_agent_serialized")
+async def import_agent_serialized(
     file: UploadFile = File(...),
     server: "SyncServer" = Depends(get_letta_server),
     actor_id: str | None = Header(None, alias="user_id"),
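
The rewritten export endpoint keeps the legacy single-agent format as the default and gates the new multi-entity agent-file format behind use_legacy_format=false. A minimal client sketch of the two modes; the base URL, user_id header value, and agent ID are illustrative assumptions, not values from this diff:

    import requests

    BASE = "http://localhost:8283/v1/agents"                 # assumed local Letta server
    HEADERS = {"user_id": "user-00000000"}                   # hypothetical actor id
    agent_id = "agent-123e4567-e89b-42d3-a456-426614174000"  # hypothetical agent id

    # Legacy single-agent export (use_legacy_format defaults to true)
    legacy = requests.get(f"{BASE}/{agent_id}/export", headers=HEADERS).json()

    # New multi-entity export: an AgentFileSchema with separate agents/tools/blocks/files
    agent_file = requests.get(
        f"{BASE}/{agent_id}/export",
        params={"use_legacy_format": "false"},
        headers=HEADERS,
    ).json()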
@@ -183,19 +300,35 @@ def import_agent_serialized(
     env_vars: Optional[Dict[str, Any]] = Form(None, description="Environment variables to pass to the agent for tool execution."),
 ):
     """
-    Import a serialized agent file and recreate the agent in the system.
+    Import a serialized agent file and recreate the agent(s) in the system.
+    Returns the IDs of all imported agents.
     """
     actor = server.user_manager.get_user_or_default(user_id=actor_id)
 
     try:
         serialized_data = file.file.read()
         agent_json = json.loads(serialized_data)
+    except json.JSONDecodeError:
+        raise HTTPException(status_code=400, detail="Corrupted agent file format.")
 
-        # Validate the JSON against AgentSchema before passing it to deserialize
-        agent_schema = AgentSchema.model_validate(agent_json)
-
-        new_agent = server.agent_manager.deserialize(
-            serialized_agent=agent_schema,  # Ensure we're passing a validated AgentSchema
+    # Check if the JSON is AgentFileSchema or AgentSchema
+    # TODO: This is kind of hacky, but should work as long as dont' change the schema
+    if "agents" in agent_json and isinstance(agent_json.get("agents"), list):
+        # This is an AgentFileSchema
+        agent_ids = await import_agent(
+            agent_file_json=agent_json,
+            server=server,
+            actor=actor,
+            append_copy_suffix=append_copy_suffix,
+            override_existing_tools=override_existing_tools,
+            project_id=project_id,
+            strip_messages=strip_messages,
+        )
+    else:
+        # This is a legacy AgentSchema
+        agent_ids = import_agent_legacy(
+            agent_json=agent_json,
+            server=server,
             actor=actor,
             append_copy_suffix=append_copy_suffix,
             override_existing_tools=override_existing_tools,
@@ -203,23 +336,8 @@ def import_agent_serialized(
             strip_messages=strip_messages,
             env_vars=env_vars,
         )
-        return new_agent
 
-    except json.JSONDecodeError:
-        raise HTTPException(status_code=400, detail="Corrupted agent file format.")
-
-    except ValidationError as e:
-        raise HTTPException(status_code=422, detail=f"Invalid agent schema: {e!s}")
-
-    except IntegrityError as e:
-        raise HTTPException(status_code=409, detail=f"Database integrity error: {e!s}")
-
-    except OperationalError as e:
-        raise HTTPException(status_code=503, detail=f"Database connection error. Please try again later: {e!s}")
-
-    except Exception as e:
-        traceback.print_exc()
-        raise HTTPException(status_code=500, detail=f"An unexpected error occurred while uploading the agent: {e!s}")
+    return ImportedAgentsResponse(agent_ids=agent_ids)
 
 
 @router.get("/{agent_id}/context", response_model=ContextWindowOverview, operation_id="retrieve_agent_context_window")
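
With this change, /import no longer returns a single agent state; it returns ImportedAgentsResponse for both legacy AgentSchema uploads and new AgentFileSchema uploads. A hedged usage sketch (file name and server URL are assumptions):

    import requests

    with open("agent_file.json", "rb") as f:                    # hypothetical export file
        resp = requests.post(
            "http://localhost:8283/v1/agents/import",           # assumed local server
            headers={"user_id": "user-00000000"},               # hypothetical actor id
            files={"file": ("agent_file.json", f, "application/json")},
            data={"append_copy_suffix": "true"},                # Form field from this router
        )
    resp.raise_for_status()
    print(resp.json()["agent_ids"])  # IDs of every imported agent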
@@ -555,6 +673,10 @@ async def retrieve_agent(
     """
     Get the state of the agent.
     """
+    # Check if agent_id matches uuid4 format
+    if not AGENT_ID_PATTERN.match(agent_id):
+        raise HTTPException(status_code=400, detail=f"agent_id {agent_id} is not in the valid format 'agent-<uuid4>'")
+
     actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
 
     try:
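
AGENT_ID_PATTERN comes from the updated letta/constants.py; its definition is not shown in this diff. Judging from the error message, it presumably matches "agent-" followed by a uuid4. The regex below is an assumed sketch, not the actual constant:

    import re

    # Assumed shape of AGENT_ID_PATTERN: "agent-" + uuid4 (version nibble 4, variant [89ab])
    AGENT_ID_PATTERN = re.compile(
        r"^agent-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
    )

    assert AGENT_ID_PATTERN.match("agent-123e4567-e89b-42d3-a456-426614174000")
    assert not AGENT_ID_PATTERN.match("not-an-agent-id")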
@@ -876,6 +998,8 @@ async def send_message(
         "google_vertex",
         "bedrock",
         "ollama",
+        "azure",
+        "together",
     ]
 
     # Create a new run for execution tracking
@@ -1018,6 +1142,8 @@ async def send_message_streaming(
         "google_vertex",
         "bedrock",
         "ollama",
+        "azure",
+        "together",
     ]
     model_compatible_token_streaming = agent.llm_config.model_endpoint_type in ["anthropic", "openai", "bedrock"]
 
@@ -1153,10 +1279,14 @@ async def send_message_streaming(
     )
 
 
+class CancelAgentRunRequest(BaseModel):
+    run_ids: list[str] | None = Field(None, description="Optional list of run IDs to cancel")
+
+
 @router.post("/{agent_id}/messages/cancel", operation_id="cancel_agent_run")
 async def cancel_agent_run(
     agent_id: str,
-    run_ids: list[str] | None = None,
+    request: CancelAgentRunRequest = Body(None),
     server: SyncServer = Depends(get_letta_server),
     actor_id: str | None = Header(None, alias="user_id"),
 ) -> dict:
@@ -1165,17 +1295,24 @@ async def cancel_agent_run(
 
     Note to cancel active runs associated with an agent, redis is required.
     """
-
     actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
     if not settings.track_agent_run:
         raise HTTPException(status_code=400, detail="Agent run tracking is disabled")
+    run_ids = request.run_ids if request else None
     if not run_ids:
         redis_client = await get_redis_client()
         run_id = await redis_client.get(f"{REDIS_RUN_ID_PREFIX}:{agent_id}")
         if run_id is None:
-            logger.warning("Cannot find run associated with agent to cancel.")
-            return {}
-        run_ids = [run_id]
+            logger.warning("Cannot find run associated with agent to cancel in redis, fetching from db.")
+            job_ids = await server.job_manager.list_jobs_async(
+                actor=actor,
+                statuses=[JobStatus.created, JobStatus.running],
+                job_type=JobType.RUN,
+                ascending=False,
+            )
+            run_ids = [Run.from_job(job).id for job in job_ids]
+        else:
+            run_ids = [run_id]
 
     results = {}
     for run_id in run_ids:
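
Run IDs now arrive in a JSON body (CancelAgentRunRequest) instead of a bare parameter, and when the body is omitted the server falls back to the redis lookup and then to active RUN jobs in the database. Client sketch with assumed IDs:

    import requests

    resp = requests.post(
        "http://localhost:8283/v1/agents/agent-123e4567-e89b-42d3-a456-426614174000/messages/cancel",
        headers={"user_id": "user-00000000"},  # hypothetical actor id
        # Omit json= entirely to cancel whatever active runs the server finds
        json={"run_ids": ["run-00000000-0000-4000-8000-000000000000"]},  # hypothetical run id
    )
    print(resp.json())  # per-run cancellation results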
@@ -1400,6 +1537,8 @@ async def preview_raw_payload(
         "google_vertex",
         "bedrock",
         "ollama",
+        "azure",
+        "together",
     ]
 
     if agent_eligible and model_compatible:
@@ -1468,6 +1607,8 @@ async def summarize_agent_conversation(
         "google_vertex",
         "bedrock",
         "ollama",
+        "azure",
+        "together",
     ]
 
     if agent_eligible and model_compatible:

letta/server/rest_api/routers/v1/folders.py

@@ -208,6 +208,7 @@ async def upload_file_to_folder(
     file: UploadFile,
     folder_id: str,
     duplicate_handling: DuplicateFileHandling = Query(DuplicateFileHandling.SUFFIX, description="How to handle duplicate filenames"),
+    name: Optional[str] = Query(None, description="Optional custom name to override the uploaded file's name"),
     server: "SyncServer" = Depends(get_letta_server),
     actor_id: Optional[str] = Header(None, alias="user_id"),
 ):
@@ -255,7 +256,8 @@ async def upload_file_to_folder(
     content = await file.read()
 
     # Store original filename and handle duplicate logic
-    original_filename = sanitize_filename(file.filename)  # Basic sanitization only
+    # Use custom name if provided, otherwise use the uploaded file's name
+    original_filename = sanitize_filename(name if name else file.filename)  # Basic sanitization only
 
     # Check if duplicate exists
     existing_file = await server.file_manager.get_file_by_original_name_and_source(
@@ -270,18 +272,21 @@ async def upload_file_to_folder(
         )
     elif duplicate_handling == DuplicateFileHandling.SKIP:
         # Return existing file metadata with custom header to indicate it was skipped
-        from fastapi import Response
-
         response = Response(
             content=existing_file.model_dump_json(), media_type="application/json", headers={"X-Upload-Result": "skipped"}
         )
         return response
-
+    elif duplicate_handling == DuplicateFileHandling.REPLACE:
+        # delete the file
+        deleted_file = await server.file_manager.delete_file(file_id=existing_file.id, actor=actor)
+        unique_filename = original_filename
 
-    # Generate unique filename (adds suffix if needed)
-    unique_filename = await server.file_manager.generate_unique_filename(
-        original_filename=original_filename, source=folder, organization_id=actor.organization_id
-    )
+    if not unique_filename:
+        # For SUFFIX, continue to generate unique filename
+        # Generate unique filename (adds suffix if needed)
+        unique_filename = await server.file_manager.generate_unique_filename(
+            original_filename=original_filename, source=folder, organization_id=actor.organization_id
+        )
 
     # create file metadata
     file_metadata = FileMetadata(
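
The folder upload route (and the parallel sources.py route later in this diff) gains an optional name override and a REPLACE duplicate strategy that deletes the existing file and reuses the original filename. A hedged client sketch; the route path and the lower-case serialization of DuplicateFileHandling.REPLACE are assumptions:

    import requests

    with open("notes.txt", "rb") as f:
        resp = requests.post(
            "http://localhost:8283/v1/folders/folder-123/upload",  # assumed route shape
            headers={"user_id": "user-00000000"},                  # hypothetical actor id
            params={
                "duplicate_handling": "replace",  # assumed enum serialization
                "name": "renamed-notes.txt",      # overrides the uploaded file's name
            },
            files={"file": ("notes.txt", f, "text/plain")},
        )
    print(resp.status_code)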

letta/server/rest_api/routers/v1/providers.py

@@ -49,9 +49,13 @@ async def create_provider(
     Create a new custom provider
     """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    for field_name in request.model_fields:
+        value = getattr(request, field_name, None)
+        if isinstance(value, str) and value == "":
+            setattr(request, field_name, None)
 
-    request_data = request.model_dump(exclude_unset=True)
-    provider = ProviderCreate(**request_data)
+    request_data = request.model_dump(exclude_unset=True, exclude_none=True)
+    provider = ProviderCreate(**request_data)
     provider = await server.provider_manager.create_provider_async(provider, actor=actor)
     return provider
 
@@ -70,12 +74,15 @@ async def modify_provider(
     return await server.provider_manager.update_provider_async(provider_id=provider_id, provider_update=request, actor=actor)
 
 
-@router.
+@router.post("/check", response_model=None, operation_id="check_provider")
 async def check_provider(
     request: ProviderCheck = Body(...),
     server: "SyncServer" = Depends(get_letta_server),
 ):
     try:
+        if request.base_url and len(request.base_url) == 0:
+            # set to null if empty string
+            request.base_url = None
         await server.provider_manager.check_provider_api_key(provider_check=request)
         return JSONResponse(
             status_code=status.HTTP_200_OK, content={"message": f"Valid api key for provider_type={request.provider_type.value}"}
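
The net effect of the create_provider change: empty strings submitted by clients are coerced to None and then dropped by model_dump(exclude_unset=True, exclude_none=True), so they no longer clobber defaults downstream. A standalone sketch; ProviderCreateStub is a hypothetical stand-in for ProviderCreate:

    from typing import Optional
    from pydantic import BaseModel

    class ProviderCreateStub(BaseModel):  # hypothetical stand-in for ProviderCreate
        name: str
        api_key: Optional[str] = None
        base_url: Optional[str] = None

    request = ProviderCreateStub(name="my-openai", api_key="sk-placeholder", base_url="")

    # Same idiom as the hunk above: coerce empty strings to None...
    for field_name in request.model_fields:
        value = getattr(request, field_name, None)
        if isinstance(value, str) and value == "":
            setattr(request, field_name, None)

    # ...then drop unset/None fields before rebuilding the create payload
    print(request.model_dump(exclude_unset=True, exclude_none=True))
    # {'name': 'my-openai', 'api_key': 'sk-placeholder'}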

letta/server/rest_api/routers/v1/sources.py

@@ -209,6 +209,7 @@ async def upload_file_to_source(
     file: UploadFile,
     source_id: str,
     duplicate_handling: DuplicateFileHandling = Query(DuplicateFileHandling.SUFFIX, description="How to handle duplicate filenames"),
+    name: Optional[str] = Query(None, description="Optional custom name to override the uploaded file's name"),
     server: "SyncServer" = Depends(get_letta_server),
     actor_id: Optional[str] = Header(None, alias="user_id"),
 ):
@@ -256,13 +257,15 @@ async def upload_file_to_source(
     content = await file.read()
 
     # Store original filename and handle duplicate logic
-    original_filename = sanitize_filename(file.filename)  # Basic sanitization only
+    # Use custom name if provided, otherwise use the uploaded file's name
+    original_filename = sanitize_filename(name if name else file.filename)  # Basic sanitization only
 
     # Check if duplicate exists
     existing_file = await server.file_manager.get_file_by_original_name_and_source(
         original_filename=original_filename, source_id=source_id, actor=actor
     )
 
+    unique_filename = None
     if existing_file:
         # Duplicate found, handle based on strategy
         if duplicate_handling == DuplicateFileHandling.ERROR:
@@ -271,18 +274,21 @@ async def upload_file_to_source(
         )
     elif duplicate_handling == DuplicateFileHandling.SKIP:
         # Return existing file metadata with custom header to indicate it was skipped
-        from fastapi import Response
-
         response = Response(
             content=existing_file.model_dump_json(), media_type="application/json", headers={"X-Upload-Result": "skipped"}
         )
         return response
-
+    elif duplicate_handling == DuplicateFileHandling.REPLACE:
+        # delete the file
+        deleted_file = await server.file_manager.delete_file(file_id=existing_file.id, actor=actor)
+        unique_filename = original_filename
 
-    # Generate unique filename (adds suffix if needed)
-    unique_filename = await server.file_manager.generate_unique_filename(
-        original_filename=original_filename, source=source, organization_id=actor.organization_id
-    )
+    if not unique_filename:
+        # For SUFFIX, continue to generate unique filename
+        # Generate unique filename (adds suffix if needed)
+        unique_filename = await server.file_manager.generate_unique_filename(
+            original_filename=original_filename, source=source, organization_id=actor.organization_id
+        )
 
     # create file metadata
     file_metadata = FileMetadata(

letta/server/rest_api/routers/v1/steps.py

@@ -5,6 +5,7 @@ from fastapi import APIRouter, Depends, Header, HTTPException, Query
 
 from letta.orm.errors import NoResultFound
 from letta.schemas.step import Step
+from letta.schemas.step_metrics import StepMetrics
 from letta.server.rest_api.utils import get_letta_server
 from letta.server.server import SyncServer
 from letta.services.step_manager import FeedbackType
@@ -56,7 +57,6 @@ async def list_steps(
         trace_ids=trace_ids,
         feedback=feedback,
         has_feedback=has_feedback,
-        tags=tags,
         project_id=project_id,
     )
 
@@ -77,6 +77,22 @@ async def retrieve_step(
         raise HTTPException(status_code=404, detail="Step not found")
 
 
+@router.get("/{step_id}/metrics", response_model=StepMetrics, operation_id="retrieve_step_metrics")
+async def retrieve_step_metrics(
+    step_id: str,
+    actor_id: Optional[str] = Header(None, alias="user_id"),
+    server: SyncServer = Depends(get_letta_server),
+):
+    """
+    Get step metrics by step ID.
+    """
+    try:
+        actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+        return await server.step_manager.get_step_metrics_async(step_id=step_id, actor=actor)
+    except NoResultFound:
+        raise HTTPException(status_code=404, detail="Step metrics not found")
+
+
 @router.patch("/{step_id}/feedback", response_model=Step, operation_id="add_feedback")
 async def add_feedback(
     step_id: str,
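
Usage sketch for the new metrics route; the step ID and server URL are assumptions:

    import requests

    resp = requests.get(
        "http://localhost:8283/v1/steps/step-00000000-0000-4000-8000-000000000000/metrics",
        headers={"user_id": "user-00000000"},  # hypothetical actor id
    )
    if resp.status_code == 404:
        print("step metrics not found")
    else:
        print(resp.json())  # StepMetrics payload for the step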

letta/server/rest_api/routers/v1/tools.py

@@ -486,6 +486,20 @@ async def add_mcp_tool(
             },
         )
 
+    # Check tool health - reject only INVALID tools
+    if mcp_tool.health:
+        if mcp_tool.health.status == "INVALID":
+            raise HTTPException(
+                status_code=400,
+                detail={
+                    "code": "MCPToolSchemaInvalid",
+                    "message": f"Tool {mcp_tool_name} has an invalid schema and cannot be attached",
+                    "mcp_tool_name": mcp_tool_name,
+                    "health_status": mcp_tool.health.status,
+                    "reasons": mcp_tool.health.reasons,
+                },
+            )
+
     tool_create = ToolCreate.from_mcp(mcp_server_name=mcp_server_name, mcp_tool=mcp_tool)
     # For config-based servers, use the server name as ID since they don't have database IDs
     mcp_server_id = mcp_server_name
@@ -608,7 +622,7 @@ async def delete_mcp_server_from_config(
     actor_id: Optional[str] = Header(None, alias="user_id"),
 ):
     """
-
+    Delete a MCP server configuration
     """
     if tool_settings.mcp_read_from_config:
         # write to config file
@@ -774,7 +788,8 @@ async def connect_mcp_server(
 
 
 class CodeInput(BaseModel):
-    code: str = Field(..., description="
+    code: str = Field(..., description="Source code to parse for JSON schema")
+    source_type: Optional[str] = Field("python", description="The source type of the code (python or typescript)")
 
 
 @router.post("/generate-schema", response_model=Dict[str, Any], operation_id="generate_json_schema")
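
CodeInput.source_type defaults to "python", so existing callers are unchanged; passing "typescript" routes to the new parser in letta/functions/typescript_parser.py (next hunk). Sketch with an assumed route prefix:

    import requests

    ts_source = """
    /** Adds two numbers. */
    export function add(a: number, b: number): number {
      return a + b;
    }
    """

    resp = requests.post(
        "http://localhost:8283/v1/tools/generate-schema",  # assumed prefix for this router
        headers={"user_id": "user-00000000"},              # hypothetical actor id
        json={"code": ts_source, "source_type": "typescript"},
    )
    print(resp.json())  # JSON schema derived from the TypeScript signature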
@@ -784,16 +799,90 @@ async def generate_json_schema(
     actor_id: Optional[str] = Header(None, alias="user_id"),
 ):
     """
-    Generate a JSON schema from the given
+    Generate a JSON schema from the given source code defining a function or class.
+    Supports both Python and TypeScript source code.
     """
     try:
-        schema = derive_openai_json_schema(source_code=request.code)
+        if request.source_type == "typescript":
+            from letta.functions.typescript_parser import derive_typescript_json_schema
+
+            schema = derive_typescript_json_schema(source_code=request.code)
+        else:
+            # Default to Python for backwards compatibility
+            schema = derive_openai_json_schema(source_code=request.code)
         return schema
 
     except Exception as e:
         raise HTTPException(status_code=400, detail=f"Failed to generate schema: {str(e)}")
 
 
+# TODO: @jnjpng move this and other models above to appropriate file for schemas
+class MCPToolExecuteRequest(BaseModel):
+    args: Dict[str, Any] = Field(default_factory=dict, description="Arguments to pass to the MCP tool")
+
+
+@router.post("/mcp/servers/{mcp_server_name}/tools/{tool_name}/execute", operation_id="execute_mcp_tool")
+async def execute_mcp_tool(
+    mcp_server_name: str,
+    tool_name: str,
+    request: MCPToolExecuteRequest = Body(...),
+    server: SyncServer = Depends(get_letta_server),
+    actor_id: Optional[str] = Header(None, alias="user_id"),
+):
+    """
+    Execute a specific MCP tool from a configured server.
+    Returns the tool execution result.
+    """
+    client = None
+    try:
+        actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+
+        # Get the MCP server by name
+        mcp_server = await server.mcp_manager.get_mcp_server(mcp_server_name, actor)
+        if not mcp_server:
+            raise HTTPException(
+                status_code=404,
+                detail={
+                    "code": "MCPServerNotFound",
+                    "message": f"MCP server '{mcp_server_name}' not found",
+                    "server_name": mcp_server_name,
+                },
+            )
+
+        # Create client and connect
+        server_config = mcp_server.to_config()
+        server_config.resolve_environment_variables()
+        client = await server.mcp_manager.get_mcp_client(server_config, actor)
+        await client.connect_to_server()
+
+        # Execute the tool
+        result, success = await client.execute_tool(tool_name, request.args)
+
+        return {
+            "result": result,
+            "success": success,
+        }
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.warning(f"Error executing MCP tool: {str(e)}")
+        raise HTTPException(
+            status_code=500,
+            detail={
+                "code": "MCPToolExecutionError",
+                "message": f"Failed to execute MCP tool: {str(e)}",
+                "server_name": mcp_server_name,
+                "tool_name": tool_name,
+            },
+        )
+    finally:
+        if client:
+            try:
+                await client.cleanup()
+            except Exception as cleanup_error:
+                logger.warning(f"Error during MCP client cleanup: {cleanup_error}")
+
+
 # TODO: @jnjpng need to route this through cloud API for production
 @router.get("/mcp/oauth/callback/{session_id}", operation_id="mcp_oauth_callback")
 async def mcp_oauth_callback(
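
Client sketch for the new MCP execution route; server name, tool name, and arguments are assumptions:

    import requests

    resp = requests.post(
        "http://localhost:8283/v1/tools/mcp/servers/my-mcp-server/tools/web_search/execute",
        headers={"user_id": "user-00000000"},           # hypothetical actor id
        json={"args": {"query": "agent file format"}},  # MCPToolExecuteRequest body
    )
    payload = resp.json()
    print(payload["success"], payload["result"])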
@@ -855,6 +944,8 @@ async def generate_tool_from_prompt(
     """
     Generate a tool from the given user prompt.
     """
+    response_data = None
+
     try:
         actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
         llm_config = await server.get_cached_llm_config_async(actor=actor, handle=request.handle or "anthropic/claude-3-5-sonnet-20240620")
@@ -917,5 +1008,5 @@ async def generate_tool_from_prompt(
             response=response.choices[0].message.content,
         )
     except Exception as e:
-        logger.error(f"Failed to generate tool: {str(e)}")
+        logger.error(f"Failed to generate tool: {str(e)}. Raw response: {response_data}")
         raise HTTPException(status_code=500, detail=f"Failed to generate tool: {str(e)}")
|