letta-nightly 0.12.1.dev20251024104217__py3-none-any.whl → 0.13.0.dev20251025104015__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of letta-nightly might be problematic.
- letta/__init__.py +2 -3
- letta/adapters/letta_llm_adapter.py +1 -0
- letta/adapters/simple_llm_request_adapter.py +8 -5
- letta/adapters/simple_llm_stream_adapter.py +22 -6
- letta/agents/agent_loop.py +10 -3
- letta/agents/base_agent.py +4 -1
- letta/agents/helpers.py +41 -9
- letta/agents/letta_agent.py +11 -10
- letta/agents/letta_agent_v2.py +47 -37
- letta/agents/letta_agent_v3.py +395 -300
- letta/agents/voice_agent.py +8 -6
- letta/agents/voice_sleeptime_agent.py +3 -3
- letta/constants.py +30 -7
- letta/errors.py +20 -0
- letta/functions/function_sets/base.py +55 -3
- letta/functions/mcp_client/types.py +33 -57
- letta/functions/schema_generator.py +135 -23
- letta/groups/sleeptime_multi_agent_v3.py +6 -11
- letta/groups/sleeptime_multi_agent_v4.py +227 -0
- letta/helpers/converters.py +78 -4
- letta/helpers/crypto_utils.py +6 -2
- letta/interfaces/anthropic_parallel_tool_call_streaming_interface.py +9 -11
- letta/interfaces/anthropic_streaming_interface.py +3 -4
- letta/interfaces/gemini_streaming_interface.py +4 -6
- letta/interfaces/openai_streaming_interface.py +63 -28
- letta/llm_api/anthropic_client.py +7 -4
- letta/llm_api/deepseek_client.py +6 -4
- letta/llm_api/google_ai_client.py +3 -12
- letta/llm_api/google_vertex_client.py +1 -1
- letta/llm_api/helpers.py +90 -61
- letta/llm_api/llm_api_tools.py +4 -1
- letta/llm_api/openai.py +12 -12
- letta/llm_api/openai_client.py +53 -16
- letta/local_llm/constants.py +4 -3
- letta/local_llm/json_parser.py +5 -2
- letta/local_llm/utils.py +2 -3
- letta/log.py +171 -7
- letta/orm/agent.py +43 -9
- letta/orm/archive.py +4 -0
- letta/orm/custom_columns.py +15 -0
- letta/orm/identity.py +11 -11
- letta/orm/mcp_server.py +9 -0
- letta/orm/message.py +6 -1
- letta/orm/run_metrics.py +7 -2
- letta/orm/sqlalchemy_base.py +2 -2
- letta/orm/tool.py +3 -0
- letta/otel/tracing.py +2 -0
- letta/prompts/prompt_generator.py +7 -2
- letta/schemas/agent.py +41 -10
- letta/schemas/agent_file.py +3 -0
- letta/schemas/archive.py +4 -2
- letta/schemas/block.py +2 -1
- letta/schemas/enums.py +36 -3
- letta/schemas/file.py +3 -3
- letta/schemas/folder.py +2 -1
- letta/schemas/group.py +2 -1
- letta/schemas/identity.py +18 -9
- letta/schemas/job.py +3 -1
- letta/schemas/letta_message.py +71 -12
- letta/schemas/letta_request.py +7 -3
- letta/schemas/letta_stop_reason.py +0 -25
- letta/schemas/llm_config.py +8 -2
- letta/schemas/mcp.py +80 -83
- letta/schemas/mcp_server.py +349 -0
- letta/schemas/memory.py +20 -8
- letta/schemas/message.py +212 -67
- letta/schemas/providers/anthropic.py +13 -6
- letta/schemas/providers/azure.py +6 -4
- letta/schemas/providers/base.py +8 -4
- letta/schemas/providers/bedrock.py +6 -2
- letta/schemas/providers/cerebras.py +7 -3
- letta/schemas/providers/deepseek.py +2 -1
- letta/schemas/providers/google_gemini.py +15 -6
- letta/schemas/providers/groq.py +2 -1
- letta/schemas/providers/lmstudio.py +9 -6
- letta/schemas/providers/mistral.py +2 -1
- letta/schemas/providers/openai.py +7 -2
- letta/schemas/providers/together.py +9 -3
- letta/schemas/providers/xai.py +7 -3
- letta/schemas/run.py +7 -2
- letta/schemas/run_metrics.py +2 -1
- letta/schemas/sandbox_config.py +2 -2
- letta/schemas/secret.py +3 -158
- letta/schemas/source.py +2 -2
- letta/schemas/step.py +2 -2
- letta/schemas/tool.py +24 -1
- letta/schemas/usage.py +0 -1
- letta/server/rest_api/app.py +123 -7
- letta/server/rest_api/dependencies.py +3 -0
- letta/server/rest_api/interface.py +7 -4
- letta/server/rest_api/redis_stream_manager.py +16 -1
- letta/server/rest_api/routers/v1/__init__.py +7 -0
- letta/server/rest_api/routers/v1/agents.py +332 -322
- letta/server/rest_api/routers/v1/archives.py +127 -40
- letta/server/rest_api/routers/v1/blocks.py +54 -6
- letta/server/rest_api/routers/v1/chat_completions.py +146 -0
- letta/server/rest_api/routers/v1/folders.py +27 -35
- letta/server/rest_api/routers/v1/groups.py +23 -35
- letta/server/rest_api/routers/v1/identities.py +24 -10
- letta/server/rest_api/routers/v1/internal_runs.py +107 -0
- letta/server/rest_api/routers/v1/internal_templates.py +162 -179
- letta/server/rest_api/routers/v1/jobs.py +15 -27
- letta/server/rest_api/routers/v1/mcp_servers.py +309 -0
- letta/server/rest_api/routers/v1/messages.py +23 -34
- letta/server/rest_api/routers/v1/organizations.py +6 -27
- letta/server/rest_api/routers/v1/providers.py +35 -62
- letta/server/rest_api/routers/v1/runs.py +30 -43
- letta/server/rest_api/routers/v1/sandbox_configs.py +6 -4
- letta/server/rest_api/routers/v1/sources.py +26 -42
- letta/server/rest_api/routers/v1/steps.py +16 -29
- letta/server/rest_api/routers/v1/tools.py +17 -13
- letta/server/rest_api/routers/v1/users.py +5 -17
- letta/server/rest_api/routers/v1/voice.py +18 -27
- letta/server/rest_api/streaming_response.py +5 -2
- letta/server/rest_api/utils.py +187 -25
- letta/server/server.py +27 -22
- letta/server/ws_api/server.py +5 -4
- letta/services/agent_manager.py +148 -26
- letta/services/agent_serialization_manager.py +6 -1
- letta/services/archive_manager.py +168 -15
- letta/services/block_manager.py +14 -4
- letta/services/file_manager.py +33 -29
- letta/services/group_manager.py +10 -0
- letta/services/helpers/agent_manager_helper.py +65 -11
- letta/services/identity_manager.py +105 -4
- letta/services/job_manager.py +11 -1
- letta/services/mcp/base_client.py +2 -2
- letta/services/mcp/oauth_utils.py +33 -8
- letta/services/mcp_manager.py +174 -78
- letta/services/mcp_server_manager.py +1331 -0
- letta/services/message_manager.py +109 -4
- letta/services/organization_manager.py +4 -4
- letta/services/passage_manager.py +9 -25
- letta/services/provider_manager.py +91 -15
- letta/services/run_manager.py +72 -15
- letta/services/sandbox_config_manager.py +45 -3
- letta/services/source_manager.py +15 -8
- letta/services/step_manager.py +24 -1
- letta/services/streaming_service.py +581 -0
- letta/services/summarizer/summarizer.py +1 -1
- letta/services/tool_executor/core_tool_executor.py +111 -0
- letta/services/tool_executor/files_tool_executor.py +5 -3
- letta/services/tool_executor/sandbox_tool_executor.py +2 -2
- letta/services/tool_executor/tool_execution_manager.py +1 -1
- letta/services/tool_manager.py +10 -3
- letta/services/tool_sandbox/base.py +61 -1
- letta/services/tool_sandbox/local_sandbox.py +1 -3
- letta/services/user_manager.py +2 -2
- letta/settings.py +49 -5
- letta/system.py +14 -5
- letta/utils.py +73 -1
- letta/validators.py +105 -0
- {letta_nightly-0.12.1.dev20251024104217.dist-info → letta_nightly-0.13.0.dev20251025104015.dist-info}/METADATA +4 -2
- {letta_nightly-0.12.1.dev20251024104217.dist-info → letta_nightly-0.13.0.dev20251025104015.dist-info}/RECORD +157 -151
- letta/schemas/letta_ping.py +0 -28
- letta/server/rest_api/routers/openai/chat_completions/__init__.py +0 -0
- {letta_nightly-0.12.1.dev20251024104217.dist-info → letta_nightly-0.13.0.dev20251025104015.dist-info}/WHEEL +0 -0
- {letta_nightly-0.12.1.dev20251024104217.dist-info → letta_nightly-0.13.0.dev20251025104015.dist-info}/entry_points.txt +0 -0
- {letta_nightly-0.12.1.dev20251024104217.dist-info → letta_nightly-0.13.0.dev20251025104015.dist-info}/licenses/LICENSE +0 -0
letta/services/message_manager.py
CHANGED

```diff
@@ -7,11 +7,10 @@ from sqlalchemy import delete, exists, func, select, text
 
 from letta.constants import CONVERSATION_SEARCH_TOOL_NAME, DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG
 from letta.log import get_logger
-from letta.orm.agent import Agent as AgentModel
 from letta.orm.errors import NoResultFound
 from letta.orm.message import Message as MessageModel
 from letta.otel.tracing import trace_method
-from letta.schemas.enums import MessageRole
+from letta.schemas.enums import MessageRole, PrimitiveType
 from letta.schemas.letta_message import LettaMessageUpdateUnion
 from letta.schemas.letta_message_content import ImageSourceType, LettaImage, MessageContentType, TextContent
 from letta.schemas.message import Message as PydanticMessage, MessageSearchResult, MessageUpdate
@@ -21,10 +20,104 @@ from letta.services.file_manager import FileManager
 from letta.services.helpers.agent_manager_helper import validate_agent_exists_async
 from letta.settings import DatabaseChoice, settings
 from letta.utils import enforce_types, fire_and_forget
+from letta.validators import raise_on_invalid_id
 
 logger = get_logger(__name__)
 
 
+@trace_method
+def backfill_missing_tool_call_ids(messages: list, agent_id: Optional[str] = None, actor: Optional[PydanticUser] = None) -> list:
+    """Backfill missing tool_call_id values in tool messages from historical bug (oct 1-6, 2025)
+
+    Args:
+        messages: List of messages to backfill
+        agent_id: Optional agent ID for logging
+        actor: Optional actor information for logging
+
+    Returns:
+        List of messages with tool_call_ids backfilled where appropriate
+    """
+    if not messages:
+        return messages
+
+    from letta.schemas.message import Message as PydanticMessage
+
+    # Check if messages are ordered chronologically (oldest first)
+    # If not, reverse the list to ensure proper chronological order
+    was_reversed = False
+    if len(messages) > 1:
+        first_msg = messages[0]
+        last_msg = messages[-1]
+
+        # Only check PydanticMessage objects that have created_at
+        if (
+            isinstance(first_msg, PydanticMessage)
+            and isinstance(last_msg, PydanticMessage)
+            and hasattr(first_msg, "created_at")
+            and hasattr(last_msg, "created_at")
+        ):
+            # If first message is newer than last message, list is reversed
+            if first_msg.created_at > last_msg.created_at:
+                was_reversed = True
+                messages.reverse()
+
+    updated_messages = []
+    last_tool_call_id = None
+    backfilled_count = 0
+
+    for i, message in enumerate(messages):
+        if not isinstance(message, PydanticMessage):
+            updated_messages.append(message)
+            continue
+
+        # check if assistant message has a single tool call to track
+        if message.role == MessageRole.assistant and message.tool_calls:
+            if len(message.tool_calls) == 1 and message.tool_calls[0].id:
+                last_tool_call_id = message.tool_calls[0].id
+            else:
+                # parallel tool calls or missing id - don't backfill
+                last_tool_call_id = None
+
+        # check if tool message needs backfilling
+        elif message.role == MessageRole.tool:
+            needs_update = False
+
+            # only backfill if we have a single tool return and a preceding tool call id
+            if message.tool_returns and len(message.tool_returns) == 1 and last_tool_call_id is not None:
+                # check and update message.tool_call_id
+                if message.tool_call_id is None:
+                    message.tool_call_id = last_tool_call_id
+                    needs_update = True
+
+                # check and update tool_return.tool_call_id
+                tool_return = message.tool_returns[0]
+                if tool_return.tool_call_id is None:
+                    tool_return.tool_call_id = last_tool_call_id
+                    needs_update = True
+
+            if needs_update:
+                backfilled_count += 1
+                logger.debug(f"Backfilled tool_call_id '{last_tool_call_id}' for message {i} (id={message.id})")
+
+            # clear last_tool_call_id after processing tool message
+            last_tool_call_id = None
+
+        updated_messages.append(message)
+
+    # log warning with context if any backfilling occurred
+    if backfilled_count > 0:
+        actor_info = f"actor_id={actor.id}" if actor else "actor=unknown"
+        agent_info = f"agent_id={agent_id}" if agent_id else "agent=unknown"
+        logger.warning(
+            f"Backfilled {backfilled_count} missing tool_call_ids for historical messages (oct 1-6, 2025 bug) - {agent_info}, {actor_info}"
+        )
+
+    if was_reversed:
+        updated_messages.reverse()
+
+    return updated_messages
+
+
 class MessageManager:
     """Manager class to handle business logic related to Messages."""
 
@@ -216,6 +309,7 @@ class MessageManager:
 
     @enforce_types
     @trace_method
+    @raise_on_invalid_id(param_name="message_id", expected_prefix=PrimitiveType.MESSAGE)
     async def get_message_by_id_async(self, message_id: str, actor: PydanticUser) -> Optional[PydanticMessage]:
         """Fetch a message by ID."""
         async with db_registry.async_session() as session:
@@ -244,7 +338,14 @@ class MessageManager:
             )
             # Sort results directly based on message_ids
             result_dict = {msg.id: msg.to_pydantic() for msg in results}
-
+            messages = list(filter(lambda x: x is not None, [result_dict.get(msg_id, None) for msg_id in message_ids]))
+
+            # backfill missing tool_call_ids from historical bug (oct 1-6, 2025)
+            # Note: we don't have agent_id or actor here, but that's OK for logging
+            # TODO: This can cause bugs technically, if we adversarially craft a series of message_ids that are not contiguous
+            # TODO: But usually, this is being used by the agent loop code to get the in context messages, which are contiguous
+            # TODO: We should remove this as soon as possible, need to inspect for the above log message, if it hasn't happened in a while
+            return backfill_missing_tool_call_ids(messages)
 
     def _create_many_preprocess(self, pydantic_msgs: List[PydanticMessage], actor: PydanticUser) -> List[MessageModel]:
         # Create ORM model instances for all messages
@@ -613,6 +714,7 @@ class MessageManager:
 
     @enforce_types
     @trace_method
+    @raise_on_invalid_id(param_name="message_id", expected_prefix=PrimitiveType.MESSAGE)
     async def delete_message_by_id_async(self, message_id: str, actor: PydanticUser, strict_mode: bool = False) -> bool:
         """Delete a message (async version with turbopuffer support)."""
         # capture agent_id before deletion
@@ -803,7 +905,10 @@ class MessageManager:
             # Execute and convert each Message to its Pydantic representation.
             result = await session.execute(query)
             results = result.scalars().all()
-
+            messages = [msg.to_pydantic() for msg in results]
+
+            # backfill missing tool_call_ids from historical bug (oct 1-6, 2025)
+            return backfill_missing_tool_call_ids(messages, agent_id=agent_id, actor=actor)
 
     @enforce_types
     @trace_method
```
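The backfill helper above pairs each tool message with the single tool call on the assistant message immediately before it; parallel tool calls are deliberately skipped because there is no unambiguous id to inherit. A minimal, self-contained sketch of that pairing rule, using hypothetical stand-in dataclasses rather than letta's actual Pydantic schemas:

```python
# Illustrative only: stand-in types approximating the fields the backfill helper
# relies on (role, tool_calls, tool_returns, tool_call_id); not letta's schemas.
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class ToolCall:  # hypothetical stand-in for a tool call
    id: str
    name: str


@dataclass
class ToolReturn:  # hypothetical stand-in for a tool return
    tool_call_id: Optional[str] = None


@dataclass
class Msg:  # hypothetical stand-in for PydanticMessage
    role: str
    tool_calls: List[ToolCall] = field(default_factory=list)
    tool_returns: List[ToolReturn] = field(default_factory=list)
    tool_call_id: Optional[str] = None


def backfill(messages: List[Msg]) -> List[Msg]:
    """Same pairing rule as the diff: a tool message inherits the id of the
    single tool call on the immediately preceding assistant message."""
    last_id = None
    for m in messages:
        if m.role == "assistant" and m.tool_calls:
            # only a single, well-formed tool call is safe to pair
            last_id = m.tool_calls[0].id if len(m.tool_calls) == 1 else None
        elif m.role == "tool":
            if last_id and len(m.tool_returns) == 1:
                m.tool_call_id = m.tool_call_id or last_id
                m.tool_returns[0].tool_call_id = m.tool_returns[0].tool_call_id or last_id
            last_id = None  # a tool message consumes the pending id
    return messages


history = [
    Msg(role="assistant", tool_calls=[ToolCall(id="call_123", name="web_search")]),
    Msg(role="tool", tool_returns=[ToolReturn()]),  # tool_call_id missing (the historical bug)
]
backfill(history)
assert history[1].tool_call_id == "call_123"
```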
letta/services/organization_manager.py
CHANGED

```diff
@@ -20,8 +20,8 @@ class OrganizationManager:
 
     @enforce_types
     @trace_method
-    async def get_organization_by_id_async(self, org_id: str) ->
-        """Fetch an organization by ID."""
+    async def get_organization_by_id_async(self, org_id: str) -> PydanticOrganization:
+        """Fetch an organization by ID. Raises NoResultFound if not found."""
         async with db_registry.async_session() as session:
             organization = await OrganizationModel.read_async(db_session=session, identifier=org_id)
             return organization.to_pydantic()
@@ -64,7 +64,7 @@ class OrganizationManager:
     @enforce_types
     @trace_method
     async def update_organization_async(self, org_id: str, org_update: OrganizationUpdate) -> PydanticOrganization:
-        """Update an organization."""
+        """Update an organization. Raises NoResultFound if not found."""
         async with db_registry.async_session() as session:
             org = await OrganizationModel.read_async(db_session=session, identifier=org_id)
             if org_update.name:
@@ -77,7 +77,7 @@ class OrganizationManager:
     @enforce_types
     @trace_method
     async def delete_organization_by_id_async(self, org_id: str):
-        """Delete an organization by marking it as deleted."""
+        """Delete an organization by marking it as deleted. Raises NoResultFound if not found."""
         async with db_registry.async_session() as session:
             organization = await OrganizationModel.read_async(db_session=session, identifier=org_id)
             await organization.hard_delete_async(session)
```
letta/services/passage_manager.py
CHANGED

```diff
@@ -120,12 +120,8 @@ class PassageManager:
     @trace_method
     async def get_passage_by_id_async(self, passage_id: str, actor: PydanticUser) -> Optional[PydanticPassage]:
         """DEPRECATED: Use get_agent_passage_by_id_async() or get_source_passage_by_id_async() instead."""
-
-
-        warnings.warn(
-            "get_passage_by_id_async is deprecated. Use get_agent_passage_by_id_async() or get_source_passage_by_id_async() instead.",
-            DeprecationWarning,
-            stacklevel=2,
+        logger.warning(
+            "get_passage_by_id_async is deprecated. Use get_agent_passage_by_id_async() or get_source_passage_by_id_async() instead."
         )
 
         async with db_registry.async_session() as session:
@@ -231,13 +227,7 @@ class PassageManager:
     @trace_method
     async def create_passage_async(self, pydantic_passage: PydanticPassage, actor: PydanticUser) -> PydanticPassage:
         """DEPRECATED: Use create_agent_passage_async() or create_source_passage_async() instead."""
-
-
-        warnings.warn(
-            "create_passage_async is deprecated. Use create_agent_passage_async() or create_source_passage_async() instead.",
-            DeprecationWarning,
-            stacklevel=2,
-        )
+        logger.warning("create_passage_async is deprecated. Use create_agent_passage_async() or create_source_passage_async() instead.")
 
         # Common fields for both passage types
         passage = self._preprocess_passage_for_creation(pydantic_passage=pydantic_passage)
@@ -365,9 +355,8 @@ class PassageManager:
         """DEPRECATED: Use create_many_agent_passages() or create_many_source_passages() instead."""
         import warnings
 
-
+        logger.warning(
             "create_many_passages is deprecated. Use create_many_agent_passages() or create_many_source_passages() instead.",
-            DeprecationWarning,
             stacklevel=2,
         )
         return [self.create_passage(p, actor) for p in passages]
@@ -378,9 +367,8 @@ class PassageManager:
         """DEPRECATED: Use create_many_agent_passages_async() or create_many_source_passages_async() instead."""
         import warnings
 
-
+        logger.warning(
             "create_many_passages_async is deprecated. Use create_many_agent_passages_async() or create_many_source_passages_async() instead.",
-            DeprecationWarning,
             stacklevel=2,
         )
 
@@ -437,9 +425,7 @@ class PassageManager:
         )
 
         # Get or create the default archive for the agent
-        archive = await self.archive_manager.get_or_create_default_archive_for_agent_async(
-            agent_id=agent_state.id, agent_name=agent_state.name, actor=actor
-        )
+        archive = await self.archive_manager.get_or_create_default_archive_for_agent_async(agent_state=agent_state, actor=actor)
 
         text_chunks = list(parse_and_chunk_text(text, embedding_chunk_size))
 
@@ -653,9 +639,8 @@ class PassageManager:
         """DEPRECATED: Use delete_agent_passage_by_id_async() or delete_source_passage_by_id_async() instead."""
         import warnings
 
-
+        logger.warning(
             "delete_passage_by_id_async is deprecated. Use delete_agent_passage_by_id_async() or delete_source_passage_by_id_async() instead.",
-            DeprecationWarning,
             stacklevel=2,
         )
 
@@ -767,9 +752,8 @@ class PassageManager:
         """DEPRECATED: Use delete_agent_passages() or delete_source_passages() instead."""
         import warnings
 
-
+        logger.warning(
             "delete_passages is deprecated. Use delete_agent_passages() or delete_source_passages() instead.",
-            DeprecationWarning,
             stacklevel=2,
         )
         # TODO: This is very inefficient
@@ -789,7 +773,7 @@ class PassageManager:
         """DEPRECATED: Use agent_passage_size() instead (this only counted agent passages anyway)."""
         import warnings
 
-
+        logger.warning("size is deprecated. Use agent_passage_size() instead.", stacklevel=2)
         return self.agent_passage_size(actor=actor, agent_id=agent_id)
 
     @enforce_types
```
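The passage manager changes above route deprecation notices through the module logger instead of `warnings.warn`. A rough sketch of the pattern, using only the standard `logging` module and a hypothetical function name, not letta code; `stacklevel=2` (supported on logger methods since Python 3.8) keeps the emitted record pointing at the caller, just as it did for `warnings.warn`:

```python
# Minimal sketch, standard library only: deprecation notice via the logger.
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("example.service")


def old_api():
    # hypothetical deprecated shim: warn, then (in real code) delegate to the replacement
    logger.warning("old_api is deprecated. Use new_api() instead.", stacklevel=2)


old_api()
```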
letta/services/provider_manager.py
CHANGED

```diff
@@ -2,11 +2,13 @@ from typing import List, Optional, Tuple, Union
 
 from letta.orm.provider import Provider as ProviderModel
 from letta.otel.tracing import trace_method
-from letta.schemas.enums import ProviderCategory, ProviderType
+from letta.schemas.enums import PrimitiveType, ProviderCategory, ProviderType
 from letta.schemas.providers import Provider as PydanticProvider, ProviderCheck, ProviderCreate, ProviderUpdate
+from letta.schemas.secret import Secret
 from letta.schemas.user import User as PydanticUser
 from letta.server.db import db_registry
 from letta.utils import enforce_types
+from letta.validators import raise_on_invalid_id
 
 
 class ProviderManager:
@@ -27,12 +29,19 @@ class ProviderManager:
             # Lazily create the provider id prior to persistence
             provider.resolve_identifier()
 
+            # Explicitly populate encrypted fields from plaintext
+            if provider.api_key is not None:
+                provider.api_key_enc = Secret.from_plaintext(provider.api_key)
+            if provider.access_key is not None:
+                provider.access_key_enc = Secret.from_plaintext(provider.access_key)
+
             new_provider = ProviderModel(**provider.model_dump(to_orm=True, exclude_unset=True))
             await new_provider.create_async(session, actor=actor)
             return new_provider.to_pydantic()
 
     @enforce_types
     @trace_method
+    @raise_on_invalid_id(param_name="provider_id", expected_prefix=PrimitiveType.PROVIDER)
     async def update_provider_async(self, provider_id: str, provider_update: ProviderUpdate, actor: PydanticUser) -> PydanticProvider:
         """Update provider details."""
         async with db_registry.async_session() as session:
@@ -43,6 +52,50 @@ class ProviderManager:
 
             # Update only the fields that are provided in ProviderUpdate
             update_data = provider_update.model_dump(to_orm=True, exclude_unset=True, exclude_none=True)
+
+            # Handle encryption for api_key if provided
+            # Only re-encrypt if the value has actually changed
+            if "api_key" in update_data and update_data["api_key"] is not None:
+                # Check if value changed
+                existing_api_key = None
+                if existing_provider.api_key_enc:
+                    existing_secret = Secret.from_encrypted(existing_provider.api_key_enc)
+                    existing_api_key = existing_secret.get_plaintext()
+                elif existing_provider.api_key:
+                    existing_api_key = existing_provider.api_key
+
+                # Only re-encrypt if different
+                if existing_api_key != update_data["api_key"]:
+                    existing_provider.api_key_enc = Secret.from_plaintext(update_data["api_key"]).get_encrypted()
+                    # Keep plaintext for dual-write during migration
+                    existing_provider.api_key = update_data["api_key"]
+
+                # Remove from update_data since we set directly on existing_provider
+                update_data.pop("api_key", None)
+                update_data.pop("api_key_enc", None)
+
+            # Handle encryption for access_key if provided
+            # Only re-encrypt if the value has actually changed
+            if "access_key" in update_data and update_data["access_key"] is not None:
+                # Check if value changed
+                existing_access_key = None
+                if existing_provider.access_key_enc:
+                    existing_secret = Secret.from_encrypted(existing_provider.access_key_enc)
+                    existing_access_key = existing_secret.get_plaintext()
+                elif existing_provider.access_key:
+                    existing_access_key = existing_provider.access_key
+
+                # Only re-encrypt if different
+                if existing_access_key != update_data["access_key"]:
+                    existing_provider.access_key_enc = Secret.from_plaintext(update_data["access_key"]).get_encrypted()
+                    # Keep plaintext for dual-write during migration
+                    existing_provider.access_key = update_data["access_key"]
+
+                # Remove from update_data since we set directly on existing_provider
+                update_data.pop("access_key", None)
+                update_data.pop("access_key_enc", None)
+
+            # Apply remaining updates
             for key, value in update_data.items():
                 setattr(existing_provider, key, value)
 
@@ -52,6 +105,7 @@ class ProviderManager:
 
     @enforce_types
     @trace_method
+    @raise_on_invalid_id(param_name="provider_id", expected_prefix=PrimitiveType.PROVIDER)
     async def delete_provider_by_id_async(self, provider_id: str, actor: PydanticUser):
         """Delete a provider."""
         async with db_registry.async_session() as session:
@@ -102,6 +156,7 @@ class ProviderManager:
 
     @enforce_types
     @trace_method
+    @raise_on_invalid_id(param_name="provider_id", expected_prefix=PrimitiveType.PROVIDER)
     async def get_provider_async(self, provider_id: str, actor: PydanticUser) -> PydanticProvider:
         async with db_registry.async_session() as session:
             provider_model = await ProviderModel.read_async(db_session=session, identifier=provider_id, actor=actor)
@@ -117,13 +172,21 @@ class ProviderManager:
     @trace_method
     def get_override_key(self, provider_name: Union[str, None], actor: PydanticUser) -> Optional[str]:
         providers = self.list_providers(name=provider_name, actor=actor)
-
+        if providers:
+            # Decrypt the API key before returning
+            api_key_secret = providers[0].get_api_key_secret()
+            return api_key_secret.get_plaintext()
+        return None
 
     @enforce_types
     @trace_method
     async def get_override_key_async(self, provider_name: Union[str, None], actor: PydanticUser) -> Optional[str]:
         providers = await self.list_providers_async(name=provider_name, actor=actor)
-
+        if providers:
+            # Decrypt the API key before returning
+            api_key_secret = providers[0].get_api_key_secret()
+            return api_key_secret.get_plaintext()
+        return None
 
     @enforce_types
     @trace_method
@@ -131,10 +194,15 @@
         self, provider_name: Union[str, None], actor: PydanticUser
     ) -> Tuple[Optional[str], Optional[str], Optional[str]]:
         providers = await self.list_providers_async(name=provider_name, actor=actor)
-
-
-
-
+        if providers:
+            # Decrypt the credentials before returning
+            access_key_secret = providers[0].get_access_key_secret()
+            api_key_secret = providers[0].get_api_key_secret()
+            access_key = access_key_secret.get_plaintext()
+            secret_key = api_key_secret.get_plaintext()
+            region = providers[0].region
+            return access_key, secret_key, region
+        return None, None, None
 
     @enforce_types
     @trace_method
@@ -142,10 +210,14 @@
         self, provider_name: Union[str, None], actor: PydanticUser
     ) -> Tuple[Optional[str], Optional[str], Optional[str]]:
         providers = self.list_providers(name=provider_name, actor=actor)
-
-
-
-
+        if providers:
+            # Decrypt the API key before returning
+            api_key_secret = providers[0].get_api_key_secret()
+            api_key = api_key_secret.get_plaintext()
+            base_url = providers[0].base_url
+            api_version = providers[0].api_version
+            return api_key, base_url, api_version
+        return None, None, None
 
     @enforce_types
     @trace_method
@@ -153,10 +225,14 @@
         self, provider_name: Union[str, None], actor: PydanticUser
     ) -> Tuple[Optional[str], Optional[str], Optional[str]]:
         providers = await self.list_providers_async(name=provider_name, actor=actor)
-
-
-
-
+        if providers:
+            # Decrypt the API key before returning
+            api_key_secret = providers[0].get_api_key_secret()
+            api_key = api_key_secret.get_plaintext()
+            base_url = providers[0].base_url
+            api_version = providers[0].api_version
+            return api_key, base_url, api_version
+        return None, None, None
 
     @enforce_types
     @trace_method
```
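The provider manager now dual-writes credentials: the encrypted column is rewritten only when the plaintext actually changes, while the plaintext column stays populated during the migration window. A toy sketch of that decision logic, using a reversible base64 stand-in rather than letta's real `Secret` class (which, per the diff, exposes `from_plaintext`, `from_encrypted`, `get_plaintext`, and `get_encrypted`):

```python
# Illustrative dual-write sketch; ToySecret is a stand-in, not letta's Secret.
import base64
from typing import Optional


class ToySecret:
    def __init__(self, encrypted: str):
        self._encrypted = encrypted

    @classmethod
    def from_plaintext(cls, value: str) -> "ToySecret":
        return cls(base64.b64encode(value.encode()).decode())

    @classmethod
    def from_encrypted(cls, value: str) -> "ToySecret":
        return cls(value)

    def get_encrypted(self) -> str:
        return self._encrypted

    def get_plaintext(self) -> str:
        return base64.b64decode(self._encrypted).decode()


def update_api_key(existing_enc: Optional[str], existing_plain: Optional[str], new_plain: str):
    """Re-encrypt only when the incoming plaintext differs from what is stored,
    and keep the plaintext column populated (dual-write) during migration."""
    current = ToySecret.from_encrypted(existing_enc).get_plaintext() if existing_enc else existing_plain
    if current == new_plain:
        return existing_enc, existing_plain  # unchanged: avoid churning ciphertext
    return ToySecret.from_plaintext(new_plain).get_encrypted(), new_plain


enc, plain = update_api_key(None, "sk-old", "sk-new")
assert ToySecret.from_encrypted(enc).get_plaintext() == "sk-new" and plain == "sk-new"
```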
letta/services/run_manager.py
CHANGED

```diff
@@ -3,8 +3,6 @@ from pickletools import pyunicode
 from typing import List, Literal, Optional
 
 from httpx import AsyncClient
-from sqlalchemy import select
-from sqlalchemy.orm import Session
 
 from letta.helpers.datetime_helpers import get_utc_time
 from letta.log import get_logger
@@ -16,7 +14,7 @@ from letta.orm.run_metrics import RunMetrics as RunMetricsModel
 from letta.orm.sqlalchemy_base import AccessType
 from letta.orm.step import Step as StepModel
 from letta.otel.tracing import log_event, trace_method
-from letta.schemas.enums import AgentType, MessageRole, RunStatus
+from letta.schemas.enums import AgentType, ComparisonOperator, MessageRole, RunStatus, PrimitiveType
 from letta.schemas.job import LettaRequestConfig
 from letta.schemas.letta_message import LettaMessage, LettaMessageUnion
 from letta.schemas.letta_response import LettaResponse
@@ -33,6 +31,7 @@ from letta.services.helpers.agent_manager_helper import validate_agent_exists_async
 from letta.services.message_manager import MessageManager
 from letta.services.step_manager import StepManager
 from letta.utils import enforce_types
+from letta.validators import raise_on_invalid_id
 
 logger = get_logger(__name__)
 
@@ -87,6 +86,7 @@ class RunManager:
             return run.to_pydantic()
 
     @enforce_types
+    @raise_on_invalid_id(param_name="run_id", expected_prefix=PrimitiveType.RUN)
     async def get_run_by_id(self, run_id: str, actor: PydanticUser) -> PydanticRun:
         """Get a run by its ID."""
         async with db_registry.async_session() as session:
@@ -108,10 +108,14 @@ class RunManager:
         ascending: bool = False,
         stop_reason: Optional[str] = None,
         background: Optional[bool] = None,
+        template_family: Optional[str] = None,
+        step_count: Optional[int] = None,
+        step_count_operator: ComparisonOperator = ComparisonOperator.EQ,
+        tools_used: Optional[List[str]] = None,
     ) -> List[PydanticRun]:
         """List runs with filtering options."""
         async with db_registry.async_session() as session:
-            from sqlalchemy import select
+            from sqlalchemy import or_, select
 
             query = select(RunModel).filter(RunModel.organization_id == actor.organization_id)
 
@@ -133,6 +137,33 @@ class RunManager:
             if background is not None:
                 query = query.filter(RunModel.background == background)
 
+            # Filter by template_family (base_template_id)
+            if template_family:
+                query = query.filter(RunModel.base_template_id == template_family)
+
+            # Filter by step_count and/or tools_used - join with run_metrics
+            if step_count is not None or tools_used:
+                query = query.join(RunMetricsModel, RunModel.id == RunMetricsModel.id)
+
+                # Filter by step_count with the specified operator
+                if step_count is not None:
+                    if step_count_operator == ComparisonOperator.EQ:
+                        query = query.filter(RunMetricsModel.num_steps == step_count)
+                    elif step_count_operator == ComparisonOperator.GTE:
+                        query = query.filter(RunMetricsModel.num_steps >= step_count)
+                    elif step_count_operator == ComparisonOperator.LTE:
+                        query = query.filter(RunMetricsModel.num_steps <= step_count)
+
+                # Filter by tools used ids
+                if tools_used:
+                    from sqlalchemy import String, cast as sa_cast, type_coerce
+                    from sqlalchemy.dialects.postgresql import ARRAY, JSONB
+
+                    # Use ?| operator to check if any tool_id exists in the array (OR logic)
+                    jsonb_tools = sa_cast(RunMetricsModel.tools_used, JSONB)
+                    tools_array = type_coerce(tools_used, ARRAY(String))
+                    query = query.filter(jsonb_tools.op("?|")(tools_array))
+
             # Apply pagination
             from letta.services.helpers.run_manager_helper import _apply_pagination_async
 
@@ -147,24 +178,22 @@ class RunManager:
             return [run.to_pydantic() for run in runs]
 
     @enforce_types
-
+    @raise_on_invalid_id(param_name="run_id", expected_prefix=PrimitiveType.RUN)
+    async def delete_run(self, run_id: str, actor: PydanticUser) -> None:
         """Delete a run by its ID."""
         async with db_registry.async_session() as session:
             run = await RunModel.read_async(db_session=session, identifier=run_id, actor=actor, access_type=AccessType.ORGANIZATION)
             if not run:
                 raise NoResultFound(f"Run with id {run_id} not found")
 
-            pydantic_run = run.to_pydantic()
             await run.hard_delete_async(db_session=session, actor=actor)
 
-            return pydantic_run
-
     @enforce_types
+    @raise_on_invalid_id(param_name="run_id", expected_prefix=PrimitiveType.RUN)
     async def update_run_by_id_async(
         self, run_id: str, update: RunUpdate, actor: PydanticUser, refresh_result_messages: bool = True
     ) -> PydanticRun:
         """Update a run using a RunUpdate object."""
-
         async with db_registry.async_session() as session:
             run = await RunModel.read_async(db_session=session, identifier=run_id, actor=actor)
 
@@ -203,15 +232,38 @@ class RunManager:
 
         # update run metrics table
         num_steps = len(await self.step_manager.list_steps_async(run_id=run_id, actor=actor))
+
+        # Collect tools used from run messages
+        tools_used = set()
+        messages = await self.message_manager.list_messages(actor=actor, run_id=run_id)
+        for message in messages:
+            if message.tool_calls:
+                for tool_call in message.tool_calls:
+                    if hasattr(tool_call, "function") and hasattr(tool_call.function, "name"):
+                        # Get tool ID from tool name
+                        from letta.services.tool_manager import ToolManager
+
+                        tool_manager = ToolManager()
+                        tool_name = tool_call.function.name
+                        tool_id = await tool_manager.get_tool_id_by_name_async(tool_name, actor)
+                        if tool_id:
+                            tools_used.add(tool_id)
+
         async with db_registry.async_session() as session:
             metrics = await RunMetricsModel.read_async(db_session=session, identifier=run_id, actor=actor)
             # Calculate runtime if run is completing
-            if is_terminal_update
-
-
-
-
+            if is_terminal_update:
+                # Use total_duration_ns from RunUpdate if provided
+                # Otherwise fall back to system time
+                if update.total_duration_ns is not None:
+                    metrics.run_ns = update.total_duration_ns
+                elif metrics.run_start_ns:
+                    import time
+
+                    current_ns = int(time.time() * 1e9)
+                    metrics.run_ns = current_ns - metrics.run_start_ns
             metrics.num_steps = num_steps
+            metrics.tools_used = list(tools_used) if tools_used else None
             await metrics.update_async(db_session=session, actor=actor, no_commit=True, no_refresh=True)
             await session.commit()
 
@@ -267,7 +319,7 @@ class RunManager:
                     log_event("POST callback finished")
                 result["callback_status_code"] = resp.status_code
             except Exception as e:
-                error_message = f"Failed to dispatch callback for run {callback_info['run_id']} to {callback_info['callback_url']}: {e!
+                error_message = f"Failed to dispatch callback for run {callback_info['run_id']} to {callback_info['callback_url']}: {e!r}"
                 logger.error(error_message)
                 result["callback_error"] = error_message
                 # Continue silently - callback failures should not affect run completion
@@ -275,6 +327,7 @@ class RunManager:
         return result
 
     @enforce_types
+    @raise_on_invalid_id(param_name="run_id", expected_prefix=PrimitiveType.RUN)
    async def get_run_usage(self, run_id: str, actor: PydanticUser) -> LettaUsageStatistics:
        """Get usage statistics for a run."""
        async with db_registry.async_session() as session:
@@ -292,6 +345,7 @@ class RunManager:
             return total_usage
 
     @enforce_types
+    @raise_on_invalid_id(param_name="run_id", expected_prefix=PrimitiveType.RUN)
     async def get_run_messages(
         self,
         run_id: str,
@@ -326,6 +380,7 @@ class RunManager:
             return letta_messages
 
     @enforce_types
+    @raise_on_invalid_id(param_name="run_id", expected_prefix=PrimitiveType.RUN)
     async def get_run_request_config(self, run_id: str, actor: PydanticUser) -> Optional[LettaRequestConfig]:
         """Get the letta request config from a run."""
         async with db_registry.async_session() as session:
@@ -336,6 +391,7 @@ class RunManager:
             return pydantic_run.request_config
 
     @enforce_types
+    @raise_on_invalid_id(param_name="run_id", expected_prefix=PrimitiveType.RUN)
     async def get_run_metrics_async(self, run_id: str, actor: PydanticUser) -> PydanticRunMetrics:
         """Get metrics for a run."""
         async with db_registry.async_session() as session:
@@ -343,6 +399,7 @@ class RunManager:
             return metrics.to_pydantic()
 
     @enforce_types
+    @raise_on_invalid_id(param_name="run_id", expected_prefix=PrimitiveType.RUN)
     async def get_run_steps(
         self,
         run_id: str,
```