letta-nightly 0.8.15.dev20250720104313__py3-none-any.whl → 0.8.16.dev20250721104533__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. letta/__init__.py +1 -1
  2. letta/agent.py +27 -11
  3. letta/agents/helpers.py +1 -1
  4. letta/agents/letta_agent.py +518 -322
  5. letta/agents/letta_agent_batch.py +1 -2
  6. letta/agents/voice_agent.py +15 -17
  7. letta/client/client.py +3 -3
  8. letta/constants.py +5 -0
  9. letta/embeddings.py +0 -2
  10. letta/errors.py +8 -0
  11. letta/functions/function_sets/base.py +3 -3
  12. letta/functions/helpers.py +2 -3
  13. letta/groups/sleeptime_multi_agent.py +0 -1
  14. letta/helpers/composio_helpers.py +2 -2
  15. letta/helpers/converters.py +1 -1
  16. letta/helpers/pinecone_utils.py +8 -0
  17. letta/helpers/tool_rule_solver.py +13 -18
  18. letta/llm_api/aws_bedrock.py +16 -2
  19. letta/llm_api/cohere.py +1 -1
  20. letta/llm_api/openai_client.py +1 -1
  21. letta/local_llm/grammars/gbnf_grammar_generator.py +1 -1
  22. letta/local_llm/llm_chat_completion_wrappers/zephyr.py +14 -14
  23. letta/local_llm/utils.py +1 -2
  24. letta/orm/agent.py +3 -3
  25. letta/orm/block.py +4 -4
  26. letta/orm/files_agents.py +0 -1
  27. letta/orm/identity.py +2 -0
  28. letta/orm/mcp_server.py +0 -2
  29. letta/orm/message.py +140 -14
  30. letta/orm/organization.py +5 -5
  31. letta/orm/passage.py +4 -4
  32. letta/orm/source.py +1 -1
  33. letta/orm/sqlalchemy_base.py +61 -39
  34. letta/orm/step.py +2 -0
  35. letta/otel/db_pool_monitoring.py +308 -0
  36. letta/otel/metric_registry.py +94 -1
  37. letta/otel/sqlalchemy_instrumentation.py +548 -0
  38. letta/otel/sqlalchemy_instrumentation_integration.py +124 -0
  39. letta/otel/tracing.py +37 -1
  40. letta/schemas/agent.py +0 -3
  41. letta/schemas/agent_file.py +283 -0
  42. letta/schemas/block.py +0 -3
  43. letta/schemas/file.py +28 -26
  44. letta/schemas/letta_message.py +15 -4
  45. letta/schemas/memory.py +1 -1
  46. letta/schemas/message.py +31 -26
  47. letta/schemas/openai/chat_completion_response.py +0 -1
  48. letta/schemas/providers.py +20 -0
  49. letta/schemas/source.py +11 -13
  50. letta/schemas/step.py +12 -0
  51. letta/schemas/tool.py +0 -4
  52. letta/serialize_schemas/marshmallow_agent.py +14 -1
  53. letta/serialize_schemas/marshmallow_block.py +23 -1
  54. letta/serialize_schemas/marshmallow_message.py +1 -3
  55. letta/serialize_schemas/marshmallow_tool.py +23 -1
  56. letta/server/db.py +110 -6
  57. letta/server/rest_api/app.py +85 -73
  58. letta/server/rest_api/routers/v1/agents.py +68 -53
  59. letta/server/rest_api/routers/v1/blocks.py +2 -2
  60. letta/server/rest_api/routers/v1/jobs.py +3 -0
  61. letta/server/rest_api/routers/v1/organizations.py +2 -2
  62. letta/server/rest_api/routers/v1/sources.py +18 -2
  63. letta/server/rest_api/routers/v1/tools.py +11 -12
  64. letta/server/rest_api/routers/v1/users.py +1 -1
  65. letta/server/rest_api/streaming_response.py +13 -5
  66. letta/server/rest_api/utils.py +8 -25
  67. letta/server/server.py +11 -4
  68. letta/server/ws_api/server.py +2 -2
  69. letta/services/agent_file_manager.py +616 -0
  70. letta/services/agent_manager.py +133 -46
  71. letta/services/block_manager.py +38 -17
  72. letta/services/file_manager.py +106 -21
  73. letta/services/file_processor/file_processor.py +93 -0
  74. letta/services/files_agents_manager.py +28 -0
  75. letta/services/group_manager.py +4 -5
  76. letta/services/helpers/agent_manager_helper.py +57 -9
  77. letta/services/identity_manager.py +22 -0
  78. letta/services/job_manager.py +210 -91
  79. letta/services/llm_batch_manager.py +9 -6
  80. letta/services/mcp/stdio_client.py +1 -2
  81. letta/services/mcp_manager.py +0 -1
  82. letta/services/message_manager.py +49 -26
  83. letta/services/passage_manager.py +0 -1
  84. letta/services/provider_manager.py +1 -1
  85. letta/services/source_manager.py +114 -5
  86. letta/services/step_manager.py +36 -4
  87. letta/services/telemetry_manager.py +9 -2
  88. letta/services/tool_executor/builtin_tool_executor.py +5 -1
  89. letta/services/tool_executor/core_tool_executor.py +3 -3
  90. letta/services/tool_manager.py +95 -20
  91. letta/services/user_manager.py +4 -12
  92. letta/settings.py +23 -6
  93. letta/system.py +1 -1
  94. letta/utils.py +26 -2
  95. {letta_nightly-0.8.15.dev20250720104313.dist-info → letta_nightly-0.8.16.dev20250721104533.dist-info}/METADATA +3 -2
  96. {letta_nightly-0.8.15.dev20250720104313.dist-info → letta_nightly-0.8.16.dev20250721104533.dist-info}/RECORD +99 -94
  97. {letta_nightly-0.8.15.dev20250720104313.dist-info → letta_nightly-0.8.16.dev20250721104533.dist-info}/LICENSE +0 -0
  98. {letta_nightly-0.8.15.dev20250720104313.dist-info → letta_nightly-0.8.16.dev20250721104533.dist-info}/WHEEL +0 -0
  99. {letta_nightly-0.8.15.dev20250720104313.dist-info → letta_nightly-0.8.16.dev20250721104533.dist-info}/entry_points.txt +0 -0
letta/otel/tracing.py CHANGED
@@ -143,7 +143,43 @@ def setup_tracing(
     if settings.sqlalchemy_tracing:
         from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor
 
-        SQLAlchemyInstrumentor().instrument()
+        from letta.server.db import db_registry
+
+        # For OpenTelemetry SQLAlchemy instrumentation, we need to use the sync_engine
+        async_engine = db_registry.get_async_engine()
+        if async_engine:
+            # Access the sync_engine attribute safely
+            try:
+                SQLAlchemyInstrumentor().instrument(
+                    engine=async_engine.sync_engine,
+                    enable_commenter=True,
+                    commenter_options={},
+                    enable_attribute_commenter=True,
+                )
+            except Exception:
+                # Fall back to instrumenting without specifying an engine
+                # This will still capture some SQL operations
+                SQLAlchemyInstrumentor().instrument(
+                    enable_commenter=True,
+                    commenter_options={},
+                    enable_attribute_commenter=True,
+                )
+        else:
+            # If no async engine is available, instrument without an engine
+            SQLAlchemyInstrumentor().instrument(
+                enable_commenter=True,
+                commenter_options={},
+                enable_attribute_commenter=True,
+            )
+
+        # Additionally set up our custom instrumentation
+        try:
+            from letta.otel.sqlalchemy_instrumentation_integration import setup_letta_db_instrumentation
+
+            setup_letta_db_instrumentation(enable_joined_monitoring=True)
+        except Exception as e:
+            # Log but continue if our custom instrumentation fails
+            logger.warning(f"Failed to setup Letta DB instrumentation: {e}")
 
     if app:
         # Add middleware first
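Note: the OpenTelemetry SQLAlchemy instrumentor hooks into the synchronous Engine, which is why the new code reaches for async_engine.sync_engine. A minimal standalone sketch of that pattern (the in-memory SQLite URL and engine setup are illustrative placeholders, not Letta's configuration):

# Minimal sketch: instrumenting an async SQLAlchemy engine with OpenTelemetry.
# The sqlite+aiosqlite URL below is a placeholder, not Letta's database config.
from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor
from sqlalchemy.ext.asyncio import create_async_engine

async_engine = create_async_engine("sqlite+aiosqlite:///:memory:")

# AsyncEngine wraps a synchronous Engine; the instrumentor attaches its event
# listeners to that sync_engine, mirroring what setup_tracing now does.
SQLAlchemyInstrumentor().instrument(
    engine=async_engine.sync_engine,
    enable_commenter=True,  # append trace context to emitted SQL as comments
    commenter_options={},
    enable_attribute_commenter=True,
)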
letta/schemas/agent.py CHANGED
@@ -78,8 +78,6 @@ class AgentState(OrmMetadataBase, validate_assignment=True):
 
     # This is an object representing the in-process state of a running `Agent`
     # Field in this object can be theoretically edited by tools, and will be persisted by the ORM
-    organization_id: Optional[str] = Field(None, description="The unique identifier of the organization associated with the agent.")
-
     description: Optional[str] = Field(None, description="The description of the agent.")
     metadata: Optional[Dict] = Field(None, description="The metadata of the agent.")
 
@@ -309,7 +307,6 @@ class AgentStepResponse(BaseModel):
 
 
 def get_prompt_template_for_agent_type(agent_type: Optional[AgentType] = None):
-
     # Workflow agents and ReAct agents don't use memory blocks
     # However, they still allow files to be injected into the context
     if agent_type == AgentType.react_agent or agent_type == AgentType.workflow_agent:
letta/schemas/agent_file.py ADDED
@@ -0,0 +1,283 @@
+from datetime import datetime
+from typing import Dict, List, Optional
+
+from pydantic import BaseModel, Field
+
+from letta.schemas.agent import AgentState, CreateAgent
+from letta.schemas.block import Block, CreateBlock
+from letta.schemas.enums import MessageRole
+from letta.schemas.file import FileAgent, FileAgentBase, FileMetadata, FileMetadataBase
+from letta.schemas.group import GroupCreate
+from letta.schemas.message import Message, MessageCreate
+from letta.schemas.source import Source, SourceCreate
+from letta.schemas.tool import Tool
+from letta.schemas.user import User
+from letta.services.message_manager import MessageManager
+
+
+class ImportResult:
+    """Result of an agent file import operation"""
+
+    def __init__(
+        self,
+        success: bool,
+        message: str = "",
+        imported_count: int = 0,
+        errors: Optional[List[str]] = None,
+        id_mappings: Optional[Dict[str, str]] = None,
+    ):
+        self.success = success
+        self.message = message
+        self.imported_count = imported_count
+        self.errors = errors or []
+        self.id_mappings = id_mappings or {}
+
+
+class MessageSchema(MessageCreate):
+    """Message with human-readable ID for agent file"""
+
+    __id_prefix__ = "message"
+    id: str = Field(..., description="Human-readable identifier for this message in the file")
+
+    # Override the role field to accept all message roles, not just user/system/assistant
+    role: MessageRole = Field(..., description="The role of the participant.")
+    model: Optional[str] = Field(None, description="The model used to make the function call")
+    agent_id: Optional[str] = Field(None, description="The unique identifier of the agent")
+
+    @classmethod
+    def from_message(cls, message: Message) -> "MessageSchema":
+        """Convert Message to MessageSchema"""
+
+        # Create MessageSchema directly without going through MessageCreate
+        # to avoid role validation issues
+        return cls(
+            id=message.id,
+            role=message.role,
+            content=message.content,
+            name=message.name,
+            otid=None,  # TODO
+            sender_id=None,  # TODO
+            batch_item_id=message.batch_item_id,
+            group_id=message.group_id,
+            model=message.model,
+            agent_id=message.agent_id,
+        )
+
+
+class FileAgentSchema(FileAgentBase):
+    """File-Agent relationship with human-readable ID for agent file"""
+
+    __id_prefix__ = "file_agent"
+    id: str = Field(..., description="Human-readable identifier for this file-agent relationship in the file")
+
+    @classmethod
+    def from_file_agent(cls, file_agent: FileAgent) -> "FileAgentSchema":
+        """Convert FileAgent to FileAgentSchema"""
+
+        create_file_agent = FileAgentBase(
+            agent_id=file_agent.agent_id,
+            file_id=file_agent.file_id,
+            source_id=file_agent.source_id,
+            file_name=file_agent.file_name,
+            is_open=file_agent.is_open,
+            visible_content=file_agent.visible_content,
+            last_accessed_at=file_agent.last_accessed_at,
+        )
+
+        # Create FileAgentSchema with the file_agent's ID (will be remapped later)
+        return cls(id=file_agent.id, **create_file_agent.model_dump())
+
+
+class AgentSchema(CreateAgent):
+    """Agent with human-readable ID for agent file"""
+
+    __id_prefix__ = "agent"
+    id: str = Field(..., description="Human-readable identifier for this agent in the file")
+    in_context_message_ids: List[str] = Field(
+        default_factory=list, description="List of message IDs that are currently in the agent's context"
+    )
+    messages: List[MessageSchema] = Field(default_factory=list, description="List of messages in the agent's conversation history")
+    files_agents: List[FileAgentSchema] = Field(default_factory=list, description="List of file-agent relationships for this agent")
+
+    @classmethod
+    async def from_agent_state(
+        cls, agent_state: AgentState, message_manager: MessageManager, files_agents: List[FileAgent], actor: User
+    ) -> "AgentSchema":
+        """Convert AgentState to AgentSchema"""
+
+        create_agent = CreateAgent(
+            name=agent_state.name,
+            memory_blocks=[],  # TODO: Convert from agent_state.memory if needed
+            tools=[],
+            tool_ids=[tool.id for tool in agent_state.tools] if agent_state.tools else [],
+            source_ids=[],  # [source.id for source in agent_state.sources] if agent_state.sources else [],
+            block_ids=[block.id for block in agent_state.memory.blocks],
+            tool_rules=agent_state.tool_rules,
+            tags=agent_state.tags,
+            system=agent_state.system,
+            agent_type=agent_state.agent_type,
+            llm_config=agent_state.llm_config,
+            embedding_config=agent_state.embedding_config,
+            initial_message_sequence=None,
+            include_base_tools=False,
+            include_multi_agent_tools=False,
+            include_base_tool_rules=False,
+            include_default_source=False,
+            description=agent_state.description,
+            metadata=agent_state.metadata,
+            model=None,
+            embedding=None,
+            context_window_limit=None,
+            embedding_chunk_size=None,
+            max_tokens=None,
+            max_reasoning_tokens=None,
+            enable_reasoner=False,
+            from_template=None,  # TODO: Need to get passed in
+            template=False,  # TODO: Need to get passed in
+            project=None,  # TODO: Need to get passed in
+            tool_exec_environment_variables=agent_state.get_agent_env_vars_as_dict(),
+            memory_variables=None,  # TODO: Need to get passed in
+            project_id=None,  # TODO: Need to get passed in
+            template_id=None,  # TODO: Need to get passed in
+            base_template_id=None,  # TODO: Need to get passed in
+            identity_ids=None,  # TODO: Need to get passed in
+            message_buffer_autoclear=agent_state.message_buffer_autoclear,
+            enable_sleeptime=False,  # TODO: Need to figure out how to patch this
+            response_format=agent_state.response_format,
+            timezone=agent_state.timezone or "UTC",
+        )
+
+        messages = await message_manager.list_messages_for_agent_async(
+            agent_id=agent_state.id, actor=actor, limit=50
+        )  # TODO: Expand to get more messages
+
+        # Convert messages to MessageSchema objects
+        message_schemas = [MessageSchema.from_message(msg) for msg in messages]
+
+        # Create AgentSchema with agent state ID (remapped later)
+        return cls(
+            id=agent_state.id,
+            in_context_message_ids=agent_state.message_ids or [],
+            messages=message_schemas,  # Messages will be populated separately by the manager
+            files_agents=[FileAgentSchema.from_file_agent(f) for f in files_agents],
+            **create_agent.model_dump(),
+        )
+
+
+class GroupSchema(GroupCreate):
+    """Group with human-readable ID for agent file"""
+
+    __id_prefix__ = "group"
+    id: str = Field(..., description="Human-readable identifier for this group in the file")
+
+
+class BlockSchema(CreateBlock):
+    """Block with human-readable ID for agent file"""
+
+    __id_prefix__ = "block"
+    id: str = Field(..., description="Human-readable identifier for this block in the file")
+
+    @classmethod
+    def from_block(cls, block: Block) -> "BlockSchema":
+        """Convert Block to BlockSchema"""
+
+        create_block = CreateBlock(
+            value=block.value,
+            limit=block.limit,
+            template_name=block.template_name,
+            is_template=block.is_template,
+            preserve_on_migration=block.preserve_on_migration,
+            label=block.label,
+            read_only=block.read_only,
+            description=block.description,
+            metadata=block.metadata or {},
+        )
+
+        # Create BlockSchema with the block's ID (will be remapped later)
+        return cls(id=block.id, **create_block.model_dump())
+
+
+class FileSchema(FileMetadataBase):
+    """File with human-readable ID for agent file"""
+
+    __id_prefix__ = "file"
+    id: str = Field(..., description="Human-readable identifier for this file in the file")
+
+    @classmethod
+    def from_file_metadata(cls, file_metadata: FileMetadata) -> "FileSchema":
+        """Convert FileMetadata to FileSchema"""
+
+        create_file = FileMetadataBase(
+            source_id=file_metadata.source_id,
+            file_name=file_metadata.file_name,
+            original_file_name=file_metadata.original_file_name,
+            file_path=file_metadata.file_path,
+            file_type=file_metadata.file_type,
+            file_size=file_metadata.file_size,
+            file_creation_date=file_metadata.file_creation_date,
+            file_last_modified_date=file_metadata.file_last_modified_date,
+            processing_status=file_metadata.processing_status,
+            error_message=file_metadata.error_message,
+            total_chunks=file_metadata.total_chunks,
+            chunks_embedded=file_metadata.chunks_embedded,
+            content=file_metadata.content,
+        )
+
+        # Create FileSchema with the file's ID (will be remapped later)
+        return cls(id=file_metadata.id, **create_file.model_dump())
+
+
+class SourceSchema(SourceCreate):
+    """Source with human-readable ID for agent file"""
+
+    __id_prefix__ = "source"
+    id: str = Field(..., description="Human-readable identifier for this source in the file")
+
+    @classmethod
+    def from_source(cls, source: Source) -> "SourceSchema":
+        """Convert Block to BlockSchema"""
+
+        create_block = SourceCreate(
+            name=source.name,
+            description=source.description,
+            instructions=source.instructions,
+            metadata=source.metadata,
+            embedding_config=source.embedding_config,
+        )
+
+        # Create SourceSchema with the block's ID (will be remapped later)
+        return cls(id=source.id, **create_block.model_dump())
+
+
+# TODO: This one is quite thin, just a wrapper over Tool
+class ToolSchema(Tool):
+    """Tool with human-readable ID for agent file"""
+
+    __id_prefix__ = "tool"
+    id: str = Field(..., description="Human-readable identifier for this tool in the file")
+
+    @classmethod
+    def from_tool(cls, tool: Tool) -> "ToolSchema":
+        """Convert Tool to ToolSchema"""
+        return cls(**tool.model_dump())
+
+
+# class MCPServerSchema(RegisterMCPServer):
+#     """MCP Server with human-readable ID for agent file"""
+#     id: str = Field(..., description="Human-readable identifier for this MCP server in the file")
+
+
+class AgentFileSchema(BaseModel):
+    """Schema for serialized agent file that can be exported to JSON and imported into agent server."""
+
+    agents: List[AgentSchema] = Field(..., description="List of agents in this agent file")
+    groups: List[GroupSchema] = Field(..., description="List of groups in this agent file")
+    blocks: List[BlockSchema] = Field(..., description="List of memory blocks in this agent file")
+    files: List[FileSchema] = Field(..., description="List of files in this agent file")
+    sources: List[SourceSchema] = Field(..., description="List of sources in this agent file")
+    tools: List[ToolSchema] = Field(..., description="List of tools in this agent file")
+    # mcp_servers: List[MCPServerSchema] = Field(..., description="List of MCP servers in this agent file")
+    metadata: Dict[str, str] = Field(
+        default_factory=dict, description="Metadata for this agent file, including revision_id and other export information."
+    )
+    created_at: Optional[datetime] = Field(default=None, description="The timestamp when the object was created.")
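Since AgentFileSchema is a plain Pydantic model, exporting and re-importing an agent file is ordinary model serialization. A hedged sketch of a round trip (the empty lists and metadata value are placeholders, not real export data):

# Illustrative sketch: round-tripping an AgentFileSchema through JSON.
from letta.schemas.agent_file import AgentFileSchema

agent_file = AgentFileSchema(
    agents=[],
    groups=[],
    blocks=[],
    files=[],
    sources=[],
    tools=[],
    metadata={"revision_id": "example"},  # placeholder metadata
)

payload = agent_file.model_dump_json(indent=2)            # export, e.g. to an agent file on disk
restored = AgentFileSchema.model_validate_json(payload)   # import / validation path
assert restored.metadata["revision_id"] == "example"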
letta/schemas/block.py CHANGED
@@ -74,9 +74,6 @@ class Block(BaseBlock):
 
     id: str = BaseBlock.generate_id_field()
 
-    # associated user/agent
-    organization_id: Optional[str] = Field(None, description="The unique identifier of the organization associated with the block.")
-
     # default orm fields
     created_by_id: Optional[str] = Field(None, description="The id of the user that made this Block.")
     last_updated_by_id: Optional[str] = Field(None, description="The id of the user that last updated this Block.")
letta/schemas/file.py CHANGED
@@ -22,12 +22,7 @@ class FileMetadataBase(LettaBase):
 
     __id_prefix__ = "file"
 
-
-class FileMetadata(FileMetadataBase):
-    """Representation of a single FileMetadata"""
-
-    id: str = FileMetadataBase.generate_id_field()
-    organization_id: Optional[str] = Field(None, description="The unique identifier of the organization associated with the document.")
+    # Core file metadata fields
     source_id: str = Field(..., description="The unique identifier of the source associated with the document.")
     file_name: Optional[str] = Field(None, description="The name of the file.")
     original_file_name: Optional[str] = Field(None, description="The original name of the file as uploaded.")
@@ -43,13 +38,6 @@ class FileMetadata(FileMetadataBase):
     error_message: Optional[str] = Field(default=None, description="Optional error message if the file failed processing.")
     total_chunks: Optional[int] = Field(default=None, description="Total number of chunks for the file.")
     chunks_embedded: Optional[int] = Field(default=None, description="Number of chunks that have been embedded.")
-
-    # orm metadata, optional fields
-    created_at: Optional[datetime] = Field(default_factory=datetime.utcnow, description="The creation date of the file.")
-    updated_at: Optional[datetime] = Field(default_factory=datetime.utcnow, description="The update date of the file.")
-    is_deleted: bool = Field(False, description="Whether this file is deleted or not.")
-
-    # This is optional, and only occasionally pulled in since it can be very large
     content: Optional[str] = Field(
         default=None, description="Optional full-text content of the file; only populated on demand due to its size."
     )
@@ -59,11 +47,38 @@ class FileMetadata(FileMetadataBase):
         return self.processing_status in (FileProcessingStatus.COMPLETED, FileProcessingStatus.ERROR)
 
 
+class FileMetadata(FileMetadataBase):
+    """Representation of a single FileMetadata"""
+
+    id: str = FileMetadataBase.generate_id_field()
+    organization_id: Optional[str] = Field(None, description="The unique identifier of the organization associated with the document.")
+
+    # orm metadata, optional fields
+    created_at: Optional[datetime] = Field(default_factory=datetime.utcnow, description="The creation date of the file.")
+    updated_at: Optional[datetime] = Field(default_factory=datetime.utcnow, description="The update date of the file.")
+    is_deleted: bool = Field(False, description="Whether this file is deleted or not.")
+
+
 class FileAgentBase(LettaBase):
     """Base class for the FileMetadata-⇄-Agent association schemas"""
 
     __id_prefix__ = "file_agent"
 
+    # Core file-agent association fields
+    agent_id: str = Field(..., description="Unique identifier of the agent.")
+    file_id: str = Field(..., description="Unique identifier of the file.")
+    source_id: str = Field(..., description="Unique identifier of the source (denormalized from files.source_id).")
+    file_name: str = Field(..., description="Name of the file.")
+    is_open: bool = Field(True, description="True if the agent currently has the file open.")
+    visible_content: Optional[str] = Field(
+        None,
+        description="Portion of the file the agent is focused on (may be large).",
+    )
+    last_accessed_at: Optional[datetime] = Field(
+        default_factory=datetime.utcnow,
+        description="UTC timestamp of the agent’s most recent access to this file.",
+    )
+
 
 class FileAgent(FileAgentBase):
     """
@@ -83,19 +98,6 @@ class FileAgent(FileAgentBase):
         None,
         description="Org ID this association belongs to (inherited from both agent and file).",
     )
-    agent_id: str = Field(..., description="Unique identifier of the agent.")
-    file_id: str = Field(..., description="Unique identifier of the file.")
-    source_id: str = Field(..., description="Unique identifier of the source (denormalized from files.source_id).")
-    file_name: str = Field(..., description="Name of the file.")
-    is_open: bool = Field(True, description="True if the agent currently has the file open.")
-    visible_content: Optional[str] = Field(
-        None,
-        description="Portion of the file the agent is focused on (may be large).",
-    )
-    last_accessed_at: Optional[datetime] = Field(
-        default_factory=datetime.utcnow,
-        description="UTC timestamp of the agent’s most recent access to this file.",
-    )
 
     created_at: Optional[datetime] = Field(
         default_factory=datetime.utcnow,
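The net effect of this reshuffle is that the content fields now live on FileMetadataBase and FileAgentBase, while FileMetadata and FileAgent only add IDs and ORM bookkeeping; that is what lets FileSchema and FileAgentSchema in the new agent_file.py inherit the core fields without the ORM metadata. A generic sketch of the pattern (ExampleBase, ExampleRecord, and ExampleExport are illustrative names, not Letta classes):

# Generic sketch of the base/concrete split used above.
from datetime import datetime, timezone
from typing import Optional

from pydantic import BaseModel, Field


class ExampleBase(BaseModel):
    # Core content fields shared by every variant of the schema.
    file_name: Optional[str] = Field(None, description="Name of the file.")
    file_size: Optional[int] = Field(None, description="Size in bytes.")


class ExampleRecord(ExampleBase):
    # DB-backed representation: adds the generated ID and ORM metadata only.
    id: str
    organization_id: Optional[str] = None
    created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))


class ExampleExport(ExampleBase):
    # Export-oriented schema (like FileSchema): reuses the core fields but
    # carries a human-readable ID instead of ORM bookkeeping.
    id: str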
letta/schemas/letta_message.py CHANGED
@@ -40,15 +40,18 @@ class LettaMessage(BaseModel):
         message_type (MessageType): The type of the message
         otid (Optional[str]): The offline threading id associated with this message
         sender_id (Optional[str]): The id of the sender of the message, can be an identity id or agent id
+        step_id (Optional[str]): The step id associated with the message
+        is_err (Optional[bool]): Whether the message is an errored message or not. Used for debugging purposes only.
     """
 
     id: str
     date: datetime
-    name: Optional[str] = None
+    name: str | None = None
     message_type: MessageType = Field(..., description="The type of the message.")
-    otid: Optional[str] = None
-    sender_id: Optional[str] = None
-    step_id: Optional[str] = None
+    otid: str | None = None
+    sender_id: str | None = None
+    step_id: str | None = None
+    is_err: bool | None = None
 
     @field_serializer("date")
     def serialize_datetime(self, dt: datetime, _info):
@@ -60,6 +63,14 @@ class LettaMessage(BaseModel):
             dt = dt.replace(tzinfo=timezone.utc)
         return dt.isoformat(timespec="seconds")
 
+    @field_serializer("is_err", when_used="unless-none")
+    def serialize_is_err(self, value: bool | None, _info):
+        """
+        Only serialize is_err field when it's True (for debugging purposes).
+        When is_err is None or False, this field will be excluded from the JSON output.
+        """
+        return value if value is True else None
+
 
 class SystemMessage(LettaMessage):
     """
letta/schemas/memory.py CHANGED
@@ -174,7 +174,7 @@ class Memory(BaseModel, validate_assignment=True):
     def update_block_value(self, label: str, value: str):
         """Update the value of a block"""
         if not isinstance(value, str):
-            raise ValueError(f"Provided value must be a string")
+            raise ValueError("Provided value must be a string")
 
         for block in self.blocks:
             if block.label == label:
letta/schemas/message.py CHANGED
@@ -128,29 +128,28 @@ class MessageUpdate(BaseModel):
 
 class Message(BaseMessage):
     """
-    Letta's internal representation of a message. Includes methods to convert to/from LLM provider formats.
-
-    Attributes:
-        id (str): The unique identifier of the message.
-        role (MessageRole): The role of the participant.
-        text (str): The text of the message.
-        user_id (str): The unique identifier of the user.
-        agent_id (str): The unique identifier of the agent.
-        model (str): The model used to make the function call.
-        name (str): The name of the participant.
-        created_at (datetime): The time the message was created.
-        tool_calls (List[OpenAIToolCall,]): The list of tool calls requested.
-        tool_call_id (str): The id of the tool call.
-        step_id (str): The id of the step that this message was created in.
-        otid (str): The offline threading id associated with this message.
-        tool_returns (List[ToolReturn]): The list of tool returns requested.
-        group_id (str): The multi-agent group that the message was sent in.
-        sender_id (str): The id of the sender of the message, can be an identity id or agent id.
-
+    Letta's internal representation of a message. Includes methods to convert to/from LLM provider formats.
+
+    Attributes:
+        id (str): The unique identifier of the message.
+        role (MessageRole): The role of the participant.
+        text (str): The text of the message.
+        user_id (str): The unique identifier of the user.
+        agent_id (str): The unique identifier of the agent.
+        model (str): The model used to make the function call.
+        name (str): The name of the participant.
+        created_at (datetime): The time the message was created.
+        tool_calls (List[OpenAIToolCall,]): The list of tool calls requested.
+        tool_call_id (str): The id of the tool call.
+        step_id (str): The id of the step that this message was created in.
+        otid (str): The offline threading id associated with this message.
+        tool_returns (List[ToolReturn]): The list of tool returns requested.
+        group_id (str): The multi-agent group that the message was sent in.
+        sender_id (str): The id of the sender of the message, can be an identity id or agent id.
+    t
     """
 
     id: str = BaseMessage.generate_id_field()
-    organization_id: Optional[str] = Field(default=None, description="The unique identifier of the organization.")
     agent_id: Optional[str] = Field(default=None, description="The unique identifier of the agent.")
     model: Optional[str] = Field(default=None, description="The model used to make the function call.")
     # Basic OpenAI-style fields
@@ -172,6 +171,9 @@ class Message(BaseMessage):
     group_id: Optional[str] = Field(default=None, description="The multi-agent group that the message was sent in")
     sender_id: Optional[str] = Field(default=None, description="The id of the sender of the message, can be an identity id or agent id")
     batch_item_id: Optional[str] = Field(default=None, description="The id of the LLMBatchItem that this message is associated with")
+    is_err: Optional[bool] = Field(
+        default=None, description="Whether this message is part of an error step. Used only for debugging purposes."
+    )
     # This overrides the optional base orm schema, created_at MUST exist on all messages objects
     created_at: datetime = Field(default_factory=get_utc_time, description="The timestamp when the object was created.")
 
@@ -191,6 +193,7 @@ class Message(BaseMessage):
         if not is_utc_datetime(self.created_at):
             self.created_at = self.created_at.replace(tzinfo=timezone.utc)
         json_message["created_at"] = self.created_at.isoformat()
+        json_message.pop("is_err", None)  # make sure we don't include this debugging information
         return json_message
 
     @staticmethod
@@ -204,6 +207,7 @@ class Message(BaseMessage):
         assistant_message_tool_name: str = DEFAULT_MESSAGE_TOOL,
         assistant_message_tool_kwarg: str = DEFAULT_MESSAGE_TOOL_KWARG,
         reverse: bool = True,
+        include_err: Optional[bool] = None,
     ) -> List[LettaMessage]:
         if use_assistant_message:
             message_ids_to_remove = []
@@ -234,6 +238,7 @@ class Message(BaseMessage):
                     assistant_message_tool_name=assistant_message_tool_name,
                     assistant_message_tool_kwarg=assistant_message_tool_kwarg,
                     reverse=reverse,
+                    include_err=include_err,
                 )
             ]
 
@@ -243,6 +248,7 @@ class Message(BaseMessage):
         assistant_message_tool_name: str = DEFAULT_MESSAGE_TOOL,
         assistant_message_tool_kwarg: str = DEFAULT_MESSAGE_TOOL_KWARG,
         reverse: bool = True,
+        include_err: Optional[bool] = None,
    ) -> List[LettaMessage]:
         """Convert message object (in DB format) to the style used by the original Letta API"""
         messages = []
@@ -682,14 +688,13 @@ class Message(BaseMessage):
         # since the only "parts" we have are for supporting various COT
 
         if self.role == "system":
-            assert all([v is not None for v in [self.role]]), vars(self)
             openai_message = {
                 "content": text_content,
                 "role": "developer" if use_developer_message else self.role,
             }
 
         elif self.role == "user":
-            assert all([v is not None for v in [text_content, self.role]]), vars(self)
+            assert text_content is not None, vars(self)
             openai_message = {
                 "content": text_content,
                 "role": self.role,
@@ -720,7 +725,7 @@ class Message(BaseMessage):
                     tool_call_dict["id"] = tool_call_dict["id"][:max_tool_id_length]
 
         elif self.role == "tool":
-            assert all([v is not None for v in [self.role, self.tool_call_id]]), vars(self)
+            assert self.tool_call_id is not None, vars(self)
             openai_message = {
                 "content": text_content,
                 "role": self.role,
@@ -776,7 +781,7 @@ class Message(BaseMessage):
         if self.role == "system":
             # NOTE: this is not for system instructions, but instead system "events"
 
-            assert all([v is not None for v in [text_content, self.role]]), vars(self)
+            assert text_content is not None, vars(self)
             # Two options here, we would use system.package_system_message,
             # or use a more Anthropic-specific packaging ie xml tags
             user_system_event = add_xml_tag(string=f"SYSTEM ALERT: {text_content}", xml_tag="event")
@@ -875,7 +880,7 @@ class Message(BaseMessage):
 
         elif self.role == "tool":
             # NOTE: Anthropic uses role "user" for "tool" responses
-            assert all([v is not None for v in [self.role, self.tool_call_id]]), vars(self)
+            assert self.tool_call_id is not None, vars(self)
             anthropic_message = {
                 "role": "user",  # NOTE: diff
                 "content": [
@@ -988,7 +993,7 @@ class Message(BaseMessage):
 
         elif self.role == "tool":
             # NOTE: Significantly different tool calling format, more similar to function calling format
-            assert all([v is not None for v in [self.role, self.tool_call_id]]), vars(self)
+            assert self.tool_call_id is not None, vars(self)
 
             if self.name is None:
                 warnings.warn(f"Couldn't find function name on tool call, defaulting to tool ID instead.")
letta/schemas/openai/chat_completion_response.py CHANGED
@@ -106,7 +106,6 @@ class UsageStatistics(BaseModel):
     completion_tokens_details: Optional[UsageStatisticsCompletionTokenDetails] = None
 
     def __add__(self, other: "UsageStatistics") -> "UsageStatistics":
-
         if self.prompt_tokens_details is None and other.prompt_tokens_details is None:
             total_prompt_tokens_details = None
         elif self.prompt_tokens_details is None:
letta/schemas/providers.py CHANGED
@@ -1536,6 +1536,26 @@ class BedrockProvider(Provider):
     provider_category: ProviderCategory = Field(ProviderCategory.base, description="The category of the provider (base or byok)")
     region: str = Field(..., description="AWS region for Bedrock")
 
+    def check_api_key(self):
+        """Check if the Bedrock credentials are valid"""
+        from letta.errors import LLMAuthenticationError
+        from letta.llm_api.aws_bedrock import bedrock_get_model_list
+
+        try:
+            # For BYOK providers, use the custom credentials
+            if self.provider_category == ProviderCategory.byok:
+                # If we can list models, the credentials are valid
+                bedrock_get_model_list(
+                    region_name=self.region,
+                    access_key_id=self.access_key,
+                    secret_access_key=self.api_key,  # api_key stores the secret access key
+                )
+            else:
+                # For base providers, use default credentials
+                bedrock_get_model_list(region_name=self.region)
+        except Exception as e:
+            raise LLMAuthenticationError(message=f"Failed to authenticate with Bedrock: {e}")
+
     def list_llm_models(self):
         from letta.llm_api.aws_bedrock import bedrock_get_model_list
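check_api_key treats a successful model-list call as proof that the credentials work. A hedged sketch of the equivalent check done directly with boto3 (this bypasses Letta's bedrock_get_model_list helper and assumes the standard Bedrock ListFoundationModels API; the function name below is illustrative):

# Hedged sketch: validate Bedrock credentials by listing foundation models,
# the same "if we can list models, the credentials are valid" idea as above.
import boto3
from botocore.exceptions import ClientError


def check_bedrock_credentials(region: str, access_key_id: str | None = None, secret_access_key: str | None = None) -> None:
    """Raise if the given (or default-chain) AWS credentials cannot reach Bedrock."""
    client = boto3.client(
        "bedrock",
        region_name=region,
        aws_access_key_id=access_key_id,          # None falls back to the default credential chain
        aws_secret_access_key=secret_access_key,
    )
    try:
        client.list_foundation_models()
    except ClientError as e:
        raise RuntimeError(f"Failed to authenticate with Bedrock: {e}") from e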