letta-nightly 0.8.8.dev20250703104323__py3-none-any.whl → 0.8.9.dev20250703191231__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- letta/__init__.py +6 -1
- letta/agent.py +1 -0
- letta/agents/base_agent.py +8 -2
- letta/agents/ephemeral_summary_agent.py +33 -33
- letta/agents/letta_agent.py +104 -53
- letta/agents/voice_agent.py +2 -1
- letta/constants.py +8 -4
- letta/functions/function_sets/files.py +22 -7
- letta/functions/function_sets/multi_agent.py +34 -0
- letta/functions/types.py +1 -1
- letta/groups/helpers.py +8 -5
- letta/groups/sleeptime_multi_agent_v2.py +20 -15
- letta/interface.py +1 -1
- letta/interfaces/anthropic_streaming_interface.py +15 -8
- letta/interfaces/openai_chat_completions_streaming_interface.py +9 -6
- letta/interfaces/openai_streaming_interface.py +17 -11
- letta/llm_api/openai_client.py +2 -1
- letta/orm/agent.py +1 -0
- letta/orm/file.py +8 -2
- letta/orm/files_agents.py +36 -11
- letta/orm/mcp_server.py +3 -0
- letta/orm/source.py +2 -1
- letta/orm/step.py +3 -0
- letta/prompts/system/memgpt_v2_chat.txt +5 -8
- letta/schemas/agent.py +58 -23
- letta/schemas/embedding_config.py +3 -2
- letta/schemas/enums.py +4 -0
- letta/schemas/file.py +1 -0
- letta/schemas/letta_stop_reason.py +18 -0
- letta/schemas/mcp.py +15 -10
- letta/schemas/memory.py +35 -5
- letta/schemas/providers.py +11 -0
- letta/schemas/step.py +1 -0
- letta/schemas/tool.py +2 -1
- letta/server/rest_api/routers/v1/agents.py +320 -184
- letta/server/rest_api/routers/v1/groups.py +6 -2
- letta/server/rest_api/routers/v1/identities.py +6 -2
- letta/server/rest_api/routers/v1/jobs.py +49 -1
- letta/server/rest_api/routers/v1/sources.py +28 -19
- letta/server/rest_api/routers/v1/steps.py +7 -2
- letta/server/rest_api/routers/v1/tools.py +40 -9
- letta/server/rest_api/streaming_response.py +88 -0
- letta/server/server.py +61 -55
- letta/services/agent_manager.py +28 -16
- letta/services/file_manager.py +58 -9
- letta/services/file_processor/chunker/llama_index_chunker.py +2 -0
- letta/services/file_processor/embedder/openai_embedder.py +54 -10
- letta/services/file_processor/file_processor.py +59 -0
- letta/services/file_processor/parser/mistral_parser.py +2 -0
- letta/services/files_agents_manager.py +120 -2
- letta/services/helpers/agent_manager_helper.py +21 -4
- letta/services/job_manager.py +57 -6
- letta/services/mcp/base_client.py +1 -0
- letta/services/mcp_manager.py +13 -1
- letta/services/step_manager.py +14 -5
- letta/services/summarizer/summarizer.py +6 -22
- letta/services/tool_executor/builtin_tool_executor.py +0 -1
- letta/services/tool_executor/files_tool_executor.py +2 -2
- letta/services/tool_executor/multi_agent_tool_executor.py +23 -0
- letta/services/tool_manager.py +7 -7
- letta/settings.py +11 -2
- letta/templates/summary_request_text.j2 +19 -0
- letta/utils.py +95 -14
- {letta_nightly-0.8.8.dev20250703104323.dist-info → letta_nightly-0.8.9.dev20250703191231.dist-info}/METADATA +2 -2
- {letta_nightly-0.8.8.dev20250703104323.dist-info → letta_nightly-0.8.9.dev20250703191231.dist-info}/RECORD +69 -68
- /letta/{agents/prompts → prompts/system}/summary_system_prompt.txt +0 -0
- {letta_nightly-0.8.8.dev20250703104323.dist-info → letta_nightly-0.8.9.dev20250703191231.dist-info}/LICENSE +0 -0
- {letta_nightly-0.8.8.dev20250703104323.dist-info → letta_nightly-0.8.9.dev20250703191231.dist-info}/WHEEL +0 -0
- {letta_nightly-0.8.8.dev20250703104323.dist-info → letta_nightly-0.8.9.dev20250703191231.dist-info}/entry_points.txt +0 -0
letta/schemas/agent.py
CHANGED
@@ -4,12 +4,7 @@ from typing import Dict, List, Optional
 
 from pydantic import BaseModel, Field, field_validator, model_validator
 
-from letta.constants import (
-    CORE_MEMORY_LINE_NUMBER_WARNING,
-    DEFAULT_EMBEDDING_CHUNK_SIZE,
-    FILE_MEMORY_EMPTY_MESSAGE,
-    FILE_MEMORY_EXISTS_MESSAGE,
-)
+from letta.constants import CORE_MEMORY_LINE_NUMBER_WARNING, DEFAULT_EMBEDDING_CHUNK_SIZE
 from letta.schemas.block import CreateBlock
 from letta.schemas.embedding_config import EmbeddingConfig
 from letta.schemas.environment_variables import AgentEnvironmentVariable
@@ -319,8 +314,19 @@ def get_prompt_template_for_agent_type(agent_type: Optional[AgentType] = None):
     # However, they still allow files to be injected into the context
     if agent_type == AgentType.react_agent or agent_type == AgentType.workflow_agent:
         return (
-            "{% if file_blocks %}"
+            "{% if sources %}"
+            "<directories>\n"
+            "{% for source in sources %}"
+            f'<directory name="{{{{ source.name }}}}">\n'
+            "{% if source.description %}"
+            "<description>{{ source.description }}</description>\n"
+            "{% endif %}"
+            "{% if source.instructions %}"
+            "<instructions>{{ source.instructions }}</instructions>\n"
+            "{% endif %}"
+            "{% if file_blocks %}"
             "{% for block in file_blocks %}"
+            "{% if block.metadata and block.metadata.get('source_id') == source.id %}"
             f"<file status=\"{{{{ '{FileStatus.open.value}' if block.value else '{FileStatus.closed.value}' }}}}\">\n"
             "<{{ block.label }}>\n"
             "<description>\n"
@@ -334,11 +340,14 @@ def get_prompt_template_for_agent_type(agent_type: Optional[AgentType] = None):
             "<value>\n"
             "{{ block.value }}\n"
             "</value>\n"
-            "</{{ block.label }}>\n"
             "</file>\n"
-            "{%
+            "{% endif %}"
+            "{% endfor %}"
+            "{% endif %}"
+            "</directory>\n"
             "{% endfor %}"
-            "
+            "</directories>"
+            "{% endif %}"
         )
 
     # Sleeptime agents use the MemGPT v2 memory tools (line numbers)
@@ -372,10 +381,20 @@ def get_prompt_template_for_agent_type(agent_type: Optional[AgentType] = None):
             "{{ tool_usage_rules.value }}\n"
             "</tool_usage_rules>"
             "{% endif %}"
-            "\n\n{% if file_blocks %}"
+            "\n\n{% if sources %}"
+            "<directories>\n"
+            "{% for source in sources %}"
+            f'<directory name="{{{{ source.name }}}}">\n'
+            "{% if source.description %}"
+            "<description>{{ source.description }}</description>\n"
+            "{% endif %}"
+            "{% if source.instructions %}"
+            "<instructions>{{ source.instructions }}</instructions>\n"
+            "{% endif %}"
+            "{% if file_blocks %}"
             "{% for block in file_blocks %}"
-            f"<file status=\"{{{{ '{FileStatus.open.value}' if block.value else '{FileStatus.closed.value}' }}}}\">\n"
-            "<{{ block.label }}>\n"
+            "{% if block.metadata and block.metadata.get('source_id') == source.id %}"
+            f"<file status=\"{{{{ '{FileStatus.open.value}' if block.value else '{FileStatus.closed.value}' }}}}\" name=\"{{{{ block.label }}}}\">\n"
             "{% if block.description %}"
             "<description>\n"
             "{{ block.description }}\n"
@@ -391,14 +410,17 @@ def get_prompt_template_for_agent_type(agent_type: Optional[AgentType] = None):
             "{{ block.value }}\n"
             "</value>\n"
             "{% endif %}"
-            "</{{ block.label }}>\n"
             "</file>\n"
-            "{%
+            "{% endif %}"
+            "{% endfor %}"
+            "{% endif %}"
+            "</directory>\n"
             "{% endfor %}"
-            "
+            "</directories>"
+            "{% endif %}"
         )
 
-    #
+    # All other agent types use memory blocks
     else:
         return (
             "<memory_blocks>\nThe following memory blocks are currently engaged in your core memory unit:\n\n"
@@ -425,10 +447,20 @@ def get_prompt_template_for_agent_type(agent_type: Optional[AgentType] = None):
             "{{ tool_usage_rules.value }}\n"
             "</tool_usage_rules>"
             "{% endif %}"
-            "\n\n{% if file_blocks %}"
+            "\n\n{% if sources %}"
+            "<directories>\n"
+            "{% for source in sources %}"
+            f'<directory name="{{{{ source.name }}}}">\n'
+            "{% if source.description %}"
+            "<description>{{ source.description }}</description>\n"
+            "{% endif %}"
+            "{% if source.instructions %}"
+            "<instructions>{{ source.instructions }}</instructions>\n"
+            "{% endif %}"
+            "{% if file_blocks %}"
             "{% for block in file_blocks %}"
-            f"<file status=\"{{{{ '{FileStatus.open.value}' if block.value else '{FileStatus.closed.value}' }}}}\">\n"
-            "<{{ block.label }}>\n"
+            "{% if block.metadata and block.metadata.get('source_id') == source.id %}"
+            f"<file status=\"{{{{ '{FileStatus.open.value}' if block.value else '{FileStatus.closed.value}' }}}}\" name=\"{{{{ block.label }}}}\">\n"
             "{% if block.description %}"
             "<description>\n"
             "{{ block.description }}\n"
@@ -444,9 +476,12 @@ def get_prompt_template_for_agent_type(agent_type: Optional[AgentType] = None):
             "{{ block.value }}\n"
             "</value>\n"
             "{% endif %}"
-            "</{{ block.label }}>\n"
             "</file>\n"
-            "{%
+            "{% endif %}"
             "{% endfor %}"
-            "
+            "{% endif %}"
+            "</directory>\n"
+            "{% endfor %}"
+            "</directories>"
+            "{% endif %}"
         )
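The thrust of these template hunks is that file blocks are now grouped under per-source <directory> tags and filtered on block.metadata['source_id']. A minimal runnable sketch of that rendering with plain jinja2 (DemoSource and DemoBlock are illustrative stand-ins, not letta types, and the status values are hardcoded where the real template interpolates FileStatus):

# Illustrative sketch of the new <directories> rendering; simplified
# from the template in the diff. DemoSource/DemoBlock are stand-ins.
from dataclasses import dataclass, field
from typing import Optional
from jinja2 import Template

@dataclass
class DemoSource:
    id: str
    name: str
    description: Optional[str] = None

@dataclass
class DemoBlock:
    label: str
    value: str
    metadata: dict = field(default_factory=dict)

template = Template(
    "{% if sources %}<directories>\n"
    "{% for source in sources %}"
    '<directory name="{{ source.name }}">\n'
    "{% if source.description %}<description>{{ source.description }}</description>\n{% endif %}"
    "{% for block in file_blocks %}"
    "{% if block.metadata and block.metadata.get('source_id') == source.id %}"
    '<file status="{{ \'open\' if block.value else \'closed\' }}" name="{{ block.label }}">\n'
    "<value>\n{{ block.value }}\n</value>\n</file>\n"
    "{% endif %}"
    "{% endfor %}"
    "</directory>\n"
    "{% endfor %}"
    "</directories>{% endif %}"
)

source = DemoSource(id="source-1", name="docs", description="Project docs")
blocks = [DemoBlock(label="readme.md", value="# Hello", metadata={"source_id": "source-1"})]
print(template.render(sources=[source], file_blocks=blocks))

Only blocks whose metadata source_id matches the enclosing source render inside that directory, which is the filtering the added "{% if block.metadata ... %}" guard performs.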
letta/schemas/embedding_config.py
CHANGED
@@ -46,6 +46,7 @@ class EmbeddingConfig(BaseModel):
     embedding_dim: int = Field(..., description="The dimension of the embedding.")
     embedding_chunk_size: Optional[int] = Field(300, description="The chunk size of the embedding.")
     handle: Optional[str] = Field(None, description="The handle for this config, in the format provider/model-name.")
+    batch_size: int = Field(32, description="The maximum batch size for processing embeddings.")
 
     # azure only
     azure_endpoint: Optional[str] = Field(None, description="The Azure endpoint for the model.")
@@ -55,7 +56,7 @@ class EmbeddingConfig(BaseModel):
     @classmethod
     def default_config(cls, model_name: Optional[str] = None, provider: Optional[str] = None):
 
-        if model_name == "text-embedding-ada-002":
+        if model_name == "text-embedding-ada-002" and provider == "openai":
             return cls(
                 embedding_model="text-embedding-ada-002",
                 embedding_endpoint_type="openai",
@@ -63,7 +64,7 @@ class EmbeddingConfig(BaseModel):
                 embedding_dim=1536,
                 embedding_chunk_size=300,
             )
-        if model_name == "text-embedding-3-small" and provider == "openai":
+        if (model_name == "text-embedding-3-small" and provider == "openai") or (not model_name and provider == "openai"):
             return cls(
                 embedding_model="text-embedding-3-small",
                 embedding_endpoint_type="openai",
letta/schemas/enums.py
CHANGED
@@ -54,6 +54,10 @@ class JobStatus(str, Enum):
     cancelled = "cancelled"
     expired = "expired"
 
+    @property
+    def is_terminal(self):
+        return self in (JobStatus.completed, JobStatus.failed, JobStatus.cancelled, JobStatus.expired)
+
 
 class AgentStepStatus(str, Enum):
     """
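The new is_terminal property folds the four end states into a single check. A sketch of the polling loop it simplifies (get_job is a hypothetical fetch callable, not a letta API):

# Sketch: polling until a job reaches a terminal state using the new
# JobStatus.is_terminal property. get_job is hypothetical.
import time

from letta.schemas.enums import JobStatus

def wait_for_job(get_job, job_id: str, poll_seconds: float = 1.0):
    while True:
        job = get_job(job_id)
        if JobStatus(job.status).is_terminal:  # completed/failed/cancelled/expired
            return job
        time.sleep(poll_seconds)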
letta/schemas/file.py
CHANGED
@@ -30,6 +30,7 @@ class FileMetadata(FileMetadataBase):
     organization_id: Optional[str] = Field(None, description="The unique identifier of the organization associated with the document.")
     source_id: str = Field(..., description="The unique identifier of the source associated with the document.")
     file_name: Optional[str] = Field(None, description="The name of the file.")
+    original_file_name: Optional[str] = Field(None, description="The original name of the file as uploaded.")
     file_path: Optional[str] = Field(None, description="The path to the file.")
     file_type: Optional[str] = Field(None, description="The type of the file (MIME type).")
     file_size: Optional[int] = Field(None, description="The size of the file in bytes.")
letta/schemas/letta_stop_reason.py
CHANGED
@@ -3,6 +3,8 @@ from typing import Literal
 
 from pydantic import BaseModel, Field
 
+from letta.schemas.enums import JobStatus
+
 
 class StopReasonType(str, Enum):
     end_turn = "end_turn"
@@ -11,6 +13,22 @@ class StopReasonType(str, Enum):
     max_steps = "max_steps"
     no_tool_call = "no_tool_call"
     tool_rule = "tool_rule"
+    cancelled = "cancelled"
+
+    @property
+    def run_status(self) -> JobStatus:
+        if self in (
+            StopReasonType.end_turn,
+            StopReasonType.max_steps,
+            StopReasonType.tool_rule,
+        ):
+            return JobStatus.completed
+        elif self in (StopReasonType.error, StopReasonType.invalid_tool_call, StopReasonType.no_tool_call):
+            return JobStatus.failed
+        elif self == StopReasonType.cancelled:
+            return JobStatus.cancelled
+        else:
+            raise ValueError("Unknown StopReasonType")
 
 
 class LettaStopReason(BaseModel):
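run_status gives callers a direct stop-reason-to-job-status mapping. A small sketch of the mapping exactly as the diff defines it (assumes the letta package is installed):

# Sketch: the StopReasonType -> JobStatus mapping added in this release.
from letta.schemas.enums import JobStatus
from letta.schemas.letta_stop_reason import StopReasonType

assert StopReasonType.end_turn.run_status == JobStatus.completed
assert StopReasonType.no_tool_call.run_status == JobStatus.failed
assert StopReasonType.cancelled.run_status == JobStatus.cancelled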
letta/schemas/mcp.py
CHANGED
@@ -19,12 +19,13 @@ class BaseMCPServer(LettaBase):
 
 class MCPServer(BaseMCPServer):
     id: str = BaseMCPServer.generate_id_field()
-    server_type: MCPServerType = MCPServerType.SSE
+    server_type: MCPServerType = MCPServerType.STREAMABLE_HTTP
     server_name: str = Field(..., description="The name of the server")
 
-    # sse config
-    server_url: Optional[str] = Field(None, description="The URL of the server (MCP SSE client will connect to this URL)")
-    token: Optional[str] = Field(None, description="The access token or API key for the MCP server (used for SSE authentication)")
+    # sse / streamable http config
+    server_url: Optional[str] = Field(None, description="The URL of the server (MCP SSE/Streamable HTTP client will connect to this URL)")
+    token: Optional[str] = Field(None, description="The access token or API key for the MCP server (used for authentication)")
+    custom_headers: Optional[Dict[str, str]] = Field(None, description="Custom authentication headers as key-value pairs")
 
     # stdio config
     stdio_config: Optional[StdioServerConfig] = Field(
@@ -43,9 +44,9 @@ class MCPServer(BaseMCPServer):
             return SSEServerConfig(
                 server_name=self.server_name,
                 server_url=self.server_url,
-                auth_header=MCP_AUTH_HEADER_AUTHORIZATION if self.token else None,
-                auth_token=f"{MCP_AUTH_TOKEN_BEARER_PREFIX} {self.token}" if self.token else None,
-                custom_headers=
+                auth_header=MCP_AUTH_HEADER_AUTHORIZATION if self.token and not self.custom_headers else None,
+                auth_token=f"{MCP_AUTH_TOKEN_BEARER_PREFIX} {self.token}" if self.token and not self.custom_headers else None,
+                custom_headers=self.custom_headers,
             )
         elif self.server_type == MCPServerType.STDIO:
             if self.stdio_config is None:
@@ -57,9 +58,9 @@ class MCPServer(BaseMCPServer):
             return StreamableHTTPServerConfig(
                 server_name=self.server_name,
                 server_url=self.server_url,
-                auth_header=MCP_AUTH_HEADER_AUTHORIZATION if self.token else None,
-                auth_token=f"{MCP_AUTH_TOKEN_BEARER_PREFIX} {self.token}" if self.token else None,
-                custom_headers=
+                auth_header=MCP_AUTH_HEADER_AUTHORIZATION if self.token and not self.custom_headers else None,
+                auth_token=f"{MCP_AUTH_TOKEN_BEARER_PREFIX} {self.token}" if self.token and not self.custom_headers else None,
+                custom_headers=self.custom_headers,
             )
         else:
             raise ValueError(f"Unsupported server type: {self.server_type}")
@@ -70,6 +71,7 @@ class RegisterSSEMCPServer(LettaBase):
     server_type: MCPServerType = MCPServerType.SSE
     server_url: str = Field(..., description="The URL of the server (MCP SSE client will connect to this URL)")
     token: Optional[str] = Field(None, description="The access token or API key for the MCP server used for authentication")
+    custom_headers: Optional[Dict[str, str]] = Field(None, description="Custom authentication headers as key-value pairs")
 
 
 class RegisterStdioMCPServer(LettaBase):
@@ -84,6 +86,7 @@ class RegisterStreamableHTTPMCPServer(LettaBase):
     server_url: str = Field(..., description="The URL path for the streamable HTTP server (e.g., 'example/mcp')")
    auth_header: Optional[str] = Field(None, description="The name of the authentication header (e.g., 'Authorization')")
     auth_token: Optional[str] = Field(None, description="The authentication token or API key value")
+    custom_headers: Optional[Dict[str, str]] = Field(None, description="Custom authentication headers as key-value pairs")
 
 
 class UpdateSSEMCPServer(LettaBase):
@@ -92,6 +95,7 @@ class UpdateSSEMCPServer(LettaBase):
     server_name: Optional[str] = Field(None, description="The name of the server")
     server_url: Optional[str] = Field(None, description="The URL of the server (MCP SSE client will connect to this URL)")
     token: Optional[str] = Field(None, description="The access token or API key for the MCP server (used for SSE authentication)")
+    custom_headers: Optional[Dict[str, str]] = Field(None, description="Custom authentication headers as key-value pairs")
 
 
 class UpdateStdioMCPServer(LettaBase):
@@ -110,6 +114,7 @@ class UpdateStreamableHTTPMCPServer(LettaBase):
     server_url: Optional[str] = Field(None, description="The URL path for the streamable HTTP server (e.g., 'example/mcp')")
     auth_header: Optional[str] = Field(None, description="The name of the authentication header (e.g., 'Authorization')")
     auth_token: Optional[str] = Field(None, description="The authentication token or API key value")
+    custom_headers: Optional[Dict[str, str]] = Field(None, description="Custom authentication headers as key-value pairs")
 
 
 UpdateMCPServer = Union[UpdateSSEMCPServer, UpdateStdioMCPServer, UpdateStreamableHTTPMCPServer]
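The recurring pattern in these hunks is that explicit custom_headers suppress the token-derived Authorization header. A standalone sketch of that resolution rule (the constant values below are assumptions mirroring the names in the diff, not verified letta values):

# Standalone sketch of the auth-resolution rule the diff encodes: a bare
# token becomes an Authorization header only when no custom_headers are
# supplied. Constant values are assumed.
from typing import Dict, Optional, Tuple

MCP_AUTH_HEADER_AUTHORIZATION = "Authorization"  # assumed value
MCP_AUTH_TOKEN_BEARER_PREFIX = "Bearer"          # assumed value

def resolve_auth(
    token: Optional[str], custom_headers: Optional[Dict[str, str]]
) -> Tuple[Optional[str], Optional[str], Optional[Dict[str, str]]]:
    use_token = bool(token) and not custom_headers
    auth_header = MCP_AUTH_HEADER_AUTHORIZATION if use_token else None
    auth_token = f"{MCP_AUTH_TOKEN_BEARER_PREFIX} {token}" if use_token else None
    return auth_header, auth_token, custom_headers

print(resolve_auth("secret", None))                     # ('Authorization', 'Bearer secret', None)
print(resolve_auth("secret", {"X-Api-Key": "secret"}))  # (None, None, {'X-Api-Key': 'secret'})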
letta/schemas/memory.py
CHANGED
@@ -1,7 +1,8 @@
+import logging
 from typing import TYPE_CHECKING, List, Optional
 
 from jinja2 import Template, TemplateSyntaxError
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, field_validator
 
 # Forward referencing to avoid circular import with Agent -> Memory -> Agent
 if TYPE_CHECKING:
@@ -69,6 +70,30 @@ class Memory(BaseModel, validate_assignment=True):
         default_factory=list, description="Blocks representing the agent's in-context memory of an attached file"
     )
 
+    @field_validator("file_blocks")
+    @classmethod
+    def validate_file_blocks_no_duplicates(cls, v: List[Block]) -> List[Block]:
+        """Validate that file_blocks don't contain duplicate labels, log warnings and remove duplicates."""
+        if not v:
+            return v
+
+        seen_labels = set()
+        unique_blocks = []
+        duplicate_labels = []
+
+        for block in v:
+            if block.label in seen_labels:
+                duplicate_labels.append(block.label)
+            else:
+                seen_labels.add(block.label)
+                unique_blocks.append(block)
+
+        if duplicate_labels:
+            logger = logging.getLogger(__name__)
+            logger.warning(f"Duplicate block labels found in file_blocks: {duplicate_labels}. Removing duplicates.")
+
+        return unique_blocks
+
     # Memory.template is a Jinja2 template for compiling memory module into a prompt string.
     prompt_template: str = Field(
         default="{% for block in blocks %}"
@@ -99,7 +124,7 @@ class Memory(BaseModel, validate_assignment=True):
             Template(prompt_template)
 
             # Validate compatibility with current memory structure
-            Template(prompt_template).render(blocks=self.blocks, file_blocks=self.file_blocks)
+            Template(prompt_template).render(blocks=self.blocks, file_blocks=self.file_blocks, sources=[])
 
             # If we get here, the template is valid and compatible
             self.prompt_template = prompt_template
@@ -108,10 +133,15 @@ class Memory(BaseModel, validate_assignment=True):
         except Exception as e:
             raise ValueError(f"Prompt template is not compatible with current memory structure: {str(e)}")
 
-    def compile(self, tool_usage_rules=None) -> str:
+    def compile(self, tool_usage_rules=None, sources=None) -> str:
         """Generate a string representation of the memory in-context using the Jinja2 template"""
-        template = Template(self.prompt_template)
-        return template.render(blocks=self.blocks, file_blocks=self.file_blocks, tool_usage_rules=tool_usage_rules)
+        try:
+            template = Template(self.prompt_template)
+            return template.render(blocks=self.blocks, file_blocks=self.file_blocks, tool_usage_rules=tool_usage_rules, sources=sources)
+        except TemplateSyntaxError as e:
+            raise ValueError(f"Invalid Jinja2 template syntax: {str(e)}")
+        except Exception as e:
+            raise ValueError(f"Prompt template is not compatible with current memory structure: {str(e)}")
 
     def list_block_labels(self) -> List[str]:
         """Return a list of the block names held inside the memory object"""
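The new file_blocks validator silently drops later blocks that repeat an earlier label instead of erroring. A standalone sketch of that dedup pass (DemoBlock stands in for letta's Block):

# Standalone sketch of the duplicate-label filtering performed by the new
# validate_file_blocks_no_duplicates validator. DemoBlock is a stand-in.
import logging
from dataclasses import dataclass
from typing import List

@dataclass
class DemoBlock:
    label: str
    value: str = ""

def dedupe_by_label(blocks: List[DemoBlock]) -> List[DemoBlock]:
    seen, unique, duplicates = set(), [], []
    for block in blocks:
        if block.label in seen:
            duplicates.append(block.label)  # first occurrence wins
        else:
            seen.add(block.label)
            unique.append(block)
    if duplicates:
        logging.getLogger(__name__).warning(
            f"Duplicate block labels found in file_blocks: {duplicates}. Removing duplicates."
        )
    return unique

print(dedupe_by_label([DemoBlock("a"), DemoBlock("a"), DemoBlock("b")]))
# [DemoBlock(label='a', value=''), DemoBlock(label='b', value='')]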
letta/schemas/providers.py
CHANGED
@@ -181,6 +181,7 @@ class LettaProvider(Provider):
                 embedding_dim=1024,
                 embedding_chunk_size=300,
                 handle=self.get_handle("letta-free", is_embedding=True),
+                batch_size=32,
             )
         ]
 
@@ -301,6 +302,7 @@ class OpenAIProvider(Provider):
             if self.base_url == "https://api.openai.com/v1":
                 allowed_types = ["gpt-4", "o1", "o3", "o4"]
                 # NOTE: o1-mini and o1-preview do not support tool calling
+                # NOTE: o1-mini does not support system messages
                 # NOTE: o1-pro is only available in Responses API
                 disallowed_types = ["transcribe", "search", "realtime", "tts", "audio", "computer", "o1-mini", "o1-preview", "o1-pro"]
                 skip = True
@@ -352,6 +354,7 @@ class OpenAIProvider(Provider):
                 embedding_dim=1536,
                 embedding_chunk_size=300,
                 handle=self.get_handle("text-embedding-ada-002", is_embedding=True),
+                batch_size=1024,
             ),
             EmbeddingConfig(
                 embedding_model="text-embedding-3-small",
@@ -360,6 +363,7 @@ class OpenAIProvider(Provider):
                 embedding_dim=2000,
                 embedding_chunk_size=300,
                 handle=self.get_handle("text-embedding-3-small", is_embedding=True),
+                batch_size=1024,
             ),
             EmbeddingConfig(
                 embedding_model="text-embedding-3-large",
@@ -368,6 +372,7 @@ class OpenAIProvider(Provider):
                 embedding_dim=2000,
                 embedding_chunk_size=300,
                 handle=self.get_handle("text-embedding-3-large", is_embedding=True),
+                batch_size=1024,
             ),
         ]
 
@@ -387,6 +392,7 @@ class OpenAIProvider(Provider):
                 embedding_dim=1536,
                 embedding_chunk_size=300,
                 handle=self.get_handle("text-embedding-ada-002", is_embedding=True),
+                batch_size=1024,
             ),
             EmbeddingConfig(
                 embedding_model="text-embedding-3-small",
@@ -395,6 +401,7 @@ class OpenAIProvider(Provider):
                 embedding_dim=2000,
                 embedding_chunk_size=300,
                 handle=self.get_handle("text-embedding-3-small", is_embedding=True),
+                batch_size=1024,
             ),
             EmbeddingConfig(
                 embedding_model="text-embedding-3-large",
@@ -403,6 +410,7 @@ class OpenAIProvider(Provider):
                 embedding_dim=2000,
                 embedding_chunk_size=300,
                 handle=self.get_handle("text-embedding-3-large", is_embedding=True),
+                batch_size=1024,
             ),
         ]
 
@@ -1301,6 +1309,7 @@ class GoogleAIProvider(Provider):
                     embedding_dim=768,
                     embedding_chunk_size=300,  # NOTE: max is 2048
                     handle=self.get_handle(model, is_embedding=True),
+                    batch_size=1024,
                 )
             )
         return configs
@@ -1360,6 +1369,7 @@ class GoogleVertexProvider(Provider):
                     embedding_dim=dim,
                     embedding_chunk_size=300,  # NOTE: max is 2048
                     handle=self.get_handle(model, is_embedding=True),
+                    batch_size=1024,
                 )
             )
         return configs
@@ -1424,6 +1434,7 @@ class AzureProvider(Provider):
                     embedding_dim=768,
                     embedding_chunk_size=300,  # NOTE: max is 2048
                     handle=self.get_handle(model_name),
+                    batch_size=1024,
                 ),
             )
         return configs
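Each provider hunk threads a batch_size into its EmbeddingConfigs (32 for the hosted letta-free endpoint, 1024 elsewhere). A sketch of the kind of request batching this value enables downstream (embed_request is a hypothetical callable, not a letta API):

# Sketch: splitting texts into provider-sized embedding batches using the
# new batch_size value. embed_request is hypothetical.
from typing import Iterator, List

def batched(items: List[str], batch_size: int) -> Iterator[List[str]]:
    for start in range(0, len(items), batch_size):
        yield items[start : start + batch_size]

texts = [f"chunk {i}" for i in range(2500)]
batches = list(batched(texts, batch_size=1024))
print(len(batches))  # 3 requests instead of 2500 single-item calls
# for batch in batches:
#     embed_request(batch)  # hypothetical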
letta/schemas/step.py
CHANGED
@@ -35,3 +35,4 @@ class Step(StepBase):
     feedback: Optional[Literal["positive", "negative"]] = Field(
         None, description="The feedback for this step. Must be either 'positive' or 'negative'."
     )
+    project_id: Optional[str] = Field(None, description="The project that the agent that executed this step belongs to (cloud only).")
letta/schemas/tool.py
CHANGED
@@ -91,7 +91,8 @@ class Tool(BaseTool):
                 description=description,
                 append_heartbeat=False,
             )
-        else:
+        else:  # elif not self.json_schema:  # TODO: JSON schema is not being derived correctly the first time?
+            # If there's not a json_schema provided, then we need to re-derive
             try:
                 self.json_schema = derive_openai_json_schema(source_code=self.source_code)
             except Exception as e: