letta-nightly 0.11.7.dev20250917104122__py3-none-any.whl → 0.11.7.dev20250918104055__py3-none-any.whl
- letta/agent.py +3 -3
- letta/agents/agent_loop.py +2 -1
- letta/agents/base_agent.py +1 -1
- letta/agents/letta_agent_v2.py +3 -3
- letta/agents/temporal/activities/__init__.py +4 -0
- letta/agents/temporal/activities/example_activity.py +7 -0
- letta/agents/temporal/activities/prepare_messages.py +10 -0
- letta/agents/temporal/temporal_agent_workflow.py +56 -0
- letta/agents/temporal/types.py +25 -0
- letta/agents/voice_agent.py +2 -2
- letta/orm/agent.py +4 -3
- letta/prompts/prompt_generator.py +4 -4
- letta/schemas/agent.py +4 -193
- letta/schemas/enums.py +15 -0
- letta/schemas/memory.py +216 -103
- letta/schemas/step.py +5 -1
- letta/schemas/tool_rule.py +34 -44
- letta/server/rest_api/routers/v1/steps.py +29 -0
- letta/server/server.py +2 -2
- letta/services/agent_manager.py +4 -6
- letta/services/helpers/agent_manager_helper.py +4 -4
- letta/services/step_manager.py +26 -0
- letta/services/summarizer/summarizer.py +25 -3
- letta/services/tool_executor/sandbox_tool_executor.py +2 -2
- letta/services/tool_sandbox/base.py +135 -8
- letta/settings.py +2 -2
- {letta_nightly-0.11.7.dev20250917104122.dist-info → letta_nightly-0.11.7.dev20250918104055.dist-info}/METADATA +2 -2
- {letta_nightly-0.11.7.dev20250917104122.dist-info → letta_nightly-0.11.7.dev20250918104055.dist-info}/RECORD +31 -27
- letta/templates/template_helper.py +0 -53
- {letta_nightly-0.11.7.dev20250917104122.dist-info → letta_nightly-0.11.7.dev20250918104055.dist-info}/WHEEL +0 -0
- {letta_nightly-0.11.7.dev20250917104122.dist-info → letta_nightly-0.11.7.dev20250918104055.dist-info}/entry_points.txt +0 -0
- {letta_nightly-0.11.7.dev20250917104122.dist-info → letta_nightly-0.11.7.dev20250918104055.dist-info}/licenses/LICENSE +0 -0
letta/schemas/memory.py
CHANGED
@@ -1,20 +1,17 @@
 import asyncio
 import logging
 from datetime import datetime
-from
-
-from jinja2 import Template, TemplateSyntaxError
-from pydantic import BaseModel, Field, field_validator
-
-# Forward referencing to avoid circular import with Agent -> Memory -> Agent
-if TYPE_CHECKING:
-    pass
+from io import StringIO
+from typing import TYPE_CHECKING, List, Optional, Union

 from openai.types.beta.function_tool import FunctionTool as OpenAITool
+from pydantic import BaseModel, Field, field_validator

-from letta.constants import CORE_MEMORY_BLOCK_CHAR_LIMIT
+from letta.constants import CORE_MEMORY_BLOCK_CHAR_LIMIT, CORE_MEMORY_LINE_NUMBER_WARNING
 from letta.otel.tracing import trace_method
 from letta.schemas.block import Block, FileBlock
+from letta.schemas.enums import AgentType
+from letta.schemas.file import FileStatus
 from letta.schemas.message import Message


@@ -23,12 +20,9 @@ class ContextWindowOverview(BaseModel):
     Overview of the context window, including the number of messages and tokens.
     """

-    # top-level information
     context_window_size_max: int = Field(..., description="The maximum amount of tokens the context window can hold.")
     context_window_size_current: int = Field(..., description="The current number of tokens in the context window.")

-    # context window breakdown (in messages)
-    # (technically not in the context window, but useful to know)
     num_messages: int = Field(..., description="The number of messages in the context window.")
     num_archival_memory: int = Field(..., description="The number of messages in the archival memory.")
     num_recall_memory: int = Field(..., description="The number of messages in the recall memory.")
@@ -39,9 +33,6 @@ class ContextWindowOverview(BaseModel):
         ..., description="The metadata summary of the external memory sources (archival + recall metadata)."
     )

-    # context window breakdown (in tokens)
-    # this should all add up to context_window_size_current
-
     num_tokens_system: int = Field(..., description="The number of tokens in the system prompt.")
     system_prompt: str = Field(..., description="The content of the system prompt.")

@@ -55,8 +46,6 @@ class ContextWindowOverview(BaseModel):
     functions_definitions: Optional[List[OpenAITool]] = Field(..., description="The content of the functions definitions.")

     num_tokens_messages: int = Field(..., description="The number of tokens in the messages list.")
-    # TODO make list of messages?
-    # messages: List[dict] = Field(..., description="The messages in the context window.")
     messages: List[Message] = Field(..., description="The messages in the context window.")


@@ -67,7 +56,7 @@ class Memory(BaseModel, validate_assignment=True):

     """

-
+    agent_type: Optional[Union["AgentType", str]] = Field(None, description="Agent type controlling prompt rendering.")
     blocks: List[Block] = Field(..., description="Memory blocks contained in the agent's in-context memory")
     file_blocks: List[FileBlock] = Field(
         default_factory=list, description="Special blocks representing the agent's in-context memory of an attached file"
@@ -97,111 +86,238 @@ class Memory(BaseModel, validate_assignment=True):

         return unique_blocks

-
-    prompt_template: str = Field(
-        default="{% for block in blocks %}"
-        "<{{ block.label }}>\n"
-        "<metadata>"
-        'read_only="{{ block.read_only}}" chars_current="{{ block.value|length }}" chars_limit="{{ block.limit }}"'
-        "</metadata>"
-        "<value>"
-        "{{ block.value }}\n"
-        "</value>"
-        "</{{ block.label }}>\n"
-        "{% if not loop.last %}\n{% endif %}"
-        "{% endfor %}",
-        description="Jinja2 template for compiling memory blocks into a prompt string",
-    )
+    prompt_template: str = Field(default="", description="Deprecated. Ignored for performance.")

     def get_prompt_template(self) -> str:
-        """Return the
+        """Return the stored (deprecated) prompt template string."""
         return str(self.prompt_template)

     @trace_method
     def set_prompt_template(self, prompt_template: str):
-        """
-
-        Validates the template syntax and compatibility with current memory structure.
-        """
-        try:
-            # Validate Jinja2 syntax
-            Template(prompt_template)
-
-            # Validate compatibility with current memory structure
-            Template(prompt_template).render(blocks=self.blocks, file_blocks=self.file_blocks, sources=[], max_files_open=None)
-
-            # If we get here, the template is valid and compatible
-            self.prompt_template = prompt_template
-        except TemplateSyntaxError as e:
-            raise ValueError(f"Invalid Jinja2 template syntax: {str(e)}")
-        except Exception as e:
-            raise ValueError(f"Prompt template is not compatible with current memory structure: {str(e)}")
+        """Deprecated. Stores the provided string but is not used for rendering."""
+        self.prompt_template = prompt_template

     @trace_method
     async def set_prompt_template_async(self, prompt_template: str):
-        """
-
-        """
-        try:
-            # Validate Jinja2 syntax with async enabled
-            Template(prompt_template)
-
-            # Validate compatibility with current memory structure - use async rendering
-            template = Template(prompt_template)
-            await asyncio.to_thread(template.render, blocks=self.blocks, file_blocks=self.file_blocks, sources=[], max_files_open=None)
-
-            # If we get here, the template is valid and compatible
-            self.prompt_template = prompt_template
-        except TemplateSyntaxError as e:
-            raise ValueError(f"Invalid Jinja2 template syntax: {str(e)}")
-        except Exception as e:
-            raise ValueError(f"Prompt template is not compatible with current memory structure: {str(e)}")
+        """Deprecated. Async setter that stores the string but does not validate or use it."""
+        self.prompt_template = prompt_template

     @trace_method
+    def _render_memory_blocks_standard(self, s: StringIO):
+        if len(self.blocks) == 0:
+            # s.write("<memory_blocks></memory_blocks>") # TODO: consider empty tags
+            s.write("")
+            return
+
+        s.write("<memory_blocks>\nThe following memory blocks are currently engaged in your core memory unit:\n\n")
+        for idx, block in enumerate(self.blocks):
+            label = block.label or "block"
+            value = block.value or ""
+            desc = block.description or ""
+            chars_current = len(value)
+            limit = block.limit if block.limit is not None else 0
+
+            s.write(f"<{label}>\n")
+            s.write("<description>\n")
+            s.write(f"{desc}\n")
+            s.write("</description>\n")
+            s.write("<metadata>")
+            if getattr(block, "read_only", False):
+                s.write("\n- read_only=true")
+            s.write(f"\n- chars_current={chars_current}")
+            s.write(f"\n- chars_limit={limit}\n")
+            s.write("</metadata>\n")
+            s.write("<value>\n")
+            s.write(f"{value}\n")
+            s.write("</value>\n")
+            s.write(f"</{label}>\n")
+            if idx != len(self.blocks) - 1:
+                s.write("\n")
+        s.write("\n</memory_blocks>")
+
+    def _render_memory_blocks_line_numbered(self, s: StringIO):
+        s.write("<memory_blocks>\nThe following memory blocks are currently engaged in your core memory unit:\n\n")
+        for idx, block in enumerate(self.blocks):
+            label = block.label or "block"
+            value = block.value or ""
+            desc = block.description or ""
+            limit = block.limit if block.limit is not None else 0
+
+            s.write(f"<{label}>\n")
+            s.write("<description>\n")
+            s.write(f"{desc}\n")
+            s.write("</description>\n")
+            s.write("<metadata>")
+            if getattr(block, "read_only", False):
+                s.write("\n- read_only=true")
+            s.write(f"\n- chars_current={len(value)}")
+            s.write(f"\n- chars_limit={limit}\n")
+            s.write("</metadata>\n")
+            s.write("<value>\n")
+            s.write(f"{CORE_MEMORY_LINE_NUMBER_WARNING}\n")
+            if value:
+                for i, line in enumerate(value.split("\n"), start=1):
+                    s.write(f"Line {i}: {line}\n")
+            s.write("</value>\n")
+            s.write(f"</{label}>\n")
+            if idx != len(self.blocks) - 1:
+                s.write("\n")
+        s.write("\n</memory_blocks>")
+
+    def _render_directories_common(self, s: StringIO, sources, max_files_open):
+        s.write("\n\n<directories>\n")
+        if max_files_open is not None:
+            current_open = sum(1 for b in self.file_blocks if getattr(b, "value", None))
+            s.write("<file_limits>\n")
+            s.write(f"- current_files_open={current_open}\n")
+            s.write(f"- max_files_open={max_files_open}\n")
+            s.write("</file_limits>\n")
+
+        for source in sources:
+            source_name = getattr(source, "name", "")
+            source_desc = getattr(source, "description", None)
+            source_instr = getattr(source, "instructions", None)
+            source_id = getattr(source, "id", None)
+
+            s.write(f'<directory name="{source_name}">\n')
+            if source_desc:
+                s.write(f"<description>{source_desc}</description>\n")
+            if source_instr:
+                s.write(f"<instructions>{source_instr}</instructions>\n")
+
+            if self.file_blocks:
+                for fb in self.file_blocks:
+                    if source_id is not None and getattr(fb, "source_id", None) == source_id:
+                        status = FileStatus.open.value if getattr(fb, "value", None) else FileStatus.closed.value
+                        label = fb.label or "file"
+                        desc = fb.description or ""
+                        chars_current = len(fb.value or "")
+                        limit = fb.limit if fb.limit is not None else 0
+
+                        s.write(f'<file status="{status}" name="{label}">\n')
+                        if desc:
+                            s.write("<description>\n")
+                            s.write(f"{desc}\n")
+                            s.write("</description>\n")
+                        s.write("<metadata>")
+                        if getattr(fb, "read_only", False):
+                            s.write("\n- read_only=true")
+                        s.write(f"\n- chars_current={chars_current}\n")
+                        s.write(f"- chars_limit={limit}\n")
+                        s.write("</metadata>\n")
+                        if getattr(fb, "value", None):
+                            s.write("<value>\n")
+                            s.write(f"{fb.value}\n")
+                            s.write("</value>\n")
+                        s.write("</file>\n")
+
+            s.write("</directory>\n")
+        s.write("</directories>")
+
+    def _render_directories_react(self, s: StringIO, sources, max_files_open):
+        s.write("\n\n<directories>\n")
+        if max_files_open is not None:
+            current_open = sum(1 for b in self.file_blocks if getattr(b, "value", None))
+            s.write("<file_limits>\n")
+            s.write(f"- current_files_open={current_open}\n")
+            s.write(f"- max_files_open={max_files_open}\n")
+            s.write("</file_limits>\n")
+
+        for source in sources:
+            source_name = getattr(source, "name", "")
+            source_desc = getattr(source, "description", None)
+            source_instr = getattr(source, "instructions", None)
+            source_id = getattr(source, "id", None)
+
+            s.write(f'<directory name="{source_name}">\n')
+            if source_desc:
+                s.write(f"<description>{source_desc}</description>\n")
+            if source_instr:
+                s.write(f"<instructions>{source_instr}</instructions>\n")
+
+            if self.file_blocks:
+                for fb in self.file_blocks:
+                    if source_id is not None and getattr(fb, "source_id", None) == source_id:
+                        status = FileStatus.open.value if getattr(fb, "value", None) else FileStatus.closed.value
+                        label = fb.label or "file"
+                        desc = fb.description or ""
+                        chars_current = len(fb.value or "")
+                        limit = fb.limit if fb.limit is not None else 0
+
+                        s.write(f'<file status="{status}">\n')
+                        s.write(f"<{label}>\n")
+                        s.write("<description>\n")
+                        s.write(f"{desc}\n")
+                        s.write("</description>\n")
+                        s.write("<metadata>")
+                        if getattr(fb, "read_only", False):
+                            s.write("\n- read_only=true")
+                        s.write(f"\n- chars_current={chars_current}\n")
+                        s.write(f"- chars_limit={limit}\n")
+                        s.write("</metadata>\n")
+                        s.write("<value>\n")
+                        s.write(f"{fb.value or ''}\n")
+                        s.write("</value>\n")
+                        s.write(f"</{label}>\n")
+                        s.write("</file>\n")
+
+            s.write("</directory>\n")
+        s.write("</directories>")
+
     def compile(self, tool_usage_rules=None, sources=None, max_files_open=None) -> str:
-        """
-
-
-
-
-
-
-
-
-
-
-
-
-
+        """Efficiently render memory, tool rules, and sources into a prompt string."""
+        s = StringIO()
+
+        raw_type = self.agent_type.value if hasattr(self.agent_type, "value") else (self.agent_type or "")
+        norm_type = raw_type.lower()
+        is_react = norm_type in ("react_agent", "workflow_agent")
+        is_line_numbered = norm_type in ("sleeptime_agent", "memgpt_v2_agent")
+
+        # Memory blocks (not for react/workflow). Always include wrapper for preview/tests.
+        if not is_react:
+            if is_line_numbered:
+                self._render_memory_blocks_line_numbered(s)
+            else:
+                self._render_memory_blocks_standard(s)
+
+        if tool_usage_rules is not None:
+            desc = getattr(tool_usage_rules, "description", None) or ""
+            val = getattr(tool_usage_rules, "value", None) or ""
+            s.write("\n\n<tool_usage_rules>\n")
+            s.write(f"{desc}\n\n")
+            s.write(f"{val}\n")
+            s.write("</tool_usage_rules>")
+
+        if sources:
+            if is_react:
+                self._render_directories_react(s, sources, max_files_open)
+            else:
+                self._render_directories_common(s, sources, max_files_open)
+
+        return s.getvalue()

     @trace_method
     async def compile_async(self, tool_usage_rules=None, sources=None, max_files_open=None) -> str:
-        """Async version
-
-
-
-
-
-
-            sources=sources,
-            max_files_open=max_files_open,
-        )
-        except TemplateSyntaxError as e:
-            raise ValueError(f"Invalid Jinja2 template syntax: {str(e)}")
-        except Exception as e:
-            raise ValueError(f"Prompt template is not compatible with current memory structure: {str(e)}")
+        """Async version that offloads to a thread for CPU-bound string building."""
+        return await asyncio.to_thread(
+            self.compile,
+            tool_usage_rules=tool_usage_rules,
+            sources=sources,
+            max_files_open=max_files_open,
+        )

     @trace_method
     async def compile_in_thread_async(self, tool_usage_rules=None, sources=None, max_files_open=None) -> str:
-        """
-
+        """Deprecated: use compile() instead."""
+        import warnings
+
+        warnings.warn("compile_in_thread_async is deprecated; use compile()", DeprecationWarning, stacklevel=2)
+        return self.compile(tool_usage_rules=tool_usage_rules, sources=sources, max_files_open=max_files_open)

     def list_block_labels(self) -> List[str]:
         """Return a list of the block names held inside the memory object"""
-        # return list(self.memory.keys())
         return [block.label for block in self.blocks]

-    # TODO: these should actually be label, not name
     def get_block(self, label: str) -> Block:
         """Correct way to index into the memory.memory field, returns a Block"""
         keys = []
@@ -213,7 +329,6 @@ class Memory(BaseModel, validate_assignment=True):

     def get_blocks(self) -> List[Block]:
         """Return a list of the blocks held inside the memory object"""
-        # return list(self.memory.values())
         return self.blocks

     def set_block(self, block: Block):
@@ -236,7 +351,6 @@ class Memory(BaseModel, validate_assignment=True):
         raise ValueError(f"Block with label {label} does not exist")


-# TODO: ideally this is refactored into ChatMemory and the subclasses are given more specific names.
 class BasicBlockMemory(Memory):
     """
     BasicBlockMemory is a basic implemention of the Memory class, which takes in a list of blocks and links them to the memory object. These are editable by the agent via the core memory functions.
@@ -308,7 +422,6 @@ class ChatMemory(BasicBlockMemory):
             human (str): The starter value for the human block.
             limit (int): The character limit for each block.
         """
-        # TODO: Should these be CreateBlocks?
        super().__init__(blocks=[Block(value=persona, limit=limit, label="persona"), Block(value=human, limit=limit, label="human")])

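Illustrative sketch (not part of the diff): with the Jinja2 `prompt_template` retired, `Memory.compile()` now builds the prompt directly with `StringIO`, branching on `agent_type`. Assuming the `Memory` and `Block` constructors shown in the hunks above, usage might look like this; the block contents are placeholders.

```python
# Hypothetical usage sketch of the new compile() path; field names come from the diff above.
from letta.schemas.block import Block
from letta.schemas.memory import Memory

memory = Memory(
    agent_type="memgpt_v2_agent",  # selects the line-numbered rendering branch
    blocks=[Block(label="human", value="Name: Sarah\nTimezone: UTC", limit=2000)],
)

prompt = memory.compile()  # no Jinja2 rendering involved anymore
print(prompt)
# Expected shape (abridged): <memory_blocks> ... <human> ... Line 1: Name: Sarah ... </human> ... </memory_blocks>
```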
letta/schemas/step.py
CHANGED
@@ -35,7 +35,11 @@ class Step(StepBase):
     tags: List[str] = Field([], description="Metadata tags.")
     tid: Optional[str] = Field(None, description="The unique identifier of the transaction that processed this step.")
     trace_id: Optional[str] = Field(None, description="The trace id of the agent step.")
-    messages: List[Message] = Field(
+    messages: List[Message] = Field(
+        [],
+        description="The messages generated during this step. Deprecated: use `GET /v1/steps/{step_id}/messages` endpoint instead",
+        deprecated=True,
+    )
     feedback: Optional[Literal["positive", "negative"]] = Field(
         None, description="The feedback for this step. Must be either 'positive' or 'negative'."
     )
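The `Step.messages` change above leans on Pydantic's `deprecated` field flag. A minimal, self-contained sketch of that mechanism (toy model, not Letta code; assumes pydantic >= 2.7):

```python
# Toy model showing Field(deprecated=True): the field still works but is flagged
# in the JSON schema and emits a DeprecationWarning when accessed.
from pydantic import BaseModel, Field


class ExampleStep(BaseModel):
    messages: list[str] = Field([], description="Deprecated: fetch messages via the steps API instead", deprecated=True)


step = ExampleStep()
_ = step.messages  # raises a DeprecationWarning at access time
```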
letta/schemas/tool_rule.py
CHANGED
@@ -2,7 +2,6 @@ import json
 import logging
 from typing import Annotated, Any, Dict, List, Literal, Optional, Set, Union

-from jinja2 import Template
 from pydantic import Field, field_validator

 from letta.schemas.enums import ToolRuleType
@@ -17,7 +16,7 @@ class BaseToolRule(LettaBase):
     type: ToolRuleType = Field(..., description="The type of the message.")
     prompt_template: Optional[str] = Field(
         None,
-        description="Optional
+        description="Optional template string (ignored). Rendering uses fast built-in formatting for performance.",
     )

     def __hash__(self):
@@ -34,22 +33,8 @@ class BaseToolRule(LettaBase):
         raise NotImplementedError

     def render_prompt(self) -> str | None:
-        """
-
-            return None
-
-        try:
-            template = Template(self.prompt_template)
-            return template.render(**self.model_dump())
-        except Exception as e:
-            logger.warning(
-                "Failed to render prompt template for tool rule '%s' (type: %s). Template: '%s'. Error: %s",
-                self.tool_name,
-                self.type,
-                self.prompt_template,
-                e,
-            )
-            return None
+        """Default implementation returns None. Subclasses provide optimized strings."""
+        return None


 class ChildToolRule(BaseToolRule):
@@ -60,8 +45,8 @@ class ChildToolRule(BaseToolRule):
     type: Literal[ToolRuleType.constrain_child_tools] = ToolRuleType.constrain_child_tools
     children: List[str] = Field(..., description="The children tools that can be invoked.")
     prompt_template: Optional[str] = Field(
-        default=
-        description="Optional
+        default=None,
+        description="Optional template string (ignored).",
     )

     def __hash__(self):
@@ -78,6 +63,10 @@ class ChildToolRule(BaseToolRule):
         last_tool = tool_call_history[-1] if tool_call_history else None
         return set(self.children) if last_tool == self.tool_name else available_tools

+    def render_prompt(self) -> str | None:
+        children_str = ", ".join(self.children)
+        return f"<tool_rule>\nAfter using {self.tool_name}, you must use one of these tools: {children_str}\n</tool_rule>"
+

 class ParentToolRule(BaseToolRule):
     """
@@ -86,10 +75,7 @@ class ParentToolRule(BaseToolRule):

     type: Literal[ToolRuleType.parent_last_tool] = ToolRuleType.parent_last_tool
     children: List[str] = Field(..., description="The children tools that can be invoked.")
-    prompt_template: Optional[str] = Field(
-        default="<tool_rule>\n{{ children | join(', ') }} can only be used after {{ tool_name }}\n</tool_rule>",
-        description="Optional Jinja2 template for generating agent prompt about this tool rule.",
-    )
+    prompt_template: Optional[str] = Field(default=None, description="Optional template string (ignored).")

     def __hash__(self):
         """Hash including children list (sorted for consistency)."""
@@ -105,6 +91,10 @@ class ParentToolRule(BaseToolRule):
         last_tool = tool_call_history[-1] if tool_call_history else None
         return set(self.children) if last_tool == self.tool_name else available_tools - set(self.children)

+    def render_prompt(self) -> str | None:
+        children_str = ", ".join(self.children)
+        return f"<tool_rule>\n{children_str} can only be used after {self.tool_name}\n</tool_rule>"
+

 class ConditionalToolRule(BaseToolRule):
     """
@@ -115,10 +105,7 @@ class ConditionalToolRule(BaseToolRule):
     default_child: Optional[str] = Field(None, description="The default child tool to be called. If None, any tool can be called.")
     child_output_mapping: Dict[Any, str] = Field(..., description="The output case to check for mapping")
     require_output_mapping: bool = Field(default=False, description="Whether to throw an error when output doesn't match any case")
-    prompt_template: Optional[str] = Field(
-        default="<tool_rule>\n{{ tool_name }} will determine which tool to use next based on its output\n</tool_rule>",
-        description="Optional Jinja2 template for generating agent prompt about this tool rule.",
-    )
+    prompt_template: Optional[str] = Field(default=None, description="Optional template string (ignored).")

     def __hash__(self):
         """Hash including all configuration fields."""
@@ -165,6 +152,9 @@ class ConditionalToolRule(BaseToolRule):

         return {self.default_child} if self.default_child else available_tools

+    def render_prompt(self) -> str | None:
+        return f"<tool_rule>\n{self.tool_name} will determine which tool to use next based on its output\n</tool_rule>"
+
     @field_validator("child_output_mapping")
     @classmethod
     def validate_child_output_mapping(cls, v):
@@ -205,10 +195,10 @@ class TerminalToolRule(BaseToolRule):
     """

     type: Literal[ToolRuleType.exit_loop] = ToolRuleType.exit_loop
-    prompt_template: Optional[str] = Field(
-
-
-
+    prompt_template: Optional[str] = Field(default=None, description="Optional template string (ignored).")
+
+    def render_prompt(self) -> str | None:
+        return f"<tool_rule>\n{self.tool_name} ends your response (yields control) when called\n</tool_rule>"


 class ContinueToolRule(BaseToolRule):
@@ -217,10 +207,10 @@ class ContinueToolRule(BaseToolRule):
     """

     type: Literal[ToolRuleType.continue_loop] = ToolRuleType.continue_loop
-    prompt_template: Optional[str] = Field(
-
-
-
+    prompt_template: Optional[str] = Field(default=None, description="Optional template string (ignored).")
+
+    def render_prompt(self) -> str | None:
+        return f"<tool_rule>\n{self.tool_name} requires continuing your response when called\n</tool_rule>"


 class RequiredBeforeExitToolRule(BaseToolRule):
@@ -229,15 +219,15 @@ class RequiredBeforeExitToolRule(BaseToolRule):
     """

     type: Literal[ToolRuleType.required_before_exit] = ToolRuleType.required_before_exit
-    prompt_template: Optional[str] = Field(
-        default="<tool_rule>{{ tool_name }} must be called before ending the conversation</tool_rule>",
-        description="Optional Jinja2 template for generating agent prompt about this tool rule.",
-    )
+    prompt_template: Optional[str] = Field(default=None, description="Optional template string (ignored).")

     def get_valid_tools(self, tool_call_history: List[str], available_tools: Set[str], last_function_response: Optional[str]) -> Set[str]:
         """Returns all available tools - the logic for preventing exit is handled elsewhere."""
         return available_tools

+    def render_prompt(self) -> str | None:
+        return f"<tool_rule>{self.tool_name} must be called before ending the conversation</tool_rule>"
+

 class MaxCountPerStepToolRule(BaseToolRule):
     """
@@ -246,10 +236,7 @@ class MaxCountPerStepToolRule(BaseToolRule):

     type: Literal[ToolRuleType.max_count_per_step] = ToolRuleType.max_count_per_step
     max_count_limit: int = Field(..., description="The max limit for the total number of times this tool can be invoked in a single step.")
-    prompt_template: Optional[str] = Field(
-        default="<tool_rule>\n{{ tool_name }}: at most {{ max_count_limit }} use(s) per response\n</tool_rule>",
-        description="Optional Jinja2 template for generating agent prompt about this tool rule.",
-    )
+    prompt_template: Optional[str] = Field(default=None, description="Optional template string (ignored).")

     def __hash__(self):
         """Hash including max_count_limit."""
@@ -271,6 +258,9 @@ class MaxCountPerStepToolRule(BaseToolRule):

         return available_tools

+    def render_prompt(self) -> str | None:
+        return f"<tool_rule>\n{self.tool_name}: at most {self.max_count_limit} use(s) per response\n</tool_rule>"
+

 class RequiresApprovalToolRule(BaseToolRule):
     """
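Illustrative sketch (not part of the diff): each rule's Jinja2 `prompt_template` default is replaced by a plain f-string in `render_prompt()`, so the rendered guidance keeps the same shape without template parsing. Assuming the constructors accept the fields shown above (`tool_name` is declared on `BaseToolRule`, outside these hunks):

```python
# Hypothetical usage sketch of the new f-string render_prompt() implementations.
from letta.schemas.tool_rule import ChildToolRule, MaxCountPerStepToolRule

child_rule = ChildToolRule(tool_name="web_search", children=["summarize_results", "send_message"])
print(child_rule.render_prompt())
# <tool_rule>
# After using web_search, you must use one of these tools: summarize_results, send_message
# </tool_rule>

count_rule = MaxCountPerStepToolRule(tool_name="send_message", max_count_limit=1)
print(count_rule.render_prompt())
# <tool_rule>
# send_message: at most 1 use(s) per response
# </tool_rule>
```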
letta/server/rest_api/routers/v1/steps.py
CHANGED
@@ -5,6 +5,8 @@ from fastapi import APIRouter, Body, Depends, Header, HTTPException, Query
 from pydantic import BaseModel, Field

 from letta.orm.errors import NoResultFound
+from letta.schemas.letta_message import LettaMessageUnion
+from letta.schemas.message import Message
 from letta.schemas.provider_trace import ProviderTrace
 from letta.schemas.step import Step
 from letta.schemas.step_metrics import StepMetrics
@@ -138,6 +140,33 @@ async def modify_feedback_for_step(
         raise HTTPException(status_code=404, detail="Step not found")


+@router.get("/{step_id}/messages", response_model=List[LettaMessageUnion], operation_id="list_messages_for_step")
+async def list_messages_for_step(
+    step_id: str,
+    headers: HeaderParams = Depends(get_headers),
+    server: SyncServer = Depends(get_letta_server),
+    before: Optional[str] = Query(
+        None, description="Message ID cursor for pagination. Returns messages that come before this message ID in the specified sort order"
+    ),
+    after: Optional[str] = Query(
+        None, description="Message ID cursor for pagination. Returns messages that come after this message ID in the specified sort order"
+    ),
+    limit: Optional[int] = Query(100, description="Maximum number of messages to return"),
+    order: Literal["asc", "desc"] = Query(
+        "asc", description="Sort order for messages by creation time. 'asc' for oldest first, 'desc' for newest first"
+    ),
+    order_by: Literal["created_at"] = Query("created_at", description="Sort by field"),
+):
+    """
+    List messages for a given step.
+    """
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
+    messages = await server.step_manager.list_step_messages_async(
+        step_id=step_id, actor=actor, before=before, after=after, limit=limit, ascending=(order == "asc")
+    )
+    return Message.to_letta_messages_from_list(messages)
+
+
 @router.patch("/{step_id}/transaction/{transaction_id}", response_model=Step, operation_id="update_step_transaction_id")
 async def update_step_transaction_id(
     step_id: str,
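Illustrative sketch (not part of the diff): calling the new `list_messages_for_step` route over HTTP, which replaces reading the deprecated `Step.messages` field. The base URL and step ID below are placeholders; adapt them to your deployment and authentication setup.

```python
# Hypothetical client-side call to GET /v1/steps/{step_id}/messages.
import requests

base_url = "http://localhost:8283"  # placeholder Letta server address
step_id = "step-00000000-0000-0000-0000-000000000000"  # placeholder step ID

resp = requests.get(
    f"{base_url}/v1/steps/{step_id}/messages",
    params={"order": "asc", "limit": 50},
)
resp.raise_for_status()
for message in resp.json():
    print(message)  # each item is one serialized LettaMessageUnion variant
```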
letta/server/server.py
CHANGED
@@ -38,12 +38,12 @@ from letta.log import get_logger
 from letta.orm.errors import NoResultFound
 from letta.otel.tracing import log_event, trace_method
 from letta.prompts.gpt_system import get_system_text
-from letta.schemas.agent import AgentState,
+from letta.schemas.agent import AgentState, CreateAgent, UpdateAgent
 from letta.schemas.block import Block, BlockUpdate, CreateBlock
 from letta.schemas.embedding_config import EmbeddingConfig

 # openai schemas
-from letta.schemas.enums import JobStatus, MessageStreamStatus, ProviderCategory, ProviderType, SandboxType, ToolSourceType
+from letta.schemas.enums import AgentType, JobStatus, MessageStreamStatus, ProviderCategory, ProviderType, SandboxType, ToolSourceType
 from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate
 from letta.schemas.group import GroupCreate, ManagerType, SleeptimeManager, VoiceSleeptimeManager
 from letta.schemas.job import Job, JobUpdate