letta-nightly 0.11.0.dev20250807104511__py3-none-any.whl → 0.11.0.dev20250808055434__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- letta/agent.py +2 -1
- letta/agents/letta_agent.py +215 -143
- letta/functions/function_sets/base.py +2 -2
- letta/functions/function_sets/files.py +22 -9
- letta/interfaces/anthropic_streaming_interface.py +291 -265
- letta/interfaces/openai_streaming_interface.py +270 -250
- letta/llm_api/anthropic.py +3 -10
- letta/llm_api/openai_client.py +6 -1
- letta/orm/__init__.py +1 -0
- letta/orm/step.py +14 -0
- letta/orm/step_metrics.py +71 -0
- letta/schemas/enums.py +9 -0
- letta/schemas/llm_config.py +8 -6
- letta/schemas/providers/lmstudio.py +2 -2
- letta/schemas/providers/openai.py +1 -1
- letta/schemas/step.py +6 -0
- letta/schemas/step_metrics.py +23 -0
- letta/schemas/tool_rule.py +10 -29
- letta/services/step_manager.py +179 -1
- letta/services/tool_executor/builtin_tool_executor.py +4 -1
- letta/services/tool_executor/core_tool_executor.py +2 -10
- letta/services/tool_executor/files_tool_executor.py +89 -40
- {letta_nightly-0.11.0.dev20250807104511.dist-info → letta_nightly-0.11.0.dev20250808055434.dist-info}/METADATA +1 -1
- {letta_nightly-0.11.0.dev20250807104511.dist-info → letta_nightly-0.11.0.dev20250808055434.dist-info}/RECORD +27 -25
- {letta_nightly-0.11.0.dev20250807104511.dist-info → letta_nightly-0.11.0.dev20250808055434.dist-info}/LICENSE +0 -0
- {letta_nightly-0.11.0.dev20250807104511.dist-info → letta_nightly-0.11.0.dev20250808055434.dist-info}/WHEEL +0 -0
- {letta_nightly-0.11.0.dev20250807104511.dist-info → letta_nightly-0.11.0.dev20250808055434.dist-info}/entry_points.txt +0 -0
letta/orm/step_metrics.py
ADDED
@@ -0,0 +1,71 @@
+from typing import TYPE_CHECKING, Optional
+
+from sqlalchemy import BigInteger, ForeignKey, String
+from sqlalchemy.orm import Mapped, mapped_column, relationship
+
+from letta.orm.mixins import AgentMixin, ProjectMixin
+from letta.orm.sqlalchemy_base import SqlalchemyBase
+from letta.schemas.step_metrics import StepMetrics as PydanticStepMetrics
+
+if TYPE_CHECKING:
+    from letta.orm.agent import Agent
+    from letta.orm.job import Job
+    from letta.orm.step import Step
+
+
+class StepMetrics(SqlalchemyBase, ProjectMixin, AgentMixin):
+    """Tracks performance metrics for agent steps."""
+
+    __tablename__ = "step_metrics"
+    __pydantic_model__ = PydanticStepMetrics
+
+    id: Mapped[str] = mapped_column(
+        ForeignKey("steps.id", ondelete="CASCADE"),
+        primary_key=True,
+        doc="The unique identifier of the step this metric belongs to (also serves as PK)",
+    )
+    organization_id: Mapped[str] = mapped_column(
+        ForeignKey("organizations.id", ondelete="RESTRICT"),
+        nullable=True,
+        doc="The unique identifier of the organization",
+    )
+    provider_id: Mapped[Optional[str]] = mapped_column(
+        ForeignKey("providers.id", ondelete="RESTRICT"),
+        nullable=True,
+        doc="The unique identifier of the provider",
+    )
+    job_id: Mapped[Optional[str]] = mapped_column(
+        ForeignKey("jobs.id", ondelete="SET NULL"),
+        nullable=True,
+        doc="The unique identifier of the job",
+    )
+    llm_request_ns: Mapped[Optional[int]] = mapped_column(
+        BigInteger,
+        nullable=True,
+        doc="Time spent on the LLM request in nanoseconds",
+    )
+    tool_execution_ns: Mapped[Optional[int]] = mapped_column(
+        BigInteger,
+        nullable=True,
+        doc="Time spent on tool execution in nanoseconds",
+    )
+    step_ns: Mapped[Optional[int]] = mapped_column(
+        BigInteger,
+        nullable=True,
+        doc="Total time for the step in nanoseconds",
+    )
+    base_template_id: Mapped[Optional[str]] = mapped_column(
+        String,
+        nullable=True,
+        doc="The base template ID for the step",
+    )
+    template_id: Mapped[Optional[str]] = mapped_column(
+        String,
+        nullable=True,
+        doc="The template ID for the step",
+    )
+
+    # Relationships (foreign keys)
+    step: Mapped["Step"] = relationship("Step", back_populates="metrics", uselist=False)
+    job: Mapped[Optional["Job"]] = relationship("Job")
+    agent: Mapped[Optional["Agent"]] = relationship("Agent")
letta/schemas/enums.py
CHANGED
@@ -160,3 +160,12 @@ class SandboxType(str, Enum):
     E2B = "e2b"
     MODAL = "modal"
     LOCAL = "local"
+
+
+class StepStatus(str, Enum):
+    """Status of a step execution"""
+
+    PENDING = "pending"
+    SUCCESS = "success"
+    FAILED = "failed"
+    CANCELLED = "cancelled"
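Note: because StepStatus subclasses str, members compare and serialize as their plain string values. A small illustrative check (not part of the diff):

    from letta.schemas.enums import StepStatus

    status = StepStatus.FAILED
    assert status == "failed"  # a str-enum compares equal to its value
    terminal = {StepStatus.SUCCESS, StepStatus.FAILED, StepStatus.CANCELLED}
    assert status in terminal and StepStatus.PENDING not in terminal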
letta/schemas/llm_config.py
CHANGED
@@ -58,7 +58,7 @@ class LLMConfig(BaseModel):
     enable_reasoner: bool = Field(
         False, description="Whether or not the model should use extended thinking if it is a 'reasoning' style model"
     )
-    reasoning_effort: Optional[Literal["low", "medium", "high"]] = Field(
+    reasoning_effort: Optional[Literal["minimal", "low", "medium", "high"]] = Field(
         None,
         description="The reasoning effort to use when generating text reasoning models",
     )
@@ -188,6 +188,8 @@ class LLMConfig(BaseModel):
     @classmethod
     def apply_reasoning_setting_to_config(cls, config: "LLMConfig", reasoning: bool):
         if reasoning:
+            config.enable_reasoner = True
+
             if (
                 config.model_endpoint_type == "anthropic"
                 and ("claude-opus-4" in config.model or "claude-sonnet-4" in config.model or "claude-3-7-sonnet" in config.model)
@@ -195,19 +197,19 @@ class LLMConfig(BaseModel):
                 config.model_endpoint_type == "google_vertex" and ("gemini-2.5-flash" in config.model or "gemini-2.0-pro" in config.model)
             ):
                 config.put_inner_thoughts_in_kwargs = False
-                config.enable_reasoner = True
                 if config.max_reasoning_tokens == 0:
                     config.max_reasoning_tokens = 1024
             elif config.model_endpoint_type == "openai" and (
                 config.model.startswith("o1") or config.model.startswith("o3") or config.model.startswith("o4")
             ):
-                config.put_inner_thoughts_in_kwargs =
-                config.enable_reasoner = True
+                config.put_inner_thoughts_in_kwargs = False
                 if config.reasoning_effort is None:
                     config.reasoning_effort = "medium"
             else:
                 config.put_inner_thoughts_in_kwargs = True
-
+
         else:
-            config.put_inner_thoughts_in_kwargs = False
             config.enable_reasoner = False
+            config.put_inner_thoughts_in_kwargs = False
+
+        return config
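Note: the net effect of the reordering above is that enable_reasoner is set once at the top of the reasoning branch instead of per-provider. A sketch of the resulting behavior for an OpenAI o-series model (the constructor arguments are illustrative assumptions):

    from letta.schemas.llm_config import LLMConfig

    config = LLMConfig(
        model="o3-mini",
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",  # assumed endpoint
        context_window=200000,
    )
    config = LLMConfig.apply_reasoning_setting_to_config(config, reasoning=True)
    assert config.enable_reasoner is True                # hoisted, now set for all providers
    assert config.put_inner_thoughts_in_kwargs is False  # o-series branch
    assert config.reasoning_effort == "medium"           # default filled in when unset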
letta/schemas/providers/lmstudio.py
CHANGED
@@ -55,7 +55,7 @@ class LMStudioOpenAIProvider(OpenAIProvider):
             LLMConfig(
                 model=model_name,
                 model_endpoint_type="openai",
-                model_endpoint=self.
+                model_endpoint=self.model_endpoint_url,
                 context_window=context_window_size,
                 handle=self.get_handle(model_name),
                 compatibility_type=compatibility_type,
@@ -94,7 +94,7 @@ class LMStudioOpenAIProvider(OpenAIProvider):
             EmbeddingConfig(
                 embedding_model=model_name,
                 embedding_endpoint_type="openai",
-                embedding_endpoint=self.
+                embedding_endpoint=self.model_endpoint_url,
                 embedding_dim=768,  # Default embedding dimension, not context window
                 embedding_chunk_size=DEFAULT_EMBEDDING_CHUNK_SIZE,  # NOTE: max is 2048
                 handle=self.get_handle(model_name),
letta/schemas/providers/openai.py
CHANGED
@@ -11,7 +11,7 @@ from letta.schemas.providers.base import Provider
 
 logger = get_logger(__name__)
 
-ALLOWED_PREFIXES = {"gpt-4", "o1", "o3", "o4"}
+ALLOWED_PREFIXES = {"gpt-4", "gpt-5", "o1", "o3", "o4"}
 DISALLOWED_KEYWORDS = {"transcribe", "search", "realtime", "tts", "audio", "computer", "o1-mini", "o1-preview", "o1-pro"}
 DEFAULT_EMBEDDING_BATCH_SIZE = 1024
 
letta/schemas/step.py
CHANGED
@@ -3,6 +3,7 @@ from typing import Dict, List, Literal, Optional
 
 from pydantic import Field
 
+from letta.schemas.enums import StepStatus
 from letta.schemas.letta_base import LettaBase
 from letta.schemas.letta_stop_reason import StopReasonType
 from letta.schemas.message import Message
@@ -40,6 +41,11 @@ class Step(StepBase):
     )
     project_id: Optional[str] = Field(None, description="The project that the agent that executed this step belongs to (cloud only).")
 
+    # error tracking fields
+    error_type: Optional[str] = Field(None, description="The type/class of the error that occurred")
+    error_data: Optional[Dict] = Field(None, description="Error details including message, traceback, and additional context")
+    status: Optional[StepStatus] = Field(StepStatus.PENDING, description="Step status: pending, success, or failed")
+
 
 class StepProgression(int, Enum):
     START = auto()
letta/schemas/step_metrics.py
ADDED
@@ -0,0 +1,23 @@
+from typing import Optional
+
+from pydantic import Field
+
+from letta.schemas.letta_base import LettaBase
+
+
+class StepMetricsBase(LettaBase):
+    __id_prefix__ = "step"
+
+
+class StepMetrics(StepMetricsBase):
+    id: str = Field(..., description="The id of the step this metric belongs to (matches steps.id).")
+    organization_id: Optional[str] = Field(None, description="The unique identifier of the organization.")
+    provider_id: Optional[str] = Field(None, description="The unique identifier of the provider.")
+    job_id: Optional[str] = Field(None, description="The unique identifier of the job.")
+    agent_id: Optional[str] = Field(None, description="The unique identifier of the agent.")
+    llm_request_ns: Optional[int] = Field(None, description="Time spent on LLM requests in nanoseconds.")
+    tool_execution_ns: Optional[int] = Field(None, description="Time spent on tool execution in nanoseconds.")
+    step_ns: Optional[int] = Field(None, description="Total time for the step in nanoseconds.")
+    base_template_id: Optional[str] = Field(None, description="The base template ID that the step belongs to (cloud only).")
+    template_id: Optional[str] = Field(None, description="The template ID that the step belongs to (cloud only).")
+    project_id: Optional[str] = Field(None, description="The project that the step belongs to (cloud only).")
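Note: all timing fields are integer nanoseconds. A hypothetical helper (not part of the package) for converting a StepMetrics record to milliseconds for display:

    from typing import Optional
    from letta.schemas.step_metrics import StepMetrics

    def metrics_ms(m: StepMetrics) -> dict[str, Optional[float]]:
        # Convert the nanosecond counters to milliseconds; None stays None
        to_ms = lambda ns: ns / 1e6 if ns is not None else None
        return {
            "llm_request_ms": to_ms(m.llm_request_ns),
            "tool_execution_ms": to_ms(m.tool_execution_ns),
            "step_ms": to_ms(m.step_ns),
        }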
letta/schemas/tool_rule.py
CHANGED
@@ -23,26 +23,24 @@ class BaseToolRule(LettaBase):
     def get_valid_tools(self, tool_call_history: List[str], available_tools: Set[str], last_function_response: Optional[str]) -> set[str]:
         raise NotImplementedError
 
-    def render_prompt(self) ->
+    def render_prompt(self) -> str | None:
         """Render the prompt template with this rule's attributes."""
-
-        if not template_to_use:
+        if not self.prompt_template:
             return None
 
         try:
-            template = Template(
+            template = Template(self.prompt_template)
             return template.render(**self.model_dump())
         except Exception as e:
             logger.warning(
-
-
+                "Failed to render prompt template for tool rule '%s' (type: %s). Template: '%s'. Error: %s",
+                self.tool_name,
+                self.type,
+                self.prompt_template,
+                e,
             )
             return None
 
-    def _get_default_template(self) -> Optional[str]:
-        """Get the default template for this rule type. Override in subclasses."""
-        return None
-
 
 class ChildToolRule(BaseToolRule):
     """
@@ -60,9 +58,6 @@ class ChildToolRule(BaseToolRule):
         last_tool = tool_call_history[-1] if tool_call_history else None
         return set(self.children) if last_tool == self.tool_name else available_tools
 
-    def _get_default_template(self) -> Optional[str]:
-        return "<tool_rule>\nAfter using {{ tool_name }}, you must use one of these tools: {{ children | join(', ') }}\n</tool_rule>"
-
 
 class ParentToolRule(BaseToolRule):
     """
@@ -80,9 +75,6 @@ class ParentToolRule(BaseToolRule):
         last_tool = tool_call_history[-1] if tool_call_history else None
         return set(self.children) if last_tool == self.tool_name else available_tools - set(self.children)
 
-    def _get_default_template(self) -> Optional[str]:
-        return "<tool_rule>\n{{ children | join(', ') }} can only be used after {{ tool_name }}\n</tool_rule>"
-
 
 class ConditionalToolRule(BaseToolRule):
     """
@@ -125,7 +117,8 @@ class ConditionalToolRule(BaseToolRule):
 
         return {self.default_child} if self.default_child else available_tools
 
-
+    @staticmethod
+    def _matches_key(function_output: str, key: Any) -> bool:
         """Helper function to determine if function output matches a mapping key."""
         if isinstance(key, bool):
             return function_output.lower() == "true" if key else function_output.lower() == "false"
@@ -142,9 +135,6 @@ class ConditionalToolRule(BaseToolRule):
         else:  # Assume string
             return str(function_output) == str(key)
 
-    def _get_default_template(self) -> Optional[str]:
-        return "<tool_rule>\n{{ tool_name }} will determine which tool to use next based on its output\n</tool_rule>"
-
 
 class InitToolRule(BaseToolRule):
     """
@@ -165,9 +155,6 @@ class TerminalToolRule(BaseToolRule):
         description="Optional Jinja2 template for generating agent prompt about this tool rule.",
     )
 
-    def _get_default_template(self) -> Optional[str]:
-        return "<tool_rule>\n{{ tool_name }} ends your response (yields control) when called\n</tool_rule>"
-
 
 class ContinueToolRule(BaseToolRule):
     """
@@ -196,9 +183,6 @@ class RequiredBeforeExitToolRule(BaseToolRule):
         """Returns all available tools - the logic for preventing exit is handled elsewhere."""
         return available_tools
 
-    def _get_default_template(self) -> Optional[str]:
-        return "<tool_rule>{{ tool_name }} must be called before ending the conversation</tool_rule>"
-
 
 class MaxCountPerStepToolRule(BaseToolRule):
     """
@@ -222,9 +206,6 @@ class MaxCountPerStepToolRule(BaseToolRule):
 
         return available_tools
 
-    def _get_default_template(self) -> Optional[str]:
-        return "<tool_rule>\n{{ tool_name }}: at most {{ max_count_limit }} use(s) per response\n</tool_rule>"
-
 
 ToolRule = Annotated[
     Union[
letta/services/step_manager.py
CHANGED
@@ -1,6 +1,6 @@
 from datetime import datetime
 from enum import Enum
-from typing import List, Literal, Optional
+from typing import Dict, List, Literal, Optional
 
 from sqlalchemy import select
 from sqlalchemy.ext.asyncio import AsyncSession
@@ -12,6 +12,7 @@ from letta.orm.job import Job as JobModel
 from letta.orm.sqlalchemy_base import AccessType
 from letta.orm.step import Step as StepModel
 from letta.otel.tracing import get_trace_id, trace_method
+from letta.schemas.enums import StepStatus
 from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType
 from letta.schemas.openai.chat_completion_response import UsageStatistics
 from letta.schemas.step import Step as PydanticStep
@@ -87,6 +88,10 @@ class StepManager:
         job_id: Optional[str] = None,
         step_id: Optional[str] = None,
         project_id: Optional[str] = None,
+        stop_reason: Optional[LettaStopReason] = None,
+        status: Optional[StepStatus] = None,
+        error_type: Optional[str] = None,
+        error_data: Optional[Dict] = None,
     ) -> PydanticStep:
         step_data = {
             "origin": None,
@@ -106,9 +111,14 @@ class StepManager:
             "tid": None,
             "trace_id": get_trace_id(),  # Get the current trace ID
             "project_id": project_id,
+            "status": status if status else StepStatus.PENDING,
+            "error_type": error_type,
+            "error_data": error_data,
         }
         if step_id:
             step_data["id"] = step_id
+        if stop_reason:
+            step_data["stop_reason"] = stop_reason.stop_reason
         with db_registry.session() as session:
             if job_id:
                 self._verify_job_access(session, job_id, actor, access=["write"])
@@ -133,6 +143,9 @@ class StepManager:
         step_id: Optional[str] = None,
         project_id: Optional[str] = None,
         stop_reason: Optional[LettaStopReason] = None,
+        status: Optional[StepStatus] = None,
+        error_type: Optional[str] = None,
+        error_data: Optional[Dict] = None,
     ) -> PydanticStep:
         step_data = {
             "origin": None,
@@ -152,6 +165,9 @@ class StepManager:
             "tid": None,
             "trace_id": get_trace_id(),  # Get the current trace ID
             "project_id": project_id,
+            "status": status if status else StepStatus.PENDING,
+            "error_type": error_type,
+            "error_data": error_data,
         }
         if step_id:
             step_data["id"] = step_id
@@ -236,6 +252,126 @@ class StepManager:
         await session.commit()
         return step
 
+    @enforce_types
+    @trace_method
+    async def update_step_error_async(
+        self,
+        actor: PydanticUser,
+        step_id: str,
+        error_type: str,
+        error_message: str,
+        error_traceback: str,
+        error_details: Optional[Dict] = None,
+        stop_reason: Optional[LettaStopReason] = None,
+    ) -> PydanticStep:
+        """Update a step with error information.
+
+        Args:
+            actor: The user making the request
+            step_id: The ID of the step to update
+            error_type: The type/class of the error
+            error_message: The error message
+            error_traceback: Full error traceback
+            error_details: Additional error context
+            stop_reason: The stop reason to set
+
+        Returns:
+            The updated step
+
+        Raises:
+            NoResultFound: If the step does not exist
+        """
+        async with db_registry.async_session() as session:
+            step = await session.get(StepModel, step_id)
+            if not step:
+                raise NoResultFound(f"Step with id {step_id} does not exist")
+            if step.organization_id != actor.organization_id:
+                raise Exception("Unauthorized")
+
+            step.status = StepStatus.FAILED
+            step.error_type = error_type
+            step.error_data = {"message": error_message, "traceback": error_traceback, "details": error_details}
+            if stop_reason:
+                step.stop_reason = stop_reason.stop_reason
+
+            await session.commit()
+            return step.to_pydantic()
+
+    @enforce_types
+    @trace_method
+    async def update_step_success_async(
+        self,
+        actor: PydanticUser,
+        step_id: str,
+        usage: UsageStatistics,
+        stop_reason: Optional[LettaStopReason] = None,
+    ) -> PydanticStep:
+        """Update a step with success status and final usage statistics.
+
+        Args:
+            actor: The user making the request
+            step_id: The ID of the step to update
+            usage: Final usage statistics
+            stop_reason: The stop reason to set
+
+        Returns:
+            The updated step
+
+        Raises:
+            NoResultFound: If the step does not exist
+        """
+        async with db_registry.async_session() as session:
+            step = await session.get(StepModel, step_id)
+            if not step:
+                raise NoResultFound(f"Step with id {step_id} does not exist")
+            if step.organization_id != actor.organization_id:
+                raise Exception("Unauthorized")
+
+            step.status = StepStatus.SUCCESS
+            step.completion_tokens = usage.completion_tokens
+            step.prompt_tokens = usage.prompt_tokens
+            step.total_tokens = usage.total_tokens
+            if stop_reason:
+                step.stop_reason = stop_reason.stop_reason
+
+            await session.commit()
+            return step.to_pydantic()
+
+    @enforce_types
+    @trace_method
+    async def update_step_cancelled_async(
+        self,
+        actor: PydanticUser,
+        step_id: str,
+        stop_reason: Optional[LettaStopReason] = None,
+    ) -> PydanticStep:
+        """Update a step with cancelled status.
+
+        Args:
+            actor: The user making the request
+            step_id: The ID of the step to update
+            stop_reason: The stop reason to set
+
+        Returns:
+            The updated step
+
+        Raises:
+            NoResultFound: If the step does not exist
+        """
+        async with db_registry.async_session() as session:
+            step = await session.get(StepModel, step_id)
+            if not step:
+                raise NoResultFound(f"Step with id {step_id} does not exist")
+            if step.organization_id != actor.organization_id:
+                raise Exception("Unauthorized")
+
+            step.status = StepStatus.CANCELLED
+            if stop_reason:
+                step.stop_reason = stop_reason.stop_reason
+
+            await session.commit()
+            return step.to_pydantic()
+
     def _verify_job_access(
         self,
         session: Session,
@@ -319,6 +455,10 @@ class NoopStepManager(StepManager):
         job_id: Optional[str] = None,
         step_id: Optional[str] = None,
         project_id: Optional[str] = None,
+        stop_reason: Optional[LettaStopReason] = None,
+        status: Optional[StepStatus] = None,
+        error_type: Optional[str] = None,
+        error_data: Optional[Dict] = None,
     ) -> PydanticStep:
         return
 
@@ -339,5 +479,43 @@ class NoopStepManager(StepManager):
         step_id: Optional[str] = None,
         project_id: Optional[str] = None,
         stop_reason: Optional[LettaStopReason] = None,
+        status: Optional[StepStatus] = None,
+        error_type: Optional[str] = None,
+        error_data: Optional[Dict] = None,
+    ) -> PydanticStep:
+        return
+
+    @enforce_types
+    @trace_method
+    async def update_step_error_async(
+        self,
+        actor: PydanticUser,
+        step_id: str,
+        error_type: str,
+        error_message: str,
+        error_traceback: str,
+        error_details: Optional[Dict] = None,
+        stop_reason: Optional[LettaStopReason] = None,
+    ) -> PydanticStep:
+        return
+
+    @enforce_types
+    @trace_method
+    async def update_step_success_async(
+        self,
+        actor: PydanticUser,
+        step_id: str,
+        usage: UsageStatistics,
+        stop_reason: Optional[LettaStopReason] = None,
+    ) -> PydanticStep:
+        return
+
+    @enforce_types
+    @trace_method
+    async def update_step_cancelled_async(
+        self,
+        actor: PydanticUser,
+        step_id: str,
+        stop_reason: Optional[LettaStopReason] = None,
     ) -> PydanticStep:
         return
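Note: a sketch of how a caller might record a failed step with the new API (the surrounding names — step_manager, actor, step_id — are assumptions, not taken from this diff):

    import traceback

    async def record_failure(step_manager, actor, step_id, exc: Exception) -> None:
        # Marks the step FAILED and stores message/traceback in error_data
        await step_manager.update_step_error_async(
            actor=actor,
            step_id=step_id,
            error_type=type(exc).__name__,
            error_message=str(exc),
            error_traceback=traceback.format_exc(),
        )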
letta/services/tool_executor/builtin_tool_executor.py
CHANGED
@@ -210,7 +210,10 @@ class LettaBuiltinToolExecutor(ToolExecutor):
             logger.info(f"[DEBUG] Starting Firecrawl search for query: '{task.query}' with limit={limit}")
 
             # Perform the search for this task
-
+            scrape_options = ScrapeOptions(
+                formats=["markdown"], excludeTags=["#ad", "#footer"], onlyMainContent=True, parsePDF=True, removeBase64Images=True
+            )
+            search_result = await app.search(task.query, limit=limit, scrape_options=scrape_options)
 
             logger.info(
                 f"[DEBUG] Firecrawl search completed for '{task.query}': {len(search_result.get('data', [])) if search_result else 0} results"
letta/services/tool_executor/core_tool_executor.py
CHANGED
@@ -230,14 +230,7 @@ class LettaCoreToolExecutor(ToolExecutor):
         await AgentManager().update_memory_if_changed_async(agent_id=agent_state.id, new_memory=agent_state.memory, actor=actor)
         return None
 
-    async def memory_replace(
-        self,
-        agent_state: AgentState,
-        actor: User,
-        label: str,
-        old_str: str,
-        new_str: Optional[str] = None,
-    ) -> str:
+    async def memory_replace(self, agent_state: AgentState, actor: User, label: str, old_str: str, new_str: str) -> str:
         """
         The memory_replace command allows you to replace a specific string in a memory
         block with a new string. This is used for making precise edits.
@@ -246,8 +239,7 @@ class LettaCoreToolExecutor(ToolExecutor):
             label (str): Section of the memory to be edited, identified by its label.
             old_str (str): The text to replace (must match exactly, including whitespace
                 and indentation). Do not include line number prefixes.
-            new_str (
-                Omit this argument to delete the old_str. Do not include line number prefixes.
+            new_str (str): The new text to insert in place of the old text. Do not include line number prefixes.
 
         Returns:
             str: The success message
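Note: with this signature change, new_str is required — omitting it no longer deletes old_str. A hypothetical call (executor/agent_state/actor names are assumptions):

    # Replace an exact substring in the "human" memory block
    result = await executor.memory_replace(
        agent_state=agent_state,
        actor=actor,
        label="human",
        old_str="Name: unknown",
        new_str="Name: Ada",
    )
    print(result)  # success message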
|