letta-nightly 0.11.7.dev20251007104119__py3-none-any.whl → 0.12.0.dev20251009104148__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- letta/__init__.py +1 -1
- letta/adapters/letta_llm_adapter.py +1 -0
- letta/adapters/letta_llm_request_adapter.py +0 -1
- letta/adapters/letta_llm_stream_adapter.py +7 -2
- letta/adapters/simple_llm_request_adapter.py +88 -0
- letta/adapters/simple_llm_stream_adapter.py +192 -0
- letta/agents/agent_loop.py +6 -0
- letta/agents/ephemeral_summary_agent.py +2 -1
- letta/agents/helpers.py +142 -6
- letta/agents/letta_agent.py +13 -33
- letta/agents/letta_agent_batch.py +2 -4
- letta/agents/letta_agent_v2.py +87 -77
- letta/agents/letta_agent_v3.py +927 -0
- letta/agents/voice_agent.py +2 -6
- letta/constants.py +8 -4
- letta/database_utils.py +161 -0
- letta/errors.py +40 -0
- letta/functions/function_sets/base.py +84 -4
- letta/functions/function_sets/multi_agent.py +0 -3
- letta/functions/schema_generator.py +113 -71
- letta/groups/dynamic_multi_agent.py +3 -2
- letta/groups/helpers.py +1 -2
- letta/groups/round_robin_multi_agent.py +3 -2
- letta/groups/sleeptime_multi_agent.py +3 -2
- letta/groups/sleeptime_multi_agent_v2.py +1 -1
- letta/groups/sleeptime_multi_agent_v3.py +17 -17
- letta/groups/supervisor_multi_agent.py +84 -80
- letta/helpers/converters.py +3 -0
- letta/helpers/message_helper.py +4 -0
- letta/helpers/tool_rule_solver.py +92 -5
- letta/interfaces/anthropic_streaming_interface.py +409 -0
- letta/interfaces/gemini_streaming_interface.py +296 -0
- letta/interfaces/openai_streaming_interface.py +752 -1
- letta/llm_api/anthropic_client.py +127 -16
- letta/llm_api/bedrock_client.py +4 -2
- letta/llm_api/deepseek_client.py +4 -1
- letta/llm_api/google_vertex_client.py +124 -42
- letta/llm_api/groq_client.py +4 -1
- letta/llm_api/llm_api_tools.py +11 -4
- letta/llm_api/llm_client_base.py +6 -2
- letta/llm_api/openai.py +32 -2
- letta/llm_api/openai_client.py +423 -18
- letta/llm_api/xai_client.py +4 -1
- letta/main.py +9 -5
- letta/memory.py +1 -0
- letta/orm/__init__.py +2 -1
- letta/orm/agent.py +10 -0
- letta/orm/block.py +7 -16
- letta/orm/blocks_agents.py +8 -2
- letta/orm/files_agents.py +2 -0
- letta/orm/job.py +7 -5
- letta/orm/mcp_oauth.py +1 -0
- letta/orm/message.py +21 -6
- letta/orm/organization.py +2 -0
- letta/orm/provider.py +6 -2
- letta/orm/run.py +71 -0
- letta/orm/run_metrics.py +82 -0
- letta/orm/sandbox_config.py +7 -1
- letta/orm/sqlalchemy_base.py +0 -306
- letta/orm/step.py +6 -5
- letta/orm/step_metrics.py +5 -5
- letta/otel/tracing.py +28 -3
- letta/plugins/defaults.py +4 -4
- letta/prompts/system_prompts/__init__.py +2 -0
- letta/prompts/system_prompts/letta_v1.py +25 -0
- letta/schemas/agent.py +3 -2
- letta/schemas/agent_file.py +9 -3
- letta/schemas/block.py +23 -10
- letta/schemas/enums.py +21 -2
- letta/schemas/job.py +17 -4
- letta/schemas/letta_message_content.py +71 -2
- letta/schemas/letta_stop_reason.py +5 -5
- letta/schemas/llm_config.py +53 -3
- letta/schemas/memory.py +1 -1
- letta/schemas/message.py +564 -117
- letta/schemas/openai/responses_request.py +64 -0
- letta/schemas/providers/__init__.py +2 -0
- letta/schemas/providers/anthropic.py +16 -0
- letta/schemas/providers/ollama.py +115 -33
- letta/schemas/providers/openrouter.py +52 -0
- letta/schemas/providers/vllm.py +2 -1
- letta/schemas/run.py +48 -42
- letta/schemas/run_metrics.py +21 -0
- letta/schemas/step.py +2 -2
- letta/schemas/step_metrics.py +1 -1
- letta/schemas/tool.py +15 -107
- letta/schemas/tool_rule.py +88 -5
- letta/serialize_schemas/marshmallow_agent.py +1 -0
- letta/server/db.py +79 -408
- letta/server/rest_api/app.py +61 -10
- letta/server/rest_api/dependencies.py +14 -0
- letta/server/rest_api/redis_stream_manager.py +19 -8
- letta/server/rest_api/routers/v1/agents.py +364 -292
- letta/server/rest_api/routers/v1/blocks.py +14 -20
- letta/server/rest_api/routers/v1/identities.py +45 -110
- letta/server/rest_api/routers/v1/internal_templates.py +21 -0
- letta/server/rest_api/routers/v1/jobs.py +23 -6
- letta/server/rest_api/routers/v1/messages.py +1 -1
- letta/server/rest_api/routers/v1/runs.py +149 -99
- letta/server/rest_api/routers/v1/sandbox_configs.py +10 -19
- letta/server/rest_api/routers/v1/tools.py +281 -594
- letta/server/rest_api/routers/v1/voice.py +1 -1
- letta/server/rest_api/streaming_response.py +29 -29
- letta/server/rest_api/utils.py +122 -64
- letta/server/server.py +160 -887
- letta/services/agent_manager.py +236 -919
- letta/services/agent_serialization_manager.py +16 -0
- letta/services/archive_manager.py +0 -100
- letta/services/block_manager.py +211 -168
- letta/services/context_window_calculator/token_counter.py +1 -1
- letta/services/file_manager.py +1 -1
- letta/services/files_agents_manager.py +24 -33
- letta/services/group_manager.py +0 -142
- letta/services/helpers/agent_manager_helper.py +7 -2
- letta/services/helpers/run_manager_helper.py +69 -0
- letta/services/job_manager.py +96 -411
- letta/services/lettuce/__init__.py +6 -0
- letta/services/lettuce/lettuce_client_base.py +86 -0
- letta/services/mcp_manager.py +38 -6
- letta/services/message_manager.py +165 -362
- letta/services/organization_manager.py +0 -36
- letta/services/passage_manager.py +0 -345
- letta/services/provider_manager.py +0 -80
- letta/services/run_manager.py +364 -0
- letta/services/sandbox_config_manager.py +0 -234
- letta/services/step_manager.py +62 -39
- letta/services/summarizer/summarizer.py +9 -7
- letta/services/telemetry_manager.py +0 -16
- letta/services/tool_executor/builtin_tool_executor.py +35 -0
- letta/services/tool_executor/core_tool_executor.py +397 -2
- letta/services/tool_executor/files_tool_executor.py +3 -3
- letta/services/tool_executor/multi_agent_tool_executor.py +30 -15
- letta/services/tool_executor/tool_execution_manager.py +6 -8
- letta/services/tool_executor/tool_executor_base.py +3 -3
- letta/services/tool_manager.py +85 -339
- letta/services/tool_sandbox/base.py +24 -13
- letta/services/tool_sandbox/e2b_sandbox.py +16 -1
- letta/services/tool_schema_generator.py +123 -0
- letta/services/user_manager.py +0 -99
- letta/settings.py +20 -4
- letta/system.py +5 -1
- {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.12.0.dev20251009104148.dist-info}/METADATA +3 -5
- {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.12.0.dev20251009104148.dist-info}/RECORD +146 -135
- letta/agents/temporal/activities/__init__.py +0 -4
- letta/agents/temporal/activities/example_activity.py +0 -7
- letta/agents/temporal/activities/prepare_messages.py +0 -10
- letta/agents/temporal/temporal_agent_workflow.py +0 -56
- letta/agents/temporal/types.py +0 -25
- {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.12.0.dev20251009104148.dist-info}/WHEEL +0 -0
- {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.12.0.dev20251009104148.dist-info}/entry_points.txt +0 -0
- {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.12.0.dev20251009104148.dist-info}/licenses/LICENSE +0 -0
letta/schemas/enums.py
CHANGED
@@ -28,6 +28,7 @@ class AgentType(str, Enum):
 
     memgpt_agent = "memgpt_agent"  # the OG set of memgpt tools
     memgpt_v2_agent = "memgpt_v2_agent"  # memgpt style tools, but refreshed
+    letta_v1_agent = "letta_v1_agent"  # simplification of the memgpt loop, no heartbeats or forced tool calls
     react_agent = "react_agent"  # basic react agent, no memory tools
     workflow_agent = "workflow_agent"  # workflow with auto-clearing message buffer
     split_thread_agent = "split_thread_agent"
@@ -50,6 +51,11 @@ class MessageRole(str, Enum):
     approval = "approval"
 
 
+class MessageSourceType(str, Enum):
+    input = "input"  # external input
+    output = "output"  # internal output
+
+
 class OptionState(str, Enum):
     """Useful for kwargs that are bool + default option"""
 
@@ -77,6 +83,18 @@ class JobStatus(StrEnum):
         return self in (JobStatus.completed, JobStatus.failed, JobStatus.cancelled, JobStatus.expired)
 
 
+class RunStatus(StrEnum):
+    """
+    Status of the run.
+    """
+
+    created = "created"
+    running = "running"
+    completed = "completed"
+    failed = "failed"
+    cancelled = "cancelled"
+
+
 class AgentStepStatus(str, Enum):
     """
     Status of agent step.
@@ -134,12 +152,13 @@ class ToolType(str, Enum):
     LETTA_VOICE_SLEEPTIME_CORE = "letta_voice_sleeptime_core"
     LETTA_BUILTIN = "letta_builtin"
     LETTA_FILES_CORE = "letta_files_core"
-
-
+    EXTERNAL_LANGCHAIN = "external_langchain"  # DEPRECATED
+    EXTERNAL_COMPOSIO = "external_composio"  # DEPRECATED
     # TODO is "external" the right name here? Since as of now, MCP is local / doesn't support remote?
     EXTERNAL_MCP = "external_mcp"
 
 
+
 class JobType(str, Enum):
     JOB = "job"
     RUN = "run"
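
The new RunStatus enum gives runs their own lifecycle, distinct from JobStatus (note it drops job-only states such as expired). A minimal sketch of gating on terminal states; the is_terminal helper below is illustrative and not part of this release:

    from letta.schemas.enums import RunStatus

    def is_terminal(status: RunStatus) -> bool:
        # Hypothetical helper: RunStatus itself ships in 0.12.0; this check does not.
        return status in (RunStatus.completed, RunStatus.failed, RunStatus.cancelled)

    assert not is_terminal(RunStatus.running)
    assert is_terminal(RunStatus.cancelled)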
letta/schemas/job.py
CHANGED
@@ -1,8 +1,11 @@
 from datetime import datetime
-from typing import List, Optional
+from typing import TYPE_CHECKING, List, Optional
 
 from pydantic import BaseModel, ConfigDict, Field
 
+if TYPE_CHECKING:
+    from letta.schemas.letta_request import LettaRequest
+
 from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG
 from letta.helpers.datetime_helpers import get_utc_time
 from letta.schemas.enums import JobStatus, JobType
@@ -24,9 +27,9 @@ class JobBase(OrmMetadataBase):
     metadata: Optional[dict] = Field(None, validation_alias="metadata_", description="The metadata of the job.")
     job_type: JobType = Field(default=JobType.JOB, description="The type of the job.")
 
-
-
-
+    # Run-specific fields
+    background: Optional[bool] = Field(None, description="Whether the job was created in background mode.")
+    agent_id: Optional[str] = Field(None, description="The agent associated with this job/run.")
 
     callback_url: Optional[str] = Field(None, description="If set, POST to this URL when the job completes.")
     callback_sent_at: Optional[datetime] = Field(None, description="Timestamp when the callback was last attempted.")
@@ -112,3 +115,13 @@ class LettaRequestConfig(BaseModel):
     include_return_message_types: Optional[List[MessageType]] = Field(
         default=None, description="Only return specified message types in the response. If `None` (default) returns all messages."
     )
+
+    @classmethod
+    def from_letta_request(cls, request: "LettaRequest") -> "LettaRequestConfig":
+        """Create a LettaRequestConfig from a LettaRequest."""
+        return cls(
+            use_assistant_message=request.use_assistant_message,
+            assistant_message_tool_name=request.assistant_message_tool_name,
+            assistant_message_tool_kwarg=request.assistant_message_tool_kwarg,
+            include_return_message_types=request.include_return_message_types,
+        )
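
The new LettaRequestConfig.from_letta_request classmethod copies the assistant-message and return-type options from an incoming request in one call. A minimal usage sketch, assuming a LettaRequest validates with an empty message list:

    from letta.schemas.job import LettaRequestConfig
    from letta.schemas.letta_request import LettaRequest

    request = LettaRequest(messages=[])  # payload contents are illustrative
    config = LettaRequestConfig.from_letta_request(request)
    assert config.use_assistant_message == request.use_assistant_message
    assert config.include_return_message_types == request.include_return_message_types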
letta/schemas/letta_message_content.py
CHANGED
@@ -1,6 +1,7 @@
 from enum import Enum
-from typing import Annotated, Literal, Optional, Union
+from typing import Annotated, List, Literal, Optional, Union
 
+from openai.types import Reasoning
 from pydantic import BaseModel, Field
 
 
@@ -9,9 +10,13 @@ class MessageContentType(str, Enum):
     image = "image"
     tool_call = "tool_call"
     tool_return = "tool_return"
+    # For Anthropic extended thinking
     reasoning = "reasoning"
     redacted_reasoning = "redacted_reasoning"
+    # Generic "hidden" (unsavailable) reasoning
     omitted_reasoning = "omitted_reasoning"
+    # For OpenAI Responses API
+    summarized_reasoning = "summarized_reasoning"
 
 
 class MessageContent(BaseModel):
@@ -34,6 +39,9 @@ class MessageContent(BaseModel):
 class TextContent(MessageContent):
     type: Literal[MessageContentType.text] = Field(default=MessageContentType.text, description="The type of the message.")
     text: str = Field(..., description="The text content of the message.")
+    signature: Optional[str] = Field(
+        default=None, description="Stores a unique identifier for any reasoning associated with this text content."
+    )
 
     def to_text(self) -> str:
         """Return the text content."""
@@ -183,6 +191,9 @@ class ToolCallContent(MessageContent):
     input: dict = Field(
         ..., description="The parameters being passed to the tool, structured as a dictionary of parameter names to values."
     )
+    signature: Optional[str] = Field(
+        default=None, description="Stores a unique identifier for any reasoning associated with this tool call."
+    )
 
     def to_text(self) -> str:
         """Return a text representation of the tool call."""
@@ -207,6 +218,8 @@ class ToolReturnContent(MessageContent):
 
 
 class ReasoningContent(MessageContent):
+    """Sent via the Anthropic Messages API"""
+
     type: Literal[MessageContentType.reasoning] = Field(
         default=MessageContentType.reasoning, description="Indicates this is a reasoning/intermediate step."
     )
@@ -220,6 +233,8 @@ class ReasoningContent(MessageContent):
 
 
 class RedactedReasoningContent(MessageContent):
+    """Sent via the Anthropic Messages API"""
+
     type: Literal[MessageContentType.redacted_reasoning] = Field(
         default=MessageContentType.redacted_reasoning, description="Indicates this is a redacted thinking step."
     )
@@ -227,16 +242,70 @@ class RedactedReasoningContent(MessageContent):
 
 
 class OmittedReasoningContent(MessageContent):
+    """A placeholder for reasoning content we know is present, but isn't returned by the provider (e.g. OpenAI GPT-5 on ChatCompletions)"""
+
     type: Literal[MessageContentType.omitted_reasoning] = Field(
         default=MessageContentType.omitted_reasoning, description="Indicates this is an omitted reasoning step."
     )
+    signature: Optional[str] = Field(default=None, description="A unique identifier for this reasoning step.")
     # NOTE: dropping because we don't track this kind of information for the other reasoning types
     # tokens: int = Field(..., description="The reasoning token count for intermediate reasoning content.")
 
 
+class SummarizedReasoningContentPart(BaseModel):
+    index: int = Field(..., description="The index of the summary part.")
+    text: str = Field(..., description="The text of the summary part.")
+
+
+class SummarizedReasoningContent(MessageContent):
+    """The style of reasoning content returned by the OpenAI Responses API"""
+
+    # TODO consider expanding ReasoningContent to support this superset?
+    # Or alternatively, rename `ReasoningContent` to `AnthropicReasoningContent`,
+    # and rename this one to `OpenAIReasoningContent`?
+
+    # NOTE: I think the argument for putting thie in ReasoningContent as an additional "summary" field is that it keeps the
+    # rendering and GET / listing code a lot simpler, you just need to know how to render "TextContent" and "ReasoningContent"
+    # vs breaking out into having to know how to render additional types
+    # NOTE: I think the main issue is that we need to track provenance of which provider the reasoning came from
+    # so that we don't attempt eg to put Anthropic encrypted reasoning into a GPT-5 responses payload
+    type: Literal[MessageContentType.summarized_reasoning] = Field(
+        default=MessageContentType.summarized_reasoning, description="Indicates this is a summarized reasoning step."
+    )
+
+    # OpenAI requires holding a string
+    id: str = Field(..., description="The unique identifier for this reasoning step.")  # NOTE: I don't think this is actually needed?
+    # OpenAI returns a list of summary objects, each a string
+    # Straying a bit from the OpenAI schema so that we can enforce ordering on the deltas that come out
+    # summary: List[str] = Field(..., description="Summaries of the reasoning content.")
+    summary: List[SummarizedReasoningContentPart] = Field(..., description="Summaries of the reasoning content.")
+    encrypted_content: str = Field(default=None, description="The encrypted reasoning content.")
+
+    # Temporary stop-gap until the SDKs are updated
+    def to_reasoning_content(self) -> Optional[ReasoningContent]:
+        # Merge the summary parts with a '\n' join
+        parts = [s.text for s in self.summary if s.text != ""]
+        if not parts or len(parts) == 0:
+            return None
+        else:
+            combined_summary = "\n\n".join(parts)
+            return ReasoningContent(
+                is_native=True,
+                reasoning=combined_summary,
+                signature=self.encrypted_content,
+            )
+
+
 LettaMessageContentUnion = Annotated[
     Union[
-        TextContent, ImageContent, ToolCallContent, ToolReturnContent, ReasoningContent, RedactedReasoningContent, OmittedReasoningContent
+        TextContent,
+        ImageContent,
+        ToolCallContent,
+        ToolReturnContent,
+        ReasoningContent,
+        RedactedReasoningContent,
+        OmittedReasoningContent,
+        SummarizedReasoningContent,
     ],
     Field(discriminator="type"),
 ]
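
The stop-gap to_reasoning_content converter collapses the ordered summary parts into a single Anthropic-style ReasoningContent, skipping empty parts and joining the rest with blank lines (despite the '\n' in the comment, the code joins with "\n\n"). A sketch with made-up IDs and text:

    from letta.schemas.letta_message_content import (
        SummarizedReasoningContent,
        SummarizedReasoningContentPart,
    )

    summarized = SummarizedReasoningContent(
        id="rs_123",  # illustrative Responses-API reasoning id
        summary=[
            SummarizedReasoningContentPart(index=0, text="Plan the query."),
            SummarizedReasoningContentPart(index=1, text=""),  # empty parts are dropped
            SummarizedReasoningContentPart(index=2, text="Execute and verify."),
        ],
        encrypted_content="opaque-provider-blob",
    )

    reasoning = summarized.to_reasoning_content()
    assert reasoning is not None
    assert reasoning.reasoning == "Plan the query.\n\nExecute and verify."
    assert reasoning.signature == "opaque-provider-blob"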
letta/schemas/letta_stop_reason.py
CHANGED
@@ -3,7 +3,7 @@ from typing import Literal
 
 from pydantic import BaseModel, Field
 
-from letta.schemas.enums import JobStatus
+from letta.schemas.enums import RunStatus
 
 
 class StopReasonType(str, Enum):
@@ -19,14 +19,14 @@ class StopReasonType(str, Enum):
     requires_approval = "requires_approval"
 
     @property
-    def run_status(self) -> JobStatus:
+    def run_status(self) -> RunStatus:
         if self in (
             StopReasonType.end_turn,
             StopReasonType.max_steps,
             StopReasonType.tool_rule,
             StopReasonType.requires_approval,
         ):
-            return JobStatus.completed
+            return RunStatus.completed
         elif self in (
             StopReasonType.error,
             StopReasonType.invalid_tool_call,
@@ -34,9 +34,9 @@ class StopReasonType(str, Enum):
             StopReasonType.invalid_llm_response,
             StopReasonType.llm_api_error,
         ):
-            return JobStatus.failed
+            return RunStatus.failed
         elif self == StopReasonType.cancelled:
-            return JobStatus.cancelled
+            return RunStatus.cancelled
         else:
             raise ValueError("Unknown StopReasonType")
 
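With the enum swap, each stop reason now resolves directly to one of the new RunStatus values; per the branches above:

    from letta.schemas.enums import RunStatus
    from letta.schemas.letta_stop_reason import StopReasonType

    assert StopReasonType.end_turn.run_status == RunStatus.completed
    assert StopReasonType.llm_api_error.run_status == RunStatus.failed
    assert StopReasonType.cancelled.run_status == RunStatus.cancelled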
letta/schemas/llm_config.py
CHANGED
@@ -1,10 +1,10 @@
-from typing import Literal, Optional
+from typing import TYPE_CHECKING, Literal, Optional
 
 from pydantic import BaseModel, ConfigDict, Field, model_validator
 
 from letta.constants import LETTA_MODEL_ENDPOINT
 from letta.log import get_logger
-from letta.schemas.enums import ProviderCategory
+from letta.schemas.enums import AgentType, ProviderCategory
 
 logger = get_logger(__name__)
 
@@ -259,7 +259,57 @@ class LLMConfig(BaseModel):
         return config.model_endpoint_type == "openai" and config.model.startswith("gpt-5")
 
     @classmethod
-    def apply_reasoning_setting_to_config(cls, config: "LLMConfig", reasoning: bool):
+    def apply_reasoning_setting_to_config(cls, config: "LLMConfig", reasoning: bool, agent_type: Optional["AgentType"] = None):
+        """
+        Normalize reasoning-related flags on the config based on the requested
+        "reasoning" setting, model capabilities, and optionally the agent type.
+
+        For AgentType.letta_v1_agent, we enforce stricter semantics:
+        - OpenAI native reasoning (o1/o3/o4/gpt-5): force enabled (non-togglable)
+        - Anthropic (claude 3.7 / 4): toggle honored (default on elsewhere)
+        - Google Gemini (2.5 family): force disabled until native reasoning supported
+        - All others: disabled (no simulated reasoning via kwargs)
+        """
+        # V1 agent policy: do not allow simulated reasoning for non-native models
+        if agent_type is not None and agent_type == AgentType.letta_v1_agent:
+            # OpenAI native reasoning models: always on
+            if cls.is_openai_reasoning_model(config):
+                config.put_inner_thoughts_in_kwargs = False
+                config.enable_reasoner = True
+                if config.reasoning_effort is None:
+                    if config.model.startswith("gpt-5"):
+                        config.reasoning_effort = "minimal"
+                    else:
+                        config.reasoning_effort = "medium"
+                if config.model.startswith("gpt-5") and config.verbosity is None:
+                    config.verbosity = "medium"
+                return config
+
+            # Anthropic 3.7/4 and Gemini: toggle honored
+            is_google_reasoner_with_configurable_thinking = (
+                cls.is_google_vertex_reasoning_model(config) or cls.is_google_ai_reasoning_model(config)
+            ) and not config.model.startswith("gemini-2.5-pro")
+            if cls.is_anthropic_reasoning_model(config) or is_google_reasoner_with_configurable_thinking:
+                config.enable_reasoner = bool(reasoning)
+                config.put_inner_thoughts_in_kwargs = False
+                if config.enable_reasoner and config.max_reasoning_tokens == 0:
+                    config.max_reasoning_tokens = 1024
+                return config
+
+            # Google Gemini 2.5 Pro: not possible to disable
+            if config.model.startswith("gemini-2.5-pro"):
+                config.put_inner_thoughts_in_kwargs = False
+                config.enable_reasoner = True
+                if config.max_reasoning_tokens == 0:
+                    config.max_reasoning_tokens = 1024
+                return config
+
+            # Everything else: disabled (no inner_thoughts-in-kwargs simulation)
+            config.put_inner_thoughts_in_kwargs = False
+            config.enable_reasoner = False
+            config.max_reasoning_tokens = 0
+            return config
+
         if not reasoning:
             if cls.is_openai_reasoning_model(config):
                 logger.warning("Reasoning cannot be disabled for OpenAI o1/o3/gpt-5 models")
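
Under the new agent_type branch, a letta_v1_agent forces native reasoning on for OpenAI reasoning models even when the caller asks to disable it. A sketch, assuming LLMConfig validates with just these fields:

    from letta.schemas.enums import AgentType
    from letta.schemas.llm_config import LLMConfig

    config = LLMConfig(model="gpt-5", model_endpoint_type="openai", context_window=128000)
    LLMConfig.apply_reasoning_setting_to_config(config, reasoning=False, agent_type=AgentType.letta_v1_agent)
    assert config.enable_reasoner is True
    assert config.reasoning_effort == "minimal"  # the gpt-5 default in the v1 branch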
letta/schemas/memory.py
CHANGED
@@ -271,7 +271,7 @@ class Memory(BaseModel, validate_assignment=True):
         raw_type = self.agent_type.value if hasattr(self.agent_type, "value") else (self.agent_type or "")
         norm_type = raw_type.lower()
         is_react = norm_type in ("react_agent", "workflow_agent")
-        is_line_numbered = norm_type in ("sleeptime_agent", "memgpt_v2_agent")
+        is_line_numbered = norm_type in ("sleeptime_agent", "memgpt_v2_agent", "letta_v1_agent")
 
         # Memory blocks (not for react/workflow). Always include wrapper for preview/tests.
         if not is_react:
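
letta_v1_agent now joins sleeptime_agent and memgpt_v2_agent in rendering memory blocks with line numbers. The actual template lives in Memory.compile; as a toy illustration of what line-numbered block values mean (not Letta's real renderer):

    def number_lines(block_value: str) -> str:
        # Toy renderer: prefix each line with a 1-based index.
        return "\n".join(f"{i + 1}: {line}" for i, line in enumerate(block_value.splitlines()))

    print(number_lines("persona: helpful\ntone: concise"))
    # 1: persona: helpful
    # 2: tone: concise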