letta-nightly 0.6.27.dev20250219104103__py3-none-any.whl → 0.6.28.dev20250220163833__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of letta-nightly has been flagged as potentially problematic.
- letta/__init__.py +1 -1
- letta/agent.py +13 -1
- letta/client/client.py +2 -0
- letta/constants.py +2 -0
- letta/functions/schema_generator.py +6 -6
- letta/helpers/converters.py +153 -0
- letta/helpers/tool_rule_solver.py +11 -1
- letta/llm_api/anthropic.py +10 -5
- letta/llm_api/aws_bedrock.py +1 -1
- letta/llm_api/azure_openai_constants.py +1 -0
- letta/llm_api/deepseek.py +303 -0
- letta/llm_api/llm_api_tools.py +81 -1
- letta/llm_api/openai.py +13 -0
- letta/local_llm/chat_completion_proxy.py +15 -2
- letta/local_llm/lmstudio/api.py +75 -1
- letta/orm/__init__.py +1 -0
- letta/orm/agent.py +14 -5
- letta/orm/custom_columns.py +31 -110
- letta/orm/identity.py +39 -0
- letta/orm/organization.py +2 -0
- letta/schemas/agent.py +13 -1
- letta/schemas/identity.py +44 -0
- letta/schemas/llm_config.py +2 -0
- letta/schemas/message.py +1 -1
- letta/schemas/openai/chat_completion_response.py +2 -0
- letta/schemas/providers.py +72 -1
- letta/schemas/tool_rule.py +9 -1
- letta/serialize_schemas/__init__.py +1 -0
- letta/serialize_schemas/agent.py +36 -0
- letta/serialize_schemas/base.py +12 -0
- letta/serialize_schemas/custom_fields.py +69 -0
- letta/serialize_schemas/message.py +15 -0
- letta/server/db.py +111 -0
- letta/server/rest_api/app.py +8 -0
- letta/server/rest_api/interface.py +114 -9
- letta/server/rest_api/routers/v1/__init__.py +2 -0
- letta/server/rest_api/routers/v1/agents.py +7 -1
- letta/server/rest_api/routers/v1/identities.py +111 -0
- letta/server/server.py +13 -116
- letta/services/agent_manager.py +54 -6
- letta/services/block_manager.py +1 -1
- letta/services/helpers/agent_manager_helper.py +15 -0
- letta/services/identity_manager.py +140 -0
- letta/services/job_manager.py +1 -1
- letta/services/message_manager.py +1 -1
- letta/services/organization_manager.py +1 -1
- letta/services/passage_manager.py +1 -1
- letta/services/provider_manager.py +1 -1
- letta/services/sandbox_config_manager.py +1 -1
- letta/services/source_manager.py +1 -1
- letta/services/step_manager.py +1 -1
- letta/services/tool_manager.py +1 -1
- letta/services/user_manager.py +1 -1
- letta/settings.py +3 -0
- letta/tracing.py +205 -0
- letta/utils.py +4 -0
- {letta_nightly-0.6.27.dev20250219104103.dist-info → letta_nightly-0.6.28.dev20250220163833.dist-info}/METADATA +9 -2
- {letta_nightly-0.6.27.dev20250219104103.dist-info → letta_nightly-0.6.28.dev20250220163833.dist-info}/RECORD +61 -48
- {letta_nightly-0.6.27.dev20250219104103.dist-info → letta_nightly-0.6.28.dev20250220163833.dist-info}/LICENSE +0 -0
- {letta_nightly-0.6.27.dev20250219104103.dist-info → letta_nightly-0.6.28.dev20250220163833.dist-info}/WHEEL +0 -0
- {letta_nightly-0.6.27.dev20250219104103.dist-info → letta_nightly-0.6.28.dev20250220163833.dist-info}/entry_points.txt +0 -0
letta/schemas/agent.py
CHANGED
@@ -84,6 +84,9 @@ class AgentState(OrmMetadataBase, validate_assignment=True):
     template_id: Optional[str] = Field(None, description="The id of the template the agent belongs to.")
     base_template_id: Optional[str] = Field(None, description="The base template id of the agent.")
 
+    # Identity
+    identifier_key: Optional[str] = Field(None, description="The identifier key belonging to the identity associated with this agent.")
+
     # An advanced configuration that makes it so this agent does not remember any previous messages
     message_buffer_autoclear: bool = Field(
         False,
@@ -129,6 +132,9 @@ class CreateAgent(BaseModel, validate_assignment=True): #
     include_multi_agent_tools: bool = Field(
         False, description="If true, attaches the Letta multi-agent tools (e.g. sending a message to another agent)."
     )
+    include_base_tool_rules: bool = Field(
+        True, description="If true, attaches the Letta base tool rules (e.g. deny all tools not explicitly allowed)."
+    )
     description: Optional[str] = Field(None, description="The description of the agent.")
     metadata: Optional[Dict] = Field(None, description="The metadata of the agent.")
     model: Optional[str] = Field(
@@ -143,7 +149,11 @@ class CreateAgent(BaseModel, validate_assignment=True): #
     embedding_chunk_size: Optional[int] = Field(DEFAULT_EMBEDDING_CHUNK_SIZE, description="The embedding chunk size used by the agent.")
     from_template: Optional[str] = Field(None, description="The template id used to configure the agent")
     template: bool = Field(False, description="Whether the agent is a template")
-    project: Optional[str] = Field(
+    project: Optional[str] = Field(
+        None,
+        deprecated=True,
+        description="Deprecated: Project should now be passed via the X-Project header instead of in the request body. If using the sdk, this can be done via the new x_project field below.",
+    )
     tool_exec_environment_variables: Optional[Dict[str, str]] = Field(
         None, description="The environment variables for tool execution specific to this agent."
     )
@@ -151,6 +161,7 @@ class CreateAgent(BaseModel, validate_assignment=True): #
     project_id: Optional[str] = Field(None, description="The id of the project the agent belongs to.")
     template_id: Optional[str] = Field(None, description="The id of the template the agent belongs to.")
     base_template_id: Optional[str] = Field(None, description="The base template id of the agent.")
+    identifier_key: Optional[str] = Field(None, description="The identifier key belonging to the identity associated with this agent.")
     message_buffer_autoclear: bool = Field(
         False,
         description="If set to True, the agent will not remember previous messages (though the agent will still retain state via core memory blocks and archival/recall memory). Not recommended unless you have an advanced use case.",
@@ -225,6 +236,7 @@ class UpdateAgent(BaseModel):
     project_id: Optional[str] = Field(None, description="The id of the project the agent belongs to.")
     template_id: Optional[str] = Field(None, description="The id of the template the agent belongs to.")
     base_template_id: Optional[str] = Field(None, description="The base template id of the agent.")
+    identifier_key: Optional[str] = Field(None, description="The identifier key belonging to the identity associated with this agent.")
     message_buffer_autoclear: Optional[bool] = Field(
         None,
         description="If set to True, the agent will not remember previous messages (though the agent will still retain state via core memory blocks and archival/recall memory). Not recommended unless you have an advanced use case.",
letta/schemas/identity.py
ADDED
@@ -0,0 +1,44 @@
+from enum import Enum
+from typing import List, Optional
+
+from pydantic import Field
+
+from letta.schemas.agent import AgentState
+from letta.schemas.letta_base import LettaBase
+
+
+class IdentityType(str, Enum):
+    """
+    Enum to represent the type of the identity.
+    """
+
+    org = "org"
+    user = "user"
+    other = "other"
+
+
+class IdentityBase(LettaBase):
+    __id_prefix__ = "identity"
+
+
+class Identity(IdentityBase):
+    id: str = IdentityBase.generate_id_field()
+    identifier_key: str = Field(..., description="External, user-generated identifier key of the identity.")
+    name: str = Field(..., description="The name of the identity.")
+    identity_type: IdentityType = Field(..., description="The type of the identity.")
+    project_id: Optional[str] = Field(None, description="The project id of the identity, if applicable.")
+    agents: List[AgentState] = Field(..., description="The agents associated with the identity.")
+
+
+class IdentityCreate(LettaBase):
+    identifier_key: str = Field(..., description="External, user-generated identifier key of the identity.")
+    name: str = Field(..., description="The name of the identity.")
+    identity_type: IdentityType = Field(..., description="The type of the identity.")
+    project_id: Optional[str] = Field(None, description="The project id of the identity, if applicable.")
+    agent_ids: Optional[List[str]] = Field(None, description="The agent ids that are associated with the identity.")
+
+
+class IdentityUpdate(LettaBase):
+    name: Optional[str] = Field(None, description="The name of the identity.")
+    identity_type: Optional[IdentityType] = Field(None, description="The type of the identity.")
+    agent_ids: Optional[List[str]] = Field(None, description="The agent ids that are associated with the identity.")
letta/schemas/llm_config.py
CHANGED
@@ -33,6 +33,7 @@ class LLMConfig(BaseModel):
         "webui-legacy",
         "lmstudio",
         "lmstudio-legacy",
+        "lmstudio-chatcompletions",
         "llamacpp",
         "koboldcpp",
         "vllm",
@@ -40,6 +41,7 @@ class LLMConfig(BaseModel):
         "mistral",
         "together", # completions endpoint
         "bedrock",
+        "deepseek",
     ] = Field(..., description="The endpoint type for the model.")
     model_endpoint: Optional[str] = Field(None, description="The endpoint for the model.")
     model_wrapper: Optional[str] = Field(None, description="The wrapper for the model.")
letta/schemas/message.py
CHANGED
@@ -647,7 +647,7 @@ class Message(BaseMessage):
         # role: str ('user' or 'model')
 
         if self.role != "tool" and self.name is not None:
-
+            warnings.warn(f"Using Google AI with non-null 'name' field ({self.name}) not yet supported.")
 
         if self.role == "system":
             # NOTE: Gemini API doesn't have a 'system' role, use 'user' instead
letta/schemas/openai/chat_completion_response.py
CHANGED
@@ -39,6 +39,7 @@ class Message(BaseModel):
     tool_calls: Optional[List[ToolCall]] = None
     role: str
     function_call: Optional[FunctionCall] = None # Deprecated
+    reasoning_content: Optional[str] = None # Used in newer reasoning APIs
 
 
 class Choice(BaseModel):
@@ -115,6 +116,7 @@ class MessageDelta(BaseModel):
     """
 
     content: Optional[str] = None
+    reasoning_content: Optional[str] = None
     tool_calls: Optional[List[ToolCallDelta]] = None
     role: Optional[str] = None
     function_call: Optional[FunctionCallDelta] = None # Deprecated
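To illustrate the intent of the new field, a hedged sketch of consuming reasoning_content from streamed chunks. The delta shape follows the MessageDelta schema above; the stream iterable itself is hypothetical:

    # Sketch: accumulate visible text and reasoning text from a stream of
    # chunk responses (hypothetical `stream` iterable of chunk objects).
    content_parts, reasoning_parts = [], []
    for chunk in stream:
        delta = chunk.choices[0].delta  # a MessageDelta
        if delta.content:
            content_parts.append(delta.content)
        if delta.reasoning_content:  # new in this release
            reasoning_parts.append(delta.reasoning_content)

    answer = "".join(content_parts)
    reasoning = "".join(reasoning_parts)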
letta/schemas/providers.py
CHANGED
@@ -211,6 +211,75 @@ class OpenAIProvider(Provider):
         return None
 
 
+class DeepSeekProvider(OpenAIProvider):
+    """
+    DeepSeek ChatCompletions API is similar to OpenAI's reasoning API,
+    but with slight differences:
+    * For example, DeepSeek's API requires perfect interleaving of user/assistant
+    * It also does not support native function calling
+    """
+
+    name: str = "deepseek"
+    base_url: str = Field("https://api.deepseek.com/v1", description="Base URL for the DeepSeek API.")
+    api_key: str = Field(..., description="API key for the DeepSeek API.")
+
+    def get_model_context_window_size(self, model_name: str) -> Optional[int]:
+        # DeepSeek doesn't return context window in the model listing,
+        # so these are hardcoded from their website
+        if model_name == "deepseek-reasoner":
+            return 64000
+        elif model_name == "deepseek-chat":
+            return 64000
+        else:
+            return None
+
+    def list_llm_models(self) -> List[LLMConfig]:
+        from letta.llm_api.openai import openai_get_model_list
+
+        response = openai_get_model_list(self.base_url, api_key=self.api_key)
+
+        if "data" in response:
+            data = response["data"]
+        else:
+            data = response
+
+        configs = []
+        for model in data:
+            assert "id" in model, f"DeepSeek model missing 'id' field: {model}"
+            model_name = model["id"]
+
+            # In case DeepSeek starts supporting it in the future:
+            if "context_length" in model:
+                # Context length is returned in OpenRouter as "context_length"
+                context_window_size = model["context_length"]
+            else:
+                context_window_size = self.get_model_context_window_size(model_name)
+
+            if not context_window_size:
+                warnings.warn(f"Couldn't find context window size for model {model_name}")
+                continue
+
+            # Not used for deepseek-reasoner, but otherwise is true
+            put_inner_thoughts_in_kwargs = False if model_name == "deepseek-reasoner" else True
+
+            configs.append(
+                LLMConfig(
+                    model=model_name,
+                    model_endpoint_type="deepseek",
+                    model_endpoint=self.base_url,
+                    context_window=context_window_size,
+                    handle=self.get_handle(model_name),
+                    put_inner_thoughts_in_kwargs=put_inner_thoughts_in_kwargs,
+                )
+            )
+
+        return configs
+
+    def list_embedding_models(self) -> List[EmbeddingConfig]:
+        # No embeddings supported
+        return []
+
+
 class LMStudioOpenAIProvider(OpenAIProvider):
     name: str = "lmstudio-openai"
     base_url: str = Field(..., description="Base URL for the LMStudio OpenAI API.")
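A hedged sketch of using the new provider. The environment variable name is a placeholder for illustration; the class and method names come from the hunk above:

    import os
    from letta.schemas.providers import DeepSeekProvider

    # Placeholder env var name used for illustration
    provider = DeepSeekProvider(api_key=os.environ["DEEPSEEK_API_KEY"])

    for cfg in provider.list_llm_models():
        print(cfg.handle, cfg.context_window)  # e.g. a deepseek-chat config with 64000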
@@ -945,4 +1014,6 @@ class AnthropicBedrockProvider(Provider):
         return bedrock_get_model_context_window(model_name)
 
     def get_handle(self, model_name: str) -> str:
-
+        print(model_name)
+        model = model_name.split(".")[-1]
+        return f"bedrock/{model}"
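For intuition, the new get_handle keeps everything after the last dot of the Bedrock model id and prefixes it with "bedrock/". The model id below is illustrative:

    # Illustrative only: a typical Bedrock model id
    model_name = "anthropic.claude-3-5-sonnet-20241022-v2:0"
    model = model_name.split(".")[-1]  # "claude-3-5-sonnet-20241022-v2:0"
    handle = f"bedrock/{model}"        # "bedrock/claude-3-5-sonnet-20241022-v2:0"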
letta/schemas/tool_rule.py
CHANGED
@@ -48,7 +48,15 @@ class TerminalToolRule(BaseToolRule):
     type: Literal[ToolRuleType.exit_loop] = ToolRuleType.exit_loop
 
 
+class ContinueToolRule(BaseToolRule):
+    """
+    Represents a tool rule configuration where if this tool gets called, it must continue the agent loop.
+    """
+
+    type: Literal[ToolRuleType.continue_loop] = ToolRuleType.continue_loop
+
+
 ToolRule = Annotated[
-    Union[ChildToolRule, InitToolRule, TerminalToolRule, ConditionalToolRule],
+    Union[ChildToolRule, InitToolRule, TerminalToolRule, ConditionalToolRule, ContinueToolRule],
     Field(discriminator="type"),
 ]
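A hedged sketch of the new rule next to an existing one. The tool names are placeholders; BaseToolRule's tool_name field is assumed from the existing rule classes:

    from letta.schemas.tool_rule import ContinueToolRule, TerminalToolRule

    # Placeholder tool names: after "plan_next_step" runs the loop must
    # continue; after "send_message" runs the loop exits.
    tool_rules = [
        ContinueToolRule(tool_name="plan_next_step"),
        TerminalToolRule(tool_name="send_message"),
    ]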
letta/serialize_schemas/__init__.py
ADDED
@@ -0,0 +1 @@
+from letta.serialize_schemas.agent import SerializedAgentSchema
letta/serialize_schemas/agent.py
ADDED
@@ -0,0 +1,36 @@
+from marshmallow import fields
+
+from letta.orm import Agent
+from letta.serialize_schemas.base import BaseSchema
+from letta.serialize_schemas.custom_fields import EmbeddingConfigField, LLMConfigField, ToolRulesField
+from letta.serialize_schemas.message import SerializedMessageSchema
+
+
+class SerializedAgentSchema(BaseSchema):
+    """
+    Marshmallow schema for serializing/deserializing Agent objects.
+    Excludes relational fields.
+    """
+
+    llm_config = LLMConfigField()
+    embedding_config = EmbeddingConfigField()
+    tool_rules = ToolRulesField()
+
+    messages = fields.List(fields.Nested(SerializedMessageSchema))
+
+    def __init__(self, *args, session=None, **kwargs):
+        super().__init__(*args, **kwargs)
+        if session:
+            self.session = session
+
+            # propagate session to nested schemas
+            for field_name, field_obj in self.fields.items():
+                if isinstance(field_obj, fields.List) and hasattr(field_obj.inner, "schema"):
+                    field_obj.inner.schema.session = session
+                elif hasattr(field_obj, "schema"):
+                    field_obj.schema.session = session
+
+    class Meta(BaseSchema.Meta):
+        model = Agent
+        # TODO: Serialize these as well...
+        exclude = ("tools", "sources", "core_memory", "tags", "source_passages", "agent_passages", "organization")
letta/serialize_schemas/base.py
ADDED
@@ -0,0 +1,12 @@
+from marshmallow_sqlalchemy import SQLAlchemyAutoSchema
+
+
+class BaseSchema(SQLAlchemyAutoSchema):
+    """
+    Base schema for all SQLAlchemy models.
+    This ensures all schemas share the same session.
+    """
+
+    class Meta:
+        include_relationships = True
+        load_instance = True
letta/serialize_schemas/custom_fields.py
ADDED
@@ -0,0 +1,69 @@
+from marshmallow import fields
+
+from letta.helpers.converters import (
+    deserialize_embedding_config,
+    deserialize_llm_config,
+    deserialize_tool_calls,
+    deserialize_tool_rules,
+    serialize_embedding_config,
+    serialize_llm_config,
+    serialize_tool_calls,
+    serialize_tool_rules,
+)
+
+
+class PydanticField(fields.Field):
+    """Generic Marshmallow field for handling Pydantic models."""
+
+    def __init__(self, pydantic_class, **kwargs):
+        self.pydantic_class = pydantic_class
+        super().__init__(**kwargs)
+
+    def _serialize(self, value, attr, obj, **kwargs):
+        return value.model_dump() if value else None
+
+    def _deserialize(self, value, attr, data, **kwargs):
+        return self.pydantic_class(**value) if value else None
+
+
+class LLMConfigField(fields.Field):
+    """Marshmallow field for handling LLMConfig serialization."""
+
+    def _serialize(self, value, attr, obj, **kwargs):
+        return serialize_llm_config(value)
+
+    def _deserialize(self, value, attr, data, **kwargs):
+        return deserialize_llm_config(value)
+
+
+class EmbeddingConfigField(fields.Field):
+    """Marshmallow field for handling EmbeddingConfig serialization."""
+
+    def _serialize(self, value, attr, obj, **kwargs):
+        return serialize_embedding_config(value)
+
+    def _deserialize(self, value, attr, data, **kwargs):
+        return deserialize_embedding_config(value)
+
+
+class ToolRulesField(fields.List):
+    """Custom Marshmallow field to handle a list of ToolRules."""
+
+    def __init__(self, **kwargs):
+        super().__init__(fields.Dict(), **kwargs)
+
+    def _serialize(self, value, attr, obj, **kwargs):
+        return serialize_tool_rules(value)
+
+    def _deserialize(self, value, attr, data, **kwargs):
+        return deserialize_tool_rules(value)
+
+
+class ToolCallField(fields.Field):
+    """Marshmallow field for handling a list of OpenAI ToolCall objects."""
+
+    def _serialize(self, value, attr, obj, **kwargs):
+        return serialize_tool_calls(value)
+
+    def _deserialize(self, value, attr, data, **kwargs):
+        return deserialize_tool_calls(value)
letta/serialize_schemas/message.py
ADDED
@@ -0,0 +1,15 @@
+from letta.orm.message import Message
+from letta.serialize_schemas.base import BaseSchema
+from letta.serialize_schemas.custom_fields import ToolCallField
+
+
+class SerializedMessageSchema(BaseSchema):
+    """
+    Marshmallow schema for serializing/deserializing Message objects.
+    """
+
+    tool_calls = ToolCallField()
+
+    class Meta(BaseSchema.Meta):
+        model = Message
+        exclude = ("step", "job_message")
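Taken together, a hedged sketch of how these serialization schemas might be driven, using the db_context helper added in letta/server/db.py below. The agent lookup is hypothetical; only the SerializedAgentSchema constructor signature is from the diff:

    from letta.orm import Agent
    from letta.serialize_schemas import SerializedAgentSchema
    from letta.server.db import db_context

    with db_context() as session:
        schema = SerializedAgentSchema(session=session)

        agent = session.get(Agent, "agent-1234")   # hypothetical lookup
        data = schema.dump(agent)                  # ORM Agent -> plain dict
        clone = schema.load(data, session=session) # dict -> ORM Agent instance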
letta/server/db.py
ADDED
@@ -0,0 +1,111 @@
+import os
+from contextlib import contextmanager
+
+from rich.console import Console
+from rich.panel import Panel
+from rich.text import Text
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+
+from letta.config import LettaConfig
+from letta.log import get_logger
+from letta.orm import Base
+
+# NOTE: hack to see if single session management works
+from letta.settings import settings
+
+config = LettaConfig.load()
+
+logger = get_logger(__name__)
+
+
+def print_sqlite_schema_error():
+    """Print a formatted error message for SQLite schema issues"""
+    console = Console()
+    error_text = Text()
+    error_text.append("Existing SQLite DB schema is invalid, and schema migrations are not supported for SQLite. ", style="bold red")
+    error_text.append("To have migrations supported between Letta versions, please run Letta with Docker (", style="white")
+    error_text.append("https://docs.letta.com/server/docker", style="blue underline")
+    error_text.append(") or use Postgres by setting ", style="white")
+    error_text.append("LETTA_PG_URI", style="yellow")
+    error_text.append(".\n\n", style="white")
+    error_text.append("If you wish to keep using SQLite, you can reset your database by removing the DB file with ", style="white")
+    error_text.append("rm ~/.letta/sqlite.db", style="yellow")
+    error_text.append(" or downgrade to your previous version of Letta.", style="white")
+
+    console.print(Panel(error_text, border_style="red"))
+
+
+@contextmanager
+def db_error_handler():
+    """Context manager for handling database errors"""
+    try:
+        yield
+    except Exception as e:
+        # Handle other SQLAlchemy errors
+        print(e)
+        print_sqlite_schema_error()
+        # raise ValueError(f"SQLite DB error: {str(e)}")
+        exit(1)
+
+
+if settings.letta_pg_uri_no_default:
+    print("Creating postgres engine")
+    config.recall_storage_type = "postgres"
+    config.recall_storage_uri = settings.letta_pg_uri_no_default
+    config.archival_storage_type = "postgres"
+    config.archival_storage_uri = settings.letta_pg_uri_no_default
+
+    # create engine
+    engine = create_engine(
+        settings.letta_pg_uri,
+        pool_size=settings.pg_pool_size,
+        max_overflow=settings.pg_max_overflow,
+        pool_timeout=settings.pg_pool_timeout,
+        pool_recycle=settings.pg_pool_recycle,
+        echo=settings.pg_echo,
+    )
+else:
+    # TODO: don't rely on config storage
+    engine_path = "sqlite:///" + os.path.join(config.recall_storage_path, "sqlite.db")
+    logger.info("Creating sqlite engine " + engine_path)
+
+    engine = create_engine(engine_path)
+
+    # Store the original connect method
+    original_connect = engine.connect
+
+    def wrapped_connect(*args, **kwargs):
+        with db_error_handler():
+            # Get the connection
+            connection = original_connect(*args, **kwargs)
+
+            # Store the original execution method
+            original_execute = connection.execute
+
+            # Wrap the execute method of the connection
+            def wrapped_execute(*args, **kwargs):
+                with db_error_handler():
+                    return original_execute(*args, **kwargs)
+
+            # Replace the connection's execute method
+            connection.execute = wrapped_execute
+
+            return connection
+
+    # Replace the engine's connect method
+    engine.connect = wrapped_connect
+
+    Base.metadata.create_all(bind=engine)
+
+
+def get_db():
+    db = SessionLocal()
+    try:
+        yield db
+    finally:
+        db.close()
+
+
+SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
+db_context = contextmanager(get_db)
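A hedged sketch of the intended usage pattern for the new module-level session helpers (the service managers in this release switch over to them; the query below is illustrative):

    from sqlalchemy import text
    from letta.server.db import db_context

    # db_context wraps get_db() as a context manager: it opens a Session,
    # yields it, and guarantees close() on exit.
    with db_context() as session:
        rows = session.execute(text("SELECT 1")).all()  # illustrative query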
letta/server/rest_api/app.py
CHANGED
@@ -231,6 +231,14 @@ def create_application() -> "FastAPI":
         allow_headers=["*"],
     )
 
+    # Set up OpenTelemetry tracing
+    endpoint = os.getenv("OTEL_EXPORTER_OTLP_ENDPOINT")
+    if endpoint:
+        print(f"▶ Using OTLP tracing with endpoint: {endpoint}")
+        from letta.tracing import setup_tracing
+
+        setup_tracing(endpoint=endpoint, service_name="memgpt-server")
+
     for route in v1_routes:
         app.include_router(route, prefix=API_PREFIX)
     # this gives undocumented routes for "latest" and bare api calls.