letta-nightly 0.5.5.dev20241122170833__py3-none-any.whl → 0.6.0.dev20241204051808__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of letta-nightly might be problematic.
- letta/__init__.py +2 -2
- letta/agent.py +155 -166
- letta/agent_store/chroma.py +2 -0
- letta/agent_store/db.py +1 -1
- letta/cli/cli.py +12 -8
- letta/cli/cli_config.py +1 -1
- letta/client/client.py +765 -137
- letta/config.py +2 -2
- letta/constants.py +10 -14
- letta/errors.py +12 -0
- letta/functions/function_sets/base.py +38 -1
- letta/functions/functions.py +40 -57
- letta/functions/helpers.py +0 -4
- letta/functions/schema_generator.py +279 -18
- letta/helpers/tool_rule_solver.py +6 -5
- letta/llm_api/helpers.py +99 -5
- letta/llm_api/openai.py +8 -2
- letta/local_llm/utils.py +13 -6
- letta/log.py +7 -9
- letta/main.py +1 -1
- letta/metadata.py +53 -38
- letta/o1_agent.py +1 -4
- letta/orm/__init__.py +2 -0
- letta/orm/block.py +7 -3
- letta/orm/blocks_agents.py +32 -0
- letta/orm/errors.py +8 -0
- letta/orm/mixins.py +8 -0
- letta/orm/organization.py +8 -1
- letta/orm/sandbox_config.py +56 -0
- letta/orm/sqlalchemy_base.py +68 -10
- letta/persistence_manager.py +1 -0
- letta/schemas/agent.py +57 -52
- letta/schemas/block.py +85 -26
- letta/schemas/blocks_agents.py +32 -0
- letta/schemas/enums.py +14 -0
- letta/schemas/letta_base.py +10 -1
- letta/schemas/letta_request.py +11 -23
- letta/schemas/letta_response.py +1 -2
- letta/schemas/memory.py +41 -76
- letta/schemas/message.py +3 -3
- letta/schemas/sandbox_config.py +114 -0
- letta/schemas/tool.py +37 -1
- letta/schemas/tool_rule.py +13 -5
- letta/server/rest_api/app.py +5 -4
- letta/server/rest_api/interface.py +12 -19
- letta/server/rest_api/routers/openai/assistants/threads.py +2 -3
- letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +0 -2
- letta/server/rest_api/routers/v1/__init__.py +4 -9
- letta/server/rest_api/routers/v1/agents.py +145 -61
- letta/server/rest_api/routers/v1/blocks.py +50 -5
- letta/server/rest_api/routers/v1/sandbox_configs.py +127 -0
- letta/server/rest_api/routers/v1/sources.py +8 -1
- letta/server/rest_api/routers/v1/tools.py +139 -13
- letta/server/rest_api/utils.py +6 -0
- letta/server/server.py +397 -340
- letta/server/static_files/assets/index-9fa459a2.js +1 -1
- letta/services/block_manager.py +23 -2
- letta/services/blocks_agents_manager.py +106 -0
- letta/services/per_agent_lock_manager.py +18 -0
- letta/services/sandbox_config_manager.py +256 -0
- letta/services/tool_execution_sandbox.py +352 -0
- letta/services/tool_manager.py +16 -22
- letta/services/tool_sandbox_env/.gitkeep +0 -0
- letta/settings.py +4 -0
- letta/utils.py +0 -7
- {letta_nightly-0.5.5.dev20241122170833.dist-info → letta_nightly-0.6.0.dev20241204051808.dist-info}/METADATA +8 -6
- {letta_nightly-0.5.5.dev20241122170833.dist-info → letta_nightly-0.6.0.dev20241204051808.dist-info}/RECORD +70 -60
- {letta_nightly-0.5.5.dev20241122170833.dist-info → letta_nightly-0.6.0.dev20241204051808.dist-info}/LICENSE +0 -0
- {letta_nightly-0.5.5.dev20241122170833.dist-info → letta_nightly-0.6.0.dev20241204051808.dist-info}/WHEEL +0 -0
- {letta_nightly-0.5.5.dev20241122170833.dist-info → letta_nightly-0.6.0.dev20241204051808.dist-info}/entry_points.txt +0 -0
letta/helpers/tool_rule_solver.py
CHANGED

@@ -2,11 +2,12 @@ from typing import Dict, List, Optional, Set

 from pydantic import BaseModel, Field

+from letta.schemas.enums import ToolRuleType
 from letta.schemas.tool_rule import (
     BaseToolRule,
+    ChildToolRule,
     InitToolRule,
     TerminalToolRule,
-    ToolRule,
 )


@@ -21,7 +22,7 @@ class ToolRulesSolver(BaseModel):
     init_tool_rules: List[InitToolRule] = Field(
         default_factory=list, description="Initial tool rules to be used at the start of tool execution."
     )
-    tool_rules: List[
+    tool_rules: List[ChildToolRule] = Field(
         default_factory=list, description="Standard tool rules for controlling execution sequence and allowed transitions."
     )
     terminal_tool_rules: List[TerminalToolRule] = Field(
@@ -33,11 +34,11 @@ class ToolRulesSolver(BaseModel):
         super().__init__(**kwargs)
         # Separate the provided tool rules into init, standard, and terminal categories
         for rule in tool_rules:
-            if
+            if rule.type == ToolRuleType.run_first:
                 self.init_tool_rules.append(rule)
-            elif
+            elif rule.type == ToolRuleType.constrain_child_tools:
                 self.tool_rules.append(rule)
-            elif
+            elif rule.type == ToolRuleType.exit_loop:
                 self.terminal_tool_rules.append(rule)

         # Validate the tool rules to ensure they form a DAG
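Note on the tool-rule change above: rules are now bucketed by a ToolRuleType enum carried on each rule (run_first, constrain_child_tools, exit_loop) instead of by rule class, and the standard rule class is now ChildToolRule. A minimal self-contained sketch of that dispatch pattern; the enum's string values and the Rule class below are assumptions for illustration, not Letta's actual definitions:

from enum import Enum
from typing import List, Tuple


class ToolRuleType(str, Enum):
    # Member names come from the diff; the string values here are assumed.
    run_first = "run_first"
    constrain_child_tools = "constrain_child_tools"
    exit_loop = "exit_loop"


class Rule:
    # Stand-in for Letta's InitToolRule / ChildToolRule / TerminalToolRule.
    def __init__(self, tool_name: str, type: ToolRuleType):
        self.tool_name = tool_name
        self.type = type


def categorize(rules: List[Rule]) -> Tuple[List[Rule], List[Rule], List[Rule]]:
    """Bucket rules by their type enum, mirroring what ToolRulesSolver.__init__ now does."""
    init_rules: List[Rule] = []
    child_rules: List[Rule] = []
    terminal_rules: List[Rule] = []
    for rule in rules:
        if rule.type == ToolRuleType.run_first:
            init_rules.append(rule)
        elif rule.type == ToolRuleType.constrain_child_tools:
            child_rules.append(rule)
        elif rule.type == ToolRuleType.exit_loop:
            terminal_rules.append(rule)
    return init_rules, child_rules, terminal_rules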
letta/llm_api/helpers.py
CHANGED

@@ -11,7 +11,55 @@ from letta.schemas.openai.chat_completion_response import ChatCompletionResponse
 from letta.utils import json_dumps, printd


-def convert_to_structured_output(openai_function: dict) -> dict:
+def _convert_to_structured_output_helper(property: dict) -> dict:
+    """Convert a single JSON schema property to structured output format (recursive)"""
+
+    if "type" not in property:
+        raise ValueError(f"Property {property} is missing a type")
+    param_type = property["type"]
+
+    if "description" not in property:
+        # raise ValueError(f"Property {property} is missing a description")
+        param_description = None
+    else:
+        param_description = property["description"]
+
+    if param_type == "object":
+        if "properties" not in property:
+            raise ValueError(f"Property {property} of type object is missing properties")
+        properties = property["properties"]
+        property_dict = {
+            "type": "object",
+            "properties": {k: _convert_to_structured_output_helper(v) for k, v in properties.items()},
+            "additionalProperties": False,
+            "required": list(properties.keys()),
+        }
+        if param_description is not None:
+            property_dict["description"] = param_description
+        return property_dict
+
+    elif param_type == "array":
+        if "items" not in property:
+            raise ValueError(f"Property {property} of type array is missing items")
+        items = property["items"]
+        property_dict = {
+            "type": "array",
+            "items": _convert_to_structured_output_helper(items),
+        }
+        if param_description is not None:
+            property_dict["description"] = param_description
+        return property_dict
+
+    else:
+        property_dict = {
+            "type": param_type,  # simple type
+        }
+        if param_description is not None:
+            property_dict["description"] = param_description
+        return property_dict
+
+
+def convert_to_structured_output(openai_function: dict, allow_optional: bool = False) -> dict:
     """Convert function call objects to structured output objects

     See: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas
@@ -22,17 +70,63 @@ def convert_to_structured_output(openai_function: dict) -> dict:
         "name": openai_function["name"],
         "description": description,
         "strict": True,
-        "parameters": {
+        "parameters": {
+            "type": "object",
+            "properties": {},
+            "additionalProperties": False,
+            "required": [],
+        },
     }

+    # This code needs to be able to handle nested properties
+    # For example, the param details may have "type" + "description",
+    # but if "type" is "object" we expected "properties", where each property has details
+    # and if "type" is "array" we expect "items": <type>
     for param, details in openai_function["parameters"]["properties"].items():
-
+
+        param_type = details["type"]
+        description = details["description"]
+
+        if param_type == "object":
+            if "properties" not in details:
+                # Structured outputs requires the properties on dicts be specified ahead of time
+                raise ValueError(f"Property {param} of type object is missing properties")
+            structured_output["parameters"]["properties"][param] = {
+                "type": "object",
+                "description": description,
+                "properties": {k: _convert_to_structured_output_helper(v) for k, v in details["properties"].items()},
+                "additionalProperties": False,
+                "required": list(details["properties"].keys()),
+            }
+
+        elif param_type == "array":
+            structured_output["parameters"]["properties"][param] = {
+                "type": "array",
+                "description": description,
+                "items": _convert_to_structured_output_helper(details["items"]),
+            }
+
+        else:
+            structured_output["parameters"]["properties"][param] = {
+                "type": param_type,  # simple type
+                "description": description,
+            }

         if "enum" in details:
             structured_output["parameters"]["properties"][param]["enum"] = details["enum"]

-
-
+    if not allow_optional:
+        # Add all properties to required list
+        structured_output["parameters"]["required"] = list(structured_output["parameters"]["properties"].keys())
+
+    else:
+        # See what parameters exist that aren't required
+        # Those are implied "optional" types
+        # For those types, turn each of them into a union type with "null"
+        # e.g.
+        # "type": "string" -> "type": ["string", "null"]
+        # TODO
+        raise NotImplementedError

     return structured_output

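Note on convert_to_structured_output above: with the recursive helper, nested object and array parameters are now converted rather than only flat ones, and every top-level property ends up in "required" unless allow_optional is passed (which is still unimplemented). A hand-traced example of what the new code appears to produce for a nested schema; the field names are illustrative and the expected output is read off the diff, not executed:

from letta.llm_api.helpers import convert_to_structured_output

openai_function = {
    "name": "submit_order",
    "description": "Submit an order",
    "parameters": {
        "type": "object",
        "properties": {
            "items": {
                "type": "array",
                "description": "Line items",
                "items": {
                    "type": "object",
                    "properties": {
                        "sku": {"type": "string", "description": "Product SKU"},
                        "qty": {"type": "integer", "description": "Quantity"},
                    },
                },
            },
        },
    },
}

result = convert_to_structured_output(openai_function)
# Expected shape, traced from the code above:
#   result["strict"] is True
#   result["parameters"]["required"] == ["items"]      # everything becomes required
#   inner = result["parameters"]["properties"]["items"]["items"]
#   inner["additionalProperties"] is False
#   inner["required"] == ["sku", "qty"]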
letta/llm_api/openai.py
CHANGED

@@ -477,7 +477,10 @@ def openai_chat_completions_request_stream(
     if "tools" in data:
         for tool in data["tools"]:
             # tool["strict"] = True
-
+            try:
+                tool["function"] = convert_to_structured_output(tool["function"])
+            except ValueError as e:
+                warnings.warn(f"Failed to convert tool function to structured output, tool={tool}, error={e}")

     # print(f"\n\n\n\nData[tools]: {json.dumps(data['tools'], indent=2)}")

@@ -533,7 +536,10 @@ def openai_chat_completions_request(

     if "tools" in data:
         for tool in data["tools"]:
-
+            try:
+                tool["function"] = convert_to_structured_output(tool["function"])
+            except ValueError as e:
+                warnings.warn(f"Failed to convert tool function to structured output, tool={tool}, error={e}")

     response_json = make_post_request(url, headers, data)
     return ChatCompletionResponse(**response_json)
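Note: both request paths above now degrade gracefully when a tool's JSON schema cannot be converted (for example, a property missing a "type"): the original function schema is kept and a warning is emitted instead of the whole request failing. A small standard-library sketch, not part of the diff, for surfacing those warnings as hard errors during testing:

import warnings

# Promote "Failed to convert tool function..." warnings to exceptions so a bad
# tool schema fails a test run instead of being silently logged.
with warnings.catch_warnings():
    warnings.simplefilter("error")
    # ... build data["tools"] and call the request helpers here ...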
letta/local_llm/utils.py
CHANGED

@@ -88,16 +88,21 @@ def num_tokens_from_functions(functions: List[dict], model: str = "gpt-4"):
     try:
         encoding = tiktoken.encoding_for_model(model)
     except KeyError:
-
+        from letta.utils import printd
+
+        printd(f"Warning: model not found. Using cl100k_base encoding.")
         encoding = tiktoken.get_encoding("cl100k_base")

     num_tokens = 0
     for function in functions:
         function_tokens = len(encoding.encode(function["name"]))
         if function["description"]:
-
+            if not isinstance(function["description"], str):
+                warnings.warn(f"Function {function['name']} has non-string description: {function['description']}")
+            else:
+                function_tokens += len(encoding.encode(function["description"]))
         else:
-
+            warnings.warn(f"Function {function['name']} has no description, function: {function}")

         if "parameters" in function:
             parameters = function["parameters"]
@@ -118,7 +123,7 @@ def num_tokens_from_functions(functions: List[dict], model: str = "gpt-4"):
                         function_tokens += 3
                         function_tokens += len(encoding.encode(o))
                 else:
-
+                    warnings.warn(f"num_tokens_from_functions: Unsupported field {field} in function {function}")
             function_tokens += 11

         num_tokens += function_tokens
@@ -212,8 +217,10 @@ def num_tokens_from_messages(messages: List[dict], model: str = "gpt-4") -> int:
         # print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
         return num_tokens_from_messages(messages, model="gpt-4-0613")
     else:
-
-
+        from letta.utils import printd
+
+        printd(
+            f"num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."
         )
         return num_tokens_from_messages(messages, model="gpt-4-0613")
         # raise NotImplementedError(
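Note: the token counters above rely on tiktoken's model lookup, which raises KeyError for unrecognized model names; the diff keeps the existing cl100k_base fallback and only makes the logging and warning paths explicit. A minimal standalone sketch of that fallback, assuming the tiktoken package is installed:

import tiktoken


def get_encoding(model: str) -> "tiktoken.Encoding":
    """Return the tokenizer for `model`, falling back to cl100k_base for unknown models."""
    try:
        return tiktoken.encoding_for_model(model)
    except KeyError:
        return tiktoken.get_encoding("cl100k_base")


# e.g. counting tokens for a function name, as num_tokens_from_functions does:
print(len(get_encoding("gpt-4").encode("archival_memory_search")))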
letta/log.py
CHANGED

@@ -23,12 +23,10 @@ def _setup_logfile() -> "Path":
 # TODO: production logging should be much less invasive
 DEVELOPMENT_LOGGING = {
     "version": 1,
-    "disable_existing_loggers":
+    "disable_existing_loggers": False,  # Allow capturing from all loggers
     "formatters": {
         "standard": {"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"},
-        "no_datetime": {
-            "format": "%(name)s - %(levelname)s - %(message)s",
-        },
+        "no_datetime": {"format": "%(name)s - %(levelname)s - %(message)s"},
     },
     "handlers": {
         "console": {
@@ -46,14 +44,14 @@ DEVELOPMENT_LOGGING = {
             "formatter": "standard",
         },
     },
+    "root": {  # Root logger handles all logs
+        "level": logging.DEBUG if settings.debug else logging.INFO,
+        "handlers": ["console", "file"],
+    },
     "loggers": {
         "Letta": {
             "level": logging.DEBUG if settings.debug else logging.INFO,
-            "
-            "console",
-            "file",
-            ],
-            "propagate": False,
+            "propagate": True,  # Let logs bubble up to root
         },
         "uvicorn": {
             "level": "CRITICAL",
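Note: the logging change above moves handlers from the "Letta" logger onto a root logger and flips propagate to True, so records from any logger reach the shared console/file handlers unless a named logger (like uvicorn here) is explicitly silenced. A minimal self-contained sketch of that pattern; this is not Letta's full config, and the handler/formatter details are simplified:

import logging
import logging.config

LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {"standard": {"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"}},
    "handlers": {
        "console": {"class": "logging.StreamHandler", "formatter": "standard"},
    },
    "root": {"level": "INFO", "handlers": ["console"]},  # handlers live on root
    "loggers": {
        "Letta": {"level": "DEBUG", "propagate": True},  # no handlers; bubbles up to root
    },
}

logging.config.dictConfig(LOGGING)
# Child loggers inherit the "Letta" level and their records propagate to the
# root handler, so this line is emitted by the console handler above.
logging.getLogger("Letta.agent").debug("visible via the root handler")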
letta/main.py
CHANGED

@@ -189,7 +189,7 @@ def run_agent_loop(

         elif user_input.lower() == "/memory":
             print(f"\nDumping memory contents:\n")
-            print(f"{letta_agent.memory.compile()}")
+            print(f"{letta_agent.agent_state.memory.compile()}")
             print(f"{letta_agent.persistence_manager.archival_memory.compile()}")
             print(f"{letta_agent.persistence_manager.recall_memory.compile()}")
             continue
letta/metadata.py
CHANGED

@@ -2,28 +2,23 @@

 import os
 import secrets
-from typing import List, Optional
+from typing import List, Optional, Union

 from sqlalchemy import JSON, Column, DateTime, Index, String, TypeDecorator
 from sqlalchemy.sql import func

 from letta.config import LettaConfig
 from letta.orm.base import Base
-from letta.schemas.agent import
+from letta.schemas.agent import PersistedAgentState
 from letta.schemas.api_key import APIKey
 from letta.schemas.embedding_config import EmbeddingConfig
-from letta.schemas.enums import JobStatus
+from letta.schemas.enums import JobStatus, ToolRuleType
 from letta.schemas.job import Job
 from letta.schemas.llm_config import LLMConfig
-from letta.schemas.memory import Memory
 from letta.schemas.openai.chat_completions import ToolCall, ToolCallFunction
-from letta.schemas.tool_rule import
-    BaseToolRule,
-    InitToolRule,
-    TerminalToolRule,
-    ToolRule,
-)
+from letta.schemas.tool_rule import ChildToolRule, InitToolRule, TerminalToolRule
 from letta.schemas.user import User
+from letta.services.per_agent_lock_manager import PerAgentLockManager
 from letta.settings import settings
 from letta.utils import enforce_types, get_utc_time, printd

@@ -163,28 +158,35 @@ class ToolRulesColumn(TypeDecorator):
     def load_dialect_impl(self, dialect):
         return dialect.type_descriptor(JSON())

-    def process_bind_param(self, value
+    def process_bind_param(self, value, dialect):
         """Convert a list of ToolRules to JSON-serializable format."""
         if value:
-
+            data = [rule.model_dump() for rule in value]
+            for d in data:
+                d["type"] = d["type"].value
+
+            for d in data:
+                assert not (d["type"] == "ToolRule" and "children" not in d), "ToolRule does not have children field"
+            return data
         return value

-    def process_result_value(self, value, dialect) -> List[
+    def process_result_value(self, value, dialect) -> List[Union[ChildToolRule, InitToolRule, TerminalToolRule]]:
         """Convert JSON back to a list of ToolRules."""
         if value:
             return [self.deserialize_tool_rule(rule_data) for rule_data in value]
         return value

     @staticmethod
-    def deserialize_tool_rule(data: dict) ->
+    def deserialize_tool_rule(data: dict) -> Union[ChildToolRule, InitToolRule, TerminalToolRule]:
         """Deserialize a dictionary to the appropriate ToolRule subclass based on the 'type'."""
-        rule_type = data.get("type")  # Remove 'type' field if it exists since it is a class var
-        if rule_type ==
+        rule_type = ToolRuleType(data.get("type"))  # Remove 'type' field if it exists since it is a class var
+        if rule_type == ToolRuleType.run_first:
             return InitToolRule(**data)
-        elif rule_type ==
+        elif rule_type == ToolRuleType.exit_loop:
             return TerminalToolRule(**data)
-        elif rule_type ==
-
+        elif rule_type == ToolRuleType.constrain_child_tools:
+            rule = ChildToolRule(**data)
+            return rule
         else:
             raise ValueError(f"Unknown tool rule type: {rule_type}")

@@ -203,7 +205,6 @@ class AgentModel(Base):

     # state (context compilation)
     message_ids = Column(JSON)
-    memory = Column(JSON)
     system = Column(String)

     # configs
@@ -215,7 +216,7 @@ class AgentModel(Base):
     metadata_ = Column(JSON)

     # tools
-
+    tool_names = Column(JSON)
     tool_rules = Column(ToolRulesColumn)

     Index(__tablename__ + "_idx_user", user_id),
@@ -223,24 +224,22 @@ class AgentModel(Base):
     def __repr__(self) -> str:
         return f"<Agent(id='{self.id}', name='{self.name}')>"

-    def to_record(self) ->
-        agent_state =
+    def to_record(self) -> PersistedAgentState:
+        agent_state = PersistedAgentState(
             id=self.id,
             user_id=self.user_id,
             name=self.name,
             created_at=self.created_at,
             description=self.description,
             message_ids=self.message_ids,
-            memory=Memory.load(self.memory),  # load dictionary
             system=self.system,
-
+            tool_names=self.tool_names,
             tool_rules=self.tool_rules,
             agent_type=self.agent_type,
             llm_config=self.llm_config,
             embedding_config=self.embedding_config,
             metadata_=self.metadata_,
         )
-        assert isinstance(agent_state.memory, Memory), f"Memory object is not of type Memory: {type(agent_state.memory)}"
         return agent_state


@@ -345,32 +344,48 @@ class MetadataStore:
         return tokens

     @enforce_types
-    def create_agent(self, agent:
+    def create_agent(self, agent: PersistedAgentState):
         # insert into agent table
         # make sure agent.name does not already exist for user user_id
         with self.session_maker() as session:
             if session.query(AgentModel).filter(AgentModel.name == agent.name).filter(AgentModel.user_id == agent.user_id).count() > 0:
                 raise ValueError(f"Agent with name {agent.name} already exists")
             fields = vars(agent)
-            fields["memory"] = agent.memory.to_dict()
-
-            del fields["
+            # fields["memory"] = agent.memory.to_dict()
+            # if "_internal_memory" in fields:
+            #     del fields["_internal_memory"]
+            # else:
+            #     warnings.warn(f"Agent {agent.id} has no _internal_memory field")
+            if "tags" in fields:
+                del fields["tags"]
+            # else:
+            #     warnings.warn(f"Agent {agent.id} has no tags field")
             session.add(AgentModel(**fields))
             session.commit()

     @enforce_types
-    def update_agent(self, agent:
+    def update_agent(self, agent: PersistedAgentState):
         with self.session_maker() as session:
             fields = vars(agent)
-            if isinstance(agent.memory, Memory):  # TODO: this is nasty but this whole class will soon be removed so whatever
-
-
-            del fields["
+            # if isinstance(agent.memory, Memory): # TODO: this is nasty but this whole class will soon be removed so whatever
+            #     fields["memory"] = agent.memory.to_dict()
+            # if "_internal_memory" in fields:
+            #     del fields["_internal_memory"]
+            # else:
+            #     warnings.warn(f"Agent {agent.id} has no _internal_memory field")
+            if "tags" in fields:
+                del fields["tags"]
+            # else:
+            #     warnings.warn(f"Agent {agent.id} has no tags field")
             session.query(AgentModel).filter(AgentModel.id == agent.id).update(fields)
             session.commit()

     @enforce_types
-    def delete_agent(self, agent_id: str):
+    def delete_agent(self, agent_id: str, per_agent_lock_manager: PerAgentLockManager):
+        # TODO: Remove this once Agent is on the ORM
+        # TODO: To prevent unbounded growth
+        per_agent_lock_manager.clear_lock(agent_id)
+
         with self.session_maker() as session:

             # delete agents
@@ -382,7 +397,7 @@ class MetadataStore:
             session.commit()

     @enforce_types
-    def list_agents(self, user_id: str) -> List[
+    def list_agents(self, user_id: str) -> List[PersistedAgentState]:
         with self.session_maker() as session:
             results = session.query(AgentModel).filter(AgentModel.user_id == user_id).all()
             return [r.to_record() for r in results]
@@ -390,7 +405,7 @@ class MetadataStore:
     @enforce_types
     def get_agent(
         self, agent_id: Optional[str] = None, agent_name: Optional[str] = None, user_id: Optional[str] = None
-    ) -> Optional[
+    ) -> Optional[PersistedAgentState]:
         with self.session_maker() as session:
             if agent_id:
                 results = session.query(AgentModel).filter(AgentModel.id == agent_id).all()
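Note on ToolRulesColumn above: it is a SQLAlchemy TypeDecorator over JSON that dumps the Pydantic rule models on write (flattening the ToolRuleType enum to its string value) and re-instantiates the right rule subclass on read. A generic, self-contained sketch of that pattern with illustrative names; these are not Letta's actual rule models:

import enum
from typing import List

from pydantic import BaseModel
from sqlalchemy import JSON, TypeDecorator


class RuleType(str, enum.Enum):
    init = "init"
    child = "child"


class InitRule(BaseModel):
    type: RuleType = RuleType.init
    tool_name: str


class ChildRule(BaseModel):
    type: RuleType = RuleType.child
    tool_name: str
    children: List[str] = []


class RulesColumn(TypeDecorator):
    """Store a list of Pydantic rule models as JSON; rebuild them on load."""

    impl = JSON
    cache_ok = True

    def process_bind_param(self, value, dialect):
        if value:
            data = [rule.model_dump() for rule in value]
            for d in data:
                d["type"] = RuleType(d["type"]).value  # enum -> plain string for JSON
            return data
        return value

    def process_result_value(self, value, dialect):
        if value:
            # Dispatch on the stored "type" to rebuild the right subclass
            return [ChildRule(**d) if d["type"] == RuleType.child.value else InitRule(**d) for d in value]
        return value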
letta/o1_agent.py
CHANGED

@@ -6,7 +6,6 @@ from letta.metadata import MetadataStore
 from letta.schemas.agent import AgentState
 from letta.schemas.message import Message
 from letta.schemas.openai.chat_completion_response import UsageStatistics
-from letta.schemas.tool import Tool
 from letta.schemas.usage import LettaUsageStatistics
 from letta.schemas.user import User

@@ -45,13 +44,11 @@ class O1Agent(Agent):
         interface: AgentInterface,
         agent_state: AgentState,
         user: User,
-        tools: List[Tool] = [],
         max_thinking_steps: int = 10,
         first_message_verify_mono: bool = False,
     ):
-        super().__init__(interface, agent_state,
+        super().__init__(interface, agent_state, user)
         self.max_thinking_steps = max_thinking_steps
-        self.tools = tools
         self.first_message_verify_mono = first_message_verify_mono

     def step(
letta/orm/__init__.py
CHANGED

@@ -1,7 +1,9 @@
 from letta.orm.base import Base
 from letta.orm.block import Block
+from letta.orm.blocks_agents import BlocksAgents
 from letta.orm.file import FileMetadata
 from letta.orm.organization import Organization
+from letta.orm.sandbox_config import SandboxConfig, SandboxEnvironmentVariable
 from letta.orm.source import Source
 from letta.orm.tool import Tool
 from letta.orm.user import User
letta/orm/block.py
CHANGED

@@ -1,15 +1,16 @@
 from typing import TYPE_CHECKING, Optional, Type

-from sqlalchemy import JSON, BigInteger, Integer
+from sqlalchemy import JSON, BigInteger, Integer, UniqueConstraint
 from sqlalchemy.orm import Mapped, mapped_column, relationship

+from letta.constants import CORE_MEMORY_BLOCK_CHAR_LIMIT
 from letta.orm.mixins import OrganizationMixin
 from letta.orm.sqlalchemy_base import SqlalchemyBase
 from letta.schemas.block import Block as PydanticBlock
 from letta.schemas.block import Human, Persona

 if TYPE_CHECKING:
-    from letta.orm
+    from letta.orm import BlocksAgents, Organization


 class Block(OrganizationMixin, SqlalchemyBase):
@@ -17,6 +18,8 @@ class Block(OrganizationMixin, SqlalchemyBase):

     __tablename__ = "block"
     __pydantic_model__ = PydanticBlock
+    # This may seem redundant, but is necessary for the BlocksAgents composite FK relationship
+    __table_args__ = (UniqueConstraint("id", "label", name="unique_block_id_label"),)

     template_name: Mapped[Optional[str]] = mapped_column(
         nullable=True, doc="the unique name that identifies a block in a human-readable way"
@@ -27,11 +30,12 @@ class Block(OrganizationMixin, SqlalchemyBase):
         doc="whether the block is a template (e.g. saved human/persona options as baselines for other templates)", default=False
     )
     value: Mapped[str] = mapped_column(doc="Text content of the block for the respective section of core memory.")
-    limit: Mapped[BigInteger] = mapped_column(Integer, default=
+    limit: Mapped[BigInteger] = mapped_column(Integer, default=CORE_MEMORY_BLOCK_CHAR_LIMIT, doc="Character limit of the block.")
     metadata_: Mapped[Optional[dict]] = mapped_column(JSON, default={}, doc="arbitrary information related to the block.")

     # relationships
     organization: Mapped[Optional["Organization"]] = relationship("Organization")
+    blocks_agents: Mapped[list["BlocksAgents"]] = relationship("BlocksAgents", back_populates="block", cascade="all, delete")

     def to_pydantic(self) -> Type:
         match self.label:
letta/orm/blocks_agents.py
ADDED

@@ -0,0 +1,32 @@
+from sqlalchemy import ForeignKey, ForeignKeyConstraint, String, UniqueConstraint
+from sqlalchemy.orm import Mapped, mapped_column, relationship
+
+from letta.orm.sqlalchemy_base import SqlalchemyBase
+from letta.schemas.blocks_agents import BlocksAgents as PydanticBlocksAgents
+
+
+class BlocksAgents(SqlalchemyBase):
+    """Agents must have one or many blocks to make up their core memory."""
+
+    __tablename__ = "blocks_agents"
+    __pydantic_model__ = PydanticBlocksAgents
+    __table_args__ = (
+        UniqueConstraint(
+            "agent_id",
+            "block_label",
+            name="unique_label_per_agent",
+        ),
+        ForeignKeyConstraint(
+            ["block_id", "block_label"],
+            ["block.id", "block.label"],
+            name="fk_block_id_label",
+        ),
+    )
+
+    # unique agent + block label
+    agent_id: Mapped[str] = mapped_column(String, ForeignKey("agents.id"), primary_key=True)
+    block_id: Mapped[str] = mapped_column(String, primary_key=True)
+    block_label: Mapped[str] = mapped_column(String, primary_key=True)
+
+    # relationships
+    block: Mapped["Block"] = relationship("Block", back_populates="blocks_agents")
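Note: the new BlocksAgents join table references Block by the pair (block.id, block.label), which is why Block gained the seemingly redundant UniqueConstraint("id", "label") above; a composite ForeignKeyConstraint must target columns that are covered by a primary key or unique constraint on the parent. A generic, self-contained sketch of that pattern in plain SQLAlchemy 2.0; the table and column names here are illustrative, not Letta's:

from sqlalchemy import Column, ForeignKeyConstraint, String, UniqueConstraint
from sqlalchemy.orm import DeclarativeBase


class Base(DeclarativeBase):
    pass


class Parent(Base):
    __tablename__ = "parent"
    # The referenced pair must be declared UNIQUE for the composite FK to be valid.
    __table_args__ = (UniqueConstraint("id", "label", name="uq_parent_id_label"),)

    id = Column(String, primary_key=True)
    label = Column(String, nullable=False)


class Child(Base):
    __tablename__ = "child"
    __table_args__ = (
        ForeignKeyConstraint(
            ["parent_id", "parent_label"],
            ["parent.id", "parent.label"],
            name="fk_parent_id_label",
        ),
    )

    parent_id = Column(String, primary_key=True)
    parent_label = Column(String, primary_key=True)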
letta/orm/errors.py
CHANGED

@@ -4,3 +4,11 @@ class NoResultFound(Exception):

 class MalformedIdError(Exception):
     """An id not in the right format, most likely violating uuid4 format."""
+
+
+class UniqueConstraintViolationError(ValueError):
+    """Custom exception for unique constraint violations."""
+
+
+class ForeignKeyConstraintViolationError(ValueError):
+    """Custom exception for foreign key constraint violations."""
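Note: the two new exceptions subclass ValueError so callers can catch them generically. An illustrative sketch only of how a service layer might translate SQLAlchemy's IntegrityError into them; the actual mapping used by Letta is not shown in this diff, and the string matching below is a heuristic:

from sqlalchemy.exc import IntegrityError

from letta.orm.errors import (
    ForeignKeyConstraintViolationError,
    UniqueConstraintViolationError,
)


def create_row(session, model):
    try:
        session.add(model)
        session.commit()
    except IntegrityError as e:
        session.rollback()
        msg = str(e.orig).lower()
        if "unique" in msg:
            raise UniqueConstraintViolationError(str(e)) from e
        if "foreign key" in msg:
            raise ForeignKeyConstraintViolationError(str(e)) from e
        raise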
letta/orm/mixins.py
CHANGED

@@ -37,3 +37,11 @@ class SourceMixin(Base):
     __abstract__ = True

     source_id: Mapped[str] = mapped_column(String, ForeignKey("sources.id"))
+
+
+class SandboxConfigMixin(Base):
+    """Mixin for models that belong to a SandboxConfig."""
+
+    __abstract__ = True
+
+    sandbox_config_id: Mapped[str] = mapped_column(String, ForeignKey("sandbox_configs.id"))
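Note: SandboxConfigMixin follows the same shape as the existing mixins: an abstract declarative class that contributes a single foreign-key column to whichever concrete model inherits it. A self-contained sketch of that abstract-mixin pattern in generic SQLAlchemy 2.0; the names below are illustrative, not Letta's actual sandbox models (those live in letta/orm/sandbox_config.py):

from sqlalchemy import ForeignKey, String
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase):
    pass


class OwnerMixin(Base):
    """Abstract: contributes an owner_id FK column to any model that inherits it."""

    __abstract__ = True

    owner_id: Mapped[str] = mapped_column(String, ForeignKey("owners.id"))


class Owner(Base):
    __tablename__ = "owners"

    id: Mapped[str] = mapped_column(String, primary_key=True)


class Item(OwnerMixin):
    __tablename__ = "items"

    id: Mapped[str] = mapped_column(String, primary_key=True)
    # owner_id is inherited from OwnerMixin; no table is created for the mixin itself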
letta/orm/organization.py
CHANGED

@@ -2,12 +2,12 @@ from typing import TYPE_CHECKING, List

 from sqlalchemy.orm import Mapped, mapped_column, relationship

-from letta.orm.file import FileMetadata
 from letta.orm.sqlalchemy_base import SqlalchemyBase
 from letta.schemas.organization import Organization as PydanticOrganization

 if TYPE_CHECKING:

+    from letta.orm.file import FileMetadata
     from letta.orm.tool import Tool
     from letta.orm.user import User

@@ -27,6 +27,13 @@ class Organization(SqlalchemyBase):
     sources: Mapped[List["Source"]] = relationship("Source", back_populates="organization", cascade="all, delete-orphan")
     agents_tags: Mapped[List["AgentsTags"]] = relationship("AgentsTags", back_populates="organization", cascade="all, delete-orphan")
     files: Mapped[List["FileMetadata"]] = relationship("FileMetadata", back_populates="organization", cascade="all, delete-orphan")
+    sandbox_configs: Mapped[List["SandboxConfig"]] = relationship(
+        "SandboxConfig", back_populates="organization", cascade="all, delete-orphan"
+    )
+    sandbox_environment_variables: Mapped[List["SandboxEnvironmentVariable"]] = relationship(
+        "SandboxEnvironmentVariable", back_populates="organization", cascade="all, delete-orphan"
+    )
+
     # TODO: Map these relationships later when we actually make these models
     # below is just a suggestion
     # agents: Mapped[List["Agent"]] = relationship("Agent", back_populates="organization", cascade="all, delete-orphan")