letta-nightly 0.6.1.dev20241206104246__py3-none-any.whl → 0.6.1.dev20241208104134__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of letta-nightly might be problematic.
- letta/agent.py +68 -76
- letta/agent_store/db.py +1 -77
- letta/agent_store/storage.py +0 -5
- letta/cli/cli.py +1 -4
- letta/client/client.py +11 -14
- letta/constants.py +1 -0
- letta/functions/function_sets/base.py +33 -5
- letta/functions/helpers.py +3 -3
- letta/llm_api/openai.py +0 -1
- letta/local_llm/llm_chat_completion_wrappers/chatml.py +13 -1
- letta/main.py +2 -2
- letta/memory.py +4 -82
- letta/metadata.py +0 -35
- letta/o1_agent.py +7 -2
- letta/offline_memory_agent.py +6 -0
- letta/orm/__init__.py +2 -0
- letta/orm/file.py +1 -1
- letta/orm/message.py +64 -0
- letta/orm/mixins.py +16 -0
- letta/orm/organization.py +1 -0
- letta/orm/sqlalchemy_base.py +118 -26
- letta/schemas/letta_base.py +7 -6
- letta/schemas/message.py +6 -12
- letta/schemas/tool.py +18 -11
- letta/server/rest_api/app.py +2 -3
- letta/server/rest_api/routers/v1/agents.py +7 -6
- letta/server/rest_api/routers/v1/blocks.py +2 -2
- letta/server/rest_api/routers/v1/tools.py +26 -4
- letta/server/rest_api/utils.py +3 -1
- letta/server/server.py +67 -62
- letta/server/static_files/assets/index-43ab4d62.css +1 -0
- letta/server/static_files/assets/index-4848e3d7.js +40 -0
- letta/server/static_files/index.html +2 -2
- letta/services/block_manager.py +1 -1
- letta/services/message_manager.py +194 -0
- letta/services/organization_manager.py +6 -9
- letta/services/sandbox_config_manager.py +16 -1
- letta/services/source_manager.py +1 -1
- letta/services/tool_manager.py +2 -4
- letta/services/user_manager.py +1 -1
- {letta_nightly-0.6.1.dev20241206104246.dist-info → letta_nightly-0.6.1.dev20241208104134.dist-info}/METADATA +2 -2
- {letta_nightly-0.6.1.dev20241206104246.dist-info → letta_nightly-0.6.1.dev20241208104134.dist-info}/RECORD +45 -45
- letta/agent_store/lancedb.py +0 -177
- letta/persistence_manager.py +0 -149
- letta/server/static_files/assets/index-1b5d1a41.js +0 -271
- letta/server/static_files/assets/index-56a3f8c6.css +0 -1
- {letta_nightly-0.6.1.dev20241206104246.dist-info → letta_nightly-0.6.1.dev20241208104134.dist-info}/LICENSE +0 -0
- {letta_nightly-0.6.1.dev20241206104246.dist-info → letta_nightly-0.6.1.dev20241208104134.dist-info}/WHEEL +0 -0
- {letta_nightly-0.6.1.dev20241206104246.dist-info → letta_nightly-0.6.1.dev20241208104134.dist-info}/entry_points.txt +0 -0
letta/functions/function_sets/base.py CHANGED

@@ -1,3 +1,4 @@
+from datetime import datetime
 from typing import Optional
 
 from letta.agent import Agent
@@ -38,7 +39,7 @@ Returns:
     """
 
 
-def pause_heartbeats(self: Agent, minutes: int) -> Optional[str]:
+def pause_heartbeats(self: "Agent", minutes: int) -> Optional[str]:
     import datetime
 
     from letta.constants import MAX_PAUSE_HEARTBEATS
@@ -80,7 +81,15 @@ def conversation_search(self: "Agent", query: str, page: Optional[int] = 0) -> O
     except:
         raise ValueError(f"'page' argument must be an integer")
     count = RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE
-
+    # TODO: add paging by page number. currently cursor only works with strings.
+    # original: start=page * count
+    results = self.message_manager.list_user_messages_for_agent(
+        agent_id=self.agent_state.id,
+        actor=self.user,
+        query_text=query,
+        limit=count,
+    )
+    total = len(results)
     num_pages = math.ceil(total / count) - 1  # 0 index
     if len(results) == 0:
         results_str = f"No results found."
@@ -112,10 +121,29 @@ def conversation_search_date(self: "Agent", start_date: str, end_date: str, page
     page = 0
     try:
         page = int(page)
+        if page < 0:
+            raise ValueError
     except:
         raise ValueError(f"'page' argument must be an integer")
+
+    # Convert date strings to datetime objects
+    try:
+        start_datetime = datetime.strptime(start_date, "%Y-%m-%d").replace(hour=0, minute=0, second=0, microsecond=0)
+        end_datetime = datetime.strptime(end_date, "%Y-%m-%d").replace(hour=23, minute=59, second=59, microsecond=999999)
+    except ValueError:
+        raise ValueError("Dates must be in the format 'YYYY-MM-DD'")
+
     count = RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE
-    results
+    results = self.message_manager.list_user_messages_for_agent(
+        # TODO: add paging by page number. currently cursor only works with strings.
+        agent_id=self.agent_state.id,
+        actor=self.user,
+        start_date=start_datetime,
+        end_date=end_datetime,
+        limit=count,
+        # start_date=start_date, end_date=end_date, limit=count, start=page * count
+    )
+    total = len(results)
     num_pages = math.ceil(total / count) - 1  # 0 index
     if len(results) == 0:
         results_str = f"No results found."
@@ -136,7 +164,7 @@ def archival_memory_insert(self: "Agent", content: str) -> Optional[str]:
     Returns:
         Optional[str]: None is always returned as this function does not produce a response.
     """
-    self.
+    self.archival_memory.insert(content)
     return None
 
 
@@ -163,7 +191,7 @@ def archival_memory_search(self: "Agent", query: str, page: Optional[int] = 0) -
     except:
         raise ValueError(f"'page' argument must be an integer")
     count = RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE
-    results, total = self.
+    results, total = self.archival_memory.search(query, count=count, start=page * count)
     num_pages = math.ceil(total / count) - 1  # 0 index
     if len(results) == 0:
         results_str = f"No results found."
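Aside: the conversation_search_date hunk above normalizes a 'YYYY-MM-DD' pair into an inclusive day window before querying. A minimal standalone sketch of just that normalization (plain Python; letta's MessageManager, which is assumed to accept the resulting datetime bounds, is not needed here):

from datetime import datetime

def day_window(start_date: str, end_date: str):
    # Expand "YYYY-MM-DD" strings to an inclusive [start-of-day, end-of-day] window
    try:
        start = datetime.strptime(start_date, "%Y-%m-%d").replace(hour=0, minute=0, second=0, microsecond=0)
        end = datetime.strptime(end_date, "%Y-%m-%d").replace(hour=23, minute=59, second=59, microsecond=999999)
    except ValueError:
        raise ValueError("Dates must be in the format 'YYYY-MM-DD'")
    return start, end

print(day_window("2024-12-06", "2024-12-08"))
# (datetime.datetime(2024, 12, 6, 0, 0), datetime.datetime(2024, 12, 8, 23, 59, 59, 999999))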
letta/functions/helpers.py CHANGED

@@ -4,12 +4,12 @@ import humps
 from pydantic import BaseModel
 
 
-def generate_composio_tool_wrapper(
+def generate_composio_tool_wrapper(action_name: str) -> tuple[str, str]:
     # Instantiate the object
-    tool_instantiation_str = f"composio_toolset.get_tools(actions=[
+    tool_instantiation_str = f"composio_toolset.get_tools(actions=['{action_name}'])[0]"
 
     # Generate func name
-    func_name =
+    func_name = action_name.lower()
 
     wrapper_function_str = f"""
 def {func_name}(**kwargs):
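The rewritten generate_composio_tool_wrapper is string templating: it returns a function name plus Python source that fetches a Composio tool and wraps it. A rough sketch of the templating step with a hypothetical action name (the full wrapper body is elided in the diff above):

action_name = "GITHUB_STAR_A_REPOSITORY"  # hypothetical Composio action
tool_instantiation_str = f"composio_toolset.get_tools(actions=['{action_name}'])[0]"
func_name = action_name.lower()

print(func_name)               # github_star_a_repository
print(tool_instantiation_str)  # composio_toolset.get_tools(actions=['GITHUB_STAR_A_REPOSITORY'])[0]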
letta/llm_api/openai.py CHANGED

letta/local_llm/llm_chat_completion_wrappers/chatml.py CHANGED

@@ -3,6 +3,7 @@ from letta.local_llm.json_parser import clean_json
 from letta.local_llm.llm_chat_completion_wrappers.wrapper_base import (
     LLMChatCompletionWrapper,
 )
+from letta.schemas.enums import MessageRole
 from letta.utils import json_dumps, json_loads
 
 PREFIX_HINT = """# Reminders:
@@ -208,7 +209,9 @@ class ChatMLInnerMonologueWrapper(LLMChatCompletionWrapper):
 
         # Last are the user/assistant messages
         for message in messages[1:]:
-
+            # check that message["role"] is a valid option for MessageRole
+            # TODO: this shouldn't be necessary if we use pydantic in the future
+            assert message["role"] in [role.value for role in MessageRole]
 
             if message["role"] == "user":
                 # Support for AutoGen naming of agents
@@ -231,6 +234,15 @@ class ChatMLInnerMonologueWrapper(LLMChatCompletionWrapper):
 
                 prompt += f"\n<|im_start|>{role_str}\n{msg_str.strip()}<|im_end|>"
 
+            elif message["role"] == "system":
+
+                role_str = "system"
+                msg_str = self._compile_system_message(
+                    system_message=message["content"], functions=functions, function_documentation=function_documentation
+                )
+
+                prompt += f"\n<|im_start|>{role_str}\n{msg_str.strip()}<|im_end|>"
+
             elif message["role"] in ["tool", "function"]:
                 if self.allow_function_role:
                     role_str = message["role"]
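For reference, the new system branch emits the same ChatML framing already used for the other roles. A tiny sketch of the resulting prompt fragment (the _compile_system_message output is stubbed here, since its implementation is not part of this diff):

role_str = "system"
msg_str = "You are a helpful assistant."  # stand-in for _compile_system_message(...)
prompt = ""
prompt += f"\n<|im_start|>{role_str}\n{msg_str.strip()}<|im_end|>"
print(prompt)
# <|im_start|>system
# You are a helpful assistant.
# <|im_end|>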
letta/main.py CHANGED

@@ -190,8 +190,8 @@ def run_agent_loop(
         elif user_input.lower() == "/memory":
             print(f"\nDumping memory contents:\n")
             print(f"{letta_agent.agent_state.memory.compile()}")
-            print(f"{letta_agent.
-            print(f"{letta_agent.
+            print(f"{letta_agent.archival_memory.compile()}")
+            print(f"{letta_agent.recall_memory.compile()}")
             continue
 
         elif user_input.lower() == "/model":
letta/memory.py CHANGED

@@ -67,14 +67,12 @@ def summarize_messages(
         + message_sequence_to_summarize[cutoff:]
     )
 
-
+    agent_state.user_id
     dummy_agent_id = agent_state.id
     message_sequence = []
-    message_sequence.append(Message(
-    message_sequence.append(
-
-    )
-    message_sequence.append(Message(user_id=dummy_user_id, agent_id=dummy_agent_id, role=MessageRole.user, text=summary_input))
+    message_sequence.append(Message(agent_id=dummy_agent_id, role=MessageRole.system, text=summary_prompt))
+    message_sequence.append(Message(agent_id=dummy_agent_id, role=MessageRole.assistant, text=MESSAGE_SUMMARY_REQUEST_ACK))
+    message_sequence.append(Message(agent_id=dummy_agent_id, role=MessageRole.user, text=summary_input))
 
     # TODO: We need to eventually have a separate LLM config for the summarizer LLM
     llm_config_no_inner_thoughts = agent_state.llm_config.model_copy(deep=True)
@@ -252,82 +250,6 @@ class DummyRecallMemory(RecallMemory):
         return matches, len(matches)
 
 
-class BaseRecallMemory(RecallMemory):
-    """Recall memory based on base functions implemented by storage connectors"""
-
-    def __init__(self, agent_state, restrict_search_to_summaries=False):
-        # If true, the pool of messages that can be queried are the automated summaries only
-        # (generated when the conversation window needs to be shortened)
-        self.restrict_search_to_summaries = restrict_search_to_summaries
-        from letta.agent_store.storage import StorageConnector
-
-        self.agent_state = agent_state
-
-        # create embedding model
-        self.embed_model = embedding_model(agent_state.embedding_config)
-        self.embedding_chunk_size = agent_state.embedding_config.embedding_chunk_size
-
-        # create storage backend
-        self.storage = StorageConnector.get_recall_storage_connector(user_id=agent_state.user_id, agent_id=agent_state.id)
-        # TODO: have some mechanism for cleanup otherwise will lead to OOM
-        self.cache = {}
-
-    def get_all(self, start=0, count=None):
-        start = 0 if start is None else int(start)
-        count = 0 if count is None else int(count)
-        results = self.storage.get_all(start, count)
-        results_json = [message.to_openai_dict() for message in results]
-        return results_json, len(results)
-
-    def text_search(self, query_string, count=None, start=None):
-        start = 0 if start is None else int(start)
-        count = 0 if count is None else int(count)
-        results = self.storage.query_text(query_string, count, start)
-        results_json = [message.to_openai_dict_search_results() for message in results]
-        return results_json, len(results)
-
-    def date_search(self, start_date, end_date, count=None, start=None):
-        start = 0 if start is None else int(start)
-        count = 0 if count is None else int(count)
-        results = self.storage.query_date(start_date, end_date, count, start)
-        results_json = [message.to_openai_dict_search_results() for message in results]
-        return results_json, len(results)
-
-    def compile(self) -> str:
-        total = self.storage.size()
-        system_count = self.storage.size(filters={"role": "system"})
-        user_count = self.storage.size(filters={"role": "user"})
-        assistant_count = self.storage.size(filters={"role": "assistant"})
-        function_count = self.storage.size(filters={"role": "function"})
-        other_count = total - (system_count + user_count + assistant_count + function_count)
-
-        memory_str = (
-            f"Statistics:"
-            + f"\n{total} total messages"
-            + f"\n{system_count} system"
-            + f"\n{user_count} user"
-            + f"\n{assistant_count} assistant"
-            + f"\n{function_count} function"
-            + f"\n{other_count} other"
-        )
-        return f"\n### RECALL MEMORY ###" + f"\n{memory_str}"
-
-    def insert(self, message: Message):
-        self.storage.insert(message)
-
-    def insert_many(self, messages: List[Message]):
-        self.storage.insert_many(messages)
-
-    def save(self):
-        self.storage.save()
-
-    def __len__(self):
-        return self.storage.size()
-
-    def count(self) -> int:
-        return len(self)
-
-
 class EmbeddingArchivalMemory(ArchivalMemory):
     """Archival memory with embedding based search"""
 
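After this change, summarize_messages seeds the summarizer with a fixed three-turn exchange: a system prompt, an assistant acknowledgement, then the packed transcript as a user turn. Sketched as plain role/content pairs (the real code builds letta Message objects, as in the hunk above; the string values here are stand-ins):

summary_prompt = "Summarize the conversation so far."      # stand-in
MESSAGE_SUMMARY_REQUEST_ACK = "Understood, send the text."  # stand-in
summary_input = "...packed conversation transcript..."      # stand-in

message_sequence = [
    {"role": "system", "content": summary_prompt},
    {"role": "assistant", "content": MESSAGE_SUMMARY_REQUEST_ACK},
    {"role": "user", "content": summary_input},
]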
letta/metadata.py CHANGED

@@ -14,7 +14,6 @@ from letta.schemas.api_key import APIKey
 from letta.schemas.embedding_config import EmbeddingConfig
 from letta.schemas.enums import ToolRuleType
 from letta.schemas.llm_config import LLMConfig
-from letta.schemas.openai.chat_completions import ToolCall, ToolCallFunction
 from letta.schemas.tool_rule import ChildToolRule, InitToolRule, TerminalToolRule
 from letta.schemas.user import User
 from letta.services.per_agent_lock_manager import PerAgentLockManager
@@ -66,40 +65,6 @@ class EmbeddingConfigColumn(TypeDecorator):
         return value
 
 
-class ToolCallColumn(TypeDecorator):
-
-    impl = JSON
-    cache_ok = True
-
-    def load_dialect_impl(self, dialect):
-        return dialect.type_descriptor(JSON())
-
-    def process_bind_param(self, value, dialect):
-        if value:
-            values = []
-            for v in value:
-                if isinstance(v, ToolCall):
-                    values.append(v.model_dump())
-                else:
-                    values.append(v)
-            return values
-
-        return value
-
-    def process_result_value(self, value, dialect):
-        if value:
-            tools = []
-            for tool_value in value:
-                if "function" in tool_value:
-                    tool_call_function = ToolCallFunction(**tool_value["function"])
-                    del tool_value["function"]
-                else:
-                    tool_call_function = None
-                tools.append(ToolCall(function=tool_call_function, **tool_value))
-            return tools
-        return value
-
-
 # TODO: eventually store providers?
 # class Provider(Base):
 #     __tablename__ = "providers"
letta/o1_agent.py CHANGED

@@ -20,7 +20,7 @@ def send_thinking_message(self: "Agent", message: str) -> Optional[str]:
     Returns:
         Optional[str]: None is always returned as this function does not produce a response.
     """
-    self.interface.internal_monologue(message
+    self.interface.internal_monologue(message)
     return None
 
 
@@ -34,7 +34,7 @@ def send_final_message(self: "Agent", message: str) -> Optional[str]:
     Returns:
         Optional[str]: None is always returned as this function does not produce a response.
     """
-    self.interface.internal_monologue(message
+    self.interface.internal_monologue(message)
     return None
 
 
@@ -62,10 +62,15 @@ class O1Agent(Agent):
         """Run Agent.inner_step in a loop, terminate when final thinking message is sent or max_thinking_steps is reached"""
         # assert ms is not None, "MetadataStore is required"
         next_input_message = messages if isinstance(messages, list) else [messages]
+
         counter = 0
         total_usage = UsageStatistics()
         step_count = 0
         while step_count < self.max_thinking_steps:
+            # This is hacky but we need to do this for now
+            for m in next_input_message:
+                m.id = m._generate_id()
+
             kwargs["ms"] = ms
             kwargs["first_message"] = False
             step_response = self.inner_step(
letta/offline_memory_agent.py CHANGED

@@ -18,6 +18,7 @@ def trigger_rethink_memory(agent_state: "AgentState", message: Optional[str]) ->
 
     """
     from letta import create_client
+
     client = create_client()
     agents = client.list_agents()
     for agent in agents:
@@ -149,6 +150,11 @@ class OfflineMemoryAgent(Agent):
         step_count = 0
 
         while counter < self.max_memory_rethinks:
+            # This is hacky but we need to do this for now
+            # TODO: REMOVE THIS
+            for m in next_input_message:
+                m.id = m._generate_id()
+
             kwargs["ms"] = ms
             kwargs["first_message"] = False
             step_response = self.inner_step(
letta/orm/__init__.py CHANGED

@@ -1,8 +1,10 @@
+from letta.orm.agents_tags import AgentsTags
 from letta.orm.base import Base
 from letta.orm.block import Block
 from letta.orm.blocks_agents import BlocksAgents
 from letta.orm.file import FileMetadata
 from letta.orm.job import Job
+from letta.orm.message import Message
 from letta.orm.organization import Organization
 from letta.orm.sandbox_config import SandboxConfig, SandboxEnvironmentVariable
 from letta.orm.source import Source
letta/orm/file.py CHANGED
letta/orm/message.py ADDED

@@ -0,0 +1,64 @@
+from typing import Optional
+
+from sqlalchemy import JSON, TypeDecorator
+from sqlalchemy.orm import Mapped, mapped_column, relationship
+
+from letta.orm.mixins import AgentMixin, OrganizationMixin
+from letta.orm.sqlalchemy_base import SqlalchemyBase
+from letta.schemas.message import Message as PydanticMessage
+from letta.schemas.openai.chat_completions import ToolCall, ToolCallFunction
+
+
+class ToolCallColumn(TypeDecorator):
+
+    impl = JSON
+    cache_ok = True
+
+    def load_dialect_impl(self, dialect):
+        return dialect.type_descriptor(JSON())
+
+    def process_bind_param(self, value, dialect):
+        if value:
+            values = []
+            for v in value:
+                if isinstance(v, ToolCall):
+                    values.append(v.model_dump())
+                else:
+                    values.append(v)
+            return values
+
+        return value
+
+    def process_result_value(self, value, dialect):
+        if value:
+            tools = []
+            for tool_value in value:
+                if "function" in tool_value:
+                    tool_call_function = ToolCallFunction(**tool_value["function"])
+                    del tool_value["function"]
+                else:
+                    tool_call_function = None
+                tools.append(ToolCall(function=tool_call_function, **tool_value))
+            return tools
+        return value
+
+
+class Message(SqlalchemyBase, OrganizationMixin, AgentMixin):
+    """Defines data model for storing Message objects"""
+
+    __tablename__ = "messages"
+    __table_args__ = {"extend_existing": True}
+    __pydantic_model__ = PydanticMessage
+
+    id: Mapped[str] = mapped_column(primary_key=True, doc="Unique message identifier")
+    role: Mapped[str] = mapped_column(doc="Message role (user/assistant/system/tool)")
+    text: Mapped[Optional[str]] = mapped_column(nullable=True, doc="Message content")
+    model: Mapped[Optional[str]] = mapped_column(nullable=True, doc="LLM model used")
+    name: Mapped[Optional[str]] = mapped_column(nullable=True, doc="Name for multi-agent scenarios")
+    tool_calls: Mapped[ToolCall] = mapped_column(ToolCallColumn, doc="Tool call information")
+    tool_call_id: Mapped[Optional[str]] = mapped_column(nullable=True, doc="ID of the tool call")
+
+    # Relationships
+    # TODO: Add in after Agent ORM is created
+    # agent: Mapped["Agent"] = relationship("Agent", back_populates="messages", lazy="selectin")
+    organization: Mapped["Organization"] = relationship("Organization", back_populates="messages", lazy="selectin")
letta/orm/mixins.py CHANGED

@@ -31,6 +31,22 @@ class UserMixin(Base):
     user_id: Mapped[str] = mapped_column(String, ForeignKey("users.id"))
 
 
+class AgentMixin(Base):
+    """Mixin for models that belong to an agent."""
+
+    __abstract__ = True
+
+    agent_id: Mapped[str] = mapped_column(String, ForeignKey("agents.id"))
+
+
+class FileMixin(Base):
+    """Mixin for models that belong to a file."""
+
+    __abstract__ = True
+
+    file_id: Mapped[str] = mapped_column(String, ForeignKey("files.id"))
+
+
 class SourceMixin(Base):
     """Mixin for models (e.g. file) that belong to a source."""
 
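These mixins contribute a single foreign-key column to any model that inherits them; the new Message model above composes AgentMixin and OrganizationMixin exactly this way. A hypothetical consumer, for illustration only:

from sqlalchemy.orm import Mapped, mapped_column

from letta.orm.mixins import AgentMixin
from letta.orm.sqlalchemy_base import SqlalchemyBase

class Checkpoint(SqlalchemyBase, AgentMixin):  # hypothetical model, not in this diff
    __tablename__ = "checkpoints"

    id: Mapped[str] = mapped_column(primary_key=True)
    # agent_id (ForeignKey -> agents.id) is inherited from AgentMixin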
letta/orm/organization.py CHANGED

@@ -33,6 +33,7 @@ class Organization(SqlalchemyBase):
     sandbox_environment_variables: Mapped[List["SandboxEnvironmentVariable"]] = relationship(
         "SandboxEnvironmentVariable", back_populates="organization", cascade="all, delete-orphan"
     )
+    messages: Mapped[List["Message"]] = relationship("Message", back_populates="organization", cascade="all, delete-orphan")
 
     # TODO: Map these relationships later when we actually make these models
     # below is just a suggestion