letta-nightly 0.6.13.dev20250122185528__py3-none-any.whl → 0.6.14.dev20250123041709__py3-none-any.whl
This diff compares the content of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
Potentially problematic release.
This version of letta-nightly might be problematic.
- letta/__init__.py +2 -2
- letta/agent.py +69 -100
- letta/chat_only_agent.py +1 -1
- letta/client/client.py +153 -137
- letta/constants.py +1 -8
- letta/data_sources/connectors.py +1 -1
- letta/functions/helpers.py +29 -4
- letta/functions/schema_generator.py +55 -0
- letta/llm_api/helpers.py +51 -1
- letta/memory.py +9 -7
- letta/orm/agent.py +2 -2
- letta/orm/block.py +3 -1
- letta/orm/custom_columns.py +5 -4
- letta/orm/enums.py +1 -0
- letta/orm/message.py +2 -2
- letta/orm/sqlalchemy_base.py +5 -0
- letta/schemas/agent.py +3 -3
- letta/schemas/block.py +2 -2
- letta/schemas/environment_variables.py +1 -1
- letta/schemas/job.py +1 -1
- letta/schemas/letta_base.py +6 -0
- letta/schemas/letta_message.py +6 -6
- letta/schemas/memory.py +3 -2
- letta/schemas/message.py +21 -13
- letta/schemas/passage.py +1 -1
- letta/schemas/source.py +4 -4
- letta/schemas/tool.py +38 -43
- letta/server/rest_api/app.py +1 -16
- letta/server/rest_api/routers/v1/agents.py +101 -84
- letta/server/rest_api/routers/v1/blocks.py +8 -46
- letta/server/rest_api/routers/v1/jobs.py +4 -4
- letta/server/rest_api/routers/v1/providers.py +2 -2
- letta/server/rest_api/routers/v1/runs.py +6 -6
- letta/server/rest_api/routers/v1/sources.py +8 -38
- letta/server/rest_api/routers/v1/tags.py +1 -1
- letta/server/rest_api/routers/v1/tools.py +6 -7
- letta/server/server.py +3 -3
- letta/services/agent_manager.py +43 -9
- letta/services/block_manager.py +3 -3
- letta/services/job_manager.py +5 -3
- letta/services/organization_manager.py +1 -1
- letta/services/passage_manager.py +3 -3
- letta/services/provider_manager.py +2 -2
- letta/services/sandbox_config_manager.py +2 -2
- letta/services/source_manager.py +3 -3
- letta/services/tool_execution_sandbox.py +3 -1
- letta/services/tool_manager.py +8 -3
- letta/services/user_manager.py +2 -2
- letta/settings.py +29 -0
- letta/system.py +2 -2
- {letta_nightly-0.6.13.dev20250122185528.dist-info → letta_nightly-0.6.14.dev20250123041709.dist-info}/METADATA +1 -1
- {letta_nightly-0.6.13.dev20250122185528.dist-info → letta_nightly-0.6.14.dev20250123041709.dist-info}/RECORD +55 -61
- letta/server/rest_api/routers/openai/__init__.py +0 -0
- letta/server/rest_api/routers/openai/assistants/__init__.py +0 -0
- letta/server/rest_api/routers/openai/assistants/assistants.py +0 -115
- letta/server/rest_api/routers/openai/assistants/schemas.py +0 -115
- letta/server/rest_api/routers/openai/chat_completions/__init__.py +0 -0
- letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +0 -120
- {letta_nightly-0.6.13.dev20250122185528.dist-info → letta_nightly-0.6.14.dev20250123041709.dist-info}/LICENSE +0 -0
- {letta_nightly-0.6.13.dev20250122185528.dist-info → letta_nightly-0.6.14.dev20250123041709.dist-info}/WHEEL +0 -0
- {letta_nightly-0.6.13.dev20250122185528.dist-info → letta_nightly-0.6.14.dev20250123041709.dist-info}/entry_points.txt +0 -0
letta/functions/helpers.py
CHANGED
@@ -12,12 +12,37 @@ from letta.schemas.letta_response import LettaResponse
 from letta.schemas.message import MessageCreate


-
-
-
+# TODO: This is kind of hacky, as this is used to search up the action later on composio's side
+# TODO: So be very careful changing/removing these pair of functions
+def generate_func_name_from_composio_action(action_name: str) -> str:
+    """
+    Generates the composio function name from the composio action.

+    Args:
+        action_name: The composio action name
+
+    Returns:
+        function name
+    """
+    return action_name.lower()
+
+
+def generate_composio_action_from_func_name(func_name: str) -> str:
+    """
+    Generates the composio action from the composio function name.
+
+    Args:
+        func_name: The composio function name
+
+    Returns:
+        composio action name
+    """
+    return func_name.upper()
+
+
+def generate_composio_tool_wrapper(action_name: str) -> tuple[str, str]:
     # Generate func name
-    func_name = action_name
+    func_name = generate_func_name_from_composio_action(action_name)

     wrapper_function_str = f"""
 def {func_name}(**kwargs):
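The new helper pair is a deliberate round-trip: composio action names are upper-case, and the generated function names are simply their lower-case form. A minimal sketch (the action name below is invented):

# Round-trip sketch for the two helpers added above; "SLACK_SEND_MESSAGE"
# is a hypothetical composio action name.
action = "SLACK_SEND_MESSAGE"
func_name = generate_func_name_from_composio_action(action)  # -> "slack_send_message"
assert generate_composio_action_from_func_name(func_name) == action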
letta/functions/schema_generator.py
CHANGED
@@ -2,6 +2,7 @@ import inspect
 import warnings
 from typing import Any, Dict, List, Optional, Type, Union, get_args, get_origin

+from composio.client.collections import ActionParametersModel
 from docstring_parser import parse
 from pydantic import BaseModel

@@ -429,3 +430,57 @@ def generate_schema_from_args_schema_v2(
         function_call_json["parameters"]["required"].append("request_heartbeat")

     return function_call_json
+
+
+def generate_tool_schema_for_composio(
+    parameters_model: ActionParametersModel,
+    name: str,
+    description: str,
+    append_heartbeat: bool = True,
+) -> Dict[str, Any]:
+    properties_json = {}
+    required_fields = parameters_model.required or []
+
+    # Extract properties from the ActionParametersModel
+    for field_name, field_props in parameters_model.properties.items():
+        # Initialize the property structure
+        property_schema = {
+            "type": field_props["type"],
+            "description": field_props.get("description", ""),
+        }
+
+        # Handle optional default values
+        if "default" in field_props:
+            property_schema["default"] = field_props["default"]
+
+        # Handle enumerations
+        if "enum" in field_props:
+            property_schema["enum"] = field_props["enum"]
+
+        # Handle array item types
+        if field_props["type"] == "array" and "items" in field_props:
+            property_schema["items"] = field_props["items"]
+
+        # Add the property to the schema
+        properties_json[field_name] = property_schema
+
+    # Add the optional heartbeat parameter
+    if append_heartbeat:
+        properties_json["request_heartbeat"] = {
+            "type": "boolean",
+            "description": "Request an immediate heartbeat after function execution. Set to `True` if you want to send a follow-up message or run a follow-up function.",
+        }
+        required_fields.append("request_heartbeat")
+
+    # Return the final schema
+    return {
+        "name": name,
+        "description": description,
+        "strict": True,
+        "parameters": {
+            "type": "object",
+            "properties": properties_json,
+            "additionalProperties": False,
+            "required": required_fields,
+        },
+    }
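For orientation, a rough sketch of the OpenAI-style tool schema this new helper emits, assuming a hypothetical composio action whose ActionParametersModel carries one required "channel" string property (the action and field names are invented):

# Hypothetical output of generate_tool_schema_for_composio.
expected_schema = {
    "name": "slack_send_message",
    "description": "Send a message to a Slack channel",
    "strict": True,
    "parameters": {
        "type": "object",
        "properties": {
            "channel": {"type": "string", "description": "Target channel"},
            "request_heartbeat": {"type": "boolean", "description": "Request an immediate heartbeat after function execution. ..."},
        },
        "additionalProperties": False,
        "required": ["channel", "request_heartbeat"],
    },
}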
letta/llm_api/helpers.py
CHANGED
@@ -7,8 +7,10 @@ from typing import Any, List, Union
 import requests

 from letta.constants import OPENAI_CONTEXT_WINDOW_ERROR_SUBSTRING
+from letta.schemas.message import Message
 from letta.schemas.openai.chat_completion_response import ChatCompletionResponse, Choice
-from letta.
+from letta.settings import summarizer_settings
+from letta.utils import count_tokens, json_dumps, printd


 def _convert_to_structured_output_helper(property: dict) -> dict:
@@ -287,6 +289,54 @@ def unpack_inner_thoughts_from_kwargs(choice: Choice, inner_thoughts_key: str) -
     return rewritten_choice


+def calculate_summarizer_cutoff(in_context_messages: List[Message], token_counts: List[int], logger: "logging.Logger") -> int:
+    if len(in_context_messages) != len(token_counts):
+        raise ValueError(
+            f"Given in_context_messages has different length from given token_counts: {len(in_context_messages)} != {len(token_counts)}"
+        )
+
+    in_context_messages_openai = [m.to_openai_dict() for m in in_context_messages]
+
+    if summarizer_settings.evict_all_messages:
+        logger.info("Evicting all messages...")
+        return len(in_context_messages)
+    else:
+        # Start at index 1 (past the system message),
+        # and collect messages for summarization until we reach the desired truncation token fraction (eg 50%)
+        # We do the inverse of `desired_memory_token_pressure` to get what we need to remove
+        desired_token_count_to_summarize = int(sum(token_counts) * (1 - summarizer_settings.desired_memory_token_pressure))
+        logger.info(f"desired_token_count_to_summarize={desired_token_count_to_summarize}")
+
+        tokens_so_far = 0
+        cutoff = 0
+        for i, msg in enumerate(in_context_messages_openai):
+            # Skip system
+            if i == 0:
+                continue
+            cutoff = i
+            tokens_so_far += token_counts[i]
+
+            if msg["role"] not in ["user", "tool", "function"] and tokens_so_far >= desired_token_count_to_summarize:
+                # Break if the role is NOT a user or tool/function and tokens_so_far is enough
+                break
+            elif len(in_context_messages) - cutoff - 1 <= summarizer_settings.keep_last_n_messages:
+                # Also break if we reached the `keep_last_n_messages` threshold
+                # NOTE: This may be on a user, tool, or function in theory
+                logger.warning(
+                    f"Breaking summary cutoff early on role={msg['role']} because we hit the `keep_last_n_messages`={summarizer_settings.keep_last_n_messages}"
+                )
+                break
+
+        logger.info(f"Evicting {cutoff}/{len(in_context_messages)} messages...")
+        return cutoff + 1
+
+
+def get_token_counts_for_messages(in_context_messages: List[Message]) -> List[int]:
+    in_context_messages_openai = [m.to_openai_dict() for m in in_context_messages]
+    token_counts = [count_tokens(str(msg)) for msg in in_context_messages_openai]
+    return token_counts
+
+
 def is_context_overflow_error(exception: Union[requests.exceptions.RequestException, Exception]) -> bool:
     """Checks if an exception is due to context overflow (based on common OpenAI response messages)"""
     from letta.utils import printd
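The new cutoff logic works off a token budget rather than a message count. A small worked sketch with invented numbers (desired_memory_token_pressure is a summarizer setting; the 0.5 value here is assumed):

# Invented numbers illustrating calculate_summarizer_cutoff's budget math.
token_counts = [500, 1500, 2000, 1000, 1500, 1500]  # index 0 = system message
desired_memory_token_pressure = 0.5                  # assumed setting value
desired = int(sum(token_counts) * (1 - desired_memory_token_pressure))  # 4000
# Walking from index 1: 1500 -> 3500 -> 4500 crosses 4000 at index 3, so
# (role and keep_last_n_messages permitting) messages [:4] get summarized.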
letta/memory.py
CHANGED
@@ -1,12 +1,13 @@
 from typing import Callable, Dict, List

-from letta.constants import MESSAGE_SUMMARY_REQUEST_ACK
+from letta.constants import MESSAGE_SUMMARY_REQUEST_ACK
 from letta.llm_api.llm_api_tools import create
 from letta.prompts.gpt_summarize import SYSTEM as SUMMARY_PROMPT_SYSTEM
 from letta.schemas.agent import AgentState
 from letta.schemas.enums import MessageRole
 from letta.schemas.memory import Memory
 from letta.schemas.message import Message
+from letta.settings import summarizer_settings
 from letta.utils import count_tokens, printd

@@ -49,8 +50,8 @@ def summarize_messages(
     summary_prompt = SUMMARY_PROMPT_SYSTEM
     summary_input = _format_summary_history(message_sequence_to_summarize)
     summary_input_tkns = count_tokens(summary_input)
-    if summary_input_tkns >
-        trunc_ratio = (
+    if summary_input_tkns > summarizer_settings.memory_warning_threshold * context_window:
+        trunc_ratio = (summarizer_settings.memory_warning_threshold * context_window / summary_input_tkns) * 0.8  # For good measure...
         cutoff = int(len(message_sequence_to_summarize) * trunc_ratio)
         summary_input = str(
             [summarize_messages(agent_state, message_sequence_to_summarize=message_sequence_to_summarize[:cutoff])]
@@ -58,10 +59,11 @@ def summarize_messages(
         )

     dummy_agent_id = agent_state.id
-    message_sequence = [
-
-
-
+    message_sequence = [
+        Message(agent_id=dummy_agent_id, role=MessageRole.system, text=summary_prompt),
+        Message(agent_id=dummy_agent_id, role=MessageRole.assistant, text=MESSAGE_SUMMARY_REQUEST_ACK),
+        Message(agent_id=dummy_agent_id, role=MessageRole.user, text=summary_input),
+    ]

     # TODO: We need to eventually have a separate LLM config for the summarizer LLM
     llm_config_no_inner_thoughts = agent_state.llm_config.model_copy(deep=True)
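When the summarizer's own input is too large, the truncation ratio is now derived from summarizer_settings instead of a constant. A numeric sketch (all values invented, including the 0.75 threshold):

# Invented numbers for the trunc_ratio computation above.
context_window = 8192
memory_warning_threshold = 0.75        # assumed summarizer_settings value
summary_input_tkns = 10_000            # 10_000 > 0.75 * 8192 = 6144, so truncate
trunc_ratio = (0.75 * 8192 / 10_000) * 0.8   # ~0.49 ("for good measure")
# Roughly the first half of the messages is summarized first, and that summary
# becomes the new, shorter summary_input for the outer call.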
letta/orm/agent.py
CHANGED
@@ -113,14 +113,14 @@ class Agent(SqlalchemyBase, OrganizationMixin):
             "description": self.description,
             "message_ids": self.message_ids,
             "tools": self.tools,
-            "sources": self.sources,
+            "sources": [source.to_pydantic() for source in self.sources],
             "tags": [t.tag for t in self.tags],
             "tool_rules": self.tool_rules,
             "system": self.system,
             "agent_type": self.agent_type,
             "llm_config": self.llm_config,
             "embedding_config": self.embedding_config,
-            "
+            "metadata": self.metadata_,
             "memory": Memory(blocks=[b.to_pydantic() for b in self.core_memory]),
             "created_by_id": self.created_by_id,
             "last_updated_by_id": self.last_updated_by_id,
letta/orm/block.py
CHANGED
@@ -45,7 +45,9 @@ class Block(OrganizationMixin, SqlalchemyBase):
                 Schema = Persona
             case _:
                 Schema = PydanticBlock
-
+        model_dict = {k: v for k, v in self.__dict__.items() if k in self.__pydantic_model__.model_fields}
+        model_dict["metadata"] = self.metadata_
+        return Schema.model_validate(model_dict)


 @event.listens_for(Block, "after_update")  # Changed from 'before_update'
letta/orm/custom_columns.py
CHANGED
@@ -2,13 +2,14 @@ import base64
 from typing import List, Union

 import numpy as np
+from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall
+from openai.types.chat.chat_completion_message_tool_call import Function as OpenAIFunction
 from sqlalchemy import JSON
 from sqlalchemy.types import BINARY, TypeDecorator

 from letta.schemas.embedding_config import EmbeddingConfig
 from letta.schemas.enums import ToolRuleType
 from letta.schemas.llm_config import LLMConfig
-from letta.schemas.openai.chat_completions import ToolCall, ToolCallFunction
 from letta.schemas.tool_rule import ChildToolRule, ConditionalToolRule, InitToolRule, TerminalToolRule


@@ -109,7 +110,7 @@ class ToolCallColumn(TypeDecorator):
         if value:
             values = []
             for v in value:
-                if isinstance(v,
+                if isinstance(v, OpenAIToolCall):
                     values.append(v.model_dump())
                 else:
                     values.append(v)
@@ -122,11 +123,11 @@ class ToolCallColumn(TypeDecorator):
             tools = []
             for tool_value in value:
                 if "function" in tool_value:
-                    tool_call_function =
+                    tool_call_function = OpenAIFunction(**tool_value["function"])
                     del tool_value["function"]
                 else:
                     tool_call_function = None
-                tools.append(
+                tools.append(OpenAIToolCall(function=tool_call_function, **tool_value))
             return tools
         return value

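The column now serializes the stock openai tool-call models directly. A minimal round-trip sketch mirroring process_bind_param/process_result_value (the call contents are invented):

# Round-trip sketch for ToolCallColumn with the openai types it now imports.
from openai.types.chat.chat_completion_message_tool_call import (
    ChatCompletionMessageToolCall as OpenAIToolCall,
    Function as OpenAIFunction,
)

call = OpenAIToolCall(
    id="call-123",
    type="function",
    function=OpenAIFunction(name="send_message", arguments='{"message": "hi"}'),
)
stored = call.model_dump()                              # what process_bind_param writes as JSON
function = OpenAIFunction(**stored.pop("function"))
restored = OpenAIToolCall(function=function, **stored)  # what process_result_value rebuilds
assert restored == call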
letta/orm/enums.py
CHANGED
letta/orm/message.py
CHANGED
@@ -1,5 +1,6 @@
 from typing import Optional

+from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall
 from sqlalchemy import ForeignKey, Index
 from sqlalchemy.orm import Mapped, mapped_column, relationship

@@ -7,7 +8,6 @@ from letta.orm.custom_columns import ToolCallColumn
 from letta.orm.mixins import AgentMixin, OrganizationMixin
 from letta.orm.sqlalchemy_base import SqlalchemyBase
 from letta.schemas.message import Message as PydanticMessage
-from letta.schemas.openai.chat_completions import ToolCall


 class Message(SqlalchemyBase, OrganizationMixin, AgentMixin):
@@ -22,7 +22,7 @@ class Message(SqlalchemyBase, OrganizationMixin, AgentMixin):
     text: Mapped[Optional[str]] = mapped_column(nullable=True, doc="Message content")
     model: Mapped[Optional[str]] = mapped_column(nullable=True, doc="LLM model used")
     name: Mapped[Optional[str]] = mapped_column(nullable=True, doc="Name for multi-agent scenarios")
-    tool_calls: Mapped[
+    tool_calls: Mapped[OpenAIToolCall] = mapped_column(ToolCallColumn, doc="Tool call information")
     tool_call_id: Mapped[Optional[str]] = mapped_column(nullable=True, doc="ID of the tool call")
     step_id: Mapped[Optional[str]] = mapped_column(
         ForeignKey("steps.id", ondelete="SET NULL"), nullable=True, doc="ID of the step that this message belongs to"
letta/orm/sqlalchemy_base.py
CHANGED
@@ -449,6 +449,11 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base):

     def to_pydantic(self) -> "BaseModel":
         """converts to the basic pydantic model counterpart"""
+        if hasattr(self, "metadata_"):
+            model_dict = {k: v for k, v in self.__dict__.items() if k in self.__pydantic_model__.model_fields}
+            model_dict["metadata"] = self.metadata_
+            return self.__pydantic_model__.model_validate(model_dict)
+
         return self.__pydantic_model__.model_validate(self)

     def to_record(self) -> "BaseModel":
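This generic override exists because SQLAlchemy reserves the metadata attribute on declarative models, so the ORM column is named metadata_ while the pydantic schemas expose plain metadata. A sketch of the mapping (orm_block stands in for any ORM instance carrying a metadata_ column):

# Hedged sketch: the ORM's metadata_ column surfaces as `metadata` on the
# pydantic side after to_pydantic().
pydantic_block = orm_block.to_pydantic()
assert pydantic_block.metadata == orm_block.metadata_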
letta/schemas/agent.py
CHANGED
@@ -72,7 +72,7 @@ class AgentState(OrmMetadataBase, validate_assignment=True):
     organization_id: Optional[str] = Field(None, description="The unique identifier of the organization associated with the agent.")

     description: Optional[str] = Field(None, description="The description of the agent.")
-
+    metadata: Optional[Dict] = Field(None, description="The metadata of the agent.")

     memory: Memory = Field(..., description="The in-context memory of the agent.")
     tools: List[Tool] = Field(..., description="The tools used by the agent.")
@@ -122,7 +122,7 @@ class CreateAgent(BaseModel, validate_assignment=True):  #
         False, description="If true, attaches the Letta multi-agent tools (e.g. sending a message to another agent)."
     )
     description: Optional[str] = Field(None, description="The description of the agent.")
-
+    metadata: Optional[Dict] = Field(None, description="The metadata of the agent.")
     model: Optional[str] = Field(
         None,
         description="The LLM configuration handle used by the agent, specified in the format "
@@ -203,7 +203,7 @@ class UpdateAgent(BaseModel):
     embedding_config: Optional[EmbeddingConfig] = Field(None, description="The embedding configuration used by the agent.")
     message_ids: Optional[List[str]] = Field(None, description="The ids of the messages in the agent's in-context memory.")
     description: Optional[str] = Field(None, description="The description of the agent.")
-
+    metadata: Optional[Dict] = Field(None, description="The metadata of the agent.")
     tool_exec_environment_variables: Optional[Dict[str, str]] = Field(
         None, description="The environment variables for tool execution specific to this agent."
     )
letta/schemas/block.py
CHANGED
@@ -27,7 +27,7 @@ class BaseBlock(LettaBase, validate_assignment=True):

     # metadata
     description: Optional[str] = Field(None, description="Description of the block.")
-
+    metadata: Optional[dict] = Field({}, description="Metadata of the block.")

     # def __len__(self):
     #     return len(self.value)
@@ -63,7 +63,7 @@ class Block(BaseBlock):
         label (str): The label of the block (e.g. 'human', 'persona'). This defines a category for the block.
         template_name (str): The name of the block template (if it is a template).
         description (str): Description of the block.
-
+        metadata (Dict): Metadata of the block.
         user_id (str): The unique identifier of the user associated with the block.
     """

letta/schemas/environment_variables.py
CHANGED
@@ -26,7 +26,7 @@ class EnvironmentVariableUpdateBase(LettaBase):
     description: Optional[str] = Field(None, description="An optional description of the environment variable.")


-#
+# Environment Variable
 class SandboxEnvironmentVariableBase(EnvironmentVariableBase):
     __id_prefix__ = "sandbox-env"
     sandbox_config_id: str = Field(..., description="The ID of the sandbox config this environment variable belongs to.")
letta/schemas/job.py
CHANGED
@@ -12,7 +12,7 @@ class JobBase(OrmMetadataBase):
     __id_prefix__ = "job"
     status: JobStatus = Field(default=JobStatus.created, description="The status of the job.")
     completed_at: Optional[datetime] = Field(None, description="The unix timestamp of when the job was completed.")
-
+    metadata: Optional[dict] = Field(None, description="The metadata of the job.")
     job_type: JobType = Field(default=JobType.JOB, description="The type of the job.")

letta/schemas/letta_base.py
CHANGED
@@ -88,6 +88,12 @@ class LettaBase(BaseModel):
             return f"{cls.__id_prefix__}-{v}"
         return v

+    def model_dump(self, to_orm: bool = False, **kwargs):
+        data = super().model_dump(**kwargs)
+        if to_orm and "metadata" in data:
+            data["metadata_"] = data.pop("metadata")
+        return data
+

 class OrmMetadataBase(LettaBase):
     # metadata fields
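model_dump(to_orm=True) is the inverse of the to_pydantic change above: it renames the pydantic metadata field back to the metadata_ column name before the data reaches SQLAlchemy. A usage sketch (the Block fields are illustrative):

# Hedged sketch of the new to_orm flag on LettaBase.model_dump.
block = Block(label="human", value="...", metadata={"origin": "import"})
data = block.model_dump(to_orm=True)
assert "metadata" not in data
assert data["metadata_"] == {"origin": "import"}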
letta/schemas/letta_message.py
CHANGED
@@ -222,22 +222,22 @@ LettaMessageUnion = Annotated[
 def create_letta_message_union_schema():
     return {
         "oneOf": [
-            {"$ref": "#/components/schemas/SystemMessage
-            {"$ref": "#/components/schemas/UserMessage
+            {"$ref": "#/components/schemas/SystemMessage"},
+            {"$ref": "#/components/schemas/UserMessage"},
             {"$ref": "#/components/schemas/ReasoningMessage"},
             {"$ref": "#/components/schemas/ToolCallMessage"},
             {"$ref": "#/components/schemas/ToolReturnMessage"},
-            {"$ref": "#/components/schemas/AssistantMessage
+            {"$ref": "#/components/schemas/AssistantMessage"},
         ],
         "discriminator": {
             "propertyName": "message_type",
             "mapping": {
-                "system_message": "#/components/schemas/SystemMessage
-                "user_message": "#/components/schemas/UserMessage
+                "system_message": "#/components/schemas/SystemMessage",
+                "user_message": "#/components/schemas/UserMessage",
                 "reasoning_message": "#/components/schemas/ReasoningMessage",
                 "tool_call_message": "#/components/schemas/ToolCallMessage",
                 "tool_return_message": "#/components/schemas/ToolReturnMessage",
-                "assistant_message": "#/components/schemas/AssistantMessage
+                "assistant_message": "#/components/schemas/AssistantMessage",
             },
         },
     }
letta/schemas/memory.py
CHANGED
@@ -7,10 +7,11 @@ from pydantic import BaseModel, Field
 if TYPE_CHECKING:
     pass

+from openai.types.beta.function_tool import FunctionTool as OpenAITool
+
 from letta.constants import CORE_MEMORY_BLOCK_CHAR_LIMIT
 from letta.schemas.block import Block
 from letta.schemas.message import Message
-from letta.schemas.openai.chat_completion_request import Tool


 class ContextWindowOverview(BaseModel):
@@ -47,7 +48,7 @@ class ContextWindowOverview(BaseModel):
     summary_memory: Optional[str] = Field(None, description="The content of the summary memory.")

     num_tokens_functions_definitions: int = Field(..., description="The number of tokens in the functions definitions.")
-    functions_definitions: Optional[List[
+    functions_definitions: Optional[List[OpenAITool]] = Field(..., description="The content of the functions definitions.")

     num_tokens_messages: int = Field(..., description="The number of tokens in the messages list.")
     # TODO make list of messages?
letta/schemas/message.py
CHANGED
@@ -4,24 +4,32 @@ import warnings
 from datetime import datetime, timezone
 from typing import List, Literal, Optional

+from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall
+from openai.types.chat.chat_completion_message_tool_call import Function as OpenAIFunction
 from pydantic import BaseModel, Field, field_validator

 from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG, TOOL_CALL_ID_MAX_LEN
 from letta.local_llm.constants import INNER_THOUGHTS_KWARG
 from letta.schemas.enums import MessageRole
 from letta.schemas.letta_base import OrmMetadataBase
-from letta.schemas.letta_message import
-
-
-
+from letta.schemas.letta_message import (
+    AssistantMessage,
+    LettaMessage,
+    ReasoningMessage,
+    SystemMessage,
+    ToolCall,
+    ToolCallMessage,
+    ToolReturnMessage,
+    UserMessage,
+)
 from letta.utils import get_utc_time, is_utc_datetime, json_dumps


 def add_inner_thoughts_to_tool_call(
-    tool_call:
+    tool_call: OpenAIToolCall,
     inner_thoughts: str,
     inner_thoughts_key: str,
-) ->
+) -> OpenAIToolCall:
     """Add inner thoughts (arg + value) to a tool call"""
     # because the kwargs are stored as strings, we need to load then write the JSON dicts
     try:
@@ -68,7 +76,7 @@ class MessageUpdate(BaseModel):
     name: Optional[str] = Field(None, description="The name of the participant.")
     # NOTE: we probably shouldn't allow updating the created_at field, right?
     # created_at: Optional[datetime] = Field(None, description="The time the message was created.")
-    tool_calls: Optional[List[
+    tool_calls: Optional[List[OpenAIToolCall,]] = Field(None, description="The list of tool calls requested.")
     tool_call_id: Optional[str] = Field(None, description="The id of the tool call.")

@@ -85,7 +93,7 @@ class Message(BaseMessage):
         model (str): The model used to make the function call.
         name (str): The name of the participant.
         created_at (datetime): The time the message was created.
-        tool_calls (List[
+        tool_calls (List[OpenAIToolCall,]): The list of tool calls requested.
         tool_call_id (str): The id of the tool call.

     """
@@ -97,7 +105,7 @@ class Message(BaseMessage):
     agent_id: Optional[str] = Field(None, description="The unique identifier of the agent.")
     model: Optional[str] = Field(None, description="The model used to make the function call.")
     name: Optional[str] = Field(None, description="The name of the participant.")
-    tool_calls: Optional[List[
+    tool_calls: Optional[List[OpenAIToolCall,]] = Field(None, description="The list of tool calls requested.")
     tool_call_id: Optional[str] = Field(None, description="The id of the tool call.")
     step_id: Optional[str] = Field(None, description="The id of the step that this message was created in.")
     # This overrides the optional base orm schema, created_at MUST exist on all messages objects
@@ -165,7 +173,7 @@ class Message(BaseMessage):
                 ToolCallMessage(
                     id=self.id,
                     date=self.created_at,
-                    tool_call=
+                    tool_call=ToolCall(
                         name=tool_call.function.name,
                         arguments=tool_call.function.arguments,
                         tool_call_id=tool_call.id,
@@ -304,10 +312,10 @@ class Message(BaseMessage):
             # Convert a function_call (from an assistant message) into a tool_call
             # NOTE: this does not conventionally include a tool_call_id (ToolCall.id), it's on the caster to provide it
             tool_calls = [
-
+                OpenAIToolCall(
                     id=openai_message_dict["tool_call_id"],  # NOTE: unconventional source, not to spec
                     type="function",
-                    function=
+                    function=OpenAIFunction(
                         name=openai_message_dict["function_call"]["name"],
                         arguments=openai_message_dict["function_call"]["arguments"],
                     ),
@@ -352,7 +360,7 @@ class Message(BaseMessage):
             assert openai_message_dict["role"] == "assistant", openai_message_dict

             tool_calls = [
-
+                OpenAIToolCall(id=tool_call["id"], type=tool_call["type"], function=tool_call["function"])
                 for tool_call in openai_message_dict["tool_calls"]
             ]
         else:
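Since tool-call arguments are stored as a JSON string, add_inner_thoughts_to_tool_call has to load, extend, and re-dump them; the diff only swaps the tool-call type to the stock openai model. A self-contained sketch (the kwarg name "inner_thoughts" is assumed for INNER_THOUGHTS_KWARG, and the call contents are invented):

# Sketch of the load -> insert -> dump dance on an OpenAIToolCall's arguments.
import json

from openai.types.chat.chat_completion_message_tool_call import (
    ChatCompletionMessageToolCall as OpenAIToolCall,
    Function as OpenAIFunction,
)

call = OpenAIToolCall(
    id="call-1",
    type="function",
    function=OpenAIFunction(name="send_message", arguments='{"message": "hi"}'),
)
args = json.loads(call.function.arguments)
args["inner_thoughts"] = "User greeted me; respond warmly."  # key name assumed
call.function.arguments = json.dumps(args)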
letta/schemas/passage.py
CHANGED
@@ -23,7 +23,7 @@ class PassageBase(OrmMetadataBase):

     # file association
     file_id: Optional[str] = Field(None, description="The unique identifier of the file associated with the passage.")
-
+    metadata: Optional[Dict] = Field({}, description="The metadata of the passage.")


 class Passage(PassageBase):
letta/schemas/source.py
CHANGED
@@ -24,7 +24,7 @@ class Source(BaseSource):
         name (str): The name of the source.
         embedding_config (EmbeddingConfig): The embedding configuration used by the source.
         user_id (str): The ID of the user that created the source.
-
+        metadata (dict): Metadata associated with the source.
         description (str): The description of the source.
     """

@@ -33,7 +33,7 @@ class Source(BaseSource):
     description: Optional[str] = Field(None, description="The description of the source.")
     embedding_config: EmbeddingConfig = Field(..., description="The embedding configuration used by the source.")
     organization_id: Optional[str] = Field(None, description="The ID of the organization that created the source.")
-
+    metadata: Optional[dict] = Field(None, description="Metadata associated with the source.")

     # metadata fields
     created_by_id: Optional[str] = Field(None, description="The id of the user that made this Tool.")
@@ -54,7 +54,7 @@ class SourceCreate(BaseSource):

     # optional
     description: Optional[str] = Field(None, description="The description of the source.")
-
+    metadata: Optional[dict] = Field(None, description="Metadata associated with the source.")


 class SourceUpdate(BaseSource):
@@ -64,5 +64,5 @@ class SourceUpdate(BaseSource):

     name: Optional[str] = Field(None, description="The name of the source.")
     description: Optional[str] = Field(None, description="The description of the source.")
-
+    metadata: Optional[dict] = Field(None, description="Metadata associated with the source.")
     embedding_config: Optional[EmbeddingConfig] = Field(None, description="The embedding configuration used by the source.")