letta-nightly 0.11.7.dev20250909104137__py3-none-any.whl → 0.11.7.dev20250910104051__py3-none-any.whl
This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- letta/adapters/letta_llm_adapter.py +81 -0
- letta/adapters/letta_llm_request_adapter.py +111 -0
- letta/adapters/letta_llm_stream_adapter.py +169 -0
- letta/agents/base_agent.py +4 -1
- letta/agents/base_agent_v2.py +68 -0
- letta/agents/helpers.py +3 -5
- letta/agents/letta_agent.py +23 -12
- letta/agents/letta_agent_v2.py +1220 -0
- letta/agents/voice_agent.py +2 -1
- letta/constants.py +1 -1
- letta/errors.py +12 -0
- letta/functions/function_sets/base.py +53 -12
- letta/functions/schema_generator.py +1 -1
- letta/groups/sleeptime_multi_agent_v3.py +231 -0
- letta/helpers/tool_rule_solver.py +4 -0
- letta/helpers/tpuf_client.py +607 -34
- letta/interfaces/anthropic_streaming_interface.py +64 -24
- letta/interfaces/openai_streaming_interface.py +80 -37
- letta/llm_api/openai_client.py +45 -4
- letta/orm/block.py +1 -0
- letta/orm/group.py +1 -0
- letta/orm/source.py +8 -1
- letta/orm/step_metrics.py +10 -0
- letta/schemas/block.py +4 -0
- letta/schemas/enums.py +1 -0
- letta/schemas/group.py +8 -0
- letta/schemas/letta_message.py +1 -1
- letta/schemas/letta_request.py +2 -2
- letta/schemas/mcp.py +9 -1
- letta/schemas/message.py +23 -0
- letta/schemas/providers/ollama.py +1 -1
- letta/schemas/providers.py +1 -2
- letta/schemas/source.py +6 -0
- letta/schemas/step_metrics.py +2 -0
- letta/server/rest_api/routers/v1/__init__.py +2 -0
- letta/server/rest_api/routers/v1/agents.py +100 -5
- letta/server/rest_api/routers/v1/blocks.py +6 -0
- letta/server/rest_api/routers/v1/folders.py +23 -5
- letta/server/rest_api/routers/v1/groups.py +6 -0
- letta/server/rest_api/routers/v1/internal_templates.py +218 -12
- letta/server/rest_api/routers/v1/messages.py +14 -19
- letta/server/rest_api/routers/v1/runs.py +43 -28
- letta/server/rest_api/routers/v1/sources.py +23 -5
- letta/server/rest_api/routers/v1/tools.py +42 -0
- letta/server/rest_api/streaming_response.py +9 -1
- letta/server/server.py +2 -1
- letta/services/agent_manager.py +39 -59
- letta/services/agent_serialization_manager.py +22 -8
- letta/services/archive_manager.py +60 -9
- letta/services/block_manager.py +5 -0
- letta/services/file_processor/embedder/base_embedder.py +5 -0
- letta/services/file_processor/embedder/openai_embedder.py +4 -0
- letta/services/file_processor/embedder/pinecone_embedder.py +5 -1
- letta/services/file_processor/embedder/turbopuffer_embedder.py +71 -0
- letta/services/file_processor/file_processor.py +9 -7
- letta/services/group_manager.py +74 -11
- letta/services/mcp_manager.py +132 -26
- letta/services/message_manager.py +229 -125
- letta/services/passage_manager.py +2 -1
- letta/services/source_manager.py +23 -1
- letta/services/summarizer/summarizer.py +2 -0
- letta/services/tool_executor/core_tool_executor.py +2 -120
- letta/services/tool_executor/files_tool_executor.py +133 -8
- letta/settings.py +6 -0
- letta/utils.py +34 -1
- {letta_nightly-0.11.7.dev20250909104137.dist-info → letta_nightly-0.11.7.dev20250910104051.dist-info}/METADATA +2 -2
- {letta_nightly-0.11.7.dev20250909104137.dist-info → letta_nightly-0.11.7.dev20250910104051.dist-info}/RECORD +70 -63
- {letta_nightly-0.11.7.dev20250909104137.dist-info → letta_nightly-0.11.7.dev20250910104051.dist-info}/WHEEL +0 -0
- {letta_nightly-0.11.7.dev20250909104137.dist-info → letta_nightly-0.11.7.dev20250910104051.dist-info}/entry_points.txt +0 -0
- {letta_nightly-0.11.7.dev20250909104137.dist-info → letta_nightly-0.11.7.dev20250910104051.dist-info}/licenses/LICENSE +0 -0
letta/agents/voice_agent.py
CHANGED
@@ -494,7 +494,8 @@ class VoiceAgent(BaseAgent):
             start_date=start_date,
             end_date=end_date,
         )
-
+        # Extract passages from tuples and format
+        formatted_archival_results = [{"timestamp": str(passage.created_at), "content": passage.text} for passage, _, _ in archival_results]
         response = {
             "archival_search_results": formatted_archival_results,
         }
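For illustration, the added line above unpacks 3-tuples whose first element is a passage-like object and ignores the other two elements. A minimal standalone sketch of the same formatting step, with hypothetical tuple contents:

from datetime import datetime, timezone
from types import SimpleNamespace

# Hypothetical archival results: 3-tuples whose first element is a passage-like object
archival_results = [
    (SimpleNamespace(created_at=datetime.now(timezone.utc), text="User prefers dark mode"), 0.92, None),
]

# Same formatting pattern as the added line above
formatted_archival_results = [
    {"timestamp": str(passage.created_at), "content": passage.text} for passage, _, _ in archival_results
]
print(formatted_archival_results)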
letta/constants.py
CHANGED
@@ -173,7 +173,7 @@ CONVERSATION_SEARCH_TOOL_NAME = "conversation_search"
 PRE_EXECUTION_MESSAGE_ARG = "pre_exec_msg"

 REQUEST_HEARTBEAT_PARAM = "request_heartbeat"
-REQUEST_HEARTBEAT_DESCRIPTION = "Request an immediate heartbeat after function execution.
+REQUEST_HEARTBEAT_DESCRIPTION = "Request an immediate heartbeat after function execution. You MUST set this value to `True` if you want to send a follow-up message or run a follow-up tool call (chain multiple tools together). If set to `False` (the default), then the chain of execution will end immediately after this function call."


 # Structured output models
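For illustration only, a minimal sketch of how these two constants could be wired into a tool's JSON schema as an extra boolean parameter; the helper below is hypothetical and is not the package's actual schema-building code:

from letta.constants import REQUEST_HEARTBEAT_DESCRIPTION, REQUEST_HEARTBEAT_PARAM

def add_request_heartbeat_param(tool_schema: dict) -> dict:
    """Hypothetical helper: expose request_heartbeat as a boolean tool parameter."""
    params = tool_schema.setdefault("parameters", {"type": "object", "properties": {}, "required": []})
    params["properties"][REQUEST_HEARTBEAT_PARAM] = {
        "type": "boolean",
        "description": REQUEST_HEARTBEAT_DESCRIPTION,
    }
    return tool_schema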
letta/errors.py
CHANGED
@@ -18,6 +18,7 @@ class ErrorCode(Enum):
     CONTEXT_WINDOW_EXCEEDED = "CONTEXT_WINDOW_EXCEEDED"
     RATE_LIMIT_EXCEEDED = "RATE_LIMIT_EXCEEDED"
     TIMEOUT = "TIMEOUT"
+    CONFLICT = "CONFLICT"


 class LettaError(Exception):
@@ -40,6 +41,17 @@ class LettaError(Exception):
         return f"{self.__class__.__name__}(message='{self.message}', code='{self.code}', details={self.details})"


+class PendingApprovalError(LettaError):
+    """Error raised when attempting an operation while agent is waiting for tool approval."""
+
+    def __init__(self, pending_request_id: Optional[str] = None):
+        self.pending_request_id = pending_request_id
+        message = "Cannot send a new message: The agent is waiting for approval on a tool call. Please approve or deny the pending request before continuing."
+        code = ErrorCode.CONFLICT
+        details = {"error_code": "PENDING_APPROVAL", "pending_request_id": pending_request_id}
+        super().__init__(message=message, code=code, details=details)
+
+
 class LettaToolCreateError(LettaError):
     """Error raised when a tool cannot be created."""

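A minimal sketch of how a caller might surface the new PendingApprovalError; mapping it to an HTTP 409 response and the agent.step interface used here are assumptions for illustration, not code from this package:

from fastapi import HTTPException

from letta.errors import PendingApprovalError

async def send_message_or_409(agent, input_messages):
    """Hypothetical wrapper: translate a pending-approval conflict into an HTTP 409."""
    try:
        return await agent.step(input_messages=input_messages)  # assumed agent interface
    except PendingApprovalError as e:
        raise HTTPException(
            status_code=409,
            detail={"message": e.message, "pending_request_id": e.pending_request_id},
        )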
letta/functions/function_sets/base.py
CHANGED
@@ -35,8 +35,8 @@ def conversation_search(
         query (str): String to search for using both text matching and semantic similarity.
         roles (Optional[List[Literal["assistant", "user", "tool"]]]): Optional list of message roles to filter by.
         limit (Optional[int]): Maximum number of results to return. Uses system default if not specified.
-        start_date (Optional[str]): Filter results to messages created after this date. ISO 8601 format: "YYYY-MM-DD" or "YYYY-MM-DDTHH:MM". Examples: "2024-01-15", "2024-01-15T14:30".
-        end_date (Optional[str]): Filter results to messages created before this date. ISO 8601 format: "YYYY-MM-DD" or "YYYY-MM-DDTHH:MM". Examples: "2024-01-20", "2024-01-20T17:00".
+        start_date (Optional[str]): Filter results to messages created on or after this date (INCLUSIVE). When using date-only format (e.g., "2024-01-15"), includes messages starting from 00:00:00 of that day. ISO 8601 format: "YYYY-MM-DD" or "YYYY-MM-DDTHH:MM". Examples: "2024-01-15" (from start of Jan 15), "2024-01-15T14:30" (from 2:30 PM on Jan 15).
+        end_date (Optional[str]): Filter results to messages created on or before this date (INCLUSIVE). When using date-only format (e.g., "2024-01-20"), includes all messages from that entire day. ISO 8601 format: "YYYY-MM-DD" or "YYYY-MM-DDTHH:MM". Examples: "2024-01-20" (includes all of Jan 20), "2024-01-20T17:00" (up to 5 PM on Jan 20).

     Examples:
         # Search all messages
@@ -45,8 +45,17 @@ def conversation_search(
         # Search only assistant messages
         conversation_search(query="error handling", roles=["assistant"])

-        # Search with date range
+        # Search with date range (inclusive of both dates)
         conversation_search(query="meetings", start_date="2024-01-15", end_date="2024-01-20")
+        # This includes all messages from Jan 15 00:00:00 through Jan 20 23:59:59
+
+        # Search messages from a specific day (inclusive)
+        conversation_search(query="bug reports", start_date="2024-09-04", end_date="2024-09-04")
+        # This includes ALL messages from September 4, 2024
+
+        # Search with specific time boundaries
+        conversation_search(query="deployment", start_date="2024-01-15T09:00", end_date="2024-01-15T17:30")
+        # This includes messages from 9 AM to 5:30 PM on Jan 15

         # Search with limit
         conversation_search(query="debugging", limit=10)
@@ -115,18 +124,24 @@ async def archival_memory_search(
         tags (Optional[list[str]]): Optional list of tags to filter search results. Only passages with these tags will be returned.
         tag_match_mode (Literal["any", "all"]): How to match tags - "any" to match passages with any of the tags, "all" to match only passages with all tags. Defaults to "any".
         top_k (Optional[int]): Maximum number of results to return. Uses system default if not specified.
-        start_datetime (Optional[str]): Filter results to passages created after this datetime. ISO 8601 format: "YYYY-MM-DD" or "YYYY-MM-DDTHH:MM". Examples: "2024-01-15", "2024-01-15T14:30".
-        end_datetime (Optional[str]): Filter results to passages created before this datetime. ISO 8601 format: "YYYY-MM-DD" or "YYYY-MM-DDTHH:MM". Examples: "2024-01-20", "2024-01-20T17:00".
+        start_datetime (Optional[str]): Filter results to passages created on or after this datetime (INCLUSIVE). When using date-only format (e.g., "2024-01-15"), includes passages starting from 00:00:00 of that day. ISO 8601 format: "YYYY-MM-DD" or "YYYY-MM-DDTHH:MM". Examples: "2024-01-15" (from start of Jan 15), "2024-01-15T14:30" (from 2:30 PM on Jan 15).
+        end_datetime (Optional[str]): Filter results to passages created on or before this datetime (INCLUSIVE). When using date-only format (e.g., "2024-01-20"), includes all passages from that entire day. ISO 8601 format: "YYYY-MM-DD" or "YYYY-MM-DDTHH:MM". Examples: "2024-01-20" (includes all of Jan 20), "2024-01-20T17:00" (up to 5 PM on Jan 20).

     Examples:
         # Search all passages
         archival_memory_search(query="project updates")

-        # Search with date range (
+        # Search with date range (inclusive of both dates)
         archival_memory_search(query="meetings", start_datetime="2024-01-15", end_datetime="2024-01-20")
+        # This includes all passages from Jan 15 00:00:00 through Jan 20 23:59:59
+
+        # Search passages from a specific day (inclusive)
+        archival_memory_search(query="bug reports", start_datetime="2024-09-04", end_datetime="2024-09-04")
+        # This includes ALL passages from September 4, 2024

         # Search with specific time range
         archival_memory_search(query="error logs", start_datetime="2024-01-15T09:30", end_datetime="2024-01-15T17:30")
+        # This includes passages from 9:30 AM to 5:30 PM on Jan 15

         # Search from a specific point in time onwards
         archival_memory_search(query="customer feedback", start_datetime="2024-01-15T14:00")
@@ -208,6 +223,25 @@ def memory_replace(agent_state: "AgentState", label: str, old_str: str, new_str:
         old_str (str): The text to replace (must match exactly, including whitespace and indentation).
         new_str (str): The new text to insert in place of the old text. Do not include line number prefixes.

+    Examples:
+        # Update a block containing information about the user
+        memory_replace(label="human", old_str="Their name is Alice", new_str="Their name is Bob")
+
+        # Update a block containing a todo list
+        memory_replace(label="todos", old_str="- [ ] Step 5: Search the web", new_str="- [x] Step 5: Search the web")
+
+        # Pass an empty string to
+        memory_replace(label="human", old_str="Their name is Alice", new_str="")
+
+        # Bad example - do NOT add (view-only) line numbers to the args
+        memory_replace(label="human", old_str="Line 1: Their name is Alice", new_str="Line 1: Their name is Bob")
+
+        # Bad example - do NOT include the number number warning either
+        memory_replace(label="human", old_str="# NOTE: Line numbers shown below are to help during editing. Do NOT include line number prefixes in your memory edit tool calls.\\nLine 1: Their name is Alice", new_str="Line 1: Their name is Bob")
+
+        # Good example - no line numbers or line number warning (they are view-only), just the text
+        memory_replace(label="human", old_str="Their name is Alice", new_str="Their name is Bob")
+
     Returns:
         str: The success message
     """
@@ -248,11 +282,11 @@ def memory_replace(agent_state: "AgentState", label: str, old_str: str, new_str:
     agent_state.memory.update_block_value(label=label, value=new_value)

     # Create a snippet of the edited section
-    SNIPPET_LINES = 3
-    replacement_line = current_value.split(old_str)[0].count("\n")
-    start_line = max(0, replacement_line - SNIPPET_LINES)
-    end_line = replacement_line + SNIPPET_LINES + new_str.count("\n")
-    snippet = "\n".join(new_value.split("\n")[start_line : end_line + 1])
+    # SNIPPET_LINES = 3
+    # replacement_line = current_value.split(old_str)[0].count("\n")
+    # start_line = max(0, replacement_line - SNIPPET_LINES)
+    # end_line = replacement_line + SNIPPET_LINES + new_str.count("\n")
+    # snippet = "\n".join(new_value.split("\n")[start_line : end_line + 1])

     # Prepare the success message
     success_msg = f"The core memory block with label `{label}` has been edited. "
@@ -275,6 +309,13 @@ def memory_insert(agent_state: "AgentState", label: str, new_str: str, insert_li
         new_str (str): The text to insert. Do not include line number prefixes.
         insert_line (int): The line number after which to insert the text (0 for beginning of file). Defaults to -1 (end of the file).

+    Examples:
+        # Update a block containing information about the user (append to the end of the block)
+        memory_insert(label="customer", new_str="The customer's ticket number is 12345")
+
+        # Update a block containing information about the user (insert at the beginning of the block)
+        memory_insert(label="customer", new_str="The customer's ticket number is 12345", insert_line=0)
+
     Returns:
         Optional[str]: None is always returned as this function does not produce a response.
     """
@@ -313,7 +354,7 @@ def memory_insert(agent_state: "AgentState", label: str, new_str: str, insert_li

     # Collate into the new value to update
     new_value = "\n".join(new_value_lines)
-    snippet = "\n".join(snippet_lines)
+    # snippet = "\n".join(snippet_lines)

     # Write into the block
     agent_state.memory.update_block_value(label=label, value=new_value)
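The docstring updates above stress that date-only bounds are inclusive of the whole day. A minimal standalone sketch of one way such strings can be widened into inclusive datetime bounds (the package's actual parsing may differ):

from datetime import datetime, time

def to_inclusive_bounds(start: str | None, end: str | None):
    """Hypothetical helper: treat date-only strings as spanning the entire day."""
    start_dt = datetime.fromisoformat(start) if start else None  # "YYYY-MM-DD" parses to 00:00:00
    end_dt = datetime.fromisoformat(end) if end else None
    if end_dt is not None and len(end) == 10:  # date-only: extend to the end of that day
        end_dt = datetime.combine(end_dt.date(), time.max)
    return start_dt, end_dt

# "2024-09-04" .. "2024-09-04" covers all of September 4, 2024
print(to_inclusive_bounds("2024-09-04", "2024-09-04"))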
letta/functions/schema_generator.py
CHANGED
@@ -622,7 +622,7 @@ def generate_tool_schema_for_mcp(
                 format_value = option["format"]
         if types:
             # Deduplicate types using set
-            field_props["type"] = list(
+            field_props["type"] = list(dict.fromkeys(types))
             # Only add format if the field is not optional (doesn't have null type)
             if format_value and len(field_props["type"]) == 1 and "null" not in field_props["type"]:
                 field_props["format"] = format_value
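The new call uses dict.fromkeys, which deduplicates while preserving the original order of the type list; converting through a set would not guarantee order. A small standalone illustration:

types = ["string", "null", "string"]

# dict keys preserve insertion order (Python 3.7+), so duplicates drop without reordering
print(list(dict.fromkeys(types)))  # ['string', 'null']

# a set also deduplicates, but its iteration order is not guaranteed
print(list(set(types)))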
letta/groups/sleeptime_multi_agent_v3.py
ADDED
@@ -0,0 +1,231 @@
+import asyncio
+from collections.abc import AsyncGenerator
+from datetime import datetime, timezone
+
+from letta.agents.letta_agent_v2 import LettaAgentV2
+from letta.constants import DEFAULT_MAX_STEPS
+from letta.groups.helpers import stringify_message
+from letta.otel.tracing import trace_method
+from letta.schemas.agent import AgentState
+from letta.schemas.enums import JobStatus
+from letta.schemas.group import Group, ManagerType
+from letta.schemas.job import JobUpdate
+from letta.schemas.letta_message import MessageType
+from letta.schemas.letta_message_content import TextContent
+from letta.schemas.letta_response import LettaResponse
+from letta.schemas.message import Message, MessageCreate
+from letta.schemas.run import Run
+from letta.schemas.user import User
+from letta.services.group_manager import GroupManager
+
+
+class SleeptimeMultiAgentV3(LettaAgentV2):
+    def __init__(
+        self,
+        agent_state: AgentState,
+        actor: User,
+        group: Group,
+    ):
+        super().__init__(agent_state, actor)
+        assert group.manager_type == ManagerType.sleeptime, f"Expected group type to be 'sleeptime', got {group.manager_type}"
+        self.group = group
+        self.run_ids = []
+
+        # Additional manager classes
+        self.group_manager = GroupManager()
+
+    @trace_method
+    async def step(
+        self,
+        input_messages: list[MessageCreate],
+        max_steps: int = DEFAULT_MAX_STEPS,
+        run_id: str | None = None,
+        use_assistant_message: bool = False,
+        include_return_message_types: list[MessageType] | None = None,
+        request_start_timestamp_ns: int | None = None,
+    ) -> LettaResponse:
+        self.run_ids = []
+
+        for i in range(len(input_messages)):
+            input_messages[i].group_id = self.group.id
+
+        response = await super().step(
+            input_messages=input_messages,
+            max_steps=max_steps,
+            run_id=run_id,
+            use_assistant_message=use_assistant_message,
+            include_return_message_types=include_return_message_types,
+            request_start_timestamp_ns=request_start_timestamp_ns,
+        )
+
+        await self.run_sleeptime_agents(use_assistant_message=use_assistant_message)
+
+        response.usage.run_ids = self.run_ids
+        return response
+
+    @trace_method
+    async def stream(
+        self,
+        input_messages: list[MessageCreate],
+        max_steps: int = DEFAULT_MAX_STEPS,
+        stream_tokens: bool = True,
+        run_id: str | None = None,
+        use_assistant_message: bool = True,
+        request_start_timestamp_ns: int | None = None,
+        include_return_message_types: list[MessageType] | None = None,
+    ) -> AsyncGenerator[str, None]:
+        self.run_ids = []
+
+        for i in range(len(input_messages)):
+            input_messages[i].group_id = self.group.id
+
+        # Perform foreground agent step
+        async for chunk in super().stream(
+            input_messages=input_messages,
+            max_steps=max_steps,
+            stream_tokens=stream_tokens,
+            run_id=run_id,
+            use_assistant_message=use_assistant_message,
+            include_return_message_types=include_return_message_types,
+            request_start_timestamp_ns=request_start_timestamp_ns,
+        ):
+            yield chunk
+
+        await self.run_sleeptime_agents(use_assistant_message=use_assistant_message)
+
+    @trace_method
+    async def run_sleeptime_agents(self, use_assistant_message: bool = True):
+        # Get response messages
+        last_response_messages = self.response_messages
+
+        # Update turns counter
+        turns_counter = None
+        if self.group.sleeptime_agent_frequency is not None and self.group.sleeptime_agent_frequency > 0:
+            turns_counter = await self.group_manager.bump_turns_counter_async(group_id=self.group.id, actor=self.actor)
+
+        # Perform participant steps
+        if self.group.sleeptime_agent_frequency is None or (
+            turns_counter is not None and turns_counter % self.group.sleeptime_agent_frequency == 0
+        ):
+            last_processed_message_id = await self.group_manager.get_last_processed_message_id_and_update_async(
+                group_id=self.group.id, last_processed_message_id=last_response_messages[-1].id, actor=self.actor
+            )
+            for sleeptime_agent_id in self.group.agent_ids:
+                try:
+                    sleeptime_run_id = await self._issue_background_task(
+                        sleeptime_agent_id,
+                        last_response_messages,
+                        last_processed_message_id,
+                        use_assistant_message,
+                    )
+                    self.run_ids.append(sleeptime_run_id)
+                except Exception as e:
+                    # Individual task failures
+                    print(f"Sleeptime agent processing failed: {e!s}")
+                    raise e
+
+    @trace_method
+    async def _issue_background_task(
+        self,
+        sleeptime_agent_id: str,
+        response_messages: list[Message],
+        last_processed_message_id: str,
+        use_assistant_message: bool = True,
+    ) -> str:
+        run = Run(
+            user_id=self.actor.id,
+            status=JobStatus.created,
+            metadata={
+                "job_type": "sleeptime_agent_send_message_async",  # is this right?
+                "agent_id": sleeptime_agent_id,
+            },
+        )
+        run = await self.job_manager.create_job_async(pydantic_job=run, actor=self.actor)
+
+        asyncio.create_task(
+            self._participant_agent_step(
+                foreground_agent_id=self.agent_state.id,
+                sleeptime_agent_id=sleeptime_agent_id,
+                response_messages=response_messages,
+                last_processed_message_id=last_processed_message_id,
+                run_id=run.id,
+                use_assistant_message=use_assistant_message,
+            )
+        )
+        return run.id
+
+    @trace_method
+    async def _participant_agent_step(
+        self,
+        foreground_agent_id: str,
+        sleeptime_agent_id: str,
+        response_messages: list[Message],
+        last_processed_message_id: str,
+        run_id: str,
+        use_assistant_message: bool = True,
+    ) -> LettaResponse:
+        try:
+            # Update job status
+            job_update = JobUpdate(status=JobStatus.running)
+            await self.job_manager.update_job_by_id_async(job_id=run_id, job_update=job_update, actor=self.actor)
+
+            # Create conversation transcript
+            prior_messages = []
+            if self.group.sleeptime_agent_frequency:
+                try:
+                    prior_messages = await self.message_manager.list_messages_for_agent_async(
+                        agent_id=foreground_agent_id,
+                        actor=self.actor,
+                        after=last_processed_message_id,
+                        before=response_messages[0].id,
+                    )
+                except Exception:
+                    pass  # continue with just latest messages
+
+            transcript_summary = [stringify_message(message) for message in prior_messages + response_messages]
+            transcript_summary = [summary for summary in transcript_summary if summary is not None]
+            message_text = "\n".join(transcript_summary)
+
+            sleeptime_agent_messages = [
+                MessageCreate(
+                    role="user",
+                    content=[TextContent(text=message_text)],
+                    id=Message.generate_id(),
+                    agent_id=sleeptime_agent_id,
+                    group_id=self.group.id,
+                )
+            ]
+
+            # Load sleeptime agent
+            sleeptime_agent_state = await self.agent_manager.get_agent_by_id_async(agent_id=sleeptime_agent_id, actor=self.actor)
+            sleeptime_agent = LettaAgentV2(
+                agent_state=sleeptime_agent_state,
+                actor=self.actor,
+            )
+
+            # Perform sleeptime agent step
+            result = await sleeptime_agent.step(
+                input_messages=sleeptime_agent_messages,
+                run_id=run_id,
+                use_assistant_message=use_assistant_message,
+            )
+
+            # Update job status
+            job_update = JobUpdate(
+                status=JobStatus.completed,
+                completed_at=datetime.now(timezone.utc).replace(tzinfo=None),
+                metadata={
+                    "result": result.model_dump(mode="json"),
+                    "agent_id": sleeptime_agent_state.id,
+                },
+            )
+            await self.job_manager.update_job_by_id_async(job_id=run_id, job_update=job_update, actor=self.actor)
+            return result
+        except Exception as e:
+            job_update = JobUpdate(
+                status=JobStatus.failed,
+                completed_at=datetime.now(timezone.utc).replace(tzinfo=None),
+                metadata={"error": str(e)},
+            )
+            await self.job_manager.update_job_by_id_async(job_id=run_id, job_update=job_update, actor=self.actor)
+            raise
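A minimal usage sketch for the new class, based only on the constructor and step() signature shown above; how the agent_state, actor, and group objects are loaded is assumed and elided here:

from letta.groups.sleeptime_multi_agent_v3 import SleeptimeMultiAgentV3
from letta.schemas.letta_message_content import TextContent
from letta.schemas.message import MessageCreate

async def run_group_turn(agent_state, actor, group):
    """Hypothetical driver: one foreground step, then background sleeptime runs."""
    multi_agent = SleeptimeMultiAgentV3(agent_state=agent_state, actor=actor, group=group)
    response = await multi_agent.step(
        input_messages=[MessageCreate(role="user", content=[TextContent(text="Summarize today's updates")])],
    )
    # Run IDs of the background sleeptime agents are attached to the usage stats
    return response, response.usage.run_ids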
letta/helpers/tool_rule_solver.py
CHANGED
@@ -131,6 +131,10 @@ class ToolRulesSolver(BaseModel):
         """Check if all required-before-exit tools have been called."""
         return len(self.get_uncalled_required_tools(available_tools=available_tools)) == 0

+    def get_requires_approval_tools(self, available_tools: set[ToolName]) -> list[ToolName]:
+        """Get the list of tools that require approval."""
+        return [rule.tool_name for rule in self.requires_approval_tool_rules]
+
     def get_uncalled_required_tools(self, available_tools: set[ToolName]) -> list[str]:
         """Get the list of required-before-exit tools that have not been called yet."""
         if not self.required_before_exit_tool_rules: