letta-nightly 0.11.4.dev20250826104242__py3-none-any.whl → 0.11.6.dev20250827050912__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- letta/__init__.py +1 -1
- letta/agent.py +9 -3
- letta/agents/base_agent.py +2 -2
- letta/agents/letta_agent.py +56 -45
- letta/agents/voice_agent.py +2 -2
- letta/data_sources/redis_client.py +146 -1
- letta/errors.py +4 -0
- letta/functions/function_sets/files.py +2 -2
- letta/functions/mcp_client/types.py +30 -6
- letta/functions/schema_generator.py +46 -1
- letta/functions/schema_validator.py +17 -2
- letta/functions/types.py +1 -1
- letta/helpers/tool_execution_helper.py +0 -2
- letta/llm_api/anthropic_client.py +27 -5
- letta/llm_api/deepseek_client.py +97 -0
- letta/llm_api/groq_client.py +79 -0
- letta/llm_api/helpers.py +0 -1
- letta/llm_api/llm_api_tools.py +2 -113
- letta/llm_api/llm_client.py +21 -0
- letta/llm_api/llm_client_base.py +11 -9
- letta/llm_api/openai_client.py +3 -0
- letta/llm_api/xai_client.py +85 -0
- letta/prompts/prompt_generator.py +190 -0
- letta/schemas/agent_file.py +17 -2
- letta/schemas/file.py +24 -1
- letta/schemas/job.py +2 -0
- letta/schemas/letta_message.py +2 -0
- letta/schemas/letta_request.py +22 -0
- letta/schemas/message.py +10 -1
- letta/schemas/providers/bedrock.py +1 -0
- letta/schemas/response_format.py +2 -2
- letta/server/generate_openapi_schema.sh +4 -4
- letta/server/rest_api/redis_stream_manager.py +300 -0
- letta/server/rest_api/routers/v1/agents.py +129 -7
- letta/server/rest_api/routers/v1/folders.py +15 -5
- letta/server/rest_api/routers/v1/runs.py +101 -11
- letta/server/rest_api/routers/v1/sources.py +21 -53
- letta/server/rest_api/routers/v1/telemetry.py +14 -4
- letta/server/rest_api/routers/v1/tools.py +2 -2
- letta/server/rest_api/streaming_response.py +3 -24
- letta/server/server.py +0 -1
- letta/services/agent_manager.py +2 -2
- letta/services/agent_serialization_manager.py +129 -32
- letta/services/file_manager.py +111 -6
- letta/services/file_processor/file_processor.py +5 -2
- letta/services/files_agents_manager.py +60 -0
- letta/services/helpers/agent_manager_helper.py +6 -207
- letta/services/helpers/tool_parser_helper.py +6 -3
- letta/services/llm_batch_manager.py +1 -1
- letta/services/mcp/base_client.py +7 -1
- letta/services/mcp/sse_client.py +7 -2
- letta/services/mcp/stdio_client.py +5 -0
- letta/services/mcp/streamable_http_client.py +11 -2
- letta/services/mcp_manager.py +31 -30
- letta/services/source_manager.py +26 -1
- letta/services/summarizer/summarizer.py +21 -10
- letta/services/tool_executor/files_tool_executor.py +13 -9
- letta/services/tool_executor/mcp_tool_executor.py +3 -0
- letta/services/tool_executor/tool_execution_manager.py +13 -0
- letta/services/tool_executor/tool_execution_sandbox.py +0 -1
- letta/services/tool_manager.py +43 -20
- letta/services/tool_sandbox/local_sandbox.py +0 -2
- letta/settings.py +1 -0
- letta/utils.py +37 -0
- {letta_nightly-0.11.4.dev20250826104242.dist-info → letta_nightly-0.11.6.dev20250827050912.dist-info}/METADATA +116 -102
- {letta_nightly-0.11.4.dev20250826104242.dist-info → letta_nightly-0.11.6.dev20250827050912.dist-info}/RECORD +128 -127
- {letta_nightly-0.11.4.dev20250826104242.dist-info → letta_nightly-0.11.6.dev20250827050912.dist-info}/WHEEL +1 -1
- letta_nightly-0.11.6.dev20250827050912.dist-info/entry_points.txt +2 -0
- letta/functions/mcp_client/__init__.py +0 -0
- letta/functions/mcp_client/base_client.py +0 -156
- letta/functions/mcp_client/sse_client.py +0 -51
- letta/functions/mcp_client/stdio_client.py +0 -109
- letta_nightly-0.11.4.dev20250826104242.dist-info/entry_points.txt +0 -3
- {letta_nightly-0.11.4.dev20250826104242.dist-info → letta_nightly-0.11.6.dev20250827050912.dist-info/licenses}/LICENSE +0 -0
--- /dev/null
+++ b/letta/server/rest_api/redis_stream_manager.py
@@ -0,0 +1,300 @@
+"""Redis stream manager for reading and writing SSE chunks with batching and TTL."""
+
+import asyncio
+import json
+import time
+from collections import defaultdict
+from typing import AsyncIterator, Dict, List, Optional
+
+from letta.data_sources.redis_client import AsyncRedisClient
+from letta.log import get_logger
+
+logger = get_logger(__name__)
+
+
+class RedisSSEStreamWriter:
+    """
+    Efficiently writes SSE chunks to Redis streams with batching and TTL management.
+
+    Features:
+    - Batches writes using Redis pipelines for performance
+    - Automatically sets/refreshes TTL on streams
+    - Tracks sequential IDs for cursor-based recovery
+    - Handles flush on size or time thresholds
+    """
+
+    def __init__(
+        self,
+        redis_client: AsyncRedisClient,
+        flush_interval: float = 0.5,
+        flush_size: int = 50,
+        stream_ttl_seconds: int = 10800,  # 3 hours default
+        max_stream_length: int = 10000,  # Max entries per stream
+    ):
+        """
+        Initialize the Redis SSE stream writer.
+
+        Args:
+            redis_client: Redis client instance
+            flush_interval: Seconds between automatic flushes
+            flush_size: Number of chunks to buffer before flushing
+            stream_ttl_seconds: TTL for streams in seconds (default: 3 hours)
+            max_stream_length: Maximum entries per stream before trimming
+        """
+        self.redis = redis_client
+        self.flush_interval = flush_interval
+        self.flush_size = flush_size
+        self.stream_ttl = stream_ttl_seconds
+        self.max_stream_length = max_stream_length
+
+        # Buffer for batching: run_id -> list of chunks
+        self.buffer: Dict[str, List[Dict]] = defaultdict(list)
+        # Track sequence IDs per run
+        self.seq_counters: Dict[str, int] = defaultdict(lambda: 1)
+        # Track last flush time per run
+        self.last_flush: Dict[str, float] = defaultdict(float)
+
+        # Background flush task
+        self._flush_task = None
+        self._running = False
+
+    async def start(self):
+        """Start the background flush task."""
+        if not self._running:
+            self._running = True
+            self._flush_task = asyncio.create_task(self._periodic_flush())
+
+    async def stop(self):
+        """Stop the background flush task and flush remaining data."""
+        self._running = False
+        if self._flush_task:
+            self._flush_task.cancel()
+            try:
+                await self._flush_task
+            except asyncio.CancelledError:
+                pass
+
+        for run_id in list(self.buffer.keys()):
+            if self.buffer[run_id]:
+                await self._flush_run(run_id)
+
+    async def write_chunk(
+        self,
+        run_id: str,
+        data: str,
+        is_complete: bool = False,
+    ) -> int:
+        """
+        Write an SSE chunk to the buffer for a specific run.
+
+        Args:
+            run_id: The run ID to write to
+            data: SSE-formatted chunk data
+            is_complete: Whether this is the final chunk
+
+        Returns:
+            The sequence ID assigned to this chunk
+        """
+        seq_id = self.seq_counters[run_id]
+        self.seq_counters[run_id] += 1
+
+        chunk = {
+            "seq_id": seq_id,
+            "data": data,
+            "timestamp": int(time.time() * 1000),
+        }
+
+        if is_complete:
+            chunk["complete"] = "true"
+
+        self.buffer[run_id].append(chunk)
+
+        should_flush = (
+            len(self.buffer[run_id]) >= self.flush_size or is_complete or (time.time() - self.last_flush[run_id]) > self.flush_interval
+        )
+
+        if should_flush:
+            await self._flush_run(run_id)
+
+        return seq_id
+
+    async def _flush_run(self, run_id: str):
+        """Flush buffered chunks for a specific run to Redis."""
+        if not self.buffer[run_id]:
+            return
+
+        chunks = self.buffer[run_id]
+        self.buffer[run_id] = []
+        stream_key = f"sse:run:{run_id}"
+
+        try:
+            client = await self.redis.get_client()
+
+            async with client.pipeline(transaction=False) as pipe:
+                for chunk in chunks:
+                    pipe.xadd(stream_key, chunk, maxlen=self.max_stream_length, approximate=True)
+
+                pipe.expire(stream_key, self.stream_ttl)
+
+                await pipe.execute()
+
+            self.last_flush[run_id] = time.time()
+
+            logger.debug(
+                f"Flushed {len(chunks)} chunks to Redis stream {stream_key}, " f"seq_ids {chunks[0]['seq_id']}-{chunks[-1]['seq_id']}"
+            )
+
+            if chunks[-1].get("complete") == "true":
+                self._cleanup_run(run_id)
+
+        except Exception as e:
+            logger.error(f"Failed to flush chunks for run {run_id}: {e}")
+            # Put chunks back in buffer to retry
+            self.buffer[run_id] = chunks + self.buffer[run_id]
+            raise
+
+    async def _periodic_flush(self):
+        """Background task to periodically flush buffers."""
+        while self._running:
+            try:
+                await asyncio.sleep(self.flush_interval)
+
+                # Check each run for time-based flush
+                current_time = time.time()
+                runs_to_flush = [
+                    run_id
+                    for run_id, last_flush in self.last_flush.items()
+                    if (current_time - last_flush) > self.flush_interval and self.buffer[run_id]
+                ]
+
+                for run_id in runs_to_flush:
+                    await self._flush_run(run_id)
+
+            except asyncio.CancelledError:
+                break
+            except Exception as e:
+                logger.error(f"Error in periodic flush: {e}")
+
+    def _cleanup_run(self, run_id: str):
+        """Clean up tracking data for a completed run."""
+        self.buffer.pop(run_id, None)
+        self.seq_counters.pop(run_id, None)
+        self.last_flush.pop(run_id, None)
+
+    async def mark_complete(self, run_id: str):
+        """Mark a stream as complete and flush."""
+        # Add a [DONE] marker
+        await self.write_chunk(run_id, "data: [DONE]\n\n", is_complete=True)
+
+
+async def create_background_stream_processor(
+    stream_generator,
+    redis_client: AsyncRedisClient,
+    run_id: str,
+    writer: Optional[RedisSSEStreamWriter] = None,
+) -> None:
+    """
+    Process a stream in the background and store chunks to Redis.
+
+    This function consumes the stream generator and writes all chunks
+    to Redis for later retrieval.
+
+    Args:
+        stream_generator: The async generator yielding SSE chunks
+        redis_client: Redis client instance
+        run_id: The run ID to store chunks under
+        writer: Optional pre-configured writer (creates new if not provided)
+    """
+    if writer is None:
+        writer = RedisSSEStreamWriter(redis_client)
+        await writer.start()
+        should_stop_writer = True
+    else:
+        should_stop_writer = False
+
+    try:
+        async for chunk in stream_generator:
+            if isinstance(chunk, tuple):
+                chunk = chunk[0]
+
+            is_done = isinstance(chunk, str) and ("data: [DONE]" in chunk or "event: error" in chunk)
+
+            await writer.write_chunk(run_id=run_id, data=chunk, is_complete=is_done)
+
+            if is_done:
+                break
+
+    except Exception as e:
+        logger.error(f"Error processing stream for run {run_id}: {e}")
+        # Write error chunk
+        error_chunk = {"error": {"message": str(e)}}
+        await writer.write_chunk(run_id=run_id, data=f"event: error\ndata: {json.dumps(error_chunk)}\n\n", is_complete=True)
+    finally:
+        if should_stop_writer:
+            await writer.stop()
+
+
+async def redis_sse_stream_generator(
+    redis_client: AsyncRedisClient,
+    run_id: str,
+    starting_after: Optional[int] = None,
+    poll_interval: float = 0.1,
+    batch_size: int = 100,
+) -> AsyncIterator[str]:
+    """
+    Generate SSE events from Redis stream chunks.
+
+    This generator reads chunks stored in Redis streams and yields them as SSE events.
+    It supports cursor-based recovery by allowing you to start from a specific seq_id.
+
+    Args:
+        redis_client: Redis client instance
+        run_id: The run ID to read chunks for
+        starting_after: Sequential ID (integer) to start reading from (default: None for beginning)
+        poll_interval: Seconds to wait between polls when no new data (default: 0.1)
+        batch_size: Number of entries to read per batch (default: 100)
+
+    Yields:
+        SSE-formatted chunks from the Redis stream
+    """
+    stream_key = f"sse:run:{run_id}"
+    last_redis_id = "-"
+    cursor_seq_id = starting_after or 0
+
+    logger.debug(f"Starting redis_sse_stream_generator for run_id={run_id}, stream_key={stream_key}")
+
+    while True:
+        entries = await redis_client.xrange(stream_key, start=last_redis_id, count=batch_size)
+
+        if entries:
+            yielded_any = False
+            for entry_id, fields in entries:
+                if entry_id == last_redis_id:
+                    continue
+
+                chunk_seq_id = int(fields.get("seq_id", 0))
+                if chunk_seq_id > cursor_seq_id:
+                    data = fields.get("data", "")
+                    if not data:
+                        logger.debug(f"No data found for chunk {chunk_seq_id} in run {run_id}")
+                        continue
+
+                    if '"run_id":null' in data:
+                        data = data.replace('"run_id":null', f'"run_id":"{run_id}"')
+
+                    if '"seq_id":null' in data:
+                        data = data.replace('"seq_id":null', f'"seq_id":{chunk_seq_id}')
+
+                    yield data
+                    yielded_any = True
+
+                    if fields.get("complete") == "true":
+                        return
+
+                last_redis_id = entry_id
+
+            if not yielded_any and len(entries) > 1:
+                continue
+
+        if not entries or (len(entries) == 1 and entries[0][0] == last_redis_id):
+            await asyncio.sleep(poll_interval)
--- a/letta/server/rest_api/routers/v1/agents.py
+++ b/letta/server/rest_api/routers/v1/agents.py
@@ -14,7 +14,7 @@ from starlette.responses import Response, StreamingResponse
 
 from letta.agents.letta_agent import LettaAgent
 from letta.constants import AGENT_ID_PATTERN, DEFAULT_MAX_STEPS, DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG, REDIS_RUN_ID_PREFIX
-from letta.data_sources.redis_client import get_redis_client
+from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client
 from letta.errors import AgentExportIdMappingError, AgentExportProcessingError, AgentFileImportError, AgentNotFoundForExportError
 from letta.groups.sleeptime_multi_agent_v2 import SleeptimeMultiAgentV2
 from letta.helpers.datetime_helpers import get_utc_timestamp_ns
@@ -26,6 +26,7 @@ from letta.schemas.agent import AgentState, AgentType, CreateAgent, UpdateAgent
 from letta.schemas.agent_file import AgentFileSchema
 from letta.schemas.block import Block, BlockUpdate
 from letta.schemas.enums import JobType
+from letta.schemas.file import AgentFileAttachment, PaginatedAgentFiles
 from letta.schemas.group import Group
 from letta.schemas.job import JobStatus, JobUpdate, LettaRequestConfig
 from letta.schemas.letta_message import LettaMessageUnion, LettaMessageUpdateUnion, MessageType
@@ -39,6 +40,7 @@ from letta.schemas.source import Source
 from letta.schemas.tool import Tool
 from letta.schemas.user import User
 from letta.serialize_schemas.pydantic_agent_schema import AgentSchema
+from letta.server.rest_api.redis_stream_manager import create_background_stream_processor, redis_sse_stream_generator
 from letta.server.rest_api.utils import get_letta_server
 from letta.server.server import SyncServer
 from letta.services.summarizer.enums import SummarizationMode
@@ -249,6 +251,7 @@ async def import_agent(
     override_existing_tools: bool = True,
     project_id: str | None = None,
     strip_messages: bool = False,
+    env_vars: Optional[dict[str, Any]] = None,
 ) -> List[str]:
     """
     Import an agent using the new AgentFileSchema format.
@@ -259,7 +262,13 @@ async def import_agent(
         raise HTTPException(status_code=422, detail=f"Invalid agent file schema: {e!s}")
 
     try:
-        import_result = await server.agent_serialization_manager.import_file(
+        import_result = await server.agent_serialization_manager.import_file(
+            schema=agent_schema,
+            actor=actor,
+            append_copy_suffix=append_copy_suffix,
+            override_existing_tools=override_existing_tools,
+            env_vars=env_vars,
+        )
 
         if not import_result.success:
             raise HTTPException(
@@ -297,7 +306,9 @@ async def import_agent_serialized(
         False,
         description="If set to True, strips all messages from the agent before importing.",
     ),
-
+    env_vars_json: Optional[str] = Form(
+        None, description="Environment variables as a JSON string to pass to the agent for tool execution."
+    ),
 ):
     """
     Import a serialized agent file and recreate the agent(s) in the system.
@@ -311,6 +322,17 @@ async def import_agent_serialized(
     except json.JSONDecodeError:
         raise HTTPException(status_code=400, detail="Corrupted agent file format.")
 
+    # Parse env_vars_json if provided
+    env_vars = None
+    if env_vars_json:
+        try:
+            env_vars = json.loads(env_vars_json)
+        except json.JSONDecodeError:
+            raise HTTPException(status_code=400, detail="env_vars_json must be a valid JSON string")
+
+        if not isinstance(env_vars, dict):
+            raise HTTPException(status_code=400, detail="env_vars_json must be a valid JSON string")
+
     # Check if the JSON is AgentFileSchema or AgentSchema
     # TODO: This is kind of hacky, but should work as long as dont' change the schema
     if "agents" in agent_json and isinstance(agent_json.get("agents"), list):
@@ -323,6 +345,7 @@ async def import_agent_serialized(
             override_existing_tools=override_existing_tools,
             project_id=project_id,
             strip_messages=strip_messages,
+            env_vars=env_vars,
         )
     else:
         # This is a legacy AgentSchema
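These hunks thread a new optional `env_vars` mapping from the `env_vars_json` form field through to the import pipeline, so imported agents can carry tool-execution secrets. A hypothetical client call; the route path, port, and `file` field name are inferred from this router and may differ in a real deployment:

import json

import requests

# Hypothetical: import a serialized agent file, supplying env vars for tool execution.
with open("agent.af", "rb") as fp:
    resp = requests.post(
        "http://localhost:8283/v1/agents/import",  # path assumed
        files={"file": fp},
        data={"env_vars_json": json.dumps({"MY_TOOL_API_KEY": "sk-..."})},
        headers={"user_id": "user-123"},
    )
resp.raise_for_status()
print(resp.json())  # expected to include the created agent IDs, per import_agent's return type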
@@ -728,6 +751,49 @@ async def list_agent_folders(
     return await server.agent_manager.list_attached_sources_async(agent_id=agent_id, actor=actor)
 
 
+@router.get("/{agent_id}/files", response_model=PaginatedAgentFiles, operation_id="list_agent_files")
+async def list_agent_files(
+    agent_id: str,
+    cursor: Optional[str] = Query(None, description="Pagination cursor from previous response"),
+    limit: int = Query(20, ge=1, le=100, description="Number of items to return (1-100)"),
+    is_open: Optional[bool] = Query(None, description="Filter by open status (true for open files, false for closed files)"),
+    server: "SyncServer" = Depends(get_letta_server),
+    actor_id: str | None = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
+):
+    """
+    Get the files attached to an agent with their open/closed status (paginated).
+    """
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+
+    # get paginated file-agent relationships for this agent
+    file_agents, next_cursor, has_more = await server.file_agent_manager.list_files_for_agent_paginated(
+        agent_id=agent_id, actor=actor, cursor=cursor, limit=limit, is_open=is_open
+    )
+
+    # enrich with file and source metadata
+    enriched_files = []
+    for fa in file_agents:
+        # get source/folder metadata
+        source = await server.source_manager.get_source_by_id(source_id=fa.source_id, actor=actor)
+
+        # build response object
+        attachment = AgentFileAttachment(
+            id=fa.id,
+            file_id=fa.file_id,
+            file_name=fa.file_name,
+            folder_id=fa.source_id,
+            folder_name=source.name if source else "Unknown",
+            is_open=fa.is_open,
+            last_accessed_at=fa.last_accessed_at,
+            visible_content=fa.visible_content,
+            start_line=fa.start_line,
+            end_line=fa.end_line,
+        )
+        enriched_files.append(attachment)
+
+    return PaginatedAgentFiles(files=enriched_files, next_cursor=next_cursor, has_more=has_more)
+
+
 # TODO: remove? can also get with agent blocks
 @router.get("/{agent_id}/core-memory", response_model=Memory, operation_id="retrieve_agent_memory")
 async def retrieve_agent_memory(
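The new endpoint is cursor-paginated, so a client walks pages until `has_more` is false. A sketch of draining all pages; the base URL and `user_id` header auth are assumptions, while the field names come from the handler above:

import requests

BASE = "http://localhost:8283/v1/agents"  # assumed server address


def iter_agent_files(agent_id: str, user_id: str):
    """Yield every file attached to an agent, following next_cursor."""
    cursor = None
    while True:
        params = {"limit": 100}
        if cursor:
            params["cursor"] = cursor
        page = requests.get(
            f"{BASE}/{agent_id}/files",
            params=params,
            headers={"user_id": user_id},
        ).json()
        yield from page["files"]
        if not page["has_more"]:
            break
        cursor = page["next_cursor"]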
@@ -999,7 +1065,8 @@ async def send_message(
         "bedrock",
         "ollama",
         "azure",
-        "
+        "xai",
+        "groq",
     ]
 
     # Create a new run for execution tracking
@@ -1143,7 +1210,8 @@ async def send_message_streaming(
         "bedrock",
         "ollama",
         "azure",
-        "
+        "xai",
+        "groq",
     ]
     model_compatible_token_streaming = agent.llm_config.model_endpoint_type in ["anthropic", "openai", "bedrock"]
 
@@ -1157,6 +1225,7 @@ async def send_message_streaming(
             metadata={
                 "job_type": "send_message_streaming",
                 "agent_id": agent_id,
+                "background": request.background or False,
             },
             request_config=LettaRequestConfig(
                 use_assistant_message=request.use_assistant_message,
@@ -1211,8 +1280,58 @@ async def send_message_streaming(
                 else SummarizationMode.PARTIAL_EVICT_MESSAGE_BUFFER
             ),
         )
+
        from letta.server.rest_api.streaming_response import StreamingResponseWithStatusCode, add_keepalive_to_stream
 
+        if request.background and settings.track_agent_run:
+            if isinstance(redis_client, NoopAsyncRedisClient):
+                raise HTTPException(
+                    status_code=503,
+                    detail=(
+                        "Background streaming requires Redis to be running. "
+                        "Please ensure Redis is properly configured. "
+                        f"LETTA_REDIS_HOST: {settings.redis_host}, LETTA_REDIS_PORT: {settings.redis_port}"
+                    ),
+                )
+
+            if request.stream_tokens and model_compatible_token_streaming:
+                raw_stream = agent_loop.step_stream(
+                    input_messages=request.messages,
+                    max_steps=request.max_steps,
+                    use_assistant_message=request.use_assistant_message,
+                    request_start_timestamp_ns=request_start_timestamp_ns,
+                    include_return_message_types=request.include_return_message_types,
+                )
+            else:
+                raw_stream = agent_loop.step_stream_no_tokens(
+                    request.messages,
+                    max_steps=request.max_steps,
+                    use_assistant_message=request.use_assistant_message,
+                    request_start_timestamp_ns=request_start_timestamp_ns,
+                    include_return_message_types=request.include_return_message_types,
+                )
+
+            asyncio.create_task(
+                create_background_stream_processor(
+                    stream_generator=raw_stream,
+                    redis_client=redis_client,
+                    run_id=run.id,
+                )
+            )
+
+            stream = redis_sse_stream_generator(
+                redis_client=redis_client,
+                run_id=run.id,
+            )
+
+            if request.include_pings and settings.enable_keepalive:
+                stream = add_keepalive_to_stream(stream, keepalive_interval=settings.keepalive_interval)
+
+            return StreamingResponseWithStatusCode(
+                stream,
+                media_type="text/event-stream",
+            )
+
         if request.stream_tokens and model_compatible_token_streaming:
             raw_stream = agent_loop.step_stream(
                 input_messages=request.messages,
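The new background branch changes the streaming contract: rather than tying the SSE response to the agent task, the server forks the agent stream into Redis via `create_background_stream_processor` and serves the client from `redis_sse_stream_generator`, so the HTTP connection can drop and reconnect without killing the run (the `runs` router grows +101/-11 lines in this release, presumably for reattaching to an in-flight run). A hypothetical request enabling the mode; the route path and port are assumptions, and `background` is the new `LettaRequest` field from `letta/schemas/letta_request.py`:

import requests

# Hypothetical: start a run whose SSE stream is mirrored into Redis.
resp = requests.post(
    "http://localhost:8283/v1/agents/agent-123/messages/stream",  # path assumed
    json={
        "messages": [{"role": "user", "content": "hello"}],
        "background": True,
        "stream_tokens": True,
        "include_pings": True,
    },
    headers={"user_id": "user-123"},
    stream=True,
)
for line in resp.iter_lines():
    if line:
        print(line.decode())  # SSE chunks replayed from the Redis stream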
@@ -1350,6 +1469,7 @@ async def _process_message_background(
         "google_vertex",
         "bedrock",
         "ollama",
+        "groq",
     ]
     if agent_eligible and model_compatible:
         if agent.enable_sleeptime and agent.agent_type != AgentType.voice_convo_agent:
@@ -1538,7 +1658,8 @@ async def preview_raw_payload(
         "bedrock",
         "ollama",
         "azure",
-        "
+        "xai",
+        "groq",
     ]
 
     if agent_eligible and model_compatible:
@@ -1608,7 +1729,8 @@ async def summarize_agent_conversation(
         "bedrock",
         "ollama",
         "azure",
-        "
+        "xai",
+        "groq",
     ]
 
     if agent_eligible and model_compatible:
--- a/letta/server/rest_api/routers/v1/folders.py
+++ b/letta/server/rest_api/routers/v1/folders.py
@@ -7,6 +7,7 @@ from typing import List, Optional
 
 from fastapi import APIRouter, Depends, Header, HTTPException, Query, UploadFile
 from starlette import status
+from starlette.responses import Response
 
 import letta.constants as constants
 from letta.helpers.pinecone_utils import (
@@ -34,7 +35,7 @@ from letta.services.file_processor.file_types import get_allowed_media_types, ge
 from letta.services.file_processor.parser.markitdown_parser import MarkitdownFileParser
 from letta.services.file_processor.parser.mistral_parser import MistralFileParser
 from letta.settings import settings
-from letta.utils import safe_create_task, sanitize_filename
+from letta.utils import safe_create_file_processing_task, safe_create_task, sanitize_filename
 
 logger = get_logger(__name__)
 
@@ -138,8 +139,11 @@ async def create_folder(
     # TODO: need to asyncify this
     if not folder_create.embedding_config:
         if not folder_create.embedding:
-
-
+            if settings.default_embedding_handle is None:
+                # TODO: modify error type
+                raise ValueError("Must specify either embedding or embedding_config in request")
+            else:
+                folder_create.embedding = settings.default_embedding_handle
         folder_create.embedding_config = await server.get_embedding_config_from_handle_async(
             handle=folder_create.embedding,
             embedding_chunk_size=folder_create.embedding_chunk_size or constants.DEFAULT_EMBEDDING_CHUNK_SIZE,
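The net effect: creating a folder no longer hard-requires an embedding spec when the deployment configures a default via the new `settings.default_embedding_handle` (presumably the +1 line in `letta/settings.py`). A sketch of the request-side difference; the route path, port, and body shape are assumptions:

import requests

# Before this change the body had to carry "embedding" or "embedding_config".
# With default_embedding_handle set server-side, a minimal body now works:
resp = requests.post(
    "http://localhost:8283/v1/folders",  # path assumed from this router
    json={"name": "my-docs"},
    headers={"user_id": "user-123"},
)
print(resp.json())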
@@ -257,13 +261,16 @@ async def upload_file_to_folder(
 
     # Store original filename and handle duplicate logic
     # Use custom name if provided, otherwise use the uploaded file's name
-
+    # If custom name is provided, use it directly (it's just metadata, not a filesystem path)
+    # Otherwise, sanitize the uploaded filename for security
+    original_filename = name if name else sanitize_filename(file.filename)  # Basic sanitization only
 
     # Check if duplicate exists
     existing_file = await server.file_manager.get_file_by_original_name_and_source(
         original_filename=original_filename, source_id=folder_id, actor=actor
     )
 
+    unique_filename = None
     if existing_file:
         # Duplicate found, handle based on strategy
         if duplicate_handling == DuplicateFileHandling.ERROR:
@@ -305,8 +312,11 @@ async def upload_file_to_folder(
 
     # Use cloud processing for all files (simple files always, complex files with Mistral key)
     logger.info("Running experimental cloud based file processing...")
-
+    safe_create_file_processing_task(
         load_file_to_source_cloud(server, agent_states, content, folder_id, actor, folder.embedding_config, file_metadata),
+        file_metadata=file_metadata,
+        server=server,
+        actor=actor,
         logger=logger,
         label="file_processor.process",
    )