letta-nightly 0.9.1.dev20250731104458__py3-none-any.whl → 0.10.0.dev20250801010504__py3-none-any.whl
This diff shows the changes between two publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- letta/__init__.py +2 -1
- letta/agent.py +1 -1
- letta/agents/base_agent.py +2 -2
- letta/agents/letta_agent.py +22 -8
- letta/agents/letta_agent_batch.py +2 -2
- letta/agents/voice_agent.py +2 -2
- letta/client/client.py +0 -11
- letta/errors.py +11 -0
- letta/functions/function_sets/builtin.py +3 -7
- letta/functions/mcp_client/types.py +107 -1
- letta/helpers/reasoning_helper.py +48 -0
- letta/helpers/tool_execution_helper.py +2 -65
- letta/interfaces/openai_streaming_interface.py +38 -2
- letta/llm_api/anthropic_client.py +1 -5
- letta/llm_api/google_vertex_client.py +1 -1
- letta/llm_api/llm_client.py +1 -1
- letta/llm_api/openai_client.py +2 -0
- letta/llm_api/sample_response_jsons/lmstudio_embedding_list.json +3 -2
- letta/orm/agent.py +5 -0
- letta/orm/enums.py +0 -1
- letta/orm/file.py +0 -1
- letta/orm/files_agents.py +9 -9
- letta/orm/sandbox_config.py +1 -1
- letta/orm/sqlite_functions.py +15 -13
- letta/prompts/system/memgpt_generate_tool.txt +139 -0
- letta/schemas/agent.py +15 -1
- letta/schemas/enums.py +6 -0
- letta/schemas/file.py +3 -3
- letta/schemas/letta_ping.py +28 -0
- letta/schemas/letta_request.py +9 -0
- letta/schemas/letta_stop_reason.py +25 -0
- letta/schemas/llm_config.py +1 -0
- letta/schemas/mcp.py +16 -3
- letta/schemas/memory.py +5 -0
- letta/schemas/providers/lmstudio.py +7 -0
- letta/schemas/providers/ollama.py +11 -8
- letta/schemas/sandbox_config.py +17 -7
- letta/server/rest_api/app.py +2 -0
- letta/server/rest_api/routers/v1/agents.py +93 -30
- letta/server/rest_api/routers/v1/blocks.py +52 -0
- letta/server/rest_api/routers/v1/sandbox_configs.py +2 -1
- letta/server/rest_api/routers/v1/tools.py +43 -101
- letta/server/rest_api/streaming_response.py +121 -9
- letta/server/server.py +6 -10
- letta/services/agent_manager.py +41 -4
- letta/services/block_manager.py +63 -1
- letta/services/file_processor/chunker/line_chunker.py +20 -19
- letta/services/file_processor/file_processor.py +0 -2
- letta/services/file_processor/file_types.py +1 -2
- letta/services/files_agents_manager.py +46 -6
- letta/services/helpers/agent_manager_helper.py +185 -13
- letta/services/job_manager.py +4 -4
- letta/services/mcp/oauth_utils.py +6 -150
- letta/services/mcp_manager.py +120 -2
- letta/services/sandbox_config_manager.py +3 -5
- letta/services/tool_executor/builtin_tool_executor.py +13 -18
- letta/services/tool_executor/files_tool_executor.py +31 -27
- letta/services/tool_executor/mcp_tool_executor.py +10 -1
- letta/services/tool_executor/{tool_executor.py → sandbox_tool_executor.py} +14 -2
- letta/services/tool_executor/tool_execution_manager.py +1 -1
- letta/services/tool_executor/tool_execution_sandbox.py +2 -1
- letta/services/tool_manager.py +59 -21
- letta/services/tool_sandbox/base.py +18 -2
- letta/services/tool_sandbox/e2b_sandbox.py +5 -35
- letta/services/tool_sandbox/local_sandbox.py +5 -22
- letta/services/tool_sandbox/modal_sandbox.py +205 -0
- letta/settings.py +27 -8
- letta/system.py +1 -4
- letta/templates/template_helper.py +5 -0
- letta/utils.py +14 -2
- {letta_nightly-0.9.1.dev20250731104458.dist-info → letta_nightly-0.10.0.dev20250801010504.dist-info}/METADATA +7 -3
- {letta_nightly-0.9.1.dev20250731104458.dist-info → letta_nightly-0.10.0.dev20250801010504.dist-info}/RECORD +75 -72
- letta/orm/__all__.py +0 -15
- {letta_nightly-0.9.1.dev20250731104458.dist-info → letta_nightly-0.10.0.dev20250801010504.dist-info}/LICENSE +0 -0
- {letta_nightly-0.9.1.dev20250731104458.dist-info → letta_nightly-0.10.0.dev20250801010504.dist-info}/WHEEL +0 -0
- {letta_nightly-0.9.1.dev20250731104458.dist-info → letta_nightly-0.10.0.dev20250801010504.dist-info}/entry_points.txt +0 -0

letta/services/helpers/agent_manager_helper.py
CHANGED
@@ -1,4 +1,5 @@
 import os
+import uuid
 from datetime import datetime
 from typing import List, Literal, Optional, Set
 
@@ -216,7 +217,7 @@ def compile_memory_metadata_block(
     ]

     # Only include archival memory line if there are archival memories
-    if archival_memory_size > 0:
+    if archival_memory_size is not None and archival_memory_size > 0:
         metadata_lines.append(
             f"- {archival_memory_size} total memories you created are stored in archival memory (use tools to access them)"
         )
@@ -247,6 +248,7 @@ def safe_format(template: str, variables: dict) -> str:
     return escaped.format_map(PreserveMapping(variables))


+@trace_method
 def compile_system_message(
     system_prompt: str,
     in_context_memory: Memory,
@@ -326,6 +328,87 @@ def compile_system_message(
     return formatted_prompt


+@trace_method
+async def compile_system_message_async(
+    system_prompt: str,
+    in_context_memory: Memory,
+    in_context_memory_last_edit: datetime,  # TODO move this inside of BaseMemory?
+    timezone: str,
+    user_defined_variables: Optional[dict] = None,
+    append_icm_if_missing: bool = True,
+    template_format: Literal["f-string", "mustache", "jinja2"] = "f-string",
+    previous_message_count: int = 0,
+    archival_memory_size: int = 0,
+    tool_rules_solver: Optional[ToolRulesSolver] = None,
+    sources: Optional[List] = None,
+    max_files_open: Optional[int] = None,
+) -> str:
+    """Prepare the final/full system message that will be fed into the LLM API
+
+    The base system message may be templated, in which case we need to render the variables.
+
+    The following are reserved variables:
+      - CORE_MEMORY: the in-context memory of the LLM
+    """
+
+    # Add tool rule constraints if available
+    tool_constraint_block = None
+    if tool_rules_solver is not None:
+        tool_constraint_block = tool_rules_solver.compile_tool_rule_prompts()
+
+    if user_defined_variables is not None:
+        # TODO eventually support the user defining their own variables to inject
+        raise NotImplementedError
+    else:
+        variables = {}
+
+    # Add the protected memory variable
+    if IN_CONTEXT_MEMORY_KEYWORD in variables:
+        raise ValueError(f"Found protected variable '{IN_CONTEXT_MEMORY_KEYWORD}' in user-defined vars: {str(user_defined_variables)}")
+    else:
+        # TODO should this all put into the memory.__repr__ function?
+        memory_metadata_string = compile_memory_metadata_block(
+            memory_edit_timestamp=in_context_memory_last_edit,
+            previous_message_count=previous_message_count,
+            archival_memory_size=archival_memory_size,
+            timezone=timezone,
+        )
+
+        memory_with_sources = await in_context_memory.compile_async(
+            tool_usage_rules=tool_constraint_block, sources=sources, max_files_open=max_files_open
+        )
+        full_memory_string = memory_with_sources + "\n\n" + memory_metadata_string
+
+        # Add to the variables list to inject
+        variables[IN_CONTEXT_MEMORY_KEYWORD] = full_memory_string
+
+    if template_format == "f-string":
+        memory_variable_string = "{" + IN_CONTEXT_MEMORY_KEYWORD + "}"
+
+        # Catch the special case where the system prompt is unformatted
+        if append_icm_if_missing:
+            if memory_variable_string not in system_prompt:
+                # In this case, append it to the end to make sure memory is still injected
+                # warnings.warn(f"{IN_CONTEXT_MEMORY_KEYWORD} variable was missing from system prompt, appending instead")
+                system_prompt += "\n\n" + memory_variable_string
+
+        # render the variables using the built-in templater
+        try:
+            if user_defined_variables:
+                formatted_prompt = safe_format(system_prompt, variables)
+            else:
+                formatted_prompt = system_prompt.replace(memory_variable_string, full_memory_string)
+        except Exception as e:
+            raise ValueError(f"Failed to format system prompt - {str(e)}. System prompt value:\n{system_prompt}")
+
+    else:
+        # TODO support for mustache and jinja2
+        raise NotImplementedError(template_format)
+
+    return formatted_prompt
+
+
+@trace_method
 def initialize_message_sequence(
     agent_state: AgentState,
     memory_edit_timestamp: Optional[datetime] = None,
@@ -351,21 +434,110 @@ def initialize_message_sequence(
     first_user_message = get_login_event(agent_state.timezone)  # event letting Letta know the user just logged in

     if include_initial_boot_message:
+        llm_config = agent_state.llm_config
+        uuid_str = str(uuid.uuid4())
+
+        # Some LMStudio models (e.g. ministral) require the tool call ID to be 9 alphanumeric characters
+        tool_call_id = uuid_str[:9] if llm_config.provider_name == "lmstudio_openai" else uuid_str
+
         if agent_state.agent_type == AgentType.sleeptime_agent:
             initial_boot_messages = []
-        elif
-            initial_boot_messages = get_initial_boot_messages("startup_with_send_message_gpt35", agent_state.timezone)
+        elif llm_config.model is not None and "gpt-3.5" in llm_config.model:
+            initial_boot_messages = get_initial_boot_messages("startup_with_send_message_gpt35", agent_state.timezone, tool_call_id)
         else:
-            initial_boot_messages = get_initial_boot_messages("startup_with_send_message", agent_state.timezone)
-
-
-
-
-
-
-
-
-
+            initial_boot_messages = get_initial_boot_messages("startup_with_send_message", agent_state.timezone, tool_call_id)
+
+        # Some LMStudio models (e.g. meta-llama-3.1) require the user message before any tool calls
+        if llm_config.provider_name == "lmstudio_openai":
+            messages = (
+                [
+                    {"role": "system", "content": full_system_message},
+                ]
+                + [
+                    {"role": "user", "content": first_user_message},
+                ]
+                + initial_boot_messages
+            )
+        else:
+            messages = (
+                [
+                    {"role": "system", "content": full_system_message},
+                ]
+                + initial_boot_messages
+                + [
+                    {"role": "user", "content": first_user_message},
+                ]
+            )
+
+    else:
+        messages = [
+            {"role": "system", "content": full_system_message},
+            {"role": "user", "content": first_user_message},
+        ]
+
+    return messages
+
+
+@trace_method
+async def initialize_message_sequence_async(
+    agent_state: AgentState,
+    memory_edit_timestamp: Optional[datetime] = None,
+    include_initial_boot_message: bool = True,
+    previous_message_count: int = 0,
+    archival_memory_size: int = 0,
+) -> List[dict]:
+    if memory_edit_timestamp is None:
+        memory_edit_timestamp = get_local_time()
+
+    full_system_message = await compile_system_message_async(
+        system_prompt=agent_state.system,
+        in_context_memory=agent_state.memory,
+        in_context_memory_last_edit=memory_edit_timestamp,
+        timezone=agent_state.timezone,
+        user_defined_variables=None,
+        append_icm_if_missing=True,
+        previous_message_count=previous_message_count,
+        archival_memory_size=archival_memory_size,
+        sources=agent_state.sources,
+        max_files_open=agent_state.max_files_open,
+    )
+    first_user_message = get_login_event(agent_state.timezone)  # event letting Letta know the user just logged in
+
+    if include_initial_boot_message:
+        llm_config = agent_state.llm_config
+        uuid_str = str(uuid.uuid4())
+
+        # Some LMStudio models (e.g. ministral) require the tool call ID to be 9 alphanumeric characters
+        tool_call_id = uuid_str[:9] if llm_config.provider_name == "lmstudio_openai" else uuid_str
+
+        if agent_state.agent_type == AgentType.sleeptime_agent:
+            initial_boot_messages = []
+        elif llm_config.model is not None and "gpt-3.5" in llm_config.model:
+            initial_boot_messages = get_initial_boot_messages("startup_with_send_message_gpt35", agent_state.timezone, tool_call_id)
+        else:
+            initial_boot_messages = get_initial_boot_messages("startup_with_send_message", agent_state.timezone, tool_call_id)
+
+        # Some LMStudio models (e.g. meta-llama-3.1) require the user message before any tool calls
+        if llm_config.provider_name == "lmstudio_openai":
+            messages = (
+                [
+                    {"role": "system", "content": full_system_message},
+                ]
+                + [
+                    {"role": "user", "content": first_user_message},
+                ]
+                + initial_boot_messages
+            )
+        else:
+            messages = (
+                [
+                    {"role": "system", "content": full_system_message},
+                ]
+                + initial_boot_messages
+                + [
+                    {"role": "user", "content": first_user_message},
+                ]
+            )

         else:
             messages = [

letta/services/job_manager.py
CHANGED
@@ -831,8 +831,8 @@ class JobManager:
             logger.error(error_message)
             result["callback_error"] = error_message
             # Continue silently - callback failures should not affect job completion
-
-
+        finally:
+            return result

     @trace_method
     async def _dispatch_callback_async(self, callback_info: dict) -> dict:
@@ -860,5 +860,5 @@ class JobManager:
             logger.error(error_message)
             result["callback_error"] = error_message
             # Continue silently - callback failures should not affect job completion
-
-
+        finally:
+            return result

letta/services/mcp/oauth_utils.py
CHANGED
@@ -132,23 +132,18 @@ class MCPOAuthSession:
         except Exception:
             pass

-    async def store_authorization_code(self, code: str, state: str) ->
+    async def store_authorization_code(self, code: str, state: str) -> Optional[MCPOAuth]:
         """Store the authorization code from OAuth callback."""
         async with db_registry.async_session() as session:
             try:
                 oauth_record = await MCPOAuth.read_async(db_session=session, identifier=self.session_id, actor=None)
-
-                # if oauth_record.state != state:
-                #     return False
-
                 oauth_record.authorization_code = code
                 oauth_record.state = state
                 oauth_record.status = OAuthSessionStatus.AUTHORIZED
                 oauth_record.updated_at = datetime.now()
-                await oauth_record.update_async(db_session=session, actor=None)
-                return True
+                return await oauth_record.update_async(db_session=session, actor=None)
             except Exception:
-                return
+                return None

     async def get_authorization_url(self) -> Optional[str]:
         """Get the authorization URL for this session."""
@@ -177,16 +172,18 @@ async def create_oauth_provider(
     redirect_uri: str,
     mcp_manager: MCPManager,
     actor: PydanticUser,
+    logo_uri: Optional[str] = None,
     url_callback: Optional[Callable[[str], None]] = None,
 ) -> OAuthClientProvider:
     """Create an OAuth provider for MCP server authentication."""

     client_metadata_dict = {
-        "client_name": "Letta
+        "client_name": "Letta",
         "redirect_uris": [redirect_uri],
         "grant_types": ["authorization_code", "refresh_token"],
         "response_types": ["code"],
         "token_endpoint_auth_method": "client_secret_post",
+        "logo_uri": logo_uri,
     }

     # Use manager-based storage
@@ -290,144 +287,3 @@ def drill_down_exception(exception, depth=0, max_depth=5):

     error_info = "".join(error_details)
     return error_info
-
-
-def get_oauth_success_html() -> str:
-    """Generate HTML for successful OAuth authorization."""
-    return """
-    <!DOCTYPE html>
-    <html>
-    <head>
-        <title>Authorization Successful - Letta</title>
-        <style>
-            * {
-                margin: 0;
-                padding: 0;
-                box-sizing: border-box;
-            }
-
-            body {
-                font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif;
-                display: flex;
-                justify-content: center;
-                align-items: center;
-                min-height: 100vh;
-                margin: 0;
-                background-color: #f5f5f5;
-                background-image: url("data:image/svg+xml,%3Csvg width='1440' height='860' viewBox='0 0 1440 860' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cg clip-path='url(%23clip0_14823_146864)'%3E%3Cpath d='M720.001 1003.14C1080.62 1003.14 1372.96 824.028 1372.96 603.083C1372.96 382.138 1080.62 203.026 720.001 203.026C359.384 203.026 67.046 382.138 67.046 603.083C67.046 824.028 359.384 1003.14 720.001 1003.14Z' stroke='%23E1E2E3' stroke-width='1.5' stroke-miterlimit='10'/%3E%3Cpath d='M719.999 978.04C910.334 978.04 1064.63 883.505 1064.63 766.891C1064.63 650.276 910.334 555.741 719.999 555.741C529.665 555.741 375.368 650.276 375.368 766.891C375.368 883.505 529.665 978.04 719.999 978.04Z' stroke='%23E1E2E3' stroke-width='1.5' stroke-miterlimit='10'/%3E%3Cpath d='M720 1020.95C1262.17 1020.95 1701.68 756.371 1701.68 430C1701.68 103.629 1262.17 -160.946 720 -160.946C177.834 -160.946 -261.678 103.629 -261.678 430C-261.678 756.371 177.834 1020.95 720 1020.95Z' stroke='%23E1E2E3' stroke-width='1.5' stroke-miterlimit='10'/%3E%3Cpath d='M719.999 323.658C910.334 323.658 1064.63 223.814 1064.63 100.649C1064.63 -22.5157 910.334 -122.36 719.999 -122.36C529.665 -122.36 375.368 -22.5157 375.368 100.649C375.368 223.814 529.665 323.658 719.999 323.658Z' stroke='%23E1E2E3' stroke-width='1.5' stroke-miterlimit='10'/%3E%3Cpath d='M720.001 706.676C1080.62 706.676 1372.96 517.507 1372.96 284.155C1372.96 50.8029 1080.62 -138.366 720.001 -138.366C359.384 -138.366 67.046 50.8029 67.046 284.155C67.046 517.507 359.384 706.676 720.001 706.676Z' stroke='%23E1E2E3' stroke-width='1.5' stroke-miterlimit='10'/%3E%3Cpath d='M719.999 874.604C1180.69 874.604 1554.15 645.789 1554.15 363.531C1554.15 81.2725 1180.69 -147.543 719.999 -147.543C259.311 -147.543 -114.15 81.2725 -114.15 363.531C-114.15 645.789 259.311 874.604 719.999 874.604Z' stroke='%23E1E2E3' stroke-width='1.5' stroke-miterlimit='10'/%3E%3C/g%3E%3Cdefs%3E%3CclipPath id='clip0_14823_146864'%3E%3Crect width='1440' height='860' fill='white'/%3E%3C/clipPath%3E%3C/defs%3E%3C/svg%3E");
-                background-size: cover;
-                background-position: center;
-                background-repeat: no-repeat;
-            }
-
-            .card {
-                text-align: center;
-                padding: 48px;
-                background: white;
-                border-radius: 8px;
-                border: 1px solid #E1E2E3;
-                max-width: 400px;
-                width: 90%;
-                position: relative;
-                z-index: 1;
-            }
-
-            .logo {
-                width: 48px;
-                height: 48px;
-                margin: 0 auto 24px;
-                display: block;
-            }
-
-            .logo svg {
-                width: 100%;
-                height: 100%;
-            }
-
-            h1 {
-                font-size: 20px;
-                font-weight: 600;
-                color: #101010;
-                margin-bottom: 12px;
-                line-height: 1.2;
-            }
-
-            .subtitle {
-                color: #666;
-                font-size: 12px;
-                margin-top: 10px;
-                margin-bottom: 24px;
-                line-height: 1.5;
-            }
-
-            .close-info {
-                font-size: 12px;
-                color: #999;
-                display: flex;
-                align-items: center;
-                justify-content: center;
-                gap: 8px;
-            }
-
-            .spinner {
-                width: 16px;
-                height: 16px;
-                border: 2px solid #E1E2E3;
-                border-top: 2px solid #333;
-                border-radius: 50%;
-                animation: spin 1s linear infinite;
-            }
-
-            @keyframes spin {
-                0% { transform: rotate(0deg); }
-                100% { transform: rotate(360deg); }
-            }
-
-            /* Dark mode styles */
-            @media (prefers-color-scheme: dark) {
-                body {
-                    background-color: #101010;
-                    background-image: url("data:image/svg+xml,%3Csvg width='1440' height='860' viewBox='0 0 1440 860' fill='none' xmlns='http://www.w3.org/2000/svg'%3E%3Cg clip-path='url(%23clip0_14833_149362)'%3E%3Cpath d='M720.001 1003.14C1080.62 1003.14 1372.96 824.028 1372.96 603.083C1372.96 382.138 1080.62 203.026 720.001 203.026C359.384 203.026 67.046 382.138 67.046 603.083C67.046 824.028 359.384 1003.14 720.001 1003.14Z' stroke='%2346484A' stroke-width='1.5' stroke-miterlimit='10'/%3E%3Cpath d='M719.999 978.04C910.334 978.04 1064.63 883.505 1064.63 766.891C1064.63 650.276 910.334 555.741 719.999 555.741C529.665 555.741 375.368 650.276 375.368 766.891C375.368 883.505 529.665 978.04 719.999 978.04Z' stroke='%2346484A' stroke-width='1.5' stroke-miterlimit='10'/%3E%3Cpath d='M720 1020.95C1262.17 1020.95 1701.68 756.371 1701.68 430C1701.68 103.629 1262.17 -160.946 720 -160.946C177.834 -160.946 -261.678 103.629 -261.678 430C-261.678 756.371 177.834 1020.95 720 1020.95Z' stroke='%2346484A' stroke-width='1.5' stroke-miterlimit='10'/%3E%3Cpath d='M719.999 323.658C910.334 323.658 1064.63 223.814 1064.63 100.649C1064.63 -22.5157 910.334 -122.36 719.999 -122.36C529.665 -122.36 375.368 -22.5157 375.368 100.649C375.368 223.814 529.665 323.658 719.999 323.658Z' stroke='%2346484A' stroke-width='1.5' stroke-miterlimit='10'/%3E%3Cpath d='M720.001 706.676C1080.62 706.676 1372.96 517.507 1372.96 284.155C1372.96 50.8029 1080.62 -138.366 720.001 -138.366C359.384 -138.366 67.046 50.8029 67.046 284.155C67.046 517.507 359.384 706.676 720.001 706.676Z' stroke='%2346484A' stroke-width='1.5' stroke-miterlimit='10'/%3E%3Cpath d='M719.999 874.604C1180.69 874.604 1554.15 645.789 1554.15 363.531C1554.15 81.2725 1180.69 -147.543 719.999 -147.543C259.311 -147.543 -114.15 81.2725 -114.15 363.531C-114.15 645.789 259.311 874.604 719.999 874.604Z' stroke='%2346484A' stroke-width='1.5' stroke-miterlimit='10'/%3E%3C/g%3E%3Cdefs%3E%3CclipPath id='clip0_14833_149362'%3E%3Crect width='1440' height='860' fill='white'/%3E%3C/clipPath%3E%3C/defs%3E%3C/svg%3E");
-                }
-
-                .card {
-                    background-color: #141414;
-                    border-color: #202020;
-                }
-
-                h1 {
-                    color: #E1E2E3;
-                }
-
-                .subtitle {
-                    color: #999;
-                }
-
-                .logo svg path {
-                    fill: #E1E2E3;
-                }
-
-                .spinner {
-                    border-color: #46484A;
-                    border-top-color: #E1E2E3;
-                }
-            }
-        </style>
-    </head>
-    <body>
-        <div class="card">
-            <div class="logo">
-                <svg width="48" height="48" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg">
-                    <path d="M10.7134 7.30028H7.28759V10.7002H10.7134V7.30028Z" fill="#333"/>
-                    <path d="M14.1391 2.81618V0.5H3.86131V2.81618C3.86131 3.41495 3.37266 3.89991 2.76935 3.89991H0.435547V14.1001H2.76935C3.37266 14.1001 3.86131 14.5851 3.86131 15.1838V17.5H14.1391V15.1838C14.1391 14.5851 14.6277 14.1001 15.231 14.1001H17.5648V3.89991H15.231C14.6277 3.89991 14.1391 3.41495 14.1391 2.81618ZM14.1391 13.0159C14.1391 13.6147 13.6504 14.0996 13.0471 14.0996H4.95375C4.35043 14.0996 3.86179 13.6147 3.86179 13.0159V4.98363C3.86179 4.38486 4.35043 3.89991 4.95375 3.89991H13.0471C13.6504 3.89991 14.1391 4.38486 14.1391 4.98363V13.0159Z" fill="#333"/>
-                </svg>
-            </div>
-            <h3>Authorization Successful</h3>
-            <p class="subtitle">You have successfully connected your MCP server.</p>
-            <div class="close-info">
-                <span>You can now close this window.</span>
-            </div>
-        </div>
-    </body>
-    </html>
-    """

letta/services/mcp_manager.py
CHANGED
@@ -7,6 +7,7 @@ from typing import Any, Dict, List, Optional, Tuple, Union

 from fastapi import HTTPException
 from sqlalchemy import null
+from starlette.requests import Request

 import letta.constants as constants
 from letta.functions.mcp_client.types import MCPServerType, MCPTool, SSEServerConfig, StdioServerConfig, StreamableHTTPServerConfig
@@ -66,7 +67,12 @@ class MCPManager:

     @enforce_types
     async def execute_mcp_server_tool(
-        self,
+        self,
+        mcp_server_name: str,
+        tool_name: str,
+        tool_args: Optional[Dict[str, Any]],
+        environment_variables: Dict[str, str],
+        actor: PydanticUser,
     ) -> Tuple[str, bool]:
         """Call a specific tool from a specific MCP server."""
         from letta.settings import tool_settings
@@ -75,7 +81,7 @@ class MCPManager:
             # read from DB
             mcp_server_id = await self.get_mcp_server_id_by_name(mcp_server_name, actor=actor)
             mcp_config = await self.get_mcp_server_by_id_async(mcp_server_id, actor=actor)
-            server_config = mcp_config.to_config()
+            server_config = mcp_config.to_config(environment_variables)
         else:
             # read from config file
             mcp_config = self.read_mcp_config()
@@ -581,3 +587,115 @@ class MCPManager:
         logger.info(f"Cleaned up {len(expired_sessions)} expired OAuth sessions")

         return len(expired_sessions)
+
+    @enforce_types
+    async def handle_oauth_flow(
+        self,
+        request: Union[SSEServerConfig, StdioServerConfig, StreamableHTTPServerConfig],
+        actor: PydanticUser,
+        http_request: Optional[Request] = None,
+    ):
+        """
+        Handle OAuth flow for MCP server connection and yield SSE events.
+
+        Args:
+            request: The server configuration
+            actor: The user making the request
+            http_request: The HTTP request object
+
+        Yields:
+            SSE events during OAuth flow
+
+        Returns:
+            Tuple of (temp_client, connect_task) after yielding events
+        """
+        import asyncio
+
+        from letta.services.mcp.oauth_utils import create_oauth_provider, oauth_stream_event
+        from letta.services.mcp.types import OauthStreamEvent
+
+        # OAuth required, yield state to client to prepare to handle authorization URL
+        yield oauth_stream_event(OauthStreamEvent.OAUTH_REQUIRED, message="OAuth authentication required")
+
+        # Create OAuth session to persist the state of the OAuth flow
+        session_create = MCPOAuthSessionCreate(
+            server_url=request.server_url,
+            server_name=request.server_name,
+            user_id=actor.id,
+            organization_id=actor.organization_id,
+        )
+        oauth_session = await self.create_oauth_session(session_create, actor)
+        session_id = oauth_session.id
+
+        # TODO: @jnjpng make this check more robust and remove direct os.getenv
+        # Check if request is from web frontend to determine redirect URI
+        is_web_request = (
+            http_request
+            and http_request.headers
+            and http_request.headers.get("user-agent", "") == "Next.js Middleware"
+            and http_request.headers.__contains__("x-organization-id")
+        )
+
+        logo_uri = None
+        NEXT_PUBLIC_CURRENT_HOST = os.getenv("NEXT_PUBLIC_CURRENT_HOST")
+        LETTA_AGENTS_ENDPOINT = os.getenv("LETTA_AGENTS_ENDPOINT")
+
+        if is_web_request and NEXT_PUBLIC_CURRENT_HOST:
+            redirect_uri = f"{NEXT_PUBLIC_CURRENT_HOST}/oauth/callback/{session_id}"
+            logo_uri = f"{NEXT_PUBLIC_CURRENT_HOST}/seo/favicon.svg"
+        elif LETTA_AGENTS_ENDPOINT:
+            # API and SDK usage should call core server directly
+            redirect_uri = f"{LETTA_AGENTS_ENDPOINT}/v1/tools/mcp/oauth/callback/{session_id}"
+        else:
+            logger.error(
+                f"No redirect URI found for request and base urls: {http_request.headers if http_request else 'No headers'} {NEXT_PUBLIC_CURRENT_HOST} {LETTA_AGENTS_ENDPOINT}"
+            )
+            raise HTTPException(status_code=400, detail="No redirect URI found")
+
+        # Create OAuth provider for the instance of the stream connection
+        oauth_provider = await create_oauth_provider(session_id, request.server_url, redirect_uri, self, actor, logo_uri=logo_uri)
+
+        # Get authorization URL by triggering OAuth flow
+        temp_client = None
+        connect_task = None
+        try:
+            temp_client = await self.get_mcp_client(request, actor, oauth_provider)
+
+            # Run connect_to_server in background to avoid blocking
+            # This will trigger the OAuth flow and the redirect_handler will save the authorization URL to database
+            connect_task = asyncio.create_task(temp_client.connect_to_server())
+
+            # Give the OAuth flow time to trigger and save the URL
+            await asyncio.sleep(1.0)
+
+            # Fetch the authorization URL from database and yield state to client to proceed with handling authorization URL
+            auth_session = await self.get_oauth_session_by_id(session_id, actor)
+            if auth_session and auth_session.authorization_url:
+                yield oauth_stream_event(OauthStreamEvent.AUTHORIZATION_URL, url=auth_session.authorization_url, session_id=session_id)
+
+            # Wait for user authorization (with timeout), client should render loading state until user completes the flow and /mcp/oauth/callback/{session_id} is hit
+            yield oauth_stream_event(OauthStreamEvent.WAITING_FOR_AUTH, message="Waiting for user authorization...")
+
+            # Callback handler will poll for authorization code and state and update the OAuth session
+            await connect_task
+
+            tools = await temp_client.list_tools(serialize=True)
+            yield oauth_stream_event(OauthStreamEvent.SUCCESS, tools=tools)
+
+        except Exception as e:
+            logger.error(f"Error triggering OAuth flow: {e}")
+            yield oauth_stream_event(OauthStreamEvent.ERROR, message=f"Failed to trigger OAuth: {str(e)}")
+            raise e
+        finally:
+            # Clean up resources
+            if connect_task and not connect_task.done():
+                connect_task.cancel()
+                try:
+                    await connect_task
+                except asyncio.CancelledError:
+                    pass
+            if temp_client:
+                try:
+                    await temp_client.cleanup()
+                except Exception as cleanup_error:
+                    logger.warning(f"Error during temp MCP client cleanup: {cleanup_error}")


letta/services/sandbox_config_manager.py
CHANGED
@@ -6,11 +6,12 @@ from letta.orm.errors import NoResultFound
 from letta.orm.sandbox_config import SandboxConfig as SandboxConfigModel
 from letta.orm.sandbox_config import SandboxEnvironmentVariable as SandboxEnvVarModel
 from letta.otel.tracing import trace_method
+from letta.schemas.enums import SandboxType
 from letta.schemas.environment_variables import SandboxEnvironmentVariable as PydanticEnvVar
 from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate
 from letta.schemas.sandbox_config import LocalSandboxConfig
 from letta.schemas.sandbox_config import SandboxConfig as PydanticSandboxConfig
-from letta.schemas.sandbox_config import SandboxConfigCreate, SandboxConfigUpdate
+from letta.schemas.sandbox_config import SandboxConfigCreate, SandboxConfigUpdate
 from letta.schemas.user import User as PydanticUser
 from letta.server.db import db_registry
 from letta.utils import enforce_types, printd
@@ -493,10 +494,7 @@ class SandboxConfigManager:
         self, sandbox_config_id: str, actor: PydanticUser, after: Optional[str] = None, limit: Optional[int] = 50
     ) -> Dict[str, str]:
         env_vars = await self.list_sandbox_env_vars_async(sandbox_config_id, actor, after, limit)
-        result = {}
-        for env_var in env_vars:
-            result[env_var.key] = env_var.value
-        return result
+        return {env_var.key: env_var.value for env_var in env_vars}

     @enforce_types
     @trace_method

letta/services/tool_executor/builtin_tool_executor.py
CHANGED
@@ -105,13 +105,7 @@ class LettaBuiltinToolExecutor(ToolExecutor):
         return out

     @trace_method
-    async def web_search(
-        self,
-        agent_state: "AgentState",
-        tasks: List[SearchTask],
-        limit: int = 3,
-        return_raw: bool = False,
-    ) -> str:
+    async def web_search(self, agent_state: "AgentState", tasks: List[SearchTask], limit: int = 1, return_raw: bool = True) -> str:
         """
         Search the web with a list of query/question pairs and extract passages that answer the corresponding questions.

@@ -138,10 +132,10 @@ class LettaBuiltinToolExecutor(ToolExecutor):
         Each result includes ranked snippets with their source URLs and relevance scores,
         corresponding to each search task.
         """
-        # TODO: Temporary, maybe deprecate this field?
-        if return_raw:
-            logger.warning("WARNING! return_raw was set to True, we default to False always. Deprecate this field.")
-            return_raw = False
+        # # TODO: Temporary, maybe deprecate this field?
+        # if return_raw:
+        #     logger.warning("WARNING! return_raw was set to True, we default to False always. Deprecate this field.")
+        #     return_raw = False
         try:
             from firecrawl import AsyncFirecrawlApp
         except ImportError:
@@ -175,13 +169,14 @@ class LettaBuiltinToolExecutor(ToolExecutor):
         # Initialize Firecrawl client
         app = AsyncFirecrawlApp(api_key=firecrawl_api_key)

-        # Process all search tasks
-
-
-
-
-
-
+        # Process all search tasks serially
+        search_results = []
+        for task in search_tasks:
+            try:
+                result = await self._process_single_search_task(app, task, limit, return_raw, api_key_source, agent_state)
+                search_results.append(result)
+            except Exception as e:
+                search_results.append(e)

         # Build final response as a mapping of query -> result
         final_results = {}