letta-nightly 0.12.1.dev20251024104217__py3-none-any.whl → 0.13.0.dev20251024223017__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of letta-nightly might be problematic. See the package registry's advisory page for more details.

Files changed (159)
  1. letta/__init__.py +2 -3
  2. letta/adapters/letta_llm_adapter.py +1 -0
  3. letta/adapters/simple_llm_request_adapter.py +8 -5
  4. letta/adapters/simple_llm_stream_adapter.py +22 -6
  5. letta/agents/agent_loop.py +10 -3
  6. letta/agents/base_agent.py +4 -1
  7. letta/agents/helpers.py +41 -9
  8. letta/agents/letta_agent.py +11 -10
  9. letta/agents/letta_agent_v2.py +47 -37
  10. letta/agents/letta_agent_v3.py +395 -300
  11. letta/agents/voice_agent.py +8 -6
  12. letta/agents/voice_sleeptime_agent.py +3 -3
  13. letta/constants.py +30 -7
  14. letta/errors.py +20 -0
  15. letta/functions/function_sets/base.py +55 -3
  16. letta/functions/mcp_client/types.py +33 -57
  17. letta/functions/schema_generator.py +135 -23
  18. letta/groups/sleeptime_multi_agent_v3.py +6 -11
  19. letta/groups/sleeptime_multi_agent_v4.py +227 -0
  20. letta/helpers/converters.py +78 -4
  21. letta/helpers/crypto_utils.py +6 -2
  22. letta/interfaces/anthropic_parallel_tool_call_streaming_interface.py +9 -11
  23. letta/interfaces/anthropic_streaming_interface.py +3 -4
  24. letta/interfaces/gemini_streaming_interface.py +4 -6
  25. letta/interfaces/openai_streaming_interface.py +63 -28
  26. letta/llm_api/anthropic_client.py +7 -4
  27. letta/llm_api/deepseek_client.py +6 -4
  28. letta/llm_api/google_ai_client.py +3 -12
  29. letta/llm_api/google_vertex_client.py +1 -1
  30. letta/llm_api/helpers.py +90 -61
  31. letta/llm_api/llm_api_tools.py +4 -1
  32. letta/llm_api/openai.py +12 -12
  33. letta/llm_api/openai_client.py +53 -16
  34. letta/local_llm/constants.py +4 -3
  35. letta/local_llm/json_parser.py +5 -2
  36. letta/local_llm/utils.py +2 -3
  37. letta/log.py +171 -7
  38. letta/orm/agent.py +43 -9
  39. letta/orm/archive.py +4 -0
  40. letta/orm/custom_columns.py +15 -0
  41. letta/orm/identity.py +11 -11
  42. letta/orm/mcp_server.py +9 -0
  43. letta/orm/message.py +6 -1
  44. letta/orm/run_metrics.py +7 -2
  45. letta/orm/sqlalchemy_base.py +2 -2
  46. letta/orm/tool.py +3 -0
  47. letta/otel/tracing.py +2 -0
  48. letta/prompts/prompt_generator.py +7 -2
  49. letta/schemas/agent.py +41 -10
  50. letta/schemas/agent_file.py +3 -0
  51. letta/schemas/archive.py +4 -2
  52. letta/schemas/block.py +2 -1
  53. letta/schemas/enums.py +36 -3
  54. letta/schemas/file.py +3 -3
  55. letta/schemas/folder.py +2 -1
  56. letta/schemas/group.py +2 -1
  57. letta/schemas/identity.py +18 -9
  58. letta/schemas/job.py +3 -1
  59. letta/schemas/letta_message.py +71 -12
  60. letta/schemas/letta_request.py +7 -3
  61. letta/schemas/letta_stop_reason.py +0 -25
  62. letta/schemas/llm_config.py +8 -2
  63. letta/schemas/mcp.py +80 -83
  64. letta/schemas/mcp_server.py +349 -0
  65. letta/schemas/memory.py +20 -8
  66. letta/schemas/message.py +212 -67
  67. letta/schemas/providers/anthropic.py +13 -6
  68. letta/schemas/providers/azure.py +6 -4
  69. letta/schemas/providers/base.py +8 -4
  70. letta/schemas/providers/bedrock.py +6 -2
  71. letta/schemas/providers/cerebras.py +7 -3
  72. letta/schemas/providers/deepseek.py +2 -1
  73. letta/schemas/providers/google_gemini.py +15 -6
  74. letta/schemas/providers/groq.py +2 -1
  75. letta/schemas/providers/lmstudio.py +9 -6
  76. letta/schemas/providers/mistral.py +2 -1
  77. letta/schemas/providers/openai.py +7 -2
  78. letta/schemas/providers/together.py +9 -3
  79. letta/schemas/providers/xai.py +7 -3
  80. letta/schemas/run.py +7 -2
  81. letta/schemas/run_metrics.py +2 -1
  82. letta/schemas/sandbox_config.py +2 -2
  83. letta/schemas/secret.py +3 -158
  84. letta/schemas/source.py +2 -2
  85. letta/schemas/step.py +2 -2
  86. letta/schemas/tool.py +24 -1
  87. letta/schemas/usage.py +0 -1
  88. letta/server/rest_api/app.py +123 -7
  89. letta/server/rest_api/dependencies.py +3 -0
  90. letta/server/rest_api/interface.py +7 -4
  91. letta/server/rest_api/redis_stream_manager.py +16 -1
  92. letta/server/rest_api/routers/v1/__init__.py +7 -0
  93. letta/server/rest_api/routers/v1/agents.py +332 -322
  94. letta/server/rest_api/routers/v1/archives.py +127 -40
  95. letta/server/rest_api/routers/v1/blocks.py +54 -6
  96. letta/server/rest_api/routers/v1/chat_completions.py +146 -0
  97. letta/server/rest_api/routers/v1/folders.py +27 -35
  98. letta/server/rest_api/routers/v1/groups.py +23 -35
  99. letta/server/rest_api/routers/v1/identities.py +24 -10
  100. letta/server/rest_api/routers/v1/internal_runs.py +107 -0
  101. letta/server/rest_api/routers/v1/internal_templates.py +162 -179
  102. letta/server/rest_api/routers/v1/jobs.py +15 -27
  103. letta/server/rest_api/routers/v1/mcp_servers.py +309 -0
  104. letta/server/rest_api/routers/v1/messages.py +23 -34
  105. letta/server/rest_api/routers/v1/organizations.py +6 -27
  106. letta/server/rest_api/routers/v1/providers.py +35 -62
  107. letta/server/rest_api/routers/v1/runs.py +30 -43
  108. letta/server/rest_api/routers/v1/sandbox_configs.py +6 -4
  109. letta/server/rest_api/routers/v1/sources.py +26 -42
  110. letta/server/rest_api/routers/v1/steps.py +16 -29
  111. letta/server/rest_api/routers/v1/tools.py +17 -13
  112. letta/server/rest_api/routers/v1/users.py +5 -17
  113. letta/server/rest_api/routers/v1/voice.py +18 -27
  114. letta/server/rest_api/streaming_response.py +5 -2
  115. letta/server/rest_api/utils.py +187 -25
  116. letta/server/server.py +27 -22
  117. letta/server/ws_api/server.py +5 -4
  118. letta/services/agent_manager.py +148 -26
  119. letta/services/agent_serialization_manager.py +6 -1
  120. letta/services/archive_manager.py +168 -15
  121. letta/services/block_manager.py +14 -4
  122. letta/services/file_manager.py +33 -29
  123. letta/services/group_manager.py +10 -0
  124. letta/services/helpers/agent_manager_helper.py +65 -11
  125. letta/services/identity_manager.py +105 -4
  126. letta/services/job_manager.py +11 -1
  127. letta/services/mcp/base_client.py +2 -2
  128. letta/services/mcp/oauth_utils.py +33 -8
  129. letta/services/mcp_manager.py +174 -78
  130. letta/services/mcp_server_manager.py +1331 -0
  131. letta/services/message_manager.py +109 -4
  132. letta/services/organization_manager.py +4 -4
  133. letta/services/passage_manager.py +9 -25
  134. letta/services/provider_manager.py +91 -15
  135. letta/services/run_manager.py +72 -15
  136. letta/services/sandbox_config_manager.py +45 -3
  137. letta/services/source_manager.py +15 -8
  138. letta/services/step_manager.py +24 -1
  139. letta/services/streaming_service.py +581 -0
  140. letta/services/summarizer/summarizer.py +1 -1
  141. letta/services/tool_executor/core_tool_executor.py +111 -0
  142. letta/services/tool_executor/files_tool_executor.py +5 -3
  143. letta/services/tool_executor/sandbox_tool_executor.py +2 -2
  144. letta/services/tool_executor/tool_execution_manager.py +1 -1
  145. letta/services/tool_manager.py +10 -3
  146. letta/services/tool_sandbox/base.py +61 -1
  147. letta/services/tool_sandbox/local_sandbox.py +1 -3
  148. letta/services/user_manager.py +2 -2
  149. letta/settings.py +49 -5
  150. letta/system.py +14 -5
  151. letta/utils.py +73 -1
  152. letta/validators.py +105 -0
  153. {letta_nightly-0.12.1.dev20251024104217.dist-info → letta_nightly-0.13.0.dev20251024223017.dist-info}/METADATA +4 -2
  154. {letta_nightly-0.12.1.dev20251024104217.dist-info → letta_nightly-0.13.0.dev20251024223017.dist-info}/RECORD +157 -151
  155. letta/schemas/letta_ping.py +0 -28
  156. letta/server/rest_api/routers/openai/chat_completions/__init__.py +0 -0
  157. {letta_nightly-0.12.1.dev20251024104217.dist-info → letta_nightly-0.13.0.dev20251024223017.dist-info}/WHEEL +0 -0
  158. {letta_nightly-0.12.1.dev20251024104217.dist-info → letta_nightly-0.13.0.dev20251024223017.dist-info}/entry_points.txt +0 -0
  159. {letta_nightly-0.12.1.dev20251024104217.dist-info → letta_nightly-0.13.0.dev20251024223017.dist-info}/licenses/LICENSE +0 -0
@@ -1,6 +1,10 @@
1
1
  import asyncio
2
2
  from typing import Literal
3
3
 
4
+ from letta.log import get_logger
5
+
6
+ logger = get_logger(__name__)
7
+
4
8
  from pydantic import Field
5
9
 
6
10
  from letta.constants import DEFAULT_EMBEDDING_CHUNK_SIZE, LLM_MAX_TOKENS
@@ -19,13 +23,15 @@ class GoogleAIProvider(Provider):
19
23
  async def check_api_key(self):
20
24
  from letta.llm_api.google_ai_client import google_ai_check_valid_api_key
21
25
 
22
- google_ai_check_valid_api_key(self.api_key)
26
+ api_key = self.get_api_key_secret().get_plaintext()
27
+ google_ai_check_valid_api_key(api_key)
23
28
 
24
29
  async def list_llm_models_async(self):
25
30
  from letta.llm_api.google_ai_client import google_ai_get_model_list_async
26
31
 
27
32
  # Get and filter the model list
28
- model_options = await google_ai_get_model_list_async(base_url=self.base_url, api_key=self.api_key)
33
+ api_key = self.get_api_key_secret().get_plaintext()
34
+ model_options = await google_ai_get_model_list_async(base_url=self.base_url, api_key=api_key)
29
35
  model_options = [mo for mo in model_options if "generateContent" in mo["supportedGenerationMethods"]]
30
36
  model_options = [str(m["name"]) for m in model_options]
31
37
 
@@ -58,7 +64,8 @@ class GoogleAIProvider(Provider):
58
64
  from letta.llm_api.google_ai_client import google_ai_get_model_list_async
59
65
 
60
66
  # TODO: use base_url instead
61
- model_options = await google_ai_get_model_list_async(base_url=self.base_url, api_key=self.api_key)
67
+ api_key = self.get_api_key_secret().get_plaintext()
68
+ model_options = await google_ai_get_model_list_async(base_url=self.base_url, api_key=api_key)
62
69
  return self._list_embedding_models(model_options)
63
70
 
64
71
  def _list_embedding_models(self, model_options):
@@ -85,13 +92,14 @@ class GoogleAIProvider(Provider):
85
92
  def get_model_context_window(self, model_name: str) -> int | None:
86
93
  import warnings
87
94
 
88
- warnings.warn("This is deprecated, use get_model_context_window_async when possible.", DeprecationWarning)
95
+ logger.warning("This is deprecated, use get_model_context_window_async when possible.")
89
96
  from letta.llm_api.google_ai_client import google_ai_get_model_context_window
90
97
 
91
98
  if model_name in LLM_MAX_TOKENS:
92
99
  return LLM_MAX_TOKENS[model_name]
93
100
  else:
94
- return google_ai_get_model_context_window(self.base_url, self.api_key, model_name)
101
+ api_key = self.get_api_key_secret().get_plaintext()
102
+ return google_ai_get_model_context_window(self.base_url, api_key, model_name)
95
103
 
96
104
  async def get_model_context_window_async(self, model_name: str) -> int | None:
97
105
  from letta.llm_api.google_ai_client import google_ai_get_model_context_window_async
@@ -99,4 +107,5 @@ class GoogleAIProvider(Provider):
99
107
  if model_name in LLM_MAX_TOKENS:
100
108
  return LLM_MAX_TOKENS[model_name]
101
109
  else:
102
- return await google_ai_get_model_context_window_async(self.base_url, self.api_key, model_name)
110
+ api_key = self.get_api_key_secret().get_plaintext()
111
+ return await google_ai_get_model_context_window_async(self.base_url, api_key, model_name)
@@ -16,7 +16,8 @@ class GroqProvider(OpenAIProvider):
16
16
  async def list_llm_models_async(self) -> list[LLMConfig]:
17
17
  from letta.llm_api.openai import openai_get_model_list_async
18
18
 
19
- response = await openai_get_model_list_async(self.base_url, api_key=self.api_key)
19
+ api_key = self.get_api_key_secret().get_plaintext()
20
+ response = await openai_get_model_list_async(self.base_url, api_key=api_key)
20
21
  configs = []
21
22
  for model in response["data"]:
22
23
  if "context_window" not in model:
@@ -1,6 +1,9 @@
1
- import warnings
2
1
  from typing import Literal
3
2
 
3
+ from letta.log import get_logger
4
+
5
+ logger = get_logger(__name__)
6
+
4
7
  from pydantic import Field
5
8
 
6
9
  from letta.constants import DEFAULT_EMBEDDING_CHUNK_SIZE
@@ -27,14 +30,14 @@ class LMStudioOpenAIProvider(OpenAIProvider):
27
30
  response = await openai_get_model_list_async(self.model_endpoint_url)
28
31
 
29
32
  if "data" not in response:
30
- warnings.warn(f"LMStudio OpenAI model query response missing 'data' field: {response}")
33
+ logger.warning(f"LMStudio OpenAI model query response missing 'data' field: {response}")
31
34
  return []
32
35
 
33
36
  configs = []
34
37
  for model in response["data"]:
35
38
  model_type = model.get("type")
36
39
  if not model_type:
37
- warnings.warn(f"LMStudio OpenAI model missing 'type' field: {model}")
40
+ logger.warning(f"LMStudio OpenAI model missing 'type' field: {model}")
38
41
  continue
39
42
  if model_type not in ("vlm", "llm"):
40
43
  continue
@@ -48,7 +51,7 @@ class LMStudioOpenAIProvider(OpenAIProvider):
48
51
  if "compatibility_type" in model:
49
52
  compatibility_type = model["compatibility_type"]
50
53
  else:
51
- warnings.warn(f"LMStudio OpenAI model missing 'compatibility_type' field: {model}")
54
+ logger.warning(f"LMStudio OpenAI model missing 'compatibility_type' field: {model}")
52
55
  continue
53
56
 
54
57
  configs.append(
@@ -72,14 +75,14 @@ class LMStudioOpenAIProvider(OpenAIProvider):
72
75
  response = await openai_get_model_list_async(self.model_endpoint_url)
73
76
 
74
77
  if "data" not in response:
75
- warnings.warn(f"LMStudio OpenAI model query response missing 'data' field: {response}")
78
+ logger.warning(f"LMStudio OpenAI model query response missing 'data' field: {response}")
76
79
  return []
77
80
 
78
81
  configs = []
79
82
  for model in response["data"]:
80
83
  model_type = model.get("type")
81
84
  if not model_type:
82
- warnings.warn(f"LMStudio OpenAI model missing 'type' field: {model}")
85
+ logger.warning(f"LMStudio OpenAI model missing 'type' field: {model}")
83
86
  continue
84
87
  if model_type not in ("embeddings"):
85
88
  continue
@@ -18,7 +18,8 @@ class MistralProvider(Provider):
18
18
 
19
19
  # Some hardcoded support for OpenRouter (so that we only get models with tool calling support)...
20
20
  # See: https://openrouter.ai/docs/requests
21
- response = await mistral_get_model_list_async(self.base_url, api_key=self.api_key)
21
+ api_key = self.get_api_key_secret().get_plaintext()
22
+ response = await mistral_get_model_list_async(self.base_url, api_key=api_key)
22
23
 
23
24
  assert "data" in response, f"Mistral model query response missing 'data' field: {response}"
24
25
 
@@ -25,7 +25,9 @@ class OpenAIProvider(Provider):
25
25
  async def check_api_key(self):
26
26
  from letta.llm_api.openai import openai_check_valid_api_key # TODO: DO NOT USE THIS - old code path
27
27
 
28
- openai_check_valid_api_key(self.base_url, self.api_key)
28
+ # Decrypt API key before using
29
+ api_key = self.get_api_key_secret().get_plaintext()
30
+ openai_check_valid_api_key(self.base_url, api_key)
29
31
 
30
32
  async def _get_models_async(self) -> list[dict]:
31
33
  from letta.llm_api.openai import openai_get_model_list_async
@@ -37,9 +39,12 @@ class OpenAIProvider(Provider):
37
39
  # Similar to Nebius
38
40
  extra_params = {"verbose": True} if "nebius.com" in self.base_url else None
39
41
 
42
+ # Decrypt API key before using
43
+ api_key = self.get_api_key_secret().get_plaintext()
44
+
40
45
  response = await openai_get_model_list_async(
41
46
  self.base_url,
42
- api_key=self.api_key,
47
+ api_key=api_key,
43
48
  extra_params=extra_params,
44
49
  # fix_url=True, # NOTE: make sure together ends with /v1
45
50
  )
@@ -4,6 +4,10 @@ Note: this supports completions (deprecated by openai) and chat completions via
4
4
 
5
5
  from typing import Literal, Optional
6
6
 
7
+ from letta.log import get_logger
8
+
9
+ logger = get_logger(__name__)
10
+
7
11
  from pydantic import Field
8
12
 
9
13
  from letta.constants import MIN_CONTEXT_WINDOW
@@ -26,13 +30,14 @@ class TogetherProvider(OpenAIProvider):
26
30
  async def list_llm_models_async(self) -> list[LLMConfig]:
27
31
  from letta.llm_api.openai import openai_get_model_list_async
28
32
 
29
- models = await openai_get_model_list_async(self.base_url, api_key=self.api_key)
33
+ api_key = self.get_api_key_secret().get_plaintext()
34
+ models = await openai_get_model_list_async(self.base_url, api_key=api_key)
30
35
  return self._list_llm_models(models)
31
36
 
32
37
  async def list_embedding_models_async(self) -> list[EmbeddingConfig]:
33
38
  import warnings
34
39
 
35
- warnings.warn(
40
+ logger.warning(
36
41
  "Letta does not currently support listing embedding models for Together. Please "
37
42
  "contact support or reach out via GitHub or Discord to get support."
38
43
  )
@@ -88,7 +93,8 @@ class TogetherProvider(OpenAIProvider):
88
93
  return configs
89
94
 
90
95
  async def check_api_key(self):
91
- if not self.api_key:
96
+ api_key = self.get_api_key_secret().get_plaintext()
97
+ if not api_key:
92
98
  raise ValueError("No API key provided")
93
99
 
94
100
  try:
@@ -1,6 +1,9 @@
1
- import warnings
2
1
  from typing import Literal
3
2
 
3
+ from letta.log import get_logger
4
+
5
+ logger = get_logger(__name__)
6
+
4
7
  from pydantic import Field
5
8
 
6
9
  from letta.schemas.enums import ProviderCategory, ProviderType
@@ -32,7 +35,8 @@ class XAIProvider(OpenAIProvider):
32
35
  async def list_llm_models_async(self) -> list[LLMConfig]:
33
36
  from letta.llm_api.openai import openai_get_model_list_async
34
37
 
35
- response = await openai_get_model_list_async(self.base_url, api_key=self.api_key)
38
+ api_key = self.get_api_key_secret().get_plaintext()
39
+ response = await openai_get_model_list_async(self.base_url, api_key=api_key)
36
40
 
37
41
  data = response.get("data", response)
38
42
 
@@ -48,7 +52,7 @@ class XAIProvider(OpenAIProvider):
48
52
  context_window_size = self.get_model_context_window_size(model_name)
49
53
 
50
54
  if not context_window_size:
51
- warnings.warn(f"Couldn't find context window size for model {model_name}")
55
+ logger.warning(f"Couldn't find context window size for model {model_name}")
52
56
  continue
53
57
 
54
58
  configs.append(
letta/schemas/run.py CHANGED
@@ -4,14 +4,14 @@ from typing import Optional
4
4
  from pydantic import ConfigDict, Field
5
5
 
6
6
  from letta.helpers.datetime_helpers import get_utc_time
7
- from letta.schemas.enums import RunStatus
7
+ from letta.schemas.enums import PrimitiveType, RunStatus
8
8
  from letta.schemas.job import LettaRequestConfig
9
9
  from letta.schemas.letta_base import LettaBase
10
10
  from letta.schemas.letta_stop_reason import StopReasonType
11
11
 
12
12
 
13
13
  class RunBase(LettaBase):
14
- __id_prefix__ = "run"
14
+ __id_prefix__ = PrimitiveType.RUN.value
15
15
 
16
16
 
17
17
  class Run(RunBase):
@@ -25,6 +25,7 @@ class Run(RunBase):
25
25
  created_at (datetime): The timestamp when the run was created.
26
26
  completed_at (datetime): The timestamp when the run was completed.
27
27
  agent_id (str): The unique identifier of the agent associated with the run.
28
+ base_template_id (str): The base template ID that the run belongs to.
28
29
  stop_reason (StopReasonType): The reason why the run was stopped.
29
30
  background (bool): Whether the run was created in background mode.
30
31
  metadata (dict): Additional metadata for the run.
@@ -41,6 +42,9 @@ class Run(RunBase):
41
42
  # Agent relationship
42
43
  agent_id: str = Field(..., description="The unique identifier of the agent associated with the run.")
43
44
 
45
+ # Template fields
46
+ base_template_id: Optional[str] = Field(None, description="The base template ID that the run belongs to.")
47
+
44
48
  # Run configuration
45
49
  background: Optional[bool] = Field(None, description="Whether the run was created in background mode.")
46
50
  metadata: Optional[dict] = Field(None, validation_alias="metadata_", description="Additional metadata for the run.")
@@ -65,4 +69,5 @@ class RunUpdate(RunBase):
65
69
  completed_at: Optional[datetime] = Field(None, description="The timestamp when the run was completed.")
66
70
  stop_reason: Optional[StopReasonType] = Field(None, description="The reason why the run was stopped.")
67
71
  metadata: Optional[dict] = Field(None, validation_alias="metadata_", description="Additional metadata for the run.")
72
+ total_duration_ns: Optional[int] = Field(None, description="Total run duration in nanoseconds")
68
73
  model_config = ConfigDict(extra="ignore") # Ignores extra fields
@@ -1,4 +1,4 @@
1
- from typing import Optional
1
+ from typing import List, Optional
2
2
 
3
3
  from pydantic import Field
4
4
 
@@ -17,5 +17,6 @@ class RunMetrics(RunMetricsBase):
17
17
  run_start_ns: Optional[int] = Field(None, description="The timestamp of the start of the run in nanoseconds.")
18
18
  run_ns: Optional[int] = Field(None, description="Total time for the run in nanoseconds.")
19
19
  num_steps: Optional[int] = Field(None, description="The number of steps in the run.")
20
+ tools_used: Optional[List[str]] = Field(None, description="List of tool IDs that were used in this run.")
20
21
  template_id: Optional[str] = Field(None, description="The template ID that the run belongs to (cloud only).")
21
22
  base_template_id: Optional[str] = Field(None, description="The base template ID that the run belongs to (cloud only).")
@@ -6,7 +6,7 @@ from pydantic import BaseModel, Field, model_validator
6
6
 
7
7
  from letta.constants import LETTA_TOOL_EXECUTION_DIR
8
8
  from letta.schemas.agent import AgentState
9
- from letta.schemas.enums import SandboxType
9
+ from letta.schemas.enums import PrimitiveType, SandboxType
10
10
  from letta.schemas.letta_base import LettaBase, OrmMetadataBase
11
11
  from letta.schemas.pip_requirement import PipRequirement
12
12
  from letta.services.tool_sandbox.modal_constants import DEFAULT_MODAL_TIMEOUT
@@ -92,7 +92,7 @@ class ModalSandboxConfig(BaseModel):
92
92
 
93
93
 
94
94
  class SandboxConfigBase(OrmMetadataBase):
95
- __id_prefix__ = "sandbox"
95
+ __id_prefix__ = PrimitiveType.SANDBOX_CONFIG.value
96
96
 
97
97
 
98
98
  class SandboxConfig(SandboxConfigBase):
letta/schemas/secret.py CHANGED
@@ -271,11 +271,14 @@ class Secret(BaseModel):
271
271
  def __get_pydantic_json_schema__(cls, core_schema: core_schema.CoreSchema, handler) -> Dict[str, Any]:
272
272
  """
273
273
  Define JSON schema representation for Secret fields.
274
+
274
275
  In JSON schema (OpenAPI docs), Secret fields appear as nullable strings.
275
276
  The actual encryption/decryption happens at runtime via __get_pydantic_core_schema__.
277
+
276
278
  Args:
277
279
  core_schema: The core schema for this type
278
280
  handler: Handler for generating JSON schema
281
+
279
282
  Returns:
280
283
  A JSON schema dict representing this type as a nullable string
281
284
  """
@@ -285,161 +288,3 @@ class Secret(BaseModel):
285
288
  "nullable": True,
286
289
  "description": "Encrypted secret value (stored as encrypted string)",
287
290
  }
288
-
289
-
290
- class SecretDict(BaseModel):
291
- """
292
- A wrapper for dictionaries containing sensitive key-value pairs.
293
-
294
- Used for custom headers and other key-value configurations.
295
-
296
- TODO: Once we deprecate plaintext columns in the database:
297
- - Remove the dual-write logic in to_dict()
298
- - Remove the from_db() method's plaintext_value parameter
299
- - Remove the _was_encrypted flag (no longer needed for migration)
300
- - Simplify get_plaintext() to only handle encrypted JSON values
301
- """
302
-
303
- _encrypted_value: Optional[str] = PrivateAttr(default=None)
304
- _plaintext_cache: Optional[Dict[str, str]] = PrivateAttr(default=None)
305
- _was_encrypted: bool = PrivateAttr(default=False)
306
-
307
- model_config = ConfigDict(frozen=True)
308
-
309
- @classmethod
310
- def from_plaintext(cls, value: Optional[Dict[str, str]]) -> "SecretDict":
311
- """Create a SecretDict from a plaintext dictionary."""
312
- if value is None:
313
- instance = cls()
314
- instance._encrypted_value = None
315
- instance._was_encrypted = False
316
- return instance
317
-
318
- # Serialize to JSON then try to encrypt
319
- json_str = json.dumps(value)
320
- try:
321
- encrypted = CryptoUtils.encrypt(json_str)
322
- instance = cls()
323
- instance._encrypted_value = encrypted
324
- instance._was_encrypted = False
325
- return instance
326
- except ValueError as e:
327
- # No encryption key available, store as plaintext JSON
328
- if "No encryption key configured" in str(e):
329
- logger.warning(
330
- "No encryption key configured. Storing SecretDict value as plaintext JSON. "
331
- "Set LETTA_ENCRYPTION_KEY environment variable to enable encryption."
332
- )
333
- instance = cls()
334
- instance._encrypted_value = json_str # Store JSON string
335
- instance._plaintext_cache = value # Cache the dict
336
- instance._was_encrypted = False
337
- return instance
338
- raise # Re-raise if it's a different error
339
-
340
- @classmethod
341
- def from_encrypted(cls, encrypted_value: Optional[str]) -> "SecretDict":
342
- """Create a SecretDict from an encrypted value."""
343
- instance = cls()
344
- instance._encrypted_value = encrypted_value
345
- instance._was_encrypted = True
346
- return instance
347
-
348
- @classmethod
349
- def from_db(cls, encrypted_value: Optional[str], plaintext_value: Optional[Dict[str, str]]) -> "SecretDict":
350
- """Create a SecretDict from database values during migration phase."""
351
- if encrypted_value is not None:
352
- return cls.from_encrypted(encrypted_value)
353
- elif plaintext_value is not None:
354
- return cls.from_plaintext(plaintext_value)
355
- else:
356
- return cls.from_plaintext(None)
357
-
358
- def get_encrypted(self) -> Optional[str]:
359
- """Get the encrypted value."""
360
- return self._encrypted_value
361
-
362
- def get_plaintext(self) -> Optional[Dict[str, str]]:
363
- """Get the decrypted dictionary."""
364
- if self._encrypted_value is None:
365
- return None
366
-
367
- # Use cached value if available, but only if it looks like plaintext
368
- # or we're confident we can decrypt it
369
- if self._plaintext_cache is not None:
370
- # If we have a cache but the stored value looks encrypted and we have no key,
371
- # we should not use the cache
372
- if CryptoUtils.is_encrypted(self._encrypted_value) and not CryptoUtils.is_encryption_available():
373
- self._plaintext_cache = None # Clear invalid cache
374
- else:
375
- return self._plaintext_cache
376
-
377
- try:
378
- decrypted_json = CryptoUtils.decrypt(self._encrypted_value)
379
- plaintext_dict = json.loads(decrypted_json)
380
- # Cache the decrypted value (PrivateAttr fields can be mutated even with frozen=True)
381
- self._plaintext_cache = plaintext_dict
382
- return plaintext_dict
383
- except ValueError as e:
384
- error_msg = str(e)
385
-
386
- # Handle missing encryption key
387
- if "No encryption key configured" in error_msg:
388
- # Check if the value looks encrypted
389
- if CryptoUtils.is_encrypted(self._encrypted_value):
390
- # Value was encrypted, but now we have no key - can't decrypt
391
- logger.warning(
392
- "Cannot decrypt SecretDict value - no encryption key configured. "
393
- "The value was encrypted and requires the original key to decrypt."
394
- )
395
- # Return None to indicate we can't get the plaintext
396
- return None
397
- else:
398
- # Value is plaintext JSON (stored when no key was available)
399
- logger.debug("SecretDict value is plaintext JSON (stored without encryption)")
400
- try:
401
- plaintext_dict = json.loads(self._encrypted_value)
402
- self._plaintext_cache = plaintext_dict
403
- return plaintext_dict
404
- except json.JSONDecodeError:
405
- logger.error("Failed to parse SecretDict plaintext as JSON")
406
- return None
407
-
408
- # Handle decryption failure (might be plaintext JSON)
409
- elif "Failed to decrypt data" in error_msg:
410
- # Check if it might be plaintext JSON
411
- if not CryptoUtils.is_encrypted(self._encrypted_value):
412
- # It's plaintext JSON that was stored when no key was available
413
- logger.debug("SecretDict value appears to be plaintext JSON (stored without encryption)")
414
- try:
415
- plaintext_dict = json.loads(self._encrypted_value)
416
- self._plaintext_cache = plaintext_dict
417
- return plaintext_dict
418
- except json.JSONDecodeError:
419
- logger.error("Failed to parse SecretDict plaintext as JSON")
420
- return None
421
- # Otherwise, it's corrupted or wrong key
422
- logger.error("Failed to decrypt SecretDict value - data may be corrupted or wrong key")
423
- raise
424
-
425
- # Migration case: handle legacy plaintext
426
- elif not self._was_encrypted:
427
- if self._encrypted_value:
428
- try:
429
- plaintext_dict = json.loads(self._encrypted_value)
430
- self._plaintext_cache = plaintext_dict
431
- return plaintext_dict
432
- except json.JSONDecodeError:
433
- pass
434
- return None
435
-
436
- # Re-raise for other errors
437
- raise
438
-
439
- def is_empty(self) -> bool:
440
- """Check if the secret dict is empty/None."""
441
- return self._encrypted_value is None
442
-
443
- def to_dict(self) -> Dict[str, Any]:
444
- """Convert to dictionary for database storage."""
445
- return {"encrypted": self.get_encrypted(), "plaintext": self.get_plaintext() if not self._was_encrypted else None}
letta/schemas/source.py CHANGED
@@ -5,7 +5,7 @@ from pydantic import Field
5
5
 
6
6
  from letta.helpers.tpuf_client import should_use_tpuf
7
7
  from letta.schemas.embedding_config import EmbeddingConfig
8
- from letta.schemas.enums import VectorDBProvider
8
+ from letta.schemas.enums import PrimitiveType, VectorDBProvider
9
9
  from letta.schemas.letta_base import LettaBase
10
10
 
11
11
 
@@ -14,7 +14,7 @@ class BaseSource(LettaBase):
14
14
  Shared attributes across all source schemas.
15
15
  """
16
16
 
17
- __id_prefix__ = "source"
17
+ __id_prefix__ = PrimitiveType.SOURCE.value
18
18
 
19
19
  # Core source fields
20
20
  name: str = Field(..., description="The name of the source.")
letta/schemas/step.py CHANGED
@@ -3,14 +3,14 @@ from typing import Dict, List, Literal, Optional
3
3
 
4
4
  from pydantic import Field
5
5
 
6
- from letta.schemas.enums import StepStatus
6
+ from letta.schemas.enums import PrimitiveType, StepStatus
7
7
  from letta.schemas.letta_base import LettaBase
8
8
  from letta.schemas.letta_stop_reason import StopReasonType
9
9
  from letta.schemas.message import Message
10
10
 
11
11
 
12
12
  class StepBase(LettaBase):
13
- __id_prefix__ = "step"
13
+ __id_prefix__ = PrimitiveType.STEP.value
14
14
 
15
15
 
16
16
  class Step(StepBase):
letta/schemas/tool.py CHANGED
@@ -11,6 +11,7 @@ from letta.constants import (
11
11
  LETTA_VOICE_TOOL_MODULE_NAME,
12
12
  MCP_TOOL_TAG_NAME_PREFIX,
13
13
  )
14
+ from letta.schemas.enums import PrimitiveType
14
15
 
15
16
  # MCP Tool metadata constants for schema health status
16
17
  MCP_TOOL_METADATA_SCHEMA_STATUS = f"{MCP_TOOL_TAG_NAME_PREFIX}:SCHEMA_STATUS"
@@ -28,7 +29,7 @@ logger = get_logger(__name__)
28
29
 
29
30
 
30
31
  class BaseTool(LettaBase):
31
- __id_prefix__ = "tool"
32
+ __id_prefix__ = PrimitiveType.TOOL.value
32
33
 
33
34
 
34
35
  class Tool(BaseTool):
@@ -63,6 +64,9 @@ class Tool(BaseTool):
63
64
  default_requires_approval: Optional[bool] = Field(
64
65
  None, description="Default value for whether or not executing this tool requires approval."
65
66
  )
67
+ enable_parallel_execution: Optional[bool] = Field(
68
+ False, description="If set to True, then this tool will potentially be executed concurrently with other tools. Default False."
69
+ )
66
70
 
67
71
  # metadata fields
68
72
  created_by_id: Optional[str] = Field(None, description="The id of the user that made this Tool.")
@@ -118,6 +122,9 @@ class ToolCreate(LettaBase):
118
122
  pip_requirements: list[PipRequirement] | None = Field(None, description="Optional list of pip packages required by this tool.")
119
123
  npm_requirements: list[NpmRequirement] | None = Field(None, description="Optional list of npm packages required by this tool.")
120
124
  default_requires_approval: Optional[bool] = Field(None, description="Whether or not to require approval before executing this tool.")
125
+ enable_parallel_execution: Optional[bool] = Field(
126
+ False, description="If set to True, then this tool will potentially be executed concurrently with other tools. Default False."
127
+ )
121
128
 
122
129
  @classmethod
123
130
  def from_mcp(cls, mcp_server_name: str, mcp_tool: MCPTool) -> "ToolCreate":
@@ -145,6 +152,19 @@ class ToolCreate(LettaBase):
145
152
  json_schema=json_schema,
146
153
  )
147
154
 
155
+ def model_dump(self, to_orm: bool = False, **kwargs):
156
+ """
157
+ Override LettaBase.model_dump to explicitly handle 'tags' being None,
158
+ ensuring that the output includes 'tags' as None (or any current value).
159
+ """
160
+ data = super().model_dump(**kwargs)
161
+ # TODO: consider making tags itself optional in the ORM
162
+ # Ensure 'tags' is included even when None, but only if tags is in the dict
163
+ # (i.e., don't add tags if exclude_unset=True was used and tags wasn't set)
164
+ if "tags" in data and data["tags"] is None:
165
+ data["tags"] = []
166
+ return data
167
+
148
168
 
149
169
  class ToolUpdate(LettaBase):
150
170
  description: Optional[str] = Field(None, description="The description of the tool.")
@@ -160,6 +180,9 @@ class ToolUpdate(LettaBase):
160
180
  npm_requirements: list[NpmRequirement] | None = Field(None, description="Optional list of npm packages required by this tool.")
161
181
  metadata_: Optional[Dict[str, Any]] = Field(None, description="A dictionary of additional metadata for the tool.")
162
182
  default_requires_approval: Optional[bool] = Field(None, description="Whether or not to require approval before executing this tool.")
183
+ enable_parallel_execution: Optional[bool] = Field(
184
+ False, description="If set to True, then this tool will potentially be executed concurrently with other tools. Default False."
185
+ )
163
186
  # name: Optional[str] = Field(None, description="The name of the tool (must match the JSON schema name and source code function name).")
164
187
 
165
188
  model_config = ConfigDict(extra="ignore") # Allows extra fields without validation errors
letta/schemas/usage.py CHANGED
@@ -22,5 +22,4 @@ class LettaUsageStatistics(BaseModel):
22
22
  total_tokens: int = Field(0, description="The total number of tokens processed by the agent.")
23
23
  step_count: int = Field(0, description="The number of steps taken by the agent.")
24
24
  # TODO: Optional for now. This field makes everyone's lives easier
25
- steps_messages: Optional[List[List[Message]]] = Field(None, description="The messages generated per step")
26
25
  run_ids: Optional[List[str]] = Field(None, description="The background task run IDs associated with the agent interaction")