letta-nightly 0.8.5.dev20250625104328__py3-none-any.whl → 0.8.6.dev20250625222533__py3-none-any.whl

This diff shows the changes between two package versions as published to a supported public registry. It is provided for informational purposes only and reflects the packages as they appear in that registry.
Files changed (78)
  1. letta/agent.py +16 -12
  2. letta/agents/base_agent.py +4 -1
  3. letta/agents/helpers.py +35 -3
  4. letta/agents/letta_agent.py +132 -106
  5. letta/agents/letta_agent_batch.py +4 -3
  6. letta/agents/voice_agent.py +12 -2
  7. letta/agents/voice_sleeptime_agent.py +12 -2
  8. letta/constants.py +24 -3
  9. letta/data_sources/redis_client.py +6 -0
  10. letta/errors.py +5 -0
  11. letta/functions/function_sets/files.py +10 -3
  12. letta/functions/function_sets/multi_agent.py +0 -32
  13. letta/groups/sleeptime_multi_agent_v2.py +6 -0
  14. letta/helpers/converters.py +4 -1
  15. letta/helpers/datetime_helpers.py +16 -23
  16. letta/helpers/message_helper.py +5 -2
  17. letta/helpers/tool_rule_solver.py +29 -2
  18. letta/interfaces/openai_streaming_interface.py +9 -2
  19. letta/llm_api/anthropic.py +11 -1
  20. letta/llm_api/anthropic_client.py +14 -3
  21. letta/llm_api/aws_bedrock.py +29 -15
  22. letta/llm_api/bedrock_client.py +74 -0
  23. letta/llm_api/google_ai_client.py +7 -3
  24. letta/llm_api/google_vertex_client.py +18 -4
  25. letta/llm_api/llm_client.py +7 -0
  26. letta/llm_api/openai_client.py +13 -0
  27. letta/orm/agent.py +5 -0
  28. letta/orm/block_history.py +1 -1
  29. letta/orm/enums.py +6 -25
  30. letta/orm/job.py +1 -2
  31. letta/orm/llm_batch_items.py +1 -1
  32. letta/orm/mcp_server.py +1 -1
  33. letta/orm/passage.py +7 -1
  34. letta/orm/sqlalchemy_base.py +7 -5
  35. letta/orm/tool.py +2 -1
  36. letta/schemas/agent.py +34 -10
  37. letta/schemas/enums.py +42 -1
  38. letta/schemas/job.py +6 -3
  39. letta/schemas/letta_request.py +4 -0
  40. letta/schemas/llm_batch_job.py +7 -2
  41. letta/schemas/memory.py +2 -2
  42. letta/schemas/providers.py +32 -6
  43. letta/schemas/run.py +1 -1
  44. letta/schemas/tool_rule.py +40 -12
  45. letta/serialize_schemas/pydantic_agent_schema.py +9 -2
  46. letta/server/rest_api/app.py +3 -2
  47. letta/server/rest_api/routers/v1/agents.py +25 -22
  48. letta/server/rest_api/routers/v1/runs.py +2 -3
  49. letta/server/rest_api/routers/v1/sources.py +31 -0
  50. letta/server/rest_api/routers/v1/voice.py +1 -0
  51. letta/server/rest_api/utils.py +38 -13
  52. letta/server/server.py +52 -21
  53. letta/services/agent_manager.py +58 -7
  54. letta/services/block_manager.py +1 -1
  55. letta/services/file_processor/chunker/line_chunker.py +2 -1
  56. letta/services/file_processor/file_processor.py +2 -9
  57. letta/services/files_agents_manager.py +177 -37
  58. letta/services/helpers/agent_manager_helper.py +77 -48
  59. letta/services/helpers/tool_parser_helper.py +2 -1
  60. letta/services/job_manager.py +33 -2
  61. letta/services/llm_batch_manager.py +1 -1
  62. letta/services/provider_manager.py +6 -4
  63. letta/services/tool_executor/core_tool_executor.py +1 -1
  64. letta/services/tool_executor/files_tool_executor.py +99 -30
  65. letta/services/tool_executor/multi_agent_tool_executor.py +1 -17
  66. letta/services/tool_executor/tool_execution_manager.py +6 -0
  67. letta/services/tool_executor/tool_executor_base.py +3 -0
  68. letta/services/tool_sandbox/base.py +39 -1
  69. letta/services/tool_sandbox/e2b_sandbox.py +7 -0
  70. letta/services/user_manager.py +3 -2
  71. letta/settings.py +8 -14
  72. letta/system.py +17 -17
  73. letta/templates/sandbox_code_file_async.py.j2 +59 -0
  74. {letta_nightly-0.8.5.dev20250625104328.dist-info → letta_nightly-0.8.6.dev20250625222533.dist-info}/METADATA +3 -2
  75. {letta_nightly-0.8.5.dev20250625104328.dist-info → letta_nightly-0.8.6.dev20250625222533.dist-info}/RECORD +78 -76
  76. {letta_nightly-0.8.5.dev20250625104328.dist-info → letta_nightly-0.8.6.dev20250625222533.dist-info}/LICENSE +0 -0
  77. {letta_nightly-0.8.5.dev20250625104328.dist-info → letta_nightly-0.8.6.dev20250625222533.dist-info}/WHEEL +0 -0
  78. {letta_nightly-0.8.5.dev20250625104328.dist-info → letta_nightly-0.8.6.dev20250625222533.dist-info}/entry_points.txt +0 -0
letta/llm_api/anthropic.py CHANGED
@@ -72,11 +72,16 @@ MODEL_LIST = [
         "name": "claude-3-opus-20240229",
         "context_window": 200000,
     },
-    # latest
+    # 3 latest
     {
         "name": "claude-3-opus-latest",
         "context_window": 200000,
     },
+    # 4
+    {
+        "name": "claude-opus-4-20250514",
+        "context_window": 200000,
+    },
     ## Sonnet
     # 3.0
     {
@@ -108,6 +113,11 @@ MODEL_LIST = [
         "name": "claude-3-7-sonnet-latest",
         "context_window": 200000,
     },
+    # 4
+    {
+        "name": "claude-sonnet-4-20250514",
+        "context_window": 200000,
+    },
     ## Haiku
     # 3.0
     {
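Note on the two hunks above: they only append static registry entries for the Claude 4 models. A minimal sketch of how such a MODEL_LIST is typically consumed (the helper below is hypothetical, not part of this diff):

# Hypothetical consumer of MODEL_LIST entries like the ones added above.
MODEL_LIST = [
    {"name": "claude-opus-4-20250514", "context_window": 200000},
    {"name": "claude-sonnet-4-20250514", "context_window": 200000},
]


def get_context_window(model_name: str, default: int = 8192) -> int:
    """Return the registered context window for a model, else a default."""
    for entry in MODEL_LIST:
        if entry["name"] == model_name:
            return entry["context_window"]
    return default


print(get_context_window("claude-sonnet-4-20250514"))  # 200000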
letta/llm_api/anthropic_client.py CHANGED
@@ -21,9 +21,11 @@ from letta.errors import (
     LLMPermissionDeniedError,
     LLMRateLimitError,
     LLMServerError,
+    LLMTimeoutError,
     LLMUnprocessableEntityError,
 )
 from letta.helpers.datetime_helpers import get_utc_time_int
+from letta.helpers.decorators import deprecated
 from letta.llm_api.helpers import add_inner_thoughts_to_functions, unpack_all_inner_thoughts_from_kwargs
 from letta.llm_api.llm_client_base import LLMClientBase
 from letta.local_llm.constants import INNER_THOUGHTS_KWARG, INNER_THOUGHTS_KWARG_DESCRIPTION
@@ -47,22 +49,23 @@ logger = get_logger(__name__)
 class AnthropicClient(LLMClientBase):
 
     @trace_method
+    @deprecated("Synchronous version of this is no longer valid. Will result in model_dump of coroutine")
     def request(self, request_data: dict, llm_config: LLMConfig) -> dict:
         client = self._get_anthropic_client(llm_config, async_client=False)
-        response = client.beta.messages.create(**request_data, betas=["tools-2024-04-04"])
+        response = client.beta.messages.create(**request_data)
         return response.model_dump()
 
     @trace_method
     async def request_async(self, request_data: dict, llm_config: LLMConfig) -> dict:
         client = await self._get_anthropic_client_async(llm_config, async_client=True)
-        response = await client.beta.messages.create(**request_data, betas=["tools-2024-04-04"])
+        response = await client.beta.messages.create(**request_data)
         return response.model_dump()
 
     @trace_method
     async def stream_async(self, request_data: dict, llm_config: LLMConfig) -> AsyncStream[BetaRawMessageStreamEvent]:
         client = await self._get_anthropic_client_async(llm_config, async_client=True)
         request_data["stream"] = True
-        return await client.beta.messages.create(**request_data, betas=["tools-2024-04-04"])
+        return await client.beta.messages.create(**request_data)
 
     @trace_method
     async def send_llm_batch_request_async(
@@ -299,6 +302,14 @@ class AnthropicClient(LLMClientBase):
 
     @trace_method
     def handle_llm_error(self, e: Exception) -> Exception:
+        if isinstance(e, anthropic.APITimeoutError):
+            logger.warning(f"[Anthropic] Request timeout: {e}")
+            return LLMTimeoutError(
+                message=f"Request to Anthropic timed out: {str(e)}",
+                code=ErrorCode.TIMEOUT,
+                details={"cause": str(e.__cause__) if e.__cause__ else None},
+            )
+
         if isinstance(e, anthropic.APIConnectionError):
             logger.warning(f"[Anthropic] API connection error: {e.__cause__}")
             return LLMConnectionError(
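The new @deprecated marker on the sync request path warns that it would model_dump a coroutine. A self-contained illustration of that failure mode, using stand-in names rather than Letta code:

import asyncio


# If the client getter becomes a coroutine function, a synchronous caller that
# forgets to await it gets a coroutine object back, not a client.
async def get_client_async():
    return {"ok": True}  # stand-in for an Anthropic client


def request_sync():
    client = get_client_async()  # BUG: never awaited; emits a RuntimeWarning at GC
    return type(client).__name__


async def request_async():
    client = await get_client_async()  # correct: awaited
    return type(client).__name__


print(request_sync())                # coroutine
print(asyncio.run(request_async()))  # dict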
letta/llm_api/aws_bedrock.py CHANGED
@@ -3,38 +3,32 @@ from typing import Any, Dict, List, Optional
 
 from anthropic import AnthropicBedrock
 
-from letta.log import get_logger
 from letta.settings import model_settings
 
-logger = get_logger(__name__)
-
 
 def has_valid_aws_credentials() -> bool:
     """
     Check if AWS credentials are properly configured.
     """
-    valid_aws_credentials = (
-        os.getenv("AWS_ACCESS_KEY") is not None and os.getenv("AWS_SECRET_ACCESS_KEY") is not None and os.getenv("AWS_REGION") is not None
-    )
+    valid_aws_credentials = os.getenv("AWS_ACCESS_KEY_ID") and os.getenv("AWS_SECRET_ACCESS_KEY") and os.getenv("AWS_DEFAULT_REGION")
     return valid_aws_credentials
 
 
 def get_bedrock_client(
-    access_key: Optional[str] = None,
+    access_key_id: Optional[str] = None,
     secret_key: Optional[str] = None,
-    region: Optional[str] = None,
+    default_region: Optional[str] = None,
 ):
     """
     Get a Bedrock client
     """
     import boto3
 
-    logger.debug(f"Getting Bedrock client for {model_settings.aws_region}")
     sts_client = boto3.client(
         "sts",
-        aws_access_key_id=access_key or model_settings.aws_access_key,
+        aws_access_key_id=access_key_id or model_settings.aws_access_key_id,
         aws_secret_access_key=secret_key or model_settings.aws_secret_access_key,
-        region_name=region or model_settings.aws_region,
+        region_name=default_region or model_settings.aws_default_region,
     )
     credentials = sts_client.get_session_token()["Credentials"]
 
@@ -42,7 +36,7 @@ def get_bedrock_client(
         aws_access_key=credentials["AccessKeyId"],
         aws_secret_key=credentials["SecretAccessKey"],
         aws_session_token=credentials["SessionToken"],
-        aws_region=region or model_settings.aws_region,
+        aws_region=default_region or model_settings.aws_default_region,
     )
     return bedrock
 
@@ -61,13 +55,34 @@ def bedrock_get_model_list(region_name: str) -> List[dict]:
     """
     import boto3
 
-    logger.debug(f"Getting model list for {region_name}")
     try:
         bedrock = boto3.client("bedrock", region_name=region_name)
         response = bedrock.list_inference_profiles()
         return response["inferenceProfileSummaries"]
     except Exception as e:
-        logger.exception(f"Error getting model list: {str(e)}", e)
+        print(f"Error getting model list: {str(e)}")
+        raise e
+
+
+async def bedrock_get_model_list_async(
+    access_key_id: Optional[str] = None,
+    secret_access_key: Optional[str] = None,
+    default_region: Optional[str] = None,
+) -> List[dict]:
+    from aioboto3.session import Session
+
+    try:
+        session = Session()
+        async with session.client(
+            "bedrock",
+            aws_access_key_id=access_key_id,
+            aws_secret_access_key=secret_access_key,
+            region_name=default_region,
+        ) as bedrock:
+            response = await bedrock.list_inference_profiles()
+            return response["inferenceProfileSummaries"]
+    except Exception as e:
+        print(f"Error getting model list: {str(e)}")
         raise e
 
 
@@ -78,7 +93,6 @@ def bedrock_get_model_details(region_name: str, model_id: str) -> Dict[str, Any]
     import boto3
     from botocore.exceptions import ClientError
 
-    logger.debug(f"Getting model details for {model_id}")
     try:
         bedrock = boto3.client("bedrock", region_name=region_name)
         response = bedrock.get_foundation_model(modelIdentifier=model_id)
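A hedged usage sketch for the new bedrock_get_model_list_async helper; the region value is illustrative, and aioboto3 plus ambient AWS credentials are assumed:

import asyncio

from letta.llm_api.aws_bedrock import bedrock_get_model_list_async


async def main():
    # Credentials fall back to the environment / ambient AWS config when the
    # keyword overrides are left as None.
    profiles = await bedrock_get_model_list_async(default_region="us-east-1")
    for profile in profiles:
        print(profile.get("inferenceProfileId"))


if __name__ == "__main__":
    asyncio.run(main())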
letta/llm_api/bedrock_client.py ADDED
@@ -0,0 +1,74 @@
+from typing import List, Optional, Union
+
+import anthropic
+from aioboto3.session import Session
+
+from letta.llm_api.anthropic_client import AnthropicClient
+from letta.log import get_logger
+from letta.otel.tracing import trace_method
+from letta.schemas.enums import ProviderCategory
+from letta.schemas.llm_config import LLMConfig
+from letta.schemas.message import Message as PydanticMessage
+from letta.services.provider_manager import ProviderManager
+from letta.settings import model_settings
+
+logger = get_logger(__name__)
+
+
+class BedrockClient(AnthropicClient):
+
+    @trace_method
+    async def _get_anthropic_client_async(
+        self, llm_config: LLMConfig, async_client: bool = False
+    ) -> Union[anthropic.AsyncAnthropic, anthropic.Anthropic, anthropic.AsyncAnthropicBedrock, anthropic.AnthropicBedrock]:
+        override_access_key_id, override_secret_access_key, override_default_region = None, None, None
+        if llm_config.provider_category == ProviderCategory.byok:
+            (
+                override_access_key_id,
+                override_secret_access_key,
+                override_default_region,
+            ) = await ProviderManager().get_bedrock_credentials_async(
+                llm_config.provider_name,
+                actor=self.actor,
+            )
+
+        session = Session()
+        async with session.client(
+            "sts",
+            aws_access_key_id=override_access_key_id or model_settings.aws_access_key_id,
+            aws_secret_access_key=override_secret_access_key or model_settings.aws_secret_access_key,
+            region_name=override_default_region or model_settings.aws_default_region,
+        ) as sts_client:
+            session_token = await sts_client.get_session_token()
+            credentials = session_token["Credentials"]
+
+        if async_client:
+            return anthropic.AsyncAnthropicBedrock(
+                aws_access_key=credentials["AccessKeyId"],
+                aws_secret_key=credentials["SecretAccessKey"],
+                aws_session_token=credentials["SessionToken"],
+                aws_region=override_default_region or model_settings.aws_default_region,
+                max_retries=model_settings.anthropic_max_retries,
+            )
+        else:
+            return anthropic.AnthropicBedrock(
+                aws_access_key=credentials["AccessKeyId"],
+                aws_secret_key=credentials["SecretAccessKey"],
+                aws_session_token=credentials["SessionToken"],
+                aws_region=override_default_region or model_settings.aws_default_region,
+                max_retries=model_settings.anthropic_max_retries,
+            )
+
+    @trace_method
+    def build_request_data(
+        self,
+        messages: List[PydanticMessage],
+        llm_config: LLMConfig,
+        tools: Optional[List[dict]] = None,
+        force_tool_call: Optional[str] = None,
+    ) -> dict:
+        data = super().build_request_data(messages, llm_config, tools, force_tool_call)
+        # remove disallowed fields
+        if "tool_choice" in data:
+            del data["tool_choice"]["disable_parallel_tool_use"]
+        return data
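The build_request_data override strips a single field: per the diff's own comment, disable_parallel_tool_use is apparently disallowed inside tool_choice on Bedrock. A standalone illustration of that step on an invented payload:

# Stand-in payload illustrating the field-stripping step above.
data = {"tool_choice": {"type": "auto", "disable_parallel_tool_use": True}}

if "tool_choice" in data:
    # .pop(..., None) is the defensive spelling; the diff uses `del`, which
    # assumes the key is always present whenever tool_choice is set.
    data["tool_choice"].pop("disable_parallel_tool_use", None)

print(data)  # {'tool_choice': {'type': 'auto'}}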
letta/llm_api/google_ai_client.py CHANGED
@@ -2,20 +2,24 @@ from typing import List, Optional, Tuple
 
 import httpx
 from google import genai
+from google.genai.types import HttpOptions
 
 from letta.errors import ErrorCode, LLMAuthenticationError, LLMError
 from letta.llm_api.google_constants import GOOGLE_MODEL_FOR_API_KEY_CHECK
 from letta.llm_api.google_vertex_client import GoogleVertexClient
 from letta.log import get_logger
-from letta.settings import model_settings
+from letta.settings import model_settings, settings
 
 logger = get_logger(__name__)
 
 
 class GoogleAIClient(GoogleVertexClient):
-
     def _get_client(self):
-        return genai.Client(api_key=model_settings.gemini_api_key)
+        timeout_ms = int(settings.llm_request_timeout_seconds * 1000)
+        return genai.Client(
+            api_key=model_settings.gemini_api_key,
+            http_options=HttpOptions(timeout=timeout_ms),
+        )
 
 
 def get_gemini_endpoint_and_headers(
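One subtlety in the new _get_client: the int(seconds * 1000) conversion implies the google-genai HttpOptions timeout is interpreted in milliseconds, while Letta's llm_request_timeout_seconds setting is in seconds. Assuming that reading, with an illustrative value:

# Example value; the real one comes from letta.settings.
llm_request_timeout_seconds = 60.0
timeout_ms = int(llm_request_timeout_seconds * 1000)
assert timeout_ms == 60_000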
letta/llm_api/google_vertex_client.py CHANGED
@@ -3,7 +3,14 @@ import uuid
 from typing import List, Optional
 
 from google import genai
-from google.genai.types import FunctionCallingConfig, FunctionCallingConfigMode, GenerateContentResponse, ThinkingConfig, ToolConfig
+from google.genai.types import (
+    FunctionCallingConfig,
+    FunctionCallingConfigMode,
+    GenerateContentResponse,
+    HttpOptions,
+    ThinkingConfig,
+    ToolConfig,
+)
 
 from letta.constants import NON_USER_MSG_PREFIX
 from letta.helpers.datetime_helpers import get_utc_time_int
@@ -26,11 +33,12 @@ logger = get_logger(__name__)
 class GoogleVertexClient(LLMClientBase):
 
     def _get_client(self):
+        timeout_ms = int(settings.llm_request_timeout_seconds * 1000)
         return genai.Client(
             vertexai=True,
             project=model_settings.google_cloud_project,
             location=model_settings.google_cloud_location,
-            http_options={"api_version": "v1"},
+            http_options=HttpOptions(api_version="v1", timeout=timeout_ms),
         )
 
     @trace_method
@@ -59,7 +67,8 @@ class GoogleVertexClient(LLMClientBase):
         )
         return response.model_dump()
 
-    def add_dummy_model_messages(self, messages: List[dict]) -> List[dict]:
+    @staticmethod
+    def add_dummy_model_messages(messages: List[dict]) -> List[dict]:
        """Google AI API requires all function call returns are immediately followed by a 'model' role message.
 
        In Letta, the 'model' will often call a function (e.g. send_message) that itself yields to the user,
@@ -90,7 +99,7 @@ class GoogleVertexClient(LLMClientBase):
        # Per https://ai.google.dev/gemini-api/docs/function-calling?example=meeting#notes_and_limitations
        # * Only a subset of the OpenAPI schema is supported.
        # * Supported parameter types in Python are limited.
-       unsupported_keys = ["default", "exclusiveMaximum", "exclusiveMinimum", "additionalProperties"]
+       unsupported_keys = ["default", "exclusiveMaximum", "exclusiveMinimum", "additionalProperties", "$schema"]
        keys_to_remove_at_this_level = [key for key in unsupported_keys if key in schema_part]
        for key_to_remove in keys_to_remove_at_this_level:
            logger.warning(f"Removing unsupported keyword '{key_to_remove}' from schema part.")
@@ -484,3 +493,8 @@ class GoogleVertexClient(LLMClientBase):
             "propertyOrdering": ["name", "args"],
             "required": ["name", "args"],
         }
+
+    @trace_method
+    def handle_llm_error(self, e: Exception) -> Exception:
+        # Fallback to base implementation
+        return super().handle_llm_error(e)
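The "$schema" addition above extends a recursive scrub of tool schemas: Gemini's function declarations accept only a subset of JSON Schema, so unsupported keys are removed at every nesting level before the schema is sent. A self-contained sketch of the idea (the helper name is illustrative, not the Letta implementation):

UNSUPPORTED_KEYS = {"default", "exclusiveMaximum", "exclusiveMinimum", "additionalProperties", "$schema"}


def strip_unsupported(schema_part):
    """Recursively drop schema keywords the Gemini API rejects."""
    if isinstance(schema_part, dict):
        return {k: strip_unsupported(v) for k, v in schema_part.items() if k not in UNSUPPORTED_KEYS}
    if isinstance(schema_part, list):
        return [strip_unsupported(item) for item in schema_part]
    return schema_part


print(strip_unsupported({
    "$schema": "http://json-schema.org/draft-07/schema#",
    "type": "object",
    "properties": {"x": {"type": "integer", "default": 1}},
}))
# {'type': 'object', 'properties': {'x': {'type': 'integer'}}}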
letta/llm_api/llm_client.py CHANGED
@@ -51,6 +51,13 @@ class LLMClient:
                     put_inner_thoughts_first=put_inner_thoughts_first,
                     actor=actor,
                 )
+            case ProviderType.bedrock:
+                from letta.llm_api.bedrock_client import BedrockClient
+
+                return BedrockClient(
+                    put_inner_thoughts_first=put_inner_thoughts_first,
+                    actor=actor,
+                )
             case ProviderType.openai | ProviderType.together:
                 from letta.llm_api.openai_client import OpenAIClient
 
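A minimal sketch of the dispatch pattern this hunk extends: match on the provider enum and import the concrete client lazily, so optional provider SDKs load only when that provider is selected (stand-in types, not Letta code):

from enum import Enum


class ProviderType(str, Enum):
    anthropic = "anthropic"
    bedrock = "bedrock"
    openai = "openai"


def create_client_name(provider: ProviderType) -> str:
    match provider:
        case ProviderType.bedrock:
            return "BedrockClient"  # real code does the lazy import + construction here
        case ProviderType.anthropic:
            return "AnthropicClient"
        case _:
            return "OpenAIClient"


print(create_client_name(ProviderType.bedrock))  # BedrockClient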
letta/llm_api/openai_client.py CHANGED
@@ -17,6 +17,7 @@ from letta.errors import (
     LLMPermissionDeniedError,
     LLMRateLimitError,
     LLMServerError,
+    LLMTimeoutError,
     LLMUnprocessableEntityError,
 )
 from letta.llm_api.helpers import add_inner_thoughts_to_functions, convert_to_structured_output, unpack_all_inner_thoughts_from_kwargs
@@ -317,6 +318,18 @@ class OpenAIClient(LLMClientBase):
         """
         Maps OpenAI-specific errors to common LLMError types.
         """
+        if isinstance(e, openai.APITimeoutError):
+            timeout_duration = getattr(e, "timeout", "unknown")
+            logger.warning(f"[OpenAI] Request timeout after {timeout_duration} seconds: {e}")
+            return LLMTimeoutError(
+                message=f"Request to OpenAI timed out: {str(e)}",
+                code=ErrorCode.TIMEOUT,
+                details={
+                    "timeout_duration": timeout_duration,
+                    "cause": str(e.__cause__) if e.__cause__ else None,
+                },
+            )
+
         if isinstance(e, openai.APIConnectionError):
             logger.warning(f"[OpenAI] API connection error: {e}")
             return LLMConnectionError(
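With both Anthropic and OpenAI timeouts now normalized to LLMTimeoutError, callers can branch on the shared type instead of provider-specific exceptions. A hedged caller-side sketch; the retry policy and the send callable are illustrative, not part of the diff:

from letta.errors import LLMTimeoutError


def call_with_retry(send, max_attempts: int = 3):
    """Retry a zero-argument LLM call on the normalized timeout error."""
    for attempt in range(1, max_attempts + 1):
        try:
            return send()
        except LLMTimeoutError as e:
            if attempt == max_attempts:
                raise
            print(f"LLM request timed out (attempt {attempt}), retrying: {e}")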
letta/orm/agent.py CHANGED
@@ -89,6 +89,9 @@ class Agent(SqlalchemyBase, OrganizationMixin, AsyncAttrs):
         Integer, nullable=True, doc="The duration in milliseconds of the agent's last run."
     )
 
+    # timezone
+    timezone: Mapped[Optional[str]] = mapped_column(String, nullable=True, doc="The timezone of the agent (for the context window).")
+
     # relationships
     organization: Mapped["Organization"] = relationship("Organization", back_populates="agents")
     tool_exec_environment_variables: Mapped[List["AgentEnvironmentVariable"]] = relationship(
@@ -187,6 +190,7 @@ class Agent(SqlalchemyBase, OrganizationMixin, AsyncAttrs):
             "response_format": self.response_format,
             "last_run_completion": self.last_run_completion,
             "last_run_duration_ms": self.last_run_duration_ms,
+            "timezone": self.timezone,
             # optional field defaults
             "tags": [],
             "tools": [],
@@ -261,6 +265,7 @@ class Agent(SqlalchemyBase, OrganizationMixin, AsyncAttrs):
             "last_updated_by_id": self.last_updated_by_id,
             "created_at": self.created_at,
             "updated_at": self.updated_at,
+            "timezone": self.timezone,
             "enable_sleeptime": self.enable_sleeptime,
             "response_format": self.response_format,
             "last_run_completion": self.last_run_completion,
letta/orm/block_history.py CHANGED
@@ -4,9 +4,9 @@ from typing import Optional
 from sqlalchemy import JSON, BigInteger, ForeignKey, Index, Integer, String, Text
 from sqlalchemy.orm import Mapped, mapped_column
 
-from letta.orm.enums import ActorType
 from letta.orm.mixins import OrganizationMixin
 from letta.orm.sqlalchemy_base import SqlalchemyBase
+from letta.schemas.enums import ActorType
 
 
 class BlockHistory(OrganizationMixin, SqlalchemyBase):
letta/orm/enums.py CHANGED
@@ -1,3 +1,9 @@
+"""Compatibility module for enums that were moved to address circular imports.
+
+This module maintains the old enum definitions for backwards compatibility,
+especially for pickled objects that reference the old import paths.
+"""
+
 from enum import Enum
 
 
@@ -14,28 +20,3 @@ class ToolType(str, Enum):
     EXTERNAL_LANGCHAIN = "external_langchain"
     # TODO is "external" the right name here? Since as of now, MCP is local / doesn't support remote?
     EXTERNAL_MCP = "external_mcp"
-
-
-class JobType(str, Enum):
-    JOB = "job"
-    RUN = "run"
-    BATCH = "batch"
-
-
-class ToolSourceType(str, Enum):
-    """Defines what a tool was derived from"""
-
-    python = "python"
-    json = "json"
-
-
-class ActorType(str, Enum):
-    LETTA_USER = "letta_user"
-    LETTA_AGENT = "letta_agent"
-    LETTA_SYSTEM = "letta_system"
-
-
-class MCPServerType(str, Enum):
-    SSE = "sse"
-    STDIO = "stdio"
-    STREAMABLE_HTTP = "streamable_http"
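The new module docstring cites pickled objects as the reason the old import path must keep resolving. A self-contained illustration of why pickle cares about import paths (stand-in enum, not Letta code):

import pickle
from enum import Enum


# pickle records the defining module and attribute name, then re-imports them
# on load; if a moved enum's old path stops resolving, old payloads fail with
# an AttributeError/ImportError, which is what the compatibility shim avoids.
class ActorType(str, Enum):
    LETTA_USER = "letta_user"


payload = pickle.dumps(ActorType.LETTA_USER)
print(pickle.loads(payload))  # works while the original import path still resolves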
letta/orm/job.py CHANGED
@@ -4,10 +4,9 @@ from typing import TYPE_CHECKING, List, Optional
 from sqlalchemy import JSON, Index, String
 from sqlalchemy.orm import Mapped, mapped_column, relationship
 
-from letta.orm.enums import JobType
 from letta.orm.mixins import UserMixin
 from letta.orm.sqlalchemy_base import SqlalchemyBase
-from letta.schemas.enums import JobStatus
+from letta.schemas.enums import JobStatus, JobType
 from letta.schemas.job import Job as PydanticJob
 from letta.schemas.job import LettaRequestConfig
 
letta/orm/llm_batch_items.py CHANGED
@@ -8,8 +8,8 @@ from sqlalchemy.orm import Mapped, mapped_column, relationship
 from letta.orm.custom_columns import AgentStepStateColumn, BatchRequestResultColumn, LLMConfigColumn
 from letta.orm.mixins import AgentMixin, OrganizationMixin
 from letta.orm.sqlalchemy_base import SqlalchemyBase
-from letta.schemas.agent import AgentStepState
 from letta.schemas.enums import AgentStepStatus, JobStatus
+from letta.schemas.llm_batch_job import AgentStepState
 from letta.schemas.llm_batch_job import LLMBatchItem as PydanticLLMBatchItem
 from letta.schemas.llm_config import LLMConfig
 
letta/orm/mcp_server.py CHANGED
@@ -7,9 +7,9 @@ from letta.functions.mcp_client.types import StdioServerConfig
 from letta.orm.custom_columns import MCPStdioServerConfigColumn
 
 # TODO everything in functions should live in this model
-from letta.orm.enums import MCPServerType
 from letta.orm.mixins import OrganizationMixin
 from letta.orm.sqlalchemy_base import SqlalchemyBase
+from letta.schemas.enums import MCPServerType
 from letta.schemas.mcp import MCPServer
 
 if TYPE_CHECKING:
letta/orm/passage.py CHANGED
@@ -60,13 +60,19 @@ class SourcePassage(BasePassage, FileMixin, SourceMixin):
 
     @declared_attr
     def __table_args__(cls):
+        # TODO (cliandy): investigate if this is necessary, may be for SQLite compatability or do we need to add as well?
         if settings.letta_pg_uri_no_default:
             return (
                 Index("source_passages_org_idx", "organization_id"),
                 Index("source_passages_created_at_id_idx", "created_at", "id"),
+                Index("source_passages_file_id_idx", "file_id"),
                 {"extend_existing": True},
             )
-        return (Index("source_passages_created_at_id_idx", "created_at", "id"), {"extend_existing": True})
+        return (
+            Index("source_passages_created_at_id_idx", "created_at", "id"),
+            Index("source_passages_file_id_idx", "file_id"),
+            {"extend_existing": True},
+        )
 
     @declared_attr
     def source(cls) -> Mapped["Source"]:
letta/orm/sqlalchemy_base.py CHANGED
@@ -49,6 +49,11 @@ def handle_db_timeout(func):
     return async_wrapper
 
 
+def is_postgresql_session(session: Session) -> bool:
+    """Check if the database session is PostgreSQL instead of SQLite for setting query options."""
+    return session.bind.dialect.name == "postgresql"
+
+
 class AccessType(str, Enum):
     ORGANIZATION = "organization"
     USER = "user"
@@ -490,20 +495,17 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base):
         Raises:
             NoResultFound: if the object is not found
         """
-        from letta.settings import settings
-
         identifiers = [] if identifier is None else [identifier]
         query, query_conditions = cls._read_multiple_preprocess(identifiers, actor, access, access_type, check_is_deleted, **kwargs)
         if query is None:
             raise NoResultFound(f"{cls.__name__} not found with identifier {identifier}")
-
-        if settings.letta_pg_uri_no_default:
+        if is_postgresql_session(db_session):
             await db_session.execute(text("SET LOCAL enable_seqscan = OFF"))
         try:
             result = await db_session.execute(query)
             item = result.scalar_one_or_none()
         finally:
-            if settings.letta_pg_uri_no_default:
+            if is_postgresql_session(db_session):
                 await db_session.execute(text("SET LOCAL enable_seqscan = ON"))
 
         if item is None:
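The is_postgresql_session helper replaces a settings flag with a runtime dialect check: SQLAlchemy exposes the backend name on the session bind's dialect, so Postgres-only statements (like the SET LOCAL enable_seqscan planner hint) can be gated per connection. A small sketch of the same check against an in-memory SQLite engine:

from sqlalchemy import create_engine
from sqlalchemy.orm import Session

engine = create_engine("sqlite://")  # in-memory SQLite for illustration
with Session(engine) as session:
    print(session.bind.dialect.name)  # "sqlite" -> the Postgres-only hint is skipped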
letta/orm/tool.py CHANGED
@@ -4,9 +4,10 @@ from sqlalchemy import JSON, Index, String, UniqueConstraint
 from sqlalchemy.orm import Mapped, mapped_column, relationship
 
 # TODO everything in functions should live in this model
-from letta.orm.enums import ToolSourceType, ToolType
+from letta.orm.enums import ToolType
 from letta.orm.mixins import OrganizationMixin
 from letta.orm.sqlalchemy_base import SqlalchemyBase
+from letta.schemas.enums import ToolSourceType
 from letta.schemas.tool import Tool as PydanticTool
 
 if TYPE_CHECKING: