letta-nightly 0.11.3.dev20250819104229__py3-none-any.whl → 0.11.4.dev20250820213507__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. letta/__init__.py +1 -1
  2. letta/agents/helpers.py +4 -0
  3. letta/agents/letta_agent.py +142 -5
  4. letta/constants.py +10 -7
  5. letta/data_sources/connectors.py +70 -53
  6. letta/embeddings.py +3 -240
  7. letta/errors.py +28 -0
  8. letta/functions/function_sets/base.py +4 -4
  9. letta/functions/functions.py +287 -32
  10. letta/functions/mcp_client/types.py +11 -0
  11. letta/functions/schema_validator.py +187 -0
  12. letta/functions/typescript_parser.py +196 -0
  13. letta/helpers/datetime_helpers.py +8 -4
  14. letta/helpers/tool_execution_helper.py +25 -2
  15. letta/llm_api/anthropic_client.py +23 -18
  16. letta/llm_api/azure_client.py +73 -0
  17. letta/llm_api/bedrock_client.py +8 -4
  18. letta/llm_api/google_vertex_client.py +14 -5
  19. letta/llm_api/llm_api_tools.py +2 -217
  20. letta/llm_api/llm_client.py +15 -1
  21. letta/llm_api/llm_client_base.py +32 -1
  22. letta/llm_api/openai.py +1 -0
  23. letta/llm_api/openai_client.py +18 -28
  24. letta/llm_api/together_client.py +55 -0
  25. letta/orm/provider.py +1 -0
  26. letta/orm/step_metrics.py +40 -1
  27. letta/otel/db_pool_monitoring.py +1 -1
  28. letta/schemas/agent.py +3 -4
  29. letta/schemas/agent_file.py +2 -0
  30. letta/schemas/block.py +11 -5
  31. letta/schemas/embedding_config.py +4 -5
  32. letta/schemas/enums.py +1 -1
  33. letta/schemas/job.py +2 -3
  34. letta/schemas/llm_config.py +79 -7
  35. letta/schemas/mcp.py +0 -24
  36. letta/schemas/message.py +0 -108
  37. letta/schemas/openai/chat_completion_request.py +1 -0
  38. letta/schemas/providers/__init__.py +0 -2
  39. letta/schemas/providers/anthropic.py +106 -8
  40. letta/schemas/providers/azure.py +102 -8
  41. letta/schemas/providers/base.py +10 -3
  42. letta/schemas/providers/bedrock.py +28 -16
  43. letta/schemas/providers/letta.py +3 -3
  44. letta/schemas/providers/ollama.py +2 -12
  45. letta/schemas/providers/openai.py +4 -4
  46. letta/schemas/providers/together.py +14 -2
  47. letta/schemas/sandbox_config.py +2 -1
  48. letta/schemas/tool.py +46 -22
  49. letta/server/rest_api/routers/v1/agents.py +179 -38
  50. letta/server/rest_api/routers/v1/folders.py +13 -8
  51. letta/server/rest_api/routers/v1/providers.py +10 -3
  52. letta/server/rest_api/routers/v1/sources.py +14 -8
  53. letta/server/rest_api/routers/v1/steps.py +17 -1
  54. letta/server/rest_api/routers/v1/tools.py +96 -5
  55. letta/server/rest_api/streaming_response.py +91 -45
  56. letta/server/server.py +27 -38
  57. letta/services/agent_manager.py +92 -20
  58. letta/services/agent_serialization_manager.py +11 -7
  59. letta/services/context_window_calculator/context_window_calculator.py +40 -2
  60. letta/services/helpers/agent_manager_helper.py +73 -12
  61. letta/services/mcp_manager.py +109 -15
  62. letta/services/passage_manager.py +28 -109
  63. letta/services/provider_manager.py +24 -0
  64. letta/services/step_manager.py +68 -0
  65. letta/services/summarizer/summarizer.py +1 -4
  66. letta/services/tool_executor/core_tool_executor.py +1 -1
  67. letta/services/tool_executor/sandbox_tool_executor.py +26 -9
  68. letta/services/tool_manager.py +82 -5
  69. letta/services/tool_sandbox/base.py +3 -11
  70. letta/services/tool_sandbox/modal_constants.py +17 -0
  71. letta/services/tool_sandbox/modal_deployment_manager.py +242 -0
  72. letta/services/tool_sandbox/modal_sandbox.py +218 -3
  73. letta/services/tool_sandbox/modal_sandbox_v2.py +429 -0
  74. letta/services/tool_sandbox/modal_version_manager.py +273 -0
  75. letta/services/tool_sandbox/safe_pickle.py +193 -0
  76. letta/settings.py +5 -3
  77. letta/templates/sandbox_code_file.py.j2 +2 -4
  78. letta/templates/sandbox_code_file_async.py.j2 +2 -4
  79. letta/utils.py +1 -1
  80. {letta_nightly-0.11.3.dev20250819104229.dist-info → letta_nightly-0.11.4.dev20250820213507.dist-info}/METADATA +2 -2
  81. {letta_nightly-0.11.3.dev20250819104229.dist-info → letta_nightly-0.11.4.dev20250820213507.dist-info}/RECORD +84 -81
  82. letta/llm_api/anthropic.py +0 -1206
  83. letta/llm_api/aws_bedrock.py +0 -104
  84. letta/llm_api/azure_openai.py +0 -118
  85. letta/llm_api/azure_openai_constants.py +0 -11
  86. letta/llm_api/cohere.py +0 -391
  87. letta/schemas/providers/cohere.py +0 -18
  88. {letta_nightly-0.11.3.dev20250819104229.dist-info → letta_nightly-0.11.4.dev20250820213507.dist-info}/LICENSE +0 -0
  89. {letta_nightly-0.11.3.dev20250819104229.dist-info → letta_nightly-0.11.4.dev20250820213507.dist-info}/WHEEL +0 -0
  90. {letta_nightly-0.11.3.dev20250819104229.dist-info → letta_nightly-0.11.4.dev20250820213507.dist-info}/entry_points.txt +0 -0
letta/services/tool_sandbox/modal_version_manager.py ADDED
@@ -0,0 +1,273 @@
+ """
+ Modal Version Manager - tracks and manages deployed Modal app versions.
+
+ We currently use the tools.metadata field to store the details of Modal
+ deployments and to determine when we need to redeploy due to changes.
+ """
+
+ import asyncio
+ import time
+ from datetime import datetime
+ from typing import Any
+
+ import modal
+ from pydantic import BaseModel, ConfigDict, Field
+
+ from letta.log import get_logger
+ from letta.schemas.tool import ToolUpdate
+ from letta.services.tool_manager import ToolManager
+ from letta.services.tool_sandbox.modal_constants import CACHE_TTL_SECONDS, DEFAULT_CONFIG_KEY, MODAL_DEPLOYMENTS_KEY
+
+ logger = get_logger(__name__)
+
+
+ class DeploymentInfo(BaseModel):
+     """Information about a deployed Modal app."""
+
+     model_config = ConfigDict(arbitrary_types_allowed=True)
+
+     app_name: str = Field(..., description="The name of the modal app.")
+     version_hash: str = Field(..., description="The version hash of the modal app.")
+     deployed_at: datetime = Field(..., description="The time the modal app was deployed.")
+     dependencies: set[str] = Field(default_factory=set, description="A set of dependencies.")
+     # app_reference: modal.App | None = Field(None, description="The reference to the modal app.", exclude=True)
+     app_reference: Any = Field(None, description="The reference to the modal app.", exclude=True)
+
+
+ class ModalVersionManager:
+     """Manages versions and deployments of Modal apps using tools.metadata."""
+
+     def __init__(self):
+         self.tool_manager = ToolManager()
+         self._deployment_locks: dict[str, asyncio.Lock] = {}
+         self._cache: dict[str, tuple[DeploymentInfo, float]] = {}
+         self._deployments_in_progress: dict[str, asyncio.Event] = {}
+         self._deployments: dict[str, DeploymentInfo] = {}  # Track all deployments for stats
+
+     @staticmethod
+     def _make_cache_key(tool_id: str, sandbox_config_id: str | None = None) -> str:
+         """Generate cache key for tool and config combination."""
+         return f"{tool_id}:{sandbox_config_id or DEFAULT_CONFIG_KEY}"
+
+     @staticmethod
+     def _get_config_key(sandbox_config_id: str | None = None) -> str:
+         """Get standardized config key."""
+         return sandbox_config_id or DEFAULT_CONFIG_KEY
+
+     def _is_cache_valid(self, timestamp: float) -> bool:
+         """Check if cache entry is still valid."""
+         return time.time() - timestamp < CACHE_TTL_SECONDS
+
+     def _get_deployment_metadata(self, tool) -> dict:
+         """Get or initialize modal deployments metadata."""
+         if not tool.metadata_:
+             tool.metadata_ = {}
+         if MODAL_DEPLOYMENTS_KEY not in tool.metadata_:
+             tool.metadata_[MODAL_DEPLOYMENTS_KEY] = {}
+         return tool.metadata_[MODAL_DEPLOYMENTS_KEY]
+
+     def _create_deployment_data(self, app_name: str, version_hash: str, dependencies: set[str]) -> dict:
+         """Create deployment data dictionary for metadata storage."""
+         return {
+             "app_name": app_name,
+             "version_hash": version_hash,
+             "deployed_at": datetime.now().isoformat(),
+             "dependencies": list(dependencies),
+         }
+
+     async def get_deployment(self, tool_id: str, sandbox_config_id: str | None = None, actor=None) -> DeploymentInfo | None:
+         """Get deployment info from tool metadata."""
+         cache_key = self._make_cache_key(tool_id, sandbox_config_id)
+
+         if cache_key in self._cache:
+             info, timestamp = self._cache[cache_key]
+             if self._is_cache_valid(timestamp):
+                 return info
+
+         tool = self.tool_manager.get_tool_by_id(tool_id, actor=actor)
+         if not tool or not tool.metadata_:
+             return None
+
+         modal_deployments = tool.metadata_.get(MODAL_DEPLOYMENTS_KEY, {})
+         config_key = self._get_config_key(sandbox_config_id)
+
+         if config_key not in modal_deployments:
+             return None
+
+         deployment_data = modal_deployments[config_key]
+
+         info = DeploymentInfo(
+             app_name=deployment_data["app_name"],
+             version_hash=deployment_data["version_hash"],
+             deployed_at=datetime.fromisoformat(deployment_data["deployed_at"]),
+             dependencies=set(deployment_data.get("dependencies", [])),
+             app_reference=None,
+         )
+
+         self._cache[cache_key] = (info, time.time())
+         return info
+
+     async def register_deployment(
+         self,
+         tool_id: str,
+         app_name: str,
+         version_hash: str,
+         app: modal.App,
+         dependencies: set[str] | None = None,
+         sandbox_config_id: str | None = None,
+         actor=None,
+     ) -> DeploymentInfo:
+         """Register a new deployment in tool metadata."""
+         cache_key = self._make_cache_key(tool_id, sandbox_config_id)
+         config_key = self._get_config_key(sandbox_config_id)
+
+         async with self.get_deployment_lock(cache_key):
+             tool = self.tool_manager.get_tool_by_id(tool_id, actor=actor)
+             if not tool:
+                 raise ValueError(f"Tool {tool_id} not found")
+
+             modal_deployments = self._get_deployment_metadata(tool)
+
+             info = DeploymentInfo(
+                 app_name=app_name,
+                 version_hash=version_hash,
+                 deployed_at=datetime.now(),
+                 dependencies=dependencies or set(),
+                 app_reference=app,
+             )
+
+             modal_deployments[config_key] = self._create_deployment_data(app_name, version_hash, info.dependencies)
+
+             # Use ToolUpdate to update metadata
+             tool_update = ToolUpdate(metadata_=tool.metadata_)
+             await self.tool_manager.update_tool_by_id_async(
+                 tool_id=tool_id,
+                 tool_update=tool_update,
+                 actor=actor,
+             )
+
+             self._cache[cache_key] = (info, time.time())
+             self._deployments[cache_key] = info  # Track for stats
+             return info
+
+     async def needs_redeployment(self, tool_id: str, current_version: str, sandbox_config_id: str | None = None, actor=None) -> bool:
+         """Check if an app needs to be redeployed."""
+         deployment = await self.get_deployment(tool_id, sandbox_config_id, actor=actor)
+         if not deployment:
+             return True
+         return deployment.version_hash != current_version
+
+     def get_deployment_lock(self, cache_key: str) -> asyncio.Lock:
+         """Get or create a deployment lock for a tool+config combination."""
+         if cache_key not in self._deployment_locks:
+             self._deployment_locks[cache_key] = asyncio.Lock()
+         return self._deployment_locks[cache_key]
+
+     def mark_deployment_in_progress(self, cache_key: str, version_hash: str) -> str:
+         """Mark that a deployment is in progress for a specific version.
+
+         Returns a unique deployment ID that should be used to complete/fail the deployment.
+         """
+         deployment_key = f"{cache_key}:{version_hash}"
+         if deployment_key not in self._deployments_in_progress:
+             self._deployments_in_progress[deployment_key] = asyncio.Event()
+         return deployment_key
+
+     def is_deployment_in_progress(self, cache_key: str, version_hash: str) -> bool:
+         """Check if a deployment is currently in progress."""
+         deployment_key = f"{cache_key}:{version_hash}"
+         return deployment_key in self._deployments_in_progress
+
+     async def wait_for_deployment(self, cache_key: str, version_hash: str, timeout: float = 120) -> bool:
+         """Wait for an in-progress deployment to complete.
+
+         Returns True if deployment completed within timeout, False otherwise.
+         """
+         deployment_key = f"{cache_key}:{version_hash}"
+         if deployment_key not in self._deployments_in_progress:
+             return True  # No deployment in progress
+
+         event = self._deployments_in_progress[deployment_key]
+         try:
+             await asyncio.wait_for(event.wait(), timeout=timeout)
+             return True
+         except asyncio.TimeoutError:
+             return False
+
+     def complete_deployment(self, deployment_key: str):
+         """Mark a deployment as complete and wake up any waiters."""
+         if deployment_key in self._deployments_in_progress:
+             self._deployments_in_progress[deployment_key].set()
+             # Clean up after a short delay to allow waiters to wake up
+             asyncio.create_task(self._cleanup_deployment_marker(deployment_key))
+
+     async def _cleanup_deployment_marker(self, deployment_key: str):
+         """Clean up deployment marker after a delay."""
+         await asyncio.sleep(5)  # Give waiters time to wake up
+         if deployment_key in self._deployments_in_progress:
+             del self._deployments_in_progress[deployment_key]
+
+     async def force_redeploy(self, tool_id: str, sandbox_config_id: str | None = None, actor=None):
+         """Force a redeployment by removing deployment info from tool metadata."""
+         cache_key = self._make_cache_key(tool_id, sandbox_config_id)
+         config_key = self._get_config_key(sandbox_config_id)
+
+         async with self.get_deployment_lock(cache_key):
+             tool = self.tool_manager.get_tool_by_id(tool_id, actor=actor)
+             if not tool or not tool.metadata_:
+                 return
+
+             modal_deployments = tool.metadata_.get(MODAL_DEPLOYMENTS_KEY, {})
+             if config_key in modal_deployments:
+                 del modal_deployments[config_key]
+
+             # Use ToolUpdate to update metadata
+             tool_update = ToolUpdate(metadata_=tool.metadata_)
+             await self.tool_manager.update_tool_by_id_async(
+                 tool_id=tool_id,
+                 tool_update=tool_update,
+                 actor=actor,
+             )
+
+             if cache_key in self._cache:
+                 del self._cache[cache_key]
+
+     def clear_deployments(self):
+         """Clear all deployment tracking (for testing purposes)."""
+         self._deployments.clear()
+         self._cache.clear()
+         self._deployments_in_progress.clear()
+
+     async def get_deployment_stats(self) -> dict:
+         """Get statistics about current deployments."""
+         total_deployments = len(self._deployments)
+         active_deployments = len([d for d in self._deployments.values() if d])
+         stale_deployments = total_deployments - active_deployments
+
+         deployments_list = []
+         for cache_key, deployment in self._deployments.items():
+             if deployment:
+                 deployments_list.append(
+                     {
+                         "app_name": deployment.app_name,
+                         "version": deployment.version_hash,
+                         "usage_count": 1,  # Track usage in future
+                         "deployed_at": deployment.deployed_at.isoformat(),
+                     }
+                 )
+
+         return {
+             "total_deployments": total_deployments,
+             "active_deployments": active_deployments,
+             "stale_deployments": stale_deployments,
+             "deployments": deployments_list,
+         }
+
+
+ _version_manager = None
+
+
+ def get_version_manager() -> ModalVersionManager:
+     """Get the global Modal version manager instance."""
+     global _version_manager
+     if _version_manager is None:
+         _version_manager = ModalVersionManager()
+     return _version_manager
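For orientation, here is a minimal sketch of how this manager is meant to be driven; it is not code from this release. ensure_deployed, the tool-123 id, and the modal.App construction are hypothetical placeholders, and the actual Modal deploy step is elided.

import modal

from letta.services.tool_sandbox.modal_version_manager import get_version_manager


async def ensure_deployed(tool_id: str, version_hash: str, actor=None) -> None:
    manager = get_version_manager()

    # Fast path: tool metadata already records this version hash.
    if not await manager.needs_redeployment(tool_id, version_hash, actor=actor):
        return

    # _make_cache_key is a private helper, called here purely for illustration.
    cache_key = manager._make_cache_key(tool_id)
    deployment_key = manager.mark_deployment_in_progress(cache_key, version_hash)
    try:
        app = modal.App(f"tool-{tool_id}")  # placeholder; the real deploy step is elided
        await manager.register_deployment(
            tool_id=tool_id,
            app_name=f"tool-{tool_id}",
            version_hash=version_hash,
            app=app,
            actor=actor,
        )
    finally:
        # Wake any concurrent callers blocked in wait_for_deployment().
        manager.complete_deployment(deployment_key)


# asyncio.run(ensure_deployed("tool-123", "abc123"))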
letta/services/tool_sandbox/safe_pickle.py ADDED
@@ -0,0 +1,193 @@
+ """Safe pickle serialization wrapper for Modal sandbox.
+
+ This module provides defensive serialization utilities to prevent segmentation
+ faults and other crashes when passing complex objects to Modal containers.
+ """
+
+ import pickle
+ import sys
+ from typing import Any, Optional, Tuple
+
+ from letta.log import get_logger
+
+ logger = get_logger(__name__)
+
+ # Serialization limits
+ MAX_PICKLE_SIZE = 10 * 1024 * 1024  # 10MB limit
+ MAX_RECURSION_DEPTH = 50  # Prevent deep object graphs
+ PICKLE_PROTOCOL = 4  # Use protocol 4 for better compatibility
+
+
+ class SafePickleError(Exception):
+     """Raised when safe pickling fails."""
+
+
+ class RecursionLimiter:
+     """Context manager to limit recursion depth during pickling."""
+
+     def __init__(self, max_depth: int):
+         self.max_depth = max_depth
+         self.original_limit = None
+
+     def __enter__(self):
+         self.original_limit = sys.getrecursionlimit()
+         sys.setrecursionlimit(min(self.max_depth, self.original_limit))
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         if self.original_limit is not None:
+             sys.setrecursionlimit(self.original_limit)
+
+
+ def safe_pickle_dumps(obj: Any, max_size: int = MAX_PICKLE_SIZE) -> bytes:
+     """Safely pickle an object with size and recursion limits.
+
+     Args:
+         obj: The object to pickle
+         max_size: Maximum allowed pickle size in bytes
+
+     Returns:
+         bytes: The pickled object
+
+     Raises:
+         SafePickleError: If pickling fails or exceeds limits
+     """
+     try:
+         # First check for obvious size issues
+         # Do a quick pickle to check size
+         quick_pickle = pickle.dumps(obj, protocol=PICKLE_PROTOCOL)
+         if len(quick_pickle) > max_size:
+             raise SafePickleError(f"Pickle size {len(quick_pickle)} exceeds limit {max_size}")
+
+         # Check recursion depth by traversing the object
+         def check_depth(obj, depth=0):
+             if depth > MAX_RECURSION_DEPTH:
+                 raise SafePickleError(f"Object graph too deep (depth > {MAX_RECURSION_DEPTH})")
+
+             if isinstance(obj, (list, tuple)):
+                 for item in obj:
+                     check_depth(item, depth + 1)
+             elif isinstance(obj, dict):
+                 for value in obj.values():
+                     check_depth(value, depth + 1)
+             elif hasattr(obj, "__dict__"):
+                 check_depth(obj.__dict__, depth + 1)
+
+         check_depth(obj)
+
+         logger.debug(f"Successfully pickled object of size {len(quick_pickle)} bytes")
+         return quick_pickle
+
+     except SafePickleError:
+         raise
+     except RecursionError as e:
+         raise SafePickleError(f"Object graph too deep: {e}")
+     except Exception as e:
+         raise SafePickleError(f"Failed to pickle object: {e}")
+
+
+ def safe_pickle_loads(data: bytes) -> Any:
+     """Safely unpickle data with error handling.
+
+     Args:
+         data: The pickled data
+
+     Returns:
+         Any: The unpickled object
+
+     Raises:
+         SafePickleError: If unpickling fails
+     """
+     if not data:
+         raise SafePickleError("Cannot unpickle empty data")
+
+     if len(data) > MAX_PICKLE_SIZE:
+         raise SafePickleError(f"Pickle data size {len(data)} exceeds limit {MAX_PICKLE_SIZE}")
+
+     try:
+         obj = pickle.loads(data)
+         logger.debug(f"Successfully unpickled object from {len(data)} bytes")
+         return obj
+     except Exception as e:
+         raise SafePickleError(f"Failed to unpickle data: {e}")
+
+
+ def try_pickle_with_fallback(obj: Any, fallback_value: Any = None, max_size: int = MAX_PICKLE_SIZE) -> Tuple[Optional[bytes], bool]:
+     """Try to pickle an object with fallback on failure.
+
+     Args:
+         obj: The object to pickle
+         fallback_value: Value to use if pickling fails
+         max_size: Maximum allowed pickle size
+
+     Returns:
+         Tuple of (pickled_data or None, success_flag)
+     """
+     try:
+         pickled = safe_pickle_dumps(obj, max_size)
+         return pickled, True
+     except SafePickleError as e:
+         logger.warning(f"Failed to pickle object, using fallback: {e}")
+         if fallback_value is not None:
+             try:
+                 pickled = safe_pickle_dumps(fallback_value, max_size)
+                 return pickled, False
+             except SafePickleError:
+                 pass
+         return None, False
+
+
+ def validate_pickleable(obj: Any) -> bool:
+     """Check if an object can be safely pickled.
+
+     Args:
+         obj: The object to validate
+
+     Returns:
+         bool: True if the object can be pickled safely
+     """
+     try:
+         # Try to pickle to a small buffer
+         safe_pickle_dumps(obj, max_size=MAX_PICKLE_SIZE)
+         return True
+     except SafePickleError:
+         return False
+
+
+ def sanitize_for_pickle(obj: Any) -> Any:
+     """Sanitize an object for safe pickling.
+
+     This function attempts to make an object pickleable by converting
+     problematic types to safe alternatives.
+
+     Args:
+         obj: The object to sanitize
+
+     Returns:
+         Any: A sanitized version of the object
+     """
+     # Handle common problematic types
+     if hasattr(obj, "__dict__"):
+         # For objects with __dict__, try to sanitize attributes
+         sanitized = {}
+         for key, value in obj.__dict__.items():
+             if key.startswith("_"):
+                 continue  # Skip private attributes
+
+             # Convert non-pickleable types
+             if callable(value):
+                 sanitized[key] = f"<function {value.__name__}>"
+             elif hasattr(value, "__module__"):
+                 sanitized[key] = f"<{value.__class__.__name__} object>"
+             else:
+                 try:
+                     # Test if the value is pickleable
+                     pickle.dumps(value, protocol=PICKLE_PROTOCOL)
+                     sanitized[key] = value
+                 except:
+                     sanitized[key] = str(value)
+
+         return sanitized
+
+     # For other types, return as-is and let pickle handle it
+     return obj
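A short sketch of the intended round trip, assuming a non-pickleable value (a lambda) and a plain-dict fallback:

from letta.services.tool_sandbox.safe_pickle import (
    SafePickleError,
    safe_pickle_loads,
    try_pickle_with_fallback,
)

# A lambda cannot be pickled, so the fallback dict gets serialized instead
# and the success flag comes back False.
payload = {"result": lambda x: x}  # hypothetical tool output
data, ok = try_pickle_with_fallback(payload, fallback_value={"result": None})
assert not ok and data is not None

restored = safe_pickle_loads(data)  # -> {"result": None}

# Corrupt or oversized data raises SafePickleError instead of crashing.
try:
    safe_pickle_loads(b"not a pickle")
except SafePickleError as e:
    print(f"rejected: {e}")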
letta/settings.py CHANGED
@@ -18,7 +18,8 @@ class ToolSettings(BaseSettings):
      e2b_api_key: str | None = Field(default=None, description="API key for using E2B as a tool sandbox")
      e2b_sandbox_template_id: str | None = Field(default=None, description="Template ID for E2B Sandbox. Updated Manually.")
 
-     modal_api_key: str | None = Field(default=None, description="API key for using Modal as a tool sandbox")
+     modal_token_id: str | None = Field(default=None, description="Token id for using Modal as a tool sandbox")
+     modal_token_secret: str | None = Field(default=None, description="Token secret for using Modal as a tool sandbox")
 
      # Search Providers
      tavily_api_key: str | None = Field(default=None, description="API key for using Tavily as a search provider.")
@@ -41,7 +42,7 @@ class ToolSettings(BaseSettings):
      def sandbox_type(self) -> SandboxType:
          if self.e2b_api_key:
              return SandboxType.E2B
-         elif self.modal_api_key:
+         elif self.modal_token_id and self.modal_token_secret:
              return SandboxType.MODAL
          else:
              return SandboxType.LOCAL
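With this change, Modal is selected only when both halves of the token are configured. A rough sketch of how the fallback resolves, assuming sandbox_type is exposed as a property and the settings are constructed in a clean environment (no sandbox-related env vars set):

from letta.settings import ToolSettings

print(ToolSettings(modal_token_id="id-...", modal_token_secret="secret-...").sandbox_type)  # SandboxType.MODAL
print(ToolSettings(modal_token_id="id-...").sandbox_type)  # SandboxType.LOCAL (secret missing)
print(ToolSettings(e2b_api_key="e2b_...").sandbox_type)  # SandboxType.E2B (E2B takes precedence)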
@@ -267,6 +268,7 @@ class Settings(BaseSettings):
      # experimental toggle
      use_experimental: bool = False
      use_vertex_structured_outputs_experimental: bool = False
+     use_asyncio_shield: bool = True
 
      # Database pool monitoring
      enable_db_pool_monitoring: bool = True  # Enable connection pool monitoring
@@ -339,7 +341,7 @@
  class TestSettings(Settings):
      model_config = SettingsConfigDict(env_prefix="letta_test_", extra="ignore")
 
-     letta_dir: Path | None = Field(Path.home() / ".letta/test", env="LETTA_TEST_DIR")
+     letta_dir: Path | None = Field(Path.home() / ".letta/test", alias="LETTA_TEST_DIR")
 
 
  class LogSettings(BaseSettings):
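The env= keyword was a Pydantic v1 idiom; in pydantic-settings v2 the environment variable is bound through the field alias instead. A minimal sketch of the pattern, using a hypothetical DemoSettings rather than the real TestSettings:

from pathlib import Path

from pydantic import Field
from pydantic_settings import BaseSettings


class DemoSettings(BaseSettings):
    # The alias doubles as the environment variable name to look up.
    letta_dir: Path | None = Field(Path.home() / ".letta/test", alias="LETTA_TEST_DIR")


# $ LETTA_TEST_DIR=/tmp/letta python demo.py
print(DemoSettings().letta_dir)  # /tmp/letta when the env var is set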
letta/templates/sandbox_code_file.py.j2 CHANGED
@@ -28,15 +28,13 @@ _function_result = {{ invoke_function_call }}
 
  {# Use a temporary Pydantic wrapper to recursively serialize any nested Pydantic objects #}
  try:
-     from pydantic import BaseModel
+     from pydantic import BaseModel, ConfigDict
      from typing import Any
 
      class _TempResultWrapper(BaseModel):
+         model_config = ConfigDict(arbitrary_types_allowed=True)
          result: Any
 
-         class Config:
-             arbitrary_types_allowed = True
-
      _wrapped = _TempResultWrapper(result=_function_result)
      _serialized_result = _wrapped.model_dump()['result']
  except ImportError:
letta/templates/sandbox_code_file_async.py.j2 CHANGED
@@ -30,15 +30,13 @@ async def _async_wrapper():
 
      {# Use a temporary Pydantic wrapper to recursively serialize any nested Pydantic objects #}
      try:
-         from pydantic import BaseModel
+         from pydantic import BaseModel, ConfigDict
          from typing import Any
 
          class _TempResultWrapper(BaseModel):
+             model_config = ConfigDict(arbitrary_types_allowed=True)
              result: Any
 
-             class Config:
-                 arbitrary_types_allowed = True
-
          _wrapped = _TempResultWrapper(result=_function_result)
          _serialized_result = _wrapped.model_dump()['result']
      except ImportError:
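Both templates swap Pydantic v1's nested class Config for the v2 model_config = ConfigDict(...) idiom. Outside the template, the wrapper pattern looks roughly like this (Point is an invented stand-in for a tool's return model):

from typing import Any

from pydantic import BaseModel, ConfigDict


class _TempResultWrapper(BaseModel):
    # arbitrary_types_allowed lets the wrapper accept non-Pydantic results too.
    model_config = ConfigDict(arbitrary_types_allowed=True)
    result: Any


class Point(BaseModel):
    x: int
    y: int


# model_dump() recursively converts nested Pydantic objects to plain dicts.
wrapped = _TempResultWrapper(result={"points": [Point(x=1, y=2)]})
print(wrapped.model_dump()["result"])  # {'points': [{'x': 1, 'y': 2}]}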
letta/utils.py CHANGED
@@ -874,7 +874,7 @@ def validate_function_response(function_response: Any, return_char_limit: int, s
          function_response_string = str(function_response)
 
      # TODO we should change this to a max token limit that's variable based on tokens remaining (or context-window)
-     if truncate and len(function_response_string) > return_char_limit:
+     if truncate and return_char_limit and len(function_response_string) > return_char_limit:
          logger.warning(f"function return was over limit ({len(function_response_string)} > {return_char_limit}) and was truncated")
          function_response_string = f"{function_response_string[:return_char_limit]}... [NOTE: function output was truncated since it exceeded the character limit ({len(function_response_string)} > {return_char_limit})]"
 
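The added return_char_limit check makes a falsy limit (0 or None) mean "never truncate" rather than "truncate to nothing". A hypothetical maybe_truncate mirroring the guard:

def maybe_truncate(text: str, truncate: bool, return_char_limit: int | None) -> str:
    # A falsy limit now short-circuits the whole condition.
    if truncate and return_char_limit and len(text) > return_char_limit:
        return text[:return_char_limit] + "... [truncated]"
    return text


print(len(maybe_truncate("x" * 10_000, truncate=True, return_char_limit=100)))  # 115
print(len(maybe_truncate("x" * 10_000, truncate=True, return_char_limit=0)))  # 10000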
{letta_nightly-0.11.3.dev20250819104229.dist-info → letta_nightly-0.11.4.dev20250820213507.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: letta-nightly
- Version: 0.11.3.dev20250819104229
+ Version: 0.11.4.dev20250820213507
  Summary: Create LLM agents with long-term memory and custom tools
  License: Apache License
  Author: Letta Team
@@ -68,7 +68,7 @@ Requires-Dist: mistralai (>=1.8.1,<2.0.0)
  Requires-Dist: modal (>=1.1.0,<2.0.0) ; extra == "cloud-tool-sandbox"
  Requires-Dist: nltk (>=3.8.1,<4.0.0)
  Requires-Dist: numpy (>=2.1.0,<3.0.0)
- Requires-Dist: openai (>=1.60.0,<2.0.0)
+ Requires-Dist: openai (>=1.99.9,<2.0.0)
  Requires-Dist: opentelemetry-api (==1.30.0)
  Requires-Dist: opentelemetry-exporter-otlp (==1.30.0)
  Requires-Dist: opentelemetry-instrumentation-requests (==0.51b0)