letta-nightly 0.8.5.dev20250625104328__py3-none-any.whl → 0.8.6.dev20250626104326__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. letta/agent.py +16 -12
  2. letta/agents/base_agent.py +4 -1
  3. letta/agents/helpers.py +35 -3
  4. letta/agents/letta_agent.py +132 -106
  5. letta/agents/letta_agent_batch.py +4 -3
  6. letta/agents/voice_agent.py +12 -2
  7. letta/agents/voice_sleeptime_agent.py +12 -2
  8. letta/constants.py +24 -3
  9. letta/data_sources/redis_client.py +6 -0
  10. letta/errors.py +5 -0
  11. letta/functions/function_sets/files.py +10 -3
  12. letta/functions/function_sets/multi_agent.py +0 -32
  13. letta/groups/sleeptime_multi_agent_v2.py +6 -0
  14. letta/helpers/converters.py +4 -1
  15. letta/helpers/datetime_helpers.py +16 -23
  16. letta/helpers/message_helper.py +5 -2
  17. letta/helpers/tool_rule_solver.py +29 -2
  18. letta/interfaces/openai_streaming_interface.py +9 -2
  19. letta/llm_api/anthropic.py +11 -1
  20. letta/llm_api/anthropic_client.py +14 -3
  21. letta/llm_api/aws_bedrock.py +29 -15
  22. letta/llm_api/bedrock_client.py +74 -0
  23. letta/llm_api/google_ai_client.py +7 -3
  24. letta/llm_api/google_vertex_client.py +18 -4
  25. letta/llm_api/llm_client.py +7 -0
  26. letta/llm_api/openai_client.py +13 -0
  27. letta/orm/agent.py +5 -0
  28. letta/orm/block_history.py +1 -1
  29. letta/orm/enums.py +6 -25
  30. letta/orm/job.py +1 -2
  31. letta/orm/llm_batch_items.py +1 -1
  32. letta/orm/mcp_server.py +1 -1
  33. letta/orm/passage.py +7 -1
  34. letta/orm/sqlalchemy_base.py +7 -5
  35. letta/orm/tool.py +2 -1
  36. letta/schemas/agent.py +34 -10
  37. letta/schemas/enums.py +42 -1
  38. letta/schemas/job.py +6 -3
  39. letta/schemas/letta_request.py +4 -0
  40. letta/schemas/llm_batch_job.py +7 -2
  41. letta/schemas/memory.py +2 -2
  42. letta/schemas/providers.py +32 -6
  43. letta/schemas/run.py +1 -1
  44. letta/schemas/tool_rule.py +40 -12
  45. letta/serialize_schemas/pydantic_agent_schema.py +9 -2
  46. letta/server/rest_api/app.py +3 -2
  47. letta/server/rest_api/routers/v1/agents.py +25 -22
  48. letta/server/rest_api/routers/v1/runs.py +2 -3
  49. letta/server/rest_api/routers/v1/sources.py +31 -0
  50. letta/server/rest_api/routers/v1/voice.py +1 -0
  51. letta/server/rest_api/utils.py +38 -13
  52. letta/server/server.py +52 -21
  53. letta/services/agent_manager.py +58 -7
  54. letta/services/block_manager.py +1 -1
  55. letta/services/file_processor/chunker/line_chunker.py +2 -1
  56. letta/services/file_processor/file_processor.py +2 -9
  57. letta/services/files_agents_manager.py +177 -37
  58. letta/services/helpers/agent_manager_helper.py +77 -48
  59. letta/services/helpers/tool_parser_helper.py +2 -1
  60. letta/services/job_manager.py +33 -2
  61. letta/services/llm_batch_manager.py +1 -1
  62. letta/services/provider_manager.py +6 -4
  63. letta/services/tool_executor/core_tool_executor.py +1 -1
  64. letta/services/tool_executor/files_tool_executor.py +99 -30
  65. letta/services/tool_executor/multi_agent_tool_executor.py +1 -17
  66. letta/services/tool_executor/tool_execution_manager.py +6 -0
  67. letta/services/tool_executor/tool_executor_base.py +3 -0
  68. letta/services/tool_sandbox/base.py +39 -1
  69. letta/services/tool_sandbox/e2b_sandbox.py +7 -0
  70. letta/services/user_manager.py +3 -2
  71. letta/settings.py +8 -14
  72. letta/system.py +17 -17
  73. letta/templates/sandbox_code_file_async.py.j2 +59 -0
  74. {letta_nightly-0.8.5.dev20250625104328.dist-info → letta_nightly-0.8.6.dev20250626104326.dist-info}/METADATA +3 -2
  75. {letta_nightly-0.8.5.dev20250625104328.dist-info → letta_nightly-0.8.6.dev20250626104326.dist-info}/RECORD +78 -76
  76. {letta_nightly-0.8.5.dev20250625104328.dist-info → letta_nightly-0.8.6.dev20250626104326.dist-info}/LICENSE +0 -0
  77. {letta_nightly-0.8.5.dev20250625104328.dist-info → letta_nightly-0.8.6.dev20250626104326.dist-info}/WHEEL +0 -0
  78. {letta_nightly-0.8.5.dev20250625104328.dist-info → letta_nightly-0.8.6.dev20250626104326.dist-info}/entry_points.txt +0 -0
letta/services/tool_executor/multi_agent_tool_executor.py CHANGED
@@ -10,7 +10,6 @@ from letta.schemas.sandbox_config import SandboxConfig
 from letta.schemas.tool import Tool
 from letta.schemas.tool_execution_result import ToolExecutionResult
 from letta.schemas.user import User
-from letta.services.tool_executor.tool_executor import logger
 from letta.services.tool_executor.tool_executor_base import ToolExecutor
 
 
@@ -30,7 +29,6 @@ class LettaMultiAgentToolExecutor(ToolExecutor):
         assert agent_state is not None, "Agent state is required for multi-agent tools"
         function_map = {
             "send_message_to_agent_and_wait_for_reply": self.send_message_to_agent_and_wait_for_reply,
-            "send_message_to_agent_async": self.send_message_to_agent_async,
             "send_message_to_agents_matching_tags": self.send_message_to_agents_matching_tags_async,
         }
 
@@ -54,21 +52,6 @@ class LettaMultiAgentToolExecutor(ToolExecutor):
 
         return str(await self._process_agent(agent_id=other_agent_id, message=augmented_message))
 
-    async def send_message_to_agent_async(self, agent_state: AgentState, message: str, other_agent_id: str) -> str:
-        # 1) Build the prefixed system‐message
-        prefixed = (
-            f"[Incoming message from agent with ID '{agent_state.id}' - "
-            f"to reply to this message, make sure to use the "
-            f"'send_message_to_agent_async' tool, or the agent will not receive your message] "
-            f"{message}"
-        )
-
-        task = asyncio.create_task(self._process_agent(agent_id=other_agent_id, message=prefixed))
-
-        task.add_done_callback(lambda t: (logger.error(f"Async send_message task failed: {t.exception()}") if t.exception() else None))
-
-        return "Successfully sent message"
-
     async def send_message_to_agents_matching_tags_async(
         self, agent_state: AgentState, message: str, match_all: List[str], match_some: List[str]
     ) -> str:
@@ -101,6 +84,7 @@ class LettaMultiAgentToolExecutor(ToolExecutor):
             message_manager=self.message_manager,
             agent_manager=self.agent_manager,
             block_manager=self.block_manager,
+            job_manager=self.job_manager,
             passage_manager=self.passage_manager,
             actor=self.actor,
         )
letta/services/tool_executor/tool_execution_manager.py CHANGED
@@ -15,6 +15,7 @@ from letta.schemas.tool_execution_result import ToolExecutionResult
 from letta.schemas.user import User
 from letta.services.agent_manager import AgentManager
 from letta.services.block_manager import BlockManager
+from letta.services.job_manager import JobManager
 from letta.services.message_manager import MessageManager
 from letta.services.passage_manager import PassageManager
 from letta.services.tool_executor.builtin_tool_executor import LettaBuiltinToolExecutor
@@ -49,6 +50,7 @@ class ToolExecutorFactory:
         message_manager: MessageManager,
         agent_manager: AgentManager,
         block_manager: BlockManager,
+        job_manager: JobManager,
         passage_manager: PassageManager,
         actor: User,
     ) -> ToolExecutor:
@@ -58,6 +60,7 @@
             message_manager=message_manager,
             agent_manager=agent_manager,
             block_manager=block_manager,
+            job_manager=job_manager,
             passage_manager=passage_manager,
             actor=actor,
         )
@@ -71,6 +74,7 @@ class ToolExecutionManager:
         message_manager: MessageManager,
         agent_manager: AgentManager,
         block_manager: BlockManager,
+        job_manager: JobManager,
         passage_manager: PassageManager,
         actor: User,
         agent_state: Optional[AgentState] = None,
@@ -80,6 +84,7 @@
         self.message_manager = message_manager
         self.agent_manager = agent_manager
         self.block_manager = block_manager
+        self.job_manager = job_manager
         self.passage_manager = passage_manager
         self.agent_state = agent_state
         self.logger = get_logger(__name__)
@@ -101,6 +106,7 @@
             message_manager=self.message_manager,
             agent_manager=self.agent_manager,
             block_manager=self.block_manager,
+            job_manager=self.job_manager,
             passage_manager=self.passage_manager,
             actor=self.actor,
         )
letta/services/tool_executor/tool_executor_base.py CHANGED
@@ -8,6 +8,7 @@ from letta.schemas.tool_execution_result import ToolExecutionResult
 from letta.schemas.user import User
 from letta.services.agent_manager import AgentManager
 from letta.services.block_manager import BlockManager
+from letta.services.job_manager import JobManager
 from letta.services.message_manager import MessageManager
 from letta.services.passage_manager import PassageManager
 
@@ -20,12 +21,14 @@ class ToolExecutor(ABC):
         message_manager: MessageManager,
         agent_manager: AgentManager,
         block_manager: BlockManager,
+        job_manager: JobManager,
         passage_manager: PassageManager,
         actor: User,
     ):
         self.message_manager = message_manager
         self.agent_manager = agent_manager
         self.block_manager = block_manager
+        self.job_manager = job_manager
         self.passage_manager = passage_manager
         self.actor = actor
 
letta/services/tool_sandbox/base.py CHANGED
@@ -52,6 +52,9 @@ class AsyncToolSandboxBase(ABC):
         else:
             self.inject_agent_state = False
 
+        # Detect if the tool function is async
+        self.is_async_function = self._detect_async_function()
+
     # Lazily initialize the manager only when needed
     @property
     def sandbox_config_manager(self):
@@ -78,7 +81,8 @@
         """
         from letta.templates.template_helper import render_template
 
-        TEMPLATE_NAME = "sandbox_code_file.py.j2"
+        # Select the appropriate template based on whether the function is async
+        TEMPLATE_NAME = "sandbox_code_file_async.py.j2" if self.is_async_function else "sandbox_code_file.py.j2"
 
         future_import = False
         schema_code = None
@@ -114,6 +118,7 @@
             invoke_function_call=self.invoke_function_call(),
             wrap_print_with_markers=wrap_print_with_markers,
             start_marker=self.LOCAL_SANDBOX_RESULT_START_MARKER,
+            use_top_level_await=self.use_top_level_await(),
         )
 
     def initialize_param(self, name: str, raw_value: JsonValue) -> str:
@@ -150,5 +155,38 @@
         func_call_str = self.tool.name + "(" + params + ")"
         return func_call_str
 
+    def _detect_async_function(self) -> bool:
+        """
+        Detect if the tool function is an async function by examining its source code.
+        Uses AST parsing to reliably detect 'async def' declarations.
+        """
+        import ast
+
+        try:
+            # Parse the source code to AST
+            tree = ast.parse(self.tool.source_code)
+
+            # Look for function definitions
+            for node in ast.walk(tree):
+                if isinstance(node, ast.AsyncFunctionDef) and node.name == self.tool.name:
+                    return True
+                elif isinstance(node, ast.FunctionDef) and node.name == self.tool.name:
+                    return False
+
+            # If we couldn't find the function definition, fall back to string matching
+            return "async def " + self.tool.name in self.tool.source_code
+
+        except SyntaxError:
+            # If source code can't be parsed, fall back to string matching
+            return "async def " + self.tool.name in self.tool.source_code
+
+    def use_top_level_await(self) -> bool:
+        """
+        Determine if this sandbox environment supports top-level await.
+        Should be overridden by subclasses to return True for environments
+        with running event loops (like E2B), False for local execution.
+        """
+        return False  # Default to False for local execution
+
     def _update_env_vars(self):
         pass  # TODO
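Note: the AST-based detection above can be exercised standalone. A minimal sketch (hypothetical detect_async helper and sample sources, not part of the diff) showing the parse path and the string-matching fallback for unparseable source:

    import ast

    def detect_async(source: str, func_name: str) -> bool:
        """Mirror of _detect_async_function: True if func_name is declared with 'async def'."""
        try:
            for node in ast.walk(ast.parse(source)):
                if isinstance(node, ast.AsyncFunctionDef) and node.name == func_name:
                    return True
                if isinstance(node, ast.FunctionDef) and node.name == func_name:
                    return False
        except SyntaxError:
            pass  # fall through to the cruder substring check
        return f"async def {func_name}" in source

    print(detect_async("async def fetch(url): ...", "fetch"))  # True (AST path)
    print(detect_async("def fetch(url): ...", "fetch"))        # False (AST path)
    print(detect_async("async def fetch(url:", "fetch"))       # True (string fallback)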
letta/services/tool_sandbox/e2b_sandbox.py CHANGED
@@ -250,6 +250,13 @@ class AsyncToolSandboxE2B(AsyncToolSandboxBase):
 
         return sbx
 
+    def use_top_level_await(self) -> bool:
+        """
+        E2B sandboxes run in a Jupyter-like environment with an active event loop,
+        so they support top-level await.
+        """
+        return True
+
     @staticmethod
     async def list_running_e2b_sandboxes():
         # List running sandboxes and access metadata.
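The use_top_level_await split exists because asyncio.run() cannot be called while an event loop is already running, which is exactly the situation inside E2B's Jupyter-style kernel. A small illustrative sketch (not letta code) of the two execution paths the template chooses between:

    import asyncio

    async def work() -> int:
        return 42

    # Local sandbox path: no loop is running yet, so asyncio.run() is the entry point.
    print(asyncio.run(work()))  # 42

    async def inside_a_running_loop():
        # In a Jupyter/E2B-style kernel a loop is already running, so asyncio.run() fails:
        try:
            asyncio.run(work())
        except RuntimeError as err:
            print(err)  # "asyncio.run() cannot be called from a running event loop"
        # The template instead awaits directly (top-level await in the kernel).
        print(await work())  # 42

    asyncio.run(inside_a_running_loop())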
letta/services/user_manager.py CHANGED
@@ -8,6 +8,7 @@ from letta.helpers.decorators import async_redis_cache
 from letta.log import get_logger
 from letta.orm.errors import NoResultFound
 from letta.orm.organization import Organization as OrganizationModel
+from letta.orm.sqlalchemy_base import is_postgresql_session
 from letta.orm.user import User as UserModel
 from letta.otel.tracing import trace_method
 from letta.schemas.user import User as PydanticUser
@@ -158,14 +159,14 @@
         """Fetch a user by ID asynchronously."""
         async with db_registry.async_session() as session:
             # Turn off seqscan to force use pk index
-            if settings.letta_pg_uri_no_default:
+            if is_postgresql_session(session):
                 await session.execute(text("SET LOCAL enable_seqscan = OFF"))
             try:
                 stmt = select(UserModel).where(UserModel.id == actor_id)
                 result = await session.execute(stmt)
                 user = result.scalar_one_or_none()
             finally:
-                if settings.letta_pg_uri_no_default:
+                if is_postgresql_session(session):
                     await session.execute(text("SET LOCAL enable_seqscan = ON"))
 
             if not user:
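is_postgresql_session is imported from letta.orm.sqlalchemy_base, but its body is not part of this diff. A best-guess sketch of such a dialect check (assumed implementation, shown only to explain why the SET LOCAL enable_seqscan statements now run for PostgreSQL sessions and are skipped on SQLite):

    def is_postgresql_session(session) -> bool:
        # Assumed shape: inspect the engine the (async) session is bound to
        # and report whether its SQLAlchemy dialect is PostgreSQL.
        bind = getattr(session, "bind", None) or session.get_bind()
        return bind is not None and bind.dialect.name == "postgresql"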
letta/settings.py CHANGED
@@ -95,9 +95,9 @@ class ModelSettings(BaseSettings):
     groq_api_key: Optional[str] = None
 
     # Bedrock
-    aws_access_key: Optional[str] = None
+    aws_access_key_id: Optional[str] = None
     aws_secret_access_key: Optional[str] = None
-    aws_region: Optional[str] = None
+    aws_default_region: Optional[str] = None
     bedrock_anthropic_version: Optional[str] = "bedrock-2023-05-31"
 
     # anthropic
@@ -196,8 +196,8 @@ class Settings(BaseSettings):
     pool_use_lifo: bool = True
     disable_sqlalchemy_pooling: bool = False
 
-    redis_host: Optional[str] = None
-    redis_port: Optional[int] = None
+    redis_host: Optional[str] = Field(default=None, description="Host for Redis instance")
+    redis_port: Optional[int] = Field(default=6379, description="Port for Redis instance")
 
     plugin_register: Optional[str] = None
 
@@ -230,16 +230,6 @@
     use_experimental: bool = False
     use_vertex_structured_outputs_experimental: bool = False
 
-    # LLM provider client settings
-    httpx_max_retries: int = 5
-    httpx_timeout_connect: float = 10.0
-    httpx_timeout_read: float = 60.0
-    httpx_timeout_write: float = 30.0
-    httpx_timeout_pool: float = 10.0
-    httpx_max_connections: int = 500
-    httpx_max_keepalive_connections: int = 500
-    httpx_keepalive_expiry: float = 120.0
-
     # cron job parameters
     enable_batch_job_polling: bool = False
     poll_running_llm_batches_interval_seconds: int = 5 * 60
@@ -250,6 +240,10 @@
     # for OCR
     mistral_api_key: Optional[str] = None
 
+    # LLM request timeout settings (model + embedding model)
+    llm_request_timeout_seconds: float = Field(default=60.0, ge=10.0, le=1800.0, description="Timeout for LLM requests in seconds")
+    llm_stream_timeout_seconds: float = Field(default=60.0, ge=10.0, le=1800.0, description="Timeout for LLM streaming requests in seconds")
+
     @property
     def letta_pg_uri(self) -> str:
         if self.pg_uri:
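Because Settings is a pydantic BaseSettings subclass, the two new timeout fields are validated against their ge/le bounds at load time and can be overridden through the environment. A small sketch of the behavior (standalone model with an assumed LETTA_ env prefix, not the actual settings class):

    from pydantic import Field, ValidationError
    from pydantic_settings import BaseSettings, SettingsConfigDict

    class TimeoutSettings(BaseSettings):
        model_config = SettingsConfigDict(env_prefix="LETTA_")
        # Mirrors the new fields: values outside [10, 1800] seconds are rejected.
        llm_request_timeout_seconds: float = Field(default=60.0, ge=10.0, le=1800.0)
        llm_stream_timeout_seconds: float = Field(default=60.0, ge=10.0, le=1800.0)

    print(TimeoutSettings().llm_request_timeout_seconds)  # 60.0 by default

    try:
        TimeoutSettings(llm_request_timeout_seconds=5.0)  # below the ge=10.0 bound
    except ValidationError as err:
        print(err.errors()[0]["type"])  # greater_than_equal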
letta/system.py CHANGED
@@ -13,7 +13,7 @@ from .helpers.datetime_helpers import get_local_time
 from .helpers.json_helpers import json_dumps
 
 
-def get_initial_boot_messages(version="startup"):
+def get_initial_boot_messages(version, timezone):
     if version == "startup":
         initial_boot_message = INITIAL_BOOT_MESSAGE
         messages = [
@@ -47,7 +47,7 @@ def get_initial_boot_messages(version="startup"):
                 # "role": "function",
                 "role": "tool",
                 "name": "send_message", # NOTE: technically not up to spec, this is old functions style
-                "content": package_function_response(True, None),
+                "content": package_function_response(True, None, timezone),
                 "tool_call_id": tool_call_id,
             },
         ]
@@ -76,7 +76,7 @@ def get_initial_boot_messages(version="startup"):
                 # "role": "function",
                 "role": "tool",
                 "name": "send_message",
-                "content": package_function_response(True, None),
+                "content": package_function_response(True, None, timezone),
                 "tool_call_id": tool_call_id,
             },
         ]
@@ -87,9 +87,9 @@ def get_initial_boot_messages(version="startup"):
     return messages
 
 
-def get_heartbeat(reason="Automated timer", include_location=False, location_name="San Francisco, CA, USA"):
+def get_heartbeat(timezone, reason: str = "Automated timer", include_location: bool = False, location_name: str = "San Francisco, CA, USA"):
     # Package the message with time and location
-    formatted_time = get_local_time()
+    formatted_time = get_local_time(timezone=timezone)
     packaged_message = {
         "type": "heartbeat",
         "reason": reason,
@@ -102,9 +102,9 @@ def get_heartbeat(reason="Automated timer", include_location=False, location_name="San Francisco, CA, USA"):
     return json_dumps(packaged_message)
 
 
-def get_login_event(last_login="Never (first login)", include_location=False, location_name="San Francisco, CA, USA"):
+def get_login_event(timezone, last_login="Never (first login)", include_location=False, location_name="San Francisco, CA, USA"):
     # Package the message with time and location
-    formatted_time = get_local_time()
+    formatted_time = get_local_time(timezone=timezone)
     packaged_message = {
         "type": "login",
         "last_login": last_login,
@@ -119,13 +119,13 @@ def get_login_event(last_login="Never (first login)", include_location=False, location_name="San Francisco, CA, USA"):
 
 def package_user_message(
     user_message: str,
-    time: Optional[str] = None,
+    timezone: str,
     include_location: bool = False,
     location_name: Optional[str] = "San Francisco, CA, USA",
     name: Optional[str] = None,
 ):
     # Package the message with time and location
-    formatted_time = time if time else get_local_time()
+    formatted_time = get_local_time(timezone=timezone)
     packaged_message = {
         "type": "user_message",
         "message": user_message,
@@ -141,8 +141,8 @@ def package_user_message(
     return json_dumps(packaged_message)
 
 
-def package_function_response(was_success, response_string, timestamp=None):
-    formatted_time = get_local_time() if timestamp is None else timestamp
+def package_function_response(was_success, response_string, timezone):
+    formatted_time = get_local_time(timezone=timezone)
     packaged_message = {
         "status": "OK" if was_success else "Failed",
         "message": response_string,
@@ -152,7 +152,7 @@ def package_function_response(was_success, response_string, timestamp=None):
     return json_dumps(packaged_message)
 
 
-def package_system_message(system_message, message_type="system_alert", time=None):
+def package_system_message(system_message, timezone, message_type="system_alert"):
     # error handling for recursive packaging
     try:
         message_json = json.loads(system_message)
@@ -162,7 +162,7 @@ def package_system_message(system_message, message_type="system_alert", time=None):
     except:
         pass # do nothing, expected behavior that the message is not JSON
 
-    formatted_time = time if time else get_local_time()
+    formatted_time = get_local_time(timezone=timezone)
     packaged_message = {
         "type": message_type,
         "message": system_message,
@@ -172,13 +172,13 @@ def package_system_message(system_message, message_type="system_alert", time=None):
     return json.dumps(packaged_message)
 
 
-def package_summarize_message(summary, summary_message_count, hidden_message_count, total_message_count, timestamp=None):
+def package_summarize_message(summary, summary_message_count, hidden_message_count, total_message_count, timezone):
     context_message = (
         f"Note: prior messages ({hidden_message_count} of {total_message_count} total messages) have been hidden from view due to conversation memory constraints.\n"
         + f"The following is a summary of the previous {summary_message_count} messages:\n {summary}"
     )
 
-    formatted_time = get_local_time() if timestamp is None else timestamp
+    formatted_time = get_local_time(timezone=timezone)
     packaged_message = {
         "type": "system_alert",
         "message": context_message,
@@ -188,11 +188,11 @@ def package_summarize_message(summary, summary_message_count, hidden_message_count, total_message_count, timestamp=None):
    return json_dumps(packaged_message)
 
 
-def package_summarize_message_no_summary(hidden_message_count, timestamp=None, message=None):
+def package_summarize_message_no_summary(hidden_message_count, message=None, timezone=None):
    """Add useful metadata to the summary message"""
 
    # Package the message with time and location
-    formatted_time = get_local_time() if timestamp is None else timestamp
+    formatted_time = get_local_time(timezone=timezone)
    context_message = (
        message
        if message
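All of these packagers now take an explicit IANA timezone string and pass it to get_local_time(timezone=...) instead of defaulting to the server's local clock. A rough sketch of the intended call pattern (the exact formatting of letta's real get_local_time may differ):

    from datetime import datetime
    from zoneinfo import ZoneInfo

    def get_local_time(timezone: str) -> str:
        # Approximation: current time rendered in the agent's configured timezone.
        return datetime.now(ZoneInfo(timezone)).strftime("%Y-%m-%d %I:%M:%S %p %Z%z")

    # e.g. a heartbeat packaged for an agent configured with "America/Los_Angeles"
    print(get_local_time(timezone="America/Los_Angeles"))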
letta/templates/sandbox_code_file_async.py.j2 ADDED
@@ -0,0 +1,59 @@
+{{ 'from __future__ import annotations' if future_import else '' }}
+from typing import *
+import pickle
+import sys
+import base64
+import struct
+import hashlib
+import asyncio
+
+{# Additional imports to support agent state #}
+{% if inject_agent_state %}
+import letta
+from letta import *
+{% endif %}
+
+{# Add schema code if available #}
+{{ schema_imports or '' }}
+
+{# Load agent state #}
+agent_state = {{ 'pickle.loads(' ~ agent_state_pickle ~ ')' if agent_state_pickle else 'None' }}
+
+{{ tool_args }}
+
+{# The tool's source code #}
+{{ tool_source_code }}
+
+{# Async wrapper to handle the function call and store the result #}
+async def _async_wrapper():
+    result = await {{ invoke_function_call }}
+    return {
+        "results": result,
+        "agent_state": agent_state
+    }
+
+{# Run the async function - method depends on environment #}
+{% if use_top_level_await %}
+{# Environment with running event loop (like E2B) - use top-level await #}
+{{ local_sandbox_result_var_name }} = await _async_wrapper()
+{% else %}
+{# Local execution environment - use asyncio.run #}
+{{ local_sandbox_result_var_name }} = asyncio.run(_async_wrapper())
+{% endif %}
+
+{{ local_sandbox_result_var_name }}_pkl = pickle.dumps({{ local_sandbox_result_var_name }})
+
+{% if wrap_print_with_markers %}
+{# Combine everything to flush and write at once. #}
+data_checksum = hashlib.md5({{ local_sandbox_result_var_name }}_pkl).hexdigest().encode('ascii')
+{{ local_sandbox_result_var_name }}_msg = (
+    {{ start_marker }} +
+    struct.pack('>I', len({{ local_sandbox_result_var_name }}_pkl)) +
+    data_checksum +
+    {{ local_sandbox_result_var_name }}_pkl
+)
+sys.stdout.buffer.write({{ local_sandbox_result_var_name }}_msg)
+sys.stdout.buffer.flush()
+{% else %}
+base64.b64encode({{ local_sandbox_result_var_name }}_pkl).decode('utf-8')
+{% endif %}
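When wrap_print_with_markers is set, the template frames the pickled result on stdout as start marker + 4-byte big-endian length + 32-character MD5 hex digest + payload. A sketch of how the host side could decode that frame (hypothetical helper and marker value; the real parsing lives in letta's sandbox runner):

    import hashlib
    import pickle
    import struct

    START_MARKER = b"#LETTA_RESULT#"  # placeholder; the real marker comes from the sandbox constants

    def decode_sandbox_frame(stdout: bytes):
        """Extract and verify the pickled result written by the async sandbox template."""
        start = stdout.index(START_MARKER) + len(START_MARKER)
        (length,) = struct.unpack(">I", stdout[start:start + 4])   # 4-byte payload length
        checksum = stdout[start + 4:start + 36]                    # MD5 hex digest (32 ASCII bytes)
        payload = stdout[start + 36:start + 36 + length]
        if hashlib.md5(payload).hexdigest().encode("ascii") != checksum:
            raise ValueError("sandbox result corrupted in transit")
        return pickle.loads(payload)  # {"results": ..., "agent_state": ...}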
{letta_nightly-0.8.5.dev20250625104328.dist-info → letta_nightly-0.8.6.dev20250626104326.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: letta-nightly
-Version: 0.8.5.dev20250625104328
+Version: 0.8.6.dev20250626104326
 Summary: Create LLM agents with long-term memory and custom tools
 License: Apache License
 Author: Letta Team
@@ -23,6 +23,7 @@ Provides-Extra: postgres
 Provides-Extra: redis
 Provides-Extra: server
 Provides-Extra: tests
+Requires-Dist: aioboto3 (>=14.3.0,<15.0.0) ; extra == "bedrock"
 Requires-Dist: aiomultiprocess (>=0.9.1,<0.10.0)
 Requires-Dist: aiosqlite (>=0.21.0,<0.22.0)
 Requires-Dist: alembic (>=1.13.3,<2.0.0)
@@ -56,7 +57,7 @@ Requires-Dist: isort (>=5.13.2,<6.0.0) ; extra == "dev" or extra == "all"
 Requires-Dist: jinja2 (>=3.1.5,<4.0.0)
 Requires-Dist: langchain (>=0.3.7,<0.4.0) ; extra == "external-tools" or extra == "desktop" or extra == "all"
 Requires-Dist: langchain-community (>=0.3.7,<0.4.0) ; extra == "external-tools" or extra == "desktop" or extra == "all"
-Requires-Dist: letta_client (>=0.1.160,<0.2.0)
+Requires-Dist: letta_client (>=0.1.169,<0.2.0)
 Requires-Dist: llama-index (>=0.12.2,<0.13.0)
 Requires-Dist: llama-index-embeddings-openai (>=0.3.1,<0.4.0)
 Requires-Dist: locust (>=2.31.5,<3.0.0) ; extra == "dev" or extra == "desktop" or extra == "all"