letta-nightly 0.8.12.dev20250710104356__py3-none-any.whl → 0.8.13.dev20250710230421__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

letta/__init__.py CHANGED
@@ -5,7 +5,7 @@ try:
     __version__ = version("letta")
 except PackageNotFoundError:
     # Fallback for development installations
-    __version__ = "0.8.12"
+    __version__ = "0.8.13"
 
 if os.environ.get("LETTA_VERSION"):
     __version__ = os.environ["LETTA_VERSION"]
letta/agents/base_agent.py CHANGED
@@ -96,7 +96,7 @@ class BaseAgent(ABC):
         """
         try:
             # [DB Call] loading blocks (modifies: agent_state.memory.blocks)
-            await self.agent_manager.refresh_memory_async(agent_state=agent_state, actor=self.actor)
+            agent_state = await self.agent_manager.refresh_memory_async(agent_state=agent_state, actor=self.actor)
 
             tool_constraint_block = None
             if tool_rules_solver is not None:
@@ -104,18 +104,37 @@ class BaseAgent(ABC):
 
         # TODO: This is a pretty brittle pattern established all over our code, need to get rid of this
         curr_system_message = in_context_messages[0]
-        curr_memory_str = agent_state.memory.compile(tool_usage_rules=tool_constraint_block, sources=agent_state.sources)
         curr_system_message_text = curr_system_message.content[0].text
-        if curr_memory_str in curr_system_message_text:
+
+        # extract the dynamic section that includes memory blocks, tool rules, and directories
+        # this avoids timestamp comparison issues
+        def extract_dynamic_section(text):
+            start_marker = "</base_instructions>"
+            end_marker = "<memory_metadata>"
+
+            start_idx = text.find(start_marker)
+            end_idx = text.find(end_marker)
+
+            if start_idx != -1 and end_idx != -1:
+                return text[start_idx:end_idx]
+            return text  # fallback to full text if markers not found
+
+        curr_dynamic_section = extract_dynamic_section(curr_system_message_text)
+
+        # generate just the memory string with current state for comparison
+        curr_memory_str = agent_state.memory.compile(tool_usage_rules=tool_constraint_block, sources=agent_state.sources)
+        new_dynamic_section = extract_dynamic_section(curr_memory_str)
+
+        # compare just the dynamic sections (memory blocks, tool rules, directories)
+        if curr_dynamic_section == new_dynamic_section:
             logger.debug(
-                f"Memory hasn't changed for agent id={agent_state.id} and actor=({self.actor.id}, {self.actor.name}), skipping system prompt rebuild"
+                f"Memory and sources haven't changed for agent id={agent_state.id} and actor=({self.actor.id}, {self.actor.name}), skipping system prompt rebuild"
             )
             return in_context_messages
 
         memory_edit_timestamp = get_utc_time()
 
-        # [DB Call] size of messages and archival memories
-        # todo: blocking for now
+        # size of messages and archival memories
         if num_messages is None:
            num_messages = await self.message_manager.size_async(actor=self.actor, agent_id=agent_state.id)
         if num_archival_memories is None:
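
Note on the change above: the old check looked for the full compiled memory string inside the system message, which failed whenever only the <memory_metadata> timestamps differed. The new code compares just the span between the two markers. A self-contained sketch of the idea on invented prompt text (the markers match the diff; everything else is illustrative):

# Marker-based comparison of the dynamic prompt section; prompt text is made up.
def extract_dynamic_section(text):
    start_marker = "</base_instructions>"
    end_marker = "<memory_metadata>"
    start_idx = text.find(start_marker)
    end_idx = text.find(end_marker)
    if start_idx != -1 and end_idx != -1:
        return text[start_idx:end_idx]
    return text  # fallback to full text if markers not found

old = "sys</base_instructions><memory>foo</memory><memory_metadata>ts=1</memory_metadata>"
new = "sys</base_instructions><memory>foo</memory><memory_metadata>ts=2</memory_metadata>"
# Only the metadata timestamp differs, so the system prompt rebuild can be skipped.
assert extract_dynamic_section(old) == extract_dynamic_section(new)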
letta/agents/letta_agent.py CHANGED
@@ -3,7 +3,7 @@ import json
 import uuid
 from collections.abc import AsyncGenerator
 from datetime import datetime
-from typing import Optional
+from typing import Optional, Union
 
 from openai import AsyncStream
 from openai.types.chat import ChatCompletionChunk
@@ -165,18 +165,28 @@ class LettaAgent(BaseAgent):
         use_assistant_message: bool = True,
         request_start_timestamp_ns: int | None = None,
         include_return_message_types: list[MessageType] | None = None,
-    ) -> LettaResponse:
+        dry_run: bool = False,
+    ) -> Union[LettaResponse, dict]:
         # TODO (cliandy): pass in run_id and use at send_message endpoints for all step functions
         agent_state = await self.agent_manager.get_agent_by_id_async(
-            agent_id=self.agent_id, include_relationships=["tools", "memory", "tool_exec_environment_variables"], actor=self.actor
+            agent_id=self.agent_id,
+            include_relationships=["tools", "memory", "tool_exec_environment_variables", "sources"],
+            actor=self.actor,
         )
-        _, new_in_context_messages, stop_reason, usage = await self._step(
+        result = await self._step(
             agent_state=agent_state,
             input_messages=input_messages,
             max_steps=max_steps,
             run_id=run_id,
             request_start_timestamp_ns=request_start_timestamp_ns,
+            dry_run=dry_run,
         )
+
+        # If dry run, return the request payload directly
+        if dry_run:
+            return result
+
+        _, new_in_context_messages, stop_reason, usage = result
         return _create_letta_response(
             new_in_context_messages=new_in_context_messages,
             use_assistant_message=use_assistant_message,
@@ -195,7 +205,9 @@ class LettaAgent(BaseAgent):
         include_return_message_types: list[MessageType] | None = None,
     ):
         agent_state = await self.agent_manager.get_agent_by_id_async(
-            agent_id=self.agent_id, include_relationships=["tools", "memory", "tool_exec_environment_variables"], actor=self.actor
+            agent_id=self.agent_id,
+            include_relationships=["tools", "memory", "tool_exec_environment_variables", "sources"],
+            actor=self.actor,
         )
         current_in_context_messages, new_in_context_messages = await _prepare_in_context_messages_no_persist_async(
             input_messages, agent_state, self.message_manager, self.actor
@@ -279,6 +291,7 @@ class LettaAgent(BaseAgent):
                 tool_rules_solver,
                 response.usage,
                 reasoning_content=reasoning,
+                step_id=step_id,
                 initial_messages=initial_messages,
                 agent_step_span=agent_step_span,
                 is_final_step=(i == max_steps - 1),
@@ -357,7 +370,8 @@ class LettaAgent(BaseAgent):
         max_steps: int = DEFAULT_MAX_STEPS,
         run_id: str | None = None,
         request_start_timestamp_ns: int | None = None,
-    ) -> tuple[list[Message], list[Message], LettaStopReason | None, LettaUsageStatistics]:
+        dry_run: bool = False,
+    ) -> Union[tuple[list[Message], list[Message], LettaStopReason | None, LettaUsageStatistics], dict]:
         """
         Carries out an invocation of the agent loop. In each step, the agent
         1. Rebuilds its memory
@@ -394,6 +408,16 @@ class LettaAgent(BaseAgent):
             agent_step_span = tracer.start_span("agent_step", start_time=step_start)
             agent_step_span.set_attributes({"step_id": step_id})
 
+            # If dry run, build request data and return it without making LLM call
+            if dry_run:
+                request_data, valid_tool_names = await self._create_llm_request_data_async(
+                    llm_client=llm_client,
+                    in_context_messages=current_in_context_messages + new_in_context_messages,
+                    agent_state=agent_state,
+                    tool_rules_solver=tool_rules_solver,
+                )
+                return request_data
+
             request_data, response_data, current_in_context_messages, new_in_context_messages, valid_tool_names = (
                 await self._build_and_request_from_llm(
                     current_in_context_messages, new_in_context_messages, agent_state, llm_client, tool_rules_solver, agent_step_span
@@ -530,7 +554,9 @@ class LettaAgent(BaseAgent):
         4. Processes the response
         """
         agent_state = await self.agent_manager.get_agent_by_id_async(
-            agent_id=self.agent_id, include_relationships=["tools", "memory", "tool_exec_environment_variables"], actor=self.actor
+            agent_id=self.agent_id,
+            include_relationships=["tools", "memory", "tool_exec_environment_variables", "sources"],
+            actor=self.actor,
         )
         current_in_context_messages, new_in_context_messages = await _prepare_in_context_messages_no_persist_async(
             input_messages, agent_state, self.message_manager, self.actor
@@ -628,7 +654,7 @@ class LettaAgent(BaseAgent):
             )
 
             # log LLM request time
-            llm_request_ms = ns_to_ms(stream_end_time_ns - request_start_timestamp_ns)
+            llm_request_ms = ns_to_ms(stream_end_time_ns - provider_request_start_timestamp_ns)
             agent_step_span.add_event(name="llm_request_ms", attributes={"duration_ms": llm_request_ms})
             MetricRegistry().llm_execution_time_ms_histogram.record(
                 llm_request_ms,
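
The dry_run flag threads from step() into _step(), which now returns the payload built by _create_llm_request_data_async instead of calling the provider. A hypothetical call site, assuming an already-constructed LettaAgent (see the preview-raw-payload route below for a concrete construction; the message shape here is illustrative):

# Sketch: inspect the provider request without sending it.
payload = await agent_loop.step(input_messages=messages, dry_run=True)
assert isinstance(payload, dict)  # raw request body, not a LettaResponse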
letta/functions/ast_parsers.py CHANGED
@@ -129,7 +129,8 @@ def get_function_name_and_docstring(source_code: str, name: Optional[str] = None
         raise LettaToolCreateError("Could not determine function name")
 
     if not docstring:
-        raise LettaToolCreateError("Docstring is missing")
+        # For tools with args_json_schema, the docstring is optional
+        docstring = f"The {function_name} tool"
 
     return function_name, docstring
 
letta/helpers/json_helpers.py CHANGED
@@ -10,6 +10,8 @@ def json_dumps(data, indent=2):
     def safe_serializer(obj):
         if isinstance(obj, datetime):
             return obj.isoformat()
+        if isinstance(obj, bytes):
+            return obj.decode("utf-8")
         raise TypeError(f"Type {type(obj)} not serializable")
 
     return json.dumps(data, indent=indent, default=safe_serializer, ensure_ascii=False)
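
A quick check of the new bytes fallback; json_dumps is restated locally since the diff only shows the serializer body:

import json
from datetime import datetime, timezone

def json_dumps(data, indent=2):
    def safe_serializer(obj):
        if isinstance(obj, datetime):
            return obj.isoformat()
        if isinstance(obj, bytes):
            return obj.decode("utf-8")
        raise TypeError(f"Type {type(obj)} not serializable")

    return json.dumps(data, indent=indent, default=safe_serializer, ensure_ascii=False)

# bytes values now serialize as UTF-8 text instead of raising TypeError
print(json_dumps({"stdout": b"ok", "at": datetime.now(timezone.utc)}))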
letta/helpers/pinecone_utils.py CHANGED
@@ -1,6 +1,12 @@
 from typing import Any, Dict, List
 
-from pinecone import PineconeAsyncio
+try:
+    from pinecone import IndexEmbed, PineconeAsyncio
+    from pinecone.exceptions.exceptions import NotFoundException
+
+    PINECONE_AVAILABLE = True
+except ImportError:
+    PINECONE_AVAILABLE = False
 
 from letta.constants import (
     PINECONE_CLOUD,
@@ -27,11 +33,20 @@ def should_use_pinecone(verbose: bool = False):
             bool(settings.pinecone_source_index),
         )
 
-    return settings.enable_pinecone and settings.pinecone_api_key and settings.pinecone_agent_index and settings.pinecone_source_index
+    return all(
+        (
+            PINECONE_AVAILABLE,
+            settings.enable_pinecone,
+            settings.pinecone_api_key,
+            settings.pinecone_agent_index,
+            settings.pinecone_source_index,
+        )
+    )
 
 
 async def upsert_pinecone_indices():
-    from pinecone import IndexEmbed, PineconeAsyncio
+    if not PINECONE_AVAILABLE:
+        raise ImportError("Pinecone is not available. Please install pinecone to use this feature.")
 
     for index_name in get_pinecone_indices():
         async with PineconeAsyncio(api_key=settings.pinecone_api_key) as pc:
@@ -49,6 +64,9 @@ def get_pinecone_indices() -> List[str]:
 
 
 async def upsert_file_records_to_pinecone_index(file_id: str, source_id: str, chunks: List[str], actor: User):
+    if not PINECONE_AVAILABLE:
+        raise ImportError("Pinecone is not available. Please install pinecone to use this feature.")
+
     records = []
     for i, chunk in enumerate(chunks):
         record = {
@@ -63,7 +81,8 @@ async def upsert_file_records_to_pinecone_index(file_id: str, source_id: str, ch
 
 
 async def delete_file_records_from_pinecone_index(file_id: str, actor: User):
-    from pinecone.exceptions.exceptions import NotFoundException
+    if not PINECONE_AVAILABLE:
+        raise ImportError("Pinecone is not available. Please install pinecone to use this feature.")
 
     namespace = actor.organization_id
     try:
@@ -81,7 +100,8 @@ async def delete_file_records_from_pinecone_index(file_id: str, actor: User):
 
 
 async def delete_source_records_from_pinecone_index(source_id: str, actor: User):
-    from pinecone.exceptions.exceptions import NotFoundException
+    if not PINECONE_AVAILABLE:
+        raise ImportError("Pinecone is not available. Please install pinecone to use this feature.")
 
     namespace = actor.organization_id
     try:
@@ -94,6 +114,9 @@ async def delete_source_records_from_pinecone_index(source_id: str, actor: User)
 
 
 async def upsert_records_to_pinecone_index(records: List[dict], actor: User):
+    if not PINECONE_AVAILABLE:
+        raise ImportError("Pinecone is not available. Please install pinecone to use this feature.")
+
     async with PineconeAsyncio(api_key=settings.pinecone_api_key) as pc:
         description = await pc.describe_index(name=settings.pinecone_source_index)
         async with pc.IndexAsyncio(host=description.index.host) as dense_index:
@@ -104,6 +127,9 @@ async def upsert_records_to_pinecone_index(records: List[dict], actor: User):
 
 
 async def search_pinecone_index(query: str, limit: int, filter: Dict[str, Any], actor: User) -> Dict[str, Any]:
+    if not PINECONE_AVAILABLE:
+        raise ImportError("Pinecone is not available. Please install pinecone to use this feature.")
+
     async with PineconeAsyncio(api_key=settings.pinecone_api_key) as pc:
         description = await pc.describe_index(name=settings.pinecone_source_index)
         async with pc.IndexAsyncio(host=description.index.host) as dense_index:
@@ -127,7 +153,8 @@ async def search_pinecone_index(query: str, limit: int, filter: Dict[str, Any],
 
 
 async def list_pinecone_index_for_files(file_id: str, actor: User, limit: int = None, pagination_token: str = None) -> List[str]:
-    from pinecone.exceptions.exceptions import NotFoundException
+    if not PINECONE_AVAILABLE:
+        raise ImportError("Pinecone is not available. Please install pinecone to use this feature.")
 
     namespace = actor.organization_id
     try:
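
With pinecone now an optional extra (see the METADATA change below), every entry point in this module is gated on a module-level availability flag. A generic sketch of the same guard for any optional dependency ("somepkg" is a placeholder, not a letta requirement):

try:
    import somepkg  # noqa: F401

    SOMEPKG_AVAILABLE = True
except ImportError:
    SOMEPKG_AVAILABLE = False


def feature_enabled(configured: bool) -> bool:
    # usable only when the package is installed AND the feature is configured
    return all((SOMEPKG_AVAILABLE, configured))


def use_feature():
    if not SOMEPKG_AVAILABLE:
        raise ImportError("somepkg is not available. Please install somepkg to use this feature.")
    # ... real work here ...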
letta/jobs/scheduler.py CHANGED
@@ -29,6 +29,7 @@ async def _try_acquire_lock_and_start_scheduler(server: SyncServer) -> bool:
     if _is_scheduler_leader:
         return True  # Already leading
 
+    engine_name = None
     lock_session = None
     acquired_lock = False
     try:
@@ -36,32 +37,25 @@ async def _try_acquire_lock_and_start_scheduler(server: SyncServer) -> bool:
             engine = session.get_bind()
             engine_name = engine.name
             logger.info(f"Database engine type: {engine_name}")
-            if engine_name != "postgresql":
-                logger.warning(f"Advisory locks not supported for {engine_name} database. Starting scheduler without leader election.")
-                acquired_lock = True
-            else:
-                lock_session = db_registry.get_async_session_factory()()
-                result = await lock_session.execute(
-                    text("SELECT pg_try_advisory_lock(CAST(:lock_key AS bigint))"), {"lock_key": ADVISORY_LOCK_KEY}
-                )
-                acquired_lock = result.scalar()
-                await lock_session.commit()
-
-                if not acquired_lock:
-                    if lock_session:
-                        await lock_session.close()
-                    logger.info("Scheduler lock held by another instance.")
-                    return False
 
-        if engine_name == "postgresql":
-            logger.info("Acquired PostgreSQL advisory lock.")
-            _advisory_lock_session = lock_session
-            lock_session = None
+        if engine_name != "postgresql":
+            logger.warning(f"Advisory locks not supported for {engine_name} database. Starting scheduler without leader election.")
+            acquired_lock = True
         else:
-            logger.info("Starting scheduler for non-PostgreSQL database.")
-            if lock_session:
+            lock_session = db_registry.get_async_session_factory()()
+            result = await lock_session.execute(
+                text("SELECT pg_try_advisory_lock(CAST(:lock_key AS bigint))"), {"lock_key": ADVISORY_LOCK_KEY}
+            )
+            acquired_lock = result.scalar()
+            await lock_session.commit()
+
+            if not acquired_lock:
                 await lock_session.close()
-                lock_session = None
+                logger.info("Scheduler lock held by another instance.")
+                return False
+            else:
+                _advisory_lock_session = lock_session
+                lock_session = None
 
     trigger = IntervalTrigger(
         seconds=settings.poll_running_llm_batches_interval_seconds,
@@ -90,7 +84,6 @@ async def _try_acquire_lock_and_start_scheduler(server: SyncServer) -> bool:
         if acquired_lock:
             logger.warning("Attempting to release lock due to error during startup.")
             try:
-                _advisory_lock_session = lock_session
                 await _release_advisory_lock(lock_session)
             except Exception as unlock_err:
                 logger.error(f"Failed to release lock during error handling: {unlock_err}", exc_info=True)
@@ -108,8 +101,8 @@ async def _try_acquire_lock_and_start_scheduler(server: SyncServer) -> bool:
         if lock_session:
             try:
                 await lock_session.close()
-            except:
-                pass
+            except Exception as e:
+                logger.error(f"Failed to close session during error handling: {e}", exc_info=True)
 
 
 async def _background_lock_retry_loop(server: SyncServer):
@@ -138,15 +131,13 @@ async def _background_lock_retry_loop(server: SyncServer):
             break
         except Exception as e:
             logger.error(f"Error in background lock retry loop: {e}", exc_info=True)
-            await asyncio.sleep(settings.poll_lock_retry_interval_seconds)
 
 
-async def _release_advisory_lock(lock_session=None):
+async def _release_advisory_lock(target_lock_session=None):
     """Releases the advisory lock using the stored session."""
     global _advisory_lock_session
 
-    lock_session = _advisory_lock_session or lock_session
-    _advisory_lock_session = None
+    lock_session = target_lock_session or _advisory_lock_session
 
     if lock_session is not None:
         logger.info(f"Attempting to release PostgreSQL advisory lock {ADVISORY_LOCK_KEY}")
@@ -161,6 +152,8 @@ async def _release_advisory_lock(lock_session=None):
             if lock_session:
                 await lock_session.close()
                 logger.info("Closed database session that held advisory lock.")
+                if lock_session == _advisory_lock_session:
+                    _advisory_lock_session = None
         except Exception as e:
             logger.error(f"Error closing advisory lock session: {e}", exc_info=True)
     else:
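
The leader election above rests on pg_try_advisory_lock, which succeeds for exactly one session per key. A standalone probe, assuming a hypothetical asyncpg DSN:

from sqlalchemy import text
from sqlalchemy.ext.asyncio import create_async_engine

LOCK_KEY = 12345  # must be the same constant across all instances

async def try_become_leader(dsn: str) -> bool:
    engine = create_async_engine(dsn)
    async with engine.connect() as conn:
        result = await conn.execute(
            text("SELECT pg_try_advisory_lock(CAST(:k AS bigint))"), {"k": LOCK_KEY}
        )
        # The lock is session-scoped: it is released when this connection closes,
        # which is why the real scheduler stashes the winning session in
        # _advisory_lock_session and keeps it open while it leads.
        return bool(result.scalar())

# asyncio.run(try_become_leader("postgresql+asyncpg://user:pass@localhost/db"))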
letta/otel/metric_registry.py CHANGED
@@ -58,7 +58,12 @@ class MetricRegistry:
     def tool_execution_counter(self) -> Counter:
         return self._get_or_create_metric(
             "count_tool_execution",
-            partial(self._meter.create_counter, name="count_tool_execution", description="Counts the number of tools executed.", unit="1"),
+            partial(
+                self._meter.create_counter,
+                name="count_tool_execution",
+                description="Counts the number of tools executed.",
+                unit="1",
+            ),
         )
 
     # project_id + model
@@ -66,7 +71,12 @@ class MetricRegistry:
     def ttft_ms_histogram(self) -> Histogram:
         return self._get_or_create_metric(
             "hist_ttft_ms",
-            partial(self._meter.create_histogram, name="hist_ttft_ms", description="Histogram for the Time to First Token (ms)", unit="ms"),
+            partial(
+                self._meter.create_histogram,
+                name="hist_ttft_ms",
+                description="Histogram for the Time to First Token (ms)",
+                unit="ms",
+            ),
         )
 
     # (includes model name)
@@ -158,3 +168,15 @@ class MetricRegistry:
                 unit="1",
             ),
         )
+
+    @property
+    def file_process_bytes_histogram(self) -> Histogram:
+        return self._get_or_create_metric(
+            "hist_file_process_bytes",
+            partial(
+                self._meter.create_histogram,
+                name="hist_file_process_bytes",
+                description="Histogram for file process in bytes",
+                unit="By",
+            ),
+        )
letta/schemas/enums.py CHANGED
@@ -139,3 +139,11 @@ class MCPServerType(str, Enum):
     SSE = "sse"
     STDIO = "stdio"
     STREAMABLE_HTTP = "streamable_http"
+
+
+class DuplicateFileHandling(str, Enum):
+    """How to handle duplicate filenames when uploading files"""
+
+    SKIP = "skip"  # skip files with duplicate names
+    ERROR = "error"  # error when duplicate names are encountered
+    SUFFIX = "suffix"  # add numeric suffix to make names unique (default behavior)
letta/schemas/tool.py CHANGED
@@ -77,9 +77,8 @@ class Tool(BaseTool):
 
         if self.tool_type is ToolType.CUSTOM:
             if not self.source_code:
-                error_msg = f"Custom tool with id={self.id} is missing source_code field."
-                logger.error(error_msg)
-                raise ValueError(error_msg)
+                logger.error("Custom tool with id=%s is missing source_code field", self.id)
+                raise ValueError(f"Custom tool with id={self.id} is missing source_code field.")
 
         # Always derive json_schema for freshest possible json_schema
         if self.args_json_schema is not None:
@@ -96,8 +95,7 @@ class Tool(BaseTool):
             try:
                 self.json_schema = derive_openai_json_schema(source_code=self.source_code)
             except Exception as e:
-                error_msg = f"Failed to derive json schema for tool with id={self.id} name={self.name}. Error: {str(e)}"
-                logger.error(error_msg)
+                logger.error("Failed to derive json schema for tool with id=%s name=%s: %s", self.id, self.name, e)
         elif self.tool_type in {ToolType.LETTA_CORE, ToolType.LETTA_MEMORY_CORE, ToolType.LETTA_SLEEPTIME_CORE}:
             # If it's letta core tool, we generate the json_schema on the fly here
             self.json_schema = get_json_schema_from_module(module_name=LETTA_CORE_TOOL_MODULE_NAME, function_name=self.name)
@@ -119,9 +117,8 @@ class Tool(BaseTool):
 
         # At this point, we need to validate that at least json_schema is populated
         if not self.json_schema:
-            error_msg = f"Tool with id={self.id} name={self.name} tool_type={self.tool_type} is missing a json_schema."
-            logger.error(error_msg)
-            raise ValueError(error_msg)
+            logger.error("Tool with id=%s name=%s tool_type=%s is missing a json_schema", self.id, self.name, self.tool_type)
+            raise ValueError(f"Tool with id={self.id} name={self.name} tool_type={self.tool_type} is missing a json_schema.")
 
         # Derive name from the JSON schema if not provided
         if not self.name:
letta/server/rest_api/app.py CHANGED
@@ -337,8 +337,11 @@ def create_application() -> "FastAPI":
     # / static files
     mount_static_files(app)
 
+    no_generation = "--no-generation" in sys.argv
+
     # Generate OpenAPI schema after all routes are mounted
-    generate_openapi_schema(app)
+    if not no_generation:
+        generate_openapi_schema(app)
 
     return app
 
letta/server/rest_api/routers/v1/agents.py CHANGED
@@ -2,7 +2,7 @@ import asyncio
 import json
 import traceback
 from datetime import datetime, timezone
-from typing import Annotated, Any, List, Optional
+from typing import Annotated, Any, Dict, List, Optional, Union
 
 from fastapi import APIRouter, Body, Depends, File, Header, HTTPException, Query, Request, UploadFile, status
 from fastapi.responses import JSONResponse
@@ -522,7 +522,7 @@ async def attach_block(
     actor_id: str | None = Header(None, alias="user_id"),
 ):
     """
-    Attach a core memoryblock to an agent.
+    Attach a core memory block to an agent.
     """
     actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
     return await server.agent_manager.attach_block_async(agent_id=agent_id, block_id=block_id, actor=actor)
@@ -1160,6 +1160,69 @@ async def list_agent_groups(
     return server.agent_manager.list_groups(agent_id=agent_id, manager_type=manager_type, actor=actor)
 
 
+@router.post(
+    "/{agent_id}/messages/preview-raw-payload",
+    response_model=Dict[str, Any],
+    operation_id="preview_raw_payload",
+)
+async def preview_raw_payload(
+    agent_id: str,
+    request: Union[LettaRequest, LettaStreamingRequest] = Body(...),
+    server: SyncServer = Depends(get_letta_server),
+    actor_id: str | None = Header(None, alias="user_id"),
+):
+    """
+    Inspect the raw LLM request payload without sending it.
+
+    This endpoint processes the message through the agent loop up until
+    the LLM request, then returns the raw request payload that would
+    be sent to the LLM provider. Useful for debugging and inspection.
+    """
+    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)
+    agent = await server.agent_manager.get_agent_by_id_async(agent_id, actor, include_relationships=["multi_agent_group"])
+    agent_eligible = agent.multi_agent_group is None or agent.multi_agent_group.manager_type in ["sleeptime", "voice_sleeptime"]
+    model_compatible = agent.llm_config.model_endpoint_type in ["anthropic", "openai", "together", "google_ai", "google_vertex", "bedrock"]
+
+    if agent_eligible and model_compatible:
+        if agent.enable_sleeptime:
+            # TODO: @caren need to support this for sleeptime
+            raise HTTPException(
+                status_code=status.HTTP_400_BAD_REQUEST,
+                detail="Payload inspection is not supported for agents with sleeptime enabled.",
+            )
+        else:
+            agent_loop = LettaAgent(
+                agent_id=agent_id,
+                message_manager=server.message_manager,
+                agent_manager=server.agent_manager,
+                block_manager=server.block_manager,
+                job_manager=server.job_manager,
+                passage_manager=server.passage_manager,
+                actor=actor,
+                step_manager=server.step_manager,
+                telemetry_manager=server.telemetry_manager if settings.llm_api_logging else NoopTelemetryManager(),
+                summarizer_mode=(
+                    SummarizationMode.STATIC_MESSAGE_BUFFER
+                    if agent.agent_type == AgentType.voice_convo_agent
+                    else SummarizationMode.PARTIAL_EVICT_MESSAGE_BUFFER
+                ),
+            )
+
+            # TODO: Support step_streaming
+            return await agent_loop.step(
+                input_messages=request.messages,
+                use_assistant_message=request.use_assistant_message,
+                include_return_message_types=request.include_return_message_types,
+                dry_run=True,
+            )
+
+    else:
+        raise HTTPException(
+            status_code=status.HTTP_403_FORBIDDEN,
+            detail="Payload inspection is not currently supported for this agent configuration.",
+        )
+
+
 @router.post("/{agent_id}/summarize", response_model=AgentState, operation_id="summarize_agent_conversation")
 async def summarize_agent_conversation(
     agent_id: str,
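
A hypothetical client call against the new route; the base URL, ids, and auth header are illustrative, and the body mirrors LettaRequest:

import httpx

resp = httpx.post(
    "http://localhost:8283/v1/agents/agent-123/messages/preview-raw-payload",
    json={"messages": [{"role": "user", "content": "hello"}]},
    headers={"user_id": "user-123"},
)
payload = resp.json()  # the dict that would have been sent to the LLM provider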
letta/server/rest_api/routers/v1/sources.py CHANGED
@@ -19,7 +19,7 @@ from letta.log import get_logger
 from letta.otel.tracing import trace_method
 from letta.schemas.agent import AgentState
 from letta.schemas.embedding_config import EmbeddingConfig
-from letta.schemas.enums import FileProcessingStatus
+from letta.schemas.enums import DuplicateFileHandling, FileProcessingStatus
 from letta.schemas.file import FileMetadata
 from letta.schemas.passage import Passage
 from letta.schemas.source import Source, SourceCreate, SourceUpdate
@@ -208,6 +208,7 @@ async def delete_source(
 async def upload_file_to_source(
     file: UploadFile,
     source_id: str,
+    duplicate_handling: DuplicateFileHandling = Query(DuplicateFileHandling.SUFFIX, description="How to handle duplicate filenames"),
     server: "SyncServer" = Depends(get_letta_server),
     actor_id: Optional[str] = Header(None, alias="user_id"),
 ):
@@ -264,8 +265,31 @@ async def upload_file_to_source(
 
     content = await file.read()
 
-    # Store original filename and generate unique filename
+    # Store original filename and handle duplicate logic
     original_filename = sanitize_filename(file.filename)  # Basic sanitization only
+
+    # Check if duplicate exists
+    existing_file = await server.file_manager.get_file_by_original_name_and_source(
+        original_filename=original_filename, source_id=source_id, actor=actor
+    )
+
+    if existing_file:
+        # Duplicate found, handle based on strategy
+        if duplicate_handling == DuplicateFileHandling.ERROR:
+            raise HTTPException(
+                status_code=status.HTTP_409_CONFLICT, detail=f"File '{original_filename}' already exists in source '{source.name}'"
+            )
+        elif duplicate_handling == DuplicateFileHandling.SKIP:
+            # Return existing file metadata with custom header to indicate it was skipped
+            from fastapi import Response
+
+            response = Response(
+                content=existing_file.model_dump_json(), media_type="application/json", headers={"X-Upload-Result": "skipped"}
+            )
+            return response
+        # For SUFFIX, continue to generate unique filename
+
+    # Generate unique filename (adds suffix if needed)
     unique_filename = await server.file_manager.generate_unique_filename(
         original_filename=original_filename, source=source, organization_id=actor.organization_id
     )
@@ -360,6 +384,13 @@ async def get_file_metadata(
         file_id=file_id, actor=actor, include_content=include_content, strip_directory_prefix=True
     )
 
+    if not file_metadata:
+        raise HTTPException(status_code=404, detail=f"File with id={file_id} not found.")
+
+    # Verify the file belongs to the specified source
+    if file_metadata.source_id != source_id:
+        raise HTTPException(status_code=404, detail=f"File with id={file_id} not found in source {source_id}.")
+
     if should_use_pinecone() and not file_metadata.is_processing_terminal():
         ids = await list_pinecone_index_for_files(file_id=file_id, actor=actor, limit=file_metadata.total_chunks)
         logger.info(
@@ -375,13 +406,6 @@ async def get_file_metadata(
             file_id=file_metadata.id, actor=actor, chunks_embedded=len(ids), processing_status=file_status
         )
 
-    if not file_metadata:
-        raise HTTPException(status_code=404, detail=f"File with id={file_id} not found.")
-
-    # Verify the file belongs to the specified source
-    if file_metadata.source_id != source_id:
-        raise HTTPException(status_code=404, detail=f"File with id={file_id} not found in source {source_id}.")
-
     return file_metadata
 
 
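A hypothetical double upload exercising the new query parameter; the URL and ids are illustrative, and the values come from the DuplicateFileHandling enum added in letta/schemas/enums.py:

import httpx

url = "http://localhost:8283/v1/sources/source-123/upload"
files = {"file": ("notes.txt", b"hello world")}

httpx.post(url, files=files)  # first upload, stored under the original name
r = httpx.post(url, files=files, params={"duplicate_handling": "skip"})
print(r.headers.get("X-Upload-Result"))  # "skipped": the existing metadata is returned
# "suffix" (the default) would store notes_(1).txt; "error" would return 409 Conflict
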
letta/services/agent_manager.py CHANGED
@@ -4,7 +4,7 @@ from datetime import datetime, timezone
 from typing import Dict, List, Optional, Set, Tuple
 
 import sqlalchemy as sa
-from sqlalchemy import delete, func, insert, literal, or_, select
+from sqlalchemy import delete, func, insert, literal, or_, select, tuple_
 from sqlalchemy.dialects.postgresql import insert as pg_insert
 
 from letta.constants import (
@@ -224,13 +224,44 @@ class AgentManager:
     @staticmethod
     async def _replace_pivot_rows_async(session, table, agent_id: str, rows: list[dict]):
         """
-        Replace all pivot rows for an agent with *exactly* the provided list.
-        Uses two bulk statements (DELETE + INSERT ... ON CONFLICT DO NOTHING).
+        Replace all pivot rows for an agent atomically using MERGE pattern.
         """
-        # delete all existing rows for this agent
-        await session.execute(delete(table).where(table.c.agent_id == agent_id))
-        if rows:
-            await AgentManager._bulk_insert_pivot_async(session, table, rows)
+        dialect = session.bind.dialect.name
+
+        if dialect == "postgresql":
+            if rows:
+                # separate upsert and delete operations
+                stmt = pg_insert(table).values(rows)
+                stmt = stmt.on_conflict_do_nothing()
+                await session.execute(stmt)
+
+                # delete rows not in new set
+                pk_names = [c.name for c in table.primary_key.columns]
+                new_keys = [tuple(r[c] for c in pk_names) for r in rows]
+                await session.execute(
+                    delete(table).where(table.c.agent_id == agent_id, ~tuple_(*[table.c[c] for c in pk_names]).in_(new_keys))
+                )
+            else:
+                # if no rows to insert, just delete all
+                await session.execute(delete(table).where(table.c.agent_id == agent_id))
+
+        elif dialect == "sqlite":
+            if rows:
+                stmt = sa.insert(table).values(rows).prefix_with("OR REPLACE")
+                await session.execute(stmt)
+
+            if rows:
+                primary_key_cols = [table.c[c.name] for c in table.primary_key.columns]
+                new_keys = [tuple(r[c.name] for c in table.primary_key.columns) for r in rows]
+                await session.execute(delete(table).where(table.c.agent_id == agent_id, ~tuple_(*primary_key_cols).in_(new_keys)))
+            else:
+                await session.execute(delete(table).where(table.c.agent_id == agent_id))
+
+        else:
+            # fallback: use original DELETE + INSERT pattern
+            await session.execute(delete(table).where(table.c.agent_id == agent_id))
+            if rows:
+                await AgentManager._bulk_insert_pivot_async(session, table, rows)
 
     # ======================================================================================================================
     # Basic CRUD operations
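
The Postgres and SQLite branches both follow the same upsert-then-prune shape, which avoids the window where the old DELETE + INSERT left the agent with no pivot rows at all. A self-contained sketch on a toy pivot table (synchronous SQLAlchemy and SQLite for brevity; table and column names are invented):

from sqlalchemy import Column, MetaData, String, Table, create_engine, delete, insert, select, tuple_

engine = create_engine("sqlite://")
meta = MetaData()
pivot = Table(
    "agent_tools",
    meta,
    Column("agent_id", String, primary_key=True),
    Column("tool_id", String, primary_key=True),
)
meta.create_all(engine)

def replace_rows(conn, agent_id, rows):
    if rows:
        # upsert the new set, then delete keys that are no longer present
        conn.execute(insert(pivot).prefix_with("OR REPLACE"), rows)
        pk_cols = [pivot.c.agent_id, pivot.c.tool_id]
        new_keys = [(r["agent_id"], r["tool_id"]) for r in rows]
        conn.execute(delete(pivot).where(pivot.c.agent_id == agent_id, ~tuple_(*pk_cols).in_(new_keys)))
    else:
        conn.execute(delete(pivot).where(pivot.c.agent_id == agent_id))

with engine.begin() as conn:
    replace_rows(conn, "a1", [{"agent_id": "a1", "tool_id": "t1"}, {"agent_id": "a1", "tool_id": "t2"}])
    replace_rows(conn, "a1", [{"agent_id": "a1", "tool_id": "t2"}])  # prunes t1
    print(conn.execute(select(pivot)).fetchall())  # [('a1', 't2')]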
letta/services/file_manager.py CHANGED
@@ -22,6 +22,15 @@ from letta.server.db import db_registry
 from letta.utils import enforce_types
 
 
+class DuplicateFileError(Exception):
+    """Raised when a duplicate file is encountered and error handling is specified"""
+
+    def __init__(self, filename: str, source_name: str):
+        self.filename = filename
+        self.source_name = source_name
+        super().__init__(f"File '{filename}' already exists in source '{source_name}'")
+
+
 class FileManager:
     """Manager class to handle business logic related to files."""
 
@@ -237,16 +246,16 @@ class FileManager:
     @trace_method
     async def generate_unique_filename(self, original_filename: str, source: PydanticSource, organization_id: str) -> str:
         """
-        Generate a unique filename by checking for duplicates and adding a numeric suffix if needed.
-        Similar to how filesystems handle duplicates (e.g., file.txt, file (1).txt, file (2).txt).
+        Generate a unique filename by adding a numeric suffix if duplicates exist.
+        Always returns a unique filename - does not handle duplicate policies.
 
         Parameters:
             original_filename (str): The original filename as uploaded.
-            source_id (str): Source ID to check for duplicates within.
+            source (PydanticSource): Source to check for duplicates within.
             organization_id (str): Organization ID to check for duplicates within.
 
         Returns:
-            str: A unique filename with numeric suffix if needed.
+            str: A unique filename with source.name prefix and numeric suffix if needed.
         """
         base, ext = os.path.splitext(original_filename)
 
@@ -271,9 +280,44 @@ class FileManager:
             # No duplicates, return original filename with source.name
             return f"{source.name}/{original_filename}"
         else:
-            # Add numeric suffix
+            # Add numeric suffix to make unique
             return f"{source.name}/{base}_({count}){ext}"
 
+    @enforce_types
+    @trace_method
+    async def get_file_by_original_name_and_source(
+        self, original_filename: str, source_id: str, actor: PydanticUser
+    ) -> Optional[PydanticFileMetadata]:
+        """
+        Get a file by its original filename and source ID.
+
+        Parameters:
+            original_filename (str): The original filename to search for.
+            source_id (str): The source ID to search within.
+            actor (PydanticUser): The actor performing the request.
+
+        Returns:
+            Optional[PydanticFileMetadata]: The file metadata if found, None otherwise.
+        """
+        async with db_registry.async_session() as session:
+            query = (
+                select(FileMetadataModel)
+                .where(
+                    FileMetadataModel.original_file_name == original_filename,
+                    FileMetadataModel.source_id == source_id,
+                    FileMetadataModel.organization_id == actor.organization_id,
+                    FileMetadataModel.is_deleted == False,
+                )
+                .limit(1)
+            )
+
+            result = await session.execute(query)
+            file_orm = result.scalar_one_or_none()
+
+            if file_orm:
+                return await file_orm.to_pydantic_async()
+            return None
+
     @enforce_types
     @trace_method
     async def get_organization_sources_metadata(self, actor: PydanticUser) -> OrganizationSourcesStats:
letta/services/file_processor/file_processor.py CHANGED
@@ -1,6 +1,7 @@
 from typing import List
 
 from letta.log import get_logger
+from letta.otel.context import get_ctx_attributes
 from letta.otel.tracing import log_event, trace_method
 from letta.schemas.agent import AgentState
 from letta.schemas.enums import FileProcessingStatus
@@ -122,6 +123,10 @@ class FileProcessor:
         if isinstance(content, str):
             content = content.encode("utf-8")
 
+        from letta.otel.metric_registry import MetricRegistry
+
+        MetricRegistry().file_process_bytes_histogram.record(len(content), attributes=get_ctx_attributes())
+
         if len(content) > self.max_file_size:
             log_event(
                 "file_processor.size_limit_exceeded",
@@ -1,7 +1,7 @@
 import ast
 import base64
 import pickle
-from typing import Any
+from typing import Any, Union
 
 from letta.constants import REQUEST_HEARTBEAT_DESCRIPTION, REQUEST_HEARTBEAT_PARAM, SEND_MESSAGE_TOOL_NAME
 from letta.schemas.agent import AgentState
@@ -9,7 +9,7 @@ from letta.schemas.response_format import ResponseFormatType, ResponseFormatUnio
 from letta.types import JsonDict, JsonValue
 
 
-def parse_stdout_best_effort(text: str | bytes) -> tuple[Any, AgentState | None]:
+def parse_stdout_best_effort(text: Union[str, bytes]) -> tuple[Any, AgentState | None]:
     """
     Decode and unpickle the result from the function execution if possible.
     Returns (function_return_value, agent_state).
letta/services/job_manager.py CHANGED
@@ -2,6 +2,7 @@ from functools import partial, reduce
 from operator import add
 from typing import List, Literal, Optional, Union
 
+from httpx import AsyncClient, post
 from sqlalchemy import select
 from sqlalchemy.orm import Session
 
@@ -95,6 +96,8 @@ class JobManager:
     @trace_method
     async def update_job_by_id_async(self, job_id: str, job_update: JobUpdate, actor: PydanticUser) -> PydanticJob:
         """Update a job by its ID with the given JobUpdate object asynchronously."""
+        callback_func = None
+
         async with db_registry.async_session() as session:
             # Fetch the job by ID
             job = await self._verify_job_access_async(session=session, job_id=job_id, actor=actor, access=["write"])
@@ -114,11 +117,23 @@ class JobManager:
                 logger.info(f"Current job completed at: {job.completed_at}")
                 job.completed_at = get_utc_time().replace(tzinfo=None)
                 if job.callback_url:
-                    await self._dispatch_callback_async(job)
+                    callback_func = self._dispatch_callback_async(
+                        callback_url=job.callback_url,
+                        payload={
+                            "job_id": job.id,
+                            "status": job.status,
+                            "completed_at": job.completed_at.isoformat() if job.completed_at else None,
+                            "metadata": job.metadata_,
+                        },
+                        actor=actor,
+                    )
 
             # Save the updated job to the database
             await job.update_async(db_session=session, actor=actor)
 
+        if callback_func:
+            return await callback_func
+
         return job.to_pydantic()
 
     @enforce_types
@@ -683,10 +698,8 @@ class JobManager:
             "metadata": job.metadata_,
         }
         try:
-            import httpx
-
             log_event("POST callback dispatched", payload)
-            resp = httpx.post(job.callback_url, json=payload, timeout=5.0)
+            resp = post(job.callback_url, json=payload, timeout=5.0)
             log_event("POST callback finished")
             job.callback_sent_at = get_utc_time().replace(tzinfo=None)
             job.callback_status_code = resp.status_code
@@ -700,31 +713,33 @@ class JobManager:
             # Continue silently - callback failures should not affect job completion
 
     @trace_method
-    async def _dispatch_callback_async(self, job: JobModel) -> None:
+    async def _dispatch_callback_async(self, callback_url: str, payload: dict, actor: PydanticUser) -> PydanticJob:
         """
         POST a standard JSON payload to job.callback_url and record timestamp + HTTP status asynchronously.
         """
-        payload = {
-            "job_id": job.id,
-            "status": job.status,
-            "completed_at": job.completed_at.isoformat() if job.completed_at else None,
-            "metadata": job.metadata_,
-        }
+        job_id = payload["job_id"]
+        callback_sent_at, callback_status_code, callback_error = None, None, None
 
        try:
-            import httpx
-
-            async with httpx.AsyncClient() as client:
+            async with AsyncClient() as client:
                 log_event("POST callback dispatched", payload)
-                resp = await client.post(job.callback_url, json=payload, timeout=5.0)
+                resp = await client.post(callback_url, json=payload, timeout=5.0)
                 log_event("POST callback finished")
                 # Ensure timestamp is timezone-naive for DB compatibility
-                job.callback_sent_at = get_utc_time().replace(tzinfo=None)
-                job.callback_status_code = resp.status_code
+                callback_sent_at = get_utc_time().replace(tzinfo=None)
+                callback_status_code = resp.status_code
        except Exception as e:
-            error_message = f"Failed to dispatch callback for job {job.id} to {job.callback_url}: {e!s}"
+            error_message = f"Failed to dispatch callback for job {job_id} to {callback_url}: {e!s}"
            logger.error(error_message)
            # Record the failed attempt
-            job.callback_sent_at = get_utc_time().replace(tzinfo=None)
-            job.callback_error = error_message
+            callback_sent_at = get_utc_time().replace(tzinfo=None)
+            callback_error = error_message
            # Continue silently - callback failures should not affect job completion
+
+        async with db_registry.async_session() as session:
+            job = await JobModel.read_async(db_session=session, identifier=job_id, actor=actor, access_type=AccessType.USER)
+            job.callback_sent_at = callback_sent_at
+            job.callback_status_code = callback_status_code
+            job.callback_error = callback_error
+            await job.update_async(db_session=session, actor=actor)
+            return job.to_pydantic()
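
The net effect of this refactor: the callback coroutine is created while the job row is updated, but awaited only after that session closes, and the callback result is persisted in a second, short-lived session, so a slow webhook never pins a DB connection. The bare shape of the pattern, with hypothetical stand-ins for the session factory and HTTP call:

# Sketch: defer slow I/O until the DB session is released.
async def update_and_notify(open_session, do_callback):
    pending = None
    async with open_session() as session:
        # ... mutate and persist the row inside the session ...
        pending = do_callback()  # coroutine created, NOT awaited yet
    if pending is not None:
        return await pending  # HTTP (and its own follow-up session) happen afterwards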
@@ -8,6 +8,7 @@ from typing import Any, Dict, Optional
 
 from pydantic.config import JsonDict
 
+from letta.log import get_logger
 from letta.otel.tracing import log_event, trace_method
 from letta.schemas.agent import AgentState
 from letta.schemas.sandbox_config import SandboxConfig, SandboxType
@@ -23,6 +24,8 @@ from letta.services.tool_sandbox.base import AsyncToolSandboxBase
 from letta.settings import tool_settings
 from letta.utils import get_friendly_error_msg, parse_stderr_error_msg
 
+logger = get_logger(__name__)
+
 
 class AsyncToolSandboxLocal(AsyncToolSandboxBase):
     METADATA_CONFIG_STATE_KEY = "config_state"
@@ -240,9 +243,9 @@ class AsyncToolSandboxLocal(AsyncToolSandboxBase):
             if isinstance(e, TimeoutError):
                 raise e
 
-            print(f"Subprocess execution for tool {self.tool_name} encountered an error: {e}")
-            print(e.__class__.__name__)
-            print(e.__traceback__)
+            logger.error(f"Subprocess execution for tool {self.tool_name} encountered an error: {e}")
+            logger.error(e.__class__.__name__)
+            logger.error(e.__traceback__)
             func_return = get_friendly_error_msg(
                 function_name=self.tool_name,
                 exception_name=type(e).__name__,
@@ -24,8 +24,32 @@ agent_state = {{ 'pickle.loads(' ~ agent_state_pickle ~ ')' if agent_state_pickl
 {{ tool_source_code }}
 
 {# Invoke the function and store the result in a global variable #}
+_function_result = {{ invoke_function_call }}
+
+{# Use a temporary Pydantic wrapper to recursively serialize any nested Pydantic objects #}
+try:
+    from pydantic import BaseModel
+    from typing import Any
+
+    class _TempResultWrapper(BaseModel):
+        result: Any
+
+        class Config:
+            arbitrary_types_allowed = True
+
+    _wrapped = _TempResultWrapper(result=_function_result)
+    _serialized_result = _wrapped.model_dump()['result']
+except ImportError:
+    # Pydantic not available in sandbox, fall back to string conversion
+    print("Pydantic not available in sandbox environment, falling back to string conversion")
+    _serialized_result = str(_function_result)
+except Exception as e:
+    # If wrapping fails, print the error and stringify the result
+    print(f"Failed to serialize result with Pydantic wrapper: {e}")
+    _serialized_result = str(_function_result)
+
 {{ local_sandbox_result_var_name }} = {
-    "results": {{ invoke_function_call }},
+    "results": _serialized_result,
     "agent_state": agent_state
 }
@@ -26,9 +26,32 @@ agent_state = {{ 'pickle.loads(' ~ agent_state_pickle ~ ')' if agent_state_pickl
 
 {# Async wrapper to handle the function call and store the result #}
 async def _async_wrapper():
-    result = await {{ invoke_function_call }}
+    _function_result = await {{ invoke_function_call }}
+
+    {# Use a temporary Pydantic wrapper to recursively serialize any nested Pydantic objects #}
+    try:
+        from pydantic import BaseModel
+        from typing import Any
+
+        class _TempResultWrapper(BaseModel):
+            result: Any
+
+            class Config:
+                arbitrary_types_allowed = True
+
+        _wrapped = _TempResultWrapper(result=_function_result)
+        _serialized_result = _wrapped.model_dump()['result']
+    except ImportError:
+        # Pydantic not available in sandbox, fall back to string conversion
+        print("Pydantic not available in sandbox environment, falling back to string conversion")
+        _serialized_result = str(_function_result)
+    except Exception as e:
+        # If wrapping fails, print the error and stringify the result
+        print(f"Failed to serialize result with Pydantic wrapper: {e}")
+        _serialized_result = str(_function_result)
+
     return {
-        "results": result,
+        "results": _serialized_result,
         "agent_state": agent_state
     }
 
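Both templates rely on Pydantic's model_dump() recursing into BaseModel instances even when they sit under an Any-typed field. A standalone check of that behavior:

from typing import Any

from pydantic import BaseModel

class Inner(BaseModel):
    x: int

class _TempResultWrapper(BaseModel):
    result: Any

    class Config:
        arbitrary_types_allowed = True

wrapped = _TempResultWrapper(result={"items": [Inner(x=1), Inner(x=2)]})
print(wrapped.model_dump()["result"])  # {'items': [{'x': 1}, {'x': 2}]}
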
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: letta-nightly
-Version: 0.8.12.dev20250710104356
+Version: 0.8.13.dev20250710230421
 Summary: Create LLM agents with long-term memory and custom tools
 License: Apache License
 Author: Letta Team
@@ -19,13 +19,13 @@ Provides-Extra: dev
 Provides-Extra: experimental
 Provides-Extra: external-tools
 Provides-Extra: google
+Provides-Extra: pinecone
 Provides-Extra: postgres
 Provides-Extra: redis
 Provides-Extra: server
 Provides-Extra: tests
 Requires-Dist: aioboto3 (>=14.3.0,<15.0.0) ; extra == "bedrock"
 Requires-Dist: aiomultiprocess (>=0.9.1,<0.10.0)
-Requires-Dist: aiosqlite (>=0.21.0,<0.22.0)
 Requires-Dist: alembic (>=1.13.3,<2.0.0)
 Requires-Dist: anthropic (>=0.49.0,<0.50.0)
 Requires-Dist: apscheduler (>=3.11.0,<4.0.0)
@@ -56,7 +56,7 @@ Requires-Dist: isort (>=5.13.2,<6.0.0) ; extra == "dev" or extra == "all"
 Requires-Dist: jinja2 (>=3.1.5,<4.0.0)
 Requires-Dist: langchain (>=0.3.7,<0.4.0) ; extra == "external-tools" or extra == "desktop" or extra == "all"
 Requires-Dist: langchain-community (>=0.3.7,<0.4.0) ; extra == "external-tools" or extra == "desktop" or extra == "all"
-Requires-Dist: letta_client (>=0.1.183,<0.2.0)
+Requires-Dist: letta_client (>=0.1.197,<0.2.0)
 Requires-Dist: llama-index (>=0.12.2,<0.13.0)
 Requires-Dist: llama-index-embeddings-openai (>=0.3.1,<0.4.0)
 Requires-Dist: locust (>=2.31.5,<3.0.0) ; extra == "dev" or extra == "desktop" or extra == "all"
@@ -75,7 +75,7 @@ Requires-Dist: pathvalidate (>=3.2.1,<4.0.0)
 Requires-Dist: pexpect (>=4.9.0,<5.0.0) ; extra == "dev" or extra == "all"
 Requires-Dist: pg8000 (>=1.30.3,<2.0.0) ; extra == "postgres" or extra == "desktop" or extra == "all"
 Requires-Dist: pgvector (>=0.2.3,<0.3.0) ; extra == "postgres" or extra == "desktop" or extra == "all"
-Requires-Dist: pinecone[asyncio] (>=7.3.0,<8.0.0)
+Requires-Dist: pinecone[asyncio] (>=7.3.0,<8.0.0) ; extra == "pinecone" or extra == "all"
 Requires-Dist: pre-commit (>=3.5.0,<4.0.0) ; extra == "dev" or extra == "all"
 Requires-Dist: prettytable (>=3.9.0,<4.0.0)
 Requires-Dist: psycopg2 (>=2.9.10,<3.0.0) ; extra == "postgres" or extra == "desktop" or extra == "all"
@@ -1,12 +1,12 @@
-letta/__init__.py,sha256=O18yG1Lw_7vBqjWzHjZ1QG1xU3TTjRvuxL3iG4G-GxM,1222
+letta/__init__.py,sha256=P44L6tH5GEgZx3hAnIaAoBytfTCJGpqFrCashKMQmZ8,1222
 letta/agent.py,sha256=esW2W5hBzO7aPr7ghEDb_fLnUxgYqBYDq_VWtQDrB0c,89153
 letta/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-letta/agents/base_agent.py,sha256=Z1jgCTMFRTLnaLRcfdo8TmsP8tuCYqNcOM8ov9kviMA,6869
+letta/agents/base_agent.py,sha256=35JcOjA6FUuQRIf4CiPnJSwgniCaDrjM-fVlWmIk68E,7766
 letta/agents/ephemeral_agent.py,sha256=el-SUF_16vv_7OouIR-6z0pAE9Yc0PLibygvfCKwqfo,2736
 letta/agents/ephemeral_summary_agent.py,sha256=tOldA_daa_PduTJ2RA7fAo9Rv6sUb-C_9dJaD6iujS4,4454
 letta/agents/exceptions.py,sha256=BQY4D4w32OYHM63CM19ko7dPwZiAzUs3NbKvzmCTcJg,318
 letta/agents/helpers.py,sha256=reSrQCEgIz8wE2FKIr-Gm6jsJeihS607BPqFVs_jaK0,10025
-letta/agents/letta_agent.py,sha256=71KuwatbeW3d8KY2aOOARKuelKBjwB3jMm_mfB-5EBM,56825
+letta/agents/letta_agent.py,sha256=5xpKc9qfbjqdkbG5kzjYJ7MdfRAOLeYfHGj0jbmWZGA,57723
 letta/agents/letta_agent_batch.py,sha256=cl9_nZYflIZWR23D_x_fUpmMHYITDWu0FUfPW1ivDuw,28031
 letta/agents/voice_agent.py,sha256=73zvqCivmKIW-5yQcRziuUqA3EDBGcfgHu3pKGzx0lc,23465
 letta/agents/voice_sleeptime_agent.py,sha256=ByRRoH115SaDr0ORMimQdYBWdz6c2RlFWQjVrFiGniQ,8743
@@ -25,7 +25,7 @@ letta/data_sources/redis_client.py,sha256=6hQLdfsJ3ejNvLuA9Md7YXisgjqBiaw-Yl89xf
 letta/embeddings.py,sha256=KvC2bl5tARpVY9xcFmw4Cwu1vN0DoH266v2mSUZqwkY,10528
 letta/errors.py,sha256=Ef10b2cHXJJ04rXBmOx4Yrr4qwqCh8bAImgoUExahEo,7063
 letta/functions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-letta/functions/ast_parsers.py,sha256=lEmjIzGrTjlDJFAX-gjg0dU2CIBu6Vo4msSq-fhhup4,4969
+letta/functions/ast_parsers.py,sha256=0dXAN4qx3pWL_Y0aoEkaBpMKwI-kpoLEJftjW3v2I4E,5031
 letta/functions/async_composio_toolset.py,sha256=IuhZTVghPDXRsehOOZsEEiJGYyjWjDTQc2xrjTg0yBo,4786
 letta/functions/composio_helpers.py,sha256=mpybCYcB93HWoKrmQIqcuRQG9IH2lHWhsPQx2i8XP_8,3593
 letta/functions/function_sets/base.py,sha256=FS-LRbvzO-duSUy0yLP_fBk2WSs4NAaaTAUuhl2ZS-I,16154
@@ -57,9 +57,9 @@ letta/helpers/composio_helpers.py,sha256=MwfmLt7tgjvxAXLHpx9pa5QolxcqoCbofb-30-D
 letta/helpers/converters.py,sha256=_-6Ke5ZUtaKYmh8SncGj1ejTG3GyKhZ4ByVCrlcHsOI,15026
 letta/helpers/datetime_helpers.py,sha256=8AwZInX-NX_XQiqej2arozYqfC2ysnWpCJ9ETv8RdL0,4381
 letta/helpers/decorators.py,sha256=jyywXMxO5XPDSe93ybVXIOjTWkGX514S9BMcy_gP0j8,5891
-letta/helpers/json_helpers.py,sha256=PWZ5HhSqGXO4e563dM_8M72q7ScirjXQ4Rv1ckohaV8,396
+letta/helpers/json_helpers.py,sha256=9W_1dhNnXWdQLiZD3tO9047cB2ATrCAYVHnYGvT8Ke0,470
 letta/helpers/message_helper.py,sha256=Xzf_VCMAXT0Ys8LVUh1ySVtgJwabSQYksOdPr7P4EJU,3549
-letta/helpers/pinecone_utils.py,sha256=LMfrynzenK_IuVGEsZmULz4AAtZ58Wof02ENj-NwtLQ,6511
+letta/helpers/pinecone_utils.py,sha256=cF5VeEmENCepbaNT7_cEpiHU3U0x-32g0AG1d4OF73A,7461
 letta/helpers/singleton.py,sha256=Y4dG_ZBCcrogvl9iZ69bSLq-QltrdP8wHqKkhef8OBI,370
 letta/helpers/tool_execution_helper.py,sha256=BgBgVLZzbc-JTdOGwyU9miV_-zM3A30jkMpwH1otxaU,7599
 letta/helpers/tool_rule_solver.py,sha256=avRMQzqxE2r6gRvw7oTImYmkSvuoMHlADPND0__feBw,11620
@@ -75,7 +75,7 @@ letta/interfaces/utils.py,sha256=c6jvO0dBYHh8DQnlN-B0qeNC64d3CSunhfqlFA4pJTY,278
 letta/jobs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/jobs/helpers.py,sha256=kO4aj954xsQ1RAmkjY6LQQ7JEIGuhaxB1e9pzrYKHAY,914
 letta/jobs/llm_batch_job_polling.py,sha256=HUCTa1lTOiLAB_8m95RUfeNJa4lxlF8paGdCV1NqOeA,10413
-letta/jobs/scheduler.py,sha256=_KkOp0uL8C7WpZEwCvHYtvtEjU6RBFXzcS8dnv7fhUM,8947
+letta/jobs/scheduler.py,sha256=Ub5VTCA8P5C9Y-0mPK2YIPJSEzKbSd2l5Sp0sOWctD8,8697
 letta/jobs/types.py,sha256=K8GKEnqEgAT6Kq4F2hUrBC4ZAFM9OkfOjVMStzxKuXQ,742
 letta/llm_api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/llm_api/anthropic.py,sha256=tbMy4483TySrEmbXD3juM6TpPRrV9_M3Fgp59sDBcqE,47935
@@ -180,7 +180,7 @@ letta/orm/user.py,sha256=rK5N5ViDxmesZMqVVHB7FcQNpcSoM-hB42MyI6q3MnI,1004
 letta/otel/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/otel/context.py,sha256=GUTxFpWMdCmib1Qy80TpAs0Qb5fNH_m5sCYZ1LN3HmM,789
 letta/otel/events.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-letta/otel/metric_registry.py,sha256=QWgOzHt-UoeQetYB8o0aXzrIspyNnsDvBFx8jNIw8LI,5493
+letta/otel/metric_registry.py,sha256=DDsN0edcbs-BgVo_FPvcx8Sdpz10N0u6YaIFuHI2PpU,6039
 letta/otel/metrics.py,sha256=GlIt8XLkP-igTXptah8UBonpHF7nEtSqTONSkAEERAs,4740
 letta/otel/resource.py,sha256=3T8dcmHMxsgSi-kKIGFW1T56EkZbdbbWPkICH0kba7I,719
 letta/otel/tracing.py,sha256=EoSA5WxWpcHrDZhM1h7mplu3RyHuxfbb4Atw5LzR7yI,8840
@@ -232,7 +232,7 @@ letta/schemas/agent.py,sha256=vu3-sPanrFmXueh2FgS2h-BlllXr4tB-KNzrucZLdf4,25231
 letta/schemas/block.py,sha256=awxCQKxmv4I4k9Au5h-a2RCeSVF54EfWyBQPtHRwuNQ,5585
 letta/schemas/embedding_config.py,sha256=huMcqUbSUDwAbd7IkjzxDSmOxGCJG_0eMqPqLj6B8JE,3886
 letta/schemas/embedding_config_overrides.py,sha256=lkTa4y-EQ2RnaEKtKDM0sEAk7EwNa67REw8DGNNtGQY,84
-letta/schemas/enums.py,sha256=Vx6qTUtOTphAkqJ932yXxsWELcrmV2XH2avvyOhQZ4E,3295
+letta/schemas/enums.py,sha256=QU6WneYqWtPqL9Z8o8wEw55uheS8df8dRPc7W7-naXI,3606
 letta/schemas/environment_variables.py,sha256=VRtzOjdeQdHcSHXisk7oJUQlheruxhSWNS0xqlfGzbs,2429
 letta/schemas/file.py,sha256=lJFQfUCf6iqtbyqxynL98fupDobnRPsoc2GrhgYJaig,4595
 letta/schemas/group.py,sha256=0qFbCvE5gbdSAk1oXXT8xWQ02R4mS_jttJm0ASh8eCQ,6415
@@ -267,7 +267,7 @@ letta/schemas/sandbox_config.py,sha256=thI4p7R4nnW1W-F_PBNkpmyHXpSH_lorlQX8YxDXS
 letta/schemas/source.py,sha256=ZDeTjkNp1rKamG7xZzoUHeCptjpW9WNLzAcJ9QQRxlM,3444
 letta/schemas/source_metadata.py,sha256=_dGjuXhGcVMlc53ja9yuk16Uj64ggEzilRDgmkqYfNs,1334
 letta/schemas/step.py,sha256=QudHSpLMcNVC-oI26Uy48lsp3FOrcd3JYAp5ubDEIHY,2651
-letta/schemas/tool.py,sha256=C2HdnmwrjAWoBjB8H2lpO8oIys3HlkRrRWtR8uMcUfc,14375
+letta/schemas/tool.py,sha256=DO0kPVQWR5_A6YHQ4VJbsyWwwjEgvYyT4yBHaZX4uxE,14397
 letta/schemas/tool_execution_result.py,sha256=4P77llsUsZBnRd0PtPiC4VzGjx7i_-fUNgXQfCpMS9U,896
 letta/schemas/tool_rule.py,sha256=dJ-qNDy0LneTt_DhKXsRyC9NAJxZ_aWY3IRpzGuH_sY,10910
 letta/schemas/usage.py,sha256=9SSTH5kUliwiVF14b-yKbDcmxQBOLg4YH5xhXDbW9UU,1281
@@ -287,7 +287,7 @@ letta/server/constants.py,sha256=yAdGbLkzlOU_dLTx0lKDmAnj0ZgRXCEaIcPJWO69eaE,92
 letta/server/db.py,sha256=q5wCpTxpcbutk2HuF2ZpyhlPGCdUuLpRFZ0QE5VkZ38,11751
 letta/server/generate_openapi_schema.sh,sha256=0OtBhkC1g6CobVmNEd_m2B6sTdppjbJLXaM95icejvE,371
 letta/server/rest_api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-letta/server/rest_api/app.py,sha256=JEJlOKBPKfnkQeM6BAd5Olf4EM7O2TKD4_MHObFuci8,18084
+letta/server/rest_api/app.py,sha256=zzK9W4ZcosJvNtWjvWr8aHPKA-v3u1G2wQlEnX_sdWQ,18165
 letta/server/rest_api/auth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/server/rest_api/auth/index.py,sha256=fQBGyVylGSRfEMLQ17cZzrHd5Y1xiVylvPqH5Rl-lXQ,1378
 letta/server/rest_api/auth_token.py,sha256=725EFEIiNj4dh70hrSd94UysmFD8vcJLrTRfNHkzxDo,774
@@ -298,7 +298,7 @@ letta/server/rest_api/routers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5N
 letta/server/rest_api/routers/openai/chat_completions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/server/rest_api/routers/openai/chat_completions/chat_completions.py,sha256=QBWab1fn2LXVDMtc6li3gOzmrNzDiUw5WUJsMeeMZII,5076
 letta/server/rest_api/routers/v1/__init__.py,sha256=JfSSttkEWu0W18NVVDxl8AGnd8Qhj0BXJNxntOB7070,1768
-letta/server/rest_api/routers/v1/agents.py,sha256=i0zS-tEZlMf2gfVy-NOq-4mgicJHQBpqRJTLsRUU6L4,52709
+letta/server/rest_api/routers/v1/agents.py,sha256=KuQB05DedF7wg72PD4dKz-W06QAjVJutUX6FB0dW9HM,55520
 letta/server/rest_api/routers/v1/blocks.py,sha256=MArBBnC7k9bc-Z1xMf46aH4ij6qhKAQAOoK9KjiXatU,5257
 letta/server/rest_api/routers/v1/embeddings.py,sha256=PRaQlrmEXPiIdWsTbadrFsv3Afyv5oEFUdhgHA8FTi8,989
 letta/server/rest_api/routers/v1/groups.py,sha256=kR_oAuwPd9q-DaeK4Q6Xqu1XlXTXkwEvf2hH7tOiVuw,10978
@@ -311,7 +311,7 @@ letta/server/rest_api/routers/v1/organizations.py,sha256=5NEjTOdGKWrfN584jfPpJhA
 letta/server/rest_api/routers/v1/providers.py,sha256=8SJ_RsSk7L4nh1f_uFE31JOxefmGhOfN-fMJ0Sp6SJo,4353
 letta/server/rest_api/routers/v1/runs.py,sha256=vieUp7uTvRTdAte0Nw1bqX2APMATZhKTr2R1HVNJT74,8879
 letta/server/rest_api/routers/v1/sandbox_configs.py,sha256=pKuy88GD3atrBkKa7VVfKTjg8Y07e1vVtdw4TtxkQBk,8910
-letta/server/rest_api/routers/v1/sources.py,sha256=RfCp7XCFpEIF6eTHfbKXvE7mAdHFUrpDU_AXbwvJj3o,19584
+letta/server/rest_api/routers/v1/sources.py,sha256=ImunBc5PKO5sFtYLtJ1qOZ0kt5ZXckfr242qtWJrP30,20768
 letta/server/rest_api/routers/v1/steps.py,sha256=N863b0Oyzz64rKHqpyQnXEQBw0SCQ8kAxWaZ7huV1Rk,4925
 letta/server/rest_api/routers/v1/tags.py,sha256=ef94QitUSJ3NQVffWF1ZqANUZ2b2jRyGHp_I3UUjhno,912
 letta/server/rest_api/routers/v1/telemetry.py,sha256=z53BW3Pefi3eWy47FPJyGhFWbZicX9jPJUi5LC5c3sk,790
@@ -335,12 +335,12 @@ letta/server/ws_api/interface.py,sha256=TWl9vkcMCnLsUtgsuENZ-ku2oMDA-OUTzLh_yNRo
 letta/server/ws_api/protocol.py,sha256=5mDgpfNZn_kNwHnpt5Dsuw8gdNH298sgxTGed3etzYg,1836
 letta/server/ws_api/server.py,sha256=cBSzf-V4zT1bL_0i54OTI3cMXhTIIxqjSRF8pYjk7fg,5835
 letta/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-letta/services/agent_manager.py,sha256=7xdWIchSUmlPCnNRsXjzd-9EKyk05t9WpGaerMwJLkg,121953
+letta/services/agent_manager.py,sha256=yZ52BHJ0XyngIQUHkAQQ9cSh73c3WFwP4uezmyf7u8w,123384
 letta/services/block_manager.py,sha256=7EliXd0-LpSRwD2LbyjFpH5uiBdrtdZ6YLgb2_wKs3o,22905
 letta/services/context_window_calculator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/services/context_window_calculator/context_window_calculator.py,sha256=H0-Ello1DHV28MnzMseWrg--jarDc6YwCcgwPlWjtZk,6527
 letta/services/context_window_calculator/token_counter.py,sha256=Ai9-aPkNvhhMTj9zlvdiQAdVqroTzIyAn0TrHpHNQZY,2954
-letta/services/file_manager.py,sha256=Zm0wK4pkKz_rkPtANZCaxp2mXgi6rWg9moJ-nic6Bms,14277
+letta/services/file_manager.py,sha256=yxBRGKBpCcV7FnllMymYGJheUsfttbI6s-zUr3G3CKo,15938
 letta/services/file_processor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/services/file_processor/chunker/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/services/file_processor/chunker/line_chunker.py,sha256=m02molsKXU_RUEebbHhMA6LNxg3JmFlCTOuX6kZcz3E,7024
@@ -349,7 +349,7 @@ letta/services/file_processor/embedder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JC
 letta/services/file_processor/embedder/base_embedder.py,sha256=cuHF2kAlBFL9Hr63Q5vJQYYrfyDNtm31vYvW5boUQ58,518
 letta/services/file_processor/embedder/openai_embedder.py,sha256=qafYDdbbBDCv5Mg-gdZozc5qFCdraaG8B8OCLd8_3vY,5715
 letta/services/file_processor/embedder/pinecone_embedder.py,sha256=O33NGvDyOG07Iz-tEhZDu_PKq7NfWIaBzjJuLi8hDiU,2841
352
- letta/services/file_processor/file_processor.py,sha256=E2lEoootYA8mAgNKGq2u2KCVqJqg8QCAO3-3pVaV60U,10047
352
+ letta/services/file_processor/file_processor.py,sha256=vjl_pcwqDIMOhDDGbJQuL4oUXRVHm_o_mgy7kXQUanQ,10277
353
353
  letta/services/file_processor/file_types.py,sha256=9k3Lt_bquQjJ7T6L12fPS9IS5wldhJ2puSkH6rhfCaE,13128
354
354
  letta/services/file_processor/parser/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
355
355
  letta/services/file_processor/parser/base_parser.py,sha256=WfnXP6fL-xQz4eIHEWa6-ZNEAARbF_alowqH4BAUzJo,238
@@ -359,9 +359,9 @@ letta/services/files_agents_manager.py,sha256=4o9GtgTpmPvpppvgcTJi8MqPDsGwDnt6Cu
359
359
  letta/services/group_manager.py,sha256=X2gKKUGKTXGRMC8YjwmE6EOB1cVM4lo31eCnmog7dPQ,23368
360
360
  letta/services/helpers/agent_manager_helper.py,sha256=8of5EgKA9-Y-s0ovD0NwTnOiaW-z8kBlKleZqnrwy5g,44034
361
361
  letta/services/helpers/tool_execution_helper.py,sha256=45L7woJ98jK5MQAnhE_4NZdCeyOOzC4328FTQPM7iTA,9159
362
- letta/services/helpers/tool_parser_helper.py,sha256=EI5tcre-D5U3mEzIMhfkAGlUwYckW1JlCJ-iqwoTTrc,4336
362
+ letta/services/helpers/tool_parser_helper.py,sha256=_3oAVRVfRaicGpO6qRKAlCAujZw2uBGUclei4FUC6Do,4349
363
363
  letta/services/identity_manager.py,sha256=L8EYGYXA9sveLwPCTYZIdYZwOMnHex47TBiMYcco_y4,10575
364
- letta/services/job_manager.py,sha256=JVmMF9N7pQlbb4zcXMvpDLqTYEhxk8lT18c0U0uYVTo,27922
364
+ letta/services/job_manager.py,sha256=TQZLxPtoF9BzSpLaMgA1xD7PZ_qjONTjM48bID88W30,28803
365
365
  letta/services/llm_batch_manager.py,sha256=SJXzlhg9yu8EIal8WiZFRez-CMhqUVxEW42cQPxG1OQ,20824
366
366
  letta/services/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
367
367
  letta/services/mcp/base_client.py,sha256=8PtauZGtXU2pks5Beggz4BOoE_Bas3LDeBXv1xNLfJ0,4461
@@ -397,21 +397,21 @@ letta/services/tool_manager.py,sha256=6VI3mjXa6-vrPA74Aar_-ti9Rn3EEBq5TIiayyzoVA
397
397
  letta/services/tool_sandbox/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
398
398
  letta/services/tool_sandbox/base.py,sha256=Vt4CnxuY5otUD6Kv8PpJNrAtl9eI8tjfcwkOdtUFwKg,7917
399
399
  letta/services/tool_sandbox/e2b_sandbox.py,sha256=TrWWav56H1AsnaKgNZuq0RI-FeWHOZvOubtUywPH72s,11125
400
- letta/services/tool_sandbox/local_sandbox.py,sha256=mDMoBUl0asu03p7gp2BlzBCna68esMqIRwWF_dzteCA,11937
400
+ letta/services/tool_sandbox/local_sandbox.py,sha256=P6FH-2zNq3MQSuHZe790-G982c7rq2gwOUee-U5hlgk,12022
401
401
  letta/services/user_manager.py,sha256=Neik-mxXgf9jc9jBiiBIlK38UukJonUy9NRS2soFR98,10405
402
402
  letta/settings.py,sha256=lWaLL1t06s9pp4VK5ojQvTiI7D85VcJPjaVKak5LFxs,11304
403
403
  letta/streaming_interface.py,sha256=c-T7zoMTXGXFwDWJJXrv7UypeMPXwPOmNHeuuh0b9zk,16398
404
404
  letta/streaming_utils.py,sha256=jLqFTVhUL76FeOuYk8TaRQHmPTf3HSRc2EoJwxJNK6U,11946
405
405
  letta/system.py,sha256=-cfh9Xpl2Ef_H7N3oZQtNuJqb1EEskdDXNa-VwKsF0A,8977
406
406
  letta/templates/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
407
- letta/templates/sandbox_code_file.py.j2,sha256=zgzaboDZVtM15XkxILnhiKisF7DSUoI2YpfK2xF2WB0,1379
408
- letta/templates/sandbox_code_file_async.py.j2,sha256=hL6UWt4L16o79OPOBq1_Cw7gR5-gpaR_esbmU8bSp8w,1805
407
+ letta/templates/sandbox_code_file.py.j2,sha256=-tLXbRjWaXGhj82oVKbRRMOakTRcuy5bwBTa_ak1GjU,2260
408
+ letta/templates/sandbox_code_file_async.py.j2,sha256=TmhqHHm83jNVPm2azkhxCygEJzMEczzPlDjqrkP5HAs,2742
409
409
  letta/templates/summary_request_text.j2,sha256=ZttQwXonW2lk4pJLYzLK0pmo4EO4EtUUIXjgXKiizuc,842
410
410
  letta/templates/template_helper.py,sha256=uHWO1PukgMoIIvgqQdPyHq3o3CQ6mcjUjTGvx9VLGkk,409
411
411
  letta/types/__init__.py,sha256=hokKjCVFGEfR7SLMrtZsRsBfsC7yTIbgKPLdGg4K1eY,147
412
412
  letta/utils.py,sha256=4segcFYPNsPrzMpiouYoV6Qzj4TIHuqtCyzVwAMildM,36172
413
- letta_nightly-0.8.12.dev20250710104356.dist-info/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
414
- letta_nightly-0.8.12.dev20250710104356.dist-info/METADATA,sha256=kL1xk74S1rsRef_Byt0LgX5jXjWf1JMHMGWvX2ShA7k,22892
415
- letta_nightly-0.8.12.dev20250710104356.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
416
- letta_nightly-0.8.12.dev20250710104356.dist-info/entry_points.txt,sha256=2zdiyGNEZGV5oYBuS-y2nAAgjDgcC9yM_mHJBFSRt5U,40
417
- letta_nightly-0.8.12.dev20250710104356.dist-info/RECORD,,
413
+ letta_nightly-0.8.13.dev20250710230421.dist-info/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
414
+ letta_nightly-0.8.13.dev20250710230421.dist-info/METADATA,sha256=X9rQMXAA8UdRgxmuf86otSrDA5afl8VJQNUvyfF_P-I,22913
415
+ letta_nightly-0.8.13.dev20250710230421.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
416
+ letta_nightly-0.8.13.dev20250710230421.dist-info/entry_points.txt,sha256=2zdiyGNEZGV5oYBuS-y2nAAgjDgcC9yM_mHJBFSRt5U,40
417
+ letta_nightly-0.8.13.dev20250710230421.dist-info/RECORD,,
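
For readers who want to confirm which files actually changed on disk, each RECORD entry above has the form path,sha256=digest,size, where the digest is the urlsafe-base64-encoded SHA-256 of the file with the trailing '=' padding stripped (PEP 376 / PEP 427); note that every size-0 file in the listing carries the same digest, that of the empty byte string. Below is a minimal Python sketch for recomputing an entry's digest against an unpacked wheel; the helper name record_digest and the example path are ours, not part of the package.

import base64
import hashlib
from pathlib import Path

def record_digest(path: Path) -> str:
    # Wheel RECORD digest: urlsafe base64 of the raw SHA-256, '=' padding stripped.
    raw = hashlib.sha256(path.read_bytes()).digest()
    return "sha256=" + base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")

# Sanity check: the digest of zero bytes matches every size-0 entry above.
empty = hashlib.sha256(b"").digest()
assert base64.urlsafe_b64encode(empty).rstrip(b"=").decode("ascii") == \
    "47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU"

# Example (hypothetical local path, relative to the unpacked 0.8.13 wheel root):
# record_digest(Path("letta/schemas/enums.py")) should yield
# "sha256=QU6WneYqWtPqL9Z8o8wEw55uheS8df8dRPc7W7-naXI" per the new RECORD.

Comparing recomputed digests against both RECORDs is a quick way to confirm that only the files marked with -/+ above differ between the two nightlies.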