letta-nightly 0.12.0.dev20251009104148__py3-none-any.whl → 0.12.0.dev20251009203644__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -279,9 +279,11 @@ class AnthropicStreamingInterface:
  if prev_message_type and prev_message_type != "tool_call_message":
  message_index += 1
  if self.tool_call_name not in self.requires_approval_tools:
+ tool_call_delta = ToolCallDelta(name=self.tool_call_name, tool_call_id=self.tool_call_id)
  tool_call_msg = ToolCallMessage(
  id=self.letta_message_id,
- tool_call=ToolCallDelta(name=self.tool_call_name, tool_call_id=self.tool_call_id),
+ tool_call=tool_call_delta,
+ tool_calls=tool_call_delta,
  date=datetime.now(timezone.utc).isoformat(),
  otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
  run_id=self.run_id,
@@ -423,15 +425,17 @@ class AnthropicStreamingInterface:
  tool_call_args += buffered_msg.tool_call.arguments if buffered_msg.tool_call.arguments else ""
  tool_call_args = tool_call_args.replace(f'"{INNER_THOUGHTS_KWARG}": "{current_inner_thoughts}"', "")

+ tool_call_delta = ToolCallDelta(
+ name=self.tool_call_name,
+ tool_call_id=self.tool_call_id,
+ arguments=tool_call_args,
+ )
  tool_call_msg = ToolCallMessage(
  id=self.tool_call_buffer[0].id,
  otid=Message.generate_otid_from_id(self.tool_call_buffer[0].id, message_index),
  date=self.tool_call_buffer[0].date,
- tool_call=ToolCallDelta(
- name=self.tool_call_name,
- tool_call_id=self.tool_call_id,
- arguments=tool_call_args,
- ),
+ tool_call=tool_call_delta,
+ tool_calls=tool_call_delta,
  run_id=self.run_id,
  )
  prev_message_type = tool_call_msg.message_type
@@ -467,9 +471,13 @@ class AnthropicStreamingInterface:
  run_id=self.run_id,
  )
  else:
+ tool_call_delta = ToolCallDelta(
+ name=self.tool_call_name, tool_call_id=self.tool_call_id, arguments=delta.partial_json
+ )
  tool_call_msg = ToolCallMessage(
  id=self.letta_message_id,
- tool_call=ToolCallDelta(name=self.tool_call_name, tool_call_id=self.tool_call_id, arguments=delta.partial_json),
+ tool_call=tool_call_delta,
+ tool_calls=tool_call_delta,
  date=datetime.now(timezone.utc).isoformat(),
  run_id=self.run_id,
  )
@@ -778,9 +786,11 @@ class SimpleAnthropicStreamingInterface:
  else:
  if prev_message_type and prev_message_type != "tool_call_message":
  message_index += 1
+ tool_call_delta = ToolCallDelta(name=self.tool_call_name, tool_call_id=self.tool_call_id)
  tool_call_msg = ToolCallMessage(
  id=self.letta_message_id,
- tool_call=ToolCallDelta(name=self.tool_call_name, tool_call_id=self.tool_call_id),
+ tool_call=tool_call_delta,
+ tool_calls=tool_call_delta,
  date=datetime.now(timezone.utc).isoformat(),
  otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
  run_id=self.run_id,
@@ -860,9 +870,11 @@ class SimpleAnthropicStreamingInterface:
  else:
  if prev_message_type and prev_message_type != "tool_call_message":
  message_index += 1
+ tool_call_delta = ToolCallDelta(name=self.tool_call_name, tool_call_id=self.tool_call_id, arguments=delta.partial_json)
  tool_call_msg = ToolCallMessage(
  id=self.letta_message_id,
- tool_call=ToolCallDelta(name=self.tool_call_name, tool_call_id=self.tool_call_id, arguments=delta.partial_json),
+ tool_call=tool_call_delta,
+ tool_calls=tool_call_delta,
  date=datetime.now(timezone.utc).isoformat(),
  otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
  run_id=self.run_id,
@@ -273,15 +273,17 @@ class SimpleGeminiStreamingInterface:
  else:
  if prev_message_type and prev_message_type != "tool_call_message":
  message_index += 1
+ tool_call_delta = ToolCallDelta(
+ name=name,
+ arguments=arguments_str,
+ tool_call_id=call_id,
+ )
  yield ToolCallMessage(
  id=self.letta_message_id,
  otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
  date=datetime.now(timezone.utc),
- tool_call=ToolCallDelta(
- name=name,
- arguments=arguments_str,
- tool_call_id=call_id,
- ),
+ tool_call=tool_call_delta,
+ tool_calls=tool_call_delta,
  run_id=self.run_id,
  step_id=self.step_id,
  )
@@ -336,14 +336,16 @@ class OpenAIStreamingInterface:
  step_id=self.step_id,
  )
  else:
+ tool_call_delta = ToolCallDelta(
+ name=self.function_name_buffer,
+ arguments=None,
+ tool_call_id=self.function_id_buffer,
+ )
  tool_call_msg = ToolCallMessage(
  id=self.letta_message_id,
  date=datetime.now(timezone.utc),
- tool_call=ToolCallDelta(
- name=self.function_name_buffer,
- arguments=None,
- tool_call_id=self.function_id_buffer,
- ),
+ tool_call=tool_call_delta,
+ tool_calls=tool_call_delta,
  otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
  run_id=self.run_id,
  step_id=self.step_id,
@@ -423,14 +425,16 @@ class OpenAIStreamingInterface:
  step_id=self.step_id,
  )
  else:
+ tool_call_delta = ToolCallDelta(
+ name=self.function_name_buffer,
+ arguments=combined_chunk,
+ tool_call_id=self.function_id_buffer,
+ )
  tool_call_msg = ToolCallMessage(
  id=self.letta_message_id,
  date=datetime.now(timezone.utc),
- tool_call=ToolCallDelta(
- name=self.function_name_buffer,
- arguments=combined_chunk,
- tool_call_id=self.function_id_buffer,
- ),
+ tool_call=tool_call_delta,
+ tool_calls=tool_call_delta,
  # name=name,
  otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
  run_id=self.run_id,
@@ -460,14 +464,16 @@ class OpenAIStreamingInterface:
  step_id=self.step_id,
  )
  else:
+ tool_call_delta = ToolCallDelta(
+ name=None,
+ arguments=updates_main_json,
+ tool_call_id=self.function_id_buffer,
+ )
  tool_call_msg = ToolCallMessage(
  id=self.letta_message_id,
  date=datetime.now(timezone.utc),
- tool_call=ToolCallDelta(
- name=None,
- arguments=updates_main_json,
- tool_call_id=self.function_id_buffer,
- ),
+ tool_call=tool_call_delta,
+ tool_calls=tool_call_delta,
  # name=name,
  otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
  run_id=self.run_id,
@@ -717,14 +723,16 @@ class SimpleOpenAIStreamingInterface:
  step_id=self.step_id,
  )
  else:
+ tool_call_delta = ToolCallDelta(
+ name=tool_call.function.name,
+ arguments=tool_call.function.arguments,
+ tool_call_id=tool_call.id,
+ )
  tool_call_msg = ToolCallMessage(
  id=self.letta_message_id,
  date=datetime.now(timezone.utc),
- tool_call=ToolCallDelta(
- name=tool_call.function.name,
- arguments=tool_call.function.arguments,
- tool_call_id=tool_call.id,
- ),
+ tool_call=tool_call_delta,
+ tool_calls=tool_call_delta,
  # name=name,
  otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
  run_id=self.run_id,
@@ -945,15 +953,17 @@ class SimpleOpenAIResponsesStreamingInterface:
  else:
  if prev_message_type and prev_message_type != "tool_call_message":
  message_index += 1
+ tool_call_delta = ToolCallDelta(
+ name=name,
+ arguments=arguments if arguments != "" else None,
+ tool_call_id=call_id,
+ )
  yield ToolCallMessage(
  id=self.letta_message_id,
  otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
  date=datetime.now(timezone.utc),
- tool_call=ToolCallDelta(
- name=name,
- arguments=arguments if arguments != "" else None,
- tool_call_id=call_id,
- ),
+ tool_call=tool_call_delta,
+ tool_calls=tool_call_delta,
  run_id=self.run_id,
  step_id=self.step_id,
  )
@@ -1113,15 +1123,17 @@ class SimpleOpenAIResponsesStreamingInterface:
  else:
  if prev_message_type and prev_message_type != "tool_call_message":
  message_index += 1
+ tool_call_delta = ToolCallDelta(
+ name=None,
+ arguments=delta,
+ tool_call_id=None,
+ )
  yield ToolCallMessage(
  id=self.letta_message_id,
  otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
  date=datetime.now(timezone.utc),
- tool_call=ToolCallDelta(
- name=None,
- arguments=delta,
- tool_call_id=None,
- ),
+ tool_call=tool_call_delta,
+ tool_calls=tool_call_delta,
  run_id=self.run_id,
  step_id=self.step_id,
  )
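
Every streaming-interface hunk above applies the same refactor: build the ToolCallDelta once, then write it to both the deprecated tool_call field and the new tool_calls field of ToolCallMessage. A minimal sketch of that dual-write pattern (illustrative only; optional fields such as otid and run_id are omitted, and the import location is an assumption):

from datetime import datetime, timezone

from letta.schemas.letta_message import ToolCallDelta, ToolCallMessage  # assumed import path

# Hypothetical values standing in for the interface's buffered streaming state
delta = ToolCallDelta(name="web_search", tool_call_id="call_123", arguments='{"query": "letta"}')

chunk = ToolCallMessage(
    id="message-xyz",
    date=datetime.now(timezone.utc).isoformat(),
    tool_call=delta,   # deprecated single-call field, still populated for older clients
    tool_calls=delta,  # new field; during streaming it carries the same delta object
)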
@@ -56,6 +56,9 @@ class AnthropicClient(LLMClientBase):
  def request(self, request_data: dict, llm_config: LLMConfig) -> dict:
  client = self._get_anthropic_client(llm_config, async_client=False)
  betas: list[str] = []
+ # Interleaved thinking for reasoner (sync path parity)
+ if llm_config.enable_reasoner:
+ betas.append("interleaved-thinking-2025-05-14")
  # 1M context beta for Sonnet 4/4.5 when enabled
  try:
  from letta.settings import model_settings
@@ -371,6 +374,7 @@ class AnthropicClient(LLMClientBase):
  async def count_tokens(self, messages: List[dict] = None, model: str = None, tools: List[OpenAITool] = None) -> int:
  logging.getLogger("httpx").setLevel(logging.WARNING)

+ # Use the default client; token counting is lightweight and does not require BYOK overrides
  client = anthropic.AsyncAnthropic()
  if messages and len(messages) == 0:
  messages = None
@@ -379,23 +383,20 @@ class AnthropicClient(LLMClientBase):
  else:
  anthropic_tools = None

+ # Detect presence of reasoning blocks anywhere in the final assistant message.
+ # Interleaved thinking is not guaranteed to be the first content part.
  thinking_enabled = False
  if messages and len(messages) > 0:
- # Check if the last assistant message starts with a thinking block
- # Find the last assistant message
- last_assistant_message = None
- for message in reversed(messages):
- if message.get("role") == "assistant":
- last_assistant_message = message
- break
-
- if (
- last_assistant_message
- and isinstance(last_assistant_message.get("content"), list)
- and len(last_assistant_message["content"]) > 0
- and last_assistant_message["content"][0].get("type") == "thinking"
- ):
- thinking_enabled = True
+ last_assistant_message = next((m for m in reversed(messages) if m.get("role") == "assistant"), None)
+ if last_assistant_message:
+ content = last_assistant_message.get("content")
+ if isinstance(content, list):
+ for part in content:
+ if isinstance(part, dict) and part.get("type") in {"thinking", "redacted_thinking"}:
+ thinking_enabled = True
+ break
+ elif isinstance(content, str) and "<thinking>" in content:
+ thinking_enabled = True

  try:
  count_params = {
@@ -404,9 +405,27 @@ class AnthropicClient(LLMClientBase):
  "tools": anthropic_tools or [],
  }

+ betas: list[str] = []
  if thinking_enabled:
+ # Match interleaved thinking behavior so token accounting is consistent
  count_params["thinking"] = {"type": "enabled", "budget_tokens": 16000}
- result = await client.beta.messages.count_tokens(**count_params)
+ betas.append("interleaved-thinking-2025-05-14")
+
+ # Opt-in to 1M context if enabled for this model in settings
+ try:
+ if (
+ model
+ and model_settings.anthropic_sonnet_1m
+ and (model.startswith("claude-sonnet-4") or model.startswith("claude-sonnet-4-5"))
+ ):
+ betas.append("context-1m-2025-08-07")
+ except Exception:
+ pass
+
+ if betas:
+ result = await client.beta.messages.count_tokens(**count_params, betas=betas)
+ else:
+ result = await client.beta.messages.count_tokens(**count_params)
  except:
  raise

@@ -190,7 +190,8 @@ class ToolCallMessage(LettaMessage):
  message_type: Literal[MessageType.tool_call_message] = Field(
  default=MessageType.tool_call_message, description="The type of the message."
  )
- tool_call: Union[ToolCall, ToolCallDelta]
+ tool_call: Union[ToolCall, ToolCallDelta] = Field(..., deprecated=True)
+ tool_calls: Optional[Union[List[ToolCall], ToolCallDelta]] = None

  def model_dump(self, *args, **kwargs):
  """
@@ -198,8 +199,14 @@ class ToolCallMessage(LettaMessage):
  """
  kwargs["exclude_none"] = True
  data = super().model_dump(*args, **kwargs)
- if isinstance(data["tool_call"], dict):
+ if isinstance(data.get("tool_call"), dict):
  data["tool_call"] = {k: v for k, v in data["tool_call"].items() if v is not None}
+ if isinstance(data.get("tool_calls"), dict):
+ data["tool_calls"] = {k: v for k, v in data["tool_calls"].items() if v is not None}
+ elif isinstance(data.get("tool_calls"), list):
+ data["tool_calls"] = [
+ {k: v for k, v in item.items() if v is not None} if isinstance(item, dict) else item for item in data["tool_calls"]
+ ]
  return data

  class Config:
@@ -226,6 +233,14 @@ class ToolCallMessage(LettaMessage):
  return v


+ class ToolReturn(BaseModel):
+ tool_return: str
+ status: Literal["success", "error"]
+ tool_call_id: str
+ stdout: Optional[List[str]] = None
+ stderr: Optional[List[str]] = None
+
+
  class ToolReturnMessage(LettaMessage):
  """
  A message representing the return value of a tool call (generated by Letta executing the requested tool).
@@ -234,21 +249,23 @@ class ToolReturnMessage(LettaMessage):
  id (str): The ID of the message
  date (datetime): The date the message was created in ISO format
  name (Optional[str]): The name of the sender of the message
- tool_return (str): The return value of the tool
- status (Literal["success", "error"]): The status of the tool call
- tool_call_id (str): A unique identifier for the tool call that generated this message
- stdout (Optional[List(str)]): Captured stdout (e.g. prints, logs) from the tool invocation
- stderr (Optional[List(str)]): Captured stderr from the tool invocation
+ tool_return (str): The return value of the tool (deprecated, use tool_returns)
+ status (Literal["success", "error"]): The status of the tool call (deprecated, use tool_returns)
+ tool_call_id (str): A unique identifier for the tool call that generated this message (deprecated, use tool_returns)
+ stdout (Optional[List(str)]): Captured stdout (e.g. prints, logs) from the tool invocation (deprecated, use tool_returns)
+ stderr (Optional[List(str)]): Captured stderr from the tool invocation (deprecated, use tool_returns)
+ tool_returns (Optional[List[ToolReturn]]): List of tool returns for multi-tool support
  """

  message_type: Literal[MessageType.tool_return_message] = Field(
  default=MessageType.tool_return_message, description="The type of the message."
  )
- tool_return: str
- status: Literal["success", "error"]
- tool_call_id: str
- stdout: Optional[List[str]] = None
- stderr: Optional[List[str]] = None
+ tool_return: str = Field(..., deprecated=True)
+ status: Literal["success", "error"] = Field(..., deprecated=True)
+ tool_call_id: str = Field(..., deprecated=True)
+ stdout: Optional[List[str]] = Field(None, deprecated=True)
+ stderr: Optional[List[str]] = Field(None, deprecated=True)
+ tool_returns: Optional[List[ToolReturn]] = None


  class ApprovalRequestMessage(LettaMessage):
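
The letta_message.py changes are additive: ToolCallMessage keeps the deprecated tool_call field alongside the new tool_calls, and ToolReturnMessage keeps its flat fields (now deprecated) while gaining a tool_returns list of the new ToolReturn objects. A hedged construction sketch using only the fields declared above (other LettaMessage base fields omitted):

ret = ToolReturn(
    tool_return="42",
    status="success",
    tool_call_id="call_123",
    stdout=["computed 42"],
)

msg = ToolReturnMessage(
    id="message-xyz",
    date=datetime.now(timezone.utc),
    tool_return=ret.tool_return,   # deprecated flat fields, double-written for older clients
    status=ret.status,
    tool_call_id=ret.tool_call_id,
    stdout=ret.stdout,
    tool_returns=[ret],            # new list form, enabling multi-tool returns
)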
letta/schemas/message.py CHANGED
@@ -492,23 +492,27 @@ class Message(BaseMessage):
  assistant_message_tool_kwarg: str = DEFAULT_MESSAGE_TOOL_KWARG,
  ) -> List[LettaMessage]:
  messages = []
- # This is type FunctionCall
- for tool_call in self.tool_calls:
- otid = Message.generate_otid_from_id(self.id, current_message_count + len(messages))
- # If we're supporting using assistant message,
- # then we want to treat certain function calls as a special case
- if use_assistant_message and tool_call.function.name == assistant_message_tool_name:
- # We need to unpack the actual message contents from the function call
- try:
- func_args = parse_json(tool_call.function.arguments)
- message_string = validate_function_response(func_args[assistant_message_tool_kwarg], 0, truncate=False)
- except KeyError:
- raise ValueError(f"Function call {tool_call.function.name} missing {assistant_message_tool_kwarg} argument")
+
+ # If assistant mode is off, just create one ToolCallMessage with all tool calls
+ if not use_assistant_message:
+ all_tool_call_objs = [
+ ToolCall(
+ name=tool_call.function.name,
+ arguments=tool_call.function.arguments,
+ tool_call_id=tool_call.id,
+ )
+ for tool_call in self.tool_calls
+ ]
+
+ if all_tool_call_objs:
+ otid = Message.generate_otid_from_id(self.id, current_message_count)
  messages.append(
- AssistantMessage(
+ ToolCallMessage(
  id=self.id,
  date=self.created_at,
- content=message_string,
+ # use first tool call for the deprecated field
+ tool_call=all_tool_call_objs[0],
+ tool_calls=all_tool_call_objs,
  name=self.name,
  otid=otid,
  sender_id=self.sender_id,
@@ -517,16 +521,41 @@
  run_id=self.run_id,
  )
  )
- else:
+ return messages
+
+ collected_tool_calls = []
+
+ for tool_call in self.tool_calls:
+ otid = Message.generate_otid_from_id(self.id, current_message_count + len(messages))
+
+ if tool_call.function.name == assistant_message_tool_name:
+ if collected_tool_calls:
+ tool_call_message = ToolCallMessage(
+ id=self.id,
+ date=self.created_at,
+ # use first tool call for the deprecated field
+ tool_call=collected_tool_calls[0],
+ tool_calls=collected_tool_calls.copy(),
+ name=self.name,
+ otid=Message.generate_otid_from_id(self.id, current_message_count + len(messages)),
+ sender_id=self.sender_id,
+ step_id=self.step_id,
+ is_err=self.is_err,
+ run_id=self.run_id,
+ )
+ messages.append(tool_call_message)
+ collected_tool_calls = [] # reset the collection
+
+ try:
+ func_args = parse_json(tool_call.function.arguments)
+ message_string = validate_function_response(func_args[assistant_message_tool_kwarg], 0, truncate=False)
+ except KeyError:
+ raise ValueError(f"Function call {tool_call.function.name} missing {assistant_message_tool_kwarg} argument")
  messages.append(
- ToolCallMessage(
+ AssistantMessage(
  id=self.id,
  date=self.created_at,
- tool_call=ToolCall(
- name=tool_call.function.name,
- arguments=tool_call.function.arguments,
- tool_call_id=tool_call.id,
- ),
+ content=message_string,
  name=self.name,
  otid=otid,
  sender_id=self.sender_id,
@@ -535,6 +564,32 @@
  run_id=self.run_id,
  )
  )
+ else:
+ # non-assistant tool call, collect it
+ tool_call_obj = ToolCall(
+ name=tool_call.function.name,
+ arguments=tool_call.function.arguments,
+ tool_call_id=tool_call.id,
+ )
+ collected_tool_calls.append(tool_call_obj)
+
+ # flush any remaining collected tool calls
+ if collected_tool_calls:
+ tool_call_message = ToolCallMessage(
+ id=self.id,
+ date=self.created_at,
+ # use first tool call for the deprecated field
+ tool_call=collected_tool_calls[0],
+ tool_calls=collected_tool_calls,
+ name=self.name,
+ otid=Message.generate_otid_from_id(self.id, current_message_count + len(messages)),
+ sender_id=self.sender_id,
+ step_id=self.step_id,
+ is_err=self.is_err,
+ run_id=self.run_id,
+ )
+ messages.append(tool_call_message)
+
  return messages

  def _convert_tool_return_message(self) -> List[ToolReturnMessage]:
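
Net effect of the rewritten conversion above: with use_assistant_message off, every tool call on the stored message lands in a single ToolCallMessage; with it on, non-assistant tool calls are collected and flushed as one grouped ToolCallMessage whenever an assistant-message tool call (or the end of the list) is reached. A rough illustration of the expected ordering, with hypothetical tool call names:

# Stored Message.tool_calls (hypothetical): [search, fetch, send_message, archive]
#
# use_assistant_message=False ->
#   [ToolCallMessage(tool_calls=[search, fetch, send_message, archive])]
#
# use_assistant_message=True ->
#   [ToolCallMessage(tool_calls=[search, fetch]),    # flushed before the send_message call
#    AssistantMessage(content=<send_message "message" argument>),
#    ToolCallMessage(tool_calls=[archive])]          # flushed at the end of the loop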
@@ -556,6 +611,13 @@
  if self.role != MessageRole.tool:
  raise ValueError(f"Cannot convert message of type {self.role} to ToolReturnMessage")

+ # This is a very special buggy case during the double writing period
+ # where there is no tool call id on the tool return object, but it exists top level
+ # This is meant to be a short term patch - this can happen when people are using old agent files that were exported
+ # during a specific migration state
+ if len(self.tool_returns) == 1 and self.tool_call_id and not self.tool_returns[0].tool_call_id:
+ self.tool_returns[0].tool_call_id = self.tool_call_id
+
  if self.tool_returns:
  return self._convert_explicit_tool_returns()

@@ -647,6 +709,16 @@
  Returns:
  Configured ToolReturnMessage instance
  """
+ from letta.schemas.letta_message import ToolReturn as ToolReturnSchema
+
+ tool_return_obj = ToolReturnSchema(
+ tool_return=message_text,
+ status=status,
+ tool_call_id=tool_call_id,
+ stdout=stdout,
+ stderr=stderr,
+ )
+
  return ToolReturnMessage(
  id=self.id,
  date=self.created_at,
@@ -655,6 +727,7 @@
  tool_call_id=tool_call_id,
  stdout=stdout,
  stderr=stderr,
+ tool_returns=[tool_return_obj],
  name=self.name,
  otid=Message.generate_otid_from_id(self.id, otid_index),
  sender_id=self.sender_id,
@@ -1625,6 +1698,14 @@
  if messages[-1].role == "approval" and messages[-1].tool_calls is not None and len(messages[-1].tool_calls) > 0:
  messages.remove(messages[-1])

+ # Filter last message if it is a lone reasoning message without assistant message or tool call
+ if (
+ messages[-1].role == "assistant"
+ and messages[-1].tool_calls is None
+ and (not messages[-1].content or all(not isinstance(content_part, TextContent) for content_part in messages[-1].content))
+ ):
+ messages.remove(messages[-1])
+
  return messages

  @staticmethod
@@ -562,14 +562,16 @@ class StreamingServerInterface(AgentChunkStreamingInterface):

  if prev_message_type and prev_message_type != "tool_call_message":
  message_index += 1
+ tool_call_delta = ToolCallDelta(
+ name=json_reasoning_content.get("name"),
+ arguments=json.dumps(json_reasoning_content.get("arguments")),
+ tool_call_id=None,
+ )
  processed_chunk = ToolCallMessage(
  id=message_id,
  date=message_date,
- tool_call=ToolCallDelta(
- name=json_reasoning_content.get("name"),
- arguments=json.dumps(json_reasoning_content.get("arguments")),
- tool_call_id=None,
- ),
+ tool_call=tool_call_delta,
+ tool_calls=tool_call_delta,
  name=name,
  otid=Message.generate_otid_from_id(message_id, message_index),
  )
@@ -703,14 +705,16 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
  else:
  if prev_message_type and prev_message_type != "tool_call_message":
  message_index += 1
+ tc_delta = ToolCallDelta(
+ name=tool_call_delta.get("name"),
+ arguments=tool_call_delta.get("arguments"),
+ tool_call_id=tool_call_delta.get("id"),
+ )
  processed_chunk = ToolCallMessage(
  id=message_id,
  date=message_date,
- tool_call=ToolCallDelta(
- name=tool_call_delta.get("name"),
- arguments=tool_call_delta.get("arguments"),
- tool_call_id=tool_call_delta.get("id"),
- ),
+ tool_call=tc_delta,
+ tool_calls=tc_delta,
  name=name,
  otid=Message.generate_otid_from_id(message_id, message_index),
  )
@@ -779,14 +783,16 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
  else:
  if prev_message_type and prev_message_type != "tool_call_message":
  message_index += 1
+ tc_delta = ToolCallDelta(
+ name=self.function_name_buffer,
+ arguments=None,
+ tool_call_id=self.function_id_buffer,
+ )
  processed_chunk = ToolCallMessage(
  id=message_id,
  date=message_date,
- tool_call=ToolCallDelta(
- name=self.function_name_buffer,
- arguments=None,
- tool_call_id=self.function_id_buffer,
- ),
+ tool_call=tc_delta,
+ tool_calls=tc_delta,
  name=name,
  otid=Message.generate_otid_from_id(message_id, message_index),
  )
@@ -843,14 +849,16 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
  combined_chunk = self.function_args_buffer + updates_main_json
  if prev_message_type and prev_message_type != "tool_call_message":
  message_index += 1
+ tc_delta = ToolCallDelta(
+ name=None,
+ arguments=combined_chunk,
+ tool_call_id=self.function_id_buffer,
+ )
  processed_chunk = ToolCallMessage(
  id=message_id,
  date=message_date,
- tool_call=ToolCallDelta(
- name=None,
- arguments=combined_chunk,
- tool_call_id=self.function_id_buffer,
- ),
+ tool_call=tc_delta,
+ tool_calls=tc_delta,
  name=name,
  otid=Message.generate_otid_from_id(message_id, message_index),
  )
@@ -861,14 +869,16 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
  # If there's no buffer to clear, just output a new chunk with new data
  if prev_message_type and prev_message_type != "tool_call_message":
  message_index += 1
+ tc_delta = ToolCallDelta(
+ name=None,
+ arguments=updates_main_json,
+ tool_call_id=self.function_id_buffer,
+ )
  processed_chunk = ToolCallMessage(
  id=message_id,
  date=message_date,
- tool_call=ToolCallDelta(
- name=None,
- arguments=updates_main_json,
- tool_call_id=self.function_id_buffer,
- ),
+ tool_call=tc_delta,
+ tool_calls=tc_delta,
  name=name,
  otid=Message.generate_otid_from_id(message_id, message_index),
  )
@@ -992,14 +1002,16 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
  else:
  if prev_message_type and prev_message_type != "tool_call_message":
  message_index += 1
+ tc_delta = ToolCallDelta(
+ name=tool_call_delta.get("name"),
+ arguments=tool_call_delta.get("arguments"),
+ tool_call_id=tool_call_delta.get("id"),
+ )
  processed_chunk = ToolCallMessage(
  id=message_id,
  date=message_date,
- tool_call=ToolCallDelta(
- name=tool_call_delta.get("name"),
- arguments=tool_call_delta.get("arguments"),
- tool_call_id=tool_call_delta.get("id"),
- ),
+ tool_call=tc_delta,
+ tool_calls=tc_delta,
  name=name,
  otid=Message.generate_otid_from_id(message_id, message_index),
  )
@@ -1262,14 +1274,16 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
  # Store the ID of the tool call so allow skipping the corresponding response
  self.prev_assistant_message_id = function_call.id
  else:
+ tool_call_obj = ToolCall(
+ name=function_call.function.name,
+ arguments=function_call.function.arguments,
+ tool_call_id=function_call.id,
+ )
  processed_chunk = ToolCallMessage(
  id=msg_obj.id,
  date=msg_obj.created_at,
- tool_call=ToolCall(
- name=function_call.function.name,
- arguments=function_call.function.arguments,
- tool_call_id=function_call.id,
- ),
+ tool_call=tool_call_obj,
+ tool_calls=tool_call_obj,
  name=msg_obj.name,
  otid=Message.generate_otid_from_id(msg_obj.id, chunk_index) if chunk_index is not None else None,
  )
@@ -1303,14 +1317,29 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
  # Skip this tool call receipt
  return
  else:
+ from letta.schemas.letta_message import ToolReturn as ToolReturnSchema
+
+ status = msg_obj.tool_returns[0].status if msg_obj.tool_returns else "success"
+ stdout = msg_obj.tool_returns[0].stdout if msg_obj.tool_returns else []
+ stderr = msg_obj.tool_returns[0].stderr if msg_obj.tool_returns else []
+
+ tool_return_obj = ToolReturnSchema(
+ tool_return=msg,
+ status=status,
+ tool_call_id=msg_obj.tool_call_id,
+ stdout=stdout,
+ stderr=stderr,
+ )
+
  new_message = ToolReturnMessage(
  id=msg_obj.id,
  date=msg_obj.created_at,
  tool_return=msg,
- status=msg_obj.tool_returns[0].status if msg_obj.tool_returns else "success",
+ status=status,
  tool_call_id=msg_obj.tool_call_id,
- stdout=msg_obj.tool_returns[0].stdout if msg_obj.tool_returns else [],
- stderr=msg_obj.tool_returns[0].stderr if msg_obj.tool_returns else [],
+ stdout=stdout,
+ stderr=stderr,
+ tool_returns=[tool_return_obj],
  name=msg_obj.name,
  otid=Message.generate_otid_from_id(msg_obj.id, chunk_index) if chunk_index is not None else None,
  )
@@ -1319,14 +1348,29 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
  msg = msg.replace("Error: ", "", 1)
  # new_message = {"function_return": msg, "status": "error"}
  assert msg_obj.tool_call_id is not None
+ from letta.schemas.letta_message import ToolReturn as ToolReturnSchema
+
+ status = msg_obj.tool_returns[0].status if msg_obj.tool_returns else "error"
+ stdout = msg_obj.tool_returns[0].stdout if msg_obj.tool_returns else []
+ stderr = msg_obj.tool_returns[0].stderr if msg_obj.tool_returns else []
+
+ tool_return_obj = ToolReturnSchema(
+ tool_return=msg,
+ status=status,
+ tool_call_id=msg_obj.tool_call_id,
+ stdout=stdout,
+ stderr=stderr,
+ )
+
  new_message = ToolReturnMessage(
  id=msg_obj.id,
  date=msg_obj.created_at,
  tool_return=msg,
- status=msg_obj.tool_returns[0].status if msg_obj.tool_returns else "error",
+ status=status,
  tool_call_id=msg_obj.tool_call_id,
- stdout=msg_obj.tool_returns[0].stdout if msg_obj.tool_returns else [],
- stderr=msg_obj.tool_returns[0].stderr if msg_obj.tool_returns else [],
+ stdout=stdout,
+ stderr=stderr,
+ tool_returns=[tool_return_obj],
  name=msg_obj.name,
  otid=Message.generate_otid_from_id(msg_obj.id, chunk_index) if chunk_index is not None else None,
  )
@@ -120,6 +120,40 @@ async def check_provider(
  raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"{e}")


+ @router.post("/{provider_id}/check", response_model=None, operation_id="check_existing_provider")
+ async def check_existing_provider(
+ provider_id: str,
+ headers: HeaderParams = Depends(get_headers),
+ server: "SyncServer" = Depends(get_letta_server),
+ ):
+ """
+ Verify the API key and additional parameters for an existing provider.
+ """
+ try:
+ actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
+ provider = await server.provider_manager.get_provider_async(provider_id=provider_id, actor=actor)
+
+ # Create a ProviderCheck from the existing provider
+ provider_check = ProviderCheck(
+ provider_type=provider.provider_type,
+ api_key=provider.api_key,
+ base_url=provider.base_url,
+ )
+
+ await server.provider_manager.check_provider_api_key(provider_check=provider_check)
+ return JSONResponse(
+ status_code=status.HTTP_200_OK, content={"message": f"Valid api key for provider_type={provider.provider_type.value}"}
+ )
+ except LLMAuthenticationError as e:
+ raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=f"{e.message}")
+ except NoResultFound:
+ raise HTTPException(status_code=404, detail=f"Provider provider_id={provider_id} not found for user_id={actor.id}.")
+ except HTTPException:
+ raise
+ except Exception as e:
+ raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"{e}")
+
+
  @router.delete("/{provider_id}", response_model=None, operation_id="delete_provider")
  async def delete_provider(
  provider_id: str,
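
A hedged usage sketch for the new check endpoint; the /v1/providers prefix and default local port are assumptions based on the router's file location (letta/server/rest_api/routers/v1/providers.py), not confirmed by this diff:

import httpx

# Hypothetical server URL and provider id. Expected responses per the handler above:
# 200 with a confirmation message, 401 if the stored API key fails verification,
# 404 if the provider does not exist, 500 on any other error.
resp = httpx.post("http://localhost:8283/v1/providers/provider-123/check")
print(resp.status_code, resp.json())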
@@ -286,7 +286,7 @@ async def delete_run(
  """
  actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
  runs_manager = RunManager()
- return await runs_manager.delete_run_by_id(run_id=run_id, actor=actor)
+ return await runs_manager.delete_run(run_id=run_id, actor=actor)


  @router.post(
letta/server/server.py CHANGED
@@ -1239,6 +1239,16 @@ class SyncServer(object):
  function_args=tool_args,
  tool=tool,
  )
+ from letta.schemas.letta_message import ToolReturn as ToolReturnSchema
+
+ tool_return_obj = ToolReturnSchema(
+ tool_return=str(tool_execution_result.func_return),
+ status=tool_execution_result.status,
+ tool_call_id="null",
+ stdout=tool_execution_result.stdout,
+ stderr=tool_execution_result.stderr,
+ )
+
  return ToolReturnMessage(
  id="null",
  tool_call_id="null",
@@ -1247,10 +1257,21 @@
  tool_return=str(tool_execution_result.func_return),
  stdout=tool_execution_result.stdout,
  stderr=tool_execution_result.stderr,
+ tool_returns=[tool_return_obj],
  )

  except Exception as e:
  func_return = get_friendly_error_msg(function_name=tool.name, exception_name=type(e).__name__, exception_message=str(e))
+ from letta.schemas.letta_message import ToolReturn as ToolReturnSchema
+
+ tool_return_obj = ToolReturnSchema(
+ tool_return=func_return,
+ status="error",
+ tool_call_id="null",
+ stdout=[],
+ stderr=[traceback.format_exc()],
+ )
+
  return ToolReturnMessage(
  id="null",
  tool_call_id="null",
@@ -1259,6 +1280,7 @@
  tool_return=func_return,
  stdout=[],
  stderr=[traceback.format_exc()],
+ tool_returns=[tool_return_obj],
  )

  # MCP wrappers
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: letta-nightly
- Version: 0.12.0.dev20251009104148
+ Version: 0.12.0.dev20251009203644
  Summary: Create LLM agents with long-term memory and custom tools
  Author-email: Letta Team <contact@letta.com>
  License: Apache License
@@ -88,10 +88,10 @@ letta/humans/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/humans/examples/basic.txt,sha256=Lcp8YESTWvOJgO4Yf_yyQmgo5bKakeB1nIVrwEGG6PA,17
  letta/humans/examples/cs_phd.txt,sha256=9C9ZAV_VuG7GB31ksy3-_NAyk8rjE6YtVOkhp08k1xw,297
  letta/interfaces/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- letta/interfaces/anthropic_streaming_interface.py,sha256=-zJ4LhS7vIXnfYg-PtEM_SZya1MFT_2hdaxbNyTppFg,44188
- letta/interfaces/gemini_streaming_interface.py,sha256=17a96UeLtTqlyNLJe9f0gi5dnUuE8iIgZneIaZ2AotE,12316
+ letta/interfaces/anthropic_streaming_interface.py,sha256=VaqI_aLAOk8EbVmDLe8PMOb5LsKNhVqpgVbLe-WbjMg,44780
+ letta/interfaces/gemini_streaming_interface.py,sha256=1nJwGl3nKAL7yfDX0zzpWrrjjpqzRFo82JpsEF6u_cA,12406
  letta/interfaces/openai_chat_completions_streaming_interface.py,sha256=3xHXh8cW79EkiMUTYfvcH_s92nkLjxXfvtVOVC3bfLo,5050
- letta/interfaces/openai_streaming_interface.py,sha256=E-gFVpZDl6rECU3KpmN31CU-CfkjSj4aD_3u2CwtuRw,75950
+ letta/interfaces/openai_streaming_interface.py,sha256=ROUbTygmwWyxkfAhjnI9LPR16Wxo-Kf5POsJuWM10rk,76594
  letta/interfaces/utils.py,sha256=c6jvO0dBYHh8DQnlN-B0qeNC64d3CSunhfqlFA4pJTY,278
  letta/jobs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/jobs/helpers.py,sha256=kO4aj954xsQ1RAmkjY6LQQ7JEIGuhaxB1e9pzrYKHAY,914
@@ -99,7 +99,7 @@ letta/jobs/llm_batch_job_polling.py,sha256=HUCTa1lTOiLAB_8m95RUfeNJa4lxlF8paGdCV
  letta/jobs/scheduler.py,sha256=Ub5VTCA8P5C9Y-0mPK2YIPJSEzKbSd2l5Sp0sOWctD8,8697
  letta/jobs/types.py,sha256=K8GKEnqEgAT6Kq4F2hUrBC4ZAFM9OkfOjVMStzxKuXQ,742
  letta/llm_api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- letta/llm_api/anthropic_client.py,sha256=xZc2tvqz0_P3oNc0Wd2iWpK8qopQJkOSGnOFyo7v7hU,42204
+ letta/llm_api/anthropic_client.py,sha256=nDE3D0aqxI1pQGl2zOdkJNqc74KNp7_6ZXK5AEwgIP4,43320
  letta/llm_api/azure_client.py,sha256=BeChGsH4brrSgZBbCf8UE5RkW-3ZughpKnsBY2VYxwI,3841
  letta/llm_api/bedrock_client.py,sha256=xB01zdk1bzzf0ExkPcWcuxuLb1GXaJglvgRMYP2zai4,3572
  letta/llm_api/deepseek_client.py,sha256=YYja4-LWODE8fAJPSIVGxA7q0hb6-QmWRxWhlboTp64,17144
@@ -261,7 +261,7 @@ letta/schemas/health.py,sha256=zT6mYovvD17iJRuu2rcaQQzbEEYrkwvAE9TB7iU824c,139
  letta/schemas/identity.py,sha256=-6ABqAuz-VfGcAoAX5oVyzpjBiY4jAN-gJUM-PLBQQY,3984
  letta/schemas/job.py,sha256=iIGYuegO9OzWyfA8nTV0sov67KyTb2QAhSHYy4TVte0,5552
  letta/schemas/letta_base.py,sha256=LhVKaSAUr7HzrvuEJE_1ZKJMe-nJKoYR-ZgLe1IUTpY,4020
- letta/schemas/letta_message.py,sha256=RZiWiqan_fxJBjoSi8JpSMjdrvlxEEF4jmq-HGBCMtw,18021
+ letta/schemas/letta_message.py,sha256=lsWR5FaUYNmQ5xu0RWk_bjsT0q8x90nWmekuggykQ60,19142
  letta/schemas/letta_message_content.py,sha256=_BTfU6NcxNbc6SPt4ANVGUh0d0QUqVQkfDF0gM8rbdg,13226
  letta/schemas/letta_ping.py,sha256=9JphoKhWZ63JqsakIx4aaj8dYMtYVa7HxSkT5cMh5cI,863
  letta/schemas/letta_request.py,sha256=ll0QTt-tzaJ3zxpPyaifz7mtWcPy6QmvPUDOzngbxfQ,4526
@@ -272,7 +272,7 @@ letta/schemas/llm_config.py,sha256=dzEiIvm1l5xlYF0Q1It-18l9HKdwMd4Yr09A3gWhhNI,1
  letta/schemas/llm_config_overrides.py,sha256=E6qJuVA8TwAAy3VjGitJ5jSQo5PbN-6VPcZOF5qhP9A,1815
  letta/schemas/mcp.py,sha256=Wiu3FL5qupaHFaMqKFp-w1Ev6ShQ5dPfAtKIMGmRiF8,15527
  letta/schemas/memory.py,sha256=g2cPd0CF_3atzVkQA8ioIm52oZbsr6Ng-w31qGgNJ_g,20206
- letta/schemas/message.py,sha256=kXTfYnbFm7uzj2TIy-R3ixrSJGnR6xasdt1wgYJJOvw,74961
+ letta/schemas/message.py,sha256=E8d8I2ALa6dAW28yrGEIjUNGvqigPPBmovwA8xopDro,78354
  letta/schemas/npm_requirement.py,sha256=HkvBF7KjHUH-MG-RAEYJHO2MLRS2rxFUcmbpbZVznLk,457
  letta/schemas/organization.py,sha256=TXrHN4IBQnX-mWvRuCOH57XZSLYCVOY0wWm2_UzDQIA,1279
  letta/schemas/passage.py,sha256=_bO19zOIQtQ3F3VqDSgIJqh15V0IIrJ_KdlbCt6-4D0,3940
@@ -333,7 +333,7 @@ letta/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/server/constants.py,sha256=yAdGbLkzlOU_dLTx0lKDmAnj0ZgRXCEaIcPJWO69eaE,92
  letta/server/db.py,sha256=bh294tLdOQxOrO9jICDrXa_UXTAYFByu_47tPXlKrjs,3006
  letta/server/generate_openapi_schema.sh,sha256=14Q6r0fbNNVVdf4X3Z-H0ZtyrVw5zYLzL5Doom3kM9k,351
- letta/server/server.py,sha256=nEvHxECQJ6I6EC2GQRNzHZpxg5HgFkhCXzqFwB-U9E4,78499
+ letta/server/server.py,sha256=JClC6RiH5giUEGr74VzpABAZOASazHiBGmnG7nr-PwQ,79339
  letta/server/startup.sh,sha256=z-Fea-7LiuS_aG1tJqS8JAsDQaamwC_kuDhv9D3PPPY,2698
  letta/server/utils.py,sha256=rRvW6L1lzau4u9boamiyZH54lf5tQ91ypXzUW9cfSPA,1667
  letta/server/rest_api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -341,7 +341,7 @@ letta/server/rest_api/app.py,sha256=43HvR0pVs3XHUpmHJvT4wMm_sWc8NcB1ViH5XYCnSBc,
  letta/server/rest_api/auth_token.py,sha256=725EFEIiNj4dh70hrSd94UysmFD8vcJLrTRfNHkzxDo,774
  letta/server/rest_api/chat_completions_interface.py,sha256=-7wO7pNBWXMqblVkJpuZ8JPJ-LjudLTtT6BJu-q_XAM,11138
  letta/server/rest_api/dependencies.py,sha256=4VdkahB_yLpvwmJnwWG1s46apclrR53QfDeE1es9wUc,1722
- letta/server/rest_api/interface.py,sha256=_GQfKYUp9w4Wo2HSE_8Ff7QU16t1blspLaqmukpER9s,67099
+ letta/server/rest_api/interface.py,sha256=KbeStLQQ9fLDOYJwA-VrzuppgCr9_b6k2CFVBYNw2A0,68676
  letta/server/rest_api/json_parser.py,sha256=yoakaCkSMdf0Y_pyILoFKZlvzXeqF-E1KNeHzatLMDc,9157
  letta/server/rest_api/redis_stream_manager.py,sha256=cJaveonCmTaCMi_I56u5TaSgGIu7e6TltvRZeLC49DU,11227
  letta/server/rest_api/static_files.py,sha256=NG8sN4Z5EJ8JVQdj19tkFa9iQ1kBPTab9f_CUxd_u4Q,3143
@@ -369,8 +369,8 @@ letta/server/rest_api/routers/v1/jobs.py,sha256=fZkw3O9ivKYMgXkst-lDEiCPN8eL4-MY
  letta/server/rest_api/routers/v1/llms.py,sha256=sv5VWqB0-iSRi6LyzqsM1fLmOFm9UhM9ofzR9WHvJdE,1808
  letta/server/rest_api/routers/v1/messages.py,sha256=MgfHaTuPGyO1RjtjDuu4fC5dkmXEannUc9XGEK_aFLg,8559
  letta/server/rest_api/routers/v1/organizations.py,sha256=Un7qRo-69m9bC_TYyMnIRNLXf3fHHbNh1aVghnzzips,2932
- letta/server/rest_api/routers/v1/providers.py,sha256=_gKcCbEN2tW7c0BO7_cMFEYdhhsxDlKaopqXlZXMU1s,5996
- letta/server/rest_api/routers/v1/runs.py,sha256=qdCnp90Mdycu_itmPk78U_gYVbboBcadsCCAAdl4Oe8,14957
+ letta/server/rest_api/routers/v1/providers.py,sha256=EbUub6JlnGyYvykSqoFQaTWLTcr132VD5Edmh_Vxb1Y,7509
+ letta/server/rest_api/routers/v1/runs.py,sha256=f5snwQHkmmWVNuB4nYRL9SE22DWASZu5GuVtHdRwRzs,14951
  letta/server/rest_api/routers/v1/sandbox_configs.py,sha256=1x1QOOv-7jB9qf_AiYXH7ITyYMYicAgA4a5Qc8OHwB0,8702
  letta/server/rest_api/routers/v1/sources.py,sha256=8fkCbprC4PZlcf5HnBYj2-8PjWFIkL0TWZBlp95N7nE,22319
  letta/server/rest_api/routers/v1/steps.py,sha256=OIExfKSwilCmtrVHhF80h8g3yqhf5ww533FIw7N8noI,8251
@@ -476,8 +476,8 @@ letta/templates/sandbox_code_file.py.j2,sha256=eXga5J_04Z8-pGdwfOCDjcRnMceIqcF5i
  letta/templates/sandbox_code_file_async.py.j2,sha256=lb7nh_P2W9VZHzU_9TxSCEMUod7SDziPXgvT75xVds0,2748
  letta/templates/summary_request_text.j2,sha256=ZttQwXonW2lk4pJLYzLK0pmo4EO4EtUUIXjgXKiizuc,842
  letta/types/__init__.py,sha256=hokKjCVFGEfR7SLMrtZsRsBfsC7yTIbgKPLdGg4K1eY,147
- letta_nightly-0.12.0.dev20251009104148.dist-info/METADATA,sha256=-xts5F15qFLK5pjWrucujMu7g3rWG_qDh3uG9DQuzjE,24468
- letta_nightly-0.12.0.dev20251009104148.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- letta_nightly-0.12.0.dev20251009104148.dist-info/entry_points.txt,sha256=m-94Paj-kxiR6Ktu0us0_2qfhn29DzF2oVzqBE6cu8w,41
- letta_nightly-0.12.0.dev20251009104148.dist-info/licenses/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
- letta_nightly-0.12.0.dev20251009104148.dist-info/RECORD,,
+ letta_nightly-0.12.0.dev20251009203644.dist-info/METADATA,sha256=P1HTklX5YTlDe3BCg-vOF-NsHMseJx7r19qrHXzPft4,24468
+ letta_nightly-0.12.0.dev20251009203644.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ letta_nightly-0.12.0.dev20251009203644.dist-info/entry_points.txt,sha256=m-94Paj-kxiR6Ktu0us0_2qfhn29DzF2oVzqBE6cu8w,41
+ letta_nightly-0.12.0.dev20251009203644.dist-info/licenses/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
+ letta_nightly-0.12.0.dev20251009203644.dist-info/RECORD,,