letta-nightly 0.11.6.dev20250902104140__py3-none-any.whl → 0.11.7.dev20250904045700__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (138)
  1. letta/__init__.py +1 -1
  2. letta/agent.py +10 -14
  3. letta/agents/base_agent.py +18 -0
  4. letta/agents/helpers.py +32 -7
  5. letta/agents/letta_agent.py +953 -762
  6. letta/agents/voice_agent.py +1 -1
  7. letta/client/streaming.py +0 -1
  8. letta/constants.py +11 -8
  9. letta/errors.py +9 -0
  10. letta/functions/function_sets/base.py +77 -69
  11. letta/functions/function_sets/builtin.py +41 -22
  12. letta/functions/function_sets/multi_agent.py +1 -2
  13. letta/functions/schema_generator.py +0 -1
  14. letta/helpers/converters.py +8 -3
  15. letta/helpers/datetime_helpers.py +5 -4
  16. letta/helpers/message_helper.py +1 -2
  17. letta/helpers/pinecone_utils.py +0 -1
  18. letta/helpers/tool_rule_solver.py +10 -0
  19. letta/helpers/tpuf_client.py +848 -0
  20. letta/interface.py +8 -8
  21. letta/interfaces/anthropic_streaming_interface.py +7 -0
  22. letta/interfaces/openai_streaming_interface.py +29 -6
  23. letta/llm_api/anthropic_client.py +188 -18
  24. letta/llm_api/azure_client.py +0 -1
  25. letta/llm_api/bedrock_client.py +1 -2
  26. letta/llm_api/deepseek_client.py +319 -5
  27. letta/llm_api/google_vertex_client.py +75 -17
  28. letta/llm_api/groq_client.py +0 -1
  29. letta/llm_api/helpers.py +2 -2
  30. letta/llm_api/llm_api_tools.py +1 -50
  31. letta/llm_api/llm_client.py +6 -8
  32. letta/llm_api/mistral.py +1 -1
  33. letta/llm_api/openai.py +16 -13
  34. letta/llm_api/openai_client.py +31 -16
  35. letta/llm_api/together_client.py +0 -1
  36. letta/llm_api/xai_client.py +0 -1
  37. letta/local_llm/chat_completion_proxy.py +7 -6
  38. letta/local_llm/settings/settings.py +1 -1
  39. letta/orm/__init__.py +1 -0
  40. letta/orm/agent.py +8 -6
  41. letta/orm/archive.py +9 -1
  42. letta/orm/block.py +3 -4
  43. letta/orm/block_history.py +3 -1
  44. letta/orm/group.py +2 -3
  45. letta/orm/identity.py +1 -2
  46. letta/orm/job.py +1 -2
  47. letta/orm/llm_batch_items.py +1 -2
  48. letta/orm/message.py +8 -4
  49. letta/orm/mixins.py +18 -0
  50. letta/orm/organization.py +2 -0
  51. letta/orm/passage.py +8 -1
  52. letta/orm/passage_tag.py +55 -0
  53. letta/orm/sandbox_config.py +1 -3
  54. letta/orm/step.py +1 -2
  55. letta/orm/tool.py +1 -0
  56. letta/otel/resource.py +2 -2
  57. letta/plugins/plugins.py +1 -1
  58. letta/prompts/prompt_generator.py +10 -2
  59. letta/schemas/agent.py +11 -0
  60. letta/schemas/archive.py +4 -0
  61. letta/schemas/block.py +13 -0
  62. letta/schemas/embedding_config.py +0 -1
  63. letta/schemas/enums.py +24 -7
  64. letta/schemas/group.py +12 -0
  65. letta/schemas/letta_message.py +55 -1
  66. letta/schemas/letta_message_content.py +28 -0
  67. letta/schemas/letta_request.py +21 -4
  68. letta/schemas/letta_stop_reason.py +9 -1
  69. letta/schemas/llm_config.py +24 -8
  70. letta/schemas/mcp.py +0 -3
  71. letta/schemas/memory.py +14 -0
  72. letta/schemas/message.py +245 -141
  73. letta/schemas/openai/chat_completion_request.py +2 -1
  74. letta/schemas/passage.py +1 -0
  75. letta/schemas/providers/bedrock.py +1 -1
  76. letta/schemas/providers/openai.py +2 -2
  77. letta/schemas/tool.py +11 -5
  78. letta/schemas/tool_execution_result.py +0 -1
  79. letta/schemas/tool_rule.py +71 -0
  80. letta/serialize_schemas/marshmallow_agent.py +1 -2
  81. letta/server/rest_api/app.py +3 -3
  82. letta/server/rest_api/auth/index.py +0 -1
  83. letta/server/rest_api/interface.py +3 -11
  84. letta/server/rest_api/redis_stream_manager.py +3 -4
  85. letta/server/rest_api/routers/v1/agents.py +143 -84
  86. letta/server/rest_api/routers/v1/blocks.py +1 -1
  87. letta/server/rest_api/routers/v1/folders.py +1 -1
  88. letta/server/rest_api/routers/v1/groups.py +23 -22
  89. letta/server/rest_api/routers/v1/internal_templates.py +68 -0
  90. letta/server/rest_api/routers/v1/sandbox_configs.py +11 -5
  91. letta/server/rest_api/routers/v1/sources.py +1 -1
  92. letta/server/rest_api/routers/v1/tools.py +167 -15
  93. letta/server/rest_api/streaming_response.py +4 -3
  94. letta/server/rest_api/utils.py +75 -18
  95. letta/server/server.py +24 -35
  96. letta/services/agent_manager.py +359 -45
  97. letta/services/agent_serialization_manager.py +23 -3
  98. letta/services/archive_manager.py +72 -3
  99. letta/services/block_manager.py +1 -2
  100. letta/services/context_window_calculator/token_counter.py +11 -6
  101. letta/services/file_manager.py +1 -3
  102. letta/services/files_agents_manager.py +2 -4
  103. letta/services/group_manager.py +73 -12
  104. letta/services/helpers/agent_manager_helper.py +5 -5
  105. letta/services/identity_manager.py +8 -3
  106. letta/services/job_manager.py +2 -14
  107. letta/services/llm_batch_manager.py +1 -3
  108. letta/services/mcp/base_client.py +1 -2
  109. letta/services/mcp_manager.py +5 -6
  110. letta/services/message_manager.py +536 -15
  111. letta/services/organization_manager.py +1 -2
  112. letta/services/passage_manager.py +287 -12
  113. letta/services/provider_manager.py +1 -3
  114. letta/services/sandbox_config_manager.py +12 -7
  115. letta/services/source_manager.py +1 -2
  116. letta/services/step_manager.py +0 -1
  117. letta/services/summarizer/summarizer.py +4 -2
  118. letta/services/telemetry_manager.py +1 -3
  119. letta/services/tool_executor/builtin_tool_executor.py +136 -316
  120. letta/services/tool_executor/core_tool_executor.py +231 -74
  121. letta/services/tool_executor/files_tool_executor.py +2 -2
  122. letta/services/tool_executor/mcp_tool_executor.py +0 -1
  123. letta/services/tool_executor/multi_agent_tool_executor.py +2 -2
  124. letta/services/tool_executor/sandbox_tool_executor.py +0 -1
  125. letta/services/tool_executor/tool_execution_sandbox.py +2 -3
  126. letta/services/tool_manager.py +181 -64
  127. letta/services/tool_sandbox/modal_deployment_manager.py +2 -2
  128. letta/services/user_manager.py +1 -2
  129. letta/settings.py +5 -3
  130. letta/streaming_interface.py +3 -3
  131. letta/system.py +1 -1
  132. letta/utils.py +0 -1
  133. {letta_nightly-0.11.6.dev20250902104140.dist-info → letta_nightly-0.11.7.dev20250904045700.dist-info}/METADATA +11 -7
  134. {letta_nightly-0.11.6.dev20250902104140.dist-info → letta_nightly-0.11.7.dev20250904045700.dist-info}/RECORD +137 -135
  135. letta/llm_api/deepseek.py +0 -303
  136. {letta_nightly-0.11.6.dev20250902104140.dist-info → letta_nightly-0.11.7.dev20250904045700.dist-info}/WHEEL +0 -0
  137. {letta_nightly-0.11.6.dev20250902104140.dist-info → letta_nightly-0.11.7.dev20250904045700.dist-info}/entry_points.txt +0 -0
  138. {letta_nightly-0.11.6.dev20250902104140.dist-info → letta_nightly-0.11.7.dev20250904045700.dist-info}/licenses/LICENSE +0 -0
letta/services/tool_manager.py CHANGED
@@ -1,9 +1,8 @@
  import importlib
- import os
  import warnings
  from typing import List, Optional, Set, Union

- from sqlalchemy import func, select
+ from sqlalchemy import and_, func, or_, select

  from letta.constants import (
  BASE_FUNCTION_RETURN_CHAR_LIMIT,
@@ -19,7 +18,7 @@ from letta.constants import (
  LOCAL_ONLY_MULTI_AGENT_TOOLS,
  MCP_TOOL_TAG_NAME_PREFIX,
  )
- from letta.errors import LettaToolNameConflictError
+ from letta.errors import LettaToolNameConflictError, LettaToolNameSchemaMismatchError
  from letta.functions.functions import derive_openai_json_schema, load_function_set
  from letta.log import get_logger

@@ -28,8 +27,7 @@ from letta.orm.errors import NoResultFound
  from letta.orm.tool import Tool as ToolModel
  from letta.otel.tracing import trace_method
  from letta.schemas.enums import ToolType
- from letta.schemas.tool import Tool as PydanticTool
- from letta.schemas.tool import ToolCreate, ToolUpdate
+ from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate
  from letta.schemas.user import User as PydanticUser
  from letta.server.db import db_registry
  from letta.services.helpers.agent_manager_helper import calculate_multi_agent_tools
@@ -321,19 +319,37 @@ class ToolManager:
  @enforce_types
  @trace_method
  async def list_tools_async(
- self, actor: PydanticUser, after: Optional[str] = None, limit: Optional[int] = 50, upsert_base_tools: bool = True
+ self,
+ actor: PydanticUser,
+ after: Optional[str] = None,
+ limit: Optional[int] = 50,
+ upsert_base_tools: bool = True,
+ tool_types: Optional[List[str]] = None,
+ exclude_tool_types: Optional[List[str]] = None,
+ names: Optional[List[str]] = None,
+ tool_ids: Optional[List[str]] = None,
+ search: Optional[str] = None,
+ return_only_letta_tools: bool = False,
  ) -> List[PydanticTool]:
  """List all tools with optional pagination."""
- tools = await self._list_tools_async(actor=actor, after=after, limit=limit)
+ tools = await self._list_tools_async(
+ actor=actor,
+ after=after,
+ limit=limit,
+ tool_types=tool_types,
+ exclude_tool_types=exclude_tool_types,
+ names=names,
+ tool_ids=tool_ids,
+ search=search,
+ return_only_letta_tools=return_only_letta_tools,
+ )

  # Check if all base tools are present if we requested all the tools w/o cursor
  # TODO: This is a temporary hack to resolve this issue
  # TODO: This requires a deeper rethink about how we keep all our internal tools up-to-date
  if not after and upsert_base_tools:
  existing_tool_names = {tool.name for tool in tools}
- base_tool_names = (
- LETTA_TOOL_SET - set(LOCAL_ONLY_MULTI_AGENT_TOOLS) if os.getenv("LETTA_ENVIRONMENT") == "PRODUCTION" else LETTA_TOOL_SET
- )
+ base_tool_names = LETTA_TOOL_SET - set(LOCAL_ONLY_MULTI_AGENT_TOOLS) if settings.environment == "PRODUCTION" else LETTA_TOOL_SET
  missing_base_tools = base_tool_names - existing_tool_names

  # If any base tools are missing, upsert all base tools
@@ -341,22 +357,86 @@ class ToolManager:
  logger.info(f"Missing base tools detected: {missing_base_tools}. Upserting all base tools.")
  await self.upsert_base_tools_async(actor=actor)
  # Re-fetch the tools list after upserting base tools
- tools = await self._list_tools_async(actor=actor, after=after, limit=limit)
+ tools = await self._list_tools_async(
+ actor=actor,
+ after=after,
+ limit=limit,
+ tool_types=tool_types,
+ exclude_tool_types=exclude_tool_types,
+ names=names,
+ tool_ids=tool_ids,
+ search=search,
+ return_only_letta_tools=return_only_letta_tools,
+ )

  return tools

  @enforce_types
  @trace_method
- async def _list_tools_async(self, actor: PydanticUser, after: Optional[str] = None, limit: Optional[int] = 50) -> List[PydanticTool]:
+ async def _list_tools_async(
+ self,
+ actor: PydanticUser,
+ after: Optional[str] = None,
+ limit: Optional[int] = 50,
+ tool_types: Optional[List[str]] = None,
+ exclude_tool_types: Optional[List[str]] = None,
+ names: Optional[List[str]] = None,
+ tool_ids: Optional[List[str]] = None,
+ search: Optional[str] = None,
+ return_only_letta_tools: bool = False,
+ ) -> List[PydanticTool]:
  """List all tools with optional pagination."""
  tools_to_delete = []
  async with db_registry.async_session() as session:
- tools = await ToolModel.list_async(
- db_session=session,
- after=after,
- limit=limit,
- organization_id=actor.organization_id,
- )
+ # Use SQLAlchemy directly for all cases - more control and consistency
+ # Start with base query
+ query = select(ToolModel).where(ToolModel.organization_id == actor.organization_id)
+
+ # Apply tool_types filter
+ if tool_types is not None:
+ query = query.where(ToolModel.tool_type.in_(tool_types))
+
+ # Apply names filter
+ if names is not None:
+ query = query.where(ToolModel.name.in_(names))
+
+ # Apply tool_ids filter
+ if tool_ids is not None:
+ query = query.where(ToolModel.id.in_(tool_ids))
+
+ # Apply search filter (ILIKE for case-insensitive partial match)
+ if search is not None:
+ query = query.where(ToolModel.name.ilike(f"%{search}%"))
+
+ # Apply exclude_tool_types filter at database level
+ if exclude_tool_types is not None:
+ query = query.where(~ToolModel.tool_type.in_(exclude_tool_types))
+
+ # Apply return_only_letta_tools filter at database level
+ if return_only_letta_tools:
+ query = query.where(ToolModel.tool_type.like("letta_%"))
+
+ # Apply pagination if specified
+ if after is not None:
+ after_tool = await session.get(ToolModel, after)
+ if after_tool:
+ query = query.where(
+ or_(
+ ToolModel.created_at < after_tool.created_at,
+ and_(ToolModel.created_at == after_tool.created_at, ToolModel.id < after_tool.id),
+ )
+ )
+
+ # Apply limit
+ if limit is not None:
+ query = query.limit(limit)
+
+ # Order by created_at and id for consistent pagination
+ query = query.order_by(ToolModel.created_at.desc(), ToolModel.id.desc())
+
+ # Execute query
+ result = await session.execute(query)
+ tools = list(result.scalars())

  # Remove any malformed tools
  results = []
@@ -379,6 +459,61 @@

  return results

+ @enforce_types
+ @trace_method
+ async def count_tools_async(
+ self,
+ actor: PydanticUser,
+ tool_types: Optional[List[str]] = None,
+ exclude_tool_types: Optional[List[str]] = None,
+ names: Optional[List[str]] = None,
+ tool_ids: Optional[List[str]] = None,
+ search: Optional[str] = None,
+ return_only_letta_tools: bool = False,
+ exclude_letta_tools: bool = False,
+ ) -> int:
+ """Count tools with the same filtering logic as list_tools_async."""
+ async with db_registry.async_session() as session:
+ # Use SQLAlchemy directly with COUNT query - same filtering logic as list_tools_async
+ # Start with base query
+ query = select(func.count(ToolModel.id)).where(ToolModel.organization_id == actor.organization_id)
+
+ # Apply tool_types filter
+ if tool_types is not None:
+ query = query.where(ToolModel.tool_type.in_(tool_types))
+
+ # Apply names filter
+ if names is not None:
+ query = query.where(ToolModel.name.in_(names))
+
+ # Apply tool_ids filter
+ if tool_ids is not None:
+ query = query.where(ToolModel.id.in_(tool_ids))
+
+ # Apply search filter (ILIKE for case-insensitive partial match)
+ if search is not None:
+ query = query.where(ToolModel.name.ilike(f"%{search}%"))
+
+ # Apply exclude_tool_types filter at database level
+ if exclude_tool_types is not None:
+ query = query.where(~ToolModel.tool_type.in_(exclude_tool_types))
+
+ # Apply return_only_letta_tools filter at database level
+ if return_only_letta_tools:
+ query = query.where(ToolModel.tool_type.like("letta_%"))
+
+ # Handle exclude_letta_tools logic (if True, exclude Letta tools)
+ if exclude_letta_tools:
+ # Exclude tools that are in the LETTA_TOOL_SET
+ letta_tool_names = list(LETTA_TOOL_SET)
+ query = query.where(~ToolModel.name.in_(letta_tool_names))
+
+ # Execute count query
+ result = await session.execute(query)
+ count = result.scalar()
+
+ return count or 0
+
  @enforce_types
  @trace_method
  async def size_async(
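
The rewritten _list_tools_async and the new count_tools_async above build their queries directly in SQLAlchemy and page with a keyset cursor ordered by (created_at DESC, id DESC) instead of offsets. A minimal, self-contained sketch of that cursor pattern, using a hypothetical Item model and an async session in place of Letta's ToolModel and db_registry:

import datetime

from sqlalchemy import DateTime, String, and_, or_, select
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase):
    pass


class Item(Base):
    __tablename__ = "items"
    id: Mapped[str] = mapped_column(String, primary_key=True)
    created_at: Mapped[datetime.datetime] = mapped_column(DateTime)


async def list_after_cursor(session, after_id=None, limit=50):
    # Newest rows first; ties on created_at are broken by id so the ordering is total.
    query = select(Item).order_by(Item.created_at.desc(), Item.id.desc())
    if after_id is not None:
        cursor = await session.get(Item, after_id)
        if cursor is not None:
            # Keyset condition: only rows strictly "older" than the cursor row.
            query = query.where(
                or_(
                    Item.created_at < cursor.created_at,
                    and_(Item.created_at == cursor.created_at, Item.id < cursor.id),
                )
            )
    result = await session.execute(query.limit(limit))
    return list(result.scalars())

Ordering by the (created_at, id) tuple and filtering with or_/and_ keeps pagination stable even when many rows share the same created_at timestamp.
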
@@ -406,6 +541,7 @@
  updated_tool_type: Optional[ToolType] = None,
  bypass_name_check: bool = False,
  ) -> PydanticTool:
+ # TODO: remove this (legacy non-async)
  """
  Update a tool with complex validation and schema derivation logic.

@@ -522,55 +658,36 @@
  # Fetch current tool early to allow conditional logic based on tool type
  current_tool = await self.get_tool_by_id_async(tool_id=tool_id, actor=actor)

- # For MCP tools, do NOT derive schema from Python source. Trust provided JSON schema.
- if current_tool.tool_type == ToolType.EXTERNAL_MCP:
- # Prefer provided json_schema; fall back to current
- if "json_schema" in update_data:
- new_schema = update_data["json_schema"].copy()
- new_name = new_schema.get("name", current_tool.name)
- else:
- new_schema = current_tool.json_schema
- new_name = current_tool.name
- # Ensure we don't trigger derive
- update_data.pop("source_code", None)
- # If name changes, enforce uniqueness
- if new_name != current_tool.name:
- name_exists = await self.tool_name_exists_async(tool_name=new_name, actor=actor)
- if name_exists:
- raise LettaToolNameConflictError(tool_name=new_name)
+ # Do NOT derive schema from Python source. Trust provided JSON schema.
+ # Prefer provided json_schema; fall back to current
+ if "json_schema" in update_data:
+ new_schema = update_data["json_schema"].copy()
+ new_name = new_schema.get("name", current_tool.name)
  else:
- # For non-MCP tools, preserve existing behavior
- # TODO: Consider this behavior...is this what we want?
- # TODO: I feel like it's bad if json_schema strays from source code so
- # if source code is provided, always derive the name from it
- if "source_code" in update_data.keys() and not bypass_name_check:
- # Check source type to use appropriate parser
- source_type = update_data.get("source_type", current_tool.source_type)
- if source_type == "typescript":
- from letta.functions.typescript_parser import derive_typescript_json_schema
+ new_schema = current_tool.json_schema
+ new_name = current_tool.name

- derived_schema = derive_typescript_json_schema(source_code=update_data["source_code"])
- else:
- # Default to Python for backwards compatibility
- derived_schema = derive_openai_json_schema(source_code=update_data["source_code"])
- new_name = derived_schema["name"]
-
- # if json_schema wasn't provided, use the derived schema
- if "json_schema" not in update_data.keys():
- new_schema = derived_schema
- else:
- # if json_schema was provided, update only its name to match the source code
- new_schema = update_data["json_schema"].copy()
- new_schema["name"] = new_name
- # update the json_schema in update_data so it gets applied in the loop
- update_data["json_schema"] = new_schema
+ # original tool may no have a JSON schema at all for legacy reasons
+ # in this case, fallback to dangerous schema generation
+ if new_schema is None:
+ if source_type == "typescript":
+ from letta.functions.typescript_parser import derive_typescript_json_schema

- # check if the name is changing and if so, verify it doesn't conflict
- if new_name != current_tool.name:
- # check if a tool with the new name already exists
- name_exists = await self.tool_name_exists_async(tool_name=new_name, actor=actor)
- if name_exists:
- raise LettaToolNameConflictError(tool_name=new_name)
+ new_schema = derive_typescript_json_schema(source_code=update_data["source_code"])
+ else:
+ new_schema = derive_openai_json_schema(source_code=update_data["source_code"])
+
+ # If name changes, enforce uniqueness
+ if new_name != current_tool.name:
+ name_exists = await self.tool_name_exists_async(tool_name=new_name, actor=actor)
+ if name_exists:
+ raise LettaToolNameConflictError(tool_name=new_name)
+
+ # NOTE: EXTREMELEY HACKY, we need to stop making assumptions about the source_code
+ if "source_code" in update_data and f"def {new_name}" not in update_data.get("source_code", ""):
+ raise LettaToolNameSchemaMismatchError(
+ tool_name=new_name, json_schema_name=new_schema.get("name"), source_code=update_data.get("source_code")
+ )

  # Now perform the update within the session
  async with db_registry.async_session() as session:
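
The update path above now trusts the caller-provided JSON schema and rejects updates whose source code does not define a function matching the schema name. A rough standalone illustration of that consistency heuristic; the exception and helper below are hypothetical stand-ins, not Letta's actual API:

class ToolNameSchemaMismatch(Exception):
    """Hypothetical stand-in for Letta's LettaToolNameSchemaMismatchError."""


def check_name_matches_source(new_name: str, source_code: str | None) -> None:
    # Mirrors the diff's heuristic: if source code is part of the update, it must
    # contain a `def <new_name>` definition matching the (possibly renamed) schema.
    if source_code is not None and f"def {new_name}" not in source_code:
        raise ToolNameSchemaMismatch(f"schema name {new_name!r} is not defined in the provided source code")


# Passes for a matching definition; raises ToolNameSchemaMismatch otherwise.
check_name_matches_source("web_search", "def web_search(query: str) -> str:\n    return query")
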
letta/services/tool_sandbox/modal_deployment_manager.py CHANGED
@@ -183,9 +183,9 @@ class ModalDeploymentManager:
  existing_app = await self._try_get_existing_app(sbx_config, version_hash, user)
  if existing_app:
  return existing_app, version_hash
- raise RuntimeError(f"Deployment completed but app not found")
+ raise RuntimeError("Deployment completed but app not found")
  else:
- raise RuntimeError(f"Timeout waiting for deployment")
+ raise RuntimeError("Timeout waiting for deployment")

  # We're deploying - mark as in progress
  deployment_key = None
letta/services/user_manager.py CHANGED
@@ -10,8 +10,7 @@ from letta.orm.errors import NoResultFound
  from letta.orm.organization import Organization as OrganizationModel
  from letta.orm.user import User as UserModel
  from letta.otel.tracing import trace_method
- from letta.schemas.user import User as PydanticUser
- from letta.schemas.user import UserUpdate
+ from letta.schemas.user import User as PydanticUser, UserUpdate
  from letta.server.db import db_registry
  from letta.utils import enforce_types

letta/settings.py CHANGED
@@ -23,7 +23,7 @@ class ToolSettings(BaseSettings):

  # Search Providers
  tavily_api_key: str | None = Field(default=None, description="API key for using Tavily as a search provider.")
- firecrawl_api_key: str | None = Field(default=None, description="API key for using Firecrawl as a search provider.")
+ exa_api_key: str | None = Field(default=None, description="API key for using Exa as a search provider.")

  # Local Sandbox configurations
  tool_exec_dir: Optional[str] = None
@@ -89,7 +89,6 @@ class SummarizerSettings(BaseSettings):


  class ModelSettings(BaseSettings):
-
  model_config = SettingsConfigDict(env_file=".env", extra="ignore")

  global_max_context_window_limit: int = 32000
@@ -146,6 +145,7 @@
  gemini_api_key: Optional[str] = None
  gemini_base_url: str = "https://generativelanguage.googleapis.com/"
  gemini_force_minimum_thinking_budget: bool = False
+ gemini_max_retries: int = 5

  # google vertex
  google_cloud_project: Optional[str] = None
@@ -205,6 +205,7 @@
  letta_dir: Optional[Path] = Field(Path.home() / ".letta", alias="LETTA_DIR")
  debug: Optional[bool] = False
  cors_origins: Optional[list] = cors_origins
+ environment: Optional[str] = Field(default=None, description="Application environment (PRODUCTION, DEV, etc.)")

  # SSE Streaming keepalive settings
  enable_keepalive: bool = Field(True, description="Enable keepalive messages in SSE streams to prevent timeouts")
@@ -299,7 +300,8 @@
  # For tpuf - currently only for archival memories
  use_tpuf: bool = False
  tpuf_api_key: Optional[str] = None
- tpuf_region: str = "gcp-us-central1.turbopuffer.com"
+ tpuf_region: str = "gcp-us-central1"
+ embed_all_messages: bool = False

  # File processing timeout settings
  file_processing_timeout_minutes: int = 30
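
Call sites elsewhere in this release (for example the base-tool check in list_tools_async) switch from os.getenv("LETTA_ENVIRONMENT") to the new settings.environment field shown above. A minimal sketch of how a pydantic-settings field like this picks up the variable, assuming a simplified settings class with a letta_ env prefix (the prefix/alias configuration of Letta's real Settings class is not shown in this diff):

import os
from typing import Optional

from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict


class AppSettings(BaseSettings):
    # Assumed prefix for illustration; env var matching is case-insensitive by default.
    model_config = SettingsConfigDict(env_prefix="letta_", extra="ignore")

    environment: Optional[str] = Field(default=None, description="Application environment (PRODUCTION, DEV, etc.)")


os.environ["LETTA_ENVIRONMENT"] = "PRODUCTION"
settings = AppSettings()
assert settings.environment == "PRODUCTION"
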
letta/streaming_interface.py CHANGED
@@ -117,9 +117,9 @@ class StreamingCLIInterface(AgentChunkStreamingInterface):

  # Starting a new buffer line
  if not self.streaming_buffer_type:
- assert not (
- message_delta.content is not None and message_delta.tool_calls is not None and len(message_delta.tool_calls)
- ), f"Error: got both content and tool_calls in message stream\n{message_delta}"
+ assert not (message_delta.content is not None and message_delta.tool_calls is not None and len(message_delta.tool_calls)), (
+ f"Error: got both content and tool_calls in message stream\n{message_delta}"
+ )

  if message_delta.content is not None:
  # Write out the prefix for inner thoughts
letta/system.py CHANGED
@@ -187,7 +187,7 @@ def package_summarize_message(summary, summary_message_count, hidden_message_cou


  def package_summarize_message_no_counts(summary, timezone):
  context_message = (
- f"Note: prior messages have been hidden from view due to conversation memory constraints.\n"
+ "Note: prior messages have been hidden from view due to conversation memory constraints.\n"
  + f"The following is a summary of the previous messages:\n {summary}"
  )
letta/utils.py CHANGED
@@ -1149,7 +1149,6 @@ class CancellationSignal:
  """

  def __init__(self, job_manager=None, job_id=None, actor=None):
-
  from letta.log import get_logger
  from letta.schemas.user import User
  from letta.services.job_manager import JobManager
{letta_nightly-0.11.6.dev20250902104140.dist-info → letta_nightly-0.11.7.dev20250904045700.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: letta-nightly
- Version: 0.11.6.dev20250902104140
+ Version: 0.11.7.dev20250904045700
  Summary: Create LLM agents with long-term memory and custom tools
  Author-email: Letta Team <contact@letta.com>
  License: Apache License
@@ -18,15 +18,15 @@ Requires-Dist: composio-core>=0.7.7
  Requires-Dist: datamodel-code-generator[http]>=0.25.0
  Requires-Dist: demjson3>=3.0.6
  Requires-Dist: docstring-parser<0.17,>=0.16
+ Requires-Dist: exa-py>=1.15.4
  Requires-Dist: faker>=36.1.0
- Requires-Dist: firecrawl-py<3.0.0,>=2.8.0
  Requires-Dist: grpcio-tools>=1.68.1
  Requires-Dist: grpcio>=1.68.1
  Requires-Dist: html2text>=2020.1.16
  Requires-Dist: httpx-sse>=0.4.0
  Requires-Dist: httpx>=0.28.0
  Requires-Dist: jinja2>=3.1.5
- Requires-Dist: letta-client>=0.1.277
+ Requires-Dist: letta-client==0.1.307
  Requires-Dist: llama-index-embeddings-openai>=0.3.1
  Requires-Dist: llama-index>=0.12.2
  Requires-Dist: markitdown[docx,pdf,pptx]>=0.1.2
@@ -53,7 +53,9 @@ Requires-Dist: python-multipart>=0.0.19
  Requires-Dist: pytz>=2023.3.post1
  Requires-Dist: pyyaml>=6.0.1
  Requires-Dist: questionary>=2.0.1
+ Requires-Dist: readability-lxml
  Requires-Dist: rich>=13.9.4
+ Requires-Dist: ruff[dev]>=0.12.10
  Requires-Dist: sentry-sdk[fastapi]==2.19.1
  Requires-Dist: setuptools>=70
  Requires-Dist: sqlalchemy-json>=0.7.0
@@ -63,6 +65,7 @@ Requires-Dist: sqlmodel>=0.0.16
  Requires-Dist: structlog>=25.4.0
  Requires-Dist: tavily-python>=0.7.2
  Requires-Dist: tqdm>=4.66.1
+ Requires-Dist: trafilatura
  Requires-Dist: typer>=0.15.2
  Provides-Extra: bedrock
  Requires-Dist: aioboto3>=14.3.0; extra == 'bedrock'
@@ -71,22 +74,23 @@ Provides-Extra: cloud-tool-sandbox
  Requires-Dist: e2b-code-interpreter>=1.0.3; extra == 'cloud-tool-sandbox'
  Provides-Extra: desktop
  Requires-Dist: aiosqlite>=0.21.0; extra == 'desktop'
+ Requires-Dist: async-lru>=2.0.5; extra == 'desktop'
  Requires-Dist: docker>=7.1.0; extra == 'desktop'
  Requires-Dist: fastapi>=0.115.6; extra == 'desktop'
  Requires-Dist: langchain-community>=0.3.7; extra == 'desktop'
  Requires-Dist: langchain>=0.3.7; extra == 'desktop'
  Requires-Dist: locust>=2.31.5; extra == 'desktop'
+ Requires-Dist: magika>=0.6.2; extra == 'desktop'
+ Requires-Dist: pgserver>=0.1.4; extra == 'desktop'
  Requires-Dist: pgvector>=0.2.3; extra == 'desktop'
  Requires-Dist: sqlite-vec>=0.1.7a2; extra == 'desktop'
+ Requires-Dist: tiktoken>=0.11.0; extra == 'desktop'
  Requires-Dist: uvicorn>=0.24.0.post1; extra == 'desktop'
  Requires-Dist: websockets; extra == 'desktop'
  Requires-Dist: wikipedia>=1.4.0; extra == 'desktop'
  Provides-Extra: dev
- Requires-Dist: autoflake>=2.3.0; extra == 'dev'
- Requires-Dist: black[jupyter]>=24.4.2; extra == 'dev'
  Requires-Dist: ipdb>=0.13.13; extra == 'dev'
  Requires-Dist: ipykernel>=6.29.5; extra == 'dev'
- Requires-Dist: isort>=5.13.2; extra == 'dev'
  Requires-Dist: pexpect>=4.9.0; extra == 'dev'
  Requires-Dist: pre-commit>=3.5.0; extra == 'dev'
  Requires-Dist: pyright>=1.1.347; extra == 'dev'
@@ -101,7 +105,7 @@ Requires-Dist: granian[reload,uvloop]>=2.3.2; extra == 'experimental'
  Requires-Dist: uvloop>=0.21.0; extra == 'experimental'
  Provides-Extra: external-tools
  Requires-Dist: docker>=7.1.0; extra == 'external-tools'
- Requires-Dist: firecrawl-py<3.0.0,>=2.8.0; extra == 'external-tools'
+ Requires-Dist: exa-py>=1.15.4; extra == 'external-tools'
  Requires-Dist: langchain-community>=0.3.7; extra == 'external-tools'
  Requires-Dist: langchain>=0.3.7; extra == 'external-tools'
  Requires-Dist: turbopuffer>=0.5.17; extra == 'external-tools'