letta-nightly 0.8.5.dev20250625104328__py3-none-any.whl → 0.8.6.dev20250626104326__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (78)
  1. letta/agent.py +16 -12
  2. letta/agents/base_agent.py +4 -1
  3. letta/agents/helpers.py +35 -3
  4. letta/agents/letta_agent.py +132 -106
  5. letta/agents/letta_agent_batch.py +4 -3
  6. letta/agents/voice_agent.py +12 -2
  7. letta/agents/voice_sleeptime_agent.py +12 -2
  8. letta/constants.py +24 -3
  9. letta/data_sources/redis_client.py +6 -0
  10. letta/errors.py +5 -0
  11. letta/functions/function_sets/files.py +10 -3
  12. letta/functions/function_sets/multi_agent.py +0 -32
  13. letta/groups/sleeptime_multi_agent_v2.py +6 -0
  14. letta/helpers/converters.py +4 -1
  15. letta/helpers/datetime_helpers.py +16 -23
  16. letta/helpers/message_helper.py +5 -2
  17. letta/helpers/tool_rule_solver.py +29 -2
  18. letta/interfaces/openai_streaming_interface.py +9 -2
  19. letta/llm_api/anthropic.py +11 -1
  20. letta/llm_api/anthropic_client.py +14 -3
  21. letta/llm_api/aws_bedrock.py +29 -15
  22. letta/llm_api/bedrock_client.py +74 -0
  23. letta/llm_api/google_ai_client.py +7 -3
  24. letta/llm_api/google_vertex_client.py +18 -4
  25. letta/llm_api/llm_client.py +7 -0
  26. letta/llm_api/openai_client.py +13 -0
  27. letta/orm/agent.py +5 -0
  28. letta/orm/block_history.py +1 -1
  29. letta/orm/enums.py +6 -25
  30. letta/orm/job.py +1 -2
  31. letta/orm/llm_batch_items.py +1 -1
  32. letta/orm/mcp_server.py +1 -1
  33. letta/orm/passage.py +7 -1
  34. letta/orm/sqlalchemy_base.py +7 -5
  35. letta/orm/tool.py +2 -1
  36. letta/schemas/agent.py +34 -10
  37. letta/schemas/enums.py +42 -1
  38. letta/schemas/job.py +6 -3
  39. letta/schemas/letta_request.py +4 -0
  40. letta/schemas/llm_batch_job.py +7 -2
  41. letta/schemas/memory.py +2 -2
  42. letta/schemas/providers.py +32 -6
  43. letta/schemas/run.py +1 -1
  44. letta/schemas/tool_rule.py +40 -12
  45. letta/serialize_schemas/pydantic_agent_schema.py +9 -2
  46. letta/server/rest_api/app.py +3 -2
  47. letta/server/rest_api/routers/v1/agents.py +25 -22
  48. letta/server/rest_api/routers/v1/runs.py +2 -3
  49. letta/server/rest_api/routers/v1/sources.py +31 -0
  50. letta/server/rest_api/routers/v1/voice.py +1 -0
  51. letta/server/rest_api/utils.py +38 -13
  52. letta/server/server.py +52 -21
  53. letta/services/agent_manager.py +58 -7
  54. letta/services/block_manager.py +1 -1
  55. letta/services/file_processor/chunker/line_chunker.py +2 -1
  56. letta/services/file_processor/file_processor.py +2 -9
  57. letta/services/files_agents_manager.py +177 -37
  58. letta/services/helpers/agent_manager_helper.py +77 -48
  59. letta/services/helpers/tool_parser_helper.py +2 -1
  60. letta/services/job_manager.py +33 -2
  61. letta/services/llm_batch_manager.py +1 -1
  62. letta/services/provider_manager.py +6 -4
  63. letta/services/tool_executor/core_tool_executor.py +1 -1
  64. letta/services/tool_executor/files_tool_executor.py +99 -30
  65. letta/services/tool_executor/multi_agent_tool_executor.py +1 -17
  66. letta/services/tool_executor/tool_execution_manager.py +6 -0
  67. letta/services/tool_executor/tool_executor_base.py +3 -0
  68. letta/services/tool_sandbox/base.py +39 -1
  69. letta/services/tool_sandbox/e2b_sandbox.py +7 -0
  70. letta/services/user_manager.py +3 -2
  71. letta/settings.py +8 -14
  72. letta/system.py +17 -17
  73. letta/templates/sandbox_code_file_async.py.j2 +59 -0
  74. {letta_nightly-0.8.5.dev20250625104328.dist-info → letta_nightly-0.8.6.dev20250626104326.dist-info}/METADATA +3 -2
  75. {letta_nightly-0.8.5.dev20250625104328.dist-info → letta_nightly-0.8.6.dev20250626104326.dist-info}/RECORD +78 -76
  76. {letta_nightly-0.8.5.dev20250625104328.dist-info → letta_nightly-0.8.6.dev20250626104326.dist-info}/LICENSE +0 -0
  77. {letta_nightly-0.8.5.dev20250625104328.dist-info → letta_nightly-0.8.6.dev20250626104326.dist-info}/WHEEL +0 -0
  78. {letta_nightly-0.8.5.dev20250625104328.dist-info → letta_nightly-0.8.6.dev20250626104326.dist-info}/entry_points.txt +0 -0
letta/schemas/agent.py CHANGED
@@ -4,8 +4,12 @@ from typing import Dict, List, Optional
 
 from pydantic import BaseModel, Field, field_validator, model_validator
 
-from letta.constants import CORE_MEMORY_LINE_NUMBER_WARNING, DEFAULT_EMBEDDING_CHUNK_SIZE
-from letta.helpers import ToolRulesSolver
+from letta.constants import (
+    CORE_MEMORY_LINE_NUMBER_WARNING,
+    DEFAULT_EMBEDDING_CHUNK_SIZE,
+    FILE_MEMORY_EMPTY_MESSAGE,
+    FILE_MEMORY_EXISTS_MESSAGE,
+)
 from letta.schemas.block import CreateBlock
 from letta.schemas.embedding_config import EmbeddingConfig
 from letta.schemas.environment_variables import AgentEnvironmentVariable
@@ -110,6 +114,9 @@ class AgentState(OrmMetadataBase, validate_assignment=True):
     last_run_completion: Optional[datetime] = Field(None, description="The timestamp when the agent last completed a run.")
     last_run_duration_ms: Optional[int] = Field(None, description="The duration in milliseconds of the agent's last run.")
 
+    # timezone
+    timezone: Optional[str] = Field(None, description="The timezone of the agent (IANA format).")
+
     def get_agent_env_vars_as_dict(self) -> Dict[str, str]:
         # Get environment variables for this agent specifically
         per_agent_env_vars = {}
@@ -193,6 +200,7 @@ class CreateAgent(BaseModel, validate_assignment=True): #
     )
     enable_sleeptime: Optional[bool] = Field(None, description="If set to True, memory management will move to a background agent thread.")
     response_format: Optional[ResponseFormatUnion] = Field(None, description="The response format for the agent.")
+    timezone: Optional[str] = Field(None, description="The timezone of the agent (IANA format).")
 
     @field_validator("name")
     @classmethod
@@ -286,6 +294,7 @@ class UpdateAgent(BaseModel):
     response_format: Optional[ResponseFormatUnion] = Field(None, description="The response format for the agent.")
     last_run_completion: Optional[datetime] = Field(None, description="The timestamp when the agent last completed a run.")
     last_run_duration_ms: Optional[int] = Field(None, description="The duration in milliseconds of the agent's last run.")
+    timezone: Optional[str] = Field(None, description="The timezone of the agent (IANA format).")
 
     class Config:
         extra = "ignore"  # Ignores extra fields
@@ -301,11 +310,6 @@ class AgentStepResponse(BaseModel):
     usage: UsageStatistics = Field(..., description="Usage statistics of the LLM call during the agent's step.")
 
 
-class AgentStepState(BaseModel):
-    step_number: int = Field(..., description="The current step number in the agent loop")
-    tool_rules_solver: ToolRulesSolver = Field(..., description="The current state of the ToolRulesSolver")
-
-
 def get_prompt_template_for_agent_type(agent_type: Optional[AgentType] = None):
 
     # Sleeptime agents use the MemGPT v2 memory tools (line numbers)
@@ -333,24 +337,34 @@ def get_prompt_template_for_agent_type(agent_type: Optional[AgentType] = None):
         "{% if not loop.last %}\n{% endif %}"
         "{% endfor %}"
         "\n</memory_blocks>"
-        "<files>\nThe following memory files are currently accessible:\n\n"
+        "\n\n{% if tool_usage_rules %}"
+        "<tool_usage_rules>\n"
+        "{{ tool_usage_rules.description }}\n\n"
+        "{{ tool_usage_rules.value }}\n"
+        "</tool_usage_rules>"
+        "{% endif %}"
+        f"\n\n<files>\n{{% if file_blocks %}}{FILE_MEMORY_EXISTS_MESSAGE}\n{{% else %}}{FILE_MEMORY_EMPTY_MESSAGE}{{% endif %}}"
         "{% for block in file_blocks %}"
         f"<file status=\"{{{{ '{FileStatus.open.value}' if block.value else '{FileStatus.closed.value}' }}}}\">\n"
         "<{{ block.label }}>\n"
+        "{% if block.description %}"
         "<description>\n"
         "{{ block.description }}\n"
        "</description>\n"
+        "{% endif %}"
         "<metadata>"
         "{% if block.read_only %}\n- read_only=true{% endif %}\n"
         "- chars_current={{ block.value|length }}\n"
         "- chars_limit={{ block.limit }}\n"
         "</metadata>\n"
+        "{% if block.value %}"
         "<value>\n"
         f"{CORE_MEMORY_LINE_NUMBER_WARNING}\n"
         "{% for line in block.value.split('\\n') %}"
-        "Line {{ loop.index }}: {{ line }}\n"
+        "{{ loop.index }}: {{ line }}\n"
         "{% endfor %}"
         "</value>\n"
+        "{% endif %}"
         "</{{ block.label }}>\n"
         "</file>\n"
         "{% if not loop.last %}\n{% endif %}"
@@ -379,21 +393,31 @@ def get_prompt_template_for_agent_type(agent_type: Optional[AgentType] = None):
         "{% if not loop.last %}\n{% endif %}"
         "{% endfor %}"
         "\n</memory_blocks>"
-        "<files>\nThe following memory files are currently accessible:\n\n"
+        "\n\n{% if tool_usage_rules %}"
+        "<tool_usage_rules>\n"
+        "{{ tool_usage_rules.description }}\n\n"
+        "{{ tool_usage_rules.value }}\n"
+        "</tool_usage_rules>"
+        "{% endif %}"
+        f"\n\n<files>\n{{% if file_blocks %}}{FILE_MEMORY_EXISTS_MESSAGE}\n{{% else %}}{FILE_MEMORY_EMPTY_MESSAGE}{{% endif %}}"
         "{% for block in file_blocks %}"
         f"<file status=\"{{{{ '{FileStatus.open.value}' if block.value else '{FileStatus.closed.value}' }}}}\">\n"
         "<{{ block.label }}>\n"
+        "{% if block.description %}"
         "<description>\n"
         "{{ block.description }}\n"
         "</description>\n"
+        "{% endif %}"
         "<metadata>"
         "{% if block.read_only %}\n- read_only=true{% endif %}\n"
         "- chars_current={{ block.value|length }}\n"
         "- chars_limit={{ block.limit }}\n"
         "</metadata>\n"
+        "{% if block.value %}"
         "<value>\n"
         "{{ block.value }}\n"
         "</value>\n"
+        "{% endif %}"
         "</{{ block.label }}>\n"
         "</file>\n"
         "{% if not loop.last %}\n{% endif %}"
letta/schemas/enums.py CHANGED
@@ -45,7 +45,7 @@ class JobStatus(str, Enum):
     Status of the job.
     """
 
-    not_started = "not_started"
+    # TODO (cliandy): removed `not_started`, but what does `pending` or `expired` here mean and where do we use them?
     created = "created"
     running = "running"
     completed = "completed"
@@ -86,6 +86,7 @@ class ToolRuleType(str, Enum):
     constrain_child_tools = "constrain_child_tools"
     max_count_per_step = "max_count_per_step"
     parent_last_tool = "parent_last_tool"
+    required_before_exit = "required_before_exit"  # tool must be called before loop can exit
 
 
 class FileProcessingStatus(str, Enum):
@@ -94,3 +95,43 @@ class FileProcessingStatus(str, Enum):
     EMBEDDING = "embedding"
     COMPLETED = "completed"
     ERROR = "error"
+
+
+class ToolType(str, Enum):
+    CUSTOM = "custom"
+    LETTA_CORE = "letta_core"
+    LETTA_MEMORY_CORE = "letta_memory_core"
+    LETTA_MULTI_AGENT_CORE = "letta_multi_agent_core"
+    LETTA_SLEEPTIME_CORE = "letta_sleeptime_core"
+    LETTA_VOICE_SLEEPTIME_CORE = "letta_voice_sleeptime_core"
+    LETTA_BUILTIN = "letta_builtin"
+    LETTA_FILES_CORE = "letta_files_core"
+    EXTERNAL_COMPOSIO = "external_composio"
+    EXTERNAL_LANGCHAIN = "external_langchain"
+    # TODO is "external" the right name here? Since as of now, MCP is local / doesn't support remote?
+    EXTERNAL_MCP = "external_mcp"
+
+
+class JobType(str, Enum):
+    JOB = "job"
+    RUN = "run"
+    BATCH = "batch"
+
+
+class ToolSourceType(str, Enum):
+    """Defines what a tool was derived from"""
+
+    python = "python"
+    json = "json"
+
+
+class ActorType(str, Enum):
+    LETTA_USER = "letta_user"
+    LETTA_AGENT = "letta_agent"
+    LETTA_SYSTEM = "letta_system"
+
+
+class MCPServerType(str, Enum):
+    SSE = "sse"
+    STDIO = "stdio"
+    STREAMABLE_HTTP = "streamable_http"
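These five enums were relocated from letta.orm.enums (see the letta/orm/enums.py entry, -25 lines, in the file list above), so downstream imports move with them. A quick sketch of the consolidated import surface, derived directly from the additions in this hunk:

    from letta.schemas.enums import ActorType, JobType, MCPServerType, ToolSourceType, ToolType

    assert JobType.RUN.value == "run"
    assert ToolType.EXTERNAL_MCP.value == "external_mcp"
    assert MCPServerType.STREAMABLE_HTTP.value == "streamable_http"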
letta/schemas/job.py CHANGED
@@ -1,12 +1,12 @@
 from datetime import datetime
-from typing import Optional
+from typing import List, Optional
 
 from pydantic import BaseModel, Field
 
 from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG
-from letta.orm.enums import JobType
-from letta.schemas.enums import JobStatus
+from letta.schemas.enums import JobStatus, JobType
 from letta.schemas.letta_base import OrmMetadataBase
+from letta.schemas.letta_message import MessageType
 
 
 class JobBase(OrmMetadataBase):
@@ -94,3 +94,6 @@ class LettaRequestConfig(BaseModel):
         default=DEFAULT_MESSAGE_TOOL_KWARG,
         description="The name of the message argument in the designated message tool.",
     )
+    include_return_message_types: Optional[List[MessageType]] = Field(
+        default=None, description="Only return specified message types in the response. If `None` (default) returns all messages."
+    )
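A sketch of the new filtering knob; the MessageType literals used here ("assistant_message", "tool_return_message") are assumed values, not confirmed by this diff:

    from letta.schemas.job import LettaRequestConfig

    # Record/return only assistant messages and tool returns for this job;
    # leaving the field at None (the default) keeps every message type.
    config = LettaRequestConfig(
        include_return_message_types=["assistant_message", "tool_return_message"],
    )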
letta/schemas/letta_request.py CHANGED
@@ -39,6 +39,10 @@ class LettaStreamingRequest(LettaRequest):
     )
 
 
+class LettaAsyncRequest(LettaRequest):
+    callback_url: Optional[str] = Field(None, description="Optional callback URL to POST to when the job completes")
+
+
 
 class LettaBatchRequest(LettaRequest):
     agent_id: str = Field(..., description="The ID of the agent to send this batch request for")
letta/schemas/llm_batch_job.py CHANGED
@@ -2,14 +2,19 @@ from datetime import datetime
 from typing import Optional, Union
 
 from anthropic.types.beta.messages import BetaMessageBatch, BetaMessageBatchIndividualResponse
-from pydantic import Field
+from pydantic import BaseModel, Field
 
-from letta.schemas.agent import AgentStepState
+from letta.helpers import ToolRulesSolver
 from letta.schemas.enums import AgentStepStatus, JobStatus, ProviderType
 from letta.schemas.letta_base import OrmMetadataBase
 from letta.schemas.llm_config import LLMConfig
 
 
+class AgentStepState(BaseModel):
+    step_number: int = Field(..., description="The current step number in the agent loop")
+    tool_rules_solver: ToolRulesSolver = Field(..., description="The current state of the ToolRulesSolver")
+
+
 class LLMBatchItemBase(OrmMetadataBase, validate_assignment=True):
     __id_prefix__ = "batch_item"
 
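Relocating AgentStepState here removes letta.schemas.agent's dependency on ToolRulesSolver (and the import cycle with the batch schemas). Callers now construct it from its new home, roughly:

    from letta.helpers import ToolRulesSolver
    from letta.schemas.llm_batch_job import AgentStepState  # formerly letta.schemas.agent

    # Snapshot of where a batch item is in the agent loop. The empty solver is
    # illustrative; real callers pass the agent's live solver instance.
    step_state = AgentStepState(step_number=3, tool_rules_solver=ToolRulesSolver(tool_rules=[]))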
letta/schemas/memory.py CHANGED
@@ -108,10 +108,10 @@ class Memory(BaseModel, validate_assignment=True):
         except Exception as e:
             raise ValueError(f"Prompt template is not compatible with current memory structure: {str(e)}")
 
-    def compile(self) -> str:
+    def compile(self, tool_usage_rules=None) -> str:
         """Generate a string representation of the memory in-context using the Jinja2 template"""
         template = Template(self.prompt_template)
-        return template.render(blocks=self.blocks, file_blocks=self.file_blocks)
+        return template.render(blocks=self.blocks, file_blocks=self.file_blocks, tool_usage_rules=tool_usage_rules)
 
     def list_block_labels(self) -> List[str]:
         """Return a list of the block names held inside the memory object"""
letta/schemas/providers.py CHANGED
@@ -98,7 +98,7 @@ class Provider(ProviderBase):
             case ProviderType.anthropic:
                 return AnthropicProvider(**self.model_dump(exclude_none=True))
             case ProviderType.bedrock:
-                return AnthropicBedrockProvider(**self.model_dump(exclude_none=True))
+                return BedrockProvider(**self.model_dump(exclude_none=True))
             case ProviderType.ollama:
                 return OllamaProvider(**self.model_dump(exclude_none=True))
             case ProviderType.google_ai:
@@ -1513,15 +1513,15 @@ class CohereProvider(OpenAIProvider):
     pass
 
 
-class AnthropicBedrockProvider(Provider):
+class BedrockProvider(Provider):
     provider_type: Literal[ProviderType.bedrock] = Field(ProviderType.bedrock, description="The type of the provider.")
     provider_category: ProviderCategory = Field(ProviderCategory.base, description="The category of the provider (base or byok)")
-    aws_region: str = Field(..., description="AWS region for Bedrock")
+    region: str = Field(..., description="AWS region for Bedrock")
 
     def list_llm_models(self):
         from letta.llm_api.aws_bedrock import bedrock_get_model_list
 
-        models = bedrock_get_model_list(self.aws_region)
+        models = bedrock_get_model_list(self.region)
 
         configs = []
         for model_summary in models:
@@ -1539,6 +1539,32 @@ class AnthropicBedrockProvider(Provider):
             )
         return configs
 
+    async def list_llm_models_async(self) -> List[LLMConfig]:
+        from letta.llm_api.aws_bedrock import bedrock_get_model_list_async
+
+        models = await bedrock_get_model_list_async(
+            self.access_key,
+            self.api_key,
+            self.region,
+        )
+
+        configs = []
+        for model_summary in models:
+            model_arn = model_summary["inferenceProfileArn"]
+            configs.append(
+                LLMConfig(
+                    model=model_arn,
+                    model_endpoint_type=self.provider_type.value,
+                    model_endpoint=None,
+                    context_window=self.get_model_context_window(model_arn),
+                    handle=self.get_handle(model_arn),
+                    provider_name=self.name,
+                    provider_category=self.provider_category,
+                )
+            )
+
+        return configs
+
     def list_embedding_models(self):
         return []
 
@@ -1548,7 +1574,7 @@
 
         return bedrock_get_model_context_window(model_name)
 
-    def get_handle(self, model_name: str) -> str:
+    def get_handle(self, model_name: str, is_embedding: bool = False, base_name: Optional[str] = None) -> str:
         print(model_name)
         model = model_name.split(".")[-1]
-        return f"bedrock/{model}"
+        return f"{self.name}/{model}"
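Two caller-visible effects of this rename: the config field is now region (code passing aws_region will fail validation), and handles are prefixed with the provider's configured name instead of a hardcoded "bedrock". A sketch (credential fields elided; not a complete configuration):

    from letta.schemas.providers import BedrockProvider

    provider = BedrockProvider(name="bedrock", region="us-east-1")  # access/API keys elided
    handle = provider.get_handle("us.anthropic.claude-3-5-sonnet-20241022-v2:0")
    # -> "bedrock/claude-3-5-sonnet-20241022-v2:0", i.e. f"{self.name}/{model}"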
letta/schemas/run.py CHANGED
@@ -2,7 +2,7 @@ from typing import Optional
 
 from pydantic import Field
 
-from letta.orm.enums import JobType
+from letta.schemas.enums import JobType
 from letta.schemas.job import Job, JobBase, LettaRequestConfig
 
 
letta/schemas/tool_rule.py CHANGED
@@ -52,7 +52,7 @@ class ChildToolRule(BaseToolRule):
     type: Literal[ToolRuleType.constrain_child_tools] = ToolRuleType.constrain_child_tools
     children: List[str] = Field(..., description="The children tools that can be invoked.")
     prompt_template: Optional[str] = Field(
-        default="<tool_constraint>After using {{ tool_name }}, you can only use these tools: {{ children | join(', ') }}</tool_constraint>",
+        default="<tool_rule>\nAfter using {{ tool_name }}, you must use one of these tools: {{ children | join(', ') }}\n</tool_rule>",
         description="Optional Jinja2 template for generating agent prompt about this tool rule.",
     )
 
@@ -61,7 +61,7 @@
         return set(self.children) if last_tool == self.tool_name else available_tools
 
     def _get_default_template(self) -> Optional[str]:
-        return "<tool_constraint>After using {{ tool_name }}, you can only use these tools: {{ children | join(', ') }}</tool_constraint>"
+        return "<tool_rule>\nAfter using {{ tool_name }}, you must use one of these tools: {{ children | join(', ') }}\n</tool_rule>"
 
 
 class ParentToolRule(BaseToolRule):
@@ -72,7 +72,7 @@ class ParentToolRule(BaseToolRule):
     type: Literal[ToolRuleType.parent_last_tool] = ToolRuleType.parent_last_tool
     children: List[str] = Field(..., description="The children tools that can be invoked.")
     prompt_template: Optional[str] = Field(
-        default="<tool_constraint>{{ children | join(', ') }} can only be used after {{ tool_name }}</tool_constraint>",
+        default="<tool_rule>\n{{ children | join(', ') }} can only be used after {{ tool_name }}\n</tool_rule>",
         description="Optional Jinja2 template for generating agent prompt about this tool rule.",
     )
 
@@ -81,7 +81,7 @@
         return set(self.children) if last_tool == self.tool_name else available_tools - set(self.children)
 
     def _get_default_template(self) -> Optional[str]:
-        return "<tool_constraint>{{ children | join(', ') }} can only be used after {{ tool_name }}</tool_constraint>"
+        return "<tool_rule>\n{{ children | join(', ') }} can only be used after {{ tool_name }}\n</tool_rule>"
 
 
 class ConditionalToolRule(BaseToolRule):
@@ -94,7 +94,7 @@ class ConditionalToolRule(BaseToolRule):
     child_output_mapping: Dict[Any, str] = Field(..., description="The output case to check for mapping")
     require_output_mapping: bool = Field(default=False, description="Whether to throw an error when output doesn't match any case")
     prompt_template: Optional[str] = Field(
-        default="<tool_constraint>{{ tool_name }} will determine which tool to use next based on its output</tool_constraint>",
+        default="<tool_rule>\n{{ tool_name }} will determine which tool to use next based on its output\n</tool_rule>",
         description="Optional Jinja2 template for generating agent prompt about this tool rule.",
     )
 
@@ -143,7 +143,7 @@
         return str(function_output) == str(key)
 
     def _get_default_template(self) -> Optional[str]:
-        return "<tool_constraint>{{ tool_name }} will determine which tool to use next based on its output</tool_constraint>"
+        return "<tool_rule>\n{{ tool_name }} will determine which tool to use next based on its output\n</tool_rule>"
 
 
 class InitToolRule(BaseToolRule):
@@ -161,12 +161,12 @@ class TerminalToolRule(BaseToolRule):
 
     type: Literal[ToolRuleType.exit_loop] = ToolRuleType.exit_loop
     prompt_template: Optional[str] = Field(
-        default="<tool_constraint>{{ tool_name }} ends the conversation when called</tool_constraint>",
+        default="<tool_rule>\n{{ tool_name }} ends your response (yields control) when called\n</tool_rule>",
         description="Optional Jinja2 template for generating agent prompt about this tool rule.",
     )
 
     def _get_default_template(self) -> Optional[str]:
-        return "<tool_constraint>{{ tool_name }} ends the conversation when called</tool_constraint>"
+        return "<tool_rule>\n{{ tool_name }} ends your response (yields control) when called\n</tool_rule>"
 
 
 class ContinueToolRule(BaseToolRule):
@@ -176,11 +176,30 @@ class ContinueToolRule(BaseToolRule):
 
     type: Literal[ToolRuleType.continue_loop] = ToolRuleType.continue_loop
     prompt_template: Optional[str] = Field(
-        default="<tool_constraint>{{ tool_name }} requires continuing the conversation when called</tool_constraint>",
+        default="<tool_rule>\n{{ tool_name }} requires continuing your response when called\n</tool_rule>",
         description="Optional Jinja2 template for generating agent prompt about this tool rule.",
     )
 
 
+class RequiredBeforeExitToolRule(BaseToolRule):
+    """
+    Represents a tool rule configuration where this tool must be called before the agent loop can exit.
+    """
+
+    type: Literal[ToolRuleType.required_before_exit] = ToolRuleType.required_before_exit
+    prompt_template: Optional[str] = Field(
+        default="<tool_rule>{{ tool_name }} must be called before ending the conversation</tool_rule>",
+        description="Optional Jinja2 template for generating agent prompt about this tool rule.",
+    )
+
+    def get_valid_tools(self, tool_call_history: List[str], available_tools: Set[str], last_function_response: Optional[str]) -> Set[str]:
+        """Returns all available tools - the logic for preventing exit is handled elsewhere."""
+        return available_tools
+
+    def _get_default_template(self) -> Optional[str]:
+        return "<tool_rule>{{ tool_name }} must be called before ending the conversation</tool_rule>"
+
+
 class MaxCountPerStepToolRule(BaseToolRule):
     """
     Represents a tool rule configuration which constrains the total number of times this tool can be invoked in a single step.
@@ -189,7 +208,7 @@ class MaxCountPerStepToolRule(BaseToolRule):
     type: Literal[ToolRuleType.max_count_per_step] = ToolRuleType.max_count_per_step
     max_count_limit: int = Field(..., description="The max limit for the total number of times this tool can be invoked in a single step.")
     prompt_template: Optional[str] = Field(
-        default="<tool_constraint>{{ tool_name }}: max {{ max_count_limit }} use(s) per turn</tool_constraint>",
+        default="<tool_rule>\n{{ tool_name }}: max {{ max_count_limit }} use(s) per response\n</tool_rule>",
         description="Optional Jinja2 template for generating agent prompt about this tool rule.",
     )
 
@@ -204,10 +223,19 @@ class MaxCountPerStepToolRule(BaseToolRule):
         return available_tools
 
     def _get_default_template(self) -> Optional[str]:
-        return "<tool_constraint>{{ tool_name }}: max {{ max_count_limit }} use(s) per turn</tool_constraint>"
+        return "<tool_rule>\n{{ tool_name }}: max {{ max_count_limit }} use(s) per response\n</tool_rule>"
 
 
 ToolRule = Annotated[
-    Union[ChildToolRule, InitToolRule, TerminalToolRule, ConditionalToolRule, ContinueToolRule, MaxCountPerStepToolRule, ParentToolRule],
+    Union[
+        ChildToolRule,
+        InitToolRule,
+        TerminalToolRule,
+        ConditionalToolRule,
+        ContinueToolRule,
+        RequiredBeforeExitToolRule,
+        MaxCountPerStepToolRule,
+        ParentToolRule,
+    ],
     Field(discriminator="type"),
 ]
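A sketch of attaching the new rule. Note that get_valid_tools() deliberately filters nothing; per its docstring, the exit gate is enforced elsewhere in the loop (see the letta/helpers/tool_rule_solver.py and letta/agents/letta_agent.py entries in the file list above):

    from letta.schemas.tool_rule import RequiredBeforeExitToolRule

    rule = RequiredBeforeExitToolRule(tool_name="save_findings")

    # Every tool stays callable - the rule never narrows the set...
    assert rule.get_valid_tools([], {"save_findings", "web_search"}, None) == {"save_findings", "web_search"}
    # ...the agent loop simply refuses to end the turn until "save_findings"
    # appears in the call history.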
letta/serialize_schemas/pydantic_agent_schema.py CHANGED
@@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Union
 from pydantic import BaseModel, Field
 
 from letta.schemas.embedding_config import EmbeddingConfig
-from letta.schemas.letta_message_content import TextContent
+from letta.schemas.letta_message_content import LettaMessageContentUnion
 from letta.schemas.llm_config import LLMConfig
 
 
@@ -25,7 +25,14 @@ class MessageSchema(BaseModel):
     model: Optional[str]
     name: Optional[str]
     role: str
-    content: List[TextContent]  # TODO: Expand to more in the future
+    content: List[LettaMessageContentUnion] = Field(
+        ...,
+        json_schema_extra={
+            "items": {
+                "$ref": "#/components/schemas/LettaMessageContentUnion",
+            }
+        },
+    )
     tool_call_id: Optional[str]
     tool_calls: List[Any]
     tool_returns: List[Any]
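The json_schema_extra override pins the generated items schema to a single $ref instead of the inlined union pydantic would otherwise emit. Roughly what one would expect (a hedged sketch; exact output depends on the pydantic version):

    import json

    from letta.serialize_schemas.pydantic_agent_schema import MessageSchema

    schema = MessageSchema.model_json_schema()
    print(json.dumps(schema["properties"]["content"]["items"]))
    # expected: {"$ref": "#/components/schemas/LettaMessageContentUnion"}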
letta/server/rest_api/app.py CHANGED
@@ -385,9 +385,10 @@ def start_server(
         address=host or "127.0.0.1",  # Note granian address must be an ip address
         port=port or REST_DEFAULT_PORT,
         workers=settings.uvicorn_workers,
-        # threads=
+        # runtime_blocking_threads=
+        # runtime_threads=
         reload=reload or settings.uvicorn_reload,
-        reload_ignore_patterns=["openapi_letta.json"],
+        reload_paths=["../letta/"],
         reload_ignore_worker_failure=True,
         reload_tick=4000,  # set to 4s to prevent crashing on weird state
         # log_level="info"
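For context, these kwargs feed granian's embedded server. A hedged sketch of an equivalent standalone invocation (constructor usage assumed from granian's Python API; 8283 assumed as letta's REST_DEFAULT_PORT):

    from granian import Granian  # assumed import; letta drives this via start_server()

    Granian(
        "letta.server.rest_api.app:app",
        address="127.0.0.1",
        port=8283,
        reload=True,
        reload_paths=["../letta/"],  # watch the package tree rather than ignoring one file
        reload_tick=4000,            # 4s tick to avoid crashing on odd reload states
    ).serve()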