jl-ecms-client 0.2.8 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of jl-ecms-client might be problematic.

Files changed (53)
  1. jl_ecms_client-0.2.8.dist-info/METADATA +295 -0
  2. jl_ecms_client-0.2.8.dist-info/RECORD +53 -0
  3. jl_ecms_client-0.2.8.dist-info/WHEEL +5 -0
  4. jl_ecms_client-0.2.8.dist-info/licenses/LICENSE +190 -0
  5. jl_ecms_client-0.2.8.dist-info/top_level.txt +1 -0
  6. mirix/client/__init__.py +14 -0
  7. mirix/client/client.py +405 -0
  8. mirix/client/constants.py +60 -0
  9. mirix/client/remote_client.py +1136 -0
  10. mirix/client/utils.py +34 -0
  11. mirix/helpers/__init__.py +1 -0
  12. mirix/helpers/converters.py +429 -0
  13. mirix/helpers/datetime_helpers.py +90 -0
  14. mirix/helpers/json_helpers.py +47 -0
  15. mirix/helpers/message_helpers.py +74 -0
  16. mirix/helpers/tool_rule_solver.py +166 -0
  17. mirix/schemas/__init__.py +1 -0
  18. mirix/schemas/agent.py +401 -0
  19. mirix/schemas/block.py +188 -0
  20. mirix/schemas/cloud_file_mapping.py +29 -0
  21. mirix/schemas/embedding_config.py +114 -0
  22. mirix/schemas/enums.py +69 -0
  23. mirix/schemas/environment_variables.py +82 -0
  24. mirix/schemas/episodic_memory.py +170 -0
  25. mirix/schemas/file.py +57 -0
  26. mirix/schemas/health.py +10 -0
  27. mirix/schemas/knowledge_vault.py +181 -0
  28. mirix/schemas/llm_config.py +187 -0
  29. mirix/schemas/memory.py +318 -0
  30. mirix/schemas/message.py +1315 -0
  31. mirix/schemas/mirix_base.py +107 -0
  32. mirix/schemas/mirix_message.py +411 -0
  33. mirix/schemas/mirix_message_content.py +230 -0
  34. mirix/schemas/mirix_request.py +39 -0
  35. mirix/schemas/mirix_response.py +183 -0
  36. mirix/schemas/openai/__init__.py +1 -0
  37. mirix/schemas/openai/chat_completion_request.py +122 -0
  38. mirix/schemas/openai/chat_completion_response.py +144 -0
  39. mirix/schemas/openai/chat_completions.py +127 -0
  40. mirix/schemas/openai/embedding_response.py +11 -0
  41. mirix/schemas/openai/openai.py +229 -0
  42. mirix/schemas/organization.py +38 -0
  43. mirix/schemas/procedural_memory.py +151 -0
  44. mirix/schemas/providers.py +816 -0
  45. mirix/schemas/resource_memory.py +134 -0
  46. mirix/schemas/sandbox_config.py +132 -0
  47. mirix/schemas/semantic_memory.py +162 -0
  48. mirix/schemas/source.py +96 -0
  49. mirix/schemas/step.py +53 -0
  50. mirix/schemas/tool.py +241 -0
  51. mirix/schemas/tool_rule.py +209 -0
  52. mirix/schemas/usage.py +31 -0
  53. mirix/schemas/user.py +67 -0
mirix/helpers/tool_rule_solver.py ADDED
@@ -0,0 +1,166 @@
+ import json
+ from typing import List, Optional, Union
+
+ from pydantic import BaseModel, Field
+
+ from mirix.schemas.enums import ToolRuleType
+ from mirix.schemas.tool_rule import (
+     BaseToolRule,
+     ChildToolRule,
+     ConditionalToolRule,
+     InitToolRule,
+     TerminalToolRule,
+ )
+
+
+ class ToolRuleValidationError(Exception):
+     """Custom exception for tool rule validation errors in ToolRulesSolver."""
+
+     def __init__(self, message: str):
+         super().__init__(f"ToolRuleValidationError: {message}")
+
+
+ class ToolRulesSolver(BaseModel):
+     init_tool_rules: List[InitToolRule] = Field(
+         default_factory=list,
+         description="Initial tool rules to be used at the start of tool execution.",
+     )
+     tool_rules: List[Union[ChildToolRule, ConditionalToolRule]] = Field(
+         default_factory=list,
+         description="Standard tool rules for controlling execution sequence and allowed transitions.",
+     )
+     terminal_tool_rules: List[TerminalToolRule] = Field(
+         default_factory=list,
+         description="Terminal tool rules that end the agent loop if called.",
+     )
+     last_tool_name: Optional[str] = Field(
+         None, description="The most recent tool used, updated with each tool call."
+     )
+
+     def __init__(self, tool_rules: List[BaseToolRule], **kwargs):
+         super().__init__(**kwargs)
+         # Separate the provided tool rules into init, standard, and terminal categories
+         for rule in tool_rules:
+             if rule.type == ToolRuleType.run_first:
+                 assert isinstance(rule, InitToolRule)
+                 self.init_tool_rules.append(rule)
+             elif rule.type == ToolRuleType.constrain_child_tools:
+                 assert isinstance(rule, ChildToolRule)
+                 self.tool_rules.append(rule)
+             elif rule.type == ToolRuleType.conditional:
+                 assert isinstance(rule, ConditionalToolRule)
+                 self.validate_conditional_tool(rule)
+                 self.tool_rules.append(rule)
+             elif rule.type == ToolRuleType.exit_loop:
+                 assert isinstance(rule, TerminalToolRule)
+                 self.terminal_tool_rules.append(rule)
+
+     def update_tool_usage(self, tool_name: str):
+         """Update the internal state to track the last tool called."""
+         self.last_tool_name = tool_name
+
+     def get_allowed_tool_names(
+         self, error_on_empty: bool = False, last_function_response: Optional[str] = None
+     ) -> List[str]:
+         """Get a list of tool names allowed based on the last tool called."""
+         if self.last_tool_name is None:
+             # Use initial tool rules if no tool has been called yet
+             return [rule.tool_name for rule in self.init_tool_rules]
+         else:
+             # Find a matching ToolRule for the last tool used
+             current_rule = next(
+                 (
+                     rule
+                     for rule in self.tool_rules
+                     if rule.tool_name == self.last_tool_name
+                 ),
+                 None,
+             )
+
+             if current_rule is None:
+                 if error_on_empty:
+                     raise ValueError(f"No tool rule found for {self.last_tool_name}")
+                 return []
+
+             # If the current rule is a conditional tool rule, use the LLM response to
+             # determine which child tool to use
+             if isinstance(current_rule, ConditionalToolRule):
+                 if not last_function_response:
+                     raise ValueError(
+                         "Conditional tool rule requires an LLM response to determine which child tool to use"
+                     )
+                 next_tool = self.evaluate_conditional_tool(
+                     current_rule, last_function_response
+                 )
+                 return [next_tool] if next_tool else []
+
+             return current_rule.children if current_rule.children else []
+
+     def is_terminal_tool(self, tool_name: str) -> bool:
+         """Check if the tool is defined as a terminal tool in the terminal tool rules."""
+         return any(rule.tool_name == tool_name for rule in self.terminal_tool_rules)
+
+     def has_children_tools(self, tool_name):
+         """Check if the tool has children tools"""
+         return any(rule.tool_name == tool_name for rule in self.tool_rules)
+
+     def validate_conditional_tool(self, rule: ConditionalToolRule):
+         """
+         Validate a conditional tool rule
+
+         Args:
+             rule (ConditionalToolRule): The conditional tool rule to validate
+
+         Raises:
+             ToolRuleValidationError: If the rule is invalid
+         """
+         if len(rule.child_output_mapping) == 0:
+             raise ToolRuleValidationError(
+                 "Conditional tool rule must have at least one child tool."
+             )
+         return True
+
+     def evaluate_conditional_tool(
+         self, tool: ConditionalToolRule, last_function_response: str
+     ) -> str:
+         """
+         Parse function response to determine which child tool to use based on the mapping
+
+         Args:
+             tool (ConditionalToolRule): The conditional tool rule
+             last_function_response (str): The function response in JSON format
+
+         Returns:
+             str: The name of the child tool to use next
+         """
+         json_response = json.loads(last_function_response)
+         function_output = json_response["message"]
+
+         # Try to match the function output with a mapping key
+         for key in tool.child_output_mapping:
+             # Convert function output to match key type for comparison
+             if isinstance(key, bool):
+                 typed_output = function_output.lower() == "true"
+             elif isinstance(key, int):
+                 try:
+                     typed_output = int(function_output)
+                 except (ValueError, TypeError):
+                     continue
+             elif isinstance(key, float):
+                 try:
+                     typed_output = float(function_output)
+                 except (ValueError, TypeError):
+                     continue
+             else:  # string
+                 if function_output == "True" or function_output == "False":
+                     typed_output = function_output.lower()
+                 elif function_output == "None":
+                     typed_output = None
+                 else:
+                     typed_output = function_output
+
+             if typed_output == key:
+                 return tool.child_output_mapping[key]
+
+         # If no match found, use default
+         return tool.default_child
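The solver above gates tool execution: init rules define what may run first, child and conditional rules constrain which tools may follow the last one called, and terminal rules end the agent loop. A minimal usage sketch follows (illustrative only, not shipped in the package; the tool names are made up, and it assumes the rule schemas in mirix/schemas/tool_rule.py accept the field names the solver reads, i.e. tool_name and children, and set their type discriminator themselves).

# Illustrative sketch only -- not part of jl-ecms-client.
from mirix.helpers import ToolRulesSolver
from mirix.schemas.tool_rule import ChildToolRule, InitToolRule, TerminalToolRule

solver = ToolRulesSolver(
    tool_rules=[
        InitToolRule(tool_name="plan_step"),                               # allowed before any call
        ChildToolRule(tool_name="plan_step", children=["send_message"]),   # plan_step -> send_message
        TerminalToolRule(tool_name="send_message"),                        # ends the loop
    ]
)

print(solver.get_allowed_tool_names())           # ['plan_step']     (no tool called yet)
solver.update_tool_usage("plan_step")
print(solver.get_allowed_tool_names())           # ['send_message']  (child rule applies)
print(solver.is_terminal_tool("send_message"))   # True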
mirix/schemas/__init__.py ADDED
@@ -0,0 +1 @@
+ # Mirix schemas package
mirix/schemas/agent.py ADDED
@@ -0,0 +1,401 @@
+ from enum import Enum
+ from typing import Any, Dict, List, Optional, Union
+
+ from pydantic import BaseModel, Field, field_validator
+
+ from mirix.client.constants import DEFAULT_EMBEDDING_CHUNK_SIZE
+ from mirix.helpers import ToolRulesSolver
+ from mirix.schemas.block import CreateBlock
+ from mirix.schemas.embedding_config import EmbeddingConfig
+ from mirix.schemas.llm_config import LLMConfig
+ from mirix.schemas.memory import Memory
+ from mirix.schemas.message import Message, MessageCreate
+ from mirix.schemas.mirix_base import OrmMetadataBase
+ from mirix.schemas.openai.chat_completion_response import UsageStatistics
+ from mirix.schemas.tool import Tool
+ from mirix.schemas.tool_rule import ToolRule
+
+ # Removed create_random_username import - server generates names if not provided
+
+
+ class AgentType(str, Enum):
+     """
+     Enum to represent the type of agent.
+     """
+
+     coder_agent = "coder_agent"
+     chat_agent = "chat_agent"
+     reflexion_agent = "reflexion_agent"
+     background_agent = "background_agent"
+     episodic_memory_agent = "episodic_memory_agent"
+     procedural_memory_agent = "procedural_memory_agent"
+     resource_memory_agent = "resource_memory_agent"
+     knowledge_vault_memory_agent = "knowledge_vault_memory_agent"
+     meta_memory_agent = "meta_memory_agent"
+     semantic_memory_agent = "semantic_memory_agent"
+     core_memory_agent = "core_memory_agent"
+
+
+ class AgentState(OrmMetadataBase, validate_assignment=True):
+     """
+     Representation of an agent's state. This is the state of the agent at a given time, and is persisted in the DB backend. The state has all the information needed to recreate a persisted agent.
+
+     Parameters:
+         id (str): The unique identifier of the agent.
+         name (str): The name of the agent (must be unique to the user).
+         created_at (datetime): The datetime the agent was created.
+         memory (Memory): The in-context memory of the agent.
+         tools (List[str]): The tools used by the agent. This includes any memory editing functions specified in `memory`.
+         system (str): The system prompt used by the agent.
+         llm_config (LLMConfig): The LLM configuration used by the agent.
+         embedding_config (EmbeddingConfig): The embedding configuration used by the agent.
+
+     """
+
+     __id_prefix__ = "agent"
+
+     # NOTE: this is what is returned to the client and also what is used to initialize `Agent`
+     id: str = Field(..., description="The id of the agent. Assigned by the database.")
+     name: str = Field(..., description="The name of the agent.")
+     # tool rules
+     tool_rules: Optional[List[ToolRule]] = Field(
+         default=None, description="The list of tool rules."
+     )
+
+     # in-context memory
+     message_ids: Optional[List[str]] = Field(
+         default=None,
+         description="The ids of the messages in the agent's in-context memory.",
+     )
+
+     # system prompt
+     system: str = Field(..., description="The system prompt used by the agent.")
+
+     # agent configuration
+     agent_type: AgentType = Field(..., description="The type of agent.")
+
+     # llm information
+     llm_config: LLMConfig = Field(
+         ..., description="The LLM configuration used by the agent."
+     )
+     embedding_config: EmbeddingConfig = Field(
+         ..., description="The embedding configuration used by the agent."
+     )
+
+     # This is an object representing the in-process state of a running `Agent`
+     # Field in this object can be theoretically edited by tools, and will be persisted by the ORM
+     organization_id: Optional[str] = Field(
+         None,
+         description="The unique identifier of the organization associated with the agent.",
+     )
+
+     description: Optional[str] = Field(
+         None, description="The description of the agent."
+     )
+     parent_id: Optional[str] = Field(
+         None, description="The parent agent ID (for sub-agents in a meta-agent)."
+     )
+     children: Optional[List["AgentState"]] = Field(
+         default=None, description="Child agents (sub-agents) if this is a parent agent."
+     )
+
+     memory: Memory = Field(..., description="The in-context memory of the agent.")
+     tools: List[Tool] = Field(..., description="The tools used by the agent.")
+     mcp_tools: Optional[List[str]] = Field(
+         default_factory=list,
+         description="List of connected MCP server names (e.g., ['gmail-native'])",
+     )
+
+
+ class CreateAgent(BaseModel, validate_assignment=True):  #
+     # all optional as server can generate defaults
+     name: Optional[str] = Field(
+         None,
+         description="The name of the agent. If not provided, server will generate one.",
+     )
+
+     # memory creation
+     memory_blocks: Optional[List[CreateBlock]] = Field(
+         None,
+         description="The blocks to create in the agent's in-context memory.",
+     )
+     # TODO: This is a legacy field and should be removed ASAP to force `tool_ids` usage
+     tools: Optional[List[str]] = Field(None, description="The tools used by the agent.")
+     tool_ids: Optional[List[str]] = Field(
+         None, description="The ids of the tools used by the agent."
+     )
+     tool_rules: Optional[List[ToolRule]] = Field(
+         None, description="The tool rules governing the agent."
+     )
+     system: Optional[str] = Field(
+         None, description="The system prompt used by the agent."
+     )
+     agent_type: AgentType = Field(
+         default_factory=lambda: AgentType.chat_agent, description="The type of agent."
+     )
+     llm_config: Optional[LLMConfig] = Field(
+         None, description="The LLM configuration used by the agent."
+     )
+     embedding_config: Optional[EmbeddingConfig] = Field(
+         None, description="The embedding configuration used by the agent."
+     )
+     # Note: if this is None, then we'll populate with the standard "more human than human" initial message sequence
+     # If the client wants to make this empty, then the client can set the arg to an empty list
+     initial_message_sequence: Optional[List[MessageCreate]] = Field(
+         None,
+         description="The initial set of messages to put in the agent's in-context memory.",
+     )
+     include_base_tools: bool = Field(
+         True,
+         description="If true, attaches the Mirix core tools (e.g. archival_memory and core_memory related functions).",
+     )
+     include_multi_agent_tools: bool = Field(
+         False,
+         description="If true, attaches the Mirix multi-agent tools (e.g. sending a message to another agent).",
+     )
+     parent_id: Optional[str] = Field(
+         None, description="The parent agent ID (for sub-agents in a meta-agent)."
+     )
+     model: Optional[str] = Field(
+         None,
+         description="The LLM configuration handle used by the agent, specified in the format "
+         "provider/model-name, as an alternative to specifying llm_config.",
+     )
+     embedding: Optional[str] = Field(
+         None,
+         description="The embedding configuration handle used by the agent, specified in the format provider/model-name.",
+     )
+     context_window_limit: Optional[int] = Field(
+         None, description="The context window limit used by the agent."
+     )
+     embedding_chunk_size: Optional[int] = Field(
+         DEFAULT_EMBEDDING_CHUNK_SIZE,
+         description="The embedding chunk size used by the agent.",
+     )
+     from_template: Optional[str] = Field(
+         None, description="The template id used to configure the agent"
+     )
+     template: bool = Field(False, description="Whether the agent is a template")
+     project: Optional[str] = Field(
+         None, description="The project slug that the agent will be associated with."
+     )
+     tool_exec_environment_variables: Optional[Dict[str, str]] = Field(
+         None,
+         description="The environment variables for tool execution specific to this agent.",
+     )
+     memory_variables: Optional[Dict[str, str]] = Field(
+         None, description="The variables that should be set for the agent."
+     )
+     mcp_tools: Optional[List[str]] = Field(
+         None, description="List of MCP server names to connect to this agent."
+     )
+
+     @field_validator("name")
+     @classmethod
+     def validate_name(cls, name: str) -> str:
+         """Validate the requested new agent name (prevent bad inputs)"""
+
+         import re
+
+         if not name:
+             # don't check if not provided
+             return name
+
+         # TODO: this check should also be added to other model (e.g. User.name)
+         # Length check
+         if not (1 <= len(name) <= 50):
+             raise ValueError("Name length must be between 1 and 50 characters.")
+
+         # Regex for allowed characters (alphanumeric, spaces, hyphens, underscores)
+         if not re.match("^[A-Za-z0-9 _-]+$", name):
+             raise ValueError("Name contains invalid characters.")
+
+         # Further checks can be added here...
+         # TODO
+
+         return name
+
+     @field_validator("model")
+     @classmethod
+     def validate_model(cls, model: Optional[str]) -> Optional[str]:
+         if not model:
+             return model
+
+         provider_name, model_name = model.split("/", 1)
+         if not provider_name or not model_name:
+             raise ValueError(
+                 "The llm config handle should be in the format provider/model-name"
+             )
+
+         return model
+
+     @field_validator("embedding")
+     @classmethod
+     def validate_embedding(cls, embedding: Optional[str]) -> Optional[str]:
+         if not embedding:
+             return embedding
+
+         provider_name, embedding_name = embedding.split("/", 1)
+         if not provider_name or not embedding_name:
+             raise ValueError(
+                 "The embedding config handle should be in the format provider/model-name"
+             )
+
+         return embedding
+
+
+ class UpdateAgent(BaseModel):
+     name: Optional[str] = Field(None, description="The name of the agent.")
+     tool_ids: Optional[List[str]] = Field(
+         None, description="The ids of the tools used by the agent."
+     )
+     block_ids: Optional[List[str]] = Field(
+         None, description="The ids of the blocks used by the agent."
+     )
+     system: Optional[str] = Field(
+         None, description="The system prompt used by the agent."
+     )
+     tool_rules: Optional[List[ToolRule]] = Field(
+         None, description="The tool rules governing the agent."
+     )
+     llm_config: Optional[LLMConfig] = Field(
+         None, description="The LLM configuration used by the agent."
+     )
+     embedding_config: Optional[EmbeddingConfig] = Field(
+         None, description="The embedding configuration used by the agent."
+     )
+     message_ids: Optional[List[str]] = Field(
+         None, description="The ids of the messages in the agent's in-context memory."
+     )
+     description: Optional[str] = Field(
+         None, description="The description of the agent."
+     )
+     parent_id: Optional[str] = Field(
+         None, description="The parent agent ID (for sub-agents in a meta-agent)."
+     )
+     mcp_tools: Optional[List[str]] = Field(
+         None, description="List of MCP server names to connect to this agent."
+     )
+
+     class Config:
+         extra = "ignore"  # Ignores extra fields
+
+
+ class CreateMetaAgent(BaseModel):
+     """Request schema for creating a MetaAgent."""
+
+     name: Optional[str] = Field(
+         None,
+         description="Optional name for the MetaAgent. If None, a random name will be generated.",
+     )
+     agents: List[Union[str, Dict[str, Any]]] = Field(
+         default_factory=lambda: [
+             "core_memory_agent",
+             "resource_memory_agent",
+             "semantic_memory_agent",
+             "episodic_memory_agent",
+             "procedural_memory_agent",
+             "knowledge_vault_memory_agent",
+             "meta_memory_agent",
+             "reflexion_agent",
+             "background_agent",
+         ],
+         description="List of memory agent names or dicts with agent configs. Supports both 'agent_name' strings and {'agent_name': {'blocks': [...], ...}} dicts.",
+     )
+     system_prompts: Optional[Dict[str, str]] = Field(
+         None,
+         description="Dictionary mapping agent names to their system prompt text. Takes precedence over system_prompts_folder.",
+     )
+     llm_config: Optional[LLMConfig] = Field(
+         None,
+         description="LLM configuration for memory agents. Required if no default is set.",
+     )
+     embedding_config: Optional[EmbeddingConfig] = Field(
+         None,
+         description="Embedding configuration for memory agents. Required if no default is set.",
+     )
+
+ class UpdateMetaAgent(BaseModel):
+     """Request schema for updating a MetaAgent."""
+
+     name: Optional[str] = Field(
+         None,
+         description="Optional new name for the MetaAgent.",
+     )
+     agents: Optional[List[Union[str, Dict[str, Any]]]] = Field(
+         None,
+         description="List of memory agent names or dicts with agent configs. Will be compared with existing agents to determine what to add/remove.",
+     )
+     system_prompts: Optional[Dict[str, str]] = Field(
+         None,
+         description="Dictionary mapping agent names to their system prompt text. Updates only the specified agents.",
+     )
+     llm_config: Optional[LLMConfig] = Field(
+         None,
+         description="LLM configuration for meta agent and its sub-agents.",
+     )
+     embedding_config: Optional[EmbeddingConfig] = Field(
+         None,
+         description="Embedding configuration for meta agent and its sub-agents.",
+     )
+
+     class Config:
+         extra = "ignore"  # Ignores extra fields
+
+
+ class AgentStepResponse(BaseModel):
+     messages: List[Message] = Field(
+         ..., description="The messages generated during the agent's step."
+     )
+     continue_chaining: bool = Field(
+         ...,
+         description="Whether the agent requested a contine_chaining (i.e. follow-up execution).",
+     )
+     function_failed: bool = Field(
+         ..., description="Whether the agent step ended because a function call failed."
+     )
+     in_context_memory_warning: bool = Field(
+         ...,
+         description="Whether the agent step ended because the in-context memory is near its limit.",
+     )
+     usage: UsageStatistics = Field(
+         ..., description="Usage statistics of the LLM call during the agent's step."
+     )
+     traj: Optional[dict] = Field(
+         None, description="Action, Observation, State at the current step"
+     )
+
+
+ class AgentStepState(BaseModel):
+     step_number: int = Field(
+         ..., description="The current step number in the agent loop"
+     )
+     tool_rules_solver: ToolRulesSolver = Field(
+         ..., description="The current state of the ToolRulesSolver"
+     )
+
+
+ def get_prompt_template_for_agent_type(agent_type: Optional[AgentType] = None):
+     if agent_type == AgentType.sleeptime_agent:
+         return (
+             "{% for block in blocks %}"
+             '<{{ block.label }} characters="{{ block.value|length }}/{{ block.limit }}">\n'
+             "{% for line in block.value.split('\\n') %}"
+             "Line {{ loop.index }}: {{ line }}\n"
+             "{% endfor %}"
+             "</{{ block.label }}>"
+             "{% if not loop.last %}\n{% endif %}"
+             "{% endfor %}"
+         )
+     return (
+         "{% for block in blocks %}"
+         '<{{ block.label }} characters="{{ block.value|length }}/{{ block.limit }}">\n'
+         "{{ block.value }}\n"
+         "</{{ block.label }}>"
+         "{% if not loop.last %}\n{% endif %}"
+         "{% endfor %}"
+     )
+
+
+ # Rebuild model to support forward references (children field)
+ AgentState.model_rebuild()
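CreateAgent accepts either full llm_config/embedding_config objects or compact provider/model-name handles, and its field validators reject malformed names and handles. A brief sketch of exercising those validators (illustrative only, not part of the package; the agent name and handle strings below are made-up values):

# Illustrative sketch only -- not shipped in jl-ecms-client.
from mirix.schemas.agent import CreateAgent

# Passes validation: the name uses allowed characters and both handles
# follow the provider/model-name format the validators expect.
req = CreateAgent(
    name="support-agent",
    model="openai/gpt-4o-mini",                  # hypothetical handle
    embedding="openai/text-embedding-3-small",   # hypothetical handle
)

# Fails validation: "!" is outside the allowed character set and the model
# handle has no provider/ prefix, so pydantic raises a ValidationError.
try:
    CreateAgent(name="support agent!", model="gpt-4o-mini")
except Exception as err:
    print(err)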