soprano-sdk 0.2.19__py3-none-any.whl → 0.2.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,351 @@
+ """
+ Follow-up node strategy for handling conversational Q&A.
+
+ This node type allows users to ask follow-up questions and receive answers
+ based on the full workflow context. Unlike collect_input_with_agent, where
+ the agent initiates, here the user initiates by asking questions first.
+ """
+ from typing import Dict, Any, List, Optional
+
+ from langgraph.types import interrupt
+
+ from .base import ActionStrategy
+ from ..agents.factory import AgentFactory, AgentAdapter
+ from ..core.constants import WorkflowKeys
+ from ..core.state import initialize_state
+ from ..utils.logger import logger
+ from ..utils.tracing import trace_node_execution, trace_agent_invocation, add_node_result
+
+ # Patterns for detecting special responses
+ OUT_OF_SCOPE_PATTERN = "OUT_OF_SCOPE:"
+ INTENT_CHANGE_PATTERN = "INTENT_CHANGE:"
+ CLOSURE_PATTERN = "CLOSURE:"
+
+ # Default patterns that indicate user is done with follow-up
+ DEFAULT_CLOSURE_PATTERNS = [
+     "ok", "okay", "thank you", "thanks", "got it", "done",
+     "that's all", "no more questions", "i understand", "perfect",
+     "great", "alright", "understood"
+ ]
+
+
+ def _build_follow_up_instructions(
+     base_instructions: str,
+     context_summary: str,
+     collector_nodes: Dict[str, str],
+     enable_out_of_scope: bool,
+     scope_description: str
+ ) -> str:
+     """Build complete agent instructions for follow-up node."""
+     instructions = f"""{base_instructions}
+
+ {context_summary}
+
+ RESPONSE GUIDELINES:
+ 1. Answer the user's follow-up questions using the context above
+ 2. Be concise and helpful
+ 3. Do NOT ask for new information - just answer questions about existing data
+ """
+
+     # Add intent change detection if collector nodes exist
+     if collector_nodes:
+         nodes_str = "\n".join(f"- {k}: {v}" for k, v in collector_nodes.items())
+         instructions += f"""
+ INTENT CHANGE DETECTION:
+ If the user wants to change or update previously collected information, respond ONLY with:
+ INTENT_CHANGE: <node_name>
+
+ Do NOT provide any other response when detecting intent change.
+
+ Available nodes for intent change:
+ {nodes_str}
+ """
+
+     # Add out-of-scope detection if enabled
+     if enable_out_of_scope:
+         instructions += f"""
+ OUT-OF-SCOPE DETECTION:
+ Your current task is: {scope_description}
+
+ If the user's query is COMPLETELY UNRELATED to this task, respond ONLY with:
+ OUT_OF_SCOPE: <brief description of what user is asking about>
+
+ Do NOT attempt to answer out-of-scope questions.
+ """
+
+     return instructions
+
+
+ class FollowUpStrategy(ActionStrategy):
+     """
+     Strategy for handling follow-up questions and routing based on user intent.
+
+     Key difference from CollectInputStrategy:
+     - collect_input: Agent asks first, user responds
+     - follow_up: User asks first, agent responds
+
+     Features:
+     - Multi-turn conversation with full workflow state context
+     - Closure detection (user says "ok", "thank you", etc.)
+     - Intent change detection (route to collector nodes)
+     - Transition-based routing (route to any configured node)
+     - Out-of-scope detection (signal to parent orchestrator)
+     """
+
+     def __init__(self, step_config: Dict[str, Any], engine_context: Any):
+         super().__init__(step_config, engine_context)
+         self.agent_config = step_config.get('agent', {})
+         self.transitions = self._get_transitions()
+         self.next_step = step_config.get('next')
+
+         # Follow-up specific config
+         self.closure_patterns = step_config.get(
+             'closure_patterns',
+             DEFAULT_CLOSURE_PATTERNS
+         )
+         self.enable_out_of_scope = self.agent_config.get('detect_out_of_scope', False)
+         self.scope_description = self.agent_config.get(
+             'scope_description',
+             self.agent_config.get('description', 'answering follow-up questions')
+         )
+
+     @property
+     def _conversation_key(self) -> str:
+         return f'{self.step_id}_conversation'
+
+     def pre_execute(self, state: Dict[str, Any]) -> None:
+         """Setup before execution - a no-op for the follow-up node."""
+         pass
+
+     def execute(self, state: Dict[str, Any]) -> Dict[str, Any]:
+         """Execute the follow-up node logic."""
+         with trace_node_execution(
+             node_id=self.step_id,
+             node_type="follow_up",
+             output_field=None
+         ) as span:
+             state = initialize_state(state)
+             conversation = self._get_or_create_conversation(state)
+             is_self_loop = self._is_self_loop(state)
+
+             # Get prompt for user
+             # - First entry: No prompt, user initiates
+             # - Self-loop: Show last assistant response as prompt
+             if is_self_loop:
+                 prompt = self._get_last_assistant_message(conversation)
+             else:
+                 prompt = None  # User initiates on first entry
+                 span.add_event("follow_up.first_entry")
+
+             # Interrupt to get user input
+             user_input = interrupt(prompt)
+             conversation.append({"role": "user", "content": user_input})
+             span.add_event("user.input_received", {"input_length": len(user_input)})
+
+             # Check for closure (user is done)
+             if self._is_closure_intent(user_input):
+                 span.add_event("closure.detected")
+                 return self._handle_closure(state)
+
+             # Create agent with full state context
+             agent = self._create_agent(state)
+
+             # Get agent response
+             with trace_agent_invocation(
+                 agent_name=self.agent_config.get('name', self.step_id),
+                 model=self.agent_config.get('model', 'default')
+             ):
+                 agent_response = agent.invoke(conversation)
+             conversation.append({"role": "assistant", "content": str(agent_response)})
+
+             # Check for out-of-scope
+             if self.enable_out_of_scope and self._is_out_of_scope(agent_response):
+                 span.add_event("out_of_scope.detected")
+                 return self._handle_out_of_scope(agent_response, state, user_input)
+
+             # Check for intent change
+             if self._is_intent_change(agent_response):
+                 span.add_event("intent_change.detected")
+                 return self._handle_intent_change(agent_response, state)
+
+             # Check for explicit routing via transitions
+             if routing_target := self._check_transitions(agent_response):
+                 span.add_event("transition.matched", {"target": routing_target})
+                 self._set_status(state, routing_target)
+                 if routing_target in self.engine_context.outcome_map:
+                     self._set_outcome(state, routing_target)
+                 return state
+
+             # Default: self-loop for more Q&A
+             self._set_status(state, 'answering')
+             self._update_conversation(state, conversation)
+             add_node_result(span, None, None, state.get(WorkflowKeys.STATUS))
+
+             return state
+
+     def _is_self_loop(self, state: Dict[str, Any]) -> bool:
+         """Check if we're in a self-loop (returning after answering)."""
+         return state.get(WorkflowKeys.STATUS) == f'{self.step_id}_answering'
+
+     def _get_or_create_conversation(self, state: Dict[str, Any]) -> List[Dict[str, str]]:
+         """Get or create conversation history for this node."""
+         conversations = state.get(WorkflowKeys.CONVERSATIONS, {})
+         if self._conversation_key not in conversations:
+             conversations[self._conversation_key] = []
+         state[WorkflowKeys.CONVERSATIONS] = conversations
+         return conversations[self._conversation_key]
+
+     def _update_conversation(self, state: Dict[str, Any], conversation: List[Dict[str, str]]):
+         """Update conversation in state."""
+         state[WorkflowKeys.CONVERSATIONS][self._conversation_key] = conversation
+
+     def _get_last_assistant_message(self, conversation: List[Dict[str, str]]) -> Optional[str]:
+         """Get the last assistant message from conversation, or None if there is none."""
+         return next(
+             (msg['content'] for msg in reversed(conversation) if msg['role'] == 'assistant'),
+             None
+         )
+
+     def _is_closure_intent(self, user_input: str) -> bool:
+         """Check if user input indicates they're done with follow-up."""
+         normalized = user_input.lower().strip()
+         # Check whether any closure pattern appears within the input
+         for pattern in self.closure_patterns:
+             if pattern in normalized:
+                 return True
+         return False
+
+     def _handle_closure(self, state: Dict[str, Any]) -> Dict[str, Any]:
+         """Handle user indicating they're done - proceed to next step."""
+         logger.info(f"Closure detected in follow-up node '{self.step_id}'")
+         if self.next_step:
+             self._set_status(state, self.next_step)
+             if self.next_step in self.engine_context.outcome_map:
+                 self._set_outcome(state, self.next_step)
+         else:
+             self._set_status(state, 'complete')
+             state[WorkflowKeys.MESSAGES] = ["Follow-up complete."]
+         return state
+
+     def _is_out_of_scope(self, agent_response: str) -> bool:
+         """Check if agent response indicates out-of-scope query."""
+         return str(agent_response).startswith(OUT_OF_SCOPE_PATTERN)
+
+     def _handle_out_of_scope(
+         self,
+         agent_response: str,
+         state: Dict[str, Any],
+         user_message: str
+     ) -> Dict[str, Any]:
+         """Handle out-of-scope user input by signaling to parent orchestrator."""
+         reason = str(agent_response).split(OUT_OF_SCOPE_PATTERN)[1].strip()
+         logger.info(f"Out-of-scope detected in follow-up '{self.step_id}': {reason}")
+
+         state['_out_of_scope_reason'] = reason
+         interrupt({
+             "type": "out_of_scope",
+             "step_id": self.step_id,
+             "reason": reason,
+             "user_message": user_message
+         })
+         return state
+
+     def _is_intent_change(self, agent_response: str) -> bool:
+         """Check if agent response indicates intent change."""
+         return str(agent_response).startswith(INTENT_CHANGE_PATTERN)
+
+     def _handle_intent_change(self, agent_response: str, state: Dict[str, Any]) -> Dict[str, Any]:
+         """Handle intent change to another collector node."""
+         target_node = str(agent_response).split(INTENT_CHANGE_PATTERN)[1].strip()
+         logger.info(f"Intent change detected in follow-up '{self.step_id}' -> {target_node}")
+
+         # Route to the target collector node
+         self._set_status(state, target_node)
+         return state
+
+     def _check_transitions(self, agent_response: str) -> Optional[str]:
+         """Check if agent response matches any transition pattern."""
+         response_str = str(agent_response)
+         for transition in self.transitions:
+             patterns = transition.get('pattern', [])
+             if isinstance(patterns, str):
+                 patterns = [patterns]
+
+             for pattern in patterns:
+                 if pattern in response_str:
+                     return transition.get('next')
+         return None
+
+     def _get_model_config(self) -> Dict[str, Any]:
+         """Get model configuration for the agent."""
+         model_config = self.engine_context.get_config_value('model_config')
+         if not model_config:
+             raise ValueError("Model config not found in engine context")
+
+         if model_id := self.agent_config.get("model"):
+             model_config = model_config.copy()
+             model_config["model_name"] = model_id
+
+         return model_config
+
+     def _load_agent_tools(self, state: Dict[str, Any]) -> List:
+         """Load tools for the agent."""
+         return [
+             self.engine_context.tool_repository.load(tool_name, state)
+             for tool_name in self.agent_config.get('tools', [])
+         ]
+
+     def _create_agent(self, state: Dict[str, Any]) -> AgentAdapter:
+         """Create agent with full workflow context in instructions."""
+         try:
+             model_config = self._get_model_config()
+             agent_tools = self._load_agent_tools(state)
+             collector_nodes = state.get(WorkflowKeys.COLLECTOR_NODES, {})
+
+             # Build context summary from collected fields
+             context_summary = self._build_context_summary(state)
+
+             # Build complete instructions
+             base_instructions = self.agent_config.get(
+                 'instructions',
+                 "You are a helpful assistant answering follow-up questions."
+             )
+             instructions = _build_follow_up_instructions(
+                 base_instructions=base_instructions,
+                 context_summary=context_summary,
+                 collector_nodes=collector_nodes,
+                 enable_out_of_scope=self.enable_out_of_scope,
+                 scope_description=self.scope_description
+             )
+
+             # Inject localization instructions at the start (per-turn)
+             localization_instructions = self.engine_context.get_localization_instructions(state)
+             if localization_instructions:
+                 instructions = f"{localization_instructions}\n\n{instructions}"
+
+             framework = self.engine_context.get_config_value('agent_framework', 'langgraph')
+
+             return AgentFactory.create_agent(
+                 framework=framework,
+                 name=self.agent_config.get('name', f'{self.step_id}FollowUp'),
+                 model_config=model_config,
+                 tools=agent_tools,
+                 system_prompt=instructions
+             )
+
+         except Exception as e:
+             raise RuntimeError(f"Failed to create agent for follow-up '{self.step_id}': {e}") from e
+
+     def _build_context_summary(self, state: Dict[str, Any]) -> str:
+         """Build a summary of collected workflow data for the agent."""
+         context_lines = ["Current workflow context:"]
+
+         for field in self.engine_context.data_fields:
+             field_name = field['name']
+             if field_name in state and state[field_name] is not None:
+                 context_lines.append(f"- {field_name}: {state[field_name]}")
+
+         if len(context_lines) == 1:
+             context_lines.append("- (No data collected yet)")
+
+         return "\n".join(context_lines)
@@ -36,6 +36,10 @@ class WorkflowRouter:
              logger.info(f"Self-loop: {self.step_id} (async pending)")
              return self.step_id
 
+         if status == f'{self.step_id}_answering':
+             logger.info(f"Self-loop: {self.step_id} (follow-up answering)")
+             return self.step_id
+
          if status == f'{self.step_id}_error':
              logger.info(f"Error encountered in {self.step_id}, ending workflow")
              return END
@@ -77,8 +81,8 @@ class WorkflowRouter:
      def get_routing_map(self, collector_nodes: List[str]) -> Dict[str, str]:
          routing_map = {}
 
-         # Self-loop for nodes that can interrupt (agent input or async)
-         if self.action in ('collect_input_with_agent', 'call_async_function'):
+         # Self-loop for nodes that can interrupt (agent input, async, or follow-up)
+         if self.action in ('collect_input_with_agent', 'call_async_function', 'follow_up'):
              routing_map[self.step_id] = self.step_id
 
          for transition in self.transitions:
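
Net effect of the router changes: a follow_up step now self-loops while its status is <step_id>_answering, just like agent-input and async steps. A minimal sketch of the resulting routing map for an invented step (entries beyond the self-loop depend on router code not shown in this diff):

# Illustrative only: routing map for a hypothetical follow_up step "order_qa"
# with one configured transition.
routing_map = {
    "order_qa": "order_qa",            # self-loop added by the change above
    "human_handoff": "human_handoff",  # from a configured transition (assumed)
}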
soprano_sdk/tools.py CHANGED
@@ -10,7 +10,7 @@ from .utils.logger import logger
  from langfuse.langchain import CallbackHandler
 
  from .core.engine import load_workflow
- from .core.constants import MFAConfig, InterruptType
+ from .core.constants import MFAConfig, InterruptType, WorkflowKeys
 
 
  class WorkflowTool:
@@ -54,7 +54,9 @@ class WorkflowTool:
          self,
          thread_id: Optional[str] = None,
          user_message: Optional[str] = None,
-         initial_context: Optional[Dict[str, Any]] = None
+         initial_context: Optional[Dict[str, Any]] = None,
+         target_language: Optional[str] = None,
+         target_script: Optional[str] = None
      ) -> str:
          """Execute the workflow with automatic state detection
@@ -65,16 +67,25 @@
              thread_id: Thread ID for state tracking
              user_message: User's message (used for resume if workflow is interrupted)
              initial_context: Context to inject for fresh starts (e.g., {"order_id": "123"})
+             target_language: Target language for this turn (e.g., "Tamil", "Hindi")
+             target_script: Target script for this turn (e.g., "Tamil", "Devanagari")
 
          Returns:
              Final outcome message or interrupt prompt
          """
          from langgraph.types import Command
          from soprano_sdk.utils.tracing import trace_workflow_execution
-
+
          if thread_id is None:
              thread_id = str(uuid.uuid4())
-
+
+         # Build localization state for this turn
+         localization_state = {}
+         if target_language:
+             localization_state[WorkflowKeys.TARGET_LANGUAGE] = target_language
+         if target_script:
+             localization_state[WorkflowKeys.TARGET_SCRIPT] = target_script
+
          with trace_workflow_execution(
              workflow_name=self.engine.workflow_name,
              thread_id=thread_id,
@@ -99,8 +110,10 @@
                      "filtered_out": list(set(initial_context.keys()) - set(filtered_context.keys()))
                  })
 
+             # Merge localization state with filtered context
+             update_state = {**filtered_context, **localization_state}
              result = self.graph.invoke(
-                 Command(resume=user_message or "", update=filtered_context),
+                 Command(resume=user_message or "", update=update_state),
                  config=config
              )
 
@@ -112,7 +125,9 @@
                  self.engine.update_context(initial_context)
                  span.add_event("context.updated", {"fields": list(initial_context.keys())})
 
-             result = self.graph.invoke(initial_context, config=config)
+             # Merge localization state with initial context
+             invoke_state = {**(initial_context or {}), **localization_state}
+             result = self.graph.invoke(invoke_state, config=config)
 
              final_state = self.graph.get_state(config)
              if not final_state.next and self.checkpointer:
@@ -129,6 +144,16 @@
                  pending_metadata = json.dumps(interrupt_value.get("pending", {}))
                  return f"{InterruptType.ASYNC}|{thread_id}|{self.name}|{pending_metadata}"
 
+             # Check if this is an out-of-scope interrupt
+             if isinstance(interrupt_value, dict) and interrupt_value.get("type") == "out_of_scope":
+                 span.set_attribute("workflow.status", "out_of_scope")
+                 span.set_attribute("out_of_scope.step_id", interrupt_value.get("step_id", ""))
+                 payload = json.dumps({
+                     "reason": interrupt_value.get("reason", "User query is out of scope"),
+                     "user_message": interrupt_value.get("user_message", "")
+                 })
+                 return f"{InterruptType.OUT_OF_SCOPE}|{thread_id}|{self.name}|{payload}"
+
              # User input interrupt (existing behavior)
              span.set_attribute("workflow.status", "interrupted")
              prompt = interrupt_value
@@ -186,21 +211,35 @@
      def resume(
          self,
          thread_id: str,
-         resume_value: Union[str, Dict[str, Any]]
+         resume_value: Union[str, Dict[str, Any]],
+         target_language: Optional[str] = None,
+         target_script: Optional[str] = None
      ) -> str:
          """Resume an interrupted workflow with user input or async result
 
          Args:
              thread_id: Thread ID of the interrupted workflow
              resume_value: User's response (str) or async operation result (dict)
+             target_language: Target language for this turn (e.g., "Tamil", "Hindi")
+             target_script: Target script for this turn (e.g., "Tamil", "Devanagari")
 
          Returns:
              Either another interrupt prompt/async metadata or final outcome message
          """
          from langgraph.types import Command
 
+         # Build localization state for this turn
+         localization_state = {}
+         if target_language:
+             localization_state[WorkflowKeys.TARGET_LANGUAGE] = target_language
+         if target_script:
+             localization_state[WorkflowKeys.TARGET_SCRIPT] = target_script
+
          config = {"configurable": {"thread_id": thread_id}}
-         result = self.graph.invoke(Command(resume=resume_value), config=config)
+         if localization_state:
+             result = self.graph.invoke(Command(resume=resume_value, update=localization_state), config=config)
+         else:
+             result = self.graph.invoke(Command(resume=resume_value), config=config)
 
          # Check if workflow needs more input or has another async operation
          if "__interrupt__" in result and result["__interrupt__"]:
@@ -211,6 +250,14 @@
                  pending_metadata = json.dumps(interrupt_value.get("pending", {}))
                  return f"{InterruptType.ASYNC}|{thread_id}|{self.name}|{pending_metadata}"
 
+             # Check if this is an out-of-scope interrupt
+             if isinstance(interrupt_value, dict) and interrupt_value.get("type") == "out_of_scope":
+                 payload = json.dumps({
+                     "reason": interrupt_value.get("reason", "User query is out of scope"),
+                     "user_message": interrupt_value.get("user_message", "")
+                 })
+                 return f"{InterruptType.OUT_OF_SCOPE}|{thread_id}|{self.name}|{payload}"
+
              # User input interrupt
              return f"{InterruptType.USER_INPUT}|{thread_id}|{self.name}|{interrupt_value}"
 
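A sketch of how a caller might exercise the new per-turn localization parameters and handle the out-of-scope interrupt format; it assumes an already-constructed WorkflowTool instance named `tool` (its constructor is not part of this diff):

import json

from soprano_sdk.core.constants import InterruptType

result = tool.execute(
    user_message="What's the status of my order?",
    target_language="Tamil",   # merged into graph state for this turn only
    target_script="Tamil",
)

# Interrupted workflows return a pipe-delimited string:
#   <interrupt_type>|<thread_id>|<tool_name>|<payload>
if "|" in result:
    interrupt_type, thread_id, tool_name, payload = result.split("|", 3)
    if interrupt_type == f"{InterruptType.OUT_OF_SCOPE}":
        details = json.loads(payload)  # {"reason": ..., "user_message": ...}
    else:
        # User-input interrupt: resume with the answer; localization can
        # differ on every turn via the new resume() parameters.
        result = tool.resume(thread_id, "Yes, that's correct", target_language="Hindi")
else:
    final_message = result  # workflow completed with an outcome message
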
@@ -22,6 +22,48 @@ WORKFLOW_SCHEMA = {
          "default": "langgraph",
          "description": "Agent framework to use for all agents in this workflow (default: langgraph)"
      },
+     "humanization_agent": {
+         "type": "object",
+         "description": "Configuration for LLM-powered message humanization (enabled by default)",
+         "properties": {
+             "enabled": {
+                 "type": "boolean",
+                 "default": True,
+                 "description": "Whether to enable LLM humanization for outcome messages (default: true)"
+             },
+             "model": {
+                 "type": "string",
+                 "description": "Model to use for humanization (overrides model_config.model_name)"
+             },
+             "base_url": {
+                 "type": "string",
+                 "format": "uri",
+                 "description": "Base URL for humanization model (overrides model_config.base_url)"
+             },
+             "instructions": {
+                 "type": "string",
+                 "description": "Custom system prompt for the humanization agent"
+             }
+         }
+     },
+     "localization": {
+         "type": "object",
+         "description": "Default localization configuration (can be overridden per-turn via execute() parameters)",
+         "properties": {
+             "language": {
+                 "type": "string",
+                 "description": "Default target language (e.g., Tamil, Hindi, Spanish)"
+             },
+             "script": {
+                 "type": "string",
+                 "description": "Default target script/writing system (e.g., Tamil, Devanagari, Latin)"
+             },
+             "instructions": {
+                 "type": "string",
+                 "description": "Custom instructions for language/script requirements"
+             }
+         }
+     },
      "data": {
          "type": "array",
          "description": "Data fields used in the workflow",
@@ -58,6 +100,10 @@ WORKFLOW_SCHEMA = {
              "description": "Name of a field from the data array"
          }
      },
+     "failure_message": {
+         "type": "string",
+         "description": "Default message to be returned when any error occurs within the framework."
+     },
      "steps": {
          "type": "array",
          "description": "Workflow steps",
@@ -286,6 +332,11 @@ WORKFLOW_SCHEMA = {
                  "message": {
                      "type": "string",
                      "description": "Outcome message (supports {field} placeholders)"
+                 },
+                 "humanize": {
+                     "type": "boolean",
+                     "default": True,
+                     "description": "Whether to apply LLM humanization to this outcome's message (default: true)"
                  }
              }
          }
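
To illustrate the new schema fields, here is a hypothetical workflow definition fragment that would exercise them; only humanization_agent, localization, failure_message, and humanize come from the schema above, while the outcome container key and all names are invented:

# Hypothetical workflow definition using the new schema fields.
workflow_config = {
    "agent_framework": "langgraph",
    "humanization_agent": {
        "enabled": True,
        "model": "example-model",    # overrides model_config.model_name
        "instructions": "Rewrite outcome messages in a warm, concise tone.",
    },
    "localization": {
        "language": "Tamil",         # default; execute() can override per turn
        "script": "Tamil",
    },
    "failure_message": "Sorry, something went wrong. Please try again.",
    "outcomes": [                    # container key assumed, not shown in this diff
        {
            "name": "order_confirmed",                         # outcome name key assumed
            "message": "Your order {order_id} is confirmed.",
            "humanize": False,       # opt this outcome out of LLM humanization
        }
    ],
}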