soprano-sdk 0.2.19__py3-none-any.whl → 0.2.21__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- soprano_sdk/agents/structured_output.py +5 -1
- soprano_sdk/core/constants.py +45 -0
- soprano_sdk/core/engine.py +183 -5
- soprano_sdk/core/state.py +2 -2
- soprano_sdk/nodes/base.py +6 -1
- soprano_sdk/nodes/collect_input.py +85 -7
- soprano_sdk/nodes/factory.py +2 -0
- soprano_sdk/nodes/follow_up.py +351 -0
- soprano_sdk/routing/router.py +6 -2
- soprano_sdk/tools.py +55 -8
- soprano_sdk/validation/schema.py +51 -0
- {soprano_sdk-0.2.19.dist-info → soprano_sdk-0.2.21.dist-info}/METADATA +251 -1
- {soprano_sdk-0.2.19.dist-info → soprano_sdk-0.2.21.dist-info}/RECORD +15 -14
- {soprano_sdk-0.2.19.dist-info → soprano_sdk-0.2.21.dist-info}/WHEEL +0 -0
- {soprano_sdk-0.2.19.dist-info → soprano_sdk-0.2.21.dist-info}/licenses/LICENSE +0 -0
soprano_sdk/agents/structured_output.py CHANGED

@@ -16,15 +16,19 @@ def create_structured_output_model(
     fields: List[Dict[str, Any]],
     model_name: str = "StructuredOutput",
     needs_intent_change: bool = False,
+    enable_out_of_scope: bool = False,
 ) -> Type[BaseModel]:
     if not fields:
         raise ValueError("At least one field definition is required")
-
+
     field_definitions = {"bot_response": (Optional[str], Field(None, description="bot response for the user query, only use this for clarification or asking for more information"))}

     if needs_intent_change:
         field_definitions["intent_change"] = (Optional[str], Field(None, description="node name for handling new intent"))

+    if enable_out_of_scope:
+        field_definitions["out_of_scope"] = (Optional[str], Field(None, description="Brief description of what the user is trying to do if their query is completely outside the scope of this workflow"))
+
     for field_def in fields:
         field_name = field_def.get("name")
         field_type = field_def.get("type")
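
With `enable_out_of_scope=True`, the generated model simply gains an optional `out_of_scope` field next to `bot_response` and (when intent changes are possible) `intent_change`. A standalone sketch of the resulting shape, not the SDK's own code — the `order_id` field and all descriptions here are illustrative:

```python
from typing import Optional
from pydantic import Field, create_model

# Mirrors the field_definitions dict built above when both optional flags are on.
field_definitions = {
    "bot_response": (Optional[str], Field(None, description="clarification or follow-up question")),
    "intent_change": (Optional[str], Field(None, description="node name for handling new intent")),
    "out_of_scope": (Optional[str], Field(None, description="what the user wants, if unrelated to this workflow")),
    "order_id": (Optional[str], Field(None, description="hypothetical collected field")),
}

OrderIdStructuredOutput = create_model("OrderIdStructuredOutput", **field_definitions)
print(OrderIdStructuredOutput(out_of_scope="user wants to book a flight instead"))
```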
soprano_sdk/core/constants.py CHANGED

@@ -17,18 +17,22 @@ class WorkflowKeys:
     NODE_FIELD_MAP = '_node_field_map'
     COMPUTED_FIELDS = '_computed_fields'
     ERROR = 'error'
+    TARGET_LANGUAGE = '_target_language'
+    TARGET_SCRIPT = '_target_script'


 class ActionType(Enum):
     COLLECT_INPUT_WITH_AGENT = 'collect_input_with_agent'
     CALL_FUNCTION = 'call_function'
     CALL_ASYNC_FUNCTION = 'call_async_function'
+    FOLLOW_UP = 'follow_up'


 class InterruptType:
     """Interrupt type markers for workflow pauses"""
     USER_INPUT = '__WORKFLOW_INTERRUPT__'
     ASYNC = '__ASYNC_INTERRUPT__'
+    OUT_OF_SCOPE = '__OUT_OF_SCOPE_INTERRUPT__'


 class DataType(Enum):

@@ -68,6 +72,47 @@ DEFAULT_TIMEOUT = 300
 MAX_ATTEMPTS_MESSAGE = "I'm having trouble understanding your {field}. Please contact customer service for assistance."
 WORKFLOW_COMPLETE_MESSAGE = "Workflow completed."

+# Humanization defaults
+DEFAULT_HUMANIZATION_ENABLED = True
+DEFAULT_HUMANIZATION_SYSTEM_PROMPT = """You are a helpful assistant that transforms template-based messages into natural, conversational responses.
+
+Your task:
+1. Take the reference message provided and rewrite it naturally
+2. Maintain ALL factual information and important details from the reference
+3. Use the conversation history for context and tone matching
+4. Be warm, professional, and helpful
+5. Keep the response concise but complete
+
+Reference message to humanize:
+{reference_message}
+
+Respond with ONLY the humanized message. Do not add explanations or meta-commentary."""
+
+
+class HumanizationKeys:
+    """Keys for humanization agent configuration"""
+    ENABLED = 'enabled'
+    MODEL = 'model'
+    BASE_URL = 'base_url'
+    INSTRUCTIONS = 'instructions'
+
+
+# Localization defaults
+DEFAULT_LOCALIZATION_INSTRUCTIONS = """LANGUAGE REQUIREMENT:
+You MUST respond in {language} using {script} script.
+- All your responses must be in {language}
+- Use the {script} writing system
+- Maintain the same meaning and tone as you would in English
+- Do not mix languages unless quoting the user
+"""
+
+
+class LocalizationKeys:
+    """Keys for localization configuration"""
+    LANGUAGE = 'language'
+    SCRIPT = 'script'
+    INSTRUCTIONS = 'instructions'
+

 class MFAConfig(BaseSettings):
     """
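
The new key classes imply workflow-level `humanization_agent` and `localization` blocks. A hypothetical config fragment, shown as the dict the workflow YAML would load into — only the key names come from `HumanizationKeys`/`LocalizationKeys`, every value is an example:

```python
# Hypothetical workflow config fragment; key names from the constants above, values illustrative.
workflow_config = {
    "humanization_agent": {
        "enabled": True,                       # DEFAULT_HUMANIZATION_ENABLED applies when omitted
        "model": "gpt-4o-mini",                # optional override of the workflow's model_config
        "base_url": "https://llm.example.internal/v1",  # optional endpoint override
        "instructions": None,                  # None -> DEFAULT_HUMANIZATION_SYSTEM_PROMPT
    },
    "localization": {
        "language": "Hindi",
        "script": "Devanagari",
        "instructions": None,                  # None -> DEFAULT_LOCALIZATION_INSTRUCTIONS
    },
}
```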
soprano_sdk/core/engine.py CHANGED

@@ -1,4 +1,4 @@
-from typing import Optional, Dict, Any, Tuple
+from typing import Optional, Dict, Any, Tuple, List

 import yaml
 from jinja2 import Environment

@@ -7,7 +7,16 @@ from langgraph.constants import START
 from langgraph.graph import StateGraph
 from langgraph.graph.state import CompiledStateGraph

-from .constants import
+from .constants import (
+    WorkflowKeys,
+    MFAConfig,
+    DEFAULT_HUMANIZATION_ENABLED,
+    DEFAULT_HUMANIZATION_SYSTEM_PROMPT,
+    HumanizationKeys,
+    DEFAULT_LOCALIZATION_INSTRUCTIONS,
+    LocalizationKeys
+)
+from ..agents.factory import AgentFactory
 from .state import create_state_model
 from ..nodes.factory import NodeFactory
 from ..routing.router import WorkflowRouter

@@ -41,6 +50,7 @@ class WorkflowEngine:
         self.data_fields = self.load_data()
         self.outcomes = self.load_outcomes()
         self.metadata = self.config.get('metadata', {})
+        self.failure_message: Optional[str] = self.config.get('failure_message')

         self.StateType = create_state_model(self.data_fields)


@@ -147,6 +157,167 @@ class WorkflowEngine:
         except Exception as e:
             raise RuntimeError(f"Failed to build workflow graph: {e}")

+    def _aggregate_conversation_history(self, state: Dict[str, Any]) -> List[Dict[str, str]]:
+        """Aggregate all conversations from collector nodes in execution order."""
+        conversations = state.get(WorkflowKeys.CONVERSATIONS, {})
+        node_order = state.get(WorkflowKeys.NODE_EXECUTION_ORDER, [])
+        node_field_map = state.get(WorkflowKeys.NODE_FIELD_MAP, {})
+
+        aggregated = []
+        for node_id in node_order:
+            field = node_field_map.get(node_id)
+            if field:
+                conv_key = f"{field}_conversation"
+                if conv_messages := conversations.get(conv_key):
+                    aggregated.extend(conv_messages)
+
+        return aggregated
+
+    def _get_humanization_config(self) -> Dict[str, Any]:
+        """Get humanization agent configuration from workflow config."""
+        return self.config.get('humanization_agent', {})
+
+    def _should_humanize_outcome(self, outcome: Dict[str, Any]) -> bool:
+        """Determine if an outcome should be humanized."""
+        # Check workflow-level setting (default: enabled)
+        workflow_humanization = self._get_humanization_config()
+        workflow_enabled = workflow_humanization.get(
+            HumanizationKeys.ENABLED,
+            DEFAULT_HUMANIZATION_ENABLED
+        )
+
+        if not workflow_enabled:
+            return False
+
+        # Check per-outcome setting (default: True, inherit from workflow)
+        return outcome.get('humanize', True)
+
+    def _get_humanization_model_config(self) -> Optional[Dict[str, Any]]:
+        """Get model config for humanization, with overrides applied."""
+        model_config = self.get_config_value('model_config')
+        if not model_config:
+            return None
+
+        humanization_config = self._get_humanization_config()
+        model_config = model_config.copy()  # Don't mutate original
+
+        # Apply overrides from humanization_agent config
+        if model := humanization_config.get(HumanizationKeys.MODEL):
+            model_config['model_name'] = model
+        if base_url := humanization_config.get(HumanizationKeys.BASE_URL):
+            model_config['base_url'] = base_url
+
+        return model_config
+
+    def _get_localization_config(self) -> Dict[str, Any]:
+        """Get localization configuration from workflow config."""
+        return self.config.get('localization', {})
+
+    def get_localization_instructions(self, state: Dict[str, Any]) -> str:
+        """Get localization instructions based on state (per-turn) or YAML defaults.
+
+        Args:
+            state: Current workflow state containing per-turn language/script values
+
+        Returns:
+            Localization instructions string to prepend to agent prompts, or empty string if no localization
+        """
+        # First check state for per-turn values
+        language = state.get(WorkflowKeys.TARGET_LANGUAGE)
+        script = state.get(WorkflowKeys.TARGET_SCRIPT)
+
+        # Fall back to YAML defaults if not in state
+        yaml_config = self._get_localization_config()
+        if not language:
+            language = yaml_config.get(LocalizationKeys.LANGUAGE)
+        if not script:
+            script = yaml_config.get(LocalizationKeys.SCRIPT)
+
+        # No localization if neither specified
+        if not language and not script:
+            return ""
+
+        # Use custom instructions if provided in YAML
+        custom_instructions = yaml_config.get(LocalizationKeys.INSTRUCTIONS)
+        if custom_instructions:
+            return custom_instructions.format(
+                language=language or "the target language",
+                script=script or "the appropriate script"
+            )
+
+        return DEFAULT_LOCALIZATION_INSTRUCTIONS.format(
+            language=language or "the target language",
+            script=script or "the appropriate script"
+        )
+
+    def _humanize_message(self, reference_message: str, state: Dict[str, Any]) -> str:
+        """Use LLM to humanize the reference message using conversation context."""
+        try:
+            model_config = self._get_humanization_model_config()
+            if not model_config:
+                logger.warning("No model_config found, skipping humanization")
+                return reference_message
+
+            humanization_config = self._get_humanization_config()
+
+            # Build system prompt
+            custom_instructions = humanization_config.get(HumanizationKeys.INSTRUCTIONS)
+            if custom_instructions:
+                system_prompt = f"{custom_instructions}\n\nReference message to humanize:\n{reference_message}"
+            else:
+                system_prompt = DEFAULT_HUMANIZATION_SYSTEM_PROMPT.format(
+                    reference_message=reference_message
+                )
+
+            # Inject localization instructions if specified
+            localization_instructions = self.get_localization_instructions(state)
+            if localization_instructions:
+                system_prompt = f"{localization_instructions}\n\n{system_prompt}"
+
+            # Aggregate conversation history
+            conversation_history = self._aggregate_conversation_history(state)
+
+            # Create agent for humanization
+            framework = self.get_config_value('agent_framework', 'langgraph')
+            agent = AgentFactory.create_agent(
+                framework=framework,
+                name="HumanizationAgent",
+                model_config=model_config,
+                tools=[],
+                system_prompt=system_prompt,
+                structured_output_model=None
+            )
+
+            # Invoke agent with conversation history
+            if conversation_history:
+                messages = conversation_history + [
+                    {"role": "user", "content": "Please humanize the reference message based on our conversation."}
+                ]
+            else:
+                messages = [
+                    {"role": "user", "content": "Please humanize the reference message."}
+                ]
+
+            humanized_response = agent.invoke(messages)
+
+            # Handle different response types
+            if isinstance(humanized_response, dict):
+                humanized_message = humanized_response.get('content', str(humanized_response))
+            else:
+                humanized_message = str(humanized_response)
+
+            # Validate we got a meaningful response
+            if not humanized_message or humanized_message.strip() == '':
+                logger.warning("Humanization returned empty response, using original message")
+                return reference_message
+
+            logger.info(f"Message humanized successfully: {humanized_message[:100]}...")
+            return humanized_message
+
+        except Exception as e:
+            logger.warning(f"Humanization failed, using original message: {e}")
+            return reference_message
+
     def get_outcome_message(self, state: Dict[str, Any]) -> str:
         outcome_id = state.get(WorkflowKeys.OUTCOME_ID)
         step_id = state.get(WorkflowKeys.STEP_ID)

@@ -155,9 +326,16 @@ class WorkflowEngine:
         if outcome and 'message' in outcome:
             message = outcome['message']
             template_loader = self.get_config_value("template_loader", Environment())
-
-
-
+            rendered_message = template_loader.from_string(message).render(state)
+
+            # Apply humanization if enabled for this outcome
+            if self._should_humanize_outcome(outcome):
+                final_message = self._humanize_message(rendered_message, state)
+            else:
+                final_message = rendered_message
+
+            logger.info(f"Outcome message generated in step {step_id}: {final_message}")
+            return final_message

         if error := state.get("error"):
             logger.info(f"Outcome error found in step {step_id}: {error}")
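
`get_outcome_message` now renders the outcome template and then routes it through `_humanize_message`, unless humanization is disabled at the workflow level or per outcome via a `humanize` flag. A hypothetical outcome list showing the opt-out — the code above only reads the `message` and `humanize` keys, the rest of the schema is assumed:

```python
# Hypothetical outcome definitions; 'humanize' defaults to True when omitted.
outcomes = [
    {
        "id": "order_cancelled",
        "message": "Your order {{ order_id }} has been cancelled.",  # rewritten by the humanization agent
    },
    {
        "id": "error",
        "message": "Please contact customer service for assistance.",
        "humanize": False,  # delivered verbatim
    },
]
```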
soprano_sdk/core/state.py CHANGED

@@ -1,5 +1,5 @@
 import types
-from typing import Annotated, Optional, Dict, List, Any
+from typing import Annotated, Optional, Dict, List, Any, Union

 from typing_extensions import TypedDict


@@ -46,7 +46,7 @@ def create_state_model(data_fields: List[dict]):
    fields['_node_execution_order'] = Annotated[List[str], replace]
    fields['_node_field_map'] = Annotated[Dict[str, str], replace]
    fields['_computed_fields'] = Annotated[List[str], replace]
-    fields['error'] = Annotated[
+    fields['error'] = Annotated[Union[str, Dict[str, str], None], replace]
    fields['_mfa'] = Annotated[Optional[Dict[str, str]], replace]
    fields['_mfa_config'] = Annotated[Optional[Any], replace]
    fields['mfa_input'] = Annotated[Optional[Dict[str, str]], replace]
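
The widened `error` field now accepts either a plain string or a dict of strings, so both assignments below are valid against the new state model — the dict keys are illustrative, not a schema the SDK defines:

```python
state = {}
state["error"] = "Unable to complete the request: upstream timeout"                         # string form
state["error"] = {"code": "UPSTREAM_TIMEOUT", "message": "Unable to complete the request"}  # illustrative dict form
```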
soprano_sdk/nodes/base.py CHANGED

@@ -40,7 +40,12 @@ class ActionStrategy(ABC):
         except Exception as e:
             logger.error(f"Node {self.step_id} failed: {e}", exc_info=True)
             self._set_status(state, WorkflowKeys.ERROR)
-
+
+            # Use custom error message from engine context if available
+            failure_message = getattr(self.engine_context, 'failure_message', None)
+            error_message = failure_message if failure_message else f"Unable to complete the request: {str(e)}"
+
+            state[WorkflowKeys.ERROR] = error_message
             state[WorkflowKeys.OUTCOME_ID] = WorkflowKeys.ERROR
             return state

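This pairs with the new `WorkflowEngine.failure_message` above: when a workflow config sets a top-level `failure_message`, failing nodes surface that text instead of the raw exception. A minimal sketch of the fallback with a stand-in context object (only the attribute name is taken from the diff, everything else is illustrative):

```python
# Stand-in for the engine context a strategy holds; only the attribute name matches engine.py.
class _EngineContext:
    failure_message = "We couldn't finish that request. Please try again in a few minutes."

failure_message = getattr(_EngineContext(), "failure_message", None)
error_message = failure_message if failure_message else "Unable to complete the request: <exception text>"
print(error_message)  # prints the configured failure_message
```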
soprano_sdk/nodes/collect_input.py CHANGED

@@ -25,6 +25,31 @@ from ..utils.tracing import trace_node_execution, trace_agent_invocation, add_no
 VALIDATION_ERROR_MESSAGE = "validation failed for the provided input, please enter valid input"
 INVALID_INPUT_MESSAGE = "Looks like the input is invalid. Please double-check and re-enter it."
 COLLECTION_FAILURE_MESSAGE = "I couldn't understand your response. Please try again and provide the required information."
+OUT_OF_SCOPE_PATTERN = "OUT_OF_SCOPE:"
+
+
+def _wrap_instructions_with_out_of_scope_detection(
+    instructions: str,
+    scope_description: str,
+    with_structured_output: bool
+) -> str:
+    """Append out-of-scope detection instructions. Backward compatible - works with any scope_description."""
+    return f"""{instructions}
+
+OUT-OF-SCOPE DETECTION:
+Your current task is: {scope_description}
+
+If the user's query is COMPLETELY UNRELATED to this task:
+- {"Set out_of_scope to a brief description of what the user is trying to do" if with_structured_output else "Respond with: OUT_OF_SCOPE: <brief description of user intent>"}
+- Do NOT attempt to answer or redirect the query
+- Do NOT confuse this with intent changes between collection steps
+
+Examples of out-of-scope queries:
+- Asking about completely different products/services
+- Requesting actions unrelated to the current task
+- General questions that don't fit the current workflow
+"""
+

 def _wrap_instructions_with_intent_detection(
     instructions: str,

@@ -97,6 +122,13 @@ class CollectInputStrategy(ActionStrategy):
         self.next_step = self.step_config.get("next", None)
         self.is_structured_output = self.agent_config.get("structured_output", {}).get("enabled", False)

+        # Out-of-scope detection configuration (disabled by default)
+        self.enable_out_of_scope = self.agent_config.get("detect_out_of_scope", False)
+        self.scope_description = self.agent_config.get(
+            "scope_description",
+            self.agent_config.get("description", f"collecting {self.field}")
+        )
+
         rollback_strategy_name = engine_context.get_config_value("rollback_strategy", "history_based")
         self.rollback_strategy = _create_rollback_strategy(rollback_strategy_name)
         logger.info(f"Using rollback strategy: {self.rollback_strategy.get_strategy_name()}")

@@ -168,11 +200,22 @@ class CollectInputStrategy(ActionStrategy):
        ):
            agent_response = _get_agent_response(agent, conversation)

+           # Get user message for out-of-scope handling
+           user_message = next(
+               (msg['content'] for msg in reversed(conversation) if msg['role'] == 'user'),
+               ""
+           )
+
            if self.is_structured_output:
-               state = self._handle_structured_output_transition(state, conversation, agent_response)
+               state = self._handle_structured_output_transition(state, conversation, agent_response, user_message)
                add_node_result(span, self.field, state.get(self.field), state.get(WorkflowKeys.STATUS))
                return self._add_node_to_execution_order(state)

+           # Check for out-of-scope before intent change (for non-structured output)
+           if self.enable_out_of_scope and agent_response.startswith(OUT_OF_SCOPE_PATTERN):
+               span.add_event("out_of_scope.detected")
+               return self._handle_out_of_scope(agent_response, state, user_message)
+
            if agent_response.startswith(TransitionPattern.INTENT_CHANGE):
                span.add_event("intent.change_detected")
                return self._handle_intent_change(agent_response, state)

@@ -305,12 +348,24 @@ class CollectInputStrategy(ActionStrategy):

         instructions = self._render_template_string(instructions, state)

+        # Inject localization instructions at the start (per-turn)
+        localization_instructions = self.engine_context.get_localization_instructions(state)
+        if localization_instructions:
+            instructions = f"{localization_instructions}\n\n{instructions}"
+
         if collector_nodes:
             collector_nodes_for_intent_change = {
                 node_id: node_desc for node_id, node_desc in collector_nodes.items()
                 if node_id not in self.engine_context.mfa_validator_steps
             }
             instructions = _wrap_instructions_with_intent_detection(instructions, collector_nodes_for_intent_change, self.is_structured_output)
+
+        # Add out-of-scope detection instructions if enabled
+        if self.enable_out_of_scope:
+            instructions = _wrap_instructions_with_out_of_scope_detection(
+                instructions, self.scope_description, self.is_structured_output
+            )
+
         return instructions

     def _load_agent_tools(self, state: Dict[str, Any]) -> List:

@@ -323,18 +378,19 @@ class CollectInputStrategy(ActionStrategy):
         structured_output_config = self.agent_config.get('structured_output')
         if not structured_output_config or not structured_output_config.get('enabled'):
             return None
-
+
         fields = structured_output_config.get('fields', [])
         if not fields:
             return None
-
+
         validate_field_definitions(fields)
         model_name = f"{self.field.title().replace('_', '')}StructuredOutput"
-
+
         return create_structured_output_model(
             fields=fields,
             model_name=model_name,
-            needs_intent_change=len(collector_nodes) > 0
+            needs_intent_change=len(collector_nodes) > 0,
+            enable_out_of_scope=self.enable_out_of_scope
         )

     def _create_agent(self, state: Dict[str, Any]) -> AgentAdapter:

@@ -399,7 +455,7 @@ class CollectInputStrategy(ActionStrategy):
             target_node = target_node_or_response.split(TransitionPattern.INTENT_CHANGE)[1].strip()
         else:
             target_node = target_node_or_response
-
+
         logger.info(f"Intent change detected: {self.step_id} -> {target_node}")

         rollback_state = self._rollback_state_to_node(state, target_node)

@@ -410,6 +466,24 @@ class CollectInputStrategy(ActionStrategy):

         return rollback_state

+    def _handle_out_of_scope(self, reason: str, state: Dict[str, Any], user_message: str) -> Dict[str, Any]:
+        """Handle out-of-scope user input by signaling to parent orchestrator"""
+        if isinstance(reason, str) and OUT_OF_SCOPE_PATTERN in reason:
+            reason = reason.split(OUT_OF_SCOPE_PATTERN)[1].strip()
+
+        logger.info(f"Out-of-scope detected in step '{self.step_id}': {reason}")
+
+        # Store the out-of-scope reason and interrupt with user message
+        state['_out_of_scope_reason'] = reason
+        interrupt({
+            "type": "out_of_scope",
+            "step_id": self.step_id,
+            "reason": reason,
+            "user_message": user_message
+        })
+
+        return state
+
     def _rollback_state_to_node(
         self,
         state: Dict[str, Any],

@@ -491,13 +565,17 @@ class CollectInputStrategy(ActionStrategy):

         return state

-    def _handle_structured_output_transition(self, state: Dict[str, Any], conversation: List, agent_response: Any) -> Dict[str, Any]:
+    def _handle_structured_output_transition(self, state: Dict[str, Any], conversation: List, agent_response: Any, user_message: str = "") -> Dict[str, Any]:

         try:
             agent_response = json.loads(agent_response) if isinstance(agent_response, str) else agent_response
         except (json.JSONDecodeError, TypeError, ValueError):
             pass

+        # Check for out-of-scope first (before intent change)
+        if self.enable_out_of_scope and (out_of_scope_reason := agent_response.get("out_of_scope")):
+            return self._handle_out_of_scope(out_of_scope_reason, state, user_message)
+
         if target_node := agent_response.get("intent_change"):
             return self._handle_intent_change(target_node, state)

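Out-of-scope detection is opt-in per collect-input step through the agent config keys read in `__init__` above (`detect_out_of_scope`, `scope_description`). A hypothetical step definition — apart from those two keys and `structured_output`, every name and value here is illustrative:

```python
# Hypothetical collect-input step config; only detect_out_of_scope, scope_description and
# structured_output are taken from the code above, the rest is illustrative.
step = {
    "action": "collect_input_with_agent",
    "field": "order_id",
    "agent": {
        "detect_out_of_scope": True,
        "scope_description": "collecting the customer's order ID for a refund request",
        "structured_output": {
            "enabled": True,
            "fields": [{"name": "order_id", "type": "string"}],  # field type vocabulary assumed
        },
    },
}
```

When the agent flags a query this way, `_handle_out_of_scope` records `_out_of_scope_reason` in state and raises an `out_of_scope` interrupt carrying the step id, the reason, and the user's last message, so a parent orchestrator can take over.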
soprano_sdk/nodes/factory.py CHANGED

@@ -4,6 +4,7 @@ from .base import ActionStrategy
 from .call_function import CallFunctionStrategy
 from .collect_input import CollectInputStrategy
 from .async_function import AsyncFunctionStrategy
+from .follow_up import FollowUpStrategy
 from ..core.constants import ActionType
 from ..utils.logger import logger

@@ -46,3 +47,4 @@ class NodeFactory:
 NodeFactory.register(ActionType.COLLECT_INPUT_WITH_AGENT.value, CollectInputStrategy)
 NodeFactory.register(ActionType.CALL_FUNCTION.value, CallFunctionStrategy)
 NodeFactory.register(ActionType.CALL_ASYNC_FUNCTION.value, AsyncFunctionStrategy)
+NodeFactory.register(ActionType.FOLLOW_UP.value, FollowUpStrategy)