rasa-pro 3.12.0.dev9__py3-none-any.whl → 3.12.0.dev10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of rasa-pro might be problematic.

Files changed (56):
  1. rasa/core/actions/action.py +17 -3
  2. rasa/core/actions/action_handle_digressions.py +142 -0
  3. rasa/core/actions/forms.py +4 -2
  4. rasa/core/channels/voice_ready/audiocodes.py +42 -23
  5. rasa/core/channels/voice_stream/tts/azure.py +2 -1
  6. rasa/core/migrate.py +2 -2
  7. rasa/core/policies/flows/flow_executor.py +33 -1
  8. rasa/dialogue_understanding/commands/can_not_handle_command.py +2 -2
  9. rasa/dialogue_understanding/commands/cancel_flow_command.py +62 -4
  10. rasa/dialogue_understanding/commands/change_flow_command.py +2 -2
  11. rasa/dialogue_understanding/commands/chit_chat_answer_command.py +2 -2
  12. rasa/dialogue_understanding/commands/clarify_command.py +2 -2
  13. rasa/dialogue_understanding/commands/correct_slots_command.py +11 -2
  14. rasa/dialogue_understanding/commands/handle_digressions_command.py +150 -0
  15. rasa/dialogue_understanding/commands/human_handoff_command.py +2 -2
  16. rasa/dialogue_understanding/commands/knowledge_answer_command.py +2 -2
  17. rasa/dialogue_understanding/commands/repeat_bot_messages_command.py +2 -2
  18. rasa/dialogue_understanding/commands/set_slot_command.py +7 -15
  19. rasa/dialogue_understanding/commands/skip_question_command.py +2 -2
  20. rasa/dialogue_understanding/commands/start_flow_command.py +43 -2
  21. rasa/dialogue_understanding/commands/utils.py +1 -1
  22. rasa/dialogue_understanding/constants.py +1 -0
  23. rasa/dialogue_understanding/generator/command_generator.py +10 -76
  24. rasa/dialogue_understanding/generator/command_parser.py +1 -1
  25. rasa/dialogue_understanding/generator/llm_based_command_generator.py +126 -2
  26. rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +10 -2
  27. rasa/dialogue_understanding/generator/nlu_command_adapter.py +4 -2
  28. rasa/dialogue_understanding/generator/single_step/command_prompt_template.jinja2 +40 -40
  29. rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +11 -19
  30. rasa/dialogue_understanding/patterns/correction.py +13 -1
  31. rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +62 -2
  32. rasa/dialogue_understanding/patterns/handle_digressions.py +81 -0
  33. rasa/dialogue_understanding/processor/command_processor.py +117 -28
  34. rasa/dialogue_understanding/utils.py +31 -0
  35. rasa/dialogue_understanding_test/test_case_simulation/test_case_tracker_simulator.py +2 -2
  36. rasa/shared/core/constants.py +22 -1
  37. rasa/shared/core/domain.py +6 -4
  38. rasa/shared/core/events.py +13 -2
  39. rasa/shared/core/flows/flow.py +17 -0
  40. rasa/shared/core/flows/flows_yaml_schema.json +38 -0
  41. rasa/shared/core/flows/steps/collect.py +18 -1
  42. rasa/shared/core/flows/utils.py +16 -1
  43. rasa/shared/core/slot_mappings.py +6 -6
  44. rasa/shared/core/slots.py +19 -0
  45. rasa/shared/core/trackers.py +3 -1
  46. rasa/shared/nlu/constants.py +1 -0
  47. rasa/shared/providers/llm/_base_litellm_client.py +0 -40
  48. rasa/shared/utils/llm.py +1 -86
  49. rasa/shared/utils/schemas/domain.yml +0 -1
  50. rasa/validator.py +172 -22
  51. rasa/version.py +1 -1
  52. {rasa_pro-3.12.0.dev9.dist-info → rasa_pro-3.12.0.dev10.dist-info}/METADATA +1 -1
  53. {rasa_pro-3.12.0.dev9.dist-info → rasa_pro-3.12.0.dev10.dist-info}/RECORD +56 -53
  54. {rasa_pro-3.12.0.dev9.dist-info → rasa_pro-3.12.0.dev10.dist-info}/NOTICE +0 -0
  55. {rasa_pro-3.12.0.dev9.dist-info → rasa_pro-3.12.0.dev10.dist-info}/WHEEL +0 -0
  56. {rasa_pro-3.12.0.dev9.dist-info → rasa_pro-3.12.0.dev10.dist-info}/entry_points.txt +0 -0
rasa/dialogue_understanding/generator/nlu_command_adapter.py

@@ -98,9 +98,11 @@ class NLUCommandAdapter(GraphComponent, CommandGenerator):
         Returns:
             The commands triggered by NLU.
         """
+        prior_commands = self._get_prior_commands(message)
+
         if tracker is None or flows.is_empty():
             # cannot do anything if there are no flows or no tracker
-            return []
+            return prior_commands

         domain = kwargs.get("domain", None)
         commands = self.convert_nlu_to_commands(message, tracker, flows, domain)
@@ -146,7 +148,7 @@ class NLUCommandAdapter(GraphComponent, CommandGenerator):
             commands=commands,
         )

-        return commands
+        return prior_commands + commands

     @staticmethod
     def convert_nlu_to_commands(
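Both command generators in this release stop discarding commands that earlier pipeline components already attached to the message: the early-exit paths now return `prior_commands` and the normal path prepends them. A minimal standalone sketch of that pass-through idea follows; the key name and message shape are illustrative assumptions, not the actual `Message` API:

```python
from typing import Any, Dict, List

COMMANDS_KEY = "commands"  # stand-in for rasa.shared.nlu.constants.COMMANDS

def get_prior_commands(parse_data: Dict[str, Any]) -> List[Dict[str, Any]]:
    """Read the commands earlier components already attached to the message,
    so a generator can prepend them to its own predictions instead of
    silently dropping them on its early-exit paths."""
    return list(parse_data.get(COMMANDS_KEY, []))

parse_data = {
    "text": "I want to transfer money",
    COMMANDS_KEY: [{"command": "start flow", "flow": "transfer_money"}],
}
# The early-exit path (no tracker / no flows) now returns these prior commands
# instead of [], and the normal path returns prior_commands + commands.
assert get_prior_commands(parse_data) == [{"command": "start flow", "flow": "transfer_money"}]
```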
rasa/dialogue_understanding/generator/single_step/command_prompt_template.jinja2

@@ -1,58 +1,58 @@
 Your task is to analyze the current conversation context and generate a list of actions to start new business processes that we call flows, to extract slots, or respond to small talk and knowledge requests.

-
-## Available Actions:
-* Starting a flow, described by "start flow_name". For example, "start transfer_money" or "start list_contacts"
-* Slot setting, described by "set slot_name slot_value". For example, "set transfer_money_recipient Freddy". Can be used to correct and change previously set values.
-* Cancelling the current flow, described by "cancel"
-* Clarifying which flow should be started in ambiguous cases. For example, "clarify list_contacts add_contact remove_contact" if the user just wrote "contacts" and there are multiple potential candidates.
-* Skipping the current question when the user explicitly asks for it, described by "skip".
-* Responding to knowledge-oriented user messages, described by "search"
-* Responding to a casual, non-task-oriented user message, described by "chat".
-* Handing over to a human, in case the user seems frustrated or explicitly asks to speak to one, described by "hand over".
-
-
-## General Tips
-* Do not fill slots with abstract values or placeholders.
-* Only use information provided by the user.
-* Use clarification in ambiguous cases.
-* Multiple flows can be started. If a user wants to digress into a second flow, you do not need to cancel the current flow.
-* Strictly adhere to the provided action format.
-* For categorical slots try to match the user message with potential slot values. Use "other" if you cannot match it
-* Focus on the last message and take it one step at a time.
-* Use the previous conversation steps only to aid understanding.
-
-
-## Available Flows:
+These are the flows that can be started, with their description and slots:
 {% for flow in available_flows %}
-* {{ flow.name }}: {{ flow.description }}
+{{ flow.name }}: {{ flow.description }}
 {% for slot in flow.slots -%}
-* {{ slot.name }}{% if slot.description %} ({{ slot.description }}){% endif %}{% if slot.allowed_values %}, allowed values: {{ slot.allowed_values }}{% endif %}
+slot: {{ slot.name }}{% if slot.description %} ({{ slot.description }}){% endif %}{% if slot.allowed_values %}, allowed values: {{ slot.allowed_values }}{% endif %}
 {% endfor %}
 {%- endfor %}

+===
+Here is what happened previously in the conversation:
+{{ current_conversation }}

-## Current State
+===
 {% if current_flow != None %}
 You are currently in the flow "{{ current_flow }}".
 You have just asked the user for the slot "{{ current_slot }}"{% if current_slot_description %} ({{ current_slot_description }}){% endif %}.

 {% if flow_slots|length > 0 %}
-Here are the slots of the flow "{{ current_flow }}":
+Here are the slots of the currently active flow:
 {% for slot in flow_slots -%}
-* name: {{ slot.name }}, value: {{ slot.value }}, type: {{ slot.type }}, description: {{ slot.description}}{% if slot.allowed_values %}, allowed values: {{ slot.allowed_values }}{% endif %}
+- name: {{ slot.name }}, value: {{ slot.value }}, type: {{ slot.type }}, description: {{ slot.description}}{% if slot.allowed_values %}, allowed values: {{ slot.allowed_values }}{% endif %}
 {% endfor %}
 {% endif %}
 {% else %}
-You are currently not inside any flow.
+You are currently not in any flow and so there are no active slots.
+This means you can only set a slot if you first start a flow that requires that slot.
 {% endif %}
-
-
-## Conversation History
-{{ current_conversation }}
-
-
-## Task
-Create an action list with one action per line in response to the users last message: """{{ user_message }}""".
-
-Your action list:
+If you start a flow, first start the flow and then optionally fill that flow's slots with information the user provided in their message.
+
+The user just said """{{ user_message }}""".
+
+===
+Based on this information generate a list of actions you want to take. Your job is to start flows and to fill slots where appropriate. Any logic of what happens afterwards is handled by the flow engine. These are your available actions:
+* Slot setting, described by "SetSlot(slot_name, slot_value)". An example would be "SetSlot(recipient, Freddy)"
+* Starting another flow, described by "StartFlow(flow_name)". An example would be "StartFlow(transfer_money)"
+* Cancelling the current flow, described by "CancelFlow()"
+* Clarifying which flow should be started. An example would be Clarify(list_contacts, add_contact, remove_contact) if the user just wrote "contacts" and there are multiple potential candidates. It also works with a single flow name to confirm you understood correctly, as in Clarify(transfer_money).
+* Intercepting and handle user messages with the intent to bypass the current step in the flow, described by "SkipQuestion()". Examples of user skip phrases are: "Go to the next question", "Ask me something else".
+* Responding to knowledge-oriented user messages, described by "SearchAndReply()"
+* Responding to a casual, non-task-oriented user message, described by "ChitChat()".
+* Handing off to a human, in case the user seems frustrated or explicitly asks to speak to one, described by "HumanHandoff()".
+* Repeat the last bot messages, described by "RepeatLastBotMessages()". This is useful when the user asks to repeat the last bot messages.
+
+===
+Write out the actions you want to take, one per line, in the order they should take place.
+Do not fill slots with abstract values or placeholders.
+Only use information provided by the user.
+Only start a flow if it's completely clear what the user wants. Imagine you were a person reading this message. If it's not 100% clear, clarify the next step.
+Don't be overly confident. Take a conservative approach and clarify before proceeding.
+If the user asks for two things which seem contradictory, clarify before starting a flow.
+If it's not clear whether the user wants to skip the step or to cancel the flow, cancel the flow.
+Strictly adhere to the provided action types listed above.
+Focus on the last message and take it one step at a time.
+Use the previous conversation steps only to aid understanding.
+
+Your action list:
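The rewritten prompt asks the LLM for parenthesised actions such as `StartFlow(transfer_money)` and `SetSlot(recipient, Freddy)` instead of the old free-text `start transfer_money` / `set recipient Freddy` format. A toy parser for that output shape is sketched below; it is not Rasa's own `command_parser.py`, just an illustration of how the new format splits cleanly into a command name and arguments:

```python
import re
from typing import List, Tuple

# Toy parser for the new parenthesised action format, for illustration only.
ACTION_RE = re.compile(r"^(?P<name>[A-Za-z]+)\((?P<args>.*)\)$")

def parse_action_list(llm_output: str) -> List[Tuple[str, List[str]]]:
    actions = []
    for line in llm_output.splitlines():
        match = ACTION_RE.match(line.strip())
        if not match:
            continue  # skip anything that is not a well-formed action line
        args = [arg.strip() for arg in match.group("args").split(",") if arg.strip()]
        actions.append((match.group("name"), args))
    return actions

print(parse_action_list("StartFlow(transfer_money)\nSetSlot(recipient, Freddy)"))
# [('StartFlow', ['transfer_money']), ('SetSlot', ['recipient', 'Freddy'])]
```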
rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py

@@ -51,7 +51,6 @@ from rasa.shared.utils.llm import (
     sanitize_message_for_prompt,
     tracker_as_readable_transcript,
 )
-from rasa.utils.beta import BetaNotEnabledException, ensure_beta_feature_is_enabled
 from rasa.utils.log_utils import log_llm

 COMMAND_PROMPT_FILE_NAME = "command_prompt.jinja2"
@@ -111,7 +110,6 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
         )

         self.trace_prompt_tokens = self.config.get("trace_prompt_tokens", False)
-        self.repeat_command_enabled = self.is_repeat_command_enabled()

     ### Implementations of LLMBasedCommandGenerator parent
     @staticmethod
@@ -198,9 +196,14 @@
         Returns:
             The commands generated by the llm.
         """
+        prior_commands = self._get_prior_commands(message)
+
         if tracker is None or flows.is_empty():
             # cannot do anything if there are no flows or no tracker
-            return []
+            return prior_commands
+
+        if self._should_skip_llm_call(prior_commands, flows, tracker):
+            return prior_commands

         try:
             commands = await self._predict_commands(message, flows, tracker)
@@ -209,7 +212,7 @@
             # "predict" the ErrorCommand
             commands = [ErrorCommand()]

-        if not commands:
+        if not commands and not prior_commands:
             # no commands are parsed or there's an invalid command
             structlogger.warning(
                 "single_step_llm_command_generator.predict_commands",
@@ -230,7 +233,10 @@
             commands=commands,
         )

-        return commands
+        domain = kwargs.get("domain")
+        commands = self._check_commands_against_slot_mappings(commands, tracker, domain)
+
+        return prior_commands + commands

     async def _predict_commands(
         self,
@@ -406,20 +412,6 @@
             "current_slot": current_slot,
             "current_slot_description": current_slot_description,
             "user_message": latest_user_message,
-            "is_repeat_command_enabled": self.repeat_command_enabled,
         }

         return self.compile_template(self.prompt_template).render(**inputs)
-
-    def is_repeat_command_enabled(self) -> bool:
-        """Check for feature flag"""
-        RASA_PRO_BETA_REPEAT_COMMAND_ENV_VAR_NAME = "RASA_PRO_BETA_REPEAT_COMMAND"
-        try:
-            ensure_beta_feature_is_enabled(
-                "Repeat Command",
-                env_flag=RASA_PRO_BETA_REPEAT_COMMAND_ENV_VAR_NAME,
-            )
-        except BetaNotEnabledException:
-            return False
-
-        return True
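Taken together, the `SingleStepLLMCommandGenerator` changes rework the `predict_commands` control flow: keep prior commands, optionally skip the LLM call, post-check predicted commands against slot mappings, and only then concatenate. The outline below is a hedged sketch using placeholder callables; the real method operates on `Message`, `FlowsList` and `DialogueStateTracker` objects:

```python
from typing import Awaitable, Callable, List

async def predict_commands_outline(
    prior_commands: List[str],
    have_tracker_and_flows: bool,
    should_skip_llm_call: Callable[[List[str]], bool],
    call_llm: Callable[[], Awaitable[List[str]]],
    check_against_slot_mappings: Callable[[List[str]], List[str]],
) -> List[str]:
    """Sketch of the reworked flow; commands are plain strings here for brevity."""
    if not have_tracker_and_flows:
        return prior_commands              # was: return []
    if should_skip_llm_call(prior_commands):
        return prior_commands              # new short-circuit in this release
    try:
        commands = await call_llm()
    except Exception:
        commands = ["ErrorCommand()"]      # "predict" the error command
    # new: SetSlot predictions are filtered against the domain's slot mappings
    commands = check_against_slot_mappings(commands)
    return prior_commands + commands       # was: return commands
```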
rasa/dialogue_understanding/patterns/correction.py

@@ -54,6 +54,8 @@ class CorrectionPatternFlowStackFrame(PatternFlowStackFrame):
     """The ID of the flow to reset to."""
     reset_step_id: Optional[str] = None
     """The ID of the step to reset to."""
+    new_slot_values: List[Any] = field(default_factory=list)
+    """The new values for the corrected slots."""

     @classmethod
     def type(cls) -> str:
@@ -70,6 +72,10 @@
         Returns:
             The created `DialogueStackFrame`.
         """
+        new_slot_values = [
+            val.get("value") for _, val in data["corrected_slots"].items()
+        ]
+
         return CorrectionPatternFlowStackFrame(
             frame_id=data["frame_id"],
             step_id=data["step_id"],
@@ -77,6 +83,7 @@
             corrected_slots=data["corrected_slots"],
             reset_flow_id=data["reset_flow_id"],
             reset_step_id=data["reset_step_id"],
+            new_slot_values=new_slot_values,
         )


@@ -118,7 +125,12 @@ class ActionCorrectFlowSlot(action.Action):
         )
         events.extend(tracker.create_stack_updated_events(updated_stack))

-        events.extend([SlotSet(k, v) for k, v in top.corrected_slots.items()])
+        events.extend(
+            [
+                SlotSet(name, value=val.get("value"), filled_by=val.get("filled_by"))
+                for name, val in top.corrected_slots.items()
+            ]
+        )
         return events

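The correction frame now expects each entry of `corrected_slots` to be a dict carrying the value and the extractor that produced it, and exposes the plain values separately as `new_slot_values` so the response template can join them. A small self-contained illustration of that data shape (the values and extractor labels are made up):

```python
# Illustrative data only: the new shape of `corrected_slots` implied by this diff,
# where each entry carries the value and the extractor that filled it.
corrected_slots = {
    "recipient": {"value": "Freddy", "filled_by": "LLM"},
    "amount": {"value": 42, "filled_by": "NLU"},
}

# Mirrors the list comprehension added to from_dict:
new_slot_values = [val.get("value") for _, val in corrected_slots.items()]
assert new_slot_values == ["Freddy", 42]

# utter_corrected_previous_input can then join the plain values:
# "Ok, I am updating {{ context.corrected_slots.keys()|join(', ') }} to
#  {{ context.new_slot_values | join(', ') }} respectively."
```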
 
rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml

@@ -1,6 +1,17 @@
 version: "3.1"
 responses:

+  utter_ask_continue_previous_flow:
+  - text: "Confirm if you would like to continue with the initial topic: {{context.interrupted_flow_id}}?"
+    metadata:
+      rephrase: True
+      template: jinja
+    buttons:
+    - title: Continue with the previous topic.
+      payload: /SetSlots(continue_previous_flow=True)
+    - title: Switch to new topic.
+      payload: /SetSlots(continue_previous_flow=False)
+
   utter_ask_rephrase:
   - text: I’m sorry I am unable to understand you, could you please rephrase?

@@ -9,6 +20,20 @@ responses:
     metadata:
       rephrase: True

+  utter_block_digressions:
+  - text: "We can look into {{ context.interrupting_flow_id }} later. Let's focus on the current topic: {{ context.interrupted_flow_id }}."
+    metadata:
+      rephrase: True
+      template: jinja
+  - text: "Let's continue with the current topic: {{ context.interrupted_flow_id }}."
+    condition:
+    - type: slot
+      name: continue_previous_flow
+      value: True
+    metadata:
+      rephrase: True
+      template: jinja
+
   utter_boolean_slot_rejection:
   - text: "Sorry, the value you provided, `{{value}}`, is not valid. Please respond with a valid value."
     metadata:
@@ -35,8 +60,14 @@ responses:
       rephrase: True
       template: jinja

+  utter_continue_interruption:
+  - text: "Let's continue with the chosen topic instead: {{ context.interrupting_flow_id }}."
+    metadata:
+      rephrase: True
+      template: jinja
+
   utter_corrected_previous_input:
-  - text: "Ok, I am updating {{ context.corrected_slots.keys()|join(', ') }} to {{ context.corrected_slots.values()|join(', ') }} respectively."
+  - text: "Ok, I am updating {{ context.corrected_slots.keys()|join(', ') }} to {{ context.new_slot_values | join(', ') }} respectively."
     metadata:
       rephrase: True
       template: jinja
@@ -119,7 +150,10 @@ slots:
     type: float
     initial_value: 0.0
     max_value: 1000000
-
+  continue_previous_flow:
+    type: bool
+    mappings:
+    - type: from_llm

 flows:
   pattern_cancel_flow:
@@ -163,6 +197,7 @@ flows:
     steps:
     - action: action_clarify_flows
    - action: utter_clarification_options_rasa
+    - action: action_listen

   pattern_code_change:
     description: Conversation repair flow for cleaning the stack after an assistant update
@@ -212,6 +247,31 @@ flows:
           next: END
         - else: END

+  pattern_handle_digressions:
+    description: Conversation repair flow for handling digressions
+    name: pattern handle digressions
+    steps:
+    - noop: true
+      id: branching
+      next:
+      - if: context.ask_confirm_digressions contains context.interrupting_flow_id
+        then: continue_previous_flow
+      - if: context.block_digressions contains context.interrupting_flow_id
+        then: block_digression
+      - else: continue_digression
+    - id: continue_previous_flow
+      collect: continue_previous_flow
+      next:
+      - if: slots.continue_previous_flow
+        then: block_digression
+      - else: continue_digression
+    - id: block_digression
+      action: action_block_digression
+      next: END
+    - id: continue_digression
+      action: action_continue_digression
+      next: END
+
   pattern_human_handoff:
     description: Conversation repair flow for switching users to a human agent if their request can't be handled
     name: pattern human handoff
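The new `pattern_handle_digressions` flow branches on whether the interrupting flow is listed in `ask_confirm_digressions` or `block_digressions` of the interrupted collect step or flow. The snippet below restates that branching in plain Python purely for illustration; in the assistant the flow engine evaluates the pypred conditions shown above, and nothing here is Rasa API:

```python
from typing import Set

def route_digression(
    interrupting_flow_id: str,
    ask_confirm_digressions: Set[str],
    block_digressions: Set[str],
    user_wants_to_continue_previous_flow: bool = False,
) -> str:
    if interrupting_flow_id in ask_confirm_digressions:
        # the collect step asks the user; their answer fills continue_previous_flow
        if user_wants_to_continue_previous_flow:
            return "block_digression"
        return "continue_digression"
    if interrupting_flow_id in block_digressions:
        return "block_digression"
    return "continue_digression"

# e.g. an interrupting "add_contact" flow that the interrupted flow wants confirmed:
assert route_digression("add_contact", {"add_contact"}, set(), True) == "block_digression"
assert route_digression("add_contact", set(), set()) == "continue_digression"
```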
rasa/dialogue_understanding/patterns/handle_digressions.py (new file)

@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import Any, Dict, Set
+
+from rasa.dialogue_understanding.stack.frames import PatternFlowStackFrame
+from rasa.shared.constants import RASA_DEFAULT_FLOW_PATTERN_PREFIX
+from rasa.shared.core.constants import (
+    KEY_ASK_CONFIRM_DIGRESSIONS,
+    KEY_BLOCK_DIGRESSIONS,
+)
+
+FLOW_PATTERN_HANDLE_DIGRESSIONS = (
+    RASA_DEFAULT_FLOW_PATTERN_PREFIX + "handle_digressions"
+)
+
+
+@dataclass
+class HandleDigressionsPatternFlowStackFrame(PatternFlowStackFrame):
+    """A pattern flow stack frame that gets added if an interruption is completed."""
+
+    flow_id: str = FLOW_PATTERN_HANDLE_DIGRESSIONS
+    """The ID of the flow."""
+    interrupting_flow_id: str = ""
+    """The ID of the flow that interrupted the active flow."""
+    interrupted_flow_id: str = ""
+    """The name of the active flow that was interrupted."""
+    interrupted_step_id: str = ""
+    """The ID of the step that was interrupted."""
+    ask_confirm_digressions: Set[str] = field(default_factory=set)
+    """The set of interrupting flow names to confirm."""
+    block_digressions: Set[str] = field(default_factory=set)
+    """The set of interrupting flow names to block."""
+
+    @classmethod
+    def type(cls) -> str:
+        """Returns the type of the frame."""
+        return FLOW_PATTERN_HANDLE_DIGRESSIONS
+
+    @staticmethod
+    def from_dict(data: Dict[str, Any]) -> HandleDigressionsPatternFlowStackFrame:
+        """Creates a `DialogueStackFrame` from a dictionary.
+
+        Args:
+            data: The dictionary to create the `DialogueStackFrame` from.
+
+        Returns:
+            The created `DialogueStackFrame`.
+        """
+        return HandleDigressionsPatternFlowStackFrame(
+            frame_id=data["frame_id"],
+            step_id=data["step_id"],
+            interrupted_step_id=data["interrupted_step_id"],
+            interrupted_flow_id=data["interrupted_flow_id"],
+            interrupting_flow_id=data["interrupting_flow_id"],
+            ask_confirm_digressions=set(data.get(KEY_ASK_CONFIRM_DIGRESSIONS, [])),
+            # This attribute must be converted to a set to enable usage
+            # of subset `contains` pypred operator in the default pattern
+            # conditional branching
+            block_digressions=set(data.get(KEY_BLOCK_DIGRESSIONS, [])),
+        )
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, HandleDigressionsPatternFlowStackFrame):
+            return False
+        return (
+            self.flow_id == other.flow_id
+            and self.interrupted_step_id == other.interrupted_step_id
+            and self.interrupted_flow_id == other.interrupted_flow_id
+            and self.interrupting_flow_id == other.interrupting_flow_id
+            and self.ask_confirm_digressions == other.ask_confirm_digressions
+            and self.block_digressions == other.block_digressions
+        )
+
+    def as_dict(self) -> Dict[str, Any]:
+        """Returns the frame as a dictionary."""
+        data = super().as_dict()
+        # converting back to list to avoid serialization issues
+        data[KEY_ASK_CONFIRM_DIGRESSIONS] = list(self.ask_confirm_digressions)
+        data[KEY_BLOCK_DIGRESSIONS] = list(self.block_digressions)
+        return data
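`from_dict` converts the digression lists to sets so the pattern's `contains` conditions work, while `as_dict` converts them back to lists so the frame stays JSON-serialisable. A standalone round-trip illustration of that conversion (made-up frame data, no Rasa imports):

```python
import json

# Standalone illustration of the set <-> list round-trip the new stack frame performs:
# sets let the pattern's `contains` conditions work, lists keep the frame serialisable.
frame_data = {
    "frame_id": "a1b2c3",
    "step_id": "branching",
    "interrupted_flow_id": "transfer_money",
    "interrupted_step_id": "ask_amount",
    "interrupting_flow_id": "add_contact",
    "ask_confirm_digressions": ["add_contact"],  # persisted as a list
    "block_digressions": [],
}

# from_dict direction: list -> set, enabling
# `context.ask_confirm_digressions contains context.interrupting_flow_id`
ask_confirm = set(frame_data.get("ask_confirm_digressions", []))
block = set(frame_data.get("block_digressions", []))

# as_dict direction: set -> list, avoiding "TypeError: Object of type set
# is not JSON serializable" when the tracker is persisted
serialised = {
    **frame_data,
    "ask_confirm_digressions": list(ask_confirm),
    "block_digressions": list(block),
}
json.dumps(serialised)  # works; it would fail if the sets were left in place
```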
rasa/dialogue_understanding/processor/command_processor.py

@@ -18,6 +18,9 @@ from rasa.dialogue_understanding.commands import (
 from rasa.dialogue_understanding.commands.handle_code_change_command import (
     HandleCodeChangeCommand,
 )
+from rasa.dialogue_understanding.commands.handle_digressions_command import (
+    HandleDigressionsCommand,
+)
 from rasa.dialogue_understanding.commands.set_slot_command import SetSlotExtractor
 from rasa.dialogue_understanding.patterns.chitchat import FLOW_PATTERN_CHITCHAT
 from rasa.dialogue_understanding.patterns.collect_information import (
@@ -42,6 +45,8 @@ from rasa.shared.constants import (
 from rasa.shared.core.constants import (
     ACTION_TRIGGER_CHITCHAT,
     FLOW_HASHES_SLOT,
+    KEY_ALLOW_NLU_CORRECTION,
+    KEY_MAPPING_TYPE,
     SlotMappingType,
 )
 from rasa.shared.core.events import Event, SlotSet
@@ -395,6 +400,28 @@ def clean_up_commands(
                 command=command,
             )

+        elif isinstance(command, StartFlowCommand) and active_flow is not None:
+            # push handle digressions command if we are at a collect step of
+            # a flow and a new flow is started
+            collect_info = get_current_collect_step(tracker.stack, all_flows)
+            current_flow = all_flows.flow_by_id(active_flow)
+            current_flow_condition = current_flow and (
+                current_flow.ask_confirm_digressions or current_flow.block_digressions
+            )
+
+            if collect_info and (
+                collect_info.ask_confirm_digressions
+                or collect_info.block_digressions
+                or current_flow_condition
+            ):
+                clean_commands.append(HandleDigressionsCommand(flow=command.flow))
+                structlogger.debug(
+                    "command_processor.clean_up_commands.push_handle_digressions",
+                    command=command,
+                )
+            else:
+                clean_commands.append(command)
+
         # handle chitchat command differently from other free-form answer commands
         elif isinstance(command, ChitChatAnswerCommand):
             clean_commands = clean_up_chitchat_command(
@@ -527,13 +554,48 @@ def clean_up_slot_command(
         )
         return resulting_commands

-    if not should_slot_be_set(slot, command):
+    if not should_slot_be_set(slot, command, resulting_commands):
+        structlogger.debug(
+            "command_processor.clean_up_slot_command.skip_command.extractor_"
+            "does_not_match_slot_mapping",
+            extractor=command.extractor,
+            slot_name=slot.name,
+        )
+
+        # prevent adding a cannot handle command in case commands_so_far already
+        # contains a valid prior set slot command for the same slot whose current
+        # slot command was rejected by should_slot_be_set
+        slot_command_exists_already = any(
+            isinstance(command, SetSlotCommand) and command.name == slot.name
+            for command in resulting_commands
+        )
+
         cannot_handle = CannotHandleCommand(reason=CANNOT_HANDLE_REASON)
-        if cannot_handle not in resulting_commands:
+        if not slot_command_exists_already and cannot_handle not in resulting_commands:
             resulting_commands.append(cannot_handle)

         return resulting_commands

+    if (
+        slot.filled_by == SetSlotExtractor.NLU.value
+        and command.extractor == SetSlotExtractor.LLM.value
+    ):
+        allow_nlu_correction = any(
+            [
+                mapping.get(KEY_ALLOW_NLU_CORRECTION, False)
+                for mapping in slot.mappings
+                if mapping.get(KEY_MAPPING_TYPE) == SlotMappingType.FROM_LLM.value
+            ]
+        )
+
+        if not allow_nlu_correction:
+            structlogger.debug(
+                "command_processor.clean_up_slot_command"
+                ".skip_command.disallow_llm_correction_of_nlu_set_value",
+                command=command,
+            )
+            return resulting_commands
+
     if command.name in slots_so_far and command.name != ROUTE_TO_CALM_SLOT:
         current_collect_info = get_current_collect_step(stack, all_flows)

@@ -574,7 +636,7 @@
         )

         # Group all corrections into one command
-        corrected_slot = CorrectedSlot(command.name, command.value)
+        corrected_slot = CorrectedSlot(command.name, command.value, command.extractor)
         for c in resulting_commands:
             if isinstance(c, CorrectSlotsCommand):
                 c.corrected_slots.append(corrected_slot)
@@ -658,7 +720,9 @@
         return resulting_commands


-def should_slot_be_set(slot: Slot, command: SetSlotCommand) -> bool:
+def should_slot_be_set(
+    slot: Slot, command: SetSlotCommand, commands_so_far: Optional[List[Command]] = None
+) -> bool:
     """Check if a slot should be set by a command."""
     if command.extractor == SetSlotExtractor.COMMAND_PAYLOAD_READER.value:
         # if the command is issued by the command payload reader, it means the slot
@@ -666,37 +730,62 @@ def should_slot_be_set(slot: Slot, command: SetSlotCommand) -> bool:
         # we can always set it
         return True

+    if commands_so_far is None:
+        commands_so_far = []
+
+    set_slot_commands_so_far = [
+        command
+        for command in commands_so_far
+        if isinstance(command, SetSlotCommand) and command.name == slot.name
+    ]
+
     slot_mappings = slot.mappings

-    if not slot_mappings:
-        slot_mappings = [{"type": SlotMappingType.FROM_LLM.value}]
+    if not slot.mappings:
+        slot_mappings = [{KEY_MAPPING_TYPE: SlotMappingType.FROM_LLM.value}]

-    for mapping in slot_mappings:
-        mapping_type = SlotMappingType(
-            mapping.get("type", SlotMappingType.FROM_LLM.value)
-        )
+    mapping_types = [
+        SlotMappingType(mapping.get(KEY_MAPPING_TYPE, SlotMappingType.FROM_LLM.value))
+        for mapping in slot_mappings
+    ]

-        should_be_set_by_llm = (
-            command.extractor == SetSlotExtractor.LLM.value
-            and mapping_type == SlotMappingType.FROM_LLM
-        )
-        should_be_set_by_nlu = (
-            command.extractor == SetSlotExtractor.NLU.value
-            and mapping_type.is_predefined_type()
+    slot_has_nlu_mapping = any(
+        [mapping_type.is_predefined_type() for mapping_type in mapping_types]
+    )
+    slot_has_llm_mapping = any(
+        [mapping_type == SlotMappingType.FROM_LLM for mapping_type in mapping_types]
+    )
+    slot_has_custom_mapping = any(
+        [mapping_type == SlotMappingType.CUSTOM for mapping_type in mapping_types]
+    )
+
+    if set_slot_commands_so_far and command.extractor == SetSlotExtractor.LLM.value:
+        # covers the following scenarios:
+        # scenario 1: NLU mapping extracts a value for slot_a → If LLM extracts a value for slot_a, it is discarded.  # noqa: E501
+        # scenario 2: NLU mapping is unable to extract a value for slot_a → If LLM extracts a value for slot_a, it is accepted.  # noqa: E501
+        command_has_nlu_extractor = any(
+            [
+                command.extractor == SetSlotExtractor.NLU.value
+                for command in set_slot_commands_so_far
+            ]
         )
+        return not command_has_nlu_extractor and slot_has_llm_mapping

-        if should_be_set_by_llm or should_be_set_by_nlu:
-            # if the extractor matches the mapping type, we can continue
-            # setting the slot
-            break
+    if (
+        slot_has_nlu_mapping
+        and command.extractor == SetSlotExtractor.LLM.value
+        and not slot_has_llm_mapping
+    ):
+        return False

-        structlogger.debug(
-            "command_processor.clean_up_slot_command.skip_command.extractor_"
-            "does_not_match_slot_mapping",
-            extractor=command.extractor,
-            slot_name=slot.name,
-            mapping_type=mapping_type.value,
-        )
+    if (
+        slot_has_llm_mapping
+        and command.extractor == SetSlotExtractor.NLU.value
+        and not slot_has_nlu_mapping
+    ):
+        return False
+
+    if slot_has_custom_mapping and not (slot_has_nlu_mapping or slot_has_llm_mapping):
         return False

     return True
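The rewritten `should_slot_be_set` no longer loops over mappings; it computes which mapping kinds the slot has and then resolves conflicts between NLU- and LLM-issued `SetSlot` commands, taking commands already seen in the same turn into account. A simplified standalone restatement of that decision follows; it omits the command-payload-reader shortcut and the custom-mapping branch handled by the real function:

```python
from typing import List, Optional

def should_slot_be_set_sketch(
    extractor: str,                    # "LLM" or "NLU"
    slot_has_llm_mapping: bool,
    slot_has_nlu_mapping: bool,
    prior_extractors_for_slot: Optional[List[str]] = None,
) -> bool:
    """Simplified restatement for illustration, not the Rasa implementation."""
    prior = prior_extractors_for_slot or []
    if prior and extractor == "LLM":
        # scenario 1: NLU already produced a value for this slot -> discard the LLM value
        # scenario 2: no NLU value so far -> accept the LLM value if the slot allows it
        return "NLU" not in prior and slot_has_llm_mapping
    if slot_has_nlu_mapping and extractor == "LLM" and not slot_has_llm_mapping:
        return False
    if slot_has_llm_mapping and extractor == "NLU" and not slot_has_nlu_mapping:
        return False
    return True

assert not should_slot_be_set_sketch("LLM", True, True, prior_extractors_for_slot=["NLU"])
assert should_slot_be_set_sketch("LLM", True, True, prior_extractors_for_slot=[])
```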
rasa/dialogue_understanding/utils.py

@@ -5,7 +5,10 @@ from rasa.dialogue_understanding.commands import Command
 from rasa.dialogue_understanding.constants import (
     RASA_RECORD_COMMANDS_AND_PROMPTS_ENV_VAR_NAME,
 )
+from rasa.shared.constants import ROUTE_TO_CALM_SLOT
+from rasa.shared.core.trackers import DialogueStateTracker
 from rasa.shared.nlu.constants import (
+    COMMANDS,
     KEY_COMPONENT_NAME,
     KEY_LLM_RESPONSE_METADATA,
     KEY_PROMPT_NAME,
@@ -13,6 +16,7 @@ from rasa.shared.nlu.constants import (
     KEY_USER_PROMPT,
     PREDICTED_COMMANDS,
     PROMPTS,
+    SET_SLOT_COMMAND,
 )
 from rasa.shared.nlu.training_data.message import Message
 from rasa.shared.providers.llm.llm_response import LLMResponse
@@ -131,3 +135,30 @@ def add_prompt_to_message_parse_data(

     # Update the message with the new prompts list.
     message.set(PROMPTS, prompts, add_to_output=True)
+
+
+def _handle_via_nlu_in_coexistence(
+    tracker: Optional[DialogueStateTracker], message: Message
+) -> bool:
+    """Check if the message should be handled by the NLU subsystem in coexistence mode."""  # noqa: E501
+    if not tracker:
+        return False
+
+    if not tracker.has_coexistence_routing_slot:
+        return False
+
+    value = tracker.get_slot(ROUTE_TO_CALM_SLOT)
+    if value is not None:
+        return not value
+
+    # routing slot has been reset so we need to check
+    # the command issued by the Router component
+    if message.get(COMMANDS):
+        for command in message.get(COMMANDS):
+            if (
+                command.get("command") == SET_SLOT_COMMAND
+                and command.get("name") == ROUTE_TO_CALM_SLOT
+            ):
+                return not command.get("value")
+
+    return False
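The new `_handle_via_nlu_in_coexistence` helper decides whether a message should stay with the NLU-based subsystem when coexistence routing is active: first by the routing slot's value, then, if the slot was reset, by the `SetSlot` command the Router attached to the message. A plain-dict illustration of that decision; the slot name and the "set slot" literal below are stand-ins, not the actual Rasa constants:

```python
ROUTE_TO_CALM_SLOT = "route_session_to_calm"  # stand-in name for illustration
SET_SLOT_COMMAND = "set slot"                 # stand-in command literal

def handled_by_nlu(route_slot_value, message_commands) -> bool:
    # 1. an explicit routing slot value wins (False means: stay with the NLU subsystem)
    if route_slot_value is not None:
        return not route_slot_value
    # 2. otherwise look for the SetSlot command the Router attached to the message
    for command in message_commands or []:
        if command.get("command") == SET_SLOT_COMMAND and command.get("name") == ROUTE_TO_CALM_SLOT:
            return not command.get("value")
    return False

assert handled_by_nlu(False, []) is True
assert handled_by_nlu(None, [{"command": SET_SLOT_COMMAND, "name": ROUTE_TO_CALM_SLOT, "value": True}]) is False
```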