rasa-pro 3.12.0rc1__py3-none-any.whl → 3.12.0rc3__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.

This version of rasa-pro might be problematic.
Files changed (70)
  1. README.md +10 -13
  2. rasa/cli/dialogue_understanding_test.py +5 -8
  3. rasa/cli/llm_fine_tuning.py +47 -12
  4. rasa/cli/project_templates/calm/domain/list_contacts.yml +1 -2
  5. rasa/cli/project_templates/calm/domain/remove_contact.yml +1 -2
  6. rasa/cli/project_templates/calm/domain/shared.yml +1 -4
  7. rasa/core/actions/action_handle_digressions.py +35 -13
  8. rasa/core/channels/voice_stream/asr/asr_event.py +5 -0
  9. rasa/core/channels/voice_stream/audiocodes.py +19 -6
  10. rasa/core/channels/voice_stream/call_state.py +3 -9
  11. rasa/core/channels/voice_stream/genesys.py +40 -55
  12. rasa/core/channels/voice_stream/voice_channel.py +61 -39
  13. rasa/core/policies/flows/flow_executor.py +7 -2
  14. rasa/core/processor.py +0 -1
  15. rasa/core/tracker_store.py +123 -34
  16. rasa/dialogue_understanding/commands/can_not_handle_command.py +1 -1
  17. rasa/dialogue_understanding/commands/cancel_flow_command.py +1 -1
  18. rasa/dialogue_understanding/commands/change_flow_command.py +1 -1
  19. rasa/dialogue_understanding/commands/chit_chat_answer_command.py +1 -1
  20. rasa/dialogue_understanding/commands/clarify_command.py +1 -1
  21. rasa/dialogue_understanding/commands/command_syntax_manager.py +1 -1
  22. rasa/dialogue_understanding/commands/handle_digressions_command.py +1 -7
  23. rasa/dialogue_understanding/commands/human_handoff_command.py +1 -1
  24. rasa/dialogue_understanding/commands/knowledge_answer_command.py +1 -1
  25. rasa/dialogue_understanding/commands/repeat_bot_messages_command.py +1 -1
  26. rasa/dialogue_understanding/commands/set_slot_command.py +2 -1
  27. rasa/dialogue_understanding/commands/skip_question_command.py +1 -1
  28. rasa/dialogue_understanding/commands/start_flow_command.py +3 -1
  29. rasa/dialogue_understanding/commands/utils.py +2 -32
  30. rasa/dialogue_understanding/generator/command_parser.py +41 -0
  31. rasa/dialogue_understanding/generator/constants.py +7 -2
  32. rasa/dialogue_understanding/generator/llm_based_command_generator.py +9 -2
  33. rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +1 -1
  34. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2 +29 -48
  35. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_fallback_other_models_template.jinja2 +57 -0
  36. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 +23 -50
  37. rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +141 -27
  38. rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +32 -18
  39. rasa/dialogue_understanding/processor/command_processor.py +43 -23
  40. rasa/dialogue_understanding/stack/utils.py +49 -6
  41. rasa/dialogue_understanding_test/du_test_case.py +30 -10
  42. rasa/dialogue_understanding_test/du_test_result.py +1 -1
  43. rasa/e2e_test/assertions.py +6 -8
  44. rasa/e2e_test/llm_judge_prompts/answer_relevance_prompt_template.jinja2 +5 -1
  45. rasa/e2e_test/llm_judge_prompts/groundedness_prompt_template.jinja2 +4 -0
  46. rasa/engine/language.py +67 -25
  47. rasa/llm_fine_tuning/conversations.py +3 -31
  48. rasa/llm_fine_tuning/llm_data_preparation_module.py +5 -3
  49. rasa/llm_fine_tuning/paraphrasing/rephrase_validator.py +18 -13
  50. rasa/llm_fine_tuning/paraphrasing_module.py +6 -2
  51. rasa/llm_fine_tuning/train_test_split_module.py +27 -27
  52. rasa/llm_fine_tuning/utils.py +7 -0
  53. rasa/shared/constants.py +4 -0
  54. rasa/shared/core/domain.py +2 -0
  55. rasa/shared/core/slots.py +6 -0
  56. rasa/shared/providers/_configs/azure_entra_id_config.py +8 -8
  57. rasa/shared/providers/llm/litellm_router_llm_client.py +1 -0
  58. rasa/shared/providers/llm/openai_llm_client.py +2 -2
  59. rasa/shared/providers/router/_base_litellm_router_client.py +38 -7
  60. rasa/shared/utils/llm.py +69 -10
  61. rasa/telemetry.py +13 -3
  62. rasa/tracing/instrumentation/attribute_extractors.py +2 -5
  63. rasa/validator.py +2 -2
  64. rasa/version.py +1 -1
  65. {rasa_pro-3.12.0rc1.dist-info → rasa_pro-3.12.0rc3.dist-info}/METADATA +12 -14
  66. {rasa_pro-3.12.0rc1.dist-info → rasa_pro-3.12.0rc3.dist-info}/RECORD +69 -68
  67. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_default.jinja2 +0 -68
  68. {rasa_pro-3.12.0rc1.dist-info → rasa_pro-3.12.0rc3.dist-info}/NOTICE +0 -0
  69. {rasa_pro-3.12.0rc1.dist-info → rasa_pro-3.12.0rc3.dist-info}/WHEEL +0 -0
  70. {rasa_pro-3.12.0rc1.dist-info → rasa_pro-3.12.0rc3.dist-info}/entry_points.txt +0 -0
rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py

@@ -1,3 +1,4 @@
+import copy
 from typing import Any, Dict, List, Optional, Text
 
 import structlog
@@ -19,10 +20,16 @@ from rasa.dialogue_understanding.generator.command_parser import (
 )
 from rasa.dialogue_understanding.generator.constants import (
     COMMAND_PROMPT_FILE_NAME,
-    DEFAULT_LLM_CONFIG,
+    DEFAULT_OPENAI_MAX_GENERATED_TOKENS,
     FLOW_RETRIEVAL_KEY,
     LLM_BASED_COMMAND_GENERATOR_CONFIG_FILE,
     LLM_CONFIG_KEY,
+    MODEL_CONFIG_KEY,
+    MODEL_NAME_CLAUDE_3_5_SONNET_20240620,
+    MODEL_NAME_GPT_4O_2024_11_20,
+    OPENAI_PROVIDER,
+    PROVIDER_CONFIG_KEY,
+    TIMEOUT_CONFIG_KEY,
     USER_INPUT_CONFIG_KEY,
 )
 from rasa.dialogue_understanding.generator.flow_retrieval import FlowRetrieval
@@ -36,9 +43,14 @@ from rasa.engine.recipes.default_recipe import DefaultV1Recipe
 from rasa.engine.storage.resource import Resource
 from rasa.engine.storage.storage import ModelStorage
 from rasa.shared.constants import (
+    ANTHROPIC_PROVIDER,
+    AWS_BEDROCK_PROVIDER,
+    AZURE_OPENAI_PROVIDER,
     EMBEDDINGS_CONFIG_KEY,
+    MAX_TOKENS_CONFIG_KEY,
     PROMPT_TEMPLATE_CONFIG_KEY,
     ROUTE_TO_CALM_SLOT,
+    TEMPERATURE_CONFIG_KEY,
 )
 from rasa.shared.core.flows import FlowsList
 from rasa.shared.core.trackers import DialogueStateTracker
@@ -61,19 +73,92 @@ from rasa.utils.log_utils import log_llm
 structlogger = structlog.get_logger()
 
 
+DEFAULT_LLM_CONFIG = {
+    PROVIDER_CONFIG_KEY: OPENAI_PROVIDER,
+    MODEL_CONFIG_KEY: MODEL_NAME_GPT_4O_2024_11_20,
+    TEMPERATURE_CONFIG_KEY: 0.0,
+    MAX_TOKENS_CONFIG_KEY: DEFAULT_OPENAI_MAX_GENERATED_TOKENS,
+    TIMEOUT_CONFIG_KEY: 7,
+}
+
 MODEL_PROMPT_MAPPER = {
-    "openai/gpt-4o-2024-11-20": "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2",
-    "azure/gpt-4o-2024-11-20": "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2",
-    "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0": (
+    f"{OPENAI_PROVIDER}/{MODEL_NAME_GPT_4O_2024_11_20}": (
+        "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2"
+    ),
+    f"{AZURE_OPENAI_PROVIDER}/{MODEL_NAME_GPT_4O_2024_11_20}": (
+        "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2"
+    ),
+    f"{AWS_BEDROCK_PROVIDER}/anthropic.{MODEL_NAME_CLAUDE_3_5_SONNET_20240620}-v1:0": (
         "command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2"
     ),
-    "anthropic/claude-3-5-sonnet-20240620": (
+    f"{ANTHROPIC_PROVIDER}/{MODEL_NAME_CLAUDE_3_5_SONNET_20240620}": (
         "command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2"
     ),
 }
 
+# When model is not configured, then we use the default prompt template
+DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME = (
+    "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2"
+)
+# When the configured model is not found in the model prompt mapper, then we use the
+# fallback prompt template
+FALLBACK_COMMAND_PROMPT_TEMPLATE_FILE_NAME = (
+    "command_prompt_v2_fallback_other_models_template.jinja2"
+)
+
+
+class CommandParserValidatorSingleton:
+    """Singleton class to validate the command parser.
+
+    This class is used to validate the command parser. It keeps track of the number of
+    consecutive turns where no commands are parsed by the command parser. If the
+    number of consecutive turns exceeds a certain threshold, a warning is logged.
+    The prompt can use a DSL syntax that can be incompatible with the command syntax
+    used by the command parser. This class helps to detect such incompatibilities.
+    """
+
+    MAX_CONSECUTIVE_TURNS_NO_COMMAND_PREDICTED = 5
+    _NO_COMMAND_PREDICTED_TURN_COUNTER = 0
+    _command_parser_validated = False
+
+    @classmethod
+    def get_no_command_predicted_turn_counter(cls) -> int:
+        return cls._NO_COMMAND_PREDICTED_TURN_COUNTER
+
+    @classmethod
+    def should_validate_command_parser(cls) -> bool:
+        return not cls._command_parser_validated
 
-DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME = "command_prompt_v2_default.jinja2"
+    @classmethod
+    def reset_command_parser_validation(cls) -> None:
+        cls._NO_COMMAND_PREDICTED_TURN_COUNTER = 0
+        cls._command_parser_validated = False
+
+    @classmethod
+    def validate_if_commands_are_parsed_from_llm_response(
+        cls, commands: List[Command], llm_response: str
+    ) -> None:
+        if llm_response and not commands:
+            cls._NO_COMMAND_PREDICTED_TURN_COUNTER += 1
+        else:
+            # Reset the counter if commands are generated, and mark
+            # the command parser as validated.
+            cls._NO_COMMAND_PREDICTED_TURN_COUNTER = 0
+            cls._command_parser_validated = True
+            return
+
+        if (
+            cls._NO_COMMAND_PREDICTED_TURN_COUNTER
+            >= cls.MAX_CONSECUTIVE_TURNS_NO_COMMAND_PREDICTED
+        ):
+            structlogger.warning(
+                "llm_command_generator.predict_commands.command_parser_not_working",
+                event_info=(
+                    f"No commands were generated by the command parser for the last "
+                    f"{cls._NO_COMMAND_PREDICTED_TURN_COUNTER} times. Check if you "
+                    "are running incompatible prompt and LLM command generator."
+                ),
+            )
 
 
 @DefaultV1Recipe.register(
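A minimal, self-contained sketch of the turn-counting behaviour the new `CommandParserValidatorSingleton` implements (a stand-in class for illustration, not the Rasa code itself; the threshold of five turns and the reset-on-success rule are taken from the hunk above):

```python
from typing import List


class ParserHealthCheck:
    """Stand-in for CommandParserValidatorSingleton; illustration only."""

    MAX_EMPTY_TURNS = 5  # mirrors MAX_CONSECUTIVE_TURNS_NO_COMMAND_PREDICTED

    def __init__(self) -> None:
        self.empty_turns = 0
        self.validated = False

    def record(self, commands: List[str], llm_response: str) -> None:
        if self.validated:
            return
        if llm_response and not commands:
            # the LLM answered, but nothing could be parsed into a command
            self.empty_turns += 1
            if self.empty_turns >= self.MAX_EMPTY_TURNS:
                print("warning: prompt and command parser look incompatible")
        else:
            # a single parsed command proves prompt and parser are compatible
            self.empty_turns = 0
            self.validated = True


check = ParserHealthCheck()
for _ in range(5):
    check.record(commands=[], llm_response="SomeDslTheParserCannotRead()")
# the fifth consecutive empty turn prints the warning

check.record(commands=["StartFlow(transfer_money)"], llm_response="StartFlow(transfer_money)")
assert check.validated
```

Keeping the state at class level, as the diff does, makes the counter a single process-wide signal that the configured prompt and the command parser disagree.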
@@ -101,23 +186,19 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
             **kwargs,
         )
 
-        # Get the default prompt template based on the model name
-        default_command_prompt_template = get_default_prompt_template_based_on_model(
-            config, MODEL_PROMPT_MAPPER, DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME
+        # Get the prompt template from the config or the default prompt template.
+        self.prompt_template = self.resolve_component_prompt_template(
+            self.config, prompt_template
         )
 
-        # Set the prompt template either from the config or the default prompt template.
-        self.prompt_template = prompt_template or get_prompt_template(
-            config.get(PROMPT_TEMPLATE_CONFIG_KEY),
-            default_command_prompt_template,
+        # Set the command syntax version to v2
+        CommandSyntaxManager.set_syntax_version(
+            self.get_component_command_syntax_version()
         )
 
         self.trace_prompt_tokens = self.config.get("trace_prompt_tokens", False)
         self.repeat_command_enabled = self.is_repeat_command_enabled()
 
-        # Set the command syntax version to v2
-        CommandSyntaxManager.set_syntax_version(CommandSyntaxVersion.v2)
-
     ### Implementations of LLMBasedCommandGenerator parent
     @staticmethod
     def get_default_config() -> Dict[str, Any]:
@@ -164,7 +245,7 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
         llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
         cls.perform_llm_health_check(
             llm_config,
-            DEFAULT_LLM_CONFIG,
+            cls.get_default_llm_config(),
             "compact_llm_command_generator.load",
             cls.__name__,
         )
@@ -230,7 +311,7 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
            structlogger.warning(
                "llm_command_generator.predict_commands",
                message="No commands were predicted as the LLM response could "
-               "not be parsed or the LLM responded with an invalid command."
+               "not be parsed or the LLM responded with an invalid command. "
                "Returning a CannotHandleCommand instead.",
            )
            commands = [CannotHandleCommand()]
@@ -306,6 +387,11 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
 
         commands = self.parse_commands(action_list, tracker, flows)
 
+        if CommandParserValidatorSingleton.should_validate_command_parser():
+            CommandParserValidatorSingleton.validate_if_commands_are_parsed_from_llm_response(
+                commands, action_list
+            )
+
         self._update_message_parse_data_for_fine_tuning(message, commands, flow_prompt)
         add_commands_to_message_parse_data(message, self.__class__.__name__, commands)
         add_prompt_to_message_parse_data(
@@ -350,8 +436,8 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
         """
         commands = parse_commands_using_command_parsers(actions, flows)
         if not commands:
-            structlogger.debug(
-                f"{cls.__class__.__name__}.parse_commands",
+            structlogger.warning(
+                f"{cls.__name__}.parse_commands",
                 message="No commands were parsed from the LLM actions.",
                 actions=actions,
             )
@@ -441,13 +527,6 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
     def fingerprint_addon(cls: Any, config: Dict[str, Any]) -> Optional[str]:
         """Add a fingerprint for the graph."""
         # Get the default prompt template based on the model name
-        default_command_prompt_template = get_default_prompt_template_based_on_model(
-            config, MODEL_PROMPT_MAPPER, DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME
-        )
-        prompt_template = get_prompt_template(
-            config.get(PROMPT_TEMPLATE_CONFIG_KEY),
-            default_command_prompt_template,
-        )
         llm_config = resolve_model_client_config(
             config.get(LLM_CONFIG_KEY), CompactLLMCommandGenerator.__name__
         )
@@ -455,6 +534,41 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
             config.get(FLOW_RETRIEVAL_KEY, {}).get(EMBEDDINGS_CONFIG_KEY),
             FlowRetrieval.__name__,
         )
+
+        # Create a copy of the config to avoid modifying the original config
+        # and update the llm config with the resolved llm config.
+        _config_copy = copy.deepcopy(config)
+        _config_copy[LLM_CONFIG_KEY] = llm_config
+        prompt_template = cls.resolve_component_prompt_template(_config_copy)
+
         return deep_container_fingerprint(
             [prompt_template, llm_config, embedding_config]
         )
+
+    @staticmethod
+    def get_default_llm_config() -> Dict[str, Any]:
+        """Get the default LLM config for the command generator."""
+        return DEFAULT_LLM_CONFIG
+
+    @staticmethod
+    def get_component_command_syntax_version() -> CommandSyntaxVersion:
+        return CommandSyntaxVersion.v2
+
+    @staticmethod
+    def resolve_component_prompt_template(
+        config: Dict[str, Any], prompt_template: Optional[str] = None
+    ) -> Optional[str]:
+        """Get the prompt template from the config or the default prompt template."""
+        # Get the default prompt template based on the model name.
+        default_command_prompt_template = get_default_prompt_template_based_on_model(
+            config.get(LLM_CONFIG_KEY, {}) or {},
+            MODEL_PROMPT_MAPPER,
+            DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME,
+            FALLBACK_COMMAND_PROMPT_TEMPLATE_FILE_NAME,
+        )
+
+        # Return the prompt template either from the config or the default prompt.
+        return prompt_template or get_prompt_template(
+            config.get(PROMPT_TEMPLATE_CONFIG_KEY),
+            default_command_prompt_template,
+        )
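The template-selection rule spelled out in the comments above (an explicitly configured template wins, otherwise a model-specific default, otherwise the fallback template) can be sketched as follows. The real lookup happens in `get_default_prompt_template_based_on_model` in `rasa/shared/utils/llm.py`; the function below is only an illustration and its signature is an assumption:

```python
from typing import Any, Dict, Optional

# keys taken from the removed literal entries in the diff above
MODEL_PROMPT_MAPPER = {
    "openai/gpt-4o-2024-11-20": "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2",
    "anthropic/claude-3-5-sonnet-20240620": (
        "command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2"
    ),
}
DEFAULT_TEMPLATE = "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2"
FALLBACK_TEMPLATE = "command_prompt_v2_fallback_other_models_template.jinja2"


def pick_prompt_template(llm_config: Optional[Dict[str, Any]]) -> str:
    """Illustrative stand-in: pick a bundled prompt template for a provider/model pair."""
    llm_config = llm_config or {}
    # "provider" / "model" mirror PROVIDER_CONFIG_KEY / MODEL_CONFIG_KEY
    provider, model = llm_config.get("provider"), llm_config.get("model")
    if not provider or not model:
        # model not configured -> ship the default (GPT-4o) template
        return DEFAULT_TEMPLATE
    # known provider/model pairs get a template tuned for that model,
    # anything else falls back to the generic template
    return MODEL_PROMPT_MAPPER.get(f"{provider}/{model}", FALLBACK_TEMPLATE)


assert pick_prompt_template(None) == DEFAULT_TEMPLATE
claude = {"provider": "anthropic", "model": "claude-3-5-sonnet-20240620"}
assert "claude" in pick_prompt_template(claude)
other = {"provider": "mistral", "model": "mistral-large"}
assert pick_prompt_template(other) == FALLBACK_TEMPLATE
```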
rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py

@@ -8,6 +8,7 @@ from rasa.dialogue_understanding.commands.command_syntax_manager import (
     CommandSyntaxVersion,
 )
 from rasa.dialogue_understanding.generator.constants import (
+    DEFAULT_LLM_CONFIG,
     FLOW_RETRIEVAL_KEY,
     LLM_CONFIG_KEY,
     USER_INPUT_CONFIG_KEY,
@@ -71,18 +72,14 @@ class SingleStepLLMCommandGenerator(CompactLLMCommandGenerator):
                 "Please use the config parameter 'prompt_template' instead. "
             ),
         )
-        config_prompt = (
-            config.get(PROMPT_CONFIG_KEY)
-            or config.get(PROMPT_TEMPLATE_CONFIG_KEY)
-            or None
-        )
-        self.prompt_template = prompt_template or get_prompt_template(
-            config_prompt,
-            DEFAULT_COMMAND_PROMPT_TEMPLATE,
+        self.prompt_template = self.resolve_component_prompt_template(
+            config, prompt_template
         )
 
         # Set the command syntax version to v1
-        CommandSyntaxManager.set_syntax_version(CommandSyntaxVersion.v1)
+        CommandSyntaxManager.set_syntax_version(
+            self.get_component_command_syntax_version()
+        )
 
     @staticmethod
     def get_default_config() -> Dict[str, Any]:
@@ -98,15 +95,7 @@ class SingleStepLLMCommandGenerator(CompactLLMCommandGenerator):
     @classmethod
     def fingerprint_addon(cls: Any, config: Dict[str, Any]) -> Optional[str]:
         """Add a fingerprint for the graph."""
-        config_prompt = (
-            config.get(PROMPT_CONFIG_KEY)
-            or config.get(PROMPT_TEMPLATE_CONFIG_KEY)
-            or None
-        )
-        prompt_template = get_prompt_template(
-            config_prompt,
-            DEFAULT_COMMAND_PROMPT_TEMPLATE,
-        )
+        prompt_template = cls.resolve_component_prompt_template(config)
         llm_config = resolve_model_client_config(
             config.get(LLM_CONFIG_KEY), SingleStepLLMCommandGenerator.__name__
         )
@@ -117,3 +106,28 @@ class SingleStepLLMCommandGenerator(CompactLLMCommandGenerator):
         return deep_container_fingerprint(
             [prompt_template, llm_config, embedding_config]
         )
+
+    @staticmethod
+    def get_default_llm_config() -> Dict[str, Any]:
+        """Get the default LLM config for the command generator."""
+        return DEFAULT_LLM_CONFIG
+
+    @staticmethod
+    def get_component_command_syntax_version() -> CommandSyntaxVersion:
+        return CommandSyntaxVersion.v1
+
+    @staticmethod
+    def resolve_component_prompt_template(
+        config: Dict[str, Any], prompt_template: Optional[str] = None
+    ) -> Optional[str]:
+        """Get the prompt template from the config or the default prompt template."""
+        # Get the default prompt template based on the model name.
+        config_prompt = (
+            config.get(PROMPT_CONFIG_KEY)
+            or config.get(PROMPT_TEMPLATE_CONFIG_KEY)
+            or None
+        )
+        return prompt_template or get_prompt_template(
+            config_prompt,
+            DEFAULT_COMMAND_PROMPT_TEMPLATE,
+        )
rasa/dialogue_understanding/processor/command_processor.py

@@ -42,6 +42,7 @@ from rasa.dialogue_understanding.stack.frames import (
 from rasa.dialogue_understanding.stack.utils import (
     filled_slots_for_active_flow,
     top_flow_frame,
+    top_user_flow_frame,
 )
 from rasa.engine.graph import ExecutionContext
 from rasa.shared.constants import (
@@ -263,10 +264,10 @@ def execute_commands(
         events.extend(new_events)
         tracker.update_with_events(new_events)
 
-        new_events = push_stack_frames_to_follow_commands(
-            tracker, stack_frames_to_follow_commands
-        )
-        events.extend(new_events)
+    new_events = push_stack_frames_to_follow_commands(
+        tracker, stack_frames_to_follow_commands
+    )
+    events.extend(new_events)
 
     return remove_duplicated_set_slots(events)
 
@@ -430,28 +431,22 @@ def clean_up_commands(
                 command=command,
             )
 
-        elif isinstance(command, StartFlowCommand) and command.flow == active_flow:
-            # drop a start flow command if the starting flow is equal to the currently
-            # active flow
-            structlogger.debug(
-                "command_processor.clean_up_commands.skip_command_flow_already_active",
-                command=command,
+        elif isinstance(command, StartFlowCommand):
+            top_user_frame = top_user_flow_frame(
+                tracker.stack, ignore_call_and_link_frames=False
             )
+            top_flow_id = top_user_frame.flow_id if top_user_frame else ""
 
-        elif isinstance(command, StartFlowCommand) and active_flow is not None:
-            # push handle digressions command if we are at a collect step of
-            # a flow and a new flow is started
-            collect_info = get_current_collect_step(tracker.stack, all_flows)
-            current_flow = all_flows.flow_by_id(active_flow)
-            current_flow_condition = current_flow and (
-                current_flow.ask_confirm_digressions or current_flow.block_digressions
-            )
+            if top_flow_id == command.flow:
+                # drop a start flow command if the starting flow is equal
+                # to the currently active flow
+                structlogger.debug(
+                    "command_processor.clean_up_commands.skip_command_flow_already_active",
+                    command=command,
+                )
+                continue
 
-            if collect_info and (
-                collect_info.ask_confirm_digressions
-                or collect_info.block_digressions
-                or current_flow_condition
-            ):
+            if should_add_handle_digressions_command(tracker, all_flows, top_flow_id):
                 clean_commands.append(HandleDigressionsCommand(flow=command.flow))
                 structlogger.debug(
                     "command_processor.clean_up_commands.push_handle_digressions",
@@ -848,3 +843,28 @@ def filter_cannot_handle_command_for_skipped_slots(
             and CANNOT_HANDLE_REASON == command.reason
         )
     ]
+
+
+def should_add_handle_digressions_command(
+    tracker: DialogueStateTracker, all_flows: FlowsList, top_flow_id: str
+) -> bool:
+    """Check if a handle digressions command should be added to the commands.
+
+    The command should replace a StartFlow command only if we are at a collect step of
+    a flow and a new flow is predicted by the command generator to start.
+    """
+    current_flow = all_flows.flow_by_id(top_flow_id)
+    current_flow_condition = current_flow and (
+        current_flow.ask_confirm_digressions or current_flow.block_digressions
+    )
+
+    collect_info = get_current_collect_step(tracker.stack, all_flows)
+
+    if collect_info and (
+        collect_info.ask_confirm_digressions
+        or collect_info.block_digressions
+        or current_flow_condition
+    ):
+        return True
+
+    return False
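Roughly, the new clean-up path for a predicted `StartFlowCommand` can be illustrated with plain data in place of the tracker and stack objects (flag names such as `ask_confirm_digressions` come from the diff; the function and types below are hypothetical):

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class FlowSettings:
    ask_confirm_digressions: bool = False
    block_digressions: bool = False


def clean_up_start_flow(
    started_flow: str,
    top_user_flow_id: str,
    current_flow: Optional[FlowSettings],
    collect_step: Optional[FlowSettings],
) -> Optional[str]:
    """Return the command that survives clean-up, or None if it is dropped."""
    if started_flow == top_user_flow_id:
        # starting the flow that is already active is a no-op -> drop the command
        return None
    digression_guard = (
        collect_step is not None
        and (collect_step.ask_confirm_digressions or collect_step.block_digressions)
    ) or (
        current_flow is not None
        and (current_flow.ask_confirm_digressions or current_flow.block_digressions)
    )
    if collect_step is not None and digression_guard:
        # at a collect step with digression handling configured ->
        # the digression is routed through HandleDigressions first
        return f"HandleDigressions({started_flow})"
    return f"StartFlow({started_flow})"


assert clean_up_start_flow("transfer_money", "transfer_money", None, None) is None
assert clean_up_start_flow(
    started_flow="check_balance",
    top_user_flow_id="transfer_money",
    current_flow=FlowSettings(block_digressions=True),
    collect_step=FlowSettings(),
) == "HandleDigressions(check_balance)"
```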
rasa/dialogue_understanding/stack/utils.py

@@ -4,6 +4,9 @@ from typing import List, Optional, Set, Tuple
 from rasa.dialogue_understanding.patterns.collect_information import (
     CollectInformationPatternFlowStackFrame,
 )
+from rasa.dialogue_understanding.patterns.continue_interrupted import (
+    ContinueInterruptedPatternFlowStackFrame,
+)
 from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack
 from rasa.dialogue_understanding.stack.frames import (
     BaseFlowStackFrame,
@@ -60,7 +63,9 @@ def top_flow_frame(
     return None
 
 
-def top_user_flow_frame(dialogue_stack: DialogueStack) -> Optional[UserFlowStackFrame]:
+def top_user_flow_frame(
+    dialogue_stack: DialogueStack, ignore_call_and_link_frames: bool = True
+) -> Optional[UserFlowStackFrame]:
     """Returns the topmost user flow frame from the tracker.
 
     User flows are flows that are created by developers of an assistant and
@@ -72,16 +77,19 @@ def top_user_flow_frame(dialogue_stack: DialogueStack) -> Optional[UserFlowStack
 
     Args:
         dialogue_stack: The dialogue stack to use.
+        ignore_call_and_link_frames: Whether to ignore user frames of type `call`
+            and `link`. By default, these frames are ignored.
 
     Returns:
         The topmost user flow frame from the tracker.
     """
     for frame in reversed(dialogue_stack.frames):
-        if (
-            isinstance(frame, UserFlowStackFrame)
-            and frame.frame_type != FlowStackFrameType.CALL
-            and frame.frame_type != FlowStackFrameType.LINK
-        ):
+        if isinstance(frame, UserFlowStackFrame):
+            if ignore_call_and_link_frames and (
+                frame.frame_type == FlowStackFrameType.CALL
+                or frame.frame_type == FlowStackFrameType.LINK
+            ):
+                continue
             return frame
     return None
 
@@ -213,3 +221,38 @@ def get_collect_steps_excluding_ask_before_filling_for_active_flow(
         for step in active_flow.get_collect_steps()
         if not step.ask_before_filling
     )
+
+
+def remove_digression_from_stack(stack: DialogueStack, flow_id: str) -> DialogueStack:
+    """Remove a specific flow frame from the stack and other frames that reference it.
+
+    The main use-case is to prevent duplicate digressions from being added to the stack.
+
+    Args:
+        stack: The dialogue stack.
+        flow_id: The flow to remove.
+
+    Returns:
+        The updated dialogue stack.
+    """
+    updated_stack = stack.copy()
+    original_frames = updated_stack.frames[:]
+    found_digression_index = -1
+    for index, frame in enumerate(original_frames):
+        if isinstance(frame, BaseFlowStackFrame) and frame.flow_id == flow_id:
+            updated_stack.frames.pop(index)
+            found_digression_index = index
+
+        # we also need to remove the `ContinueInterruptedPatternFlowStackFrame`
+        elif (
+            isinstance(frame, ContinueInterruptedPatternFlowStackFrame)
+            and frame.previous_flow_name == flow_id
+            and found_digression_index + 1 == index
+        ):
+            # we know that this frame is always added after the digressing flow frame
+            # that was blocked previously by action_block_digressions,
+            # so this check would occur after the digressing flow was popped.
+            # Therefore, we need to update the index dynamically before popping.
+            updated_stack.frames.pop(index - 1)
+
+    return updated_stack
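The index bookkeeping is the subtle part of `remove_digression_from_stack`: once the digressing frame has been popped, the `ContinueInterruptedPatternFlowStackFrame` that follows it has shifted left by one position. A small sketch with plain tuples instead of stack frame objects (frame names here are invented for illustration):

```python
frames = [
    ("user_flow", "transfer_money"),            # flow the user was in originally
    ("user_flow", "check_balance"),             # digressing flow to be removed
    ("continue_interrupted", "check_balance"),  # pattern frame referencing it
]

updated = frames[:]
found_digression_index = -1
for index, (frame_type, referenced_flow) in enumerate(frames):
    if frame_type == "user_flow" and referenced_flow == "check_balance":
        updated.pop(index)
        found_digression_index = index
    elif (
        frame_type == "continue_interrupted"
        and referenced_flow == "check_balance"
        and found_digression_index + 1 == index
    ):
        # the frame before it was already removed, so it now sits at index - 1
        updated.pop(index - 1)

assert updated == [("user_flow", "transfer_money")]
```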
rasa/dialogue_understanding_test/du_test_case.py

@@ -85,15 +85,33 @@ class DialogueUnderstandingOutput(BaseModel):
             for command in predicted_commands
         ]
 
-    def get_component_names_that_predicted_commands(self) -> List[str]:
-        """Get all component names that have predicted commands."""
-        if self.commands is None:
-            return []
-        return [
-            component_name
-            for component_name, predicted_commands in self.commands.items()
-            if predicted_commands
-        ]
+    def get_component_names_that_predicted_commands_or_have_llm_response(
+        self,
+    ) -> List[str]:
+        """Get all component names that have predicted commands or recieved
+        non-empty response from LLM.
+        """
+        component_names_that_predicted_commands = (
+            [
+                component_name
+                for component_name, predicted_commands in self.commands.items()
+                if predicted_commands
+            ]
+            if self.commands
+            else []
+        )
+        components_with_prompts = (
+            [
+                str(prompt.get(KEY_COMPONENT_NAME, None))
+                for prompt in self.prompts
+                if prompt.get(KEY_LLM_RESPONSE_METADATA, None)
+            ]
+            if self.prompts
+            else []
+        )
+        return list(
+            set(component_names_that_predicted_commands + components_with_prompts)
+        )
 
     def get_component_name_to_prompt_info(self) -> Dict[str, List[Dict[str, Any]]]:
         """Return a dictionary of component names to prompt information.
@@ -120,7 +138,9 @@
             return {}
 
         data: Dict[str, List[Dict[str, Any]]] = {}
-        relevant_component_names = self.get_component_names_that_predicted_commands()
+        relevant_component_names = (
+            self.get_component_names_that_predicted_commands_or_have_llm_response()
+        )
 
         for prompt_data in self.prompts:
            component_name = prompt_data[KEY_COMPONENT_NAME]
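The renamed helper above is a simple union of two sets of component names. Sketched with plain dictionaries in place of the `DialogueUnderstandingOutput` model (the lower-cased key names stand in for the `KEY_COMPONENT_NAME` and `KEY_LLM_RESPONSE_METADATA` constants):

```python
commands = {
    "CompactLLMCommandGenerator": ["StartFlow(transfer_money)"],
    "NLUCommandAdapter": [],
}
prompts = [
    {"component_name": "CompactLLMCommandGenerator", "llm_response_metadata": {"usage": {}}},
    {"component_name": "MultiStepLLMCommandGenerator", "llm_response_metadata": None},
]

with_commands = [name for name, cmds in commands.items() if cmds]
with_llm_response = [
    str(p.get("component_name")) for p in prompts if p.get("llm_response_metadata")
]
relevant = set(with_commands + with_llm_response)
assert relevant == {"CompactLLMCommandGenerator"}
```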
rasa/dialogue_understanding_test/du_test_result.py

@@ -84,7 +84,7 @@ class FailedTestStep(BaseModel):
 
         if step.dialogue_understanding_output:
             predicted_commands = step.dialogue_understanding_output.commands
-            command_generators = step.dialogue_understanding_output.get_component_names_that_predicted_commands()  # noqa: E501
+            command_generators = step.dialogue_understanding_output.get_component_names_that_predicted_commands_or_have_llm_response()  # noqa: E501
             prompts = (
                 step.dialogue_understanding_output.get_component_name_to_prompt_info()
             )
rasa/e2e_test/assertions.py

@@ -972,7 +972,7 @@ class GenerativeResponseMixin(Assertion):
         data.pop("metric_adjective")
         return data
 
-    def _render_prompt(self, step_text: str, matching_event: BotUttered) -> str:
+    def _render_prompt(self, matching_event: BotUttered) -> str:
         raise NotImplementedError
 
     def _get_processed_output(self, parsed_llm_output: Dict[str, Any]) -> List[Any]:
@@ -999,7 +999,7 @@
     ) -> Tuple[Optional[AssertionFailure], Optional[Event]]:
         """Run the LLM evaluation on the given event."""
         bot_message = matching_event.text
-        prompt = self._render_prompt(step_text, matching_event)
+        prompt = self._render_prompt(matching_event)
         llm_response = self._invoke_llm(llm_judge_config, prompt)
 
         try:
@@ -1160,9 +1160,9 @@ class GenerativeResponseIsRelevantAssertion(GenerativeResponseMixin):
     def type(cls) -> str:
         return AssertionType.GENERATIVE_RESPONSE_IS_RELEVANT.value
 
-    def _render_prompt(self, step_text: str, matching_event: BotUttered) -> str:
+    def _render_prompt(self, matching_event: BotUttered) -> str:
         """Render the prompt."""
-        inputs = _get_prompt_inputs(self.type(), step_text, matching_event)
+        inputs = _get_prompt_inputs(self.type(), matching_event)
         prompt_template = _get_default_prompt_template(
             DEFAULT_ANSWER_RELEVANCE_PROMPT_TEMPLATE_FILE_NAME
         )
@@ -1206,11 +1206,10 @@ class GenerativeResponseIsGroundedAssertion(GenerativeResponseMixin):
     def type(cls) -> str:
         return AssertionType.GENERATIVE_RESPONSE_IS_GROUNDED.value
 
-    def _render_prompt(self, step_text: str, matching_event: BotUttered) -> str:
+    def _render_prompt(self, matching_event: BotUttered) -> str:
         """Render the prompt."""
         inputs = _get_prompt_inputs(
             assertion_type=self.type(),
-            step_text=step_text,
             matching_event=matching_event,
             ground_truth=self.ground_truth,
         )
@@ -1336,12 +1335,11 @@ def _get_default_prompt_template(default_prompt_template_file_name: str) -> str:
 
 def _get_prompt_inputs(
     assertion_type: str,
-    step_text: str,
     matching_event: BotUttered,
     ground_truth: Optional[str] = None,
 ) -> Dict[str, Any]:
     if assertion_type == AssertionType.GENERATIVE_RESPONSE_IS_RELEVANT.value:
-        return {"num_variations": "3", "user_message": step_text}
+        return {"num_variations": "3", "bot_message": matching_event.text}
     elif assertion_type == AssertionType.GENERATIVE_RESPONSE_IS_GROUNDED.value:
         ground_truth_event_metadata = matching_event.metadata.get(
             SEARCH_RESULTS_METADATA_KEY, ""
rasa/e2e_test/llm_judge_prompts/answer_relevance_prompt_template.jinja2

@@ -84,6 +84,10 @@ These are a few examples of how to generate question variations and identify non
 
 ## Task Inputs
 
-- **Input Answer**: {{ user_message }}
+- **Input Answer**: {{ bot_message }}
+
+## Task Outputs
+
+Do not include any additional explanations in your output. Only provide the JSON object as described in the task steps.
 
 Your output:
rasa/e2e_test/llm_judge_prompts/groundedness_prompt_template.jinja2

@@ -162,4 +162,8 @@ These are a few examples of how to evaluate the correctness of the answer based
 - **Input Answer**: {{ bot_message }}
 - **Ground Truth**: {{ ground_truth }}
 
+## Task Outputs
+
+Do not include any additional explanations in your output. Only provide the JSON object as described in the task steps.
+
 Your output: