rasa-pro 3.12.18.dev1__py3-none-any.whl → 3.12.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of rasa-pro might be problematic.
Files changed (37)
  1. rasa/__init__.py +0 -6
  2. rasa/core/actions/action.py +1 -4
  3. rasa/core/policies/intentless_policy.py +1 -3
  4. rasa/core/processor.py +50 -5
  5. rasa/core/utils.py +11 -2
  6. rasa/dialogue_understanding/coexistence/llm_based_router.py +1 -0
  7. rasa/dialogue_understanding/commands/__init__.py +4 -0
  8. rasa/dialogue_understanding/commands/cancel_flow_command.py +3 -1
  9. rasa/dialogue_understanding/commands/set_slot_command.py +6 -0
  10. rasa/dialogue_understanding/commands/utils.py +26 -2
  11. rasa/dialogue_understanding/generator/command_generator.py +11 -1
  12. rasa/dialogue_understanding/generator/llm_based_command_generator.py +4 -15
  13. rasa/dialogue_understanding/generator/llm_command_generator.py +1 -3
  14. rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +4 -44
  15. rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +1 -14
  16. rasa/dialogue_understanding/processor/command_processor.py +5 -5
  17. rasa/dialogue_understanding/stack/frames/flow_stack_frame.py +17 -4
  18. rasa/dialogue_understanding/stack/utils.py +3 -1
  19. rasa/dialogue_understanding/utils.py +68 -12
  20. rasa/dialogue_understanding_test/du_test_schema.yml +3 -3
  21. rasa/e2e_test/e2e_test_schema.yml +3 -3
  22. rasa/hooks.py +0 -55
  23. rasa/llm_fine_tuning/utils.py +2 -4
  24. rasa/shared/constants.py +0 -5
  25. rasa/shared/providers/constants.py +0 -9
  26. rasa/shared/providers/llm/_base_litellm_client.py +4 -14
  27. rasa/shared/providers/llm/litellm_router_llm_client.py +7 -17
  28. rasa/shared/providers/llm/llm_client.py +15 -24
  29. rasa/shared/providers/llm/self_hosted_llm_client.py +2 -10
  30. rasa/tracing/instrumentation/attribute_extractors.py +2 -2
  31. rasa/version.py +1 -1
  32. {rasa_pro-3.12.18.dev1.dist-info → rasa_pro-3.12.19.dist-info}/METADATA +1 -2
  33. {rasa_pro-3.12.18.dev1.dist-info → rasa_pro-3.12.19.dist-info}/RECORD +36 -37
  34. rasa/monkey_patches.py +0 -91
  35. {rasa_pro-3.12.18.dev1.dist-info → rasa_pro-3.12.19.dist-info}/NOTICE +0 -0
  36. {rasa_pro-3.12.18.dev1.dist-info → rasa_pro-3.12.19.dist-info}/WHEEL +0 -0
  37. {rasa_pro-3.12.18.dev1.dist-info → rasa_pro-3.12.19.dist-info}/entry_points.txt +0 -0
rasa/__init__.py CHANGED
@@ -5,11 +5,5 @@ from rasa import version
 # define the version before the other imports since these need it
 __version__ = version.__version__
 
-from litellm.integrations.langfuse.langfuse import LangFuseLogger
-
-from rasa.monkey_patches import litellm_langfuse_logger_init_fixed
-
-# Monkey-patch the init method as early as possible before the class is used
-LangFuseLogger.__init__ = litellm_langfuse_logger_init_fixed  # type: ignore
 
 logging.getLogger(__name__).addHandler(logging.NullHandler())
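
Note: this release drops the import-time monkey-patch of LiteLLM's LangFuseLogger (rasa/monkey_patches.py is deleted outright, see file 34). For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of class-level __init__ patching; the patched body below is hypothetical, not the removed implementation:

class LangFuseLogger:
    def __init__(self) -> None:
        raise RuntimeError("simulates the upstream bug being patched around")

def litellm_langfuse_logger_init_fixed(self) -> None:
    # Hypothetical replacement body; the real fix lived in the
    # now-removed rasa/monkey_patches.py.
    self.initialized = True

# Rebinding __init__ on the class affects every later instantiation,
# which is why the patch had to run at import time, before first use.
LangFuseLogger.__init__ = litellm_langfuse_logger_init_fixed
assert LangFuseLogger().initialized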
rasa/core/actions/action.py CHANGED
@@ -1137,15 +1137,12 @@ class ActionSendText(Action):
         tracker: "DialogueStateTracker",
         domain: "Domain",
         metadata: Optional[Dict[Text, Any]] = None,
-        create_bot_uttered_event: bool = True,
     ) -> List[Event]:
         """Runs action. Please see parent class for the full docstring."""
         fallback = {"text": ""}
         metadata_copy = copy.deepcopy(metadata) if metadata else {}
         message = metadata_copy.get("message", fallback)
-        if create_bot_uttered_event:
-            return [create_bot_utterance(message)]
-        return []
+        return [create_bot_utterance(message)]
 
 
 class ActionExtractSlots(Action):
rasa/core/policies/intentless_policy.py CHANGED
@@ -721,9 +721,7 @@ class IntentlessPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
             final_response_examples.append(resp)
 
         llm_response = await self.generate_answer(
-            final_response_examples,
-            conversation_samples,
-            history,
+            final_response_examples, conversation_samples, history
         )
         if not llm_response:
             structlogger.debug("intentless_policy.prediction.skip_llm_fail")
rasa/core/processor.py CHANGED
@@ -34,6 +34,9 @@ from rasa.dialogue_understanding.commands import (
     CannotHandleCommand,
     Command,
     NoopCommand,
+    RestartCommand,
+    SessionEndCommand,
+    SessionStartCommand,
     SetSlotCommand,
 )
 from rasa.dialogue_understanding.commands.utils import (
@@ -880,19 +883,61 @@ class MessageProcessor:
                 tracker.has_coexistence_routing_slot
                 and tracker.get_slot(ROUTE_TO_CALM_SLOT) is None
             ):
-                # if we are currently not routing to either CALM or dm1
-                # we make a sticky routing to CALM if there are any commands
-                # from the trigger intent parsing
-                # or a sticky routing to dm1 if there are no commands
+                # If we are currently not routing to either CALM or DM1:
+                # - Sticky route to CALM if there are any commands
+                #   from the trigger intent parsing
+                # - Sticky route to DM1 if there are no commands present
+                route_to_calm_slot_value = self._determine_route_to_calm_slot_value(
+                    nlu_adapted_commands
+                )
                 commands += [
                     SetSlotCommand(
-                        ROUTE_TO_CALM_SLOT, len(nlu_adapted_commands) > 0
+                        ROUTE_TO_CALM_SLOT, route_to_calm_slot_value
                     ).as_dict()
                 ]
 
         parse_data[COMMANDS] = commands
         return parse_data
 
+    def _determine_route_to_calm_slot_value(
+        self, nlu_adapted_commands: List[Dict[str, Any]]
+    ) -> Optional[bool]:
+        """Determines what value should be assigned to `ROUTE_TO_CALM_SLOT`.
+
+        Returns:
+            - True: if any command other than SessionStartCommand,
+              SessionEndCommand, or RestartCommand is present.
+            - None: if only ignored system commands are present.
+            - False: if there are no commands at all.
+        """
+        system_commands_to_ignore = [
+            SessionStartCommand.command(),
+            SessionEndCommand.command(),
+            RestartCommand.command(),
+        ]
+
+        # Exclude the system commands, as they don't originate from the user's
+        # input intent and shouldn't influence the decision for setting
+        # ROUTE_TO_CALM_SLOT.
+        intent_triggered_commands = [
+            command
+            for command in nlu_adapted_commands
+            if command.get("command") not in system_commands_to_ignore
+        ]
+
+        if len(intent_triggered_commands) > 0:
+            # Commands other than system commands are present - route to CALM
+            return True
+        elif len(nlu_adapted_commands) > 0:
+            # Only system commands are present - defer the routing decision
+            return None
+        else:
+            # No commands at all - route to DM1
+            return False
+
     def _update_full_retrieval_intent(self, parse_data: Dict[Text, Any]) -> None:
         """Update the parse data with the full retrieval intent.
rasa/core/utils.py CHANGED
@@ -389,16 +389,25 @@ def should_force_slot_filling(
     and the name of the slot if applicable.
     """
     from rasa.dialogue_understanding.processor.command_processor import (
+        find_updated_flows,
         get_current_collect_step,
     )
 
     if tracker is None:
-        structlogger.error(
-            "slot.force_slot_filling.error",
+        structlogger.debug(
+            "slot.force_slot_filling.no_found_tracker",
             event_info="Tracker is None. Cannot force slot filling.",
         )
         return False, None
 
+    updated_flows = find_updated_flows(tracker, flows)
+    if updated_flows:
+        structlogger.debug(
+            "slot.force_slot_filling.running_flows_were_updated",
+            updated_flow_ids=updated_flows,
+        )
+        return False, None
+
     stack = tracker.stack
     step = get_current_collect_step(stack, flows)
     if step is None or not step.force_slot_filling:
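
Note: this new guard relies on `find_updated_flows`, which (per the command_processor.py hunk further down, where flow fingerprints are stored on the tracker) compares stored flow hashes against the currently loaded flows. A hedged sketch of that detection idea, with illustrative names only:

import hashlib
from typing import Dict, List

def fingerprint(flow_definition: str) -> str:
    # One stable hash per flow definition; any edit changes the digest.
    return hashlib.sha256(flow_definition.encode("utf-8")).hexdigest()

def find_updated(stored: Dict[str, str], current: Dict[str, str]) -> List[str]:
    # A flow counts as updated if it was seen before and its hash changed.
    return [
        flow_id
        for flow_id, old_hash in stored.items()
        if flow_id in current and current[flow_id] != old_hash
    ]

stored = {"transfer_money": "a" * 64}
current = {"transfer_money": fingerprint("version 2 of the flow")}
print(find_updated(stored, current))  # ['transfer_money']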
rasa/dialogue_understanding/coexistence/llm_based_router.py CHANGED
@@ -171,6 +171,7 @@ class LLMBasedRouter(LLMHealthCheckMixin, GraphComponent):
         **kwargs: Any,
     ) -> "LLMBasedRouter":
         """Loads trained component (see parent class for full docstring)."""
+
         # Perform health check on the resolved LLM client config
         llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
         cls.perform_llm_health_check(
rasa/dialogue_understanding/commands/__init__.py CHANGED
@@ -16,6 +16,9 @@ from rasa.dialogue_understanding.commands.error_command import ErrorCommand
 from rasa.dialogue_understanding.commands.free_form_answer_command import (
     FreeFormAnswerCommand,
 )
+from rasa.dialogue_understanding.commands.handle_code_change_command import (
+    HandleCodeChangeCommand,
+)
 from rasa.dialogue_understanding.commands.human_handoff_command import (
     HumanHandoffCommand,
 )
@@ -49,6 +52,7 @@ __all__ = [
     "SetSlotCommand",
     "StartFlowCommand",
     "HumanHandoffCommand",
+    "HandleCodeChangeCommand",
     "CorrectSlotsCommand",
     "CorrectedSlot",
     "ErrorCommand",
rasa/dialogue_understanding/commands/cancel_flow_command.py CHANGED
@@ -95,7 +95,9 @@ class CancelFlowCommand(Command):
         original_stack = original_tracker.stack
 
         applied_events: List[Event] = []
-        user_frame = top_user_flow_frame(original_stack)
+        user_frame = top_user_flow_frame(
+            original_stack, ignore_call_and_link_frames=False
+        )
         current_flow = user_frame.flow(all_flows) if user_frame else None
 
         if not current_flow:
rasa/dialogue_understanding/commands/set_slot_command.py CHANGED
@@ -13,6 +13,7 @@ from rasa.dialogue_understanding.commands.command_syntax_manager import (
 )
 from rasa.dialogue_understanding.commands.utils import (
     clean_extracted_value,
+    find_default_flows_collecting_slot,
     get_nullable_slot_value,
 )
 from rasa.dialogue_understanding.patterns.collect_information import (
@@ -136,6 +137,11 @@ class SetSlotCommand(Command):
         ):
             # Get the other predicted flows from the most recent message on the tracker.
             predicted_flows = get_flows_predicted_to_start_from_tracker(tracker)
+            if not predicted_flows:
+                # If no predicted flows, check for default flows collecting the slot.
+                predicted_flows = find_default_flows_collecting_slot(
+                    self.name, all_flows
+                )
             use_slot_fill = any(
                 step.collect == self.name and not step.ask_before_filling
                 for flow in all_flows.underlying_flows
rasa/dialogue_understanding/commands/utils.py CHANGED
@@ -7,18 +7,18 @@ from rasa.dialogue_understanding.patterns.validate_slot import (
 )
 from rasa.shared.constants import ACTION_ASK_PREFIX, UTTER_ASK_PREFIX
 from rasa.shared.core.events import Event, SlotSet
+from rasa.shared.core.flows import FlowsList
 from rasa.shared.core.slots import Slot
 from rasa.shared.core.trackers import DialogueStateTracker
 
 if TYPE_CHECKING:
     from rasa.dialogue_understanding.commands import StartFlowCommand
-    from rasa.shared.core.flows import FlowsList
 
 structlogger = structlog.get_logger()
 
 
 def start_flow_by_name(
-    flow_name: str, flows: "FlowsList"
+    flow_name: str, flows: FlowsList
 ) -> Optional["StartFlowCommand"]:
     from rasa.dialogue_understanding.commands import StartFlowCommand
 
@@ -126,3 +126,27 @@ def create_validate_frames_from_slot_set_events(
         validate_frames.append(frame)
 
     return tracker, validate_frames
+
+
+def find_default_flows_collecting_slot(
+    slot_name: str, all_flows: FlowsList
+) -> List[str]:
+    """Find default flows that have collect steps matching the specified slot name.
+
+    Args:
+        slot_name: The name of the slot to search for.
+        all_flows: All flows in the assistant.
+
+    Returns:
+        List of flow IDs for default flows that collect the specified slot
+        without asking before filling.
+    """
+    return [
+        flow.id
+        for flow in all_flows.underlying_flows
+        if flow.is_rasa_default_flow
+        and any(
+            step.collect == slot_name and not step.ask_before_filling
+            for step in flow.get_collect_steps()
+        )
+    ]
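
Note: this new helper backs the SetSlotCommand fallback above: when no flows are predicted to start, default flows that collect the slot without asking first are considered instead. A small sketch of the same filter, using lightweight stand-ins rather than Rasa's real Flow/FlowsList types:

from dataclasses import dataclass, field
from typing import List

@dataclass
class CollectStep:  # stand-in for a flow's collect step
    collect: str
    ask_before_filling: bool = False

@dataclass
class Flow:  # stand-in for rasa.shared.core.flows.Flow
    id: str
    is_rasa_default_flow: bool = False
    collect_steps: List[CollectStep] = field(default_factory=list)

    def get_collect_steps(self) -> List[CollectStep]:
        return self.collect_steps

def find_default_flows_collecting_slot(slot_name: str, flows: List[Flow]) -> List[str]:
    # Keep only Rasa default flows that fill this slot without asking first.
    return [
        flow.id
        for flow in flows
        if flow.is_rasa_default_flow
        and any(
            step.collect == slot_name and not step.ask_before_filling
            for step in flow.get_collect_steps()
        )
    ]

flows = [
    Flow("pattern_correction", True, [CollectStep("confirm_correction")]),
    Flow("transfer_money", False, [CollectStep("amount")]),
]
print(find_default_flows_collecting_slot("confirm_correction", flows))
# ['pattern_correction']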
rasa/dialogue_understanding/generator/command_generator.py CHANGED
@@ -8,6 +8,7 @@ from rasa.dialogue_understanding.commands import (
     Command,
     CorrectSlotsCommand,
     ErrorCommand,
+    HandleCodeChangeCommand,
     SetSlotCommand,
     StartFlowCommand,
 )
@@ -396,15 +397,24 @@ class CommandGenerator:
         The filtered commands.
         """
         from rasa.dialogue_understanding.processor.command_processor import (
+            find_updated_flows,
             get_current_collect_step,
         )
 
         if tracker is None:
-            structlogger.error(
+            structlogger.debug(
                "command_generator.filter_commands_during_force_slot_filling.tracker_not_found",
             )
             return commands
 
+        updated_flows = find_updated_flows(tracker, available_flows)
+        if updated_flows:
+            structlogger.debug(
+                "command_generator.filter_commands_during_force_slot_filling.running_flows_were_updated",
+                updated_flow_ids=updated_flows,
+            )
+            return [HandleCodeChangeCommand()]
+
         stack = tracker.stack
         step = get_current_collect_step(stack, available_flows)
 
rasa/dialogue_understanding/generator/llm_based_command_generator.py CHANGED
@@ -1,8 +1,6 @@
 from abc import ABC, abstractmethod
-from asyncio import Lock
 from functools import lru_cache
 from typing import Any, Dict, List, Optional, Set, Text, Tuple, Union
-from uuid import UUID, uuid4
 
 import structlog
 from jinja2 import Environment, Template, select_autoescape
@@ -91,9 +89,6 @@ class LLMBasedCommandGenerator(
         else:
             self.flow_retrieval = None
 
-        self.sender_id_to_session_id_mapping: Dict[str, UUID] = {}
-        self._lock = Lock()
-
     ### Abstract methods
     @staticmethod
     @abstractmethod
@@ -230,7 +225,8 @@ class LLMBasedCommandGenerator(
 
     @lru_cache
    def compile_template(self, template: str) -> Template:
-        """Compile the prompt template and register custom filters.
+        """
+        Compile the prompt template and register custom filters.
         Compiling the template is an expensive operation,
         so we cache the result.
         """
@@ -332,9 +328,7 @@ class LLMBasedCommandGenerator(
 
     @measure_llm_latency
     async def invoke_llm(
-        self,
-        prompt: Union[List[dict], List[str], str],
-        metadata: Optional[Dict[str, Any]] = None,
+        self, prompt: Union[List[dict], List[str], str]
     ) -> Optional[LLMResponse]:
         """Use LLM to generate a response.
 
@@ -347,7 +341,6 @@ class LLMBasedCommandGenerator(
             - a list of messages. Each message is a string and will be formatted
              as a user message.
            - a single message as a string which will be formatted as user message.
-           metadata: Optional metadata to be passed to the LLM call.
 
         Returns:
             An LLMResponse object.
@@ -359,7 +352,7 @@ class LLMBasedCommandGenerator(
             self.config.get(LLM_CONFIG_KEY), self.get_default_llm_config()
         )
         try:
-            return await llm.acompletion(prompt, metadata)
+            return await llm.acompletion(prompt)
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
@@ -662,7 +655,3 @@ class LLMBasedCommandGenerator(
     def get_default_llm_config() -> Dict[str, Any]:
         """Get the default LLM config for the command generator."""
         return DEFAULT_LLM_CONFIG
-
-    async def _get_or_create_session_id(self, sender_id: str) -> UUID:
-        async with self._lock:
-            return self.sender_id_to_session_id_mapping.setdefault(sender_id, uuid4())
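
Note: one detail worth calling out in the hunks above is that `compile_template` stays wrapped in `@lru_cache`, so each distinct template string is compiled by Jinja2 only once. A minimal sketch of that caching pattern (the class name here is illustrative):

from functools import lru_cache
from jinja2 import Environment, Template, select_autoescape

class TemplateCompiler:
    @lru_cache
    def compile_template(self, template: str) -> Template:
        # Compiling is expensive, so repeated calls with the same string
        # return the cached Template object instead of recompiling.
        env = Environment(autoescape=select_autoescape())
        return env.from_string(template)

compiler = TemplateCompiler()
t1 = compiler.compile_template("Hello {{ name }}!")
t2 = compiler.compile_template("Hello {{ name }}!")
assert t1 is t2  # second call hit the cache
print(t1.render(name="Rasa"))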
rasa/dialogue_understanding/generator/llm_command_generator.py CHANGED
@@ -55,9 +55,7 @@ class LLMCommandGenerator(SingleStepLLMCommandGenerator):
         )
 
     async def invoke_llm(
-        self,
-        prompt: Union[List[dict], List[str], str],
-        metadata: Optional[Dict[str, Any]] = None,
+        self, prompt: Union[List[dict], List[str], str]
     ) -> Optional[LLMResponse]:
         try:
             return await super().invoke_llm(prompt)
rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py CHANGED
@@ -42,9 +42,6 @@ from rasa.engine.storage.resource import Resource
 from rasa.engine.storage.storage import ModelStorage
 from rasa.shared.constants import (
     EMBEDDINGS_CONFIG_KEY,
-    LANGFUSE_CUSTOM_METADATA_DICT,
-    LANGFUSE_METADATA_SESSION_ID,
-    LANGFUSE_TAGS,
     RASA_PATTERN_CANNOT_HANDLE_NOT_SUPPORTED,
     ROUTE_TO_CALM_SLOT,
 )
@@ -110,7 +107,7 @@ structlogger = structlog.get_logger()
 )
 @deprecated(
     reason=(
-        "The MultiStepLLMCommandGenerator is deprecated and will be removed in "
+        "The MultiStepLLMCommandGenerator is deprecated and will be removed in "
         "Rasa `4.0.0`."
     )
 )
@@ -495,20 +492,7 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=prompt,
         )
 
-        if tracker:
-            session_id = str(await self._get_or_create_session_id(tracker.sender_id))
-        else:
-            session_id = "unknown"
-        metadata = {
-            LANGFUSE_METADATA_SESSION_ID: session_id,
-            LANGFUSE_CUSTOM_METADATA_DICT: {
-                "component": self.__class__.__name__,
-                "function": "_predict_commands_for_active_flow",
-            },
-            LANGFUSE_TAGS: [self.__class__.__name__],
-        }
-
-        response = await self.invoke_llm(prompt, metadata)
+        response = await self.invoke_llm(prompt)
         llm_response = LLMResponse.ensure_llm_response(response)
         actions = None
         if llm_response and llm_response.choices:
@@ -562,20 +546,8 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             ".prompt_rendered",
             prompt=prompt,
         )
-        if tracker:
-            session_id = str(await self._get_or_create_session_id(tracker.sender_id))
-        else:
-            session_id = "unknown"
-        metadata = {
-            LANGFUSE_METADATA_SESSION_ID: session_id,
-            LANGFUSE_CUSTOM_METADATA_DICT: {
-                "component": self.__class__.__name__,
-                "function": "_predict_commands_for_handling_flows",
-            },
-            LANGFUSE_TAGS: [self.__class__.__name__],
-        }
 
-        response = await self.invoke_llm(prompt, metadata)
+        response = await self.invoke_llm(prompt)
         llm_response = LLMResponse.ensure_llm_response(response)
         actions = None
         if llm_response and llm_response.choices:
@@ -664,20 +636,8 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             flow=newly_started_flow.id,
             prompt=prompt,
         )
-        if tracker:
-            session_id = str(await self._get_or_create_session_id(tracker.sender_id))
-        else:
-            session_id = "unknown"
-        metadata = {
-            LANGFUSE_METADATA_SESSION_ID: session_id,
-            LANGFUSE_CUSTOM_METADATA_DICT: {
-                "component": self.__class__.__name__,
-                "function": "_predict_commands_for_newly_started_flow",
-            },
-            LANGFUSE_TAGS: [self.__class__.__name__],
-        }
 
-        response = await self.invoke_llm(prompt, metadata)
+        response = await self.invoke_llm(prompt)
         llm_response = LLMResponse.ensure_llm_response(response)
         actions = None
         if llm_response and llm_response.choices:
rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py CHANGED
@@ -47,9 +47,6 @@ from rasa.shared.constants import (
     AWS_BEDROCK_PROVIDER,
     AZURE_OPENAI_PROVIDER,
     EMBEDDINGS_CONFIG_KEY,
-    LANGFUSE_CUSTOM_METADATA_DICT,
-    LANGFUSE_METADATA_SESSION_ID,
-    LANGFUSE_TAGS,
     MAX_COMPLETION_TOKENS_CONFIG_KEY,
     PROMPT_TEMPLATE_CONFIG_KEY,
     ROUTE_TO_CALM_SLOT,
@@ -369,17 +366,7 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=flow_prompt,
         )
 
-        if tracker:
-            session_id = str(await self._get_or_create_session_id(tracker.sender_id))
-        else:
-            session_id = "unknown"
-        metadata = {
-            LANGFUSE_METADATA_SESSION_ID: session_id,
-            LANGFUSE_CUSTOM_METADATA_DICT: {"component": self.__class__.__name__},
-            LANGFUSE_TAGS: [self.__class__.__name__],
-        }
-
-        response = await self.invoke_llm(flow_prompt, metadata)
+        response = await self.invoke_llm(flow_prompt)
         llm_response = LLMResponse.ensure_llm_response(response)
         # The check for 'None' maintains compatibility with older versions
         # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
rasa/dialogue_understanding/processor/command_processor.py CHANGED
@@ -214,18 +214,18 @@ def execute_commands(
     commands: List[Command] = get_commands_from_tracker(tracker)
     original_tracker = tracker.copy()
 
-    commands = clean_up_commands(
-        commands, tracker, all_flows, execution_context, story_graph, domain
-    )
-
     updated_flows = find_updated_flows(tracker, all_flows)
     if updated_flows:
-        # Override commands
+        # if there are updated flows, we need to handle the code change
         structlogger.debug(
             "command_processor.execute_commands.running_flows_were_updated",
             updated_flow_ids=updated_flows,
         )
         commands = [HandleCodeChangeCommand()]
+    else:
+        commands = clean_up_commands(
+            commands, tracker, all_flows, execution_context, story_graph, domain
+        )
 
     # store current flow hashes if they changed
     new_hashes = calculate_flow_fingerprints(all_flows)
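
Note: the reordering makes the code-change check authoritative: when running flows changed since the conversation started, predicted commands are discarded wholesale instead of being cleaned up first. A minimal sketch of that control flow, under illustrative names:

from typing import List

def clean_up(commands: List[str]) -> List[str]:
    # Placeholder for the real clean_up_commands pipeline.
    return [c for c in commands if c]

def resolve_commands(
    predicted: List[str], updated_flow_ids: List[str]
) -> List[str]:
    # If any running flow definition changed, replace every predicted
    # command with a single code-change handler.
    if updated_flow_ids:
        return ["HandleCodeChangeCommand"]
    # Otherwise the usual clean-up pipeline runs on the predictions.
    return clean_up(predicted)

print(resolve_commands(["SetSlot"], updated_flow_ids=["transfer_money"]))
# ['HandleCodeChangeCommand']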
rasa/dialogue_understanding/stack/frames/flow_stack_frame.py CHANGED
@@ -53,7 +53,8 @@ class FlowStackFrameType(str, Enum):
             typ: The string to create the `FlowStackFrameType` from.
 
         Returns:
-            The created `FlowStackFrameType`."""
+            The created `FlowStackFrameType`.
+        """
         if typ is None:
             return FlowStackFrameType.REGULAR
         elif typ == FlowStackFrameType.INTERRUPT.value:
@@ -107,7 +108,8 @@ class BaseFlowStackFrame(DialogueStackFrame):
             all_flows: All flows in the assistant.
 
         Returns:
-            The current flow."""
+            The current flow.
+        """
         flow = all_flows.flow_by_id(self.flow_id)
         if not flow:
             # we shouldn't ever end up with a frame that belongs to a non
@@ -122,9 +124,20 @@ class BaseFlowStackFrame(DialogueStackFrame):
             all_flows: All flows in the assistant.
 
         Returns:
-            The current flow step."""
+            The current flow step.
+        """
         flow = self.flow(all_flows)
-        step = flow.step_by_id(self.step_id)
+
+        step_id = self.step_id
+        # in 3.11.4 we added the flow_id as a prefix to the step_id
+        # this causes issues when loading old dialogues as the prefix is missing
+        # (see https://rasahq.atlassian.net/jira/software/c/projects/ENG/boards/43?selectedIssue=ENG-1939)
+        # so we try to find the step by adding the flow prefix to old step_ids as well
+        # TODO: remove this in 4.0.0
+        alternative_step_id = f"{self.flow_id}_{self.step_id}"
+
+        step = flow.step_by_id(step_id) or flow.step_by_id(alternative_step_id)
+
         if not step:
             # we shouldn't ever end up with a frame that belongs to a non
             # existing step, but if we do, we should raise an error
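
Note: the compatibility shim above handles trackers persisted before 3.11.4, when step ids were not yet prefixed with the flow id. A standalone sketch of the two-key lookup, with a stand-in step index:

from typing import Dict, Optional

def find_step(
    steps: Dict[str, str], flow_id: str, step_id: str
) -> Optional[str]:
    # Try the stored id first, then the post-3.11.4 form that prefixes
    # the flow id, so dialogues saved by older versions still resolve.
    return steps.get(step_id) or steps.get(f"{flow_id}_{step_id}")

steps = {"transfer_money_collect_amount": "collect step"}
# An old tracker stored the un-prefixed id:
print(find_step(steps, "transfer_money", "collect_amount"))  # collect step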
rasa/dialogue_understanding/stack/utils.py CHANGED
@@ -209,7 +209,9 @@ def get_collect_steps_excluding_ask_before_filling_for_active_flow(
     All collect steps that are part of the current active flow,
     excluding the collect steps that have to be asked before filling.
     """
-    active_frame = top_user_flow_frame(dialogue_stack)
+    active_frame = top_user_flow_frame(
+        dialogue_stack, ignore_call_and_link_frames=False
+    )
     if active_frame is None:
         return set()
     active_flow = active_frame.flow(all_flows)