rasa-pro 3.12.18.dev1__py3-none-any.whl → 3.12.25__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of rasa-pro might be problematic. Click here for more details.

Files changed (53)
  1. rasa/__init__.py +0 -6
  2. rasa/core/actions/action.py +2 -5
  3. rasa/core/actions/action_repeat_bot_messages.py +18 -22
  4. rasa/core/channels/voice_stream/asr/asr_engine.py +5 -1
  5. rasa/core/channels/voice_stream/asr/azure.py +9 -0
  6. rasa/core/channels/voice_stream/asr/deepgram.py +5 -0
  7. rasa/core/channels/voice_stream/audiocodes.py +9 -4
  8. rasa/core/channels/voice_stream/twilio_media_streams.py +7 -0
  9. rasa/core/channels/voice_stream/voice_channel.py +47 -9
  10. rasa/core/policies/enterprise_search_policy.py +196 -72
  11. rasa/core/policies/intentless_policy.py +1 -3
  12. rasa/core/processor.py +50 -5
  13. rasa/core/utils.py +11 -2
  14. rasa/dialogue_understanding/coexistence/llm_based_router.py +1 -0
  15. rasa/dialogue_understanding/commands/__init__.py +4 -0
  16. rasa/dialogue_understanding/commands/cancel_flow_command.py +3 -1
  17. rasa/dialogue_understanding/commands/correct_slots_command.py +0 -10
  18. rasa/dialogue_understanding/commands/set_slot_command.py +6 -0
  19. rasa/dialogue_understanding/commands/utils.py +26 -2
  20. rasa/dialogue_understanding/generator/command_generator.py +15 -5
  21. rasa/dialogue_understanding/generator/llm_based_command_generator.py +4 -15
  22. rasa/dialogue_understanding/generator/llm_command_generator.py +1 -3
  23. rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +4 -44
  24. rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +1 -14
  25. rasa/dialogue_understanding/processor/command_processor.py +23 -16
  26. rasa/dialogue_understanding/stack/frames/flow_stack_frame.py +17 -4
  27. rasa/dialogue_understanding/stack/utils.py +3 -1
  28. rasa/dialogue_understanding/utils.py +68 -12
  29. rasa/dialogue_understanding_test/du_test_schema.yml +3 -3
  30. rasa/e2e_test/e2e_test_coverage_report.py +1 -1
  31. rasa/e2e_test/e2e_test_schema.yml +3 -3
  32. rasa/hooks.py +0 -55
  33. rasa/llm_fine_tuning/annotation_module.py +43 -11
  34. rasa/llm_fine_tuning/utils.py +2 -4
  35. rasa/shared/constants.py +0 -5
  36. rasa/shared/core/constants.py +1 -0
  37. rasa/shared/core/flows/constants.py +2 -0
  38. rasa/shared/core/flows/flow.py +129 -13
  39. rasa/shared/core/flows/flows_list.py +18 -1
  40. rasa/shared/core/flows/steps/link.py +7 -2
  41. rasa/shared/providers/constants.py +0 -9
  42. rasa/shared/providers/llm/_base_litellm_client.py +4 -14
  43. rasa/shared/providers/llm/litellm_router_llm_client.py +7 -17
  44. rasa/shared/providers/llm/llm_client.py +15 -24
  45. rasa/shared/providers/llm/self_hosted_llm_client.py +2 -10
  46. rasa/tracing/instrumentation/attribute_extractors.py +2 -2
  47. rasa/version.py +1 -1
  48. {rasa_pro-3.12.18.dev1.dist-info → rasa_pro-3.12.25.dist-info}/METADATA +3 -4
  49. {rasa_pro-3.12.18.dev1.dist-info → rasa_pro-3.12.25.dist-info}/RECORD +52 -53
  50. rasa/monkey_patches.py +0 -91
  51. {rasa_pro-3.12.18.dev1.dist-info → rasa_pro-3.12.25.dist-info}/NOTICE +0 -0
  52. {rasa_pro-3.12.18.dev1.dist-info → rasa_pro-3.12.25.dist-info}/WHEEL +0 -0
  53. {rasa_pro-3.12.18.dev1.dist-info → rasa_pro-3.12.25.dist-info}/entry_points.txt +0 -0
rasa/core/processor.py CHANGED
@@ -34,6 +34,9 @@ from rasa.dialogue_understanding.commands import (
34
34
  CannotHandleCommand,
35
35
  Command,
36
36
  NoopCommand,
37
+ RestartCommand,
38
+ SessionEndCommand,
39
+ SessionStartCommand,
37
40
  SetSlotCommand,
38
41
  )
39
42
  from rasa.dialogue_understanding.commands.utils import (
@@ -880,19 +883,61 @@ class MessageProcessor:
880
883
  tracker.has_coexistence_routing_slot
881
884
  and tracker.get_slot(ROUTE_TO_CALM_SLOT) is None
882
885
  ):
883
- # if we are currently not routing to either CALM or dm1
884
- # we make a sticky routing to CALM if there are any commands
885
- # from the trigger intent parsing
886
- # or a sticky routing to dm1 if there are no commands
886
+ # If we are currently not routing to either CALM or DM1:
887
+ # - Sticky route to CALM if there are any commands
888
+ # from the trigger intent parsing
889
+ # - Sticky route to DM1 if there are no commands present
890
+ route_to_calm_slot_value = self._determine_route_to_calm_slot_value(
891
+ nlu_adapted_commands
892
+ )
887
893
  commands += [
888
894
  SetSlotCommand(
889
- ROUTE_TO_CALM_SLOT, len(nlu_adapted_commands) > 0
895
+ ROUTE_TO_CALM_SLOT, route_to_calm_slot_value
890
896
  ).as_dict()
891
897
  ]
892
898
 
893
899
  parse_data[COMMANDS] = commands
894
900
  return parse_data
895
901
 
902
+ def _determine_route_to_calm_slot_value(
903
+ self, nlu_adapted_commands: List[Dict[str, Any]]
904
+ ) -> Optional[bool]:
905
+ """Determines what value should be assigned to `ROUTE_TO_CALM_SLOT`.
906
+
907
+ Returns:
908
+ - True: If any command other than:
909
+ - SessionStartCommand
910
+ - SessionEndCommand
911
+ - RestartCommand
912
+ is present.
913
+ - None: If only ignored system commands are present.
914
+ - False: If no commands at all.
915
+ """
916
+ system_commands_to_ignore = [
917
+ SessionStartCommand.command(),
918
+ SessionEndCommand.command(),
919
+ RestartCommand.command(),
920
+ ]
921
+
922
+ # Exclude the system commands, as they don't originate from the user's
923
+ # input intent and shouldn't influence the decision for setting
924
+ # ROUTE_TO_CALM_SLOT.
925
+ intent_triggered_commands = [
926
+ command
927
+ for command in nlu_adapted_commands
928
+ if command.get("command") not in system_commands_to_ignore
929
+ ]
930
+
931
+ if len(intent_triggered_commands) > 0:
932
+ # There are commands other than system commands present - route to CALM
933
+ return True
934
+ elif len(nlu_adapted_commands) > 0:
935
+ # Only a system command is present — defer routing decision
936
+ return None
937
+ else:
938
+ # No commands at all — route to DM1
939
+ return False
940
+
896
941
  def _update_full_retrieval_intent(self, parse_data: Dict[Text, Any]) -> None:
897
942
  """Update the parse data with the full retrieval intent.
898
943
 
rasa/core/utils.py CHANGED
@@ -389,16 +389,25 @@ def should_force_slot_filling(
389
389
  and the name of the slot if applicable.
390
390
  """
391
391
  from rasa.dialogue_understanding.processor.command_processor import (
392
+ find_updated_flows,
392
393
  get_current_collect_step,
393
394
  )
394
395
 
395
396
  if tracker is None:
396
- structlogger.error(
397
- "slot.force_slot_filling.error",
397
+ structlogger.debug(
398
+ "slot.force_slot_filling.no_found_tracker",
398
399
  event_info="Tracker is None. Cannot force slot filling.",
399
400
  )
400
401
  return False, None
401
402
 
403
+ updated_flows = find_updated_flows(tracker, flows)
404
+ if updated_flows:
405
+ structlogger.debug(
406
+ "slot.force_slot_filling.running_flows_were_updated",
407
+ updated_flow_ids=updated_flows,
408
+ )
409
+ return False, None
410
+
402
411
  stack = tracker.stack
403
412
  step = get_current_collect_step(stack, flows)
404
413
  if step is None or not step.force_slot_filling:
@@ -171,6 +171,7 @@ class LLMBasedRouter(LLMHealthCheckMixin, GraphComponent):
171
171
  **kwargs: Any,
172
172
  ) -> "LLMBasedRouter":
173
173
  """Loads trained component (see parent class for full docstring)."""
174
+
174
175
  # Perform health check on the resolved LLM client config
175
176
  llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
176
177
  cls.perform_llm_health_check(
@@ -16,6 +16,9 @@ from rasa.dialogue_understanding.commands.error_command import ErrorCommand
16
16
  from rasa.dialogue_understanding.commands.free_form_answer_command import (
17
17
  FreeFormAnswerCommand,
18
18
  )
19
+ from rasa.dialogue_understanding.commands.handle_code_change_command import (
20
+ HandleCodeChangeCommand,
21
+ )
19
22
  from rasa.dialogue_understanding.commands.human_handoff_command import (
20
23
  HumanHandoffCommand,
21
24
  )
@@ -49,6 +52,7 @@ __all__ = [
49
52
  "SetSlotCommand",
50
53
  "StartFlowCommand",
51
54
  "HumanHandoffCommand",
55
+ "HandleCodeChangeCommand",
52
56
  "CorrectSlotsCommand",
53
57
  "CorrectedSlot",
54
58
  "ErrorCommand",
@@ -95,7 +95,9 @@ class CancelFlowCommand(Command):
95
95
  original_stack = original_tracker.stack
96
96
 
97
97
  applied_events: List[Event] = []
98
- user_frame = top_user_flow_frame(original_stack)
98
+ user_frame = top_user_flow_frame(
99
+ original_stack, ignore_call_and_link_frames=False
100
+ )
99
101
  current_flow = user_frame.flow(all_flows) if user_frame else None
100
102
 
101
103
  if not current_flow:
@@ -231,16 +231,6 @@ class CorrectSlotsCommand(Command):
231
231
  proposed_slots, all_flows, tracker
232
232
  )
233
233
 
234
- if not earliest_collect and not is_reset_only:
235
- # if we could not find any step in the flow, where the slots were
236
- # previously set, and we also don't want to reset the slots, do
237
- # not correct the slots.
238
- structlogger.debug(
239
- "command_executor.skip_correction",
240
- is_reset_only=is_reset_only,
241
- )
242
- return None
243
-
244
234
  return CorrectionPatternFlowStackFrame(
245
235
  is_reset_only=is_reset_only,
246
236
  corrected_slots=proposed_slots,
@@ -13,6 +13,7 @@ from rasa.dialogue_understanding.commands.command_syntax_manager import (
13
13
  )
14
14
  from rasa.dialogue_understanding.commands.utils import (
15
15
  clean_extracted_value,
16
+ find_default_flows_collecting_slot,
16
17
  get_nullable_slot_value,
17
18
  )
18
19
  from rasa.dialogue_understanding.patterns.collect_information import (
@@ -136,6 +137,11 @@ class SetSlotCommand(Command):
136
137
  ):
137
138
  # Get the other predicted flows from the most recent message on the tracker.
138
139
  predicted_flows = get_flows_predicted_to_start_from_tracker(tracker)
140
+ if not predicted_flows:
141
+ # If no predicted flows, check for default flows collecting the slot.
142
+ predicted_flows = find_default_flows_collecting_slot(
143
+ self.name, all_flows
144
+ )
139
145
  use_slot_fill = any(
140
146
  step.collect == self.name and not step.ask_before_filling
141
147
  for flow in all_flows.underlying_flows
@@ -7,18 +7,18 @@ from rasa.dialogue_understanding.patterns.validate_slot import (
7
7
  )
8
8
  from rasa.shared.constants import ACTION_ASK_PREFIX, UTTER_ASK_PREFIX
9
9
  from rasa.shared.core.events import Event, SlotSet
10
+ from rasa.shared.core.flows import FlowsList
10
11
  from rasa.shared.core.slots import Slot
11
12
  from rasa.shared.core.trackers import DialogueStateTracker
12
13
 
13
14
  if TYPE_CHECKING:
14
15
  from rasa.dialogue_understanding.commands import StartFlowCommand
15
- from rasa.shared.core.flows import FlowsList
16
16
 
17
17
  structlogger = structlog.get_logger()
18
18
 
19
19
 
20
20
  def start_flow_by_name(
21
- flow_name: str, flows: "FlowsList"
21
+ flow_name: str, flows: FlowsList
22
22
  ) -> Optional["StartFlowCommand"]:
23
23
  from rasa.dialogue_understanding.commands import StartFlowCommand
24
24
 
@@ -126,3 +126,27 @@ def create_validate_frames_from_slot_set_events(
126
126
  validate_frames.append(frame)
127
127
 
128
128
  return tracker, validate_frames
129
+
130
+
131
+ def find_default_flows_collecting_slot(
132
+ slot_name: str, all_flows: FlowsList
133
+ ) -> List[str]:
134
+ """Find default flows that have collect steps matching the specified slot name.
135
+
136
+ Args:
137
+ slot_name: The name of the slot to search for.
138
+ all_flows: All flows in the assistant.
139
+
140
+ Returns:
141
+ List of flow IDs for default flows that collect the specified slot
142
+ without asking before filling.
143
+ """
144
+ return [
145
+ flow.id
146
+ for flow in all_flows.underlying_flows
147
+ if flow.is_rasa_default_flow
148
+ and any(
149
+ step.collect == slot_name and not step.ask_before_filling
150
+ for step in flow.get_collect_steps()
151
+ )
152
+ ]
@@ -8,6 +8,7 @@ from rasa.dialogue_understanding.commands import (
8
8
  Command,
9
9
  CorrectSlotsCommand,
10
10
  ErrorCommand,
11
+ HandleCodeChangeCommand,
11
12
  SetSlotCommand,
12
13
  StartFlowCommand,
13
14
  )
@@ -113,7 +114,7 @@ class CommandGenerator:
113
114
  # slot asked by the active collect step.
114
115
  # Or return a CannotHandleCommand if no matching command is found.
115
116
  commands = self._filter_commands_during_force_slot_filling(
116
- commands, available_flows, tracker
117
+ commands, flows, tracker
117
118
  )
118
119
 
119
120
  commands_dicts = [command.as_dict() for command in commands]
@@ -382,31 +383,40 @@ class CommandGenerator:
382
383
  @staticmethod
383
384
  def _filter_commands_during_force_slot_filling(
384
385
  commands: List[Command],
385
- available_flows: FlowsList,
386
+ flows: FlowsList,
386
387
  tracker: Optional[DialogueStateTracker] = None,
387
388
  ) -> List[Command]:
388
389
  """Filter commands during a collect step that has set `force_slot_filling`.
389
390
 
390
391
  Args:
391
392
  commands: The commands to filter.
392
- available_flows: The available flows.
393
+ flows: All flows.
393
394
  tracker: The tracker.
394
395
 
395
396
  Returns:
396
397
  The filtered commands.
397
398
  """
398
399
  from rasa.dialogue_understanding.processor.command_processor import (
400
+ find_updated_flows,
399
401
  get_current_collect_step,
400
402
  )
401
403
 
402
404
  if tracker is None:
403
- structlogger.error(
405
+ structlogger.debug(
404
406
  "command_generator.filter_commands_during_force_slot_filling.tracker_not_found",
405
407
  )
406
408
  return commands
407
409
 
410
+ updated_flows = find_updated_flows(tracker, flows)
411
+ if updated_flows:
412
+ structlogger.debug(
413
+ "command_generator.filter_commands_during_force_slot_filling.running_flows_were_updated",
414
+ updated_flow_ids=updated_flows,
415
+ )
416
+ return [HandleCodeChangeCommand()]
417
+
408
418
  stack = tracker.stack
409
- step = get_current_collect_step(stack, available_flows)
419
+ step = get_current_collect_step(stack, flows)
410
420
 
411
421
  if step is None or not step.force_slot_filling:
412
422
  return commands
@@ -1,8 +1,6 @@
1
1
  from abc import ABC, abstractmethod
2
- from asyncio import Lock
3
2
  from functools import lru_cache
4
3
  from typing import Any, Dict, List, Optional, Set, Text, Tuple, Union
5
- from uuid import UUID, uuid4
6
4
 
7
5
  import structlog
8
6
  from jinja2 import Environment, Template, select_autoescape
@@ -91,9 +89,6 @@ class LLMBasedCommandGenerator(
91
89
  else:
92
90
  self.flow_retrieval = None
93
91
 
94
- self.sender_id_to_session_id_mapping: Dict[str, UUID] = {}
95
- self._lock = Lock()
96
-
97
92
  ### Abstract methods
98
93
  @staticmethod
99
94
  @abstractmethod
@@ -230,7 +225,8 @@ class LLMBasedCommandGenerator(
230
225
 
231
226
  @lru_cache
232
227
  def compile_template(self, template: str) -> Template:
233
- """Compile the prompt template and register custom filters.
228
+ """
229
+ Compile the prompt template and register custom filters.
234
230
  Compiling the template is an expensive operation,
235
231
  so we cache the result.
236
232
  """
@@ -332,9 +328,7 @@ class LLMBasedCommandGenerator(
332
328
 
333
329
  @measure_llm_latency
334
330
  async def invoke_llm(
335
- self,
336
- prompt: Union[List[dict], List[str], str],
337
- metadata: Optional[Dict[str, Any]] = None,
331
+ self, prompt: Union[List[dict], List[str], str]
338
332
  ) -> Optional[LLMResponse]:
339
333
  """Use LLM to generate a response.
340
334
 
@@ -347,7 +341,6 @@ class LLMBasedCommandGenerator(
347
341
  - a list of messages. Each message is a string and will be formatted
348
342
  as a user message.
349
343
  - a single message as a string which will be formatted as user message.
350
- metadata: Optional metadata to be passed to the LLM call.
351
344
 
352
345
  Returns:
353
346
  An LLMResponse object.
@@ -359,7 +352,7 @@ class LLMBasedCommandGenerator(
359
352
  self.config.get(LLM_CONFIG_KEY), self.get_default_llm_config()
360
353
  )
361
354
  try:
362
- return await llm.acompletion(prompt, metadata)
355
+ return await llm.acompletion(prompt)
363
356
  except Exception as e:
364
357
  # unfortunately, langchain does not wrap LLM exceptions which means
365
358
  # we have to catch all exceptions here
@@ -662,7 +655,3 @@ class LLMBasedCommandGenerator(
662
655
  def get_default_llm_config() -> Dict[str, Any]:
663
656
  """Get the default LLM config for the command generator."""
664
657
  return DEFAULT_LLM_CONFIG
665
-
666
- async def _get_or_create_session_id(self, sender_id: str) -> UUID:
667
- async with self._lock:
668
- return self.sender_id_to_session_id_mapping.setdefault(sender_id, uuid4())
@@ -55,9 +55,7 @@ class LLMCommandGenerator(SingleStepLLMCommandGenerator):
55
55
  )
56
56
 
57
57
  async def invoke_llm(
58
- self,
59
- prompt: Union[List[dict], List[str], str],
60
- metadata: Optional[Dict[str, Any]] = None,
58
+ self, prompt: Union[List[dict], List[str], str]
61
59
  ) -> Optional[LLMResponse]:
62
60
  try:
63
61
  return await super().invoke_llm(prompt)
@@ -42,9 +42,6 @@ from rasa.engine.storage.resource import Resource
42
42
  from rasa.engine.storage.storage import ModelStorage
43
43
  from rasa.shared.constants import (
44
44
  EMBEDDINGS_CONFIG_KEY,
45
- LANGFUSE_CUSTOM_METADATA_DICT,
46
- LANGFUSE_METADATA_SESSION_ID,
47
- LANGFUSE_TAGS,
48
45
  RASA_PATTERN_CANNOT_HANDLE_NOT_SUPPORTED,
49
46
  ROUTE_TO_CALM_SLOT,
50
47
  )
@@ -110,7 +107,7 @@ structlogger = structlog.get_logger()
110
107
  )
111
108
  @deprecated(
112
109
  reason=(
113
- "The MultiStepLLMCommandGenerator is deprecated and will be removed in "
110
+ "The MultiStepLLMCommandGenerator is deprecated and will be removed in "
114
111
  "Rasa `4.0.0`."
115
112
  )
116
113
  )
@@ -495,20 +492,7 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
495
492
  prompt=prompt,
496
493
  )
497
494
 
498
- if tracker:
499
- session_id = str(await self._get_or_create_session_id(tracker.sender_id))
500
- else:
501
- session_id = "unknown"
502
- metadata = {
503
- LANGFUSE_METADATA_SESSION_ID: session_id,
504
- LANGFUSE_CUSTOM_METADATA_DICT: {
505
- "component": self.__class__.__name__,
506
- "function": "_predict_commands_for_active_flow",
507
- },
508
- LANGFUSE_TAGS: [self.__class__.__name__],
509
- }
510
-
511
- response = await self.invoke_llm(prompt, metadata)
495
+ response = await self.invoke_llm(prompt)
512
496
  llm_response = LLMResponse.ensure_llm_response(response)
513
497
  actions = None
514
498
  if llm_response and llm_response.choices:
@@ -562,20 +546,8 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
562
546
  ".prompt_rendered",
563
547
  prompt=prompt,
564
548
  )
565
- if tracker:
566
- session_id = str(await self._get_or_create_session_id(tracker.sender_id))
567
- else:
568
- session_id = "unknown"
569
- metadata = {
570
- LANGFUSE_METADATA_SESSION_ID: session_id,
571
- LANGFUSE_CUSTOM_METADATA_DICT: {
572
- "component": self.__class__.__name__,
573
- "function": "_predict_commands_for_handling_flows",
574
- },
575
- LANGFUSE_TAGS: [self.__class__.__name__],
576
- }
577
549
 
578
- response = await self.invoke_llm(prompt, metadata)
550
+ response = await self.invoke_llm(prompt)
579
551
  llm_response = LLMResponse.ensure_llm_response(response)
580
552
  actions = None
581
553
  if llm_response and llm_response.choices:
@@ -664,20 +636,8 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
664
636
  flow=newly_started_flow.id,
665
637
  prompt=prompt,
666
638
  )
667
- if tracker:
668
- session_id = str(await self._get_or_create_session_id(tracker.sender_id))
669
- else:
670
- session_id = "unknown"
671
- metadata = {
672
- LANGFUSE_METADATA_SESSION_ID: session_id,
673
- LANGFUSE_CUSTOM_METADATA_DICT: {
674
- "component": self.__class__.__name__,
675
- "function": "_predict_commands_for_newly_started_flow",
676
- },
677
- LANGFUSE_TAGS: [self.__class__.__name__],
678
- }
679
639
 
680
- response = await self.invoke_llm(prompt, metadata)
640
+ response = await self.invoke_llm(prompt)
681
641
  llm_response = LLMResponse.ensure_llm_response(response)
682
642
  actions = None
683
643
  if llm_response and llm_response.choices:
@@ -47,9 +47,6 @@ from rasa.shared.constants import (
47
47
  AWS_BEDROCK_PROVIDER,
48
48
  AZURE_OPENAI_PROVIDER,
49
49
  EMBEDDINGS_CONFIG_KEY,
50
- LANGFUSE_CUSTOM_METADATA_DICT,
51
- LANGFUSE_METADATA_SESSION_ID,
52
- LANGFUSE_TAGS,
53
50
  MAX_COMPLETION_TOKENS_CONFIG_KEY,
54
51
  PROMPT_TEMPLATE_CONFIG_KEY,
55
52
  ROUTE_TO_CALM_SLOT,
@@ -369,17 +366,7 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
369
366
  prompt=flow_prompt,
370
367
  )
371
368
 
372
- if tracker:
373
- session_id = str(await self._get_or_create_session_id(tracker.sender_id))
374
- else:
375
- session_id = "unknown"
376
- metadata = {
377
- LANGFUSE_METADATA_SESSION_ID: session_id,
378
- LANGFUSE_CUSTOM_METADATA_DICT: {"component": self.__class__.__name__},
379
- LANGFUSE_TAGS: [self.__class__.__name__],
380
- }
381
-
382
- response = await self.invoke_llm(flow_prompt, metadata)
369
+ response = await self.invoke_llm(flow_prompt)
383
370
  llm_response = LLMResponse.ensure_llm_response(response)
384
371
  # The check for 'None' maintains compatibility with older versions
385
372
  # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
@@ -64,12 +64,6 @@ from rasa.shared.nlu.constants import COMMANDS
64
64
 
65
65
  structlogger = structlog.get_logger()
66
66
 
67
- CANNOT_HANDLE_REASON = (
68
- "A command generator attempted to set a slot "
69
- "with a value extracted by an extractor "
70
- "that is incompatible with the slot mapping type."
71
- )
72
-
73
67
 
74
68
  def contains_command(commands: List[Command], typ: Type[Command]) -> bool:
75
69
  """Check if a list of commands contains a command of a given type.
@@ -214,18 +208,18 @@ def execute_commands(
214
208
  commands: List[Command] = get_commands_from_tracker(tracker)
215
209
  original_tracker = tracker.copy()
216
210
 
217
- commands = clean_up_commands(
218
- commands, tracker, all_flows, execution_context, story_graph, domain
219
- )
220
-
221
211
  updated_flows = find_updated_flows(tracker, all_flows)
222
212
  if updated_flows:
223
- # Override commands
213
+ # if there are updated flows, we need to handle the code change
224
214
  structlogger.debug(
225
215
  "command_processor.execute_commands.running_flows_were_updated",
226
216
  updated_flow_ids=updated_flows,
227
217
  )
228
218
  commands = [HandleCodeChangeCommand()]
219
+ else:
220
+ commands = clean_up_commands(
221
+ commands, tracker, all_flows, execution_context, story_graph, domain
222
+ )
229
223
 
230
224
  # store current flow hashes if they changed
231
225
  new_hashes = calculate_flow_fingerprints(all_flows)
@@ -405,7 +399,12 @@ def clean_up_commands(
405
399
  """
406
400
  domain = domain if domain else Domain.empty()
407
401
 
408
- slots_so_far, active_flow = filled_slots_for_active_flow(tracker, all_flows)
402
+ slots_so_far, _ = filled_slots_for_active_flow(tracker, all_flows)
403
+
404
+ # update the slots so far with the slots that were set in the tracker
405
+ slots_so_far.update(
406
+ {event.key for event in tracker.events if isinstance(event, SlotSet)}
407
+ )
409
408
 
410
409
  clean_commands: List[Command] = []
411
410
 
@@ -588,6 +587,11 @@ def clean_up_slot_command(
588
587
  "command_processor.clean_up_slot_command.skip_command_slot_not_in_domain",
589
588
  command=command,
590
589
  )
590
+ resulting_commands.append(
591
+ CannotHandleCommand(
592
+ reason="The slot predicted by the LLM is not defined in the domain."
593
+ )
594
+ )
591
595
  return resulting_commands
592
596
 
593
597
  if not should_slot_be_set(slot, command, resulting_commands):
@@ -606,7 +610,10 @@ def clean_up_slot_command(
606
610
  for command in resulting_commands
607
611
  )
608
612
 
609
- cannot_handle = CannotHandleCommand(reason=CANNOT_HANDLE_REASON)
613
+ cannot_handle = CannotHandleCommand(
614
+ reason="A command generator attempted to set a slot with a value extracted "
615
+ "by an extractor that is incompatible with the slot mapping type."
616
+ )
610
617
  if not slot_command_exists_already and cannot_handle not in resulting_commands:
611
618
  resulting_commands.append(cannot_handle)
612
619
 
@@ -640,9 +647,9 @@ def clean_up_slot_command(
640
647
  resulting_commands.append(command)
641
648
  return resulting_commands
642
649
 
643
- if (slot := tracker.slots.get(command.name)) is not None and slot.value == str(
644
- command.value
645
- ):
650
+ if (slot := tracker.slots.get(command.name)) is not None and str(
651
+ slot.value
652
+ ) == str(command.value):
646
653
  # the slot is already set, we don't need to set it again
647
654
  structlogger.debug(
648
655
  "command_processor.clean_up_slot_command.skip_command_slot_already_set",
@@ -53,7 +53,8 @@ class FlowStackFrameType(str, Enum):
53
53
  typ: The string to create the `FlowStackFrameType` from.
54
54
 
55
55
  Returns:
56
- The created `FlowStackFrameType`."""
56
+ The created `FlowStackFrameType`.
57
+ """
57
58
  if typ is None:
58
59
  return FlowStackFrameType.REGULAR
59
60
  elif typ == FlowStackFrameType.INTERRUPT.value:
@@ -107,7 +108,8 @@ class BaseFlowStackFrame(DialogueStackFrame):
107
108
  all_flows: All flows in the assistant.
108
109
 
109
110
  Returns:
110
- The current flow."""
111
+ The current flow.
112
+ """
111
113
  flow = all_flows.flow_by_id(self.flow_id)
112
114
  if not flow:
113
115
  # we shouldn't ever end up with a frame that belongs to a non
@@ -122,9 +124,20 @@ class BaseFlowStackFrame(DialogueStackFrame):
122
124
  all_flows: All flows in the assistant.
123
125
 
124
126
  Returns:
125
- The current flow step."""
127
+ The current flow step.
128
+ """
126
129
  flow = self.flow(all_flows)
127
- step = flow.step_by_id(self.step_id)
130
+
131
+ step_id = self.step_id
132
+ # in 3.11.4 we added the flow_id as a prefix to the step_id
133
+ # this causes issues when loading old dialogues as the prefix is missing
134
+ # (see https://rasahq.atlassian.net/jira/software/c/projects/ENG/boards/43?selectedIssue=ENG-1939)
135
+ # so we try to find the step by adding the flow prefix to old step_ids as well
136
+ # TODO: remove this in 4.0.0
137
+ alternative_step_id = f"{self.flow_id}_{self.step_id}"
138
+
139
+ step = flow.step_by_id(step_id) or flow.step_by_id(alternative_step_id)
140
+
128
141
  if not step:
129
142
  # we shouldn't ever end up with a frame that belongs to a non
130
143
  # existing step, but if we do, we should raise an error
@@ -209,7 +209,9 @@ def get_collect_steps_excluding_ask_before_filling_for_active_flow(
209
209
  All collect steps that are part of the current active flow,
210
210
  excluding the collect steps that have to be asked before filling.
211
211
  """
212
- active_frame = top_user_flow_frame(dialogue_stack)
212
+ active_frame = top_user_flow_frame(
213
+ dialogue_stack, ignore_call_and_link_frames=False
214
+ )
213
215
  if active_frame is None:
214
216
  return set()
215
217
  active_flow = active_frame.flow(all_flows)