rasa-pro 3.11.3a1.dev7__py3-none-any.whl → 3.11.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of rasa-pro has been flagged as potentially problematic.

Files changed (135)
  1. rasa/core/actions/action.py +10 -17
  2. rasa/core/actions/direct_custom_actions_executor.py +0 -1
  3. rasa/core/channels/__init__.py +0 -2
  4. rasa/core/channels/inspector/dist/assets/Tableau10-1b767f5e.js +1 -0
  5. rasa/core/channels/inspector/dist/assets/arc-f0f8bd46.js +1 -0
  6. rasa/core/channels/inspector/dist/assets/blockDiagram-38ab4fdb-7162c77d.js +118 -0
  7. rasa/core/channels/inspector/dist/assets/c4Diagram-3d4e48cf-b1d0d098.js +10 -0
  8. rasa/core/channels/inspector/dist/assets/channel-e265ea59.js +1 -0
  9. rasa/core/channels/inspector/dist/assets/classDiagram-70f12bd4-807a1b27.js +2 -0
  10. rasa/core/channels/inspector/dist/assets/classDiagram-v2-f2320105-5238dcdb.js +2 -0
  11. rasa/core/channels/inspector/dist/assets/clone-21f8a43d.js +1 -0
  12. rasa/core/channels/inspector/dist/assets/{createText-62fc7601-ef476ecd.js → createText-2e5e7dd3-75dfaa67.js} +1 -1
  13. rasa/core/channels/inspector/dist/assets/edges-e0da2a9e-df20501d.js +4 -0
  14. rasa/core/channels/inspector/dist/assets/{erDiagram-9d236eb7-fac75185.js → erDiagram-9861fffd-13cf4797.js} +4 -4
  15. rasa/core/channels/inspector/dist/assets/flowDb-956e92f1-a4991264.js +10 -0
  16. rasa/core/channels/inspector/dist/assets/flowDiagram-66a62f08-ccecf773.js +4 -0
  17. rasa/core/channels/inspector/dist/assets/flowDiagram-v2-96b9c2cf-5c8ce12d.js +1 -0
  18. rasa/core/channels/inspector/dist/assets/flowchart-elk-definition-4a651766-b5801783.js +139 -0
  19. rasa/core/channels/inspector/dist/assets/ganttDiagram-c361ad54-161e079a.js +257 -0
  20. rasa/core/channels/inspector/dist/assets/gitGraphDiagram-72cf32ee-f38e86a4.js +70 -0
  21. rasa/core/channels/inspector/dist/assets/graph-be6ef5d8.js +1 -0
  22. rasa/core/channels/inspector/dist/assets/index-3862675e-d9ce8994.js +1 -0
  23. rasa/core/channels/inspector/dist/assets/{index-37817b51.js → index-7794b245.js} +200 -195
  24. rasa/core/channels/inspector/dist/assets/{infoDiagram-736b4530-6b731386.js → infoDiagram-f8f76790-5000a3dc.js} +1 -1
  25. rasa/core/channels/inspector/dist/assets/{journeyDiagram-df861f2b-e8579ac6.js → journeyDiagram-49397b02-8ef0a17a.js} +4 -4
  26. rasa/core/channels/inspector/dist/assets/katex-498eb57e.js +261 -0
  27. rasa/core/channels/inspector/dist/assets/layout-d649bc98.js +1 -0
  28. rasa/core/channels/inspector/dist/assets/{line-dc73d3fc.js → line-95add810.js} +1 -1
  29. rasa/core/channels/inspector/dist/assets/linear-f6025094.js +1 -0
  30. rasa/core/channels/inspector/dist/assets/mindmap-definition-fc14e90a-2e8531c4.js +312 -0
  31. rasa/core/channels/inspector/dist/assets/{pieDiagram-dbbf0591-bdf5f29b.js → pieDiagram-8a3498a8-918adfdb.js} +7 -7
  32. rasa/core/channels/inspector/dist/assets/{quadrantDiagram-4d7f4fd6-c7a0cbe4.js → quadrantDiagram-120e2f19-cbd01797.js} +1 -1
  33. rasa/core/channels/inspector/dist/assets/{requirementDiagram-6fc4c22a-7ec5410f.js → requirementDiagram-deff3bca-6a8b877b.js} +2 -2
  34. rasa/core/channels/inspector/dist/assets/sankeyDiagram-04a897e0-c377c3fe.js +8 -0
  35. rasa/core/channels/inspector/dist/assets/sequenceDiagram-704730f1-ab9e9b7f.js +122 -0
  36. rasa/core/channels/inspector/dist/assets/stateDiagram-587899a1-5e6ae67d.js +1 -0
  37. rasa/core/channels/inspector/dist/assets/stateDiagram-v2-d93cdb3a-40643476.js +1 -0
  38. rasa/core/channels/inspector/dist/assets/{styles-9c745c82-7a4e0e61.js → styles-6aaf32cf-afb8d108.js} +1 -1
  39. rasa/core/channels/inspector/dist/assets/styles-9a916d00-7edc9423.js +160 -0
  40. rasa/core/channels/inspector/dist/assets/styles-c10674c1-c1d8f7e9.js +116 -0
  41. rasa/core/channels/inspector/dist/assets/svgDrawCommon-08f97a94-f494b2ef.js +1 -0
  42. rasa/core/channels/inspector/dist/assets/{timeline-definition-5b62e21b-01ea12df.js → timeline-definition-85554ec2-11c7cdd0.js} +3 -3
  43. rasa/core/channels/inspector/dist/assets/{xychartDiagram-2b33534f-89407137.js → xychartDiagram-e933f94c-3f191ec1.js} +3 -3
  44. rasa/core/channels/inspector/dist/index.html +3 -1
  45. rasa/core/channels/inspector/index.html +2 -0
  46. rasa/core/channels/inspector/package.json +10 -3
  47. rasa/core/channels/inspector/src/App.tsx +1 -4
  48. rasa/core/channels/inspector/yarn.lock +89 -99
  49. rasa/core/channels/socketio.py +23 -2
  50. rasa/core/channels/voice_ready/audiocodes.py +42 -23
  51. rasa/core/nlg/contextual_response_rephraser.py +9 -62
  52. rasa/core/policies/enterprise_search_policy.py +12 -77
  53. rasa/core/policies/flows/flow_executor.py +2 -26
  54. rasa/core/processor.py +8 -11
  55. rasa/dialogue_understanding/generator/command_generator.py +5 -120
  56. rasa/dialogue_understanding/generator/llm_based_command_generator.py +5 -5
  57. rasa/dialogue_understanding/generator/llm_command_generator.py +1 -2
  58. rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +4 -50
  59. rasa/dialogue_understanding/generator/nlu_command_adapter.py +0 -3
  60. rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +3 -16
  61. rasa/dialogue_understanding/patterns/continue_interrupted.py +9 -0
  62. rasa/dialogue_understanding/stack/utils.py +1 -0
  63. rasa/e2e_test/e2e_test_case.py +1 -2
  64. rasa/e2e_test/utils/e2e_yaml_utils.py +1 -1
  65. rasa/e2e_test/utils/io.py +1 -1
  66. rasa/e2e_test/utils/validation.py +2 -100
  67. rasa/engine/recipes/default_recipe.py +49 -63
  68. rasa/engine/recipes/graph_recipe.py +7 -8
  69. rasa/hooks.py +9 -14
  70. rasa/model_manager/socket_bridge.py +2 -7
  71. rasa/model_manager/warm_rasa_process.py +4 -9
  72. rasa/model_training.py +1 -2
  73. rasa/nlu/classifiers/fallback_classifier.py +0 -3
  74. rasa/plugin.py +0 -11
  75. rasa/server.py +40 -2
  76. rasa/shared/constants.py +6 -26
  77. rasa/shared/core/events.py +8 -8
  78. rasa/shared/core/flows/flow.py +4 -4
  79. rasa/shared/core/flows/flow_step.py +15 -10
  80. rasa/shared/core/flows/flow_step_links.py +20 -12
  81. rasa/shared/core/flows/flow_step_sequence.py +5 -3
  82. rasa/shared/core/flows/steps/action.py +3 -2
  83. rasa/shared/core/flows/steps/call.py +3 -3
  84. rasa/shared/core/flows/steps/collect.py +6 -3
  85. rasa/shared/core/flows/steps/continuation.py +3 -1
  86. rasa/shared/core/flows/steps/end.py +3 -1
  87. rasa/shared/core/flows/steps/internal.py +2 -1
  88. rasa/shared/core/flows/steps/link.py +5 -3
  89. rasa/shared/core/flows/steps/no_operation.py +5 -3
  90. rasa/shared/core/flows/steps/set_slots.py +3 -2
  91. rasa/shared/core/flows/steps/start.py +3 -1
  92. rasa/shared/nlu/constants.py +0 -7
  93. rasa/shared/providers/llm/llm_response.py +1 -42
  94. rasa/shared/utils/llm.py +1 -1
  95. rasa/shared/utils/schemas/events.py +1 -1
  96. rasa/shared/utils/yaml.py +5 -6
  97. rasa/studio/upload.py +5 -19
  98. rasa/telemetry.py +33 -40
  99. rasa/tracing/instrumentation/attribute_extractors.py +9 -12
  100. rasa/validator.py +41 -32
  101. rasa/version.py +1 -1
  102. {rasa_pro-3.11.3a1.dev7.dist-info → rasa_pro-3.11.5.dist-info}/METADATA +7 -7
  103. {rasa_pro-3.11.3a1.dev7.dist-info → rasa_pro-3.11.5.dist-info}/RECORD +106 -107
  104. rasa/core/channels/inspector/dist/assets/arc-861ddd57.js +0 -1
  105. rasa/core/channels/inspector/dist/assets/c4Diagram-d0fbc5ce-921f02db.js +0 -10
  106. rasa/core/channels/inspector/dist/assets/classDiagram-936ed81e-b436c4f8.js +0 -2
  107. rasa/core/channels/inspector/dist/assets/classDiagram-v2-c3cb15f1-511a23cb.js +0 -2
  108. rasa/core/channels/inspector/dist/assets/edges-f2ad444c-f1878e0a.js +0 -4
  109. rasa/core/channels/inspector/dist/assets/flowDb-1972c806-201c5bbc.js +0 -6
  110. rasa/core/channels/inspector/dist/assets/flowDiagram-7ea5b25a-f904ae41.js +0 -4
  111. rasa/core/channels/inspector/dist/assets/flowDiagram-v2-855bc5b3-b080d6f2.js +0 -1
  112. rasa/core/channels/inspector/dist/assets/flowchart-elk-definition-abe16c3d-1813da66.js +0 -139
  113. rasa/core/channels/inspector/dist/assets/ganttDiagram-9b5ea136-872af172.js +0 -266
  114. rasa/core/channels/inspector/dist/assets/gitGraphDiagram-99d0ae7c-34a0af5a.js +0 -70
  115. rasa/core/channels/inspector/dist/assets/index-2c4b9a3b-42ba3e3d.js +0 -1
  116. rasa/core/channels/inspector/dist/assets/layout-89e6403a.js +0 -1
  117. rasa/core/channels/inspector/dist/assets/linear-f5b1d2bc.js +0 -1
  118. rasa/core/channels/inspector/dist/assets/mindmap-definition-beec6740-82cb74fa.js +0 -109
  119. rasa/core/channels/inspector/dist/assets/sankeyDiagram-8f13d901-caee5554.js +0 -8
  120. rasa/core/channels/inspector/dist/assets/sequenceDiagram-b655622a-2935f8db.js +0 -122
  121. rasa/core/channels/inspector/dist/assets/stateDiagram-59f0c015-8f5d9693.js +0 -1
  122. rasa/core/channels/inspector/dist/assets/stateDiagram-v2-2b26beab-d565d1de.js +0 -1
  123. rasa/core/channels/inspector/dist/assets/styles-080da4f6-75ad421d.js +0 -110
  124. rasa/core/channels/inspector/dist/assets/styles-3dcbcfbf-7e764226.js +0 -159
  125. rasa/core/channels/inspector/dist/assets/svgDrawCommon-4835440b-4019d1bf.js +0 -1
  126. rasa/core/channels/studio_chat.py +0 -192
  127. rasa/dialogue_understanding/constants.py +0 -1
  128. rasa/dialogue_understanding/utils.py +0 -21
  129. rasa/dialogue_understanding_test/__init__.py +0 -0
  130. rasa/dialogue_understanding_test/constants.py +0 -15
  131. rasa/dialogue_understanding_test/du_test_case.py +0 -118
  132. rasa/dialogue_understanding_test/du_test_result.py +0 -11
  133. {rasa_pro-3.11.3a1.dev7.dist-info → rasa_pro-3.11.5.dist-info}/NOTICE +0 -0
  134. {rasa_pro-3.11.3a1.dev7.dist-info → rasa_pro-3.11.5.dist-info}/WHEEL +0 -0
  135. {rasa_pro-3.11.3a1.dev7.dist-info → rasa_pro-3.11.5.dist-info}/entry_points.txt +0 -0
rasa/core/policies/enterprise_search_policy.py CHANGED
@@ -2,7 +2,6 @@ import importlib.resources
 import json
 import re
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Text
-
 import dotenv
 import structlog
 from jinja2 import Template
@@ -64,19 +63,11 @@ from rasa.shared.core.events import Event, UserUttered, BotUttered
 from rasa.shared.core.generator import TrackerWithCachedStates
 from rasa.shared.core.trackers import DialogueStateTracker, EventVerbosity
 from rasa.shared.exceptions import RasaException, FileIOException
-from rasa.shared.nlu.constants import (
-    PROMPTS,
-    KEY_USER_PROMPT,
-    KEY_LLM_RESPONSE_METADATA,
-    KEY_PROMPT_NAME,
-    KEY_COMPONENT_NAME,
-)
 from rasa.shared.nlu.training_data.training_data import TrainingData
 from rasa.shared.providers.embedding._langchain_embedding_client_adapter import (
     _LangchainEmbeddingClientAdapter,
 )
 from rasa.shared.providers.llm.llm_client import LLMClient
-from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.cli import print_error_and_exit
 from rasa.shared.utils.health_check.embeddings_health_check_mixin import (
     EmbeddingsHealthCheckMixin,
@@ -281,43 +272,6 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
         # Wrap the embedding client in the adapter
         return _LangchainEmbeddingClientAdapter(client)
 
-    @classmethod
-    def _add_prompt_and_llm_response_to_latest_message(
-        cls,
-        tracker: DialogueStateTracker,
-        prompt_name: str,
-        user_prompt: str,
-        llm_response: Optional[LLMResponse] = None,
-    ) -> None:
-        """Stores the prompt and LLMResponse metadata in the tracker.
-
-        Args:
-            tracker: The DialogueStateTracker containing the current conversation state.
-            prompt_name: A name identifying prompt usage.
-            user_prompt: The user prompt that was sent to the LLM.
-            llm_response: The response object from the LLM (None if no response).
-        """
-        from rasa.dialogue_understanding.utils import record_commands_and_prompts
-
-        if not record_commands_and_prompts:
-            return
-
-        if not tracker.latest_message:
-            return
-
-        parse_data = tracker.latest_message.parse_data
-        if PROMPTS not in parse_data:
-            parse_data[PROMPTS] = []  # type: ignore[literal-required]
-
-        prompt_data: Dict[Text, Any] = {
-            KEY_COMPONENT_NAME: cls.__name__,
-            KEY_PROMPT_NAME: prompt_name,
-            KEY_USER_PROMPT: user_prompt,
-            KEY_LLM_RESPONSE_METADATA: llm_response.to_dict() if llm_response else None,
-        }
-
-        parse_data[PROMPTS].append(prompt_data)  # type: ignore[literal-required]
-
     def train(  # type: ignore[override]
         self,
         training_trackers: List[TrackerWithCachedStates],
@@ -544,27 +498,13 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
 
         if self.use_llm:
             prompt = self._render_prompt(tracker, documents.results)
-            llm_response = await self._generate_llm_answer(llm, prompt)
-            llm_response = LLMResponse.ensure_llm_response(llm_response)
-
-            self._add_prompt_and_llm_response_to_latest_message(
-                tracker=tracker,
-                prompt_name="enterprise_search_prompt",
-                user_prompt=prompt,
-                llm_response=llm_response,
-            )
+            llm_answer = await self._generate_llm_answer(llm, prompt)
 
-            if llm_response is None or not llm_response.choices:
-                logger.debug(f"{logger_key}.no_llm_response")
-                response = None
-            else:
-                llm_answer = llm_response.choices[0]
+            if self.citation_enabled:
+                llm_answer = self.post_process_citations(llm_answer)
 
-                if self.citation_enabled:
-                    llm_answer = self.post_process_citations(llm_answer)
-
-                logger.debug(f"{logger_key}.llm_answer", llm_answer=llm_answer)
-                response = llm_answer
+            logger.debug(f"{logger_key}.llm_answer", llm_answer=llm_answer)
+            response = llm_answer
         else:
             response = documents.results[0].metadata.get("answer", None)
             if not response:
@@ -576,6 +516,7 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
                 "enterprise_search_policy.predict_action_probabilities.no_llm",
                 search_results=documents,
             )
+
         if response is None:
             return self._create_prediction_internal_error(domain, tracker)
 
@@ -640,18 +581,10 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
 
     async def _generate_llm_answer(
         self, llm: LLMClient, prompt: Text
-    ) -> Optional[LLMResponse]:
-        """Fetches an LLM completion for the provided prompt.
-
-        Args:
-            llm: The LLM client used to get the completion.
-            prompt: The prompt text to send to the model.
-
-        Returns:
-            An LLMResponse object, or None if the call fails.
-        """
+    ) -> Optional[Text]:
         try:
-            return await llm.acompletion(prompt)
+            llm_response = await llm.acompletion(prompt)
+            llm_answer = llm_response.choices[0]
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
@@ -659,7 +592,9 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
                 "enterprise_search_policy._generate_llm_answer.llm_error",
                 error=e,
             )
-            return None
+            llm_answer = None
+
+        return llm_answer
 
     def _create_prediction(
     self,
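The net effect of this change is that _generate_llm_answer now resolves the completion to plain text inside the policy instead of handing an LLMResponse back to callers. A minimal sketch of the new control flow, assuming only what the diff shows (the hypothetical FakeLLMClient stands in for a real LLMClient; the only assumed interface is an acompletion coroutine returning an object with a choices list of strings):

import asyncio
from typing import Optional


class FakeLLMClient:
    """Hypothetical stand-in for rasa's LLMClient (illustration only)."""

    async def acompletion(self, prompt: str):
        # A real client would call the provider; the diff only relies on
        # the returned object exposing a `choices` list of strings.
        class _Response:
            choices = ["LLM answer for: " + prompt]

        return _Response()


async def generate_llm_answer(llm: FakeLLMClient, prompt: str) -> Optional[str]:
    # Mirrors the post-3.11.5 flow: resolve to the first choice,
    # or None if the provider call raises.
    try:
        llm_response = await llm.acompletion(prompt)
        llm_answer = llm_response.choices[0]
    except Exception:
        llm_answer = None
    return llm_answer


print(asyncio.run(generate_llm_answer(FakeLLMClient(), "What is Rasa?")))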
rasa/core/policies/flows/flow_executor.py CHANGED
@@ -82,7 +82,6 @@ from rasa.shared.core.flows.steps import (
     NoOperationFlowStep,
 )
 from rasa.shared.core.flows.steps.collect import SlotRejection
-from rasa.shared.core.flows.steps.constants import START_STEP
 from rasa.shared.core.slots import Slot
 from rasa.shared.core.trackers import (
     DialogueStateTracker,
@@ -317,7 +316,7 @@ def reset_scoped_slots(
     def _reset_slot(slot_name: Text, dialogue_tracker: DialogueStateTracker) -> None:
         slot = dialogue_tracker.slots.get(slot_name, None)
         initial_value = slot.initial_value if slot else None
-        events.append(SlotSet(slot_name, initial_value, metadata={"reset": True}))
+        events.append(SlotSet(slot_name, initial_value))
 
     if (
         isinstance(current_frame, UserFlowStackFrame)
@@ -449,7 +448,6 @@ def advance_flows_until_next_action(
             tracker,
             available_actions,
             flows,
-            previous_step_id,
         )
         new_events = step_result.events
         if (
@@ -466,9 +464,6 @@ def advance_flows_until_next_action(
                 new_events.insert(
                     idx, FlowCompleted(active_frame.flow_id, previous_step_id)
                 )
-            attach_stack_metadata_to_events(
-                next_step.id, current_flow.id, new_events
-            )
             tracker.update_stack(step_stack)
             tracker.update_with_events(new_events)
 
@@ -572,17 +567,6 @@ def validate_custom_slot_mappings(
     return True
 
 
-def attach_stack_metadata_to_events(
-    step_id: str,
-    flow_id: str,
-    events: List[Event],
-) -> None:
-    """Attach the stack metadata to the events."""
-    for event in events:
-        event.metadata[STEP_ID_METADATA_KEY] = step_id
-        event.metadata[ACTIVE_FLOW_METADATA_KEY] = flow_id
-
-
 def run_step(
     step: FlowStep,
     flow: Flow,
@@ -590,7 +574,6 @@ def run_step(
     tracker: DialogueStateTracker,
     available_actions: List[str],
     flows: FlowsList,
-    previous_step_id: str,
 ) -> FlowStepResult:
     """Run a single step of a flow.
 
@@ -608,19 +591,12 @@ def run_step(
         tracker: The tracker to run the step on.
         available_actions: The actions that are available in the domain.
         flows: All flows.
-        previous_step_id: The ID of the previous step.
 
     Returns:
         A result of running the step describing where to transition to.
     """
     initial_events: List[Event] = []
-    if previous_step_id == START_STEP:
-        # if the previous step id is the start step, we need to add a flow
-        # started event to the initial events.
-        # we can't use the current step to check this, as the current step is the
-        # first step in the flow -> other steps might link to this flow, so the
-        # only reliable way to check if we are starting a new flow is checking for
-        # the START_STEP meta step
+    if step == flow.first_step_in_flow():
         initial_events.append(FlowStarted(flow.id, metadata=stack.current_context()))
 
     if isinstance(step, CollectInformationFlowStep):
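Flow-start detection thus changes from comparing the previous step id against the START_STEP sentinel to comparing the current step with flow.first_step_in_flow(). A toy sketch of the new check, with simplified Flow/FlowStep stand-ins (hypothetical; the real classes live in rasa.shared.core.flows and first_step_in_flow likely also skips internal meta steps):

from dataclasses import dataclass
from typing import List


@dataclass(frozen=True)
class FlowStep:
    id: str


@dataclass
class Flow:
    id: str
    steps: List[FlowStep]

    def first_step_in_flow(self) -> FlowStep:
        # Simplified: returns the first user-defined step.
        return self.steps[0]


def should_emit_flow_started(step: FlowStep, flow: Flow) -> bool:
    # Post-3.11.5 check: a FlowStarted event is emitted when the step
    # being run is the flow's first step, instead of checking whether
    # the previous step id was the START_STEP sentinel.
    return step == flow.first_step_in_flow()


flow = Flow(id="transfer_money", steps=[FlowStep("collect_amount"), FlowStep("confirm")])
assert should_emit_flow_started(flow.steps[0], flow)
assert not should_emit_flow_started(flow.steps[1], flow)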
rasa/core/processor.py CHANGED
@@ -818,9 +818,8 @@ class MessageProcessor:
         return parse_data
 
     def _sanitize_message(self, message: UserMessage) -> UserMessage:
-        """Sanitize user messages.
-
-        Removes prepended slashes before the actual content.
+        """Sanitize user message by removing prepended slashes before the
+        actual content.
         """
         # Regex pattern to match leading slashes and any whitespace before
         # actual content
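The pattern itself sits outside this hunk, so the exact regex is not visible here. A plausible sketch of the behaviour the docstring describes (the pattern below is hypothetical, not the one from processor.py):

import re


def sanitize_text(text: str) -> str:
    # Hypothetical pattern: strip leading slashes and any surrounding
    # whitespace before the actual content, as the docstring describes.
    return re.sub(r"^\s*/+\s*", "", text)


assert sanitize_text("//hello") == "hello"
assert sanitize_text("  / hi there") == "hi there"
assert sanitize_text("plain message") == "plain message"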
@@ -922,7 +921,9 @@ class MessageProcessor:
         return [command.as_dict() for command in commands]
 
     def _contains_undefined_intent(self, message: Message) -> bool:
-        """Checks if the message contains an undefined intent."""
+        """Checks if the message contains an intent that is undefined
+        in the domain.
+        """
         intent_name = message.get(INTENT, {}).get("name")
         return intent_name is not None and intent_name not in self.domain.intents
 
@@ -986,8 +987,6 @@ class MessageProcessor:
         if parse_data["entities"]:
             self._log_slots(tracker)
 
-        plugin_manager().hook.after_new_user_message(tracker=tracker)
-
         logger.debug(
             f"Logged UserUtterance - tracker now has {len(tracker.events)} events."
         )
@@ -1306,7 +1305,7 @@ class MessageProcessor:
         self._log_slots(tracker)
 
         await self.execute_side_effects(events, tracker, output_channel)
-        plugin_manager().hook.after_action_executed(tracker=tracker)
+
         return self.should_predict_another_action(action.name())
 
     def _log_action_on_tracker(
@@ -1442,10 +1441,8 @@ class MessageProcessor:
         return len(filtered_commands) > 0
 
     def _is_calm_assistant(self) -> bool:
-        """Inspects the nodes of the graph schema to decide if we are in CALM.
-
-        To determine whether we are in CALM mode, we check if any node is
-        associated with the `FlowPolicy`, which is indicative of a
+        """Inspects the nodes of the graph schema to determine whether
+        any node is associated with the `FlowPolicy`, which is indicative of a
         CALM assistant setup.
 
         Returns:
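The _is_calm_assistant docstring describes scanning graph schema nodes for FlowPolicy. A minimal sketch of that idea with hypothetical stand-ins for the schema types (the real check in rasa may differ in detail):

from typing import List


class FlowPolicy:
    """Hypothetical stand-in for rasa's FlowPolicy."""


class OtherComponent:
    pass


def is_calm_assistant(node_components: List[type]) -> bool:
    # CALM setups are identified by any schema node using FlowPolicy.
    return any(issubclass(component, FlowPolicy) for component in node_components)


assert is_calm_assistant([OtherComponent, FlowPolicy])
assert not is_calm_assistant([OtherComponent])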
rasa/dialogue_understanding/generator/command_generator.py CHANGED
@@ -10,29 +10,18 @@ from rasa.dialogue_understanding.commands import (
     ErrorCommand,
 )
 from rasa.dialogue_understanding.commands.set_slot_command import SetSlotExtractor
-from rasa.shared.constants import (
-    RASA_PATTERN_INTERNAL_ERROR_USER_INPUT_TOO_LONG,
-    RASA_PATTERN_INTERNAL_ERROR_USER_INPUT_EMPTY,
-)
 from rasa.shared.core.constants import SlotMappingType
 from rasa.shared.core.domain import Domain
 from rasa.shared.core.flows import FlowsList
 from rasa.shared.core.slot_mappings import SlotFillingManager
 from rasa.shared.core.trackers import DialogueStateTracker
-from rasa.shared.nlu.constants import (
-    COMMANDS,
-    TEXT,
-    PREDICTED_COMMANDS,
-    PROMPTS,
-    KEY_USER_PROMPT,
-    KEY_SYSTEM_PROMPT,
-    KEY_LLM_RESPONSE_METADATA,
-    KEY_PROMPT_NAME,
-    KEY_COMPONENT_NAME,
-)
 from rasa.shared.nlu.training_data.message import Message
-from rasa.shared.providers.llm.llm_response import LLMResponse
+from rasa.shared.nlu.constants import COMMANDS, TEXT
 from rasa.shared.utils.llm import DEFAULT_MAX_USER_INPUT_CHARACTERS
+from rasa.shared.constants import (
+    RASA_PATTERN_INTERNAL_ERROR_USER_INPUT_TOO_LONG,
+    RASA_PATTERN_INTERNAL_ERROR_USER_INPUT_EMPTY,
+)
 
 structlogger = structlog.get_logger()
 
@@ -204,7 +193,6 @@ class CommandGenerator:
             flows: The flows to use for command prediction.
             tracker: The tracker containing the conversation history up to now.
             **kwargs: Keyword arguments for forward compatibility.
-
         Returns:
             The predicted commands.
         """
@@ -353,106 +341,3 @@ class CommandGenerator:
         ]
 
         return filtered_commands
-
-    @staticmethod
-    def _add_commands_to_message_parse_data(
-        message: Message, component_name: str, commands: List[Command]
-    ) -> None:
-        """Add commands to the message parse data.
-
-        Commands are only added in case the flag 'record_commands_and_prompts' is set.
-        Example of predicted commands in the message parse data:
-        Message(data={
-            PREDICTED_COMMANDS: {
-                "MultiStepLLMCommandGenerator": [
-                    {"command": "set_slot", "name": "slot_name", "value": "slot_value"},
-                ],
-                "NLUCommandAdapter": [
-                    {"command": "start_flow", "name": "test_flow"},
-                ]
-            }
-        })
-        """
-        from rasa.dialogue_understanding.utils import record_commands_and_prompts
-
-        # only set commands if the flag "record_commands_and_prompts" is set to True
-        if not record_commands_and_prompts:
-            return
-
-        commands_as_dict = [command.as_dict() for command in commands]
-
-        if message.get(PREDICTED_COMMANDS) is not None:
-            predicted_commands = message.get(PREDICTED_COMMANDS)
-            if component_name in predicted_commands:
-                predicted_commands[component_name].extend(commands_as_dict)
-            else:
-                predicted_commands[component_name] = commands_as_dict
-        else:
-            predicted_commands = {component_name: commands_as_dict}
-
-        message.set(
-            PREDICTED_COMMANDS,
-            predicted_commands,
-            add_to_output=True,
-        )
-
-    @staticmethod
-    def _add_prompt_to_message_parse_data(
-        message: Message,
-        component_name: str,
-        prompt_name: str,
-        user_prompt: str,
-        system_prompt: Optional[str] = None,
-        llm_response: Optional[LLMResponse] = None,
-    ) -> None:
-        """Add prompt to the message parse data.
-
-        Prompt is only added in case the flag 'record_commands_and_prompts' is set.
-        Example of prompts in the message parse data:
-        Message(data={
-            PROMPTS: [
-                {
-                    "component_name": "MultiStepLLMCommandGenerator",
-                    "prompt_name": "fill_slots_prompt",
-                    "user_prompt": "...",
-                    "system_prompt": "...",
-                    "llm_response_metadata": { ... }
-                },
-                {
-                    "component_name": "MultiStepLLMCommandGenerator",
-                    "prompt_name": "handle_flows_prompt",
-                    "user_prompt": "...",
-                    "system_prompt": "...",
-                    "llm_response_metadata": { ... }
-                },
-                {
-                    "component_name": "SingleStepLLMCommandGenerator",
-                    "prompt_name": "prompt_template",
-                    "user_prompt": "...",
-                    "system_prompt": "...",
-                    "llm_response_metadata": { ... }
-                }
-            ]
-        })
-        """
-        from rasa.dialogue_understanding.utils import record_commands_and_prompts
-
-        # Only set prompt if the flag "record_commands_and_prompts" is set to True.
-        if not record_commands_and_prompts:
-            return
-
-        # Construct the dictionary with prompt details.
-        prompt_data: Dict[Text, Any] = {
-            KEY_COMPONENT_NAME: component_name,
-            KEY_PROMPT_NAME: prompt_name,
-            KEY_USER_PROMPT: user_prompt,
-            KEY_LLM_RESPONSE_METADATA: llm_response.to_dict() if llm_response else None,
-            **({KEY_SYSTEM_PROMPT: system_prompt} if system_prompt else {}),
-        }
-
-        # Get or create a top-level "prompts" list.
-        prompts = message.get(PROMPTS) or []
-        prompts.append(prompt_data)
-
-        # Update the message with the new prompts list.
-        message.set(PROMPTS, prompts, add_to_output=True)
rasa/dialogue_understanding/generator/llm_based_command_generator.py CHANGED
@@ -32,7 +32,6 @@ from rasa.shared.exceptions import ProviderClientAPIException
 from rasa.shared.nlu.constants import FLOWS_IN_PROMPT
 from rasa.shared.nlu.training_data.message import Message
 from rasa.shared.nlu.training_data.training_data import TrainingData
-from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.health_check.llm_health_check_mixin import LLMHealthCheckMixin
 from rasa.shared.utils.llm import (
     allowed_values_for_slot,
@@ -305,21 +304,22 @@ class LLMBasedCommandGenerator(
         )
         return filtered_flows
 
-    async def invoke_llm(self, prompt: Text) -> Optional[LLMResponse]:
+    async def invoke_llm(self, prompt: Text) -> Optional[Text]:
         """Use LLM to generate a response.
 
         Args:
            prompt: The prompt to send to the LLM.
 
        Returns:
-            An LLMResponse object.
+            The generated text.
 
         Raises:
-            ProviderClientAPIException: If an error occurs during the LLM API call.
+            ProviderClientAPIException if an error during API call.
         """
         llm = llm_factory(self.config.get(LLM_CONFIG_KEY), DEFAULT_LLM_CONFIG)
         try:
-            return await llm.acompletion(prompt)
+            llm_response = await llm.acompletion(prompt)
+            return llm_response.choices[0]
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
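With this change, invoke_llm resolves the completion to its first choice before returning, so subclasses and callers receive Optional[Text] rather than an LLMResponse. A small sketch of the new caller contract (the FakeGenerator below is a hypothetical stand-in for an LLMBasedCommandGenerator subclass; only the return shape shown in the diff is assumed):

import asyncio
from typing import Optional


class FakeGenerator:
    """Hypothetical stand-in for an LLMBasedCommandGenerator subclass."""

    async def invoke_llm(self, prompt: str) -> Optional[str]:
        # Post-3.11.5 contract: the first completion choice is returned
        # directly as text, or None on provider failure.
        return "StartFlow(transfer_money)"


async def main() -> None:
    generator = FakeGenerator()
    action_list = await generator.invoke_llm("user asked to send money")
    if action_list is None:
        print("provider failed; fall back to an ErrorCommand upstream")
    else:
        print("parse_commands input:", action_list)


asyncio.run(main())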
rasa/dialogue_understanding/generator/llm_command_generator.py CHANGED
@@ -10,7 +10,6 @@ from rasa.engine.recipes.default_recipe import DefaultV1Recipe
 from rasa.engine.storage.resource import Resource
 from rasa.engine.storage.storage import ModelStorage
 from rasa.shared.exceptions import ProviderClientAPIException
-from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.io import raise_deprecation_warning
 
 structlogger = structlog.get_logger()
@@ -54,7 +53,7 @@ class LLMCommandGenerator(SingleStepLLMCommandGenerator):
             **kwargs,
         )
 
-    async def invoke_llm(self, prompt: Text) -> Optional[LLMResponse]:
+    async def invoke_llm(self, prompt: Text) -> Optional[Text]:
         try:
             return await super().invoke_llm(prompt)
         except ProviderClientAPIException:
rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py CHANGED
@@ -51,7 +51,6 @@ from rasa.shared.core.trackers import DialogueStateTracker
 from rasa.shared.exceptions import ProviderClientAPIException
 from rasa.shared.nlu.constants import TEXT
 from rasa.shared.nlu.training_data.message import Message
-from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.io import deep_container_fingerprint
 from rasa.shared.utils.llm import (
     get_prompt_template,
@@ -145,6 +144,7 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
         **kwargs: Any,
     ) -> "MultiStepLLMCommandGenerator":
         """Loads trained component (see parent class for full docstring)."""
+
         # Perform health check of the LLM client config
         llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
         cls.perform_llm_health_check(
@@ -200,9 +200,6 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
                 message, flows, tracker
             )
             commands = self._clean_up_commands(commands)
-            self._add_commands_to_message_parse_data(
-                message, MultiStepLLMCommandGenerator.__name__, commands
-            )
         except ProviderClientAPIException:
             # if any step resulted in API exception, the command prediction cannot
             # be completed, "predict" the ErrorCommand
@@ -536,12 +533,7 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=prompt,
         )
 
-        response = await self.invoke_llm(prompt)
-        llm_response = LLMResponse.ensure_llm_response(response)
-        actions = None
-        if llm_response and llm_response.choices:
-            actions = llm_response.choices[0]
-
+        actions = await self.invoke_llm(prompt)
         structlogger.debug(
             "multi_step_llm_command_generator"
             ".predict_commands_for_active_flow"
@@ -550,16 +542,6 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
         )
 
         commands = self.parse_commands(actions, tracker, available_flows)
-
-        if commands:
-            self._add_prompt_to_message_parse_data(
-                message=message,
-                component_name=MultiStepLLMCommandGenerator.__name__,
-                prompt_name="fill_slots_for_active_flow_prompt",
-                user_prompt=prompt,
-                llm_response=llm_response,
-            )
-
         return commands
 
     async def _predict_commands_for_handling_flows(
@@ -591,12 +573,7 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=prompt,
         )
 
-        response = await self.invoke_llm(prompt)
-        llm_response = LLMResponse.ensure_llm_response(response)
-        actions = None
-        if llm_response and llm_response.choices:
-            actions = llm_response.choices[0]
-
+        actions = await self.invoke_llm(prompt)
         structlogger.debug(
             "multi_step_llm_command_generator"
             ".predict_commands_for_handling_flows"
@@ -608,15 +585,6 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
         # filter out flows that are already started and active
         commands = self._filter_redundant_start_flow_commands(tracker, commands)
 
-        if commands:
-            self._add_prompt_to_message_parse_data(
-                message=message,
-                component_name=MultiStepLLMCommandGenerator.__name__,
-                prompt_name="handle_flows_prompt",
-                user_prompt=prompt,
-                llm_response=llm_response,
-            )
-
         return commands
 
     @staticmethod
@@ -681,12 +649,7 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=prompt,
         )
 
-        response = await self.invoke_llm(prompt)
-        llm_response = LLMResponse.ensure_llm_response(response)
-        actions = None
-        if llm_response and llm_response.choices:
-            actions = llm_response.choices[0]
-
+        actions = await self.invoke_llm(prompt)
         structlogger.debug(
             "multi_step_llm_command_generator"
             ".predict_commands_for_newly_started_flow"
@@ -711,15 +674,6 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             commands=commands,
         )
 
-        if commands:
-            self._add_prompt_to_message_parse_data(
-                message=message,
-                component_name=MultiStepLLMCommandGenerator.__name__,
-                prompt_name="fill_slots_for_new_flow_prompt",
-                user_prompt=prompt,
-                llm_response=llm_response,
-            )
-
         return commands
 
     def _prepare_inputs(
rasa/dialogue_understanding/generator/nlu_command_adapter.py CHANGED
@@ -205,9 +205,6 @@ class NLUCommandAdapter(GraphComponent, CommandGenerator):
             commands=commands,
         )
 
-        CommandGenerator._add_commands_to_message_parse_data(
-            message, NLUCommandAdapter.__name__, commands
-        )
         return commands
 
 
rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py CHANGED
@@ -46,7 +46,6 @@ from rasa.shared.core.trackers import DialogueStateTracker
 from rasa.shared.exceptions import ProviderClientAPIException
 from rasa.shared.nlu.constants import TEXT, LLM_COMMANDS, LLM_PROMPT
 from rasa.shared.nlu.training_data.message import Message
-from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.io import deep_container_fingerprint
 from rasa.shared.utils.llm import (
     get_prompt_template,
@@ -138,6 +137,7 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
         **kwargs: Any,
     ) -> "SingleStepLLMCommandGenerator":
         """Loads trained component (see parent class for full docstring)."""
+
         # Perform health check of the LLM API endpoint
         llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
         cls.perform_llm_health_check(
@@ -265,16 +265,13 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=flow_prompt,
         )
 
-        response = await self.invoke_llm(flow_prompt)
-        llm_response = LLMResponse.ensure_llm_response(response)
+        action_list = await self.invoke_llm(flow_prompt)
         # The check for 'None' maintains compatibility with older versions
         # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
         # might return 'None' to indicate a failure to generate actions.
-        if llm_response is None or not llm_response.choices:
+        if action_list is None:
             return [ErrorCommand()]
 
-        action_list = llm_response.choices[0]
-
         log_llm(
             logger=structlogger,
             log_module="SingleStepLLMCommandGenerator",
@@ -285,16 +282,6 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
         commands = self.parse_commands(action_list, tracker, flows)
 
         self._update_message_parse_data_for_fine_tuning(message, commands, flow_prompt)
-        self._add_commands_to_message_parse_data(
-            message, SingleStepLLMCommandGenerator.__name__, commands
-        )
-        self._add_prompt_to_message_parse_data(
-            message=message,
-            component_name=SingleStepLLMCommandGenerator.__name__,
-            prompt_name="command_generator_prompt",
-            user_prompt=flow_prompt,
-            llm_response=llm_response,
-        )
 
         return commands
 
rasa/dialogue_understanding/patterns/continue_interrupted.py CHANGED
@@ -40,3 +40,12 @@ class ContinueInterruptedPatternFlowStackFrame(PatternFlowStackFrame):
             step_id=data["step_id"],
             previous_flow_name=data["previous_flow_name"],
         )
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, ContinueInterruptedPatternFlowStackFrame):
+            return False
+        return (
+            self.flow_id == other.flow_id
+            and self.step_id == other.step_id
+            and self.previous_flow_name == other.previous_flow_name
+        )
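The added __eq__ makes frame comparison value-based over flow_id, step_id, and previous_flow_name, ignoring identity fields. A small illustration of those semantics (the simplified dataclass below is hypothetical; constructor arguments and the frame_id field are assumptions, not the real frame definition):

from dataclasses import dataclass, field
from typing import Any


@dataclass
class ContinueInterruptedFrame:
    """Hypothetical simplification of ContinueInterruptedPatternFlowStackFrame."""

    flow_id: str
    step_id: str
    previous_flow_name: str
    frame_id: str = field(default="")  # identity field, ignored by __eq__

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, ContinueInterruptedFrame):
            return False
        return (
            self.flow_id == other.flow_id
            and self.step_id == other.step_id
            and self.previous_flow_name == other.previous_flow_name
        )


a = ContinueInterruptedFrame("pattern_continue_interrupted", "start", "transfer_money", "id-1")
b = ContinueInterruptedFrame("pattern_continue_interrupted", "start", "transfer_money", "id-2")
assert a == b  # equal despite different frame_id values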
rasa/dialogue_understanding/stack/utils.py CHANGED
@@ -77,6 +77,7 @@ def top_user_flow_frame(dialogue_stack: DialogueStack) -> Optional[UserFlowStack
         if (
             isinstance(frame, UserFlowStackFrame)
             and frame.frame_type != FlowStackFrameType.CALL
+            and frame.frame_type != FlowStackFrameType.LINK
         ):
             return frame
     return None
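With the extra condition, top_user_flow_frame now skips frames pushed via links as well as calls when looking up the originating user flow. A simplified sketch of the lookup, using hypothetical minimal types that mirror the stack structures (the real function iterates a DialogueStack, assumed here to walk from the top):

from dataclasses import dataclass
from enum import Enum
from typing import List, Optional


class FlowStackFrameType(Enum):
    REGULAR = "regular"
    CALL = "call"
    LINK = "link"


@dataclass
class UserFlowStackFrame:
    flow_id: str
    frame_type: FlowStackFrameType


def top_user_flow_frame(frames: List[UserFlowStackFrame]) -> Optional[UserFlowStackFrame]:
    # Walk from the top of the stack; skip CALL and LINK frames so the
    # originating user flow is returned.
    for frame in reversed(frames):
        if (
            isinstance(frame, UserFlowStackFrame)
            and frame.frame_type != FlowStackFrameType.CALL
            and frame.frame_type != FlowStackFrameType.LINK
        ):
            return frame
    return None


stack = [
    UserFlowStackFrame("transfer_money", FlowStackFrameType.REGULAR),
    UserFlowStackFrame("check_balance", FlowStackFrameType.LINK),
]
top = top_user_flow_frame(stack)
assert top is not None and top.flow_id == "transfer_money"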