rasa-pro 3.12.6.dev2__py3-none-any.whl → 3.12.7.dev2__py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of rasa-pro might be problematic.

Files changed (55)
  1. rasa/__init__.py +0 -6
  2. rasa/cli/run.py +10 -6
  3. rasa/cli/utils.py +7 -0
  4. rasa/core/actions/action.py +0 -6
  5. rasa/core/channels/voice_ready/audiocodes.py +46 -17
  6. rasa/core/nlg/contextual_response_rephraser.py +4 -21
  7. rasa/core/nlg/summarize.py +1 -15
  8. rasa/core/policies/enterprise_search_policy.py +3 -16
  9. rasa/core/policies/flows/flow_executor.py +3 -38
  10. rasa/core/policies/intentless_policy.py +4 -17
  11. rasa/core/policies/policy.py +0 -2
  12. rasa/core/processor.py +19 -5
  13. rasa/core/utils.py +53 -0
  14. rasa/dialogue_understanding/coexistence/llm_based_router.py +4 -18
  15. rasa/dialogue_understanding/commands/cancel_flow_command.py +4 -59
  16. rasa/dialogue_understanding/commands/start_flow_command.py +0 -41
  17. rasa/dialogue_understanding/generator/command_generator.py +67 -0
  18. rasa/dialogue_understanding/generator/llm_based_command_generator.py +4 -20
  19. rasa/dialogue_understanding/generator/llm_command_generator.py +1 -3
  20. rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +1 -12
  21. rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +0 -61
  22. rasa/dialogue_understanding/processor/command_processor.py +7 -65
  23. rasa/dialogue_understanding/stack/utils.py +0 -38
  24. rasa/e2e_test/utils/validation.py +3 -3
  25. rasa/hooks.py +0 -55
  26. rasa/shared/constants.py +0 -5
  27. rasa/shared/core/constants.py +0 -8
  28. rasa/shared/core/domain.py +12 -3
  29. rasa/shared/core/flows/flow.py +0 -17
  30. rasa/shared/core/flows/flows_yaml_schema.json +3 -38
  31. rasa/shared/core/flows/steps/collect.py +5 -18
  32. rasa/shared/core/flows/utils.py +1 -16
  33. rasa/shared/core/slot_mappings.py +11 -5
  34. rasa/shared/nlu/constants.py +0 -1
  35. rasa/shared/providers/constants.py +0 -9
  36. rasa/shared/providers/llm/_base_litellm_client.py +4 -14
  37. rasa/shared/providers/llm/litellm_router_llm_client.py +7 -17
  38. rasa/shared/providers/llm/llm_client.py +15 -24
  39. rasa/shared/providers/llm/self_hosted_llm_client.py +2 -10
  40. rasa/shared/utils/common.py +11 -1
  41. rasa/shared/utils/health_check/health_check.py +1 -7
  42. rasa/tracing/instrumentation/attribute_extractors.py +4 -4
  43. rasa/tracing/instrumentation/intentless_policy_instrumentation.py +1 -2
  44. rasa/utils/licensing.py +0 -15
  45. rasa/validator.py +1 -123
  46. rasa/version.py +1 -1
  47. {rasa_pro-3.12.6.dev2.dist-info → rasa_pro-3.12.7.dev2.dist-info}/METADATA +3 -4
  48. {rasa_pro-3.12.6.dev2.dist-info → rasa_pro-3.12.7.dev2.dist-info}/RECORD +51 -55
  49. rasa/core/actions/action_handle_digressions.py +0 -164
  50. rasa/dialogue_understanding/commands/handle_digressions_command.py +0 -144
  51. rasa/dialogue_understanding/patterns/handle_digressions.py +0 -81
  52. rasa/monkey_patches.py +0 -91
  53. {rasa_pro-3.12.6.dev2.dist-info → rasa_pro-3.12.7.dev2.dist-info}/NOTICE +0 -0
  54. {rasa_pro-3.12.6.dev2.dist-info → rasa_pro-3.12.7.dev2.dist-info}/WHEEL +0 -0
  55. {rasa_pro-3.12.6.dev2.dist-info → rasa_pro-3.12.7.dev2.dist-info}/entry_points.txt +0 -0
rasa/__init__.py CHANGED
@@ -5,11 +5,5 @@ from rasa import version
 # define the version before the other imports since these need it
 __version__ = version.__version__
 
-from litellm.integrations.langfuse.langfuse import LangFuseLogger
-
-from rasa.monkey_patches import litellm_langfuse_logger_init_fixed
-
-# Monkey-patch the init method as early as possible before the class is used
-LangFuseLogger.__init__ = litellm_langfuse_logger_init_fixed  # type: ignore
 
 logging.getLogger(__name__).addHandler(logging.NullHandler())
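
Note: together with this hunk, the release deletes rasa/monkey_patches.py outright (file 52 in the list above). For context, this is the general shape of the import-time monkey-patching idiom that 3.12.6 used here and 3.12.7 removes; `patched_init` below is a hypothetical stand-in, not the body of the removed litellm_langfuse_logger_init_fixed.

from litellm.integrations.langfuse.langfuse import LangFuseLogger

_original_init = LangFuseLogger.__init__

def patched_init(self, *args, **kwargs):
    # a real patch would adjust arguments or state before delegating
    _original_init(self, *args, **kwargs)

# the rebinding must happen before any LangFuseLogger is instantiated,
# which is why 3.12.6 did it in rasa/__init__.py
LangFuseLogger.__init__ = patched_init  # type: ignore
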
rasa/cli/run.py CHANGED
@@ -64,12 +64,16 @@ def run_actions(args: argparse.Namespace) -> None:
 
 def _validate_model_path(model_path: Text, parameter: Text, default: Text) -> Text:
     if model_path is not None and not os.path.exists(model_path):
-        reason_str = f"'{model_path}' not found."
-        if model_path is None:
-            reason_str = f"Parameter '{parameter}' not set."
-
-        logger.debug(f"{reason_str} Using default location '{default}' instead.")
-
+        raise ModelNotFound(
+            f"The provided model path '{model_path}' could not be found. "
+            "Provide an existing model path."
+        )
+
+    if model_path is None:
+        logger.debug(
+            f"Parameter '{parameter}' not set. "
+            "Using default location '{default}' instead."
+        )
         os.makedirs(default, exist_ok=True)
         model_path = default
 
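The effect of this hunk: a model path that is explicitly provided but does not exist is now a hard ModelNotFound error instead of a silent fallback to the default directory; only an unset path still falls back. A rough sketch of the new behavior, assuming rasa-pro 3.12.7 is installed and the paths below are made up:

from rasa.cli.run import _validate_model_path
from rasa.exceptions import ModelNotFound

try:
    # an explicit but missing path now raises instead of logging a debug line
    _validate_model_path("models/missing.tar.gz", "model", "models/")
except ModelNotFound as err:
    print(err)

# an unset path (None) still falls back to the default directory
assert _validate_model_path(None, "model", "models/") == "models/"
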
rasa/cli/utils.py CHANGED
@@ -14,6 +14,7 @@ import structlog
 import rasa.shared.utils.cli
 import rasa.shared.utils.io
 from rasa import telemetry
+from rasa.exceptions import ModelNotFound
 from rasa.shared.constants import (
     ASSISTANT_ID_DEFAULT_VALUE,
     ASSISTANT_ID_KEY,
@@ -77,6 +78,12 @@ def get_validated_path(
     if current and os.path.exists(current):
         return current
 
+    if parameter == "model":
+        raise ModelNotFound(
+            f"The provided model path '{current}' could not be found. "
+            "Provide an existing model path."
+        )
+
     # try to find a valid option among the defaults
     if isinstance(default, str) or isinstance(default, Path):
         default_options = [str(default)]
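
get_validated_path gets the same hardening, but only for the "model" parameter; other parameters keep falling through to their defaults. A short sketch (hypothetical paths, rasa-pro 3.12.7 assumed):

from rasa.cli.utils import get_validated_path
from rasa.exceptions import ModelNotFound

try:
    # a missing model path is now fatal when parameter == "model" ...
    get_validated_path("models/missing.tar.gz", "model", default="models/")
except ModelNotFound:
    pass

# ... while, e.g., a missing config path still resolves to its default
# (assuming config.yml exists in the working directory)
path = get_validated_path("no-such-config.yml", "config", default="config.yml")
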
rasa/core/actions/action.py CHANGED
@@ -105,10 +105,6 @@ logger = logging.getLogger(__name__)
 def default_actions(action_endpoint: Optional[EndpointConfig] = None) -> List["Action"]:
     """List default actions."""
     from rasa.core.actions.action_clean_stack import ActionCleanStack
-    from rasa.core.actions.action_handle_digressions import (
-        ActionBlockDigressions,
-        ActionContinueDigression,
-    )
     from rasa.core.actions.action_hangup import ActionHangup
     from rasa.core.actions.action_repeat_bot_messages import ActionRepeatBotMessages
     from rasa.core.actions.action_run_slot_rejections import ActionRunSlotRejections
@@ -143,8 +139,6 @@ def default_actions(action_endpoint: Optional[EndpointConfig] = None) -> List["Action"]:
         ActionResetRouting(),
         ActionHangup(),
         ActionRepeatBotMessages(),
-        ActionBlockDigressions(),
-        ActionContinueDigression(),
     ]
 
 
rasa/core/channels/voice_ready/audiocodes.py CHANGED
@@ -115,11 +115,21 @@ class Conversation:
     async def handle_activities(
         self,
         message: Dict[Text, Any],
+        input_channel_name: str,
         output_channel: OutputChannel,
         on_new_message: Callable[[UserMessage], Awaitable[Any]],
     ) -> None:
         """Handle activities sent by Audiocodes."""
         structlogger.debug("audiocodes.handle.activities")
+        if input_channel_name == "":
+            structlogger.warning(
+                "audiocodes.handle.activities.empty_input_channel_name",
+                event_info=(
+                    "Audiocodes input channel name is empty "
+                    f"for conversation {self.conversation_id}"
+                ),
+            )
+
         for activity in message["activities"]:
             text = None
             if activity[ACTIVITY_ID_KEY] in self.activity_ids:
@@ -143,6 +153,7 @@ class Conversation:
             metadata = self.get_metadata(activity)
             user_msg = UserMessage(
                 text=text,
+                input_channel=input_channel_name,
                 output_channel=output_channel,
                 sender_id=self.conversation_id,
                 metadata=metadata,
@@ -394,7 +405,12 @@ class AudiocodesInput(InputChannel):
             # start a background task to handle activities
             self._create_task(
                 conversation_id,
-                conversation.handle_activities(request.json, ac_output, on_new_message),
+                conversation.handle_activities(
+                    request.json,
+                    input_channel_name=self.name(),
+                    output_channel=ac_output,
+                    on_new_message=on_new_message,
+                ),
             )
             return response.json(response_json)
 
@@ -407,23 +423,9 @@ class AudiocodesInput(InputChannel):
             Example of payload:
             {"conversation": <conversation_id>, "reason": Optional[Text]}.
             """
-            self._get_conversation(request.token, conversation_id)
-            reason = {"reason": request.json.get("reason")}
-            await on_new_message(
-                UserMessage(
-                    text=f"{INTENT_MESSAGE_PREFIX}session_end",
-                    output_channel=None,
-                    sender_id=conversation_id,
-                    metadata=reason,
-                )
-            )
-            del self.conversations[conversation_id]
-            structlogger.debug(
-                "audiocodes.disconnect",
-                conversation=conversation_id,
-                request=request.json,
+            return await self._handle_disconnect(
+                request, conversation_id, on_new_message
             )
-            return response.json({})
 
         @ac_webhook.route("/conversation/<conversation_id>/keepalive", methods=["POST"])
         async def keepalive(request: Request, conversation_id: Text) -> HTTPResponse:
@@ -438,6 +440,32 @@ class AudiocodesInput(InputChannel):
 
         return ac_webhook
 
+    async def _handle_disconnect(
+        self,
+        request: Request,
+        conversation_id: Text,
+        on_new_message: Callable[[UserMessage], Awaitable[Any]],
+    ) -> HTTPResponse:
+        """Triggered when the call is disconnected."""
+        self._get_conversation(request.token, conversation_id)
+        reason = {"reason": request.json.get("reason")}
+        await on_new_message(
+            UserMessage(
+                text=f"{INTENT_MESSAGE_PREFIX}session_end",
+                input_channel=self.name(),
+                output_channel=None,
+                sender_id=conversation_id,
+                metadata=reason,
+            )
+        )
+        del self.conversations[conversation_id]
+        structlogger.debug(
+            "audiocodes.disconnect",
+            conversation=conversation_id,
+            request=request.json,
+        )
+        return response.json({})
+
 
 class AudiocodesOutput(OutputChannel):
     @classmethod
@@ -445,6 +473,7 @@ class AudiocodesOutput(OutputChannel):
         return CHANNEL_NAME
 
     def __init__(self) -> None:
+        super().__init__()
        self.messages: List[Dict] = []
 
     async def add_message(self, message: Dict) -> None:
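
The common thread in these audiocodes hunks: every UserMessage the channel emits now carries input_channel (previously the field was left unset for this channel), and AudiocodesOutput now calls super().__init__() so base OutputChannel state is initialized. Roughly, assuming the channel name constant resolves to "audiocodes":

from rasa.core.channels.channel import UserMessage

# messages built by the channel now pin the originating channel name
msg = UserMessage(
    text="hello",
    input_channel="audiocodes",  # previously None for this channel
    sender_id="conv-1234",       # hypothetical conversation id
)
assert msg.input_channel == "audiocodes"
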
rasa/core/nlg/contextual_response_rephraser.py CHANGED
@@ -7,10 +7,6 @@ from rasa import telemetry
 from rasa.core.nlg.response import TemplatedNaturalLanguageGenerator
 from rasa.core.nlg.summarize import summarize_conversation
 from rasa.shared.constants import (
-    LANGFUSE_CUSTOM_METADATA_DICT,
-    LANGFUSE_METADATA_SESSION_ID,
-    LANGFUSE_METADATA_USER_ID,
-    LANGFUSE_TAGS,
     LLM_CONFIG_KEY,
     MODEL_CONFIG_KEY,
     MODEL_GROUP_ID_CONFIG_KEY,
@@ -43,7 +39,6 @@ from rasa.shared.utils.llm import (
     tracker_as_readable_transcript,
 )
 from rasa.utils.endpoints import EndpointConfig
-from rasa.utils.licensing import get_human_readable_licence_owner
 from rasa.utils.log_utils import log_llm
 
 structlogger = structlog.get_logger()
@@ -135,7 +130,6 @@ class ContextualResponseRephraser(
             "contextual_response_rephraser.init",
             ContextualResponseRephraser.__name__,
         )
-        self.user_id = get_human_readable_licence_owner()
 
     @classmethod
     def _add_prompt_and_llm_metadata_to_response(
@@ -205,9 +199,7 @@ class ContextualResponseRephraser(
         return None
 
     @measure_llm_latency
-    async def _generate_llm_response(
-        self, prompt: str, sender_id: str
-    ) -> Optional[LLMResponse]:
+    async def _generate_llm_response(self, prompt: str) -> Optional[LLMResponse]:
         """Use LLM to generate a response.
 
         Returns an LLMResponse object containing both the generated text
@@ -215,21 +207,14 @@ class ContextualResponseRephraser(
 
         Args:
             prompt: The prompt to send to the LLM.
-            sender_id: sender_id from the tracker.
 
         Returns:
             An LLMResponse object if successful, otherwise None.
         """
         llm = llm_factory(self.llm_config, DEFAULT_LLM_CONFIG)
-        metadata = {
-            LANGFUSE_METADATA_USER_ID: self.user_id,
-            LANGFUSE_METADATA_SESSION_ID: sender_id,
-            LANGFUSE_CUSTOM_METADATA_DICT: {"component": self.__class__.__name__},
-            LANGFUSE_TAGS: [self.__class__.__name__],
-        }
 
         try:
-            return await llm.acompletion(prompt, metadata)
+            return await llm.acompletion(prompt)
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
@@ -273,9 +258,7 @@ class ContextualResponseRephraser(
             The history for the prompt.
         """
         llm = llm_factory(self.llm_config, DEFAULT_LLM_CONFIG)
-        return await summarize_conversation(
-            tracker, llm, max_turns=5, user_id=self.user_id, sender_id=tracker.sender_id
-        )
+        return await summarize_conversation(tracker, llm, max_turns=5)
 
     async def rephrase(
         self,
@@ -332,7 +315,7 @@ class ContextualResponseRephraser(
             or self.llm_property(MODEL_NAME_CONFIG_KEY),
             llm_model_group_id=self.llm_property(MODEL_GROUP_ID_CONFIG_KEY),
         )
-        llm_response = await self._generate_llm_response(prompt, tracker.sender_id)
+        llm_response = await self._generate_llm_response(prompt)
         llm_response = LLMResponse.ensure_llm_response(llm_response)
 
         response = self._add_prompt_and_llm_metadata_to_response(
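
This file shows the release's recurring edit in full: call sites stop hand-building Langfuse metadata (licence-derived user id, tracker sender id, component tags) and acompletion is called with the prompt alone; the same change repeats in summarize.py, enterprise_search_policy.py, intentless_policy.py, and llm_based_router.py below. Schematically (a sketch, not the client implementation; `llm` stands for any rasa LLMClient, and the dict keys are illustrative, the real ones were the deleted LANGFUSE_* constants):

async def call_llm_3_12_6(llm, prompt: str, user_id: str, sender_id: str):
    # old shape: a metadata dict accompanied every completion call
    metadata = {
        "trace_user_id": user_id,
        "session_id": sender_id,
        "tags": ["ContextualResponseRephraser"],
    }
    return await llm.acompletion(prompt, metadata)

async def call_llm_3_12_7(llm, prompt: str):
    # new shape: the prompt alone
    return await llm.acompletion(prompt)
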
rasa/core/nlg/summarize.py CHANGED
@@ -4,12 +4,6 @@ import structlog
 from jinja2 import Template
 
 from rasa.core.tracker_store import DialogueStateTracker
-from rasa.shared.constants import (
-    LANGFUSE_CUSTOM_METADATA_DICT,
-    LANGFUSE_METADATA_SESSION_ID,
-    LANGFUSE_METADATA_USER_ID,
-    LANGFUSE_TAGS,
-)
 from rasa.shared.providers.llm.llm_client import LLMClient
 from rasa.shared.utils.llm import (
     tracker_as_readable_transcript,
@@ -52,8 +46,6 @@ async def summarize_conversation(
     tracker: DialogueStateTracker,
     llm: LLMClient,
     max_turns: Optional[int] = MAX_TURNS_DEFAULT,
-    user_id: Optional[str] = None,
-    sender_id: Optional[str] = None,
 ) -> str:
     """Summarizes the dialogue using the LLM.
 
@@ -66,14 +58,8 @@ async def summarize_conversation(
         The summary of the dialogue.
     """
     prompt = _create_summarization_prompt(tracker, max_turns)
-    metadata = {
-        LANGFUSE_METADATA_USER_ID: user_id or "unknown",
-        LANGFUSE_METADATA_SESSION_ID: sender_id or "",
-        LANGFUSE_CUSTOM_METADATA_DICT: {"component": "summarize_conversation"},
-        LANGFUSE_TAGS: ["summarize_conversation"],
-    }
     try:
-        llm_response = await llm.acompletion(prompt, metadata)
+        llm_response = await llm.acompletion(prompt)
         summarization = llm_response.choices[0].strip()
         structlogger.debug(
             "summarization.success", summarization=summarization, prompt=prompt
rasa/core/policies/enterprise_search_policy.py CHANGED
@@ -46,10 +46,6 @@ from rasa.graph_components.providers.forms_provider import Forms
 from rasa.graph_components.providers.responses_provider import Responses
 from rasa.shared.constants import (
     EMBEDDINGS_CONFIG_KEY,
-    LANGFUSE_CUSTOM_METADATA_DICT,
-    LANGFUSE_METADATA_SESSION_ID,
-    LANGFUSE_METADATA_USER_ID,
-    LANGFUSE_TAGS,
     MODEL_CONFIG_KEY,
     MODEL_GROUP_ID_CONFIG_KEY,
     MODEL_NAME_CONFIG_KEY,
@@ -549,9 +545,7 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
 
         if self.use_llm:
             prompt = self._render_prompt(tracker, documents.results)
-            llm_response = await self._generate_llm_answer(
-                llm, prompt, tracker.sender_id
-            )
+            llm_response = await self._generate_llm_answer(llm, prompt)
             llm_response = LLMResponse.ensure_llm_response(llm_response)
 
             self._add_prompt_and_llm_response_to_latest_message(
@@ -647,26 +641,19 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
 
     @measure_llm_latency
     async def _generate_llm_answer(
-        self, llm: LLMClient, prompt: Text, sender_id: str
+        self, llm: LLMClient, prompt: Text
     ) -> Optional[LLMResponse]:
         """Fetches an LLM completion for the provided prompt.
 
         Args:
             llm: The LLM client used to get the completion.
             prompt: The prompt text to send to the model.
-            sender_id: sender_id from the tracker.
 
         Returns:
             An LLMResponse object, or None if the call fails.
         """
-        metadata = {
-            LANGFUSE_METADATA_USER_ID: self.user_id,
-            LANGFUSE_METADATA_SESSION_ID: sender_id,
-            LANGFUSE_CUSTOM_METADATA_DICT: {"component": self.__class__.__name__},
-            LANGFUSE_TAGS: [self.__class__.__name__],
-        }
         try:
-            return await llm.acompletion(prompt, metadata)
+            return await llm.acompletion(prompt)
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
rasa/core/policies/flows/flow_executor.py CHANGED
@@ -23,7 +23,6 @@ from rasa.core.policies.flows.flow_step_result import (
 )
 from rasa.dialogue_understanding.commands import CancelFlowCommand
 from rasa.dialogue_understanding.patterns.cancel import CancelPatternFlowStackFrame
-from rasa.dialogue_understanding.patterns.clarify import ClarifyPatternFlowStackFrame
 from rasa.dialogue_understanding.patterns.collect_information import (
     CollectInformationPatternFlowStackFrame,
 )
@@ -51,7 +50,6 @@ from rasa.dialogue_understanding.stack.frames.flow_stack_frame import (
 )
 from rasa.dialogue_understanding.stack.utils import (
     top_user_flow_frame,
-    user_flows_on_the_stack,
 )
 from rasa.shared.constants import RASA_PATTERN_HUMAN_HANDOFF
 from rasa.shared.core.constants import (
@@ -280,33 +278,6 @@ def trigger_pattern_continue_interrupted(
     return events
 
 
-def trigger_pattern_clarification(
-    current_frame: DialogueStackFrame, stack: DialogueStack, flows: FlowsList
-) -> None:
-    """Trigger the pattern to clarify which topic to continue if needed."""
-    if not isinstance(current_frame, UserFlowStackFrame):
-        return None
-
-    if current_frame.frame_type in [
-        FlowStackFrameType.CALL,
-        FlowStackFrameType.INTERRUPT,
-    ]:
-        # we want to return to the flow that called
-        # the current flow or the flow that was interrupted
-        # by the current flow
-        return None
-
-    pending_flows = [
-        flows.flow_by_id(frame.flow_id)
-        for frame in stack.frames
-        if isinstance(frame, UserFlowStackFrame)
-        and frame.flow_id != current_frame.flow_id
-    ]
-
-    flow_names = [flow.readable_name() for flow in pending_flows if flow is not None]
-    stack.push(ClarifyPatternFlowStackFrame(names=flow_names))
-
-
 def trigger_pattern_completed(
     current_frame: DialogueStackFrame, stack: DialogueStack, flows: FlowsList
 ) -> None:
@@ -675,15 +646,9 @@ def _run_end_step(
     structlogger.debug("flow.step.run.flow_end")
     current_frame = stack.pop()
     trigger_pattern_completed(current_frame, stack, flows)
-    resumed_events = []
-    if len(user_flows_on_the_stack(stack)) > 1:
-        # if there are more user flows on the stack,
-        # we need to trigger the pattern clarify
-        trigger_pattern_clarification(current_frame, stack, flows)
-    else:
-        resumed_events = trigger_pattern_continue_interrupted(
-            current_frame, stack, flows, tracker
-        )
+    resumed_events = trigger_pattern_continue_interrupted(
+        current_frame, stack, flows, tracker
+    )
     reset_events: List[Event] = reset_scoped_slots(current_frame, flow, tracker)
     return ContinueFlowWithNextStep(
         events=initial_events + reset_events + resumed_events, has_flow_ended=True
rasa/core/policies/intentless_policy.py CHANGED
@@ -30,10 +30,6 @@ from rasa.graph_components.providers.forms_provider import Forms
 from rasa.graph_components.providers.responses_provider import Responses
 from rasa.shared.constants import (
     EMBEDDINGS_CONFIG_KEY,
-    LANGFUSE_CUSTOM_METADATA_DICT,
-    LANGFUSE_METADATA_SESSION_ID,
-    LANGFUSE_METADATA_USER_ID,
-    LANGFUSE_TAGS,
     LLM_CONFIG_KEY,
     MODEL_CONFIG_KEY,
     MODEL_GROUP_ID_CONFIG_KEY,
@@ -623,7 +619,6 @@ class IntentlessPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
         response_examples: List[str],
         conversation_samples: List[str],
         history: str,
-        sender_id: str,
     ) -> Optional[str]:
         """Make the llm call to generate an answer."""
         llm = llm_factory(self.config.get(LLM_CONFIG_KEY), DEFAULT_LLM_CONFIG)
@@ -639,19 +634,11 @@ class IntentlessPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
             log_event="intentless_policy.generate_answer.prompt_rendered",
             prompt=prompt,
         )
-        return await self._generate_llm_answer(llm, prompt, sender_id)
+        return await self._generate_llm_answer(llm, prompt)
 
-    async def _generate_llm_answer(
-        self, llm: LLMClient, prompt: str, sender_id: str
-    ) -> Optional[str]:
-        metadata = {
-            LANGFUSE_METADATA_USER_ID: self.user_id,
-            LANGFUSE_METADATA_SESSION_ID: sender_id,
-            LANGFUSE_CUSTOM_METADATA_DICT: {"component": self.__class__.__name__},
-            LANGFUSE_TAGS: [self.__class__.__name__],
-        }
+    async def _generate_llm_answer(self, llm: LLMClient, prompt: str) -> Optional[str]:
         try:
-            llm_response = await llm.acompletion(prompt, metadata)
+            llm_response = await llm.acompletion(prompt)
             return llm_response.choices[0]
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
@@ -727,7 +714,7 @@ class IntentlessPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
             final_response_examples.append(resp)
 
         llm_response = await self.generate_answer(
-            final_response_examples, conversation_samples, history, tracker.sender_id
+            final_response_examples, conversation_samples, history
         )
         if not llm_response:
             structlogger.debug("intentless_policy.prediction.skip_llm_fail")
rasa/core/policies/policy.py CHANGED
@@ -39,7 +39,6 @@ from rasa.shared.core.generator import TrackerWithCachedStates
 from rasa.shared.core.trackers import DialogueStateTracker
 from rasa.shared.exceptions import FileIOException, RasaException
 from rasa.shared.nlu.constants import ACTION_NAME, ACTION_TEXT, ENTITIES, INTENT, TEXT
-from rasa.utils.licensing import get_human_readable_licence_owner
 
 if TYPE_CHECKING:
     from rasa.core.featurizers.tracker_featurizers import (
@@ -173,7 +172,6 @@ class Policy(GraphComponent):
 
         self._model_storage = model_storage
         self._resource = resource
-        self.user_id = get_human_readable_licence_owner()
 
     @classmethod
     def create(
rasa/core/processor.py CHANGED
@@ -76,6 +76,7 @@ from rasa.shared.core.constants import (
     SLOT_SILENCE_TIMEOUT,
     USER_INTENT_RESTART,
     USER_INTENT_SILENCE_TIMEOUT,
+    SetSlotExtractor,
 )
 from rasa.shared.core.events import (
     ActionExecuted,
@@ -766,13 +767,26 @@ class MessageProcessor:
         if self.http_interpreter:
             parse_data = await self.http_interpreter.parse(message)
         else:
-            regex_reader = create_regex_pattern_reader(message, self.domain)
-
             processed_message = Message({TEXT: message.text})
-            if regex_reader:
-                processed_message = regex_reader.unpack_regex_message(
-                    message=processed_message, domain=self.domain
+
+            all_flows = await self.get_flows()
+            should_force_slot_command, slot_name = (
+                rasa.core.utils.should_force_slot_filling(tracker, all_flows)
+            )
+
+            if should_force_slot_command:
+                command = SetSlotCommand(
+                    name=slot_name,
+                    value=message.text,
+                    extractor=SetSlotExtractor.COMMAND_PAYLOAD_READER.value,
                 )
+                processed_message.set(COMMANDS, [command.as_dict()], add_to_output=True)
+            else:
+                regex_reader = create_regex_pattern_reader(message, self.domain)
+                if regex_reader:
+                    processed_message = regex_reader.unpack_regex_message(
+                        message=processed_message, domain=self.domain
+                    )
 
             # Invalid use of slash syntax, sanitize the message before passing
             # it to the graph
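
Paired with the new helper in rasa/core/utils.py below, this branch means that while a forcing collect step is active the raw user text never reaches the regex/command parser; it is wrapped directly in a single SetSlot command. A sketch of the command the processor now fabricates (slot name and value are invented):

from rasa.dialogue_understanding.commands import SetSlotCommand
from rasa.shared.core.constants import SetSlotExtractor

command = SetSlotCommand(
    name="customer_feedback",                     # hypothetical forced slot
    value="The agent was very helpful, thanks!",  # the user's message, verbatim
    extractor=SetSlotExtractor.COMMAND_PAYLOAD_READER.value,
)
print(command.as_dict())
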
rasa/core/utils.py CHANGED
@@ -19,6 +19,7 @@ from rasa.core.constants import (
 )
 from rasa.core.lock_store import InMemoryLockStore, LockStore, RedisLockStore
 from rasa.shared.constants import DEFAULT_ENDPOINTS_PATH, TCP_PROTOCOL
+from rasa.shared.core.constants import SlotMappingType
 from rasa.shared.core.trackers import DialogueStateTracker
 from rasa.utils.endpoints import (
     EndpointConfig,
@@ -30,6 +31,7 @@ from rasa.utils.io import write_yaml
 if TYPE_CHECKING:
     from rasa.core.nlg import NaturalLanguageGenerator
     from rasa.shared.core.domain import Domain
+    from rasa.shared.core.flows.flows_list import FlowsList
 
 structlogger = structlog.get_logger()
 
@@ -364,3 +366,54 @@ def add_bot_utterance_metadata(
     ]
 
     return message
+
+
+def should_force_slot_filling(
+    tracker: Optional[DialogueStateTracker], flows: "FlowsList"
+) -> Tuple[bool, Optional[str]]:
+    """Check if the flow should force slot filling.
+
+    This is only valid when the flow is at a collect information step which
+    has set `force_slot_filling` to true and the slot has a valid `from_text` mapping.
+
+    Args:
+        tracker: The dialogue state tracker.
+        flows: The list of flows.
+
+    Returns:
+        A tuple of a boolean indicating if the flow should force slot filling
+        and the name of the slot if applicable.
+    """
+    from rasa.dialogue_understanding.processor.command_processor import (
+        get_current_collect_step,
+    )
+
+    if tracker is None:
+        structlogger.error(
+            "slot.force_slot_filling.error",
+            event_info="Tracker is None. Cannot force slot filling.",
+        )
+        return False, None
+
+    stack = tracker.stack
+    step = get_current_collect_step(stack, flows)
+    if step is None or not step.force_slot_filling:
+        return False, None
+
+    slot_name = step.collect
+    slot = tracker.slots.get(slot_name)
+
+    if not slot:
+        structlogger.debug(
+            "slot.force_slot_filling.error",
+            event_info=f"Slot '{slot_name}' not found in tracker. "
+            f"Cannot force slot filling. "
+            f"Please check if the slot is defined in the domain.",
+        )
+        return False, None
+
+    for slot_mapping in slot.mappings:
+        if slot_mapping.type == SlotMappingType.FROM_TEXT:
+            return True, slot_name
+
+    return False, None
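
The helper only returns True when three things line up: a collect step sits on top of the dialogue stack, that step has force_slot_filling enabled, and the collected slot carries a from_text mapping (compare the collect.py and slot_mappings.py entries in the file list). A usage sketch, assuming a tracker and the assistant's FlowsList are already in scope:

from rasa.core.utils import should_force_slot_filling

force, slot_name = should_force_slot_filling(tracker, flows)
if force:
    # processor path: the whole user message becomes the slot value
    print(f"forcing slot '{slot_name}' to be filled from raw text")
else:
    # normal path: regex/command parsing proceeds as before
    print("no forced slot filling")
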
rasa/dialogue_understanding/coexistence/llm_based_router.py CHANGED
@@ -23,10 +23,6 @@ from rasa.engine.recipes.default_recipe import DefaultV1Recipe
 from rasa.engine.storage.resource import Resource
 from rasa.engine.storage.storage import ModelStorage
 from rasa.shared.constants import (
-    LANGFUSE_CUSTOM_METADATA_DICT,
-    LANGFUSE_METADATA_SESSION_ID,
-    LANGFUSE_METADATA_USER_ID,
-    LANGFUSE_TAGS,
     MODEL_CONFIG_KEY,
     OPENAI_PROVIDER,
     PROMPT_CONFIG_KEY,
@@ -47,7 +43,6 @@ from rasa.shared.utils.llm import (
     llm_factory,
     resolve_model_client_config,
 )
-from rasa.utils.licensing import get_human_readable_licence_owner
 from rasa.utils.log_utils import log_llm
 
 LLM_BASED_ROUTER_PROMPT_FILE_NAME = "llm_based_router_prompt.jinja2"
@@ -118,7 +113,6 @@ class LLMBasedRouter(LLMHealthCheckMixin, GraphComponent):
         self._model_storage = model_storage
         self._resource = resource
         self.validate_config()
-        self.user_id = get_human_readable_licence_owner()
 
     def validate_config(self) -> None:
         """Validate the config of the router."""
@@ -166,6 +160,7 @@ class LLMBasedRouter(LLMHealthCheckMixin, GraphComponent):
         **kwargs: Any,
     ) -> "LLMBasedRouter":
         """Loads trained component (see parent class for full docstring)."""
+
        # Perform health check on the resolved LLM client config
        llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
        cls.perform_llm_health_check(
@@ -237,7 +232,7 @@ class LLMBasedRouter(LLMHealthCheckMixin, GraphComponent):
             prompt=prompt,
         )
         # generating answer
-        answer = await self._generate_answer_using_llm(prompt, tracker.sender_id)
+        answer = await self._generate_answer_using_llm(prompt)
         log_llm(
             logger=structlogger,
             log_module="LLMBasedRouter",
@@ -297,9 +292,7 @@ class LLMBasedRouter(LLMHealthCheckMixin, GraphComponent):
 
         return Template(self.prompt_template).render(**inputs)
 
-    async def _generate_answer_using_llm(
-        self, prompt: str, sender_id: str
-    ) -> Optional[str]:
+    async def _generate_answer_using_llm(self, prompt: str) -> Optional[str]:
         """Use LLM to generate a response.
 
         Args:
@@ -310,15 +303,8 @@ class LLMBasedRouter(LLMHealthCheckMixin, GraphComponent):
         """
         llm = llm_factory(self.config.get(LLM_CONFIG_KEY), DEFAULT_LLM_CONFIG)
 
-        metadata = {
-            LANGFUSE_METADATA_USER_ID: self.user_id,
-            LANGFUSE_METADATA_SESSION_ID: sender_id,
-            LANGFUSE_CUSTOM_METADATA_DICT: {"component": self.__class__.__name__},
-            LANGFUSE_TAGS: [self.__class__.__name__],
-        }
-
         try:
-            llm_response = await llm.acompletion(prompt, metadata)
+            llm_response = await llm.acompletion(prompt)
             return llm_response.choices[0]
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means