rasa-pro 3.12.0.dev12__py3-none-any.whl → 3.12.0rc1__py3-none-any.whl

This diff shows the contents of two publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.

Files changed (153)
  1. rasa/anonymization/anonymization_rule_executor.py +16 -10
  2. rasa/cli/data.py +16 -0
  3. rasa/cli/inspect.py +20 -1
  4. rasa/cli/project_templates/calm/config.yml +2 -2
  5. rasa/cli/project_templates/calm/endpoints.yml +2 -2
  6. rasa/cli/shell.py +3 -3
  7. rasa/cli/utils.py +12 -0
  8. rasa/core/actions/action.py +99 -193
  9. rasa/core/actions/action_handle_digressions.py +142 -0
  10. rasa/core/actions/action_run_slot_rejections.py +16 -4
  11. rasa/core/actions/forms.py +10 -5
  12. rasa/core/channels/__init__.py +4 -0
  13. rasa/core/channels/studio_chat.py +19 -0
  14. rasa/core/channels/telegram.py +42 -24
  15. rasa/core/channels/voice_ready/audiocodes.py +42 -23
  16. rasa/core/channels/voice_ready/utils.py +1 -1
  17. rasa/core/channels/voice_stream/asr/asr_engine.py +10 -4
  18. rasa/core/channels/voice_stream/asr/azure.py +14 -1
  19. rasa/core/channels/voice_stream/asr/deepgram.py +20 -4
  20. rasa/core/channels/voice_stream/audiocodes.py +264 -0
  21. rasa/core/channels/voice_stream/browser_audio.py +5 -1
  22. rasa/core/channels/voice_stream/call_state.py +10 -1
  23. rasa/core/channels/voice_stream/genesys.py +335 -0
  24. rasa/core/channels/voice_stream/tts/azure.py +11 -2
  25. rasa/core/channels/voice_stream/tts/cartesia.py +29 -10
  26. rasa/core/channels/voice_stream/twilio_media_streams.py +2 -1
  27. rasa/core/channels/voice_stream/voice_channel.py +25 -3
  28. rasa/core/constants.py +2 -0
  29. rasa/core/migrate.py +2 -2
  30. rasa/core/nlg/contextual_response_rephraser.py +18 -1
  31. rasa/core/nlg/generator.py +83 -15
  32. rasa/core/nlg/response.py +6 -3
  33. rasa/core/nlg/translate.py +55 -0
  34. rasa/core/policies/enterprise_search_prompt_with_citation_template.jinja2 +1 -1
  35. rasa/core/policies/flows/flow_executor.py +47 -46
  36. rasa/core/processor.py +72 -9
  37. rasa/core/run.py +4 -3
  38. rasa/dialogue_understanding/commands/can_not_handle_command.py +20 -2
  39. rasa/dialogue_understanding/commands/cancel_flow_command.py +80 -4
  40. rasa/dialogue_understanding/commands/change_flow_command.py +20 -2
  41. rasa/dialogue_understanding/commands/chit_chat_answer_command.py +20 -2
  42. rasa/dialogue_understanding/commands/clarify_command.py +29 -3
  43. rasa/dialogue_understanding/commands/command.py +1 -16
  44. rasa/dialogue_understanding/commands/command_syntax_manager.py +55 -0
  45. rasa/dialogue_understanding/commands/correct_slots_command.py +11 -2
  46. rasa/dialogue_understanding/commands/handle_digressions_command.py +150 -0
  47. rasa/dialogue_understanding/commands/human_handoff_command.py +20 -2
  48. rasa/dialogue_understanding/commands/knowledge_answer_command.py +20 -2
  49. rasa/dialogue_understanding/commands/prompt_command.py +94 -0
  50. rasa/dialogue_understanding/commands/repeat_bot_messages_command.py +20 -2
  51. rasa/dialogue_understanding/commands/set_slot_command.py +29 -15
  52. rasa/dialogue_understanding/commands/skip_question_command.py +20 -2
  53. rasa/dialogue_understanding/commands/start_flow_command.py +61 -2
  54. rasa/dialogue_understanding/commands/utils.py +98 -4
  55. rasa/dialogue_understanding/constants.py +1 -0
  56. rasa/dialogue_understanding/generator/__init__.py +2 -0
  57. rasa/dialogue_understanding/generator/command_generator.py +110 -73
  58. rasa/dialogue_understanding/generator/command_parser.py +16 -13
  59. rasa/dialogue_understanding/generator/constants.py +3 -0
  60. rasa/dialogue_understanding/generator/llm_based_command_generator.py +170 -5
  61. rasa/dialogue_understanding/generator/llm_command_generator.py +5 -3
  62. rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +26 -4
  63. rasa/dialogue_understanding/generator/nlu_command_adapter.py +44 -3
  64. rasa/dialogue_understanding/generator/prompt_templates/__init__.py +0 -0
  65. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_template.jinja2 +60 -0
  66. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2 +77 -0
  67. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_default.jinja2 +68 -0
  68. rasa/dialogue_understanding/generator/{single_step/command_prompt_template.jinja2 → prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2} +1 -1
  69. rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +460 -0
  70. rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +12 -318
  71. rasa/dialogue_understanding/generator/utils.py +32 -1
  72. rasa/dialogue_understanding/patterns/collect_information.py +1 -1
  73. rasa/dialogue_understanding/patterns/correction.py +13 -1
  74. rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +78 -2
  75. rasa/dialogue_understanding/patterns/handle_digressions.py +81 -0
  76. rasa/dialogue_understanding/patterns/validate_slot.py +65 -0
  77. rasa/dialogue_understanding/processor/command_processor.py +154 -28
  78. rasa/dialogue_understanding/utils.py +31 -0
  79. rasa/dialogue_understanding_test/README.md +50 -0
  80. rasa/dialogue_understanding_test/du_test_case.py +28 -8
  81. rasa/dialogue_understanding_test/du_test_result.py +13 -9
  82. rasa/dialogue_understanding_test/io.py +14 -0
  83. rasa/dialogue_understanding_test/test_case_simulation/test_case_tracker_simulator.py +3 -3
  84. rasa/e2e_test/utils/io.py +0 -37
  85. rasa/engine/graph.py +1 -0
  86. rasa/engine/language.py +140 -0
  87. rasa/engine/recipes/config_files/default_config.yml +4 -0
  88. rasa/engine/recipes/default_recipe.py +2 -0
  89. rasa/engine/recipes/graph_recipe.py +2 -0
  90. rasa/engine/storage/local_model_storage.py +1 -0
  91. rasa/engine/storage/storage.py +4 -1
  92. rasa/model_manager/runner_service.py +7 -4
  93. rasa/model_manager/socket_bridge.py +7 -6
  94. rasa/model_manager/warm_rasa_process.py +0 -1
  95. rasa/model_training.py +24 -27
  96. rasa/shared/constants.py +15 -13
  97. rasa/shared/core/constants.py +30 -3
  98. rasa/shared/core/domain.py +13 -20
  99. rasa/shared/core/events.py +13 -2
  100. rasa/shared/core/flows/constants.py +11 -0
  101. rasa/shared/core/flows/flow.py +100 -19
  102. rasa/shared/core/flows/flows_yaml_schema.json +69 -3
  103. rasa/shared/core/flows/steps/collect.py +19 -37
  104. rasa/shared/core/flows/utils.py +43 -4
  105. rasa/shared/core/flows/validation.py +1 -1
  106. rasa/shared/core/slot_mappings.py +350 -111
  107. rasa/shared/core/slots.py +154 -3
  108. rasa/shared/core/trackers.py +77 -2
  109. rasa/shared/importers/importer.py +50 -2
  110. rasa/shared/nlu/constants.py +1 -0
  111. rasa/shared/nlu/training_data/schemas/responses.yml +19 -12
  112. rasa/shared/providers/_configs/azure_entra_id_config.py +541 -0
  113. rasa/shared/providers/_configs/azure_openai_client_config.py +138 -3
  114. rasa/shared/providers/_configs/client_config.py +3 -1
  115. rasa/shared/providers/_configs/default_litellm_client_config.py +3 -1
  116. rasa/shared/providers/_configs/huggingface_local_embedding_client_config.py +3 -1
  117. rasa/shared/providers/_configs/litellm_router_client_config.py +3 -1
  118. rasa/shared/providers/_configs/model_group_config.py +4 -2
  119. rasa/shared/providers/_configs/oauth_config.py +33 -0
  120. rasa/shared/providers/_configs/openai_client_config.py +3 -1
  121. rasa/shared/providers/_configs/rasa_llm_client_config.py +3 -1
  122. rasa/shared/providers/_configs/self_hosted_llm_client_config.py +3 -1
  123. rasa/shared/providers/constants.py +6 -0
  124. rasa/shared/providers/embedding/azure_openai_embedding_client.py +28 -3
  125. rasa/shared/providers/embedding/litellm_router_embedding_client.py +3 -1
  126. rasa/shared/providers/llm/_base_litellm_client.py +42 -17
  127. rasa/shared/providers/llm/azure_openai_llm_client.py +81 -25
  128. rasa/shared/providers/llm/default_litellm_llm_client.py +3 -1
  129. rasa/shared/providers/llm/litellm_router_llm_client.py +29 -8
  130. rasa/shared/providers/llm/llm_client.py +23 -7
  131. rasa/shared/providers/llm/openai_llm_client.py +9 -3
  132. rasa/shared/providers/llm/rasa_llm_client.py +11 -2
  133. rasa/shared/providers/llm/self_hosted_llm_client.py +30 -11
  134. rasa/shared/providers/router/_base_litellm_router_client.py +3 -1
  135. rasa/shared/providers/router/router_client.py +3 -1
  136. rasa/shared/utils/constants.py +3 -0
  137. rasa/shared/utils/llm.py +31 -8
  138. rasa/shared/utils/pykwalify_extensions.py +24 -0
  139. rasa/shared/utils/schemas/domain.yml +26 -1
  140. rasa/telemetry.py +45 -14
  141. rasa/tracing/config.py +2 -0
  142. rasa/tracing/constants.py +12 -0
  143. rasa/tracing/instrumentation/instrumentation.py +36 -0
  144. rasa/tracing/instrumentation/metrics.py +41 -0
  145. rasa/tracing/metric_instrument_provider.py +40 -0
  146. rasa/utils/common.py +0 -1
  147. rasa/validator.py +561 -89
  148. rasa/version.py +1 -1
  149. {rasa_pro-3.12.0.dev12.dist-info → rasa_pro-3.12.0rc1.dist-info}/METADATA +2 -1
  150. {rasa_pro-3.12.0.dev12.dist-info → rasa_pro-3.12.0rc1.dist-info}/RECORD +153 -134
  151. {rasa_pro-3.12.0.dev12.dist-info → rasa_pro-3.12.0rc1.dist-info}/NOTICE +0 -0
  152. {rasa_pro-3.12.0.dev12.dist-info → rasa_pro-3.12.0rc1.dist-info}/WHEEL +0 -0
  153. {rasa_pro-3.12.0.dev12.dist-info → rasa_pro-3.12.0rc1.dist-info}/entry_points.txt +0 -0
rasa/core/channels/voice_stream/tts/azure.py CHANGED
@@ -21,6 +21,7 @@ structlogger = structlog.get_logger()
  @dataclass
  class AzureTTSConfig(TTSEngineConfig):
  speech_region: Optional[str] = None
+ endpoint: Optional[str] = None


  class AzureTTS(TTSEngine[AzureTTSConfig]):
@@ -76,12 +77,19 @@ class AzureTTS(TTSEngine[AzureTTSConfig]):

  @staticmethod
  def get_tts_endpoint(config: AzureTTSConfig) -> str:
- return f"https://{config.speech_region}.tts.speech.microsoft.com/cognitiveservices/v1"
+ if config.endpoint is not None:
+ return config.endpoint
+ else:
+ return (
+ f"https://{config.speech_region}.tts.speech.microsoft.com/"
+ f"cognitiveservices/v1"
+ )

  @staticmethod
  def create_request_body(text: str, conf: AzureTTSConfig) -> str:
  return f"""
- <speak version='1.0' xml:lang='{conf.language}'>
+ <speak version='1.0' xml:lang='{conf.language}' xmlns:mstts='http://www.w3.org/2001/mstts'
+ xmlns='http://www.w3.org/2001/10/synthesis'>
  <voice xml:lang='{conf.language}' name='{conf.voice}'>
  {text}
  </voice>
@@ -98,6 +106,7 @@ class AzureTTS(TTSEngine[AzureTTSConfig]):
  voice="en-US-JennyNeural",
  timeout=10,
  speech_region="eastus",
+ endpoint=None,
  )

  @classmethod
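
The new `endpoint` field lets an explicitly configured URL take precedence over the region-derived Azure TTS URL. The snippet below is a minimal standalone sketch of that resolution logic; it mirrors `get_tts_endpoint` above rather than importing it, so the remaining `TTSEngineConfig` defaults do not matter here.

from typing import Optional

def resolve_azure_tts_endpoint(speech_region: Optional[str], endpoint: Optional[str]) -> str:
    # An explicit endpoint wins; otherwise the URL is derived from the speech region.
    if endpoint is not None:
        return endpoint
    return (
        f"https://{speech_region}.tts.speech.microsoft.com/"
        f"cognitiveservices/v1"
    )

assert resolve_azure_tts_endpoint("eastus", None) == (
    "https://eastus.tts.speech.microsoft.com/cognitiveservices/v1"
)
assert resolve_azure_tts_endpoint(
    "eastus", "https://my-gateway.example.com/cognitiveservices/v1"
) == "https://my-gateway.example.com/cognitiveservices/v1"

The `endpoint=None` default in the shipped configuration keeps existing region-based setups working unchanged.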
rasa/core/channels/voice_stream/tts/cartesia.py CHANGED
@@ -1,3 +1,5 @@
+ import base64
+ import json
  import os
  from dataclasses import dataclass
  from typing import AsyncIterator, Dict, Optional
@@ -22,6 +24,7 @@ structlogger = structlog.get_logger()
  class CartesiaTTSConfig(TTSEngineConfig):
  model_id: Optional[str] = None
  version: Optional[str] = None
+ endpoint: Optional[str] = None


  class CartesiaTTS(TTSEngine[CartesiaTTSConfig]):
@@ -36,11 +39,6 @@ class CartesiaTTS(TTSEngine[CartesiaTTSConfig]):
  if self.__class__.session is None or self.__class__.session.closed:
  self.__class__.session = aiohttp.ClientSession(timeout=timeout)

- @staticmethod
- def get_tts_endpoint() -> str:
- """Create the endpoint string for cartesia."""
- return "https://api.cartesia.ai/tts/bytes"
-
  @staticmethod
  def get_request_body(text: str, config: CartesiaTTSConfig) -> Dict:
  """Create the request body for cartesia."""
@@ -77,7 +75,7 @@ class CartesiaTTS(TTSEngine[CartesiaTTSConfig]):
  config = self.config.merge(config)
  payload = self.get_request_body(text, config)
  headers = self.get_request_headers(config)
- url = self.get_tts_endpoint()
+ url = self.config.endpoint
  if self.session is None:
  raise ConnectionException("Client session is not initialized")
  try:
@@ -85,16 +83,36 @@ class CartesiaTTS(TTSEngine[CartesiaTTSConfig]):
  url, headers=headers, json=payload, chunked=True
  ) as response:
  if 200 <= response.status < 300:
- async for data in response.content.iter_chunked(1024):
- yield self.engine_bytes_to_rasa_audio_bytes(data)
+ async for chunk in response.content:
+ # we are looking for chunks in the response that look like
+ # b"data: {..., data: <base64 encoded audio bytes> ...}"
+ # and extract the audio bytes from that
+ if chunk.startswith(b"data: "):
+ json_bytes = chunk[5:-1]
+ json_data = json.loads(json_bytes.decode())
+ if "data" in json_data:
+ base64_encoded_bytes = json_data["data"]
+ channel_bytes = base64.b64decode(base64_encoded_bytes)
+ yield self.engine_bytes_to_rasa_audio_bytes(
+ channel_bytes
+ )
  return
+ elif response.status == 401:
+ structlogger.error(
+ "cartesia.synthesize.rest.unauthorized",
+ status_code=response.status,
+ )
+ raise TTSError(
+ "Unauthorized. Please make sure you have the correct API key."
+ )
  else:
+ response_text = await response.text()
  structlogger.error(
  "cartesia.synthesize.rest.failed",
  status_code=response.status,
- msg=response.text(),
+ msg=response_text,
  )
- raise TTSError(f"TTS failed: {response.text()}")
+ raise TTSError(f"TTS failed: {response_text}")
  except ClientConnectorError as e:
  raise TTSError(e)
  except TimeoutError as e:
@@ -112,6 +130,7 @@ class CartesiaTTS(TTSEngine[CartesiaTTSConfig]):
  timeout=10,
  model_id="sonic-english",
  version="2024-06-10",
+ endpoint="https://api.cartesia.ai/tts/sse",
  )

  @classmethod
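
With the default endpoint now pointing at Cartesia's SSE route, `synthesize` parses server-sent `data:` lines and base64-decodes the audio payload instead of streaming raw response bytes. Below is a standalone sketch of that per-line parsing, assuming (as the code above does) that each chunk is one complete SSE line; it is illustrative only and not the rasa implementation.

import base64
import json
from typing import Optional

def extract_audio_from_sse_line(chunk: bytes) -> Optional[bytes]:
    """Return decoded audio bytes from one SSE line, or None if it carries none."""
    # Lines look like b'data: {"data": "<base64 audio>", ...}\n'
    if not chunk.startswith(b"data: "):
        return None
    payload = json.loads(chunk[len(b"data:"):].decode())
    if "data" not in payload:
        return None
    return base64.b64decode(payload["data"])

# Example with a synthetic SSE line:
line = b"data: " + json.dumps({"data": base64.b64encode(b"\x00\x01").decode()}).encode() + b"\n"
assert extract_audio_from_sse_line(line) == b"\x00\x01"

The new 401 branch above additionally turns an invalid API key into an explicit `TTSError` instead of a generic failure message.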
rasa/core/channels/voice_stream/twilio_media_streams.py CHANGED
@@ -98,6 +98,7 @@ class TwilioMediaStreamsInputChannel(VoiceInputChannel):
  def map_input_message(
  self,
  message: Any,
+ ws: Websocket,
  ) -> VoiceChannelAction:
  data = json.loads(message)
  if data["event"] == "media":
@@ -142,7 +143,7 @@ class TwilioMediaStreamsInputChannel(VoiceInputChannel):
  def blueprint(
  self, on_new_message: Callable[[UserMessage], Awaitable[Any]]
  ) -> Blueprint:
- """Defines a Sanic bluelogger.debug."""
+ """Defines a Sanic blueprint for the voice input channel."""
  blueprint = Blueprint("twilio_media_streams", __name__)

  @blueprint.route("/", methods=["GET"])
rasa/core/channels/voice_stream/voice_channel.py CHANGED
@@ -148,6 +148,19 @@ class VoiceOutputChannel(OutputChannel):
  await self.voice_websocket.send(marker_message)
  self.latest_message_id = mark_id

+ async def send_start_marker(self, recipient_id: str) -> None:
+ """Send a marker message before the first audio chunk."""
+ # Default implementation uses the generic marker message
+ await self.send_marker_message(recipient_id)
+
+ async def send_intermediate_marker(self, recipient_id: str) -> None:
+ """Send a marker message during audio streaming."""
+ await self.send_marker_message(recipient_id)
+
+ async def send_end_marker(self, recipient_id: str) -> None:
+ """Send a marker message after the last audio chunk."""
+ await self.send_marker_message(recipient_id)
+
  def update_silence_timeout(self) -> None:
  """Updates the silence timeout for the session."""
  if self.tracker_state:
@@ -173,6 +186,13 @@ class VoiceOutputChannel(OutputChannel):
  cached_audio_bytes = self.tts_cache.get(text)
  collected_audio_bytes = RasaAudioBytes(b"")
  seconds_marker = -1
+
+ # Send start marker before first chunk
+ try:
+ await self.send_start_marker(recipient_id)
+ except (WebsocketClosed, ServerError):
+ call_state.connection_failed = True  # type: ignore[attr-defined]
+
  if cached_audio_bytes:
  audio_stream = self.chunk_audio(cached_audio_bytes)
  else:
@@ -189,15 +209,16 @@ class VoiceOutputChannel(OutputChannel):
  await self.send_audio_bytes(recipient_id, audio_bytes)
  full_seconds_of_audio = len(collected_audio_bytes) // HERTZ
  if full_seconds_of_audio > seconds_marker:
- await self.send_marker_message(recipient_id)
+ await self.send_intermediate_marker(recipient_id)
  seconds_marker = full_seconds_of_audio

  except (WebsocketClosed, ServerError):
  # ignore sending error, and keep collecting and caching audio bytes
  call_state.connection_failed = True # type: ignore[attr-defined]
  collected_audio_bytes = RasaAudioBytes(collected_audio_bytes + audio_bytes)
+
  try:
- await self.send_marker_message(recipient_id)
+ await self.send_end_marker(recipient_id)
  except (WebsocketClosed, ServerError):
  # ignore sending error
  pass
@@ -315,6 +336,7 @@ class VoiceInputChannel(InputChannel):
  def map_input_message(
  self,
  message: Any,
+ ws: Websocket,
  ) -> VoiceChannelAction:
  """Map a channel input message to a voice channel action."""
  raise NotImplementedError
@@ -340,7 +362,7 @@ class VoiceInputChannel(InputChannel):
  async def consume_audio_bytes() -> None:
  async for message in channel_websocket:
  is_bot_speaking_before = call_state.is_bot_speaking
- channel_action = self.map_input_message(message)
+ channel_action = self.map_input_message(message, channel_websocket)
  is_bot_speaking_after = call_state.is_bot_speaking

  if not is_bot_speaking_before and is_bot_speaking_after:
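
The start/intermediate/end marker hooks all fall back to the generic `send_marker_message`, so existing channels keep their behaviour, while channel-specific subclasses can override individual hooks. A hedged sketch of such an override follows; the subclass name and event payloads are hypothetical, only the hook names and `voice_websocket.send` come from the code above.

from rasa.core.channels.voice_stream.voice_channel import VoiceOutputChannel

class MyVoiceOutputChannel(VoiceOutputChannel):
    async def send_start_marker(self, recipient_id: str) -> None:
        # Hypothetical: tell the telephony platform that bot audio is about to start.
        await self.voice_websocket.send('{"event": "bot_audio_start"}')

    async def send_intermediate_marker(self, recipient_id: str) -> None:
        # Keep the default per-second marker behaviour.
        await self.send_marker_message(recipient_id)

    async def send_end_marker(self, recipient_id: str) -> None:
        # Hypothetical: signal that the bot has finished speaking.
        await self.voice_websocket.send('{"event": "bot_audio_end"}')

`map_input_message` implementations (Twilio above, and the new Audiocodes and Genesys stream channels in the file list) now also receive the websocket alongside the raw message.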
rasa/core/constants.py CHANGED
@@ -110,3 +110,5 @@ UTTER_SOURCE_METADATA_KEY = "utter_source"
  DOMAIN_GROUND_TRUTH_METADATA_KEY = "domain_ground_truth"
  ACTIVE_FLOW_METADATA_KEY = "active_flow"
  STEP_ID_METADATA_KEY = "step_id"
+ KEY_IS_CALM_SYSTEM = "is_calm_system"
+ KEY_IS_COEXISTENCE_ASSISTANT = "is_coexistence_assistant"
rasa/core/migrate.py CHANGED
@@ -14,7 +14,7 @@ from rasa.shared.constants import (
  )
  from rasa.shared.core.constants import (
  ACTIVE_LOOP,
- MAPPING_TYPE,
+ KEY_MAPPING_TYPE,
  REQUESTED_SLOT,
  SLOT_MAPPINGS,
  SlotMappingType,
@@ -43,7 +43,7 @@ def _create_back_up(domain_file: Path, backup_location: Path) -> Dict[Text, Any]
  def _get_updated_mapping_condition(
  condition: Dict[Text, Text], mapping: Dict[Text, Any], slot_name: Text
  ) -> Dict[Text, Text]:
- if mapping.get(MAPPING_TYPE) not in [
+ if mapping.get(KEY_MAPPING_TYPE) not in [
  str(SlotMappingType.FROM_ENTITY),
  str(SlotMappingType.FROM_TRIGGER_INTENT),
  ]:
rasa/core/nlg/contextual_response_rephraser.py CHANGED
@@ -64,7 +64,7 @@ DEFAULT_LLM_CONFIG = {
  DEFAULT_RESPONSE_VARIATION_PROMPT_TEMPLATE = """The following is a conversation with
  an AI assistant. The assistant is helpful, creative, clever, and very friendly.
  Rephrase the suggested AI response staying close to the original message and retaining
- its meaning. Use simple english.
+ its meaning. Use simple {{language}}.

  Context / previous conversation with the user:
  {{history}}
@@ -164,6 +164,22 @@ class ContextualResponseRephraser(
  response[PROMPTS] = prompts
  return response

+ @staticmethod
+ def get_language_label(tracker: DialogueStateTracker) -> str:
+ """Fetches the label of the language to be used for the rephraser.
+
+ Args:
+ tracker: The tracker to get the language from.
+
+ Returns:
+ The label of the current language, or "English" if no language is set.
+ """
+ return (
+ tracker.current_language.label
+ if tracker.current_language
+ else tracker.default_language.label
+ )
+
  def _last_message_if_human(self, tracker: DialogueStateTracker) -> Optional[str]:
  """Returns the latest message from the tracker.

@@ -281,6 +297,7 @@ class ContextualResponseRephraser(
  suggested_response=response_text,
  current_input=current_input,
  slots=tracker.current_slot_values(),
+ language=self.get_language_label(tracker),
  )
  log_llm(
  logger=structlogger,
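
The rephraser prompt is now language-aware: `{{language}}` is filled with the label returned by `get_language_label`, which prefers the tracker's current language and otherwise falls back to the assistant's default language. A minimal sketch of how that variable changes the rendered instruction, using Jinja2 directly and an abbreviated template; the history, current input, and slot values passed alongside it in the real prompt are omitted.

from jinja2 import Template

prompt = Template(
    "Rephrase the suggested AI response staying close to the original message "
    "and retaining its meaning. Use simple {{ language }}."
)
print(prompt.render(language="English"))
print(prompt.render(language="German"))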
rasa/core/nlg/generator.py CHANGED
@@ -1,6 +1,9 @@
- import logging
  from typing import Any, Dict, List, Optional, Text, Union

+ import structlog
+ from jinja2 import Template
+ from pypred import Predicate
+
  import rasa.shared.utils.common
  import rasa.shared.utils.io
  from rasa.shared.constants import CHANNEL, RESPONSE_CONDITION
@@ -8,7 +11,7 @@ from rasa.shared.core.domain import Domain
  from rasa.shared.core.trackers import DialogueStateTracker
  from rasa.utils.endpoints import EndpointConfig

- logger = logging.getLogger(__name__)
+ structlogger = structlog.get_logger()


  class NaturalLanguageGenerator:
@@ -74,7 +77,11 @@ def _create_from_endpoint_config(
  else:
  nlg = _load_from_module_name_in_endpoint_config(endpoint_config, domain)

- logger.debug(f"Instantiated NLG to '{nlg.__class__.__name__}'.")
+ structlogger.debug(
+ "rasa.core.nlg.generator.create",
+ nlg_class_name=nlg.__class__.__name__,
+ event_info=f"Instantiated NLG to '{nlg.__class__.__name__}'.",
+ )
  return nlg


@@ -112,18 +119,15 @@ class ResponseVariationFilter:
  ) -> bool:
  """Checks if the conditional response variation matches the filled slots."""
  constraints = response.get(RESPONSE_CONDITION, [])
- for constraint in constraints:
- name = constraint["name"]
- value = constraint["value"]
- filled_slots_value = filled_slots.get(name)
- if isinstance(filled_slots_value, str) and isinstance(value, str):
- if filled_slots_value.casefold() != value.casefold():
+ if isinstance(constraints, str) and not _evaluate_predicate(
+ constraints, filled_slots
+ ):
+ return False
+
+ elif isinstance(constraints, list):
+ for constraint in constraints:
+ if not _evaluate_and_deprecate_condition(constraint, filled_slots):
  return False
- # slot values can be of different data types
- # such as int, float, bool, etc. hence, this check
- # executes when slot values are not strings
- elif filled_slots_value != value:
- return False

  return True

@@ -180,7 +184,21 @@ class ResponseVariationFilter:
  if conditional_no_channel:
  return conditional_no_channel

- return default_no_channel
+ if default_no_channel:
+ return default_no_channel
+
+ # if there is no response variation selected,
+ # return the internal error response to prevent
+ # the bot from staying silent
+ structlogger.error(
+ "rasa.core.nlg.generator.responses_for_utter_action.no_response",
+ utter_action=utter_action,
+ event_info=f"No response variation selected for the predicted "
+ f"utterance {utter_action}. Please check you have provided "
+ f"a default variation and that all the conditions are valid. "
+ f"Returning the internal error response.",
+ )
+ return self.responses.get("utter_internal_error_rasa", [])

  def get_response_variation_id(
  self,
@@ -228,3 +246,53 @@ class ResponseVariationFilter:
  response_ids.add(response_variation_id)

  return True
+
+
+ def _evaluate_and_deprecate_condition(
+ constraint: Dict[Text, Any], filled_slots: Dict[Text, Any]
+ ) -> bool:
+ """Evaluates the condition of a response variation."""
+ rasa.shared.utils.io.raise_deprecation_warning(
+ "Using a dictionary as a condition in a response variation is deprecated. "
+ "Please use a pypred string predicate instead. "
+ "Dictionary conditions will be removed in Rasa Open Source 4.0.0 .",
+ warn_until_version="4.0.0",
+ )
+
+ name = constraint["name"]
+ value = constraint["value"]
+ filled_slots_value = filled_slots.get(name)
+ if isinstance(filled_slots_value, str) and isinstance(value, str):
+ if filled_slots_value.casefold() != value.casefold():
+ return False
+ # slot values can be of different data types
+ # such as int, float, bool, etc. hence, this check
+ # executes when slot values are not strings
+ elif filled_slots_value != value:
+ return False
+
+ return True
+
+
+ def _evaluate_predicate(constraint: str, filled_slots: Dict[Text, Any]) -> bool:
+ """Evaluates the condition of a response variation."""
+ context = {"slots": filled_slots}
+ document = context.copy()
+ try:
+ rendered_template = Template(constraint).render(context)
+ predicate = Predicate(rendered_template)
+ result = predicate.evaluate(document)
+ structlogger.debug(
+ "rasa.core.nlg.generator.evaluate_conditional_response_predicate",
+ predicate=predicate.description(),
+ result=result,
+ )
+ return result
+ except (TypeError, Exception) as e:
+ structlogger.error(
+ "rasa.core.nlg.generator.evaluate_conditional_response_predicate.error",
+ predicate=constraint,
+ document=document,
+ error=str(e),
+ )
+ return False
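
Response conditions can now be a single pypred predicate string (rendered through Jinja2 first), while the old list-of-dict form still works but raises a deprecation warning. Below is a standalone sketch of the string-predicate path, mirroring `_evaluate_predicate` above; the slot names and values are made up for illustration, and the predicate syntax follows pypred.

from jinja2 import Template
from pypred import Predicate

filled_slots = {"account_type": "premium", "age": 42}
condition = "slots.account_type is 'premium' and slots.age > 18"

# The condition is rendered as a Jinja2 template first (a no-op here),
# then evaluated against a document that exposes the filled slots.
document = {"slots": filled_slots}
rendered = Template(condition).render(document)
print(Predicate(rendered).evaluate(document))  # True for the slots above

In the domain, such a condition would presumably appear as a plain string under the response variation's condition key (the updated `responses.yml` schema in the file list suggests this), instead of the deprecated list of name/value dictionaries.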
rasa/core/nlg/response.py CHANGED
@@ -49,9 +49,12 @@ class TemplatedNaturalLanguageGenerator(NaturalLanguageGenerator):
  selected_response = np.random.choice(suitable_responses)
  condition = selected_response.get(RESPONSE_CONDITION)
  if condition:
- formatted_response_conditions = self._format_response_conditions(
- condition
- )
+ if isinstance(condition, list):
+ formatted_response_conditions = (
+ self._format_response_conditions(condition)
+ )
+ else:
+ formatted_response_conditions = condition
  logger.debug(
  "Selecting response variation with conditions:"
  f"{formatted_response_conditions}"
rasa/core/nlg/translate.py ADDED
@@ -0,0 +1,55 @@
+ from typing import Any, Dict, List, Optional, Text
+
+ from rasa.engine.language import Language
+ from rasa.shared.core.flows.constants import KEY_TRANSLATION
+
+
+ def get_translated_text(
+ text: Optional[Text],
+ translation: Dict[Text, Any],
+ language: Optional[Language] = None,
+ ) -> Optional[Text]:
+ """Get the translated text from the message.
+
+ Args:
+ text: The default text to use if no translation is found.
+ translation: The translations for the text.
+ language: The language to use for the translation.
+
+ Returns:
+ The translated text if found, otherwise the default text.
+ """
+ language_code = language.code if language else None
+ return translation.get(language_code, text)
+
+
+ def get_translated_buttons(
+ buttons: Optional[List[Dict[Text, Any]]], language: Optional[Language] = None
+ ) -> Optional[List[Dict[Text, Any]]]:
+ """Get the translated buttons from the message.
+
+ Args:
+ buttons: The default buttons to use if no translation is found.
+ language: The language to use for the translation.
+
+ Returns:
+ The translated buttons if found; otherwise, the default buttons.
+ """
+ if buttons is None:
+ return None
+
+ language_code = language.code if language else None
+ translated_buttons = []
+ for button in buttons:
+ translation = button.get(KEY_TRANSLATION, {})
+ language_translation = translation.get(language_code, {})
+
+ # Maintain the original key order to ensure
+ # accurate comparisons of BotUtter events.
+ translated_button = {
+ key: language_translation.get(key, button.get(key))
+ for key, value in button.items()
+ if key != KEY_TRANSLATION
+ }
+ translated_buttons.append(translated_button)
+ return translated_buttons
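
These helpers pick the translation keyed by the language's code and fall back to the default text or button fields. A usage sketch follows; the stand-in object only mimics the `.code` attribute of `rasa.engine.language.Language`, whose constructor is not shown in this diff, and the button fields are illustrative.

from dataclasses import dataclass

from rasa.core.nlg.translate import get_translated_buttons, get_translated_text
from rasa.shared.core.flows.constants import KEY_TRANSLATION

@dataclass
class StubLanguage:
    code: str  # stands in for rasa.engine.language.Language

translations = {"de": "Hallo! Wie kann ich helfen?"}
print(get_translated_text("Hello! How can I help?", translations, StubLanguage("de")))
# -> "Hallo! Wie kann ich helfen?"
print(get_translated_text("Hello! How can I help?", translations, None))
# -> "Hello! How can I help?" (no language set, default text is returned)

buttons = [
    {
        "title": "Check balance",
        "payload": "/check_balance",
        KEY_TRANSLATION: {"de": {"title": "Kontostand prüfen"}},
    }
]
print(get_translated_buttons(buttons, StubLanguage("de")))
# -> [{"title": "Kontostand prüfen", "payload": "/check_balance"}]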
rasa/core/policies/enterprise_search_prompt_with_citation_template.jinja2 CHANGED
@@ -4,7 +4,7 @@ If the answer is not known or cannot be determined from the provided documents o
  Use the following documents to answer the question:
  {% for doc in docs %}
  {{ loop.cycle("*")}}. {{ doc.metadata }}
- {{ doc.page_content }}
+ {{ doc.text }}
  {% endfor %}

  {% if citation_enabled %}
rasa/core/policies/flows/flow_executor.py CHANGED
@@ -23,6 +23,7 @@ from rasa.core.policies.flows.flow_step_result import (
  )
  from rasa.dialogue_understanding.commands import CancelFlowCommand
  from rasa.dialogue_understanding.patterns.cancel import CancelPatternFlowStackFrame
+ from rasa.dialogue_understanding.patterns.clarify import ClarifyPatternFlowStackFrame
  from rasa.dialogue_understanding.patterns.collect_information import (
  CollectInformationPatternFlowStackFrame,
  )
@@ -50,9 +51,12 @@ from rasa.dialogue_understanding.stack.frames.flow_stack_frame import (
  )
  from rasa.dialogue_understanding.stack.utils import (
  top_user_flow_frame,
+ user_flows_on_the_stack,
  )
  from rasa.shared.constants import RASA_PATTERN_HUMAN_HANDOFF
- from rasa.shared.core.constants import ACTION_LISTEN_NAME, SlotMappingType
+ from rasa.shared.core.constants import (
+ ACTION_LISTEN_NAME,
+ )
  from rasa.shared.core.events import (
  Event,
  FlowCompleted,
@@ -81,9 +85,8 @@ from rasa.shared.core.flows.steps import (
  NoOperationFlowStep,
  SetSlotsFlowStep,
  )
- from rasa.shared.core.flows.steps.collect import SlotRejection
  from rasa.shared.core.flows.steps.constants import START_STEP
- from rasa.shared.core.slots import Slot
+ from rasa.shared.core.slots import Slot, SlotRejection
  from rasa.shared.core.trackers import (
  DialogueStateTracker,
  )
@@ -239,7 +242,10 @@ def events_for_collect_step_execution(


  def trigger_pattern_continue_interrupted(
- current_frame: DialogueStackFrame, stack: DialogueStack, flows: FlowsList
+ current_frame: DialogueStackFrame,
+ stack: DialogueStack,
+ flows: FlowsList,
+ tracker: DialogueStateTracker,
  ) -> List[Event]:
  """Trigger the pattern to continue an interrupted flow if needed."""
  events: List[Event] = []
@@ -262,7 +268,9 @@ def trigger_pattern_continue_interrupted(
  ):
  stack.push(
  ContinueInterruptedPatternFlowStackFrame(
- previous_flow_name=interrupted_user_flow.readable_name(),
+ previous_flow_name=interrupted_user_flow.readable_name(
+ language=tracker.current_language
+ ),
  )
  )
  events.append(
@@ -272,6 +280,28 @@ def trigger_pattern_continue_interrupted(
  return events


+ def trigger_pattern_clarification(
+ current_frame: DialogueStackFrame, stack: DialogueStack, flows: FlowsList
+ ) -> None:
+ """Trigger the pattern to clarify which topic to continue if needed."""
+ if not isinstance(current_frame, UserFlowStackFrame):
+ return None
+
+ if current_frame.frame_type == FlowStackFrameType.CALL:
+ # we want to return to the flow that called the current flow
+ return None
+
+ pending_flows = [
+ flows.flow_by_id(frame.flow_id)
+ for frame in stack.frames
+ if isinstance(frame, UserFlowStackFrame)
+ and frame.flow_id != current_frame.flow_id
+ ]
+
+ flow_names = [flow.readable_name() for flow in pending_flows if flow is not None]
+ stack.push(ClarifyPatternFlowStackFrame(names=flow_names))
+
+
  def trigger_pattern_completed(
  current_frame: DialogueStackFrame, stack: DialogueStack, flows: FlowsList
  ) -> None:
@@ -283,6 +313,9 @@ def trigger_pattern_completed(
  or isinstance(current_frame, SearchPatternFlowStackFrame)
  ):
  completed_flow = current_frame.flow(flows)
+ if not completed_flow.run_pattern_completed:
+ return
+
  completed_flow_name = completed_flow.readable_name() if completed_flow else None
  stack.push(
  CompletedPatternFlowStackFrame(
@@ -540,38 +573,6 @@ def cancel_flow_and_push_internal_error(stack: DialogueStack, flow_name: str) ->
  stack.push(InternalErrorPatternFlowStackFrame())


- def validate_custom_slot_mappings(
- step: CollectInformationFlowStep,
- stack: DialogueStack,
- tracker: DialogueStateTracker,
- available_actions: List[str],
- flow_name: str,
- ) -> bool:
- """Validate a slot with custom mappings.
-
- If invalid, trigger pattern_internal_error and return False.
- """
- slot = tracker.slots.get(step.collect, None)
- slot_mappings = slot.mappings if slot else []
- for mapping in slot_mappings:
- if (
- mapping.get("type") == SlotMappingType.CUSTOM.value
- and mapping.get("action") is None
- ):
- # this is a slot that must be filled by a custom action
- # check if collect_action exists
- if step.collect_action not in available_actions:
- structlogger.error(
- "flow.step.run.collect_action_not_found_for_custom_slot_mapping",
- action=step.collect_action,
- collect=step.collect,
- )
- cancel_flow_and_push_internal_error(stack, flow_name)
- return False
-
- return True
-
-
  def attach_stack_metadata_to_events(
  step_id: str,
  flow_id: str,
@@ -669,7 +670,15 @@ def _run_end_step(
  structlogger.debug("flow.step.run.flow_end")
  current_frame = stack.pop()
  trigger_pattern_completed(current_frame, stack, flows)
- resumed_events = trigger_pattern_continue_interrupted(current_frame, stack, flows)
+ resumed_events = []
+ if len(user_flows_on_the_stack(stack)) > 1:
+ # if there are more user flows on the stack,
+ # we need to trigger the pattern clarify
+ trigger_pattern_clarification(current_frame, stack, flows)
+ else:
+ resumed_events = trigger_pattern_continue_interrupted(
+ current_frame, stack, flows, tracker
+ )
  reset_events: List[Event] = reset_scoped_slots(current_frame, flow, tracker)
  return ContinueFlowWithNextStep(
  events=initial_events + reset_events + resumed_events, has_flow_ended=True
@@ -760,14 +769,6 @@ def _run_collect_information_step(
  # if we return any other FlowStepResult, the assistant will stay silent
  # instead of triggering the internal error pattern
  return ContinueFlowWithNextStep(events=initial_events)
- is_mapping_valid = validate_custom_slot_mappings(
- step, stack, tracker, available_actions, flow_name
- )
-
-
- if not is_mapping_valid:
- # if we return any other FlowStepResult, the assistant will stay silent
- # instead of triggering the internal error pattern
- return ContinueFlowWithNextStep(events=initial_events)

  structlogger.debug("flow.step.run.collect")
  trigger_pattern_ask_collect_information(