rasa-pro 3.12.0.dev13__py3-none-any.whl → 3.12.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of rasa-pro might be problematic. Click here for more details.

Files changed (139)
  1. README.md +10 -13
  2. rasa/anonymization/anonymization_rule_executor.py +16 -10
  3. rasa/cli/data.py +16 -0
  4. rasa/cli/project_templates/calm/config.yml +2 -2
  5. rasa/cli/project_templates/calm/domain/list_contacts.yml +1 -2
  6. rasa/cli/project_templates/calm/domain/remove_contact.yml +1 -2
  7. rasa/cli/project_templates/calm/domain/shared.yml +1 -4
  8. rasa/cli/project_templates/calm/endpoints.yml +2 -2
  9. rasa/cli/utils.py +12 -0
  10. rasa/core/actions/action.py +84 -191
  11. rasa/core/actions/action_handle_digressions.py +35 -13
  12. rasa/core/actions/action_run_slot_rejections.py +16 -4
  13. rasa/core/channels/__init__.py +2 -0
  14. rasa/core/channels/studio_chat.py +19 -0
  15. rasa/core/channels/telegram.py +42 -24
  16. rasa/core/channels/voice_ready/utils.py +1 -1
  17. rasa/core/channels/voice_stream/asr/asr_engine.py +10 -4
  18. rasa/core/channels/voice_stream/asr/azure.py +14 -1
  19. rasa/core/channels/voice_stream/asr/deepgram.py +20 -4
  20. rasa/core/channels/voice_stream/audiocodes.py +264 -0
  21. rasa/core/channels/voice_stream/browser_audio.py +4 -1
  22. rasa/core/channels/voice_stream/call_state.py +3 -0
  23. rasa/core/channels/voice_stream/genesys.py +6 -2
  24. rasa/core/channels/voice_stream/tts/azure.py +9 -1
  25. rasa/core/channels/voice_stream/tts/cartesia.py +14 -8
  26. rasa/core/channels/voice_stream/voice_channel.py +23 -2
  27. rasa/core/constants.py +2 -0
  28. rasa/core/nlg/contextual_response_rephraser.py +18 -1
  29. rasa/core/nlg/generator.py +83 -15
  30. rasa/core/nlg/response.py +6 -3
  31. rasa/core/nlg/translate.py +55 -0
  32. rasa/core/policies/enterprise_search_prompt_with_citation_template.jinja2 +1 -1
  33. rasa/core/policies/flows/flow_executor.py +19 -7
  34. rasa/core/processor.py +71 -9
  35. rasa/dialogue_understanding/commands/can_not_handle_command.py +20 -2
  36. rasa/dialogue_understanding/commands/cancel_flow_command.py +24 -6
  37. rasa/dialogue_understanding/commands/change_flow_command.py +20 -2
  38. rasa/dialogue_understanding/commands/chit_chat_answer_command.py +20 -2
  39. rasa/dialogue_understanding/commands/clarify_command.py +29 -3
  40. rasa/dialogue_understanding/commands/command.py +1 -16
  41. rasa/dialogue_understanding/commands/command_syntax_manager.py +55 -0
  42. rasa/dialogue_understanding/commands/handle_digressions_command.py +1 -7
  43. rasa/dialogue_understanding/commands/human_handoff_command.py +20 -2
  44. rasa/dialogue_understanding/commands/knowledge_answer_command.py +20 -2
  45. rasa/dialogue_understanding/commands/prompt_command.py +94 -0
  46. rasa/dialogue_understanding/commands/repeat_bot_messages_command.py +20 -2
  47. rasa/dialogue_understanding/commands/set_slot_command.py +24 -2
  48. rasa/dialogue_understanding/commands/skip_question_command.py +20 -2
  49. rasa/dialogue_understanding/commands/start_flow_command.py +22 -2
  50. rasa/dialogue_understanding/commands/utils.py +71 -4
  51. rasa/dialogue_understanding/generator/__init__.py +2 -0
  52. rasa/dialogue_understanding/generator/command_parser.py +15 -12
  53. rasa/dialogue_understanding/generator/constants.py +3 -0
  54. rasa/dialogue_understanding/generator/llm_based_command_generator.py +12 -5
  55. rasa/dialogue_understanding/generator/llm_command_generator.py +5 -3
  56. rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +17 -3
  57. rasa/dialogue_understanding/generator/prompt_templates/__init__.py +0 -0
  58. rasa/dialogue_understanding/generator/{single_step → prompt_templates}/command_prompt_template.jinja2 +2 -0
  59. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2 +77 -0
  60. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_default.jinja2 +68 -0
  61. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 +84 -0
  62. rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +522 -0
  63. rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +12 -310
  64. rasa/dialogue_understanding/patterns/collect_information.py +1 -1
  65. rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +16 -0
  66. rasa/dialogue_understanding/patterns/validate_slot.py +65 -0
  67. rasa/dialogue_understanding/processor/command_processor.py +39 -0
  68. rasa/dialogue_understanding/stack/utils.py +38 -0
  69. rasa/dialogue_understanding_test/du_test_case.py +58 -18
  70. rasa/dialogue_understanding_test/du_test_result.py +14 -10
  71. rasa/dialogue_understanding_test/io.py +14 -0
  72. rasa/e2e_test/assertions.py +6 -8
  73. rasa/e2e_test/llm_judge_prompts/answer_relevance_prompt_template.jinja2 +5 -1
  74. rasa/e2e_test/llm_judge_prompts/groundedness_prompt_template.jinja2 +4 -0
  75. rasa/e2e_test/utils/io.py +0 -37
  76. rasa/engine/graph.py +1 -0
  77. rasa/engine/language.py +140 -0
  78. rasa/engine/recipes/config_files/default_config.yml +4 -0
  79. rasa/engine/recipes/default_recipe.py +2 -0
  80. rasa/engine/recipes/graph_recipe.py +2 -0
  81. rasa/engine/storage/local_model_storage.py +1 -0
  82. rasa/engine/storage/storage.py +4 -1
  83. rasa/llm_fine_tuning/conversations.py +1 -1
  84. rasa/model_manager/runner_service.py +7 -4
  85. rasa/model_manager/socket_bridge.py +7 -6
  86. rasa/shared/constants.py +15 -13
  87. rasa/shared/core/constants.py +2 -0
  88. rasa/shared/core/flows/constants.py +11 -0
  89. rasa/shared/core/flows/flow.py +83 -19
  90. rasa/shared/core/flows/flows_yaml_schema.json +31 -3
  91. rasa/shared/core/flows/steps/collect.py +1 -36
  92. rasa/shared/core/flows/utils.py +28 -4
  93. rasa/shared/core/flows/validation.py +1 -1
  94. rasa/shared/core/slot_mappings.py +208 -5
  95. rasa/shared/core/slots.py +137 -1
  96. rasa/shared/core/trackers.py +74 -1
  97. rasa/shared/importers/importer.py +50 -2
  98. rasa/shared/nlu/training_data/schemas/responses.yml +19 -12
  99. rasa/shared/providers/_configs/azure_entra_id_config.py +541 -0
  100. rasa/shared/providers/_configs/azure_openai_client_config.py +138 -3
  101. rasa/shared/providers/_configs/client_config.py +3 -1
  102. rasa/shared/providers/_configs/default_litellm_client_config.py +3 -1
  103. rasa/shared/providers/_configs/huggingface_local_embedding_client_config.py +3 -1
  104. rasa/shared/providers/_configs/litellm_router_client_config.py +3 -1
  105. rasa/shared/providers/_configs/model_group_config.py +4 -2
  106. rasa/shared/providers/_configs/oauth_config.py +33 -0
  107. rasa/shared/providers/_configs/openai_client_config.py +3 -1
  108. rasa/shared/providers/_configs/rasa_llm_client_config.py +3 -1
  109. rasa/shared/providers/_configs/self_hosted_llm_client_config.py +3 -1
  110. rasa/shared/providers/constants.py +6 -0
  111. rasa/shared/providers/embedding/azure_openai_embedding_client.py +28 -3
  112. rasa/shared/providers/embedding/litellm_router_embedding_client.py +3 -1
  113. rasa/shared/providers/llm/_base_litellm_client.py +42 -17
  114. rasa/shared/providers/llm/azure_openai_llm_client.py +81 -25
  115. rasa/shared/providers/llm/default_litellm_llm_client.py +3 -1
  116. rasa/shared/providers/llm/litellm_router_llm_client.py +29 -8
  117. rasa/shared/providers/llm/llm_client.py +23 -7
  118. rasa/shared/providers/llm/openai_llm_client.py +9 -3
  119. rasa/shared/providers/llm/rasa_llm_client.py +11 -2
  120. rasa/shared/providers/llm/self_hosted_llm_client.py +30 -11
  121. rasa/shared/providers/router/_base_litellm_router_client.py +3 -1
  122. rasa/shared/providers/router/router_client.py +3 -1
  123. rasa/shared/utils/constants.py +3 -0
  124. rasa/shared/utils/llm.py +33 -7
  125. rasa/shared/utils/pykwalify_extensions.py +24 -0
  126. rasa/shared/utils/schemas/domain.yml +26 -0
  127. rasa/telemetry.py +2 -1
  128. rasa/tracing/config.py +2 -0
  129. rasa/tracing/constants.py +12 -0
  130. rasa/tracing/instrumentation/instrumentation.py +36 -0
  131. rasa/tracing/instrumentation/metrics.py +41 -0
  132. rasa/tracing/metric_instrument_provider.py +40 -0
  133. rasa/validator.py +372 -7
  134. rasa/version.py +1 -1
  135. {rasa_pro-3.12.0.dev13.dist-info → rasa_pro-3.12.0rc2.dist-info}/METADATA +13 -14
  136. {rasa_pro-3.12.0.dev13.dist-info → rasa_pro-3.12.0rc2.dist-info}/RECORD +139 -124
  137. {rasa_pro-3.12.0.dev13.dist-info → rasa_pro-3.12.0rc2.dist-info}/NOTICE +0 -0
  138. {rasa_pro-3.12.0.dev13.dist-info → rasa_pro-3.12.0rc2.dist-info}/WHEEL +0 -0
  139. {rasa_pro-3.12.0.dev13.dist-info → rasa_pro-3.12.0rc2.dist-info}/entry_points.txt +0 -0
@@ -2,7 +2,7 @@ from typing import Any, Dict, Iterator, List, Optional, Tuple
2
2
 
3
3
  from pydantic import BaseModel, Field
4
4
 
5
- from rasa.dialogue_understanding.commands import Command
5
+ from rasa.dialogue_understanding.commands.prompt_command import PromptCommand
6
6
  from rasa.dialogue_understanding.generator.command_parser import parse_commands
7
7
  from rasa.dialogue_understanding_test.command_comparison import are_command_lists_equal
8
8
  from rasa.dialogue_understanding_test.constants import (
@@ -30,6 +30,7 @@ from rasa.shared.nlu.constants import (
30
30
  KEY_USAGE = "usage"
31
31
  KEY_PROMPT_TOKENS = "prompt_tokens"
32
32
  KEY_COMPLETION_TOKENS = "completion_tokens"
33
+ KEY_CHOICES = "choices"
33
34
 
34
35
 
35
36
  class DialogueUnderstandingOutput(BaseModel):
@@ -65,11 +66,18 @@ class DialogueUnderstandingOutput(BaseModel):
65
66
  """
66
67
 
67
68
  # Dict with component name as key and list of commands as value
68
- commands: Dict[str, List[Command]]
69
+ commands: Dict[str, List[PromptCommand]]
69
70
  # List of prompts
70
71
  prompts: Optional[List[Dict[str, Any]]] = None
71
72
 
72
- def get_predicted_commands(self) -> List[Command]:
73
+ class Config:
74
+ """Skip validation for PromptCommand protocol as pydantic does not know how to
75
+ serialize or handle instances of a protocol.
76
+ """
77
+
78
+ arbitrary_types_allowed = True
79
+
80
+ def get_predicted_commands(self) -> List[PromptCommand]:
73
81
  """Get all commands from the output."""
74
82
  return [
75
83
  command
@@ -77,15 +85,33 @@ class DialogueUnderstandingOutput(BaseModel):
77
85
  for command in predicted_commands
78
86
  ]
79
87
 
80
- def get_component_names_that_predicted_commands(self) -> List[str]:
81
- """Get all component names that have predicted commands."""
82
- if self.commands is None:
83
- return []
84
- return [
85
- component_name
86
- for component_name, predicted_commands in self.commands.items()
87
- if predicted_commands
88
- ]
88
+ def get_component_names_that_predicted_commands_or_have_llm_response(
89
+ self,
90
+ ) -> List[str]:
91
+ """Get all component names that have predicted commands or recieved
92
+ non-empty response from LLM.
93
+ """
94
+ component_names_that_predicted_commands = (
95
+ [
96
+ component_name
97
+ for component_name, predicted_commands in self.commands.items()
98
+ if predicted_commands
99
+ ]
100
+ if self.commands
101
+ else []
102
+ )
103
+ components_with_prompts = (
104
+ [
105
+ str(prompt.get(KEY_COMPONENT_NAME, None))
106
+ for prompt in self.prompts
107
+ if prompt.get(KEY_LLM_RESPONSE_METADATA, None)
108
+ ]
109
+ if self.prompts
110
+ else []
111
+ )
112
+ return list(
113
+ set(component_names_that_predicted_commands + components_with_prompts)
114
+ )
89
115
 
90
116
  def get_component_name_to_prompt_info(self) -> Dict[str, List[Dict[str, Any]]]:
91
117
  """Return a dictionary of component names to prompt information.
@@ -112,7 +138,9 @@ class DialogueUnderstandingOutput(BaseModel):
112
138
  return {}
113
139
 
114
140
  data: Dict[str, List[Dict[str, Any]]] = {}
115
- relevant_component_names = self.get_component_names_that_predicted_commands()
141
+ relevant_component_names = (
142
+ self.get_component_names_that_predicted_commands_or_have_llm_response()
143
+ )
116
144
 
117
145
  for prompt_data in self.prompts:
118
146
  component_name = prompt_data[KEY_COMPONENT_NAME]
@@ -144,6 +172,11 @@ class DialogueUnderstandingOutput(BaseModel):
144
172
  KEY_COMPLETION_TOKENS
145
173
  )
146
174
 
175
+ choices = prompt_data.get(KEY_LLM_RESPONSE_METADATA, {}).get(KEY_CHOICES)
176
+ if choices and len(choices) > 0:
177
+ # Add the action list returned by the LLM to the prompt_info
178
+ prompt_info[KEY_CHOICES] = choices[0]
179
+
147
180
  data[component_name].append(prompt_info)
148
181
 
149
182
  return data
@@ -155,9 +188,16 @@ class DialogueUnderstandingTestStep(BaseModel):
155
188
  template: Optional[str] = None
156
189
  line: Optional[int] = None
157
190
  metadata_name: Optional[str] = None
158
- commands: Optional[List[Command]] = None
191
+ commands: Optional[List[PromptCommand]] = None
159
192
  dialogue_understanding_output: Optional[DialogueUnderstandingOutput] = None
160
193
 
194
+ class Config:
195
+ """Skip validation for PromptCommand protocol as pydantic does not know how to
196
+ serialize or handle instances of a protocol.
197
+ """
198
+
199
+ arbitrary_types_allowed = True
200
+
161
201
  def as_dict(self) -> Dict[str, Any]:
162
202
  if self.actor == ACTOR_USER:
163
203
  if self.commands:
@@ -178,7 +218,7 @@ class DialogueUnderstandingTestStep(BaseModel):
178
218
  def from_dict(
179
219
  step: Dict[str, Any],
180
220
  flows: FlowsList,
181
- custom_command_classes: List[Command] = [],
221
+ custom_command_classes: List[PromptCommand] = [],
182
222
  remove_default_commands: List[str] = [],
183
223
  ) -> "DialogueUnderstandingTestStep":
184
224
  """Creates a DialogueUnderstandingTestStep from a dictionary.
@@ -224,7 +264,7 @@ class DialogueUnderstandingTestStep(BaseModel):
224
264
  commands=commands,
225
265
  )
226
266
 
227
- def get_predicted_commands(self) -> List[Command]:
267
+ def get_predicted_commands(self) -> List[PromptCommand]:
228
268
  """Get all predicted commands from the test case."""
229
269
  if self.dialogue_understanding_output is None:
230
270
  return []
@@ -314,7 +354,7 @@ class DialogueUnderstandingTestCase(BaseModel):
314
354
  input_test_case: Dict[str, Any],
315
355
  flows: FlowsList,
316
356
  file: Optional[str] = None,
317
- custom_command_classes: List[Command] = [],
357
+ custom_command_classes: List[PromptCommand] = [],
318
358
  remove_default_commands: List[str] = [],
319
359
  ) -> "DialogueUnderstandingTestCase":
320
360
  """Creates a DialogueUnderstandingTestCase from a dictionary.
@@ -361,7 +401,7 @@ class DialogueUnderstandingTestCase(BaseModel):
361
401
 
362
402
  return [step.to_str() for step in steps]
363
403
 
364
- def get_expected_commands(self) -> List[Command]:
404
+ def get_expected_commands(self) -> List[PromptCommand]:
365
405
  """Get all commands from the test steps."""
366
406
  return [
367
407
  command
@@ -5,16 +5,13 @@ from typing import Any, Dict, List, Optional, Text
5
5
  import numpy as np
6
6
  from pydantic import BaseModel
7
7
 
8
- from rasa.dialogue_understanding.commands import Command
8
+ from rasa.dialogue_understanding.commands.prompt_command import PromptCommand
9
9
  from rasa.dialogue_understanding_test.du_test_case import (
10
10
  DialogueUnderstandingTestCase,
11
11
  DialogueUnderstandingTestStep,
12
12
  )
13
13
  from rasa.dialogue_understanding_test.utils import get_command_comparison
14
- from rasa.shared.nlu.constants import (
15
- KEY_SYSTEM_PROMPT,
16
- KEY_USER_PROMPT,
17
- )
14
+ from rasa.shared.nlu.constants import KEY_SYSTEM_PROMPT, KEY_USER_PROMPT
18
15
 
19
16
  if typing.TYPE_CHECKING:
20
17
  from rasa.dialogue_understanding_test.command_metric_calculation import (
@@ -46,7 +43,7 @@ class DialogueUnderstandingTestResult(BaseModel):
46
43
  passed: bool
47
44
  error_line: Optional[int] = None
48
45
 
49
- def get_expected_commands(self) -> List[Command]:
46
+ def get_expected_commands(self) -> List[PromptCommand]:
50
47
  return self.test_case.get_expected_commands()
51
48
 
52
49
 
@@ -60,10 +57,17 @@ class FailedTestStep(BaseModel):
60
57
  pass_status: bool
61
58
  command_generators: List[str]
62
59
  prompts: Optional[Dict[str, List[Dict[str, Any]]]] = None
63
- expected_commands: List[Command]
64
- predicted_commands: Dict[str, List[Command]]
60
+ expected_commands: List[PromptCommand]
61
+ predicted_commands: Dict[str, List[PromptCommand]]
65
62
  conversation_with_diff: List[str]
66
63
 
64
+ class Config:
65
+ """Skip validation for PromptCommand protocol as pydantic does not know how to
66
+ serialize or handle instances of a protocol.
67
+ """
68
+
69
+ arbitrary_types_allowed = True
70
+
67
71
  @classmethod
68
72
  def from_dialogue_understanding_test_step(
69
73
  cls,
@@ -74,13 +78,13 @@ class FailedTestStep(BaseModel):
74
78
  user_utterance = step.text or ""
75
79
  line_number = step.line or -1
76
80
 
77
- predicted_commands: Dict[str, List[Command]] = {}
81
+ predicted_commands: Dict[str, List[PromptCommand]] = {}
78
82
  prompts: Optional[Dict[str, List[Dict[str, Any]]]] = None
79
83
  command_generators: List[str] = []
80
84
 
81
85
  if step.dialogue_understanding_output:
82
86
  predicted_commands = step.dialogue_understanding_output.commands
83
- command_generators = step.dialogue_understanding_output.get_component_names_that_predicted_commands() # noqa: E501
87
+ command_generators = step.dialogue_understanding_output.get_component_names_that_predicted_commands_or_have_llm_response() # noqa: E501
84
88
  prompts = (
85
89
  step.dialogue_understanding_output.get_component_name_to_prompt_info()
86
90
  )
@@ -8,6 +8,7 @@ import rasa.shared.data
8
8
  from rasa.dialogue_understanding_test.command_metric_calculation import CommandMetrics
9
9
  from rasa.dialogue_understanding_test.constants import SCHEMA_FILE_PATH
10
10
  from rasa.dialogue_understanding_test.du_test_case import (
11
+ KEY_CHOICES,
11
12
  KEY_COMPLETION_TOKENS,
12
13
  KEY_PROMPT_TOKENS,
13
14
  )
@@ -309,6 +310,7 @@ def print_failed_cases(
309
310
  print_prompt(step)
310
311
  rich.print("\n[red3]-- CONVERSATION --[/red3]")
311
312
  rich.print("\n".join(step.conversation_with_diff))
313
+ print_llm_output(step)
312
314
 
313
315
 
314
316
  def print_prompt(step: FailedTestStep) -> None:
@@ -341,6 +343,18 @@ def print_prompt(step: FailedTestStep) -> None:
341
343
  )
342
344
 
343
345
 
346
+ def print_llm_output(step: FailedTestStep) -> None:
347
+ if not step.prompts:
348
+ return
349
+
350
+ for component, component_prompts in step.prompts.items():
351
+ for prompt_data in component_prompts:
352
+ if KEY_CHOICES in prompt_data:
353
+ rich.print("\n[red3]-- CHOICES --[/red3]")
354
+ rich.print(prompt_data.get(KEY_CHOICES))
355
+ rich.print("[red3]-------------[/red3]")
356
+
357
+
344
358
  def print_command_summary(metrics: Dict[str, CommandMetrics]) -> None:
345
359
  """Print the command summary.
346
360
 
@@ -972,7 +972,7 @@ class GenerativeResponseMixin(Assertion):
972
972
  data.pop("metric_adjective")
973
973
  return data
974
974
 
975
- def _render_prompt(self, step_text: str, matching_event: BotUttered) -> str:
975
+ def _render_prompt(self, matching_event: BotUttered) -> str:
976
976
  raise NotImplementedError
977
977
 
978
978
  def _get_processed_output(self, parsed_llm_output: Dict[str, Any]) -> List[Any]:
@@ -999,7 +999,7 @@ class GenerativeResponseMixin(Assertion):
999
999
  ) -> Tuple[Optional[AssertionFailure], Optional[Event]]:
1000
1000
  """Run the LLM evaluation on the given event."""
1001
1001
  bot_message = matching_event.text
1002
- prompt = self._render_prompt(step_text, matching_event)
1002
+ prompt = self._render_prompt(matching_event)
1003
1003
  llm_response = self._invoke_llm(llm_judge_config, prompt)
1004
1004
 
1005
1005
  try:
@@ -1160,9 +1160,9 @@ class GenerativeResponseIsRelevantAssertion(GenerativeResponseMixin):
1160
1160
  def type(cls) -> str:
1161
1161
  return AssertionType.GENERATIVE_RESPONSE_IS_RELEVANT.value
1162
1162
 
1163
- def _render_prompt(self, step_text: str, matching_event: BotUttered) -> str:
1163
+ def _render_prompt(self, matching_event: BotUttered) -> str:
1164
1164
  """Render the prompt."""
1165
- inputs = _get_prompt_inputs(self.type(), step_text, matching_event)
1165
+ inputs = _get_prompt_inputs(self.type(), matching_event)
1166
1166
  prompt_template = _get_default_prompt_template(
1167
1167
  DEFAULT_ANSWER_RELEVANCE_PROMPT_TEMPLATE_FILE_NAME
1168
1168
  )
@@ -1206,11 +1206,10 @@ class GenerativeResponseIsGroundedAssertion(GenerativeResponseMixin):
1206
1206
  def type(cls) -> str:
1207
1207
  return AssertionType.GENERATIVE_RESPONSE_IS_GROUNDED.value
1208
1208
 
1209
- def _render_prompt(self, step_text: str, matching_event: BotUttered) -> str:
1209
+ def _render_prompt(self, matching_event: BotUttered) -> str:
1210
1210
  """Render the prompt."""
1211
1211
  inputs = _get_prompt_inputs(
1212
1212
  assertion_type=self.type(),
1213
- step_text=step_text,
1214
1213
  matching_event=matching_event,
1215
1214
  ground_truth=self.ground_truth,
1216
1215
  )
@@ -1336,12 +1335,11 @@ def _get_default_prompt_template(default_prompt_template_file_name: str) -> str:
1336
1335
 
1337
1336
  def _get_prompt_inputs(
1338
1337
  assertion_type: str,
1339
- step_text: str,
1340
1338
  matching_event: BotUttered,
1341
1339
  ground_truth: Optional[str] = None,
1342
1340
  ) -> Dict[str, Any]:
1343
1341
  if assertion_type == AssertionType.GENERATIVE_RESPONSE_IS_RELEVANT.value:
1344
- return {"num_variations": "3", "user_message": step_text}
1342
+ return {"num_variations": "3", "bot_message": matching_event.text}
1345
1343
  elif assertion_type == AssertionType.GENERATIVE_RESPONSE_IS_GROUNDED.value:
1346
1344
  ground_truth_event_metadata = matching_event.metadata.get(
1347
1345
  SEARCH_RESULTS_METADATA_KEY, ""
@@ -84,6 +84,10 @@ These are a few examples of how to generate question variations and identify non
84
84
 
85
85
  ## Task Inputs
86
86
 
87
- - **Input Answer**: {{ user_message }}
87
+ - **Input Answer**: {{ bot_message }}
88
+
89
+ ## Task Outputs
90
+
91
+ Do not include any additional explanations in your output. Only provide the JSON object as described in the task steps.
88
92
 
89
93
  Your output:
@@ -162,4 +162,8 @@ These are a few examples of how to evaluate the correctness of the answer based
162
162
  - **Input Answer**: {{ bot_message }}
163
163
  - **Ground Truth**: {{ ground_truth }}
164
164
 
165
+ ## Task Outputs
166
+
167
+ Do not include any additional explanations in your output. Only provide the JSON object as described in the task steps.
168
+
165
169
  Your output:
rasa/e2e_test/utils/io.py CHANGED
@@ -49,7 +49,6 @@ if TYPE_CHECKING:
49
49
  from rasa.e2e_test.e2e_test_result import TestResult
50
50
 
51
51
 
52
- RASA_PRO_BETA_E2E_ASSERTIONS_ENV_VAR_NAME = "RASA_PRO_BETA_E2E_ASSERTIONS"
53
52
  RASA_PRO_BETA_STUB_CUSTOM_ACTION_ENV_VAR_NAME = "RASA_PRO_BETA_STUB_CUSTOM_ACTION"
54
53
 
55
54
  structlogger = structlog.get_logger()
@@ -278,16 +277,6 @@ def split_into_passed_failed(
278
277
  return passed_cases, failed_cases
279
278
 
280
279
 
281
- def has_test_case_with_assertions(test_cases: List[TestCase]) -> bool:
282
- """Check if the test cases contain assertions."""
283
- try:
284
- next(test_case for test_case in test_cases if test_case.uses_assertions())
285
- except StopIteration:
286
- return False
287
-
288
- return True
289
-
290
-
291
280
  @lru_cache(maxsize=1)
292
281
  def extract_test_case_from_path(path: str) -> Tuple[str, str]:
293
282
  """Extract test case from path if specified.
@@ -442,7 +431,6 @@ def read_test_cases(path: str) -> TestSuite:
442
431
  fixtures: Dict[str, Fixture] = {}
443
432
  metadata: Dict[str, Metadata] = {}
444
433
  stub_custom_actions: Dict[str, StubCustomAction] = {}
445
- beta_flag_verified = False
446
434
 
447
435
  # Process each test file
448
436
  for test_file in test_files:
@@ -460,10 +448,6 @@ def read_test_cases(path: str) -> TestSuite:
460
448
  stub_custom_actions.update(
461
449
  extract_stub_custom_actions(test_file_content, test_file)
462
450
  )
463
-
464
- beta_flag_verified = verify_beta_feature_flag_for_assertions(
465
- test_cases, beta_flag_verified
466
- )
467
451
  input_test_cases.extend(test_cases)
468
452
 
469
453
  validate_test_case(test_case_name, input_test_cases, fixtures, metadata)
@@ -489,27 +473,6 @@ def check_beta_feature_flag_for_custom_actions_stubs() -> None:
489
473
  rasa.shared.utils.cli.print_error_and_exit(str(exc))
490
474
 
491
475
 
492
- def verify_beta_feature_flag_for_assertions(
493
- test_cases: List[TestCase], beta_flag_verified: bool
494
- ) -> bool:
495
- """Verify the beta feature flag for assertions."""
496
- if beta_flag_verified:
497
- return True
498
-
499
- if not has_test_case_with_assertions(test_cases):
500
- return beta_flag_verified
501
-
502
- try:
503
- ensure_beta_feature_is_enabled(
504
- "end-to-end testing with assertions",
505
- RASA_PRO_BETA_E2E_ASSERTIONS_ENV_VAR_NAME,
506
- )
507
- except BetaNotEnabledException as exc:
508
- rasa.shared.utils.cli.print_error_and_exit(str(exc))
509
-
510
- return True
511
-
512
-
513
476
  def _save_coverage_report(
514
477
  report: Optional[pd.DataFrame], test_status: str, output_dir: str
515
478
  ) -> None:
rasa/engine/graph.py CHANGED
@@ -640,3 +640,4 @@ class GraphModelConfiguration:
640
640
  language: Optional[Text]
641
641
  core_target: Optional[Text]
642
642
  nlu_target: Optional[Text]
643
+ additional_languages: Optional[List[Text]]
@@ -0,0 +1,140 @@
1
+ from dataclasses import dataclass
2
+ from typing import Any, Dict, Text
3
+
4
+ from langcodes import Language as LangcodesLanguage
5
+
6
+ from rasa.shared.exceptions import RasaException
7
+
8
+ CUSTOM_LANGUAGE_CODE_PREFIX = "x-"
9
+
10
+
11
+ @dataclass(frozen=True)
12
+ class Language:
13
+ code: str
14
+ label: str
15
+ is_default: bool
16
+
17
+ @classmethod
18
+ def from_language_code(
19
+ cls, language_code: str, is_default: bool = False
20
+ ) -> "Language":
21
+ """Creates a Language object from a language code.
22
+
23
+ Args:
24
+ language_code: The language code.
25
+ is_default: Whether the language is the default language.
26
+
27
+ Returns:
28
+ A Language object.
29
+
30
+ Raises:
31
+ RasaException: If the language code or custom language code is invalid.
32
+ """
33
+ language = LangcodesLanguage.make(language_code)
34
+ cls.validate_language(language)
35
+
36
+ return cls(
37
+ code=language_code,
38
+ label=cls.get_language_label(language),
39
+ is_default=is_default,
40
+ )
41
+
42
+ @staticmethod
43
+ def get_language_label(language: LangcodesLanguage) -> str:
44
+ """Gets the display name of a language.
45
+
46
+ For custom languages (in the format "x-<base_lang>-<custom_label>"),
47
+ the label is derived from the base language code.
48
+ This method considers that the language code has previously been validated.
49
+
50
+ Args:
51
+ language: The language code.
52
+
53
+ Returns:
54
+ The display name of the language.
55
+ """
56
+ language_code = str(language)
57
+
58
+ if language_code.startswith(CUSTOM_LANGUAGE_CODE_PREFIX):
59
+ # If it's a custom language, derive the label from the base language code.
60
+ parts = language_code.split("-")
61
+ base_language_code = parts[1]
62
+ base_language = LangcodesLanguage.make(base_language_code)
63
+ return base_language.display_name()
64
+ else:
65
+ return language.display_name()
66
+
67
+ @classmethod
68
+ def validate_language(cls, language: LangcodesLanguage) -> None:
69
+ """Validates a language code.
70
+
71
+ Args:
72
+ language: The language object to validate.
73
+
74
+ Raises:
75
+ RasaException: If the language validation fails.
76
+ """
77
+ if not language.is_valid():
78
+ raise RasaException(f"Language '{language}' is not a valid language code.")
79
+
80
+ language_code = str(language)
81
+ if language_code.startswith(CUSTOM_LANGUAGE_CODE_PREFIX):
82
+ cls.validate_custom_language(language_code)
83
+
84
+ @staticmethod
85
+ def validate_custom_language(custom_language_code: str) -> None:
86
+ """Validates a custom language code.
87
+
88
+ A valid custom language code should adhere to the format:
89
+ "x-<existing_language_code>-<custom_label>"
90
+ Example: x-en-formal
91
+
92
+ Args:
93
+ custom_language_code: The custom language code to validate.
94
+
95
+ Raises:
96
+ RasaException: If the custom language code validation fails.
97
+ """
98
+ # Ensure the custom language code starts with the custom prefix.
99
+ if not custom_language_code.startswith(CUSTOM_LANGUAGE_CODE_PREFIX):
100
+ raise RasaException(
101
+ f"Custom language '{custom_language_code}' must "
102
+ f"start with '{CUSTOM_LANGUAGE_CODE_PREFIX}'."
103
+ )
104
+
105
+ # Split the language code into parts.
106
+ parts = custom_language_code.split("-")
107
+ if len(parts) != 3:
108
+ raise RasaException(
109
+ f"Custom language '{custom_language_code}' must be in the format "
110
+ f"'{CUSTOM_LANGUAGE_CODE_PREFIX}<language_code>-<custom_label>'."
111
+ )
112
+
113
+ # Validate the base language code using langcodes.
114
+ base_language_code = parts[1]
115
+ base_language = LangcodesLanguage.make(base_language_code)
116
+ if not base_language.is_valid():
117
+ raise RasaException(
118
+ f"Base language '{base_language_code}' in custom language "
119
+ f"'{custom_language_code}' is not a valid language code."
120
+ )
121
+
122
+ # Ensure the custom label is not empty.
123
+ custom_label = parts[2]
124
+ if not custom_label:
125
+ raise RasaException(
126
+ f"Custom label in custom language "
127
+ f"'{custom_language_code}' cannot be empty."
128
+ )
129
+
130
+ def as_dict(self) -> Dict[Text, Any]:
131
+ """Converts the Language dataclass instance into a dictionary.
132
+
133
+ Returns:
134
+ A dictionary representing the Language object.
135
+ """
136
+ return {
137
+ "code": self.code,
138
+ "label": self.label,
139
+ "is_default": self.is_default,
140
+ }
@@ -9,6 +9,10 @@ assistant_id: placeholder_default
9
9
  # Configuration for the Rasa NLU components.
10
10
  # https://rasa.com/docs/rasa-pro/nlu-based-assistants/components
11
11
  language: en
12
+ additional_languages:
13
+ - it
14
+ - de
15
+ - es
12
16
 
13
17
  pipeline:
14
18
  - name: WhitespaceTokenizer
@@ -49,6 +49,7 @@ from rasa.graph_components.providers.training_tracker_provider import (
49
49
  )
50
50
  from rasa.shared.constants import (
51
51
  ASSISTANT_ID_KEY,
52
+ CONFIG_ADDITIONAL_LANGUAGES_KEY,
52
53
  CONFIG_LANGUAGE_KEY,
53
54
  CONFIG_NAME_KEY,
54
55
  CONFIG_PIPELINE_KEY,
@@ -247,6 +248,7 @@ class DefaultV1Recipe(Recipe):
247
248
  language=config.get(CONFIG_LANGUAGE_KEY),
248
249
  core_target=core_target,
249
250
  nlu_target=f"{GRAPH_NODE_RUN_PREFIX}{RegexMessageHandler.__name__}",
251
+ additional_languages=config.get(CONFIG_ADDITIONAL_LANGUAGES_KEY),
250
252
  )
251
253
 
252
254
  def _create_train_nodes(
@@ -5,6 +5,7 @@ from rasa.engine.graph import GraphModelConfiguration, GraphSchema
5
5
  from rasa.engine.recipes.recipe import Recipe
6
6
  from rasa.shared.constants import (
7
7
  ASSISTANT_ID_KEY,
8
+ CONFIG_ADDITIONAL_LANGUAGES_KEY,
8
9
  CONFIG_LANGUAGE_KEY,
9
10
  DOCS_URL_GRAPH_RECIPE,
10
11
  )
@@ -76,4 +77,5 @@ class GraphV1Recipe(Recipe):
76
77
  language=config.get(CONFIG_LANGUAGE_KEY),
77
78
  core_target=core_target,
78
79
  nlu_target=nlu_target,
80
+ additional_languages=config.get(CONFIG_ADDITIONAL_LANGUAGES_KEY),
79
81
  )
@@ -240,6 +240,7 @@ class LocalModelStorage(ModelStorage):
240
240
  training_type=model_configuration.training_type,
241
241
  project_fingerprint=rasa.model.project_fingerprint(),
242
242
  language=model_configuration.language,
243
+ additional_languages=model_configuration.additional_languages,
243
244
  core_target=model_configuration.core_target,
244
245
  nlu_target=model_configuration.nlu_target,
245
246
  )
@@ -7,7 +7,7 @@ from contextlib import contextmanager
7
7
  from dataclasses import dataclass
8
8
  from datetime import datetime
9
9
  from pathlib import Path
10
- from typing import Any, Dict, Generator, Optional, Text, Tuple, Union
10
+ from typing import Any, Dict, Generator, List, Optional, Text, Tuple, Union
11
11
 
12
12
  from packaging import version
13
13
 
@@ -142,6 +142,7 @@ class ModelMetadata:
142
142
  core_target: Optional[Text]
143
143
  nlu_target: Text
144
144
  language: Optional[Text]
145
+ additional_languages: Optional[List[Text]]
145
146
  training_type: TrainingType = TrainingType.BOTH
146
147
 
147
148
  def __post_init__(self) -> None:
@@ -171,6 +172,7 @@ class ModelMetadata:
171
172
  "core_target": self.core_target,
172
173
  "nlu_target": self.nlu_target,
173
174
  "language": self.language,
175
+ "additional_languages": self.additional_languages,
174
176
  }
175
177
 
176
178
  @classmethod
@@ -198,4 +200,5 @@ class ModelMetadata:
198
200
  core_target=serialized["core_target"],
199
201
  nlu_target=serialized["nlu_target"],
200
202
  language=serialized["language"],
203
+ additional_languages=serialized.get("additional_languages"),
201
204
  )
@@ -45,7 +45,7 @@ class ConversationStep:
45
45
  elif isinstance(command, SetSlotCommand):
46
46
  output.append(f"SetSlot({command.name}, {command.value})")
47
47
  elif isinstance(command, ClarifyCommand):
48
- output.append(f"Clarify({command.options})")
48
+ output.append(f"Clarify({', '.join(command.options)})")
49
49
  elif isinstance(command, CancelFlowCommand):
50
50
  output.append("CancelFlow()")
51
51
  elif isinstance(command, ChitChatAnswerCommand):
@@ -258,10 +258,13 @@ def run_bot(
258
258
 
259
259
  async def update_bot_status(bot: BotSession) -> None:
260
260
  """Update the status of a bot based on the process return code."""
261
- if bot.has_died_recently():
262
- set_bot_status_to_stopped(bot)
263
- elif await bot.completed_startup_recently():
264
- set_bot_status_to_running(bot)
261
+ try:
262
+ if bot.has_died_recently():
263
+ set_bot_status_to_stopped(bot)
264
+ elif await bot.completed_startup_recently():
265
+ set_bot_status_to_running(bot)
266
+ except Exception as e:
267
+ structlogger.error("model_runner.update_bot_status.error", error=str(e))
265
268
 
266
269
 
267
270
  def terminate_bot(bot: BotSession) -> None: