rasa-pro 3.12.4__py3-none-any.whl → 3.13.0.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of rasa-pro might be problematic; consult the registry's advisory page for more details.

Files changed (47)
  1. rasa/cli/scaffold.py +1 -1
  2. rasa/core/actions/action.py +38 -28
  3. rasa/core/actions/action_run_slot_rejections.py +1 -1
  4. rasa/core/channels/studio_chat.py +16 -43
  5. rasa/core/information_retrieval/faiss.py +62 -6
  6. rasa/core/nlg/contextual_response_rephraser.py +7 -6
  7. rasa/core/nlg/generator.py +5 -21
  8. rasa/core/nlg/response.py +6 -43
  9. rasa/core/nlg/translate.py +0 -8
  10. rasa/core/policies/enterprise_search_policy.py +1 -0
  11. rasa/core/policies/intentless_policy.py +6 -59
  12. rasa/dialogue_understanding/commands/knowledge_answer_command.py +2 -2
  13. rasa/dialogue_understanding/generator/_jinja_filters.py +9 -0
  14. rasa/dialogue_understanding/generator/constants.py +4 -0
  15. rasa/dialogue_understanding/generator/llm_based_command_generator.py +18 -3
  16. rasa/dialogue_understanding/generator/nlu_command_adapter.py +1 -1
  17. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2 +3 -3
  18. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 +7 -4
  19. rasa/dialogue_understanding/processor/command_processor.py +20 -5
  20. rasa/dialogue_understanding/processor/command_processor_component.py +5 -2
  21. rasa/dialogue_understanding_test/command_metric_calculation.py +7 -40
  22. rasa/dialogue_understanding_test/command_metrics.py +38 -0
  23. rasa/dialogue_understanding_test/du_test_case.py +58 -25
  24. rasa/dialogue_understanding_test/du_test_result.py +228 -132
  25. rasa/dialogue_understanding_test/du_test_runner.py +10 -1
  26. rasa/dialogue_understanding_test/io.py +35 -8
  27. rasa/e2e_test/llm_judge_prompts/answer_relevance_prompt_template.jinja2 +1 -1
  28. rasa/engine/validation.py +36 -1
  29. rasa/model_manager/model_api.py +1 -1
  30. rasa/model_manager/socket_bridge.py +0 -7
  31. rasa/model_training.py +2 -1
  32. rasa/shared/constants.py +2 -0
  33. rasa/shared/core/policies/__init__.py +0 -0
  34. rasa/shared/core/policies/utils.py +87 -0
  35. rasa/shared/core/slot_mappings.py +12 -0
  36. rasa/shared/core/slots.py +1 -1
  37. rasa/shared/core/trackers.py +4 -10
  38. rasa/shared/providers/llm/default_litellm_llm_client.py +2 -2
  39. rasa/tracing/instrumentation/attribute_extractors.py +38 -6
  40. rasa/version.py +1 -1
  41. {rasa_pro-3.12.4.dist-info → rasa_pro-3.13.0.dev1.dist-info}/METADATA +5 -6
  42. {rasa_pro-3.12.4.dist-info → rasa_pro-3.13.0.dev1.dist-info}/RECORD +45 -43
  43. {rasa_pro-3.12.4.dist-info → rasa_pro-3.13.0.dev1.dist-info}/WHEEL +1 -1
  44. README.md +0 -38
  45. rasa/keys +0 -1
  46. {rasa_pro-3.12.4.dist-info → rasa_pro-3.13.0.dev1.dist-info}/NOTICE +0 -0
  47. {rasa_pro-3.12.4.dist-info → rasa_pro-3.13.0.dev1.dist-info}/entry_points.txt +0 -0
rasa/cli/scaffold.py CHANGED
@@ -64,7 +64,7 @@ def add_subparser(
64
64
  "--template",
65
65
  type=ProjectTemplateName,
66
66
  choices=list(ProjectTemplateName),
67
- default=ProjectTemplateName.DEFAULT,
67
+ default=ProjectTemplateName.CALM,
68
68
  help="Select the template to use for the project.",
69
69
  )
70
70
  scaffold_parser.set_defaults(func=run)
@@ -23,9 +23,11 @@ from rasa.core.constants import (
23
23
  KEY_IS_COEXISTENCE_ASSISTANT,
24
24
  UTTER_SOURCE_METADATA_KEY,
25
25
  )
26
+ from rasa.core.nlg.translate import get_translated_buttons, get_translated_text
26
27
  from rasa.core.policies.policy import PolicyPrediction
27
28
  from rasa.core.utils import add_bot_utterance_metadata
28
29
  from rasa.e2e_test.constants import KEY_STUB_CUSTOM_ACTIONS
30
+ from rasa.engine.language import Language
29
31
  from rasa.nlu.constants import (
30
32
  RESPONSE_SELECTOR_DEFAULT_INTENT,
31
33
  RESPONSE_SELECTOR_PREDICTION_KEY,
@@ -82,6 +84,7 @@ from rasa.shared.core.events import (
82
84
  UserUttered,
83
85
  )
84
86
  from rasa.shared.core.flows import FlowsList
87
+ from rasa.shared.core.flows.constants import KEY_TRANSLATION
85
88
  from rasa.shared.core.slot_mappings import (
86
89
  SlotFillingManager,
87
90
  extract_slot_value,
@@ -254,25 +257,36 @@ def action_for_name_or_text(
254
257
  return RemoteAction(action_name_or_text, action_endpoint)
255
258
 
256
259
 
257
- def create_bot_utterance(message: Dict[Text, Any]) -> BotUttered:
258
- """Create BotUttered event from message."""
259
- bot_message = BotUttered(
260
- text=message.pop(TEXT, None),
261
- data={
262
- ELEMENTS: message.pop(ELEMENTS, None),
263
- QUICK_REPLIES: message.pop(QUICK_REPLIES, None),
264
- BUTTONS: message.pop(BUTTONS, None),
265
- # for legacy / compatibility reasons we need to set the image
266
- # to be the attachment if there is no other attachment (the
267
- # `.get` is intentional - no `pop` as we still need the image`
268
- # property to set it in the following line)
269
- ATTACHMENT: message.pop(ATTACHMENT, None) or message.get(IMAGE, None),
270
- IMAGE: message.pop(IMAGE, None),
271
- CUSTOM: message.pop(CUSTOM, None),
272
- },
273
- metadata=message,
260
+ def create_bot_utterance(
261
+ message: Dict[Text, Any], language: Optional[Language] = None
262
+ ) -> BotUttered:
263
+ """Create BotUttered event from message with translation support."""
264
+ message_copy = copy.deepcopy(message)
265
+
266
+ text = get_translated_text(
267
+ text=message_copy.pop(TEXT, None),
268
+ translation=message_copy.pop(KEY_TRANSLATION, {}),
269
+ language=language,
270
+ )
271
+
272
+ buttons = get_translated_buttons(
273
+ buttons=message_copy.pop(BUTTONS, None), language=language
274
274
  )
275
- return bot_message
275
+
276
+ data = {
277
+ ELEMENTS: message_copy.pop(ELEMENTS, None),
278
+ QUICK_REPLIES: message_copy.pop(QUICK_REPLIES, None),
279
+ BUTTONS: buttons,
280
+ # for legacy / compatibility reasons we need to set the image
281
+ # to be the attachment if there is no other attachment (the
282
+ # `.get` is intentional - no `pop` as we still need the image`
283
+ # property to set it in the following line)
284
+ ATTACHMENT: message_copy.pop(ATTACHMENT, None) or message_copy.get(IMAGE, None),
285
+ IMAGE: message_copy.pop(IMAGE, None),
286
+ CUSTOM: message_copy.pop(CUSTOM, None),
287
+ }
288
+
289
+ return BotUttered(text=text, data=data, metadata=message_copy)
276
290
 
277
291
 
278
292
  class Action:
@@ -385,7 +399,7 @@ class ActionBotResponse(Action):
385
399
  message = add_bot_utterance_metadata(
386
400
  message, self.utter_action, nlg, domain, tracker
387
401
  )
388
- return [create_bot_utterance(message)]
402
+ return [create_bot_utterance(message, tracker.current_language)]
389
403
 
390
404
  def name(self) -> Text:
391
405
  """Returns action name."""
@@ -419,7 +433,7 @@ class ActionEndToEndResponse(Action):
419
433
  ) -> List[Event]:
420
434
  """Runs action (see parent class for full docstring)."""
421
435
  message = {"text": self.action_text}
422
- return [create_bot_utterance(message)]
436
+ return [create_bot_utterance(message, tracker.current_language)]
423
437
 
424
438
  def event_for_successful_execution(
425
439
  self,
@@ -885,10 +899,7 @@ class RemoteAction(Action):
885
899
  generated_response = response.pop("response", None)
886
900
  if generated_response is not None:
887
901
  draft = await nlg.generate(
888
- generated_response,
889
- tracker,
890
- output_channel.name(),
891
- **response,
902
+ generated_response, tracker, output_channel.name(), **response
892
903
  )
893
904
  if not draft:
894
905
  continue
@@ -906,7 +917,7 @@ class RemoteAction(Action):
906
917
  # Avoid overwriting `draft` values with empty values
907
918
  response = {k: v for k, v in response.items() if v}
908
919
  draft.update(response)
909
- bot_messages.append(create_bot_utterance(draft))
920
+ bot_messages.append(create_bot_utterance(draft, tracker.current_language))
910
921
 
911
922
  return bot_messages
912
923
 
@@ -1063,7 +1074,6 @@ def _revert_rephrasing_events() -> List[Event]:
1063
1074
  ]
1064
1075
 
1065
1076
 
1066
- # TODO: this should be removed, e.g. it uses a hardcoded message and no translation
1067
1077
  class ActionDefaultAskAffirmation(Action):
1068
1078
  """Default implementation which asks the user to affirm his intent.
1069
1079
 
@@ -1115,7 +1125,7 @@ class ActionDefaultAskAffirmation(Action):
1115
1125
  "utter_action": self.name(),
1116
1126
  }
1117
1127
 
1118
- return [create_bot_utterance(message)]
1128
+ return [create_bot_utterance(message, tracker.current_language)]
1119
1129
 
1120
1130
 
1121
1131
  class ActionDefaultAskRephrase(ActionBotResponse):
@@ -1148,7 +1158,7 @@ class ActionSendText(Action):
1148
1158
  fallback = {"text": ""}
1149
1159
  metadata_copy = copy.deepcopy(metadata) if metadata else {}
1150
1160
  message = metadata_copy.get("message", fallback)
1151
- return [create_bot_utterance(message)]
1161
+ return [create_bot_utterance(message, tracker.current_language)]
1152
1162
 
1153
1163
 
1154
1164
  class ActionExtractSlots(Action):
@@ -217,6 +217,6 @@ class ActionRunSlotRejections(Action):
217
217
  message = add_bot_utterance_metadata(
218
218
  message, utterance, nlg, domain, tracker
219
219
  )
220
- events.append(create_bot_utterance(message))
220
+ events.append(create_bot_utterance(message, tracker.current_language))
221
221
 
222
222
  return events
@@ -120,13 +120,6 @@ class StudioChatInput(SocketIOInput):
120
120
 
121
121
  self._register_tracker_update_hook()
122
122
 
123
- async def emit(self, event: str, data: Dict, room: str) -> None:
124
- """Emits an event to the websocket."""
125
- if not self.sio:
126
- structlogger.error("studio_chat.emit.sio_not_initialized")
127
- return
128
- await self.sio.emit(event, data, room=room)
129
-
130
123
  def _register_tracker_update_hook(self) -> None:
131
124
  plugin_manager().register(StudioTrackerUpdatePlugin(self))
132
125
 
@@ -136,7 +129,10 @@ class StudioChatInput(SocketIOInput):
136
129
 
137
130
  async def publish_tracker_update(self, sender_id: str, tracker_dump: Dict) -> None:
138
131
  """Publishes a tracker update notification to the websocket."""
139
- await self.emit("tracker", tracker_dump, room=sender_id)
132
+ if not self.sio:
133
+ structlogger.error("studio_chat.on_tracker_updated.sio_not_initialized")
134
+ return
135
+ await self.sio.emit("tracker", tracker_dump, room=sender_id)
140
136
 
141
137
  async def on_message_proxy(
142
138
  self,
@@ -176,45 +172,22 @@ class StudioChatInput(SocketIOInput):
176
172
  structlogger.error("studio_chat.sio.domain_not_initialized")
177
173
  return None
178
174
 
179
- tracker: Optional[DialogueStateTracker] = None
180
-
181
175
  async with self.agent.lock_store.lock(data["sender_id"]):
182
- try:
183
- tracker = DialogueStateTracker.from_dict(
184
- data["sender_id"], data["events"], domain.slots
185
- )
176
+ tracker = DialogueStateTracker.from_dict(
177
+ data["sender_id"], data["events"], domain.slots
178
+ )
179
+
180
+ # will override an existing tracker with the same id!
181
+ await self.agent.tracker_store.save(tracker)
186
182
 
187
- # will override an existing tracker with the same id!
183
+ processor = self.agent.processor
184
+ if processor and does_need_action_prediction(tracker):
185
+ output_channel = self.get_output_channel()
186
+
187
+ await processor._run_prediction_loop(output_channel, tracker)
188
+ await processor.run_anonymization_pipeline(tracker)
188
189
  await self.agent.tracker_store.save(tracker)
189
190
 
190
- processor = self.agent.processor
191
- if processor and does_need_action_prediction(tracker):
192
- output_channel = self.get_output_channel()
193
-
194
- await processor._run_prediction_loop(output_channel, tracker)
195
- await processor.run_anonymization_pipeline(tracker)
196
- await self.agent.tracker_store.save(tracker)
197
- except Exception as e:
198
- structlogger.error(
199
- "studio_chat.sio.handle_tracker_update.error",
200
- error=e,
201
- sender_id=data["sender_id"],
202
- )
203
- await self.emit(
204
- "error",
205
- {
206
- "message": "An error occurred while updating the conversation.",
207
- "error": str(e),
208
- "exception": str(type(e).__name__),
209
- },
210
- room=sid,
211
- )
212
- if not tracker:
213
- # in case the tracker couldn't be updated, we retrieve the prior
214
- # version and use that to populate the update
215
- tracker = await self.agent.tracker_store.get_or_create_tracker(
216
- data["sender_id"]
217
- )
218
191
  await self.on_tracker_updated(tracker)
219
192
 
220
193
  def blueprint(
@@ -31,10 +31,12 @@ class FAISS_Store(InformationRetrieval):
31
31
  index_path: str,
32
32
  docs_folder: Optional[str],
33
33
  create_index: Optional[bool] = False,
34
+ use_llm: bool = False,
34
35
  ):
35
36
  """Initializes the FAISS Store."""
36
37
  self.chunk_size = 1000
37
38
  self.chunk_overlap = 20
39
+ self.use_llm = use_llm
38
40
 
39
41
  path = Path(index_path) / "documents_faiss"
40
42
  if create_index:
@@ -71,6 +73,57 @@ class FAISS_Store(InformationRetrieval):
71
73
 
72
74
  return loader.load()
73
75
 
76
+ def _format_faqs(self, docs: List["Document"]) -> List["Document"]:
77
+ """Splits each loaded file into individual FAQs.
78
+
79
+ Args:
80
+ docs: Documents representing whole files containing FAQs.
81
+
82
+ Returns:
83
+ List of Document objects, each containing a separate FAQ.
84
+
85
+ Examples:
86
+ An example of a file containing FAQs:
87
+
88
+ Q: Who is Finley?
89
+ A: Finley is your smart assistant for the FinX App. You can add him to your
90
+ favorite messenger and tell him what you need help with.
91
+
92
+ Q: How does Finley work?
93
+ A: Finley is powered by the latest chatbot technology leveraging a unique
94
+ interplay of large language models and secure logic.
95
+
96
+ More details in documentation: https://rasa.com/docs/reference/config/policies/extractive-search/
97
+ """
98
+ structured_faqs = []
99
+ from langchain.schema import Document
100
+
101
+ for doc in docs:
102
+ faq_chunks = doc.page_content.strip().split("\n\n")
103
+
104
+ for chunk in faq_chunks:
105
+ lines = chunk.strip().split("\n")
106
+ if len(lines) < 2:
107
+ continue # Skip if something unexpected
108
+
109
+ question_line = lines[0].strip()
110
+ answer_line = lines[1].strip()
111
+
112
+ question = question_line.replace("Q: ", "").strip()
113
+ answer = answer_line.replace("A: ", "").strip()
114
+
115
+ doc_obj = Document(
116
+ page_content=question,
117
+ metadata={
118
+ "title": question.lower().replace(" ", "_")[:-1],
119
+ "type": "faq",
120
+ "answer": answer,
121
+ },
122
+ )
123
+
124
+ structured_faqs.append(doc_obj)
125
+ return structured_faqs
126
+
74
127
  def _create_document_index(
75
128
  self, docs_folder: Optional[str], embedding: "Embeddings"
76
129
  ) -> FAISS:
@@ -87,12 +140,15 @@ class FAISS_Store(InformationRetrieval):
87
140
  raise ValueError("parameter `docs_folder` needs to be specified")
88
141
 
89
142
  docs = self.load_documents(docs_folder)
90
- splitter = RecursiveCharacterTextSplitter(
91
- chunk_size=self.chunk_size,
92
- chunk_overlap=self.chunk_overlap,
93
- length_function=len,
94
- )
95
- doc_chunks = splitter.split_documents(docs)
143
+ if self.use_llm:
144
+ splitter = RecursiveCharacterTextSplitter(
145
+ chunk_size=self.chunk_size,
146
+ chunk_overlap=self.chunk_overlap,
147
+ length_function=len,
148
+ )
149
+ doc_chunks = splitter.split_documents(docs)
150
+ else:
151
+ doc_chunks = self._format_faqs(docs)
96
152
 
97
153
  logger.info(
98
154
  "information_retrieval.faiss_store._create_document_index",
@@ -200,10 +200,8 @@ class ContextualResponseRephraser(
200
200
 
201
201
  @measure_llm_latency
202
202
  async def _generate_llm_response(self, prompt: str) -> Optional[LLMResponse]:
203
- """Use LLM to generate a response.
204
-
205
- Returns an LLMResponse object containing both the generated text
206
- (choices) and metadata.
203
+ """Use LLM to generate a response, returning an LLMResponse object
204
+ containing both the generated text (choices) and metadata.
207
205
 
208
206
  Args:
209
207
  prompt: The prompt to send to the LLM.
@@ -369,9 +367,12 @@ class ContextualResponseRephraser(
369
367
  Returns:
370
368
  The generated response.
371
369
  """
372
- templated_response = await super().generate(
370
+ filled_slots = tracker.current_slot_values()
371
+ stack_context = tracker.stack.current_context()
372
+ templated_response = self.generate_from_slots(
373
373
  utter_action=utter_action,
374
- tracker=tracker,
374
+ filled_slots=filled_slots,
375
+ stack_context=stack_context,
375
376
  output_channel=output_channel,
376
377
  **kwargs,
377
378
  )
@@ -6,8 +6,6 @@ from pypred import Predicate
6
6
 
7
7
  import rasa.shared.utils.common
8
8
  import rasa.shared.utils.io
9
- from rasa.core.nlg.translate import has_translation
10
- from rasa.engine.language import Language
11
9
  from rasa.shared.constants import CHANNEL, RESPONSE_CONDITION
12
10
  from rasa.shared.core.domain import Domain
13
11
  from rasa.shared.core.trackers import DialogueStateTracker
@@ -133,23 +131,11 @@ class ResponseVariationFilter:
133
131
 
134
132
  return True
135
133
 
136
- def _filter_by_language(
137
- self, responses: List[Dict[Text, Any]], language: Optional[Language] = None
138
- ) -> List[Dict[Text, Any]]:
139
- if not language:
140
- return responses
141
-
142
- if filtered := [r for r in responses if has_translation(r, language)]:
143
- return filtered
144
- # if no translation is found, return the original response variations
145
- return responses
146
-
147
134
  def responses_for_utter_action(
148
135
  self,
149
136
  utter_action: Text,
150
137
  output_channel: Text,
151
138
  filled_slots: Dict[Text, Any],
152
- language: Optional[Language] = None,
153
139
  ) -> List[Dict[Text, Any]]:
154
140
  """Returns array of responses that fit the channel, action and condition."""
155
141
  # filter responses without a condition
@@ -190,16 +176,16 @@ class ResponseVariationFilter:
190
176
  )
191
177
 
192
178
  if conditional_channel:
193
- return self._filter_by_language(conditional_channel, language)
179
+ return conditional_channel
194
180
 
195
181
  if default_channel:
196
- return self._filter_by_language(default_channel, language)
182
+ return default_channel
197
183
 
198
184
  if conditional_no_channel:
199
- return self._filter_by_language(conditional_no_channel, language)
185
+ return conditional_no_channel
200
186
 
201
187
  if default_no_channel:
202
- return self._filter_by_language(default_no_channel, language)
188
+ return default_no_channel
203
189
 
204
190
  # if there is no response variation selected,
205
191
  # return the internal error response to prevent
@@ -212,9 +198,7 @@ class ResponseVariationFilter:
212
198
  f"a default variation and that all the conditions are valid. "
213
199
  f"Returning the internal error response.",
214
200
  )
215
- return self._filter_by_language(
216
- self.responses.get("utter_internal_error_rasa", []), language
217
- )
201
+ return self.responses.get("utter_internal_error_rasa", [])
218
202
 
219
203
  def get_response_variation_id(
220
204
  self,
rasa/core/nlg/response.py CHANGED
@@ -5,11 +5,8 @@ from typing import Any, Dict, List, Optional, Text
5
5
  from rasa.core.constants import DEFAULT_TEMPLATE_ENGINE, TEMPLATE_ENGINE_CONFIG_KEY
6
6
  from rasa.core.nlg import interpolator
7
7
  from rasa.core.nlg.generator import NaturalLanguageGenerator, ResponseVariationFilter
8
- from rasa.core.nlg.translate import get_translated_buttons, get_translated_text
9
- from rasa.engine.language import Language
10
- from rasa.shared.constants import BUTTONS, RESPONSE_CONDITION, TEXT
8
+ from rasa.shared.constants import RESPONSE_CONDITION
11
9
  from rasa.shared.core.domain import RESPONSE_KEYS_TO_INTERPOLATE
12
- from rasa.shared.core.flows.constants import KEY_TRANSLATION
13
10
  from rasa.shared.core.trackers import DialogueStateTracker
14
11
  from rasa.shared.nlu.constants import METADATA
15
12
 
@@ -33,11 +30,7 @@ class TemplatedNaturalLanguageGenerator(NaturalLanguageGenerator):
33
30
 
34
31
  # noinspection PyUnusedLocal
35
32
  def _random_response_for(
36
- self,
37
- utter_action: Text,
38
- output_channel: Text,
39
- filled_slots: Dict[Text, Any],
40
- language: Optional[Language] = None,
33
+ self, utter_action: Text, output_channel: Text, filled_slots: Dict[Text, Any]
41
34
  ) -> Optional[Dict[Text, Any]]:
42
35
  """Select random response for the utter action from available ones.
43
36
 
@@ -49,7 +42,7 @@ class TemplatedNaturalLanguageGenerator(NaturalLanguageGenerator):
49
42
  if utter_action in self.responses:
50
43
  response_filter = ResponseVariationFilter(self.responses)
51
44
  suitable_responses = response_filter.responses_for_utter_action(
52
- utter_action, output_channel, filled_slots, language
45
+ utter_action, output_channel, filled_slots
53
46
  )
54
47
 
55
48
  if suitable_responses:
@@ -82,36 +75,9 @@ class TemplatedNaturalLanguageGenerator(NaturalLanguageGenerator):
82
75
  """Generate a response for the requested utter action."""
83
76
  filled_slots = tracker.current_slot_values()
84
77
  stack_context = tracker.stack.current_context()
85
- response = self.generate_from_slots(
86
- utter_action,
87
- filled_slots,
88
- stack_context,
89
- output_channel,
90
- tracker.current_language,
91
- **kwargs,
78
+ return self.generate_from_slots(
79
+ utter_action, filled_slots, stack_context, output_channel, **kwargs
92
80
  )
93
- if response is not None:
94
- return self.translate_response(response, tracker.current_language)
95
- return None
96
-
97
- def translate_response(
98
- self, response: Dict[Text, Any], language: Optional[Language] = None
99
- ) -> Dict[Text, Any]:
100
- message_copy = copy.deepcopy(response)
101
-
102
- text = get_translated_text(
103
- text=message_copy.pop(TEXT, None),
104
- translation=message_copy.pop(KEY_TRANSLATION, {}),
105
- language=language,
106
- )
107
-
108
- buttons = get_translated_buttons(
109
- buttons=message_copy.pop(BUTTONS, None), language=language
110
- )
111
- message_copy[TEXT] = text
112
- if buttons:
113
- message_copy[BUTTONS] = buttons
114
- return message_copy
115
81
 
116
82
  def generate_from_slots(
117
83
  self,
@@ -119,15 +85,12 @@ class TemplatedNaturalLanguageGenerator(NaturalLanguageGenerator):
119
85
  filled_slots: Dict[Text, Any],
120
86
  stack_context: Dict[Text, Any],
121
87
  output_channel: Text,
122
- language: Optional[Language] = None,
123
88
  **kwargs: Any,
124
89
  ) -> Optional[Dict[Text, Any]]:
125
90
  """Generate a response for the requested utter action."""
126
91
  # Fetching a random response for the passed utter action
127
92
  r = copy.deepcopy(
128
- self._random_response_for(
129
- utter_action, output_channel, filled_slots, language
130
- )
93
+ self._random_response_for(utter_action, output_channel, filled_slots)
131
94
  )
132
95
  # Filling the slots in the response with placeholders and returning the response
133
96
  if r is not None:
@@ -23,14 +23,6 @@ def get_translated_text(
23
23
  return translation.get(language_code, text)
24
24
 
25
25
 
26
- def has_translation(
27
- message: Dict[Text, Any], language: Optional[Language] = None
28
- ) -> bool:
29
- """Check if the message has a translation for the given language."""
30
- language_code = language.code if language else None
31
- return language_code in message.get(KEY_TRANSLATION, {})
32
-
33
-
34
26
  def get_translated_buttons(
35
27
  buttons: Optional[List[Dict[Text, Any]]], language: Optional[Language] = None
36
28
  ) -> Optional[List[Dict[Text, Any]]]:
@@ -373,6 +373,7 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
373
373
  embeddings=embeddings,
374
374
  index_path=path,
375
375
  create_index=True,
376
+ use_llm=self.use_llm,
376
377
  )
377
378
  else:
378
379
  logger.info("enterprise_search_policy.train.custom", store_type=store_type)
@@ -1,7 +1,7 @@
1
1
  import importlib.resources
2
2
  import math
3
3
  from dataclasses import dataclass, field
4
- from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Text, Tuple
4
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Text, Tuple
5
5
 
6
6
  import structlog
7
7
  import tiktoken
@@ -18,7 +18,6 @@ from rasa.core.constants import (
18
18
  UTTER_SOURCE_METADATA_KEY,
19
19
  )
20
20
  from rasa.core.policies.policy import Policy, PolicyPrediction, SupportedData
21
- from rasa.dialogue_understanding.patterns.chitchat import FLOW_PATTERN_CHITCHAT
22
21
  from rasa.dialogue_understanding.stack.frames import (
23
22
  ChitChatStackFrame,
24
23
  DialogueStackFrame,
@@ -38,10 +37,9 @@ from rasa.shared.constants import (
38
37
  OPENAI_PROVIDER,
39
38
  PROMPT_CONFIG_KEY,
40
39
  PROVIDER_CONFIG_KEY,
41
- REQUIRED_SLOTS_KEY,
42
40
  TIMEOUT_CONFIG_KEY,
43
41
  )
44
- from rasa.shared.core.constants import ACTION_LISTEN_NAME, ACTION_TRIGGER_CHITCHAT
42
+ from rasa.shared.core.constants import ACTION_LISTEN_NAME
45
43
  from rasa.shared.core.domain import KEY_RESPONSES_TEXT, Domain
46
44
  from rasa.shared.core.events import (
47
45
  ActionExecuted,
@@ -51,6 +49,7 @@ from rasa.shared.core.events import (
51
49
  )
52
50
  from rasa.shared.core.flows import FlowsList
53
51
  from rasa.shared.core.generator import TrackerWithCachedStates
52
+ from rasa.shared.core.policies.utils import filter_responses_for_intentless_policy
54
53
  from rasa.shared.core.trackers import DialogueStateTracker
55
54
  from rasa.shared.exceptions import FileIOException, RasaCoreException
56
55
  from rasa.shared.nlu.constants import PREDICTED_CONFIDENCE_KEY
@@ -146,59 +145,6 @@ class Conversation:
146
145
  interactions: List[Interaction] = field(default_factory=list)
147
146
 
148
147
 
149
- def collect_form_responses(forms: Forms) -> Set[Text]:
150
- """Collect responses that belong the requested slots in forms.
151
-
152
- Args:
153
- forms: the forms from the domain
154
- Returns:
155
- all utterances used in forms
156
- """
157
- form_responses = set()
158
- for _, form_info in forms.data.items():
159
- for required_slot in form_info.get(REQUIRED_SLOTS_KEY, []):
160
- form_responses.add(f"utter_ask_{required_slot}")
161
- return form_responses
162
-
163
-
164
- def filter_responses(responses: Responses, forms: Forms, flows: FlowsList) -> Responses:
165
- """Filters out responses that are unwanted for the intentless policy.
166
-
167
- This includes utterances used in flows and forms.
168
-
169
- Args:
170
- responses: the responses from the domain
171
- forms: the forms from the domain
172
- flows: all flows
173
- Returns:
174
- The remaining, relevant responses for the intentless policy.
175
- """
176
- form_responses = collect_form_responses(forms)
177
- flow_responses = flows.utterances
178
- combined_responses = form_responses | flow_responses
179
- filtered_responses = {
180
- name: variants
181
- for name, variants in responses.data.items()
182
- if name not in combined_responses
183
- }
184
-
185
- pattern_chitchat = flows.flow_by_id(FLOW_PATTERN_CHITCHAT)
186
-
187
- # The following condition is highly unlikely, but mypy requires the case
188
- # of pattern_chitchat == None to be addressed
189
- if not pattern_chitchat:
190
- return Responses(data=filtered_responses)
191
-
192
- # if action_trigger_chitchat, filter out "utter_free_chitchat_response"
193
- has_action_trigger_chitchat = pattern_chitchat.has_action_step(
194
- ACTION_TRIGGER_CHITCHAT
195
- )
196
- if has_action_trigger_chitchat:
197
- filtered_responses.pop("utter_free_chitchat_response", None)
198
-
199
- return Responses(data=filtered_responses)
200
-
201
-
202
148
  def action_from_response(
203
149
  text: Optional[str], responses: Dict[Text, List[Dict[Text, Any]]]
204
150
  ) -> Optional[str]:
@@ -512,7 +458,9 @@ class IntentlessPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
512
458
  # Perform health checks of both LLM and embeddings client configs
513
459
  self._perform_health_checks(self.config, "intentless_policy.train")
514
460
 
515
- responses = filter_responses(responses, forms, flows or FlowsList([]))
461
+ responses = filter_responses_for_intentless_policy(
462
+ responses, forms, flows or FlowsList([])
463
+ )
516
464
  telemetry.track_intentless_policy_train()
517
465
  response_texts = [r for r in extract_ai_response_examples(responses.data)]
518
466
 
@@ -947,7 +895,6 @@ class IntentlessPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
947
895
  **kwargs: Any,
948
896
  ) -> "IntentlessPolicy":
949
897
  """Loads a trained policy (see parent class for full docstring)."""
950
-
951
898
  # Perform health checks of both LLM and embeddings client configs
952
899
  cls._perform_health_checks(config, "intentless_policy.load")
953
900
 
@@ -65,7 +65,7 @@ class KnowledgeAnswerCommand(FreeFormAnswerCommand):
65
65
  """Converts the command to a DSL string."""
66
66
  mapper = {
67
67
  CommandSyntaxVersion.v1: "SearchAndReply()",
68
- CommandSyntaxVersion.v2: "provide info",
68
+ CommandSyntaxVersion.v2: "search and reply",
69
69
  }
70
70
  return mapper.get(
71
71
  CommandSyntaxManager.get_syntax_version(),
@@ -81,7 +81,7 @@ class KnowledgeAnswerCommand(FreeFormAnswerCommand):
81
81
  def regex_pattern() -> str:
82
82
  mapper = {
83
83
  CommandSyntaxVersion.v1: r"SearchAndReply\(\)",
84
- CommandSyntaxVersion.v2: r"""^[\s\W\d]*provide info['"`]*$""",
84
+ CommandSyntaxVersion.v2: r"""^[\s\W\d]*search and reply['"`]*$""",
85
85
  }
86
86
  return mapper.get(
87
87
  CommandSyntaxManager.get_syntax_version(),