rasa-pro 3.12.5__py3-none-any.whl → 3.13.0.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (35)
  1. rasa/cli/scaffold.py +1 -1
  2. rasa/core/actions/action.py +38 -28
  3. rasa/core/actions/action_run_slot_rejections.py +1 -1
  4. rasa/core/channels/studio_chat.py +16 -43
  5. rasa/core/information_retrieval/faiss.py +62 -6
  6. rasa/core/nlg/contextual_response_rephraser.py +7 -6
  7. rasa/core/nlg/generator.py +5 -21
  8. rasa/core/nlg/response.py +6 -43
  9. rasa/core/nlg/translate.py +0 -8
  10. rasa/core/policies/enterprise_search_policy.py +1 -0
  11. rasa/dialogue_understanding/commands/knowledge_answer_command.py +2 -2
  12. rasa/dialogue_understanding/generator/llm_based_command_generator.py +1 -2
  13. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2 +1 -1
  14. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 +5 -2
  15. rasa/dialogue_understanding_test/command_metric_calculation.py +7 -40
  16. rasa/dialogue_understanding_test/command_metrics.py +38 -0
  17. rasa/dialogue_understanding_test/du_test_case.py +58 -25
  18. rasa/dialogue_understanding_test/du_test_result.py +228 -132
  19. rasa/dialogue_understanding_test/du_test_runner.py +10 -1
  20. rasa/dialogue_understanding_test/io.py +35 -8
  21. rasa/model_manager/model_api.py +1 -1
  22. rasa/model_manager/socket_bridge.py +0 -7
  23. rasa/shared/core/slot_mappings.py +12 -0
  24. rasa/shared/core/slots.py +1 -1
  25. rasa/shared/core/trackers.py +4 -10
  26. rasa/shared/providers/llm/default_litellm_llm_client.py +2 -2
  27. rasa/tracing/instrumentation/attribute_extractors.py +36 -6
  28. rasa/version.py +1 -1
  29. {rasa_pro-3.12.5.dist-info → rasa_pro-3.13.0.dev1.dist-info}/METADATA +5 -6
  30. {rasa_pro-3.12.5.dist-info → rasa_pro-3.13.0.dev1.dist-info}/RECORD +33 -34
  31. {rasa_pro-3.12.5.dist-info → rasa_pro-3.13.0.dev1.dist-info}/WHEEL +1 -1
  32. README.md +0 -38
  33. rasa/keys +0 -1
  34. {rasa_pro-3.12.5.dist-info → rasa_pro-3.13.0.dev1.dist-info}/NOTICE +0 -0
  35. {rasa_pro-3.12.5.dist-info → rasa_pro-3.13.0.dev1.dist-info}/entry_points.txt +0 -0
rasa/cli/scaffold.py CHANGED
@@ -64,7 +64,7 @@ def add_subparser(
          "--template",
          type=ProjectTemplateName,
          choices=list(ProjectTemplateName),
-         default=ProjectTemplateName.DEFAULT,
+         default=ProjectTemplateName.CALM,
          help="Select the template to use for the project.",
      )
      scaffold_parser.set_defaults(func=run)
rasa/core/actions/action.py CHANGED
@@ -23,9 +23,11 @@ from rasa.core.constants import (
      KEY_IS_COEXISTENCE_ASSISTANT,
      UTTER_SOURCE_METADATA_KEY,
  )
+ from rasa.core.nlg.translate import get_translated_buttons, get_translated_text
  from rasa.core.policies.policy import PolicyPrediction
  from rasa.core.utils import add_bot_utterance_metadata
  from rasa.e2e_test.constants import KEY_STUB_CUSTOM_ACTIONS
+ from rasa.engine.language import Language
  from rasa.nlu.constants import (
      RESPONSE_SELECTOR_DEFAULT_INTENT,
      RESPONSE_SELECTOR_PREDICTION_KEY,
@@ -82,6 +84,7 @@ from rasa.shared.core.events import (
      UserUttered,
  )
  from rasa.shared.core.flows import FlowsList
+ from rasa.shared.core.flows.constants import KEY_TRANSLATION
  from rasa.shared.core.slot_mappings import (
      SlotFillingManager,
      extract_slot_value,
@@ -254,25 +257,36 @@ def action_for_name_or_text(
      return RemoteAction(action_name_or_text, action_endpoint)


- def create_bot_utterance(message: Dict[Text, Any]) -> BotUttered:
-     """Create BotUttered event from message."""
-     bot_message = BotUttered(
-         text=message.pop(TEXT, None),
-         data={
-             ELEMENTS: message.pop(ELEMENTS, None),
-             QUICK_REPLIES: message.pop(QUICK_REPLIES, None),
-             BUTTONS: message.pop(BUTTONS, None),
-             # for legacy / compatibility reasons we need to set the image
-             # to be the attachment if there is no other attachment (the
-             # `.get` is intentional - no `pop` as we still need the image`
-             # property to set it in the following line)
-             ATTACHMENT: message.pop(ATTACHMENT, None) or message.get(IMAGE, None),
-             IMAGE: message.pop(IMAGE, None),
-             CUSTOM: message.pop(CUSTOM, None),
-         },
-         metadata=message,
+ def create_bot_utterance(
+     message: Dict[Text, Any], language: Optional[Language] = None
+ ) -> BotUttered:
+     """Create BotUttered event from message with translation support."""
+     message_copy = copy.deepcopy(message)
+
+     text = get_translated_text(
+         text=message_copy.pop(TEXT, None),
+         translation=message_copy.pop(KEY_TRANSLATION, {}),
+         language=language,
+     )
+
+     buttons = get_translated_buttons(
+         buttons=message_copy.pop(BUTTONS, None), language=language
      )
-     return bot_message
+
+     data = {
+         ELEMENTS: message_copy.pop(ELEMENTS, None),
+         QUICK_REPLIES: message_copy.pop(QUICK_REPLIES, None),
+         BUTTONS: buttons,
+         # for legacy / compatibility reasons we need to set the image
+         # to be the attachment if there is no other attachment (the
+         # `.get` is intentional - no `pop` as we still need the image`
+         # property to set it in the following line)
+         ATTACHMENT: message_copy.pop(ATTACHMENT, None) or message_copy.get(IMAGE, None),
+         IMAGE: message_copy.pop(IMAGE, None),
+         CUSTOM: message_copy.pop(CUSTOM, None),
+     }
+
+     return BotUttered(text=text, data=data, metadata=message_copy)


  class Action:
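A hypothetical payload illustrating the new translation-aware `create_bot_utterance` above. The literal keys ("text", "buttons", "translation") stand in for the TEXT, BUTTONS and KEY_TRANSLATION constants, whose exact string values are not shown in this diff, and the sample values are invented:

    message = {
        "text": "Your card has been blocked.",
        "translation": {"de": "Ihre Karte wurde gesperrt."},
        "buttons": [{"title": "OK", "payload": "/affirm"}],
    }
    # create_bot_utterance(message, language) builds a BotUttered event whose text
    # is the German variant when language.code == "de"; when no matching
    # translation exists, the original text is kept (see get_translated_text below).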
@@ -385,7 +399,7 @@ class ActionBotResponse(Action):
          message = add_bot_utterance_metadata(
              message, self.utter_action, nlg, domain, tracker
          )
-         return [create_bot_utterance(message)]
+         return [create_bot_utterance(message, tracker.current_language)]

      def name(self) -> Text:
          """Returns action name."""
@@ -419,7 +433,7 @@ class ActionEndToEndResponse(Action):
      ) -> List[Event]:
          """Runs action (see parent class for full docstring)."""
          message = {"text": self.action_text}
-         return [create_bot_utterance(message)]
+         return [create_bot_utterance(message, tracker.current_language)]

      def event_for_successful_execution(
          self,
@@ -885,10 +899,7 @@ class RemoteAction(Action):
              generated_response = response.pop("response", None)
              if generated_response is not None:
                  draft = await nlg.generate(
-                     generated_response,
-                     tracker,
-                     output_channel.name(),
-                     **response,
+                     generated_response, tracker, output_channel.name(), **response
                  )
                  if not draft:
                      continue
@@ -906,7 +917,7 @@
              # Avoid overwriting `draft` values with empty values
              response = {k: v for k, v in response.items() if v}
              draft.update(response)
-             bot_messages.append(create_bot_utterance(draft))
+             bot_messages.append(create_bot_utterance(draft, tracker.current_language))

          return bot_messages

@@ -1063,7 +1074,6 @@ def _revert_rephrasing_events() -> List[Event]:
      ]


- # TODO: this should be removed, e.g. it uses a hardcoded message and no translation
  class ActionDefaultAskAffirmation(Action):
      """Default implementation which asks the user to affirm his intent.

@@ -1115,7 +1125,7 @@ class ActionDefaultAskAffirmation(Action):
              "utter_action": self.name(),
          }

-         return [create_bot_utterance(message)]
+         return [create_bot_utterance(message, tracker.current_language)]


  class ActionDefaultAskRephrase(ActionBotResponse):
@@ -1148,7 +1158,7 @@ class ActionSendText(Action):
          fallback = {"text": ""}
          metadata_copy = copy.deepcopy(metadata) if metadata else {}
          message = metadata_copy.get("message", fallback)
-         return [create_bot_utterance(message)]
+         return [create_bot_utterance(message, tracker.current_language)]


  class ActionExtractSlots(Action):
rasa/core/actions/action_run_slot_rejections.py CHANGED
@@ -217,6 +217,6 @@ class ActionRunSlotRejections(Action):
              message = add_bot_utterance_metadata(
                  message, utterance, nlg, domain, tracker
              )
-             events.append(create_bot_utterance(message))
+             events.append(create_bot_utterance(message, tracker.current_language))

          return events
rasa/core/channels/studio_chat.py CHANGED
@@ -120,13 +120,6 @@ class StudioChatInput(SocketIOInput):

          self._register_tracker_update_hook()

-     async def emit(self, event: str, data: Dict, room: str) -> None:
-         """Emits an event to the websocket."""
-         if not self.sio:
-             structlogger.error("studio_chat.emit.sio_not_initialized")
-             return
-         await self.sio.emit(event, data, room=room)
-
      def _register_tracker_update_hook(self) -> None:
          plugin_manager().register(StudioTrackerUpdatePlugin(self))

@@ -136,7 +129,10 @@

      async def publish_tracker_update(self, sender_id: str, tracker_dump: Dict) -> None:
          """Publishes a tracker update notification to the websocket."""
-         await self.emit("tracker", tracker_dump, room=sender_id)
+         if not self.sio:
+             structlogger.error("studio_chat.on_tracker_updated.sio_not_initialized")
+             return
+         await self.sio.emit("tracker", tracker_dump, room=sender_id)

      async def on_message_proxy(
          self,
@@ -176,45 +172,22 @@
              structlogger.error("studio_chat.sio.domain_not_initialized")
              return None

-         tracker: Optional[DialogueStateTracker] = None
-
          async with self.agent.lock_store.lock(data["sender_id"]):
-             try:
-                 tracker = DialogueStateTracker.from_dict(
-                     data["sender_id"], data["events"], domain.slots
-                 )
+             tracker = DialogueStateTracker.from_dict(
+                 data["sender_id"], data["events"], domain.slots
+             )
+
+             # will override an existing tracker with the same id!
+             await self.agent.tracker_store.save(tracker)

-                 # will override an existing tracker with the same id!
+             processor = self.agent.processor
+             if processor and does_need_action_prediction(tracker):
+                 output_channel = self.get_output_channel()
+
+                 await processor._run_prediction_loop(output_channel, tracker)
+                 await processor.run_anonymization_pipeline(tracker)
                  await self.agent.tracker_store.save(tracker)

-                 processor = self.agent.processor
-                 if processor and does_need_action_prediction(tracker):
-                     output_channel = self.get_output_channel()
-
-                     await processor._run_prediction_loop(output_channel, tracker)
-                     await processor.run_anonymization_pipeline(tracker)
-                     await self.agent.tracker_store.save(tracker)
-             except Exception as e:
-                 structlogger.error(
-                     "studio_chat.sio.handle_tracker_update.error",
-                     error=e,
-                     sender_id=data["sender_id"],
-                 )
-                 await self.emit(
-                     "error",
-                     {
-                         "message": "An error occurred while updating the conversation.",
-                         "error": str(e),
-                         "exception": str(type(e).__name__),
-                     },
-                     room=sid,
-                 )
-         if not tracker:
-             # in case the tracker couldn't be updated, we retrieve the prior
-             # version and use that to populate the update
-             tracker = await self.agent.tracker_store.get_or_create_tracker(
-                 data["sender_id"]
-             )

          await self.on_tracker_updated(tracker)

      def blueprint(
rasa/core/information_retrieval/faiss.py CHANGED
@@ -31,10 +31,12 @@ class FAISS_Store(InformationRetrieval):
          index_path: str,
          docs_folder: Optional[str],
          create_index: Optional[bool] = False,
+         use_llm: bool = False,
      ):
          """Initializes the FAISS Store."""
          self.chunk_size = 1000
          self.chunk_overlap = 20
+         self.use_llm = use_llm

          path = Path(index_path) / "documents_faiss"
          if create_index:
@@ -71,6 +73,57 @@ class FAISS_Store(InformationRetrieval):

          return loader.load()

+     def _format_faqs(self, docs: List["Document"]) -> List["Document"]:
+         """Splits each loaded file into individual FAQs.
+
+         Args:
+             docs: Documents representing whole files containing FAQs.
+
+         Returns:
+             List of Document objects, each containing a separate FAQ.
+
+         Examples:
+             An example of a file containing FAQs:
+
+             Q: Who is Finley?
+             A: Finley is your smart assistant for the FinX App. You can add him to your
+             favorite messenger and tell him what you need help with.
+
+             Q: How does Finley work?
+             A: Finley is powered by the latest chatbot technology leveraging a unique
+             interplay of large language models and secure logic.
+
+         More details in documentation: https://rasa.com/docs/reference/config/policies/extractive-search/
+         """
+         structured_faqs = []
+         from langchain.schema import Document
+
+         for doc in docs:
+             faq_chunks = doc.page_content.strip().split("\n\n")
+
+             for chunk in faq_chunks:
+                 lines = chunk.strip().split("\n")
+                 if len(lines) < 2:
+                     continue  # Skip if something unexpected
+
+                 question_line = lines[0].strip()
+                 answer_line = lines[1].strip()
+
+                 question = question_line.replace("Q: ", "").strip()
+                 answer = answer_line.replace("A: ", "").strip()
+
+                 doc_obj = Document(
+                     page_content=question,
+                     metadata={
+                         "title": question.lower().replace(" ", "_")[:-1],
+                         "type": "faq",
+                         "answer": answer,
+                     },
+                 )
+
+                 structured_faqs.append(doc_obj)
+         return structured_faqs
+
      def _create_document_index(
          self, docs_folder: Optional[str], embedding: "Embeddings"
      ) -> FAISS:
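A standalone sketch of the Q/A splitting that `_format_faqs` performs, using the Finley sample from its docstring; plain dicts stand in for langchain `Document` objects, and only the first answer line of each block is captured, as in the method above:

    raw = """Q: Who is Finley?
    A: Finley is your smart assistant for the FinX App.

    Q: How does Finley work?
    A: Finley is powered by the latest chatbot technology."""

    faqs = []
    for chunk in raw.strip().split("\n\n"):
        lines = chunk.strip().split("\n")
        if len(lines) < 2:
            continue  # skip malformed blocks, as the method above does
        question = lines[0].replace("Q: ", "").strip()
        answer = lines[1].replace("A: ", "").strip()
        faqs.append(
            {
                "page_content": question,
                "metadata": {
                    "title": question.lower().replace(" ", "_")[:-1],  # drops the trailing "?"
                    "type": "faq",
                    "answer": answer,
                },
            }
        )

    print(faqs[0]["metadata"]["title"])  # -> who_is_finley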
@@ -87,12 +140,15 @@ class FAISS_Store(InformationRetrieval):
              raise ValueError("parameter `docs_folder` needs to be specified")

          docs = self.load_documents(docs_folder)
-         splitter = RecursiveCharacterTextSplitter(
-             chunk_size=self.chunk_size,
-             chunk_overlap=self.chunk_overlap,
-             length_function=len,
-         )
-         doc_chunks = splitter.split_documents(docs)
+         if self.use_llm:
+             splitter = RecursiveCharacterTextSplitter(
+                 chunk_size=self.chunk_size,
+                 chunk_overlap=self.chunk_overlap,
+                 length_function=len,
+             )
+             doc_chunks = splitter.split_documents(docs)
+         else:
+             doc_chunks = self._format_faqs(docs)

          logger.info(
              "information_retrieval.faiss_store._create_document_index",
rasa/core/nlg/contextual_response_rephraser.py CHANGED
@@ -200,10 +200,8 @@ class ContextualResponseRephraser(

      @measure_llm_latency
      async def _generate_llm_response(self, prompt: str) -> Optional[LLMResponse]:
-         """Use LLM to generate a response.
-
-         Returns an LLMResponse object containing both the generated text
-         (choices) and metadata.
+         """Use LLM to generate a response, returning an LLMResponse object
+         containing both the generated text (choices) and metadata.

          Args:
              prompt: The prompt to send to the LLM.
@@ -369,9 +367,12 @@
          Returns:
              The generated response.
          """
-         templated_response = await super().generate(
+         filled_slots = tracker.current_slot_values()
+         stack_context = tracker.stack.current_context()
+         templated_response = self.generate_from_slots(
              utter_action=utter_action,
-             tracker=tracker,
+             filled_slots=filled_slots,
+             stack_context=stack_context,
              output_channel=output_channel,
              **kwargs,
          )
rasa/core/nlg/generator.py CHANGED
@@ -6,8 +6,6 @@ from pypred import Predicate

  import rasa.shared.utils.common
  import rasa.shared.utils.io
- from rasa.core.nlg.translate import has_translation
- from rasa.engine.language import Language
  from rasa.shared.constants import CHANNEL, RESPONSE_CONDITION
  from rasa.shared.core.domain import Domain
  from rasa.shared.core.trackers import DialogueStateTracker
@@ -133,23 +131,11 @@ class ResponseVariationFilter:

          return True

-     def _filter_by_language(
-         self, responses: List[Dict[Text, Any]], language: Optional[Language] = None
-     ) -> List[Dict[Text, Any]]:
-         if not language:
-             return responses
-
-         if filtered := [r for r in responses if has_translation(r, language)]:
-             return filtered
-         # if no translation is found, return the original response variations
-         return responses
-
      def responses_for_utter_action(
          self,
          utter_action: Text,
          output_channel: Text,
          filled_slots: Dict[Text, Any],
-         language: Optional[Language] = None,
      ) -> List[Dict[Text, Any]]:
          """Returns array of responses that fit the channel, action and condition."""
          # filter responses without a condition
@@ -190,16 +176,16 @@ class ResponseVariationFilter:
          )

          if conditional_channel:
-             return self._filter_by_language(conditional_channel, language)
+             return conditional_channel

          if default_channel:
-             return self._filter_by_language(default_channel, language)
+             return default_channel

          if conditional_no_channel:
-             return self._filter_by_language(conditional_no_channel, language)
+             return conditional_no_channel

          if default_no_channel:
-             return self._filter_by_language(default_no_channel, language)
+             return default_no_channel

          # if there is no response variation selected,
          # return the internal error response to prevent
@@ -212,9 +198,7 @@ class ResponseVariationFilter:
              f"a default variation and that all the conditions are valid. "
              f"Returning the internal error response.",
          )
-         return self._filter_by_language(
-             self.responses.get("utter_internal_error_rasa", []), language
-         )
+         return self.responses.get("utter_internal_error_rasa", [])

      def get_response_variation_id(
          self,
rasa/core/nlg/response.py CHANGED
@@ -5,11 +5,8 @@ from typing import Any, Dict, List, Optional, Text
  from rasa.core.constants import DEFAULT_TEMPLATE_ENGINE, TEMPLATE_ENGINE_CONFIG_KEY
  from rasa.core.nlg import interpolator
  from rasa.core.nlg.generator import NaturalLanguageGenerator, ResponseVariationFilter
- from rasa.core.nlg.translate import get_translated_buttons, get_translated_text
- from rasa.engine.language import Language
- from rasa.shared.constants import BUTTONS, RESPONSE_CONDITION, TEXT
+ from rasa.shared.constants import RESPONSE_CONDITION
  from rasa.shared.core.domain import RESPONSE_KEYS_TO_INTERPOLATE
- from rasa.shared.core.flows.constants import KEY_TRANSLATION
  from rasa.shared.core.trackers import DialogueStateTracker
  from rasa.shared.nlu.constants import METADATA

@@ -33,11 +30,7 @@ class TemplatedNaturalLanguageGenerator(NaturalLanguageGenerator):

      # noinspection PyUnusedLocal
      def _random_response_for(
-         self,
-         utter_action: Text,
-         output_channel: Text,
-         filled_slots: Dict[Text, Any],
-         language: Optional[Language] = None,
+         self, utter_action: Text, output_channel: Text, filled_slots: Dict[Text, Any]
      ) -> Optional[Dict[Text, Any]]:
          """Select random response for the utter action from available ones.

@@ -49,7 +42,7 @@ class TemplatedNaturalLanguageGenerator(NaturalLanguageGenerator):
          if utter_action in self.responses:
              response_filter = ResponseVariationFilter(self.responses)
              suitable_responses = response_filter.responses_for_utter_action(
-                 utter_action, output_channel, filled_slots, language
+                 utter_action, output_channel, filled_slots
              )

              if suitable_responses:
@@ -82,36 +75,9 @@ class TemplatedNaturalLanguageGenerator(NaturalLanguageGenerator):
          """Generate a response for the requested utter action."""
          filled_slots = tracker.current_slot_values()
          stack_context = tracker.stack.current_context()
-         response = self.generate_from_slots(
-             utter_action,
-             filled_slots,
-             stack_context,
-             output_channel,
-             tracker.current_language,
-             **kwargs,
+         return self.generate_from_slots(
+             utter_action, filled_slots, stack_context, output_channel, **kwargs
          )
-         if response is not None:
-             return self.translate_response(response, tracker.current_language)
-         return None
-
-     def translate_response(
-         self, response: Dict[Text, Any], language: Optional[Language] = None
-     ) -> Dict[Text, Any]:
-         message_copy = copy.deepcopy(response)
-
-         text = get_translated_text(
-             text=message_copy.pop(TEXT, None),
-             translation=message_copy.pop(KEY_TRANSLATION, {}),
-             language=language,
-         )
-
-         buttons = get_translated_buttons(
-             buttons=message_copy.pop(BUTTONS, None), language=language
-         )
-         message_copy[TEXT] = text
-         if buttons:
-             message_copy[BUTTONS] = buttons
-         return message_copy


      def generate_from_slots(
          self,
@@ -119,15 +85,12 @@ class TemplatedNaturalLanguageGenerator(NaturalLanguageGenerator):
          filled_slots: Dict[Text, Any],
          stack_context: Dict[Text, Any],
          output_channel: Text,
-         language: Optional[Language] = None,
          **kwargs: Any,
      ) -> Optional[Dict[Text, Any]]:
          """Generate a response for the requested utter action."""
          # Fetching a random response for the passed utter action
          r = copy.deepcopy(
-             self._random_response_for(
-                 utter_action, output_channel, filled_slots, language
-             )
+             self._random_response_for(utter_action, output_channel, filled_slots)
          )
          # Filling the slots in the response with placeholders and returning the response
          if r is not None:
rasa/core/nlg/translate.py CHANGED
@@ -23,14 +23,6 @@ def get_translated_text(
      return translation.get(language_code, text)


- def has_translation(
-     message: Dict[Text, Any], language: Optional[Language] = None
- ) -> bool:
-     """Check if the message has a translation for the given language."""
-     language_code = language.code if language else None
-     return language_code in message.get(KEY_TRANSLATION, {})
-
-
  def get_translated_buttons(
      buttons: Optional[List[Dict[Text, Any]]], language: Optional[Language] = None
  ) -> Optional[List[Dict[Text, Any]]]:
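A minimal sketch of the fallback behaviour visible in `get_translated_text` above (only its return statement appears in this hunk; the handling of the language code is assumed):

    from typing import Dict, Optional

    def translated_text(
        text: Optional[str], translation: Dict[str, str], language_code: Optional[str]
    ) -> Optional[str]:
        # Same lookup as the visible return statement: unknown language -> original text.
        return translation.get(language_code, text)

    assert translated_text("Hello", {"de": "Hallo"}, "de") == "Hallo"
    assert translated_text("Hello", {"de": "Hallo"}, "fr") == "Hello"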
rasa/core/policies/enterprise_search_policy.py CHANGED
@@ -373,6 +373,7 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
                  embeddings=embeddings,
                  index_path=path,
                  create_index=True,
+                 use_llm=self.use_llm,
              )
          else:
              logger.info("enterprise_search_policy.train.custom", store_type=store_type)
rasa/dialogue_understanding/commands/knowledge_answer_command.py CHANGED
@@ -65,7 +65,7 @@ class KnowledgeAnswerCommand(FreeFormAnswerCommand):
          """Converts the command to a DSL string."""
          mapper = {
              CommandSyntaxVersion.v1: "SearchAndReply()",
-             CommandSyntaxVersion.v2: "provide info",
+             CommandSyntaxVersion.v2: "search and reply",
          }
          return mapper.get(
              CommandSyntaxManager.get_syntax_version(),
@@ -81,7 +81,7 @@ class KnowledgeAnswerCommand(FreeFormAnswerCommand):
      def regex_pattern() -> str:
          mapper = {
              CommandSyntaxVersion.v1: r"SearchAndReply\(\)",
-             CommandSyntaxVersion.v2: r"""^[\s\W\d]*provide info['"`]*$""",
+             CommandSyntaxVersion.v2: r"""^[\s\W\d]*search and reply['"`]*$""",
          }
          return mapper.get(
              CommandSyntaxManager.get_syntax_version(),
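A quick check (not from the package) that the updated v2 pattern matches the new command wording; the regular expression string is copied from the hunk above:

    import re

    V2_PATTERN = r"""^[\s\W\d]*search and reply['"`]*$"""

    assert re.match(V2_PATTERN, "search and reply")
    assert re.match(V2_PATTERN, "1. search and reply")
    assert not re.match(V2_PATTERN, "provide info")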
rasa/dialogue_understanding/generator/llm_based_command_generator.py CHANGED
@@ -230,19 +230,18 @@ class LLMBasedCommandGenerator(
      def compile_template(self, template: str) -> Template:
          """
          Compile the prompt template and register custom filters.
+
          Compiling the template is an expensive operation,
          so we cache the result.
          """
          # Create an environment
          # Autoescaping disabled explicitly for LLM prompt templates rendered from
-
          # strings (safe, not HTML)
          env = Environment(
              autoescape=select_autoescape(
                  disabled_extensions=["jinja2"], default_for_string=False, default=True
              )
          )
-
          # Register filters
          env.filters[TO_JSON_ESCAPED_STRING_JINJA_FILTER] = to_json_escaped_string
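A minimal sketch of the environment setup shown above: autoescaping stays off for templates rendered from plain strings (LLM prompts, not HTML). The lru_cache decorator is illustrative only; the package's own caching mechanism is not shown in this hunk:

    from functools import lru_cache

    from jinja2 import Environment, select_autoescape

    @lru_cache(maxsize=None)
    def compile_template(template: str):
        env = Environment(
            autoescape=select_autoescape(
                disabled_extensions=["jinja2"], default_for_string=False, default=True
            )
        )
        return env.from_string(template)

    prompt = compile_template("Hello {{ user }}").render(user="Finley")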
 
rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2 CHANGED
@@ -8,7 +8,7 @@ Your task is to analyze the current conversation context and generate a list of
  * `set slot slot_name slot_value`: Slot setting. For example, `set slot transfer_money_recipient Freddy`. Can be used to correct and change previously set values.
  * `cancel flow`: Cancelling the current flow.
  * `disambiguate flows flow_name1 flow_name2 ... flow_name_n`: Disambiguate which flow should be started when user input is ambiguous by listing the potential flows as options. For example, `disambiguate flows list_contacts add_contact remove_contact ...` if the user just wrote "contacts".
- * `provide info`: Responding to the user's questions by supplying relevant information, such as answering FAQs or explaining services.
+ * `search and reply`: Responding to the user's questions by supplying relevant information, such as answering FAQs or explaining services.
  * `offtopic reply`: Responding to casual or social user messages that are unrelated to any flows, engaging in friendly conversation and addressing off-topic remarks.
  * `hand over`: Handing over to a human, in case the user seems frustrated or explicitly asks to speak to one.

rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 CHANGED
@@ -16,7 +16,7 @@ Use the following structured data:
  * `set slot slot_name slot_value`: Slot setting. For example, `set slot transfer_money_recipient Freddy`. Can be used to correct and change previously set values.
  * `cancel flow`: Cancelling the current flow.
  * `disambiguate flows flow_name1 flow_name2 ... flow_name_n`: Disambiguate which flow should be started when user input is ambiguous by listing the potential flows as options. For example, `disambiguate flows list_contacts add_contact remove_contact ...` if the user just wrote "contacts".
- * `provide info`: Responding to the user's questions by supplying relevant information, such as answering FAQs or explaining services.
+ * `search and reply`: Responding to the user's message by accessing and supplying relevant information from the knowledge base to address their inquiry effectively.
  * `offtopic reply`: Responding to casual or social user messages that are unrelated to any flows, engaging in friendly conversation and addressing off-topic remarks.
  * `hand over`: Handing over to a human, in case the user seems frustrated or explicitly asks to speak to one.

@@ -27,8 +27,11 @@ Use the following structured data:
  * For categorical slots try to match the user message with allowed slot values. Use "other" if you cannot match it.
  * Set the boolean slots based on the user response. Map positive responses to `True`, and negative to `False`.
  * Extract text slot values exactly as provided by the user. Avoid assumptions, format changes, or partial extractions.
- * Only use information provided by the user.
  * Use clarification in ambiguous cases.
+ * Use `disambiguate flows` only when multiple flows could fit the same message (e.g., "card" could mean `block_card` or `replace_card`).
+ * A user asking a question does not automatically imply that they want `search and reply`. The objective is to help them complete a business process if its possible to do so via a flow.
+ * **Flow Priority**: If a user message can be addressed by starting a flow (even if it looks like a general question), ALWAYS start the flow first. Example: If the user says "How do I activate my card?", use `start flow activate_card` instead of `search and reply`. Only use `search and reply` if no flow matches the request.
+ * Only use information provided by the user.
  * Multiple flows can be started. If a user wants to digress into a second flow, you do not need to cancel the current flow.
  * Do not cancel the flow unless the user explicitly requests it.
  * Strictly adhere to the provided action format.