rasa-pro 3.9.18__py3-none-any.whl → 3.10.16__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of rasa-pro might be problematic.

Files changed (183)
  1. README.md +0 -374
  2. rasa/__init__.py +1 -2
  3. rasa/__main__.py +5 -0
  4. rasa/anonymization/anonymization_rule_executor.py +2 -2
  5. rasa/api.py +27 -23
  6. rasa/cli/arguments/data.py +27 -2
  7. rasa/cli/arguments/default_arguments.py +25 -3
  8. rasa/cli/arguments/run.py +9 -9
  9. rasa/cli/arguments/train.py +11 -3
  10. rasa/cli/data.py +70 -8
  11. rasa/cli/e2e_test.py +104 -431
  12. rasa/cli/evaluate.py +1 -1
  13. rasa/cli/interactive.py +1 -0
  14. rasa/cli/llm_fine_tuning.py +398 -0
  15. rasa/cli/project_templates/calm/endpoints.yml +1 -1
  16. rasa/cli/project_templates/tutorial/endpoints.yml +1 -1
  17. rasa/cli/run.py +15 -14
  18. rasa/cli/scaffold.py +10 -8
  19. rasa/cli/studio/studio.py +35 -5
  20. rasa/cli/train.py +56 -8
  21. rasa/cli/utils.py +22 -5
  22. rasa/cli/x.py +1 -1
  23. rasa/constants.py +7 -1
  24. rasa/core/actions/action.py +98 -49
  25. rasa/core/actions/action_run_slot_rejections.py +4 -1
  26. rasa/core/actions/custom_action_executor.py +9 -6
  27. rasa/core/actions/direct_custom_actions_executor.py +80 -0
  28. rasa/core/actions/e2e_stub_custom_action_executor.py +68 -0
  29. rasa/core/actions/grpc_custom_action_executor.py +2 -2
  30. rasa/core/actions/http_custom_action_executor.py +6 -5
  31. rasa/core/agent.py +21 -17
  32. rasa/core/channels/__init__.py +2 -0
  33. rasa/core/channels/audiocodes.py +1 -16
  34. rasa/core/channels/voice_aware/__init__.py +0 -0
  35. rasa/core/channels/voice_aware/jambonz.py +103 -0
  36. rasa/core/channels/voice_aware/jambonz_protocol.py +344 -0
  37. rasa/core/channels/voice_aware/utils.py +20 -0
  38. rasa/core/channels/voice_native/__init__.py +0 -0
  39. rasa/core/constants.py +6 -1
  40. rasa/core/information_retrieval/faiss.py +7 -4
  41. rasa/core/information_retrieval/information_retrieval.py +8 -0
  42. rasa/core/information_retrieval/milvus.py +9 -2
  43. rasa/core/information_retrieval/qdrant.py +1 -1
  44. rasa/core/nlg/contextual_response_rephraser.py +32 -10
  45. rasa/core/nlg/summarize.py +4 -3
  46. rasa/core/policies/enterprise_search_policy.py +113 -45
  47. rasa/core/policies/flows/flow_executor.py +122 -76
  48. rasa/core/policies/intentless_policy.py +83 -29
  49. rasa/core/processor.py +72 -54
  50. rasa/core/run.py +5 -4
  51. rasa/core/tracker_store.py +8 -4
  52. rasa/core/training/interactive.py +1 -1
  53. rasa/core/utils.py +56 -57
  54. rasa/dialogue_understanding/coexistence/llm_based_router.py +53 -13
  55. rasa/dialogue_understanding/commands/__init__.py +6 -0
  56. rasa/dialogue_understanding/commands/restart_command.py +58 -0
  57. rasa/dialogue_understanding/commands/session_start_command.py +59 -0
  58. rasa/dialogue_understanding/commands/utils.py +40 -0
  59. rasa/dialogue_understanding/generator/constants.py +10 -3
  60. rasa/dialogue_understanding/generator/flow_retrieval.py +21 -5
  61. rasa/dialogue_understanding/generator/llm_based_command_generator.py +13 -3
  62. rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +134 -90
  63. rasa/dialogue_understanding/generator/nlu_command_adapter.py +47 -7
  64. rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +127 -41
  65. rasa/dialogue_understanding/patterns/restart.py +37 -0
  66. rasa/dialogue_understanding/patterns/session_start.py +37 -0
  67. rasa/dialogue_understanding/processor/command_processor.py +16 -3
  68. rasa/dialogue_understanding/processor/command_processor_component.py +6 -2
  69. rasa/e2e_test/aggregate_test_stats_calculator.py +134 -0
  70. rasa/e2e_test/assertions.py +1223 -0
  71. rasa/e2e_test/assertions_schema.yml +106 -0
  72. rasa/e2e_test/constants.py +20 -0
  73. rasa/e2e_test/e2e_config.py +220 -0
  74. rasa/e2e_test/e2e_config_schema.yml +26 -0
  75. rasa/e2e_test/e2e_test_case.py +131 -8
  76. rasa/e2e_test/e2e_test_converter.py +363 -0
  77. rasa/e2e_test/e2e_test_converter_prompt.jinja2 +70 -0
  78. rasa/e2e_test/e2e_test_coverage_report.py +364 -0
  79. rasa/e2e_test/e2e_test_result.py +26 -6
  80. rasa/e2e_test/e2e_test_runner.py +493 -71
  81. rasa/e2e_test/e2e_test_schema.yml +96 -0
  82. rasa/e2e_test/pykwalify_extensions.py +39 -0
  83. rasa/e2e_test/stub_custom_action.py +70 -0
  84. rasa/e2e_test/utils/__init__.py +0 -0
  85. rasa/e2e_test/utils/e2e_yaml_utils.py +55 -0
  86. rasa/e2e_test/utils/io.py +598 -0
  87. rasa/e2e_test/utils/validation.py +80 -0
  88. rasa/engine/graph.py +9 -3
  89. rasa/engine/recipes/default_components.py +0 -2
  90. rasa/engine/recipes/default_recipe.py +10 -2
  91. rasa/engine/storage/local_model_storage.py +40 -12
  92. rasa/engine/validation.py +78 -1
  93. rasa/env.py +9 -0
  94. rasa/graph_components/providers/story_graph_provider.py +59 -6
  95. rasa/llm_fine_tuning/__init__.py +0 -0
  96. rasa/llm_fine_tuning/annotation_module.py +241 -0
  97. rasa/llm_fine_tuning/conversations.py +144 -0
  98. rasa/llm_fine_tuning/llm_data_preparation_module.py +178 -0
  99. rasa/llm_fine_tuning/notebooks/unsloth_finetuning.ipynb +407 -0
  100. rasa/llm_fine_tuning/paraphrasing/__init__.py +0 -0
  101. rasa/llm_fine_tuning/paraphrasing/conversation_rephraser.py +281 -0
  102. rasa/llm_fine_tuning/paraphrasing/default_rephrase_prompt_template.jina2 +44 -0
  103. rasa/llm_fine_tuning/paraphrasing/rephrase_validator.py +121 -0
  104. rasa/llm_fine_tuning/paraphrasing/rephrased_user_message.py +10 -0
  105. rasa/llm_fine_tuning/paraphrasing_module.py +128 -0
  106. rasa/llm_fine_tuning/storage.py +174 -0
  107. rasa/llm_fine_tuning/train_test_split_module.py +441 -0
  108. rasa/model_training.py +56 -16
  109. rasa/nlu/persistor.py +157 -36
  110. rasa/server.py +45 -10
  111. rasa/shared/constants.py +76 -16
  112. rasa/shared/core/domain.py +27 -19
  113. rasa/shared/core/events.py +28 -2
  114. rasa/shared/core/flows/flow.py +208 -13
  115. rasa/shared/core/flows/flow_path.py +84 -0
  116. rasa/shared/core/flows/flows_list.py +33 -11
  117. rasa/shared/core/flows/flows_yaml_schema.json +269 -193
  118. rasa/shared/core/flows/validation.py +112 -25
  119. rasa/shared/core/flows/yaml_flows_io.py +149 -10
  120. rasa/shared/core/trackers.py +6 -0
  121. rasa/shared/core/training_data/structures.py +20 -0
  122. rasa/shared/core/training_data/visualization.html +2 -2
  123. rasa/shared/exceptions.py +4 -0
  124. rasa/shared/importers/importer.py +64 -16
  125. rasa/shared/nlu/constants.py +2 -0
  126. rasa/shared/providers/_configs/__init__.py +0 -0
  127. rasa/shared/providers/_configs/azure_openai_client_config.py +183 -0
  128. rasa/shared/providers/_configs/client_config.py +57 -0
  129. rasa/shared/providers/_configs/default_litellm_client_config.py +130 -0
  130. rasa/shared/providers/_configs/huggingface_local_embedding_client_config.py +234 -0
  131. rasa/shared/providers/_configs/openai_client_config.py +175 -0
  132. rasa/shared/providers/_configs/self_hosted_llm_client_config.py +176 -0
  133. rasa/shared/providers/_configs/utils.py +101 -0
  134. rasa/shared/providers/_ssl_verification_utils.py +124 -0
  135. rasa/shared/providers/embedding/__init__.py +0 -0
  136. rasa/shared/providers/embedding/_base_litellm_embedding_client.py +259 -0
  137. rasa/shared/providers/embedding/_langchain_embedding_client_adapter.py +74 -0
  138. rasa/shared/providers/embedding/azure_openai_embedding_client.py +277 -0
  139. rasa/shared/providers/embedding/default_litellm_embedding_client.py +102 -0
  140. rasa/shared/providers/embedding/embedding_client.py +90 -0
  141. rasa/shared/providers/embedding/embedding_response.py +41 -0
  142. rasa/shared/providers/embedding/huggingface_local_embedding_client.py +191 -0
  143. rasa/shared/providers/embedding/openai_embedding_client.py +172 -0
  144. rasa/shared/providers/llm/__init__.py +0 -0
  145. rasa/shared/providers/llm/_base_litellm_client.py +251 -0
  146. rasa/shared/providers/llm/azure_openai_llm_client.py +338 -0
  147. rasa/shared/providers/llm/default_litellm_llm_client.py +84 -0
  148. rasa/shared/providers/llm/llm_client.py +76 -0
  149. rasa/shared/providers/llm/llm_response.py +50 -0
  150. rasa/shared/providers/llm/openai_llm_client.py +155 -0
  151. rasa/shared/providers/llm/self_hosted_llm_client.py +293 -0
  152. rasa/shared/providers/mappings.py +75 -0
  153. rasa/shared/utils/cli.py +30 -0
  154. rasa/shared/utils/io.py +65 -2
  155. rasa/shared/utils/llm.py +246 -200
  156. rasa/shared/utils/yaml.py +121 -15
  157. rasa/studio/auth.py +6 -4
  158. rasa/studio/config.py +13 -4
  159. rasa/studio/constants.py +1 -0
  160. rasa/studio/data_handler.py +10 -3
  161. rasa/studio/download.py +19 -13
  162. rasa/studio/train.py +2 -3
  163. rasa/studio/upload.py +19 -11
  164. rasa/telemetry.py +113 -58
  165. rasa/tracing/instrumentation/attribute_extractors.py +32 -17
  166. rasa/utils/common.py +18 -19
  167. rasa/utils/endpoints.py +7 -4
  168. rasa/utils/json_utils.py +60 -0
  169. rasa/utils/licensing.py +9 -1
  170. rasa/utils/ml_utils.py +4 -2
  171. rasa/validator.py +213 -3
  172. rasa/version.py +1 -1
  173. rasa_pro-3.10.16.dist-info/METADATA +196 -0
  174. {rasa_pro-3.9.18.dist-info → rasa_pro-3.10.16.dist-info}/RECORD +179 -113
  175. rasa/nlu/classifiers/llm_intent_classifier.py +0 -519
  176. rasa/shared/providers/openai/clients.py +0 -43
  177. rasa/shared/providers/openai/session_handler.py +0 -110
  178. rasa_pro-3.9.18.dist-info/METADATA +0 -563
  179. /rasa/{shared/providers/openai → cli/project_templates/tutorial/actions}/__init__.py +0 -0
  180. /rasa/cli/project_templates/tutorial/{actions.py → actions/actions.py} +0 -0
  181. {rasa_pro-3.9.18.dist-info → rasa_pro-3.10.16.dist-info}/NOTICE +0 -0
  182. {rasa_pro-3.9.18.dist-info → rasa_pro-3.10.16.dist-info}/WHEEL +0 -0
  183. {rasa_pro-3.9.18.dist-info → rasa_pro-3.10.16.dist-info}/entry_points.txt +0 -0
rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py:

@@ -24,6 +24,7 @@ from rasa.dialogue_understanding.generator.constants import (
     LLM_CONFIG_KEY,
     USER_INPUT_CONFIG_KEY,
     FLOW_RETRIEVAL_KEY,
+    DEFAULT_LLM_CONFIG,
 )
 from rasa.dialogue_understanding.generator.flow_retrieval import FlowRetrieval
 from rasa.dialogue_understanding.generator.llm_based_command_generator import (
@@ -53,6 +54,7 @@ from rasa.shared.utils.llm import (
     tracker_as_readable_transcript,
     sanitize_message_for_prompt,
     allowed_values_for_slot,
+    try_instantiate_llm_client,
 )

 # multistep template keys
@@ -141,6 +143,12 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
         prompts = cls._load_prompt_templates(model_storage, resource)
         # init base command generator
         command_generator = cls(config, model_storage, resource, prompts)
+        try_instantiate_llm_client(
+            command_generator.config.get(LLM_CONFIG_KEY),
+            DEFAULT_LLM_CONFIG,
+            "multi_step_llm_command_generator.load",
+            MultiStepLLMCommandGenerator.__name__,
+        )
         # load flow retrieval if enabled
         if command_generator.enabled_flow_retrieval:
             command_generator.flow_retrieval = cls.load_flow_retrival(
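Both generators now instantiate their LLM client eagerly in `load()`, so a broken `llm:` config fails at model-load time instead of on the first user message. A minimal sketch of what `try_instantiate_llm_client` plausibly does; the real helper ships in the reworked `rasa/shared/utils/llm.py` (file 155 above), and the body below is an assumption inferred from the call site, not a copy:

```python
# Hypothetical sketch of try_instantiate_llm_client; the parameter names are
# inferred from the call site above, the body is an assumption.
from typing import Any, Dict, Optional

import structlog

structlogger = structlog.get_logger()


def try_instantiate_llm_client(
    custom_config: Optional[Dict[str, Any]],
    default_config: Dict[str, Any],
    log_source_function: str,
    log_source_component: str,
) -> Any:
    """Fail fast: build the LLM client now so misconfiguration surfaces at load."""
    from rasa.shared.utils.llm import llm_factory  # existing rasa helper

    try:
        return llm_factory(custom_config, default_config)
    except Exception:
        # Record which component and method hit the bad config, then re-raise.
        structlogger.error(
            "llm_client.instantiation_failed",
            source_function=log_source_function,
            source_component=log_source_component,
        )
        raise
```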
@@ -179,101 +187,29 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             return []

         try:
-            # retrieve relevant flows
-            filtered_flows = await self.filter_flows(message, flows, tracker)
-
-            # 1st step: Handle active flow
-            if tracker.has_active_user_flow:
-                commands_from_active_flow = (
-                    await self._predict_commands_for_active_flow(
-                        message,
-                        tracker,
-                        available_flows=filtered_flows,
-                        all_flows=flows,
-                    )
-                )
-            else:
-                commands_from_active_flow = []
-
-            # 2nd step: Check if we need to switch to another flow
-            contains_change_flow_command = any(
-                isinstance(command, ChangeFlowCommand)
-                for command in commands_from_active_flow
-            )
-            should_change_flows = (
-                not commands_from_active_flow or contains_change_flow_command
+            commands = await self._predict_commands_with_multi_step(
+                message, flows, tracker
             )
+            commands = self._clean_up_commands(commands)
+        except ProviderClientAPIException:
+            # if any step resulted in API exception, the command prediction cannot
+            # be completed, "predict" the ErrorCommand
+            commands = [ErrorCommand()]

-            if should_change_flows:
-                commands_for_handling_flows = (
-                    await self._predict_commands_for_handling_flows(
-                        message,
-                        tracker,
-                        available_flows=filtered_flows,
-                        all_flows=flows,
-                    )
-                )
-            else:
-                commands_for_handling_flows = []
-
-            if contains_change_flow_command:
-                commands_from_active_flow.pop(
-                    commands_from_active_flow.index(ChangeFlowCommand())
-                )
-
-            # 3rd step: Fill slots for started flows
-            newly_started_flows = FlowsList(
-                [
-                    flow
-                    for command in commands_for_handling_flows
-                    if (
-                        isinstance(command, StartFlowCommand)
-                        and (flow := filtered_flows.flow_by_id(command.flow))
-                        is not None
-                    )
-                ]
-            )
+        if not commands:
+            # if for any reason the final list of commands is empty,
+            # "predict" CannotHandle
+            commands = [CannotHandleCommand()]

-            commands_for_newly_started_flows = (
-                await self._predict_commands_for_newly_started_flows(
-                    message,
-                    tracker,
-                    newly_started_flows=newly_started_flows,
-                    all_flows=flows,
-                )
-            )
+        if tracker.has_coexistence_routing_slot:
+            # if coexistence feature is used, set the routing slot
+            commands += [SetSlotCommand(ROUTE_TO_CALM_SLOT, True)]

-        # if any step resulted in API exception,
-        # the command prediction cannot be completed,
-        # raise ErrorCommand
-        except ProviderClientAPIException:
-            return [ErrorCommand()]
-
-        # concatenate predicted commands
-        commands = list(
-            set(
-                commands_from_active_flow
-                + commands_for_handling_flows
-                + commands_for_newly_started_flows
-            )
-        )
-        commands = self._clean_up_commands(commands)
         structlogger.debug(
-            "multi_step_llm_command_generator" ".predict_commands" ".finished",
+            "multi_step_llm_command_generator.predict_commands.finished",
             commands=commands,
         )

-        # if for any reason the final list of commands is empty,
-        # return CannotHandle
-        if not commands:
-            # if action_list is None, we couldn't get any response from the LLM
-            commands = [CannotHandleCommand()]
-        else:
-            # if the LLM command generator predicted valid commands and the
-            # coexistence feature is used, set the routing slot
-            if tracker.has_coexistence_routing_slot:
-                commands += [SetSlotCommand(ROUTE_TO_CALM_SLOT, True)]
-
         return commands

     @classmethod
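`predict_commands` is reduced to the fallback contract; the three-step prediction moved into `_predict_commands_with_multi_step` (added further down in this file). Note that the coexistence routing slot is now set unconditionally, where before it was only set for valid predictions. The contract in isolation, with plain strings standing in for rasa's command classes, illustrative only:

```python
from typing import Callable, List


class ProviderAPIError(Exception):
    """Stand-in for rasa.shared.exceptions.ProviderClientAPIException."""


def predict_with_fallbacks(predict: Callable[[], List[str]]) -> List[str]:
    # Mirrors the refactored predict_commands: an API failure "predicts" an
    # error command; an empty result "predicts" cannot-handle.
    try:
        commands = predict()
    except ProviderAPIError:
        commands = ["ErrorCommand"]
    if not commands:
        commands = ["CannotHandleCommand"]
    return commands


def failing_predict() -> List[str]:
    raise ProviderAPIError()


assert predict_with_fallbacks(failing_predict) == ["ErrorCommand"]
assert predict_with_fallbacks(lambda: []) == ["CannotHandleCommand"]
assert predict_with_fallbacks(lambda: ["StartFlow(x)"]) == ["StartFlow(x)"]
```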
@@ -301,9 +237,9 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
         commands: List[Command] = []

         slot_set_re = re.compile(
-            r"""SetSlot\((\"?[a-zA-Z_][a-zA-Z0-9_-]*?\"?), ?(.*)\)"""
+            r"""SetSlot\(['"]?([a-zA-Z_][a-zA-Z0-9_-]*)['"]?, ?['"]?(.*)['"]?\)"""
         )
-        start_flow_re = re.compile(r"StartFlow\(([a-zA-Z0-9_-]+?)\)")
+        start_flow_re = re.compile(r"StartFlow\(['\"]?([a-zA-Z0-9_-]+)['\"]?\)")
         change_flow_re = re.compile(r"ChangeFlow\(\)")
         cancel_flow_re = re.compile(r"CancelFlow\(\)")
         chitchat_re = re.compile(r"ChitChat\(\)")
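The widened patterns tolerate optional quotes around arguments, which LLMs sometimes emit even when the prompt shows unquoted examples. A quick, runnable check of the old versus new `StartFlow` pattern (both copied from the hunk above):

```python
import re

# Old and new StartFlow patterns, copied from the hunk above.
old_start_flow_re = re.compile(r"StartFlow\(([a-zA-Z0-9_-]+?)\)")
new_start_flow_re = re.compile(r"StartFlow\(['\"]?([a-zA-Z0-9_-]+)['\"]?\)")

action = 'StartFlow("transfer_money")'
assert old_start_flow_re.search(action) is None  # quoted flow id was dropped
match = new_start_flow_re.search(action)
assert match is not None and match.group(1) == "transfer_money"
```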
@@ -352,9 +288,19 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
                 commands.append(HumanHandoffCommand())
             elif match := clarify_re.search(action):
                 options = sorted([opt.strip() for opt in match.group(1).split(",")])
+                # Remove surrounding quotes if present
+                cleaned_options = []
+                for flow in options:
+                    if (flow.startswith('"') and flow.endswith('"')) or (
+                        flow.startswith("'") and flow.endswith("'")
+                    ):
+                        cleaned_options.append(flow[1:-1])
+                    else:
+                        cleaned_options.append(flow)
+                # check if flow is valid
                 valid_options = [
                     flow
-                    for flow in options
+                    for flow in cleaned_options
                     if flow in flows.user_flow_ids
                     and flow not in user_flows_on_the_stack(tracker.stack)
                 ]
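`Clarify(...)` options get the same tolerance: surrounding quotes are stripped before the options are matched against known flow ids. The loop above, condensed into a runnable fragment (the quote-friendly `clarify_re` shown here comes from the single-step hunk further below; the multi-step generator applies the same cleaning):

```python
import re

clarify_re = re.compile(r"Clarify\(([\"\'a-zA-Z0-9_, ]+)\)")  # updated pattern

action = 'Clarify("transfer_money", "check_balance")'
match = clarify_re.search(action)
options = sorted(opt.strip() for opt in match.group(1).split(","))

# Remove surrounding quotes if present (condensed from the loop above).
cleaned_options = [
    opt[1:-1] if opt[:1] in "\"'" and opt[:1] == opt[-1:] else opt
    for opt in options
]
assert cleaned_options == ["check_balance", "transfer_money"]
```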
@@ -365,6 +311,13 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             elif change_flow_re.search(action):
                 commands.append(ChangeFlowCommand())

+        if not commands:
+            structlogger.debug(
+                "multi_step_llm_command_generator.parse_commands",
+                message="No commands were parsed from the LLM actions.",
+                actions=actions,
+            )
+
         return commands

     ### Helper methods
@@ -440,6 +393,97 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
         file_path = path / file_name
         rasa.shared.utils.io.write_text_file(template, file_path)

+    async def _predict_commands_with_multi_step(
+        self,
+        message: Message,
+        flows: FlowsList,
+        tracker: DialogueStateTracker,
+    ) -> List[Command]:
+        """Predict commands using the LLM.
+
+        Args:
+            message: The message from the user.
+            flows: The flows available to the user.
+            tracker: The tracker containing the current state of the conversation.
+
+        Returns:
+            The commands generated by the llm.
+
+        Raises:
+            ProviderClientAPIException: If API calls raised an error.
+        """
+        # retrieve relevant flows
+        filtered_flows = await self.filter_flows(message, flows, tracker)
+
+        # 1st step: Handle active flow
+        if tracker.has_active_user_flow:
+            commands_from_active_flow = await self._predict_commands_for_active_flow(
+                message,
+                tracker,
+                available_flows=filtered_flows,
+                all_flows=flows,
+            )
+        else:
+            commands_from_active_flow = []
+
+        # 2nd step: Check if we need to switch to another flow
+        contains_change_flow_command = any(
+            isinstance(command, ChangeFlowCommand)
+            for command in commands_from_active_flow
+        )
+        should_change_flows = (
+            not commands_from_active_flow or contains_change_flow_command
+        )
+
+        if should_change_flows:
+            commands_for_handling_flows = (
+                await self._predict_commands_for_handling_flows(
+                    message,
+                    tracker,
+                    available_flows=filtered_flows,
+                    all_flows=flows,
+                )
+            )
+        else:
+            commands_for_handling_flows = []
+
+        if contains_change_flow_command:
+            commands_from_active_flow.pop(
+                commands_from_active_flow.index(ChangeFlowCommand())
+            )
+
+        # 3rd step: Fill slots for started flows
+        newly_started_flows = FlowsList(
+            [
+                flow
+                for command in commands_for_handling_flows
+                if (
+                    isinstance(command, StartFlowCommand)
+                    and (flow := filtered_flows.flow_by_id(command.flow)) is not None
+                )
+            ]
+        )
+
+        commands_for_newly_started_flows = (
+            await self._predict_commands_for_newly_started_flows(
+                message,
+                tracker,
+                newly_started_flows=newly_started_flows,
+                all_flows=flows,
+            )
+        )
+
+        # concatenate predicted commands
+        commands = list(
+            set(
+                commands_from_active_flow
+                + commands_for_handling_flows
+                + commands_for_newly_started_flows
+            )
+        )
+
+        return commands
+
     async def _predict_commands_for_active_flow(
         self,
         message: Message,
rasa/dialogue_understanding/generator/nlu_command_adapter.py:

@@ -2,13 +2,15 @@ from typing import Dict, Text, Any, Optional, List

 import structlog

-
 from rasa.dialogue_understanding.commands import (
     Command,
     StartFlowCommand,
     SetSlotCommand,
 )
 from rasa.dialogue_understanding.commands.set_slot_command import SetSlotExtractor
+from rasa.dialogue_understanding.commands.utils import (
+    triggerable_pattern_to_command_class,
+)
 from rasa.dialogue_understanding.generator import CommandGenerator
 from rasa.engine.graph import GraphComponent, ExecutionContext
 from rasa.engine.recipes.default_recipe import DefaultV1Recipe
@@ -17,6 +19,7 @@ from rasa.engine.storage.storage import ModelStorage
 from rasa.shared.constants import ROUTE_TO_CALM_SLOT
 from rasa.shared.core.domain import Domain
 from rasa.shared.core.flows.flows_list import FlowsList
+from rasa.shared.core.flows.steps import CollectInformationFlowStep
 from rasa.shared.core.slot_mappings import (
     SlotFillingManager,
     extract_slot_value,
@@ -25,6 +28,7 @@ from rasa.shared.core.trackers import DialogueStateTracker
 from rasa.shared.nlu.constants import ENTITIES, INTENT
 from rasa.shared.nlu.training_data.message import Message
 from rasa.shared.nlu.training_data.training_data import TrainingData
+from rasa.utils.log_utils import log_llm

 structlogger = structlog.get_logger()
@@ -126,13 +130,22 @@ class NLUCommandAdapter(GraphComponent, CommandGenerator):
             clean_up_commands,
         )

-        structlogger.info("nlu_command_adapter.cleaning_commands", commands=commands)
+        log_llm(
+            logger=structlogger,
+            log_module="NLUCommandAdapter",
+            log_event="nlu_command_adapter.predict_commands.finished",
+            commands=commands,
+        )
+
         if commands:
             commands = clean_up_commands(
                 commands, tracker, flows, self._execution_context
             )
-            structlogger.info(
-                "nlu_command_adapter.clean_commands", clean_commands=commands
+            log_llm(
+                logger=structlogger,
+                log_module="NLUCommandAdapter",
+                log_event="nlu_command_adapter.clean_commands",
+                commands=commands,
             )

         return commands
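`log_llm` replaces plain `structlogger.info` here so that potentially large command/prompt payloads are gated behind a dedicated, per-module log level rather than always emitted at INFO. A sketch of the idea; the exact environment-variable names are an assumption, not rasa's documented interface:

```python
import os

import structlog

structlogger = structlog.get_logger()


def log_llm_sketch(logger, log_module: str, log_event: str, **kwargs) -> None:
    # Per-module override first, then a global LLM log level, else DEBUG
    # (variable names are hypothetical).
    level = os.environ.get(
        f"LOG_LEVEL_LLM_{log_module.upper()}",
        os.environ.get("LOG_LEVEL_LLM", "DEBUG"),
    )
    getattr(logger, level.lower())(log_event, **kwargs)


log_llm_sketch(
    structlogger,
    log_module="NLUCommandAdapter",
    log_event="nlu_command_adapter.predict_commands",
    commands=[],
)
```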
@@ -162,7 +175,12 @@ class NLUCommandAdapter(GraphComponent, CommandGenerator):

         for flow in flows:
             if flow.nlu_triggers and flow.nlu_triggers.is_triggered(message):
-                commands.append(StartFlowCommand(flow.id))
+                if flow.is_rasa_default_flow:
+                    pattern_command = triggerable_pattern_to_command_class.get(flow.id)
+                    if pattern_command:
+                        commands.append(pattern_command())
+                else:
+                    commands.append(StartFlowCommand(flow.id))

         # there should be just one flow that can be triggered by the predicted intent
         # this is checked when loading the flows
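`triggerable_pattern_to_command_class` lives in the new `rasa/dialogue_understanding/commands/utils.py` (file 58 above), whose contents are not shown in this diff. Given the new `RestartCommand` and `SessionStartCommand` modules (files 56-57) and the matching pattern flows (files 65-66), a plausible shape is the following; the pattern ids and classes here are stand-ins:

```python
# Stand-in command classes; the release ships real RestartCommand and
# SessionStartCommand implementations.
class RestartCommand:
    pass


class SessionStartCommand:
    pass


# Assumed mapping from default pattern flow ids to their dedicated commands.
triggerable_pattern_to_command_class = {
    "pattern_restart": RestartCommand,
    "pattern_session_start": SessionStartCommand,
}

# Mirrors the new branch above: an NLU-triggered default pattern flow yields
# its dedicated command instead of a generic StartFlowCommand.
command_class = triggerable_pattern_to_command_class.get("pattern_restart")
assert command_class is RestartCommand
```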
@@ -180,7 +198,12 @@ class NLUCommandAdapter(GraphComponent, CommandGenerator):
         set_slot_commands = _issue_set_slot_commands(message, tracker, flows, domain)
         commands.extend(set_slot_commands)

-        structlogger.info("nlu_command_adapter.predict_commands", commands=commands)
+        log_llm(
+            logger=structlogger,
+            log_module="NLUCommandAdapter",
+            log_event="nlu_command_adapter.predict_commands",
+            commands=commands,
+        )

         return commands
@@ -195,7 +218,24 @@ def _issue_set_slot_commands(
     commands: List[Command] = []
     domain = domain if domain else Domain.empty()
     slot_filling_manager = SlotFillingManager(domain, tracker, message)
-    available_slot_names = flows.available_slot_names()
+
+    # only use slots that don't have ask_before_filling set to True
+    available_slot_names = flows.available_slot_names(ask_before_filling=False)
+
+    # check if the current step is a CollectInformationFlowStep
+    # in case it has ask_before_filling set to True, we need to add the
+    # slot to the available_slot_names
+    if tracker.active_flow:
+        flow = flows.flow_by_id(tracker.active_flow)
+        step_id = tracker.current_step_id
+        if flow is not None:
+            current_step = flow.step_by_id(step_id)
+            if (
+                current_step
+                and isinstance(current_step, CollectInformationFlowStep)
+                and current_step.ask_before_filling
+            ):
+                available_slot_names.add(current_step.collect)

     for _, slot in tracker.slots.items():
         # if a slot is not collected in available flows,
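Net effect: a slot collected with `ask_before_filling: true` is no longer filled opportunistically from arbitrary messages; it becomes fillable only while its own collect step is active. The rule condensed into a self-contained sketch, with a plain dataclass standing in for rasa's flow step:

```python
from dataclasses import dataclass
from typing import Optional, Set


@dataclass
class CollectStep:
    """Stand-in for rasa's CollectInformationFlowStep (illustrative only)."""

    collect: str
    ask_before_filling: bool = False


def eligible_slots(
    ambient_fillable: Set[str],
    active_collect_step: Optional[CollectStep],
) -> Set[str]:
    # Slots guarded by ask_before_filling are excluded from ambient filling;
    # the one exception is the slot the active collect step is asking for.
    slots = set(ambient_fillable)
    if active_collect_step and active_collect_step.ask_before_filling:
        slots.add(active_collect_step.collect)
    return slots


assert eligible_slots({"city"}, None) == {"city"}
assert eligible_slots({"city"}, CollectStep("amount", True)) == {"city", "amount"}
```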
rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py:

@@ -1,8 +1,9 @@
 import importlib.resources
 import re
-import structlog
 from typing import Dict, Any, List, Optional, Text

+import structlog
+
 import rasa.shared.utils.io
 from rasa.dialogue_understanding.commands import (
     Command,
@@ -16,33 +17,39 @@ from rasa.dialogue_understanding.commands import (
     ClarifyCommand,
     CannotHandleCommand,
 )
-from rasa.dialogue_understanding.generator.llm_based_command_generator import (
-    LLMBasedCommandGenerator,
-)
 from rasa.dialogue_understanding.generator.constants import (
     LLM_CONFIG_KEY,
     USER_INPUT_CONFIG_KEY,
     FLOW_RETRIEVAL_KEY,
+    DEFAULT_LLM_CONFIG,
 )
 from rasa.dialogue_understanding.generator.flow_retrieval import (
     FlowRetrieval,
 )
+from rasa.dialogue_understanding.generator.llm_based_command_generator import (
+    LLMBasedCommandGenerator,
+)
 from rasa.dialogue_understanding.stack.utils import top_flow_frame
 from rasa.engine.graph import ExecutionContext
 from rasa.engine.recipes.default_recipe import DefaultV1Recipe
 from rasa.engine.storage.resource import Resource
 from rasa.engine.storage.storage import ModelStorage
-from rasa.shared.constants import ROUTE_TO_CALM_SLOT
+from rasa.shared.constants import (
+    ROUTE_TO_CALM_SLOT,
+    PROMPT_CONFIG_KEY,
+    PROMPT_TEMPLATE_CONFIG_KEY,
+)
 from rasa.shared.core.flows import FlowsList
 from rasa.shared.core.trackers import DialogueStateTracker
-from rasa.shared.nlu.constants import TEXT
-from rasa.shared.nlu.training_data.message import Message
 from rasa.shared.exceptions import ProviderClientAPIException
+from rasa.shared.nlu.constants import TEXT, LLM_COMMANDS, LLM_PROMPT
+from rasa.shared.nlu.training_data.message import Message
 from rasa.shared.utils.io import deep_container_fingerprint
 from rasa.shared.utils.llm import (
     get_prompt_template,
     tracker_as_readable_transcript,
     sanitize_message_for_prompt,
+    try_instantiate_llm_client,
 )
 from rasa.utils.log_utils import log_llm
@@ -82,7 +89,7 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
         )

         # Set the prompt template
-        if config.get("prompt"):
+        if config.get(PROMPT_CONFIG_KEY):
             structlogger.warning(
                 "single_step_llm_command_generator.init",
                 event_info=(
@@ -91,7 +98,11 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
                     "Please use the config parameter 'prompt_template' instead. "
                 ),
             )
-        config_prompt = config.get("prompt") or config.get("prompt_template") or None
+        config_prompt = (
+            config.get(PROMPT_CONFIG_KEY)
+            or config.get(PROMPT_TEMPLATE_CONFIG_KEY)
+            or None
+        )
         self.prompt_template = prompt_template or get_prompt_template(
             config_prompt,
             DEFAULT_COMMAND_PROMPT_TEMPLATE,
@@ -104,8 +115,8 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
     def get_default_config() -> Dict[str, Any]:
         """The component's default config (see parent class for full docstring)."""
         return {
-            "prompt": None,  # Legacy
-            "prompt_template": None,
+            PROMPT_CONFIG_KEY: None,  # Legacy
+            PROMPT_TEMPLATE_CONFIG_KEY: None,
             USER_INPUT_CONFIG_KEY: None,
             LLM_CONFIG_KEY: None,
             FLOW_RETRIEVAL_KEY: FlowRetrieval.get_default_config(),
@@ -127,6 +138,12 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
         )
         # init base command generator
         command_generator = cls(config, model_storage, resource, prompt_template)
+        try_instantiate_llm_client(
+            command_generator.config.get(LLM_CONFIG_KEY),
+            DEFAULT_LLM_CONFIG,
+            "single_step_llm_command_generator.load",
+            SingleStepLLMCommandGenerator.__name__,
+        )
         # load flow retrieval if enabled
         if command_generator.enabled_flow_retrieval:
             command_generator.flow_retrieval = cls.load_flow_retrival(
@@ -167,11 +184,57 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
             # cannot do anything if there are no flows or no tracker
             return []

-        # retrieve flows
         try:
-            filtered_flows = await self.filter_flows(message, flows, tracker)
+            commands = await self._predict_commands(message, flows, tracker)
         except ProviderClientAPIException:
-            return [ErrorCommand()]
+            # if command predictions resulted in API exception
+            # "predict" the ErrorCommand
+            commands = [ErrorCommand()]
+
+        if not commands:
+            # no commands are parsed or there's an invalid command
+            structlogger.warning(
+                "single_step_llm_command_generator.predict_commands",
+                message="No commands were predicted as the LLM response could "
+                "not be parsed or the LLM responded with an invalid command."
+                "Returning a CannotHandleCommand instead.",
+            )
+            commands = [CannotHandleCommand()]
+
+        if tracker.has_coexistence_routing_slot:
+            # if coexistence feature is used, set the routing slot
+            commands += [SetSlotCommand(ROUTE_TO_CALM_SLOT, True)]
+
+        log_llm(
+            logger=structlogger,
+            log_module="SingleStepLLMCommandGenerator",
+            log_event="llm_command_generator.predict_commands.finished",
+            commands=commands,
+        )
+
+        return commands
+
+    async def _predict_commands(
+        self,
+        message: Message,
+        flows: FlowsList,
+        tracker: Optional[DialogueStateTracker] = None,
+    ) -> List[Command]:
+        """Predict commands using the LLM.
+
+        Args:
+            message: The message from the user.
+            flows: The flows available to the user.
+            tracker: The tracker containing the current state of the conversation.
+
+        Returns:
+            The commands generated by the llm.
+
+        Raises:
+            ProviderClientAPIException: If API calls raised an error.
+        """
+        # retrieve flows
+        filtered_flows = await self.filter_flows(message, flows, tracker)

         flow_prompt = self.render_template(message, tracker, filtered_flows, flows)
         log_llm(
@@ -181,14 +244,11 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=flow_prompt,
         )

-        try:
-            action_list = await self.invoke_llm(flow_prompt)
-            # The check for 'None' maintains compatibility with older versions
-            # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
-            # might return 'None' to indicate a failure to generate actions.
-            if action_list is None:
-                return [ErrorCommand()]
-        except ProviderClientAPIException:
+        action_list = await self.invoke_llm(flow_prompt)
+        # The check for 'None' maintains compatibility with older versions
+        # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
+        # might return 'None' to indicate a failure to generate actions.
+        if action_list is None:
             return [ErrorCommand()]

         log_llm(
@@ -200,23 +260,26 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):

         commands = self.parse_commands(action_list, tracker, flows)

-        if not commands:
-            # no commands are parsed or there's an invalid command
-            commands = [CannotHandleCommand()]
-        else:
-            # if the LLM command generator predicted valid commands and the
-            # coexistence feature is used, set the routing slot
-            if tracker.has_coexistence_routing_slot:
-                commands += [SetSlotCommand(ROUTE_TO_CALM_SLOT, True)]
+        self._update_message_parse_data_for_fine_tuning(message, commands, flow_prompt)

-        log_llm(
-            logger=structlogger,
-            log_module="SingleStepLLMCommandGenerator",
-            log_event="llm_command_generator.predict_commands.finished",
-            commands=commands,
-        )
         return commands

+    @staticmethod
+    def _update_message_parse_data_for_fine_tuning(
+        message: Message, commands: List[Command], prompt: str
+    ) -> None:
+        from rasa.llm_fine_tuning.annotation_module import preparing_fine_tuning_data
+
+        if preparing_fine_tuning_data:
+            # Add commands and prompt to the message object in order to create
+            # prompt -> commands pairs for fine-tuning
+            message.set(
+                LLM_COMMANDS,
+                [command.as_dict() for command in commands],
+                add_to_output=True,
+            )
+            message.set(LLM_PROMPT, prompt, add_to_output=True)
+
     @classmethod
     def parse_commands(
         cls, actions: Optional[str], tracker: DialogueStateTracker, flows: FlowsList
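The captured prompt -> commands pairs feed the new fine-tuning pipeline under `rasa/llm_fine_tuning/` (files 95-107 above). Illustratively, each annotated message can later become one instruction-tuning record; the field names below are placeholders, not the schema used by `llm_data_preparation_module.py`:

```python
import json

# Values as they would be captured on the Message object during annotation.
captured_prompt = "<rendered command prompt>"  # stored under LLM_PROMPT
captured_commands = [{"command": "start flow", "flow": "transfer_money"}]

# One hypothetical prompt -> completion pair for supervised fine-tuning; the
# commands are re-serialized into the DSL the prompt asks the LLM to emit.
record = {
    "prompt": captured_prompt,
    "completion": "StartFlow(transfer_money)",
}
print(json.dumps(record))
```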
@@ -236,14 +299,16 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):

         commands: List[Command] = []

-        slot_set_re = re.compile(r"""SetSlot\(([a-zA-Z_][a-zA-Z0-9_-]*?), ?(.*)\)""")
-        start_flow_re = re.compile(r"StartFlow\(([a-zA-Z0-9_-]+?)\)")
+        slot_set_re = re.compile(
+            r"""SetSlot\(['"]?([a-zA-Z_][a-zA-Z0-9_-]*)['"]?, ?['"]?(.*)['"]?\)"""
+        )
+        start_flow_re = re.compile(r"StartFlow\(['\"]?([a-zA-Z0-9_-]+)['\"]?\)")
         cancel_flow_re = re.compile(r"CancelFlow\(\)")
         chitchat_re = re.compile(r"ChitChat\(\)")
         skip_question_re = re.compile(r"SkipQuestion\(\)")
         knowledge_re = re.compile(r"SearchAndReply\(\)")
         humand_handoff_re = re.compile(r"HumanHandoff\(\)")
-        clarify_re = re.compile(r"Clarify\(([a-zA-Z0-9_, ]+)\)")
+        clarify_re = re.compile(r"Clarify\(([\"\'a-zA-Z0-9_, ]+)\)")

         for action in actions.strip().splitlines():
             if match := slot_set_re.search(action):
@@ -272,20 +337,41 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
                 commands.append(HumanHandoffCommand())
             elif match := clarify_re.search(action):
                 options = sorted([opt.strip() for opt in match.group(1).split(",")])
+                # Remove surrounding quotes if present
+                cleaned_options = []
+                for flow in options:
+                    if (flow.startswith('"') and flow.endswith('"')) or (
+                        flow.startswith("'") and flow.endswith("'")
+                    ):
+                        cleaned_options.append(flow[1:-1])
+                    else:
+                        cleaned_options.append(flow)
+                # check if flow is valid
                 valid_options = [
-                    flow for flow in options if flow in flows.user_flow_ids
+                    flow for flow in cleaned_options if flow in flows.user_flow_ids
                 ]
                 if len(set(valid_options)) == 1:
                     commands.extend(cls.start_flow_by_name(valid_options[0], flows))
                 elif len(valid_options) > 1:
                     commands.append(ClarifyCommand(valid_options))

+        if not commands:
+            structlogger.debug(
+                "single_step_llm_command_generator.parse_commands",
+                message="No commands were parsed from the LLM actions.",
+                actions=actions,
+            )
+
         return commands

     @classmethod
     def fingerprint_addon(cls: Any, config: Dict[str, Any]) -> Optional[str]:
         """Add a fingerprint of the knowledge base for the graph."""
-        config_prompt = config.get("prompt") or config.get("prompt_template") or None
+        config_prompt = (
+            config.get(PROMPT_CONFIG_KEY)
+            or config.get(PROMPT_TEMPLATE_CONFIG_KEY)
+            or None
+        )
         prompt_template = get_prompt_template(
             config_prompt,
             DEFAULT_COMMAND_PROMPT_TEMPLATE,