rasa-pro 3.12.0.dev13__py3-none-any.whl → 3.12.0rc1__py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of rasa-pro might be problematic.

Files changed (128)
  1. rasa/anonymization/anonymization_rule_executor.py +16 -10
  2. rasa/cli/data.py +16 -0
  3. rasa/cli/project_templates/calm/config.yml +2 -2
  4. rasa/cli/project_templates/calm/endpoints.yml +2 -2
  5. rasa/cli/utils.py +12 -0
  6. rasa/core/actions/action.py +84 -191
  7. rasa/core/actions/action_run_slot_rejections.py +16 -4
  8. rasa/core/channels/__init__.py +2 -0
  9. rasa/core/channels/studio_chat.py +19 -0
  10. rasa/core/channels/telegram.py +42 -24
  11. rasa/core/channels/voice_ready/utils.py +1 -1
  12. rasa/core/channels/voice_stream/asr/asr_engine.py +10 -4
  13. rasa/core/channels/voice_stream/asr/azure.py +14 -1
  14. rasa/core/channels/voice_stream/asr/deepgram.py +20 -4
  15. rasa/core/channels/voice_stream/audiocodes.py +264 -0
  16. rasa/core/channels/voice_stream/browser_audio.py +4 -1
  17. rasa/core/channels/voice_stream/call_state.py +3 -0
  18. rasa/core/channels/voice_stream/genesys.py +6 -2
  19. rasa/core/channels/voice_stream/tts/azure.py +9 -1
  20. rasa/core/channels/voice_stream/tts/cartesia.py +14 -8
  21. rasa/core/channels/voice_stream/voice_channel.py +23 -2
  22. rasa/core/constants.py +2 -0
  23. rasa/core/nlg/contextual_response_rephraser.py +18 -1
  24. rasa/core/nlg/generator.py +83 -15
  25. rasa/core/nlg/response.py +6 -3
  26. rasa/core/nlg/translate.py +55 -0
  27. rasa/core/policies/enterprise_search_prompt_with_citation_template.jinja2 +1 -1
  28. rasa/core/policies/flows/flow_executor.py +12 -5
  29. rasa/core/processor.py +72 -9
  30. rasa/dialogue_understanding/commands/can_not_handle_command.py +20 -2
  31. rasa/dialogue_understanding/commands/cancel_flow_command.py +24 -6
  32. rasa/dialogue_understanding/commands/change_flow_command.py +20 -2
  33. rasa/dialogue_understanding/commands/chit_chat_answer_command.py +20 -2
  34. rasa/dialogue_understanding/commands/clarify_command.py +29 -3
  35. rasa/dialogue_understanding/commands/command.py +1 -16
  36. rasa/dialogue_understanding/commands/command_syntax_manager.py +55 -0
  37. rasa/dialogue_understanding/commands/human_handoff_command.py +20 -2
  38. rasa/dialogue_understanding/commands/knowledge_answer_command.py +20 -2
  39. rasa/dialogue_understanding/commands/prompt_command.py +94 -0
  40. rasa/dialogue_understanding/commands/repeat_bot_messages_command.py +20 -2
  41. rasa/dialogue_understanding/commands/set_slot_command.py +24 -2
  42. rasa/dialogue_understanding/commands/skip_question_command.py +20 -2
  43. rasa/dialogue_understanding/commands/start_flow_command.py +20 -2
  44. rasa/dialogue_understanding/commands/utils.py +98 -4
  45. rasa/dialogue_understanding/generator/__init__.py +2 -0
  46. rasa/dialogue_understanding/generator/command_parser.py +15 -12
  47. rasa/dialogue_understanding/generator/constants.py +3 -0
  48. rasa/dialogue_understanding/generator/llm_based_command_generator.py +12 -5
  49. rasa/dialogue_understanding/generator/llm_command_generator.py +5 -3
  50. rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +16 -2
  51. rasa/dialogue_understanding/generator/prompt_templates/__init__.py +0 -0
  52. rasa/dialogue_understanding/generator/{single_step → prompt_templates}/command_prompt_template.jinja2 +2 -0
  53. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2 +77 -0
  54. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_default.jinja2 +68 -0
  55. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 +84 -0
  56. rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +460 -0
  57. rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +12 -310
  58. rasa/dialogue_understanding/patterns/collect_information.py +1 -1
  59. rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +16 -0
  60. rasa/dialogue_understanding/patterns/validate_slot.py +65 -0
  61. rasa/dialogue_understanding/processor/command_processor.py +39 -0
  62. rasa/dialogue_understanding_test/du_test_case.py +28 -8
  63. rasa/dialogue_understanding_test/du_test_result.py +13 -9
  64. rasa/dialogue_understanding_test/io.py +14 -0
  65. rasa/e2e_test/utils/io.py +0 -37
  66. rasa/engine/graph.py +1 -0
  67. rasa/engine/language.py +140 -0
  68. rasa/engine/recipes/config_files/default_config.yml +4 -0
  69. rasa/engine/recipes/default_recipe.py +2 -0
  70. rasa/engine/recipes/graph_recipe.py +2 -0
  71. rasa/engine/storage/local_model_storage.py +1 -0
  72. rasa/engine/storage/storage.py +4 -1
  73. rasa/model_manager/runner_service.py +7 -4
  74. rasa/model_manager/socket_bridge.py +7 -6
  75. rasa/shared/constants.py +15 -13
  76. rasa/shared/core/constants.py +2 -0
  77. rasa/shared/core/flows/constants.py +11 -0
  78. rasa/shared/core/flows/flow.py +83 -19
  79. rasa/shared/core/flows/flows_yaml_schema.json +31 -3
  80. rasa/shared/core/flows/steps/collect.py +1 -36
  81. rasa/shared/core/flows/utils.py +28 -4
  82. rasa/shared/core/flows/validation.py +1 -1
  83. rasa/shared/core/slot_mappings.py +208 -5
  84. rasa/shared/core/slots.py +131 -1
  85. rasa/shared/core/trackers.py +74 -1
  86. rasa/shared/importers/importer.py +50 -2
  87. rasa/shared/nlu/training_data/schemas/responses.yml +19 -12
  88. rasa/shared/providers/_configs/azure_entra_id_config.py +541 -0
  89. rasa/shared/providers/_configs/azure_openai_client_config.py +138 -3
  90. rasa/shared/providers/_configs/client_config.py +3 -1
  91. rasa/shared/providers/_configs/default_litellm_client_config.py +3 -1
  92. rasa/shared/providers/_configs/huggingface_local_embedding_client_config.py +3 -1
  93. rasa/shared/providers/_configs/litellm_router_client_config.py +3 -1
  94. rasa/shared/providers/_configs/model_group_config.py +4 -2
  95. rasa/shared/providers/_configs/oauth_config.py +33 -0
  96. rasa/shared/providers/_configs/openai_client_config.py +3 -1
  97. rasa/shared/providers/_configs/rasa_llm_client_config.py +3 -1
  98. rasa/shared/providers/_configs/self_hosted_llm_client_config.py +3 -1
  99. rasa/shared/providers/constants.py +6 -0
  100. rasa/shared/providers/embedding/azure_openai_embedding_client.py +28 -3
  101. rasa/shared/providers/embedding/litellm_router_embedding_client.py +3 -1
  102. rasa/shared/providers/llm/_base_litellm_client.py +42 -17
  103. rasa/shared/providers/llm/azure_openai_llm_client.py +81 -25
  104. rasa/shared/providers/llm/default_litellm_llm_client.py +3 -1
  105. rasa/shared/providers/llm/litellm_router_llm_client.py +29 -8
  106. rasa/shared/providers/llm/llm_client.py +23 -7
  107. rasa/shared/providers/llm/openai_llm_client.py +9 -3
  108. rasa/shared/providers/llm/rasa_llm_client.py +11 -2
  109. rasa/shared/providers/llm/self_hosted_llm_client.py +30 -11
  110. rasa/shared/providers/router/_base_litellm_router_client.py +3 -1
  111. rasa/shared/providers/router/router_client.py +3 -1
  112. rasa/shared/utils/constants.py +3 -0
  113. rasa/shared/utils/llm.py +30 -7
  114. rasa/shared/utils/pykwalify_extensions.py +24 -0
  115. rasa/shared/utils/schemas/domain.yml +26 -0
  116. rasa/telemetry.py +2 -1
  117. rasa/tracing/config.py +2 -0
  118. rasa/tracing/constants.py +12 -0
  119. rasa/tracing/instrumentation/instrumentation.py +36 -0
  120. rasa/tracing/instrumentation/metrics.py +41 -0
  121. rasa/tracing/metric_instrument_provider.py +40 -0
  122. rasa/validator.py +372 -7
  123. rasa/version.py +1 -1
  124. {rasa_pro-3.12.0.dev13.dist-info → rasa_pro-3.12.0rc1.dist-info}/METADATA +2 -1
  125. {rasa_pro-3.12.0.dev13.dist-info → rasa_pro-3.12.0rc1.dist-info}/RECORD +128 -113
  126. {rasa_pro-3.12.0.dev13.dist-info → rasa_pro-3.12.0rc1.dist-info}/NOTICE +0 -0
  127. {rasa_pro-3.12.0.dev13.dist-info → rasa_pro-3.12.0rc1.dist-info}/WHEEL +0 -0
  128. {rasa_pro-3.12.0.dev13.dist-info → rasa_pro-3.12.0rc1.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,460 @@
+from typing import Any, Dict, List, Optional, Text
+
+import structlog
+
+import rasa.shared.utils.io
+from rasa.dialogue_understanding.commands import (
+    CannotHandleCommand,
+    Command,
+    ErrorCommand,
+    SetSlotCommand,
+)
+from rasa.dialogue_understanding.commands.command_syntax_manager import (
+    CommandSyntaxManager,
+    CommandSyntaxVersion,
+)
+from rasa.dialogue_understanding.generator import LLMBasedCommandGenerator
+from rasa.dialogue_understanding.generator.command_parser import (
+    parse_commands as parse_commands_using_command_parsers,
+)
+from rasa.dialogue_understanding.generator.constants import (
+    COMMAND_PROMPT_FILE_NAME,
+    DEFAULT_LLM_CONFIG,
+    FLOW_RETRIEVAL_KEY,
+    LLM_BASED_COMMAND_GENERATOR_CONFIG_FILE,
+    LLM_CONFIG_KEY,
+    USER_INPUT_CONFIG_KEY,
+)
+from rasa.dialogue_understanding.generator.flow_retrieval import FlowRetrieval
+from rasa.dialogue_understanding.stack.utils import top_flow_frame
+from rasa.dialogue_understanding.utils import (
+    add_commands_to_message_parse_data,
+    add_prompt_to_message_parse_data,
+)
+from rasa.engine.graph import ExecutionContext
+from rasa.engine.recipes.default_recipe import DefaultV1Recipe
+from rasa.engine.storage.resource import Resource
+from rasa.engine.storage.storage import ModelStorage
+from rasa.shared.constants import (
+    EMBEDDINGS_CONFIG_KEY,
+    PROMPT_TEMPLATE_CONFIG_KEY,
+    ROUTE_TO_CALM_SLOT,
+)
+from rasa.shared.core.flows import FlowsList
+from rasa.shared.core.trackers import DialogueStateTracker
+from rasa.shared.exceptions import ProviderClientAPIException
+from rasa.shared.nlu.constants import LLM_COMMANDS, LLM_PROMPT, TEXT
+from rasa.shared.nlu.training_data.message import Message
+from rasa.shared.providers.llm.llm_response import LLMResponse
+from rasa.shared.utils.io import deep_container_fingerprint
+from rasa.shared.utils.llm import (
+    allowed_values_for_slot,
+    get_default_prompt_template_based_on_model,
+    get_prompt_template,
+    resolve_model_client_config,
+    sanitize_message_for_prompt,
+    tracker_as_readable_transcript,
+)
+from rasa.utils.beta import BetaNotEnabledException, ensure_beta_feature_is_enabled
+from rasa.utils.log_utils import log_llm
+
+structlogger = structlog.get_logger()
+
+
+MODEL_PROMPT_MAPPER = {
+    "openai/gpt-4o-2024-11-20": "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2",
+    "azure/gpt-4o-2024-11-20": "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2",
+    "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0": (
+        "command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2"
+    ),
+    "anthropic/claude-3-5-sonnet-20240620": (
+        "command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2"
+    ),
+}
+
+
+DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME = "command_prompt_v2_default.jinja2"
+
+
+@DefaultV1Recipe.register(
+    [
+        DefaultV1Recipe.ComponentType.COMMAND_GENERATOR,
+    ],
+    is_trainable=True,
+)
+class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
+    """A single step LLM-based command generator."""
+
+    def __init__(
+        self,
+        config: Dict[str, Any],
+        model_storage: ModelStorage,
+        resource: Resource,
+        prompt_template: Optional[Text] = None,
+        **kwargs: Any,
+    ) -> None:
+        super().__init__(
+            config,
+            model_storage,
+            resource,
+            prompt_template=prompt_template,
+            **kwargs,
+        )
+
+        # Get the default prompt template based on the model name
+        default_command_prompt_template = get_default_prompt_template_based_on_model(
+            config, MODEL_PROMPT_MAPPER, DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME
+        )
+
+        # Set the prompt template either from the config or the default
+        # prompt template.
+        self.prompt_template = prompt_template or get_prompt_template(
+            config.get(PROMPT_TEMPLATE_CONFIG_KEY),
+            default_command_prompt_template,
+        )
+
+        self.trace_prompt_tokens = self.config.get("trace_prompt_tokens", False)
+        self.repeat_command_enabled = self.is_repeat_command_enabled()
+
+        # Set the command syntax version to v2
+        CommandSyntaxManager.set_syntax_version(CommandSyntaxVersion.v2)
+
+    ### Implementations of LLMBasedCommandGenerator parent
+    @staticmethod
+    def get_default_config() -> Dict[str, Any]:
+        """The component's default config (see parent class for full docstring)."""
+        return {
+            PROMPT_TEMPLATE_CONFIG_KEY: None,
+            USER_INPUT_CONFIG_KEY: None,
+            LLM_CONFIG_KEY: None,
+            FLOW_RETRIEVAL_KEY: FlowRetrieval.get_default_config(),
+        }
+
+    def persist(self) -> None:
+        """Persist this component to disk for future loading."""
+        self._persist_prompt_template()
+        self._persist_config()
+        if self.flow_retrieval is not None:
+            self.flow_retrieval.persist()
+
+    def _persist_prompt_template(self) -> None:
+        """Persist prompt template for future loading."""
+        with self._model_storage.write_to(self._resource) as path:
+            rasa.shared.utils.io.write_text_file(
+                self.prompt_template, path / COMMAND_PROMPT_FILE_NAME
+            )
+
+    def _persist_config(self) -> None:
+        """Persist config as a source of truth for resolved clients."""
+        with self._model_storage.write_to(self._resource) as path:
+            rasa.shared.utils.io.dump_obj_as_json_to_file(
+                path / LLM_BASED_COMMAND_GENERATOR_CONFIG_FILE, self.config
+            )
+
+    @classmethod
+    def load(
+        cls: Any,
+        config: Dict[str, Any],
+        model_storage: ModelStorage,
+        resource: Resource,
+        execution_context: ExecutionContext,
+        **kwargs: Any,
+    ) -> "CompactLLMCommandGenerator":
+        """Loads trained component (see parent class for full docstring)."""
+        # Perform health check of the LLM API endpoint
+        llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
+        cls.perform_llm_health_check(
+            llm_config,
+            DEFAULT_LLM_CONFIG,
+            "compact_llm_command_generator.load",
+            cls.__name__,
+        )
+
+        # load prompt template from the model storage.
+        prompt_template = cls.load_prompt_template_from_model_storage(
+            model_storage, resource, COMMAND_PROMPT_FILE_NAME
+        )
+
+        # init base command generator
+        command_generator = cls(config, model_storage, resource, prompt_template)
+        # load flow retrieval if enabled
+        if command_generator.enabled_flow_retrieval:
+            command_generator.flow_retrieval = cls.load_flow_retrival(
+                command_generator.config, model_storage, resource
+            )
+
+        return command_generator
+
+    async def predict_commands(
+        self,
+        message: Message,
+        flows: FlowsList,
+        tracker: Optional[DialogueStateTracker] = None,
+        **kwargs: Any,
+    ) -> List[Command]:
+        """Predict commands using the LLM.
+
+        Args:
+            message: The message from the user.
+            flows: The flows available to the user.
+            tracker: The tracker containing the current state of the conversation.
+            **kwargs: Keyword arguments for forward compatibility.
+
+        Returns:
+            The commands generated by the llm.
+        """
+        prior_commands = self._get_prior_commands(message)
+
+        if tracker is None or flows.is_empty():
+            # cannot do anything if there are no flows or no tracker
+            return prior_commands
+
+        if self._should_skip_llm_call(prior_commands, flows, tracker):
+            return prior_commands
+
+        try:
+            commands = await self._predict_commands(message, flows, tracker)
+        except ProviderClientAPIException:
+            # if command prediction resulted in an API exception,
+            # "predict" the ErrorCommand
+            commands = [ErrorCommand()]
+            structlogger.warning(
+                "llm_command_generator.predict_commands.api_exception",
+                event_info=(
+                    "ProviderClientAPIException occurred while predicting commands."
+                ),
+                commands=commands,
+            )
+
+        if not commands and not prior_commands:
+            # no commands are parsed or there's an invalid command
+            structlogger.warning(
+                "llm_command_generator.predict_commands",
+                message="No commands were predicted as the LLM response could "
+                "not be parsed or the LLM responded with an invalid command. "
+                "Returning a CannotHandleCommand instead.",
+            )
+            commands = [CannotHandleCommand()]
+
+        if tracker.has_coexistence_routing_slot:
+            # if coexistence feature is used, set the routing slot
+            commands += [SetSlotCommand(ROUTE_TO_CALM_SLOT, True)]
+
+        log_llm(
+            logger=structlogger,
+            log_module=self.__class__.__name__,
+            log_event="llm_command_generator.predict_commands.finished",
+            commands=commands,
+        )
+
+        domain = kwargs.get("domain")
+        commands = self._check_commands_against_slot_mappings(commands, tracker, domain)
+
+        return self._check_commands_overlap(prior_commands, commands)
+
+    async def _predict_commands(
+        self,
+        message: Message,
+        flows: FlowsList,
+        tracker: Optional[DialogueStateTracker] = None,
+    ) -> List[Command]:
+        """Predict commands using the LLM.
+
+        Args:
+            message: The message from the user.
+            flows: The flows available to the user.
+            tracker: The tracker containing the current state of the conversation.
+
+        Returns:
+            The commands generated by the llm.
+
+        Raises:
+            ProviderClientAPIException: If API calls raised an error.
+        """
+        # retrieve flows
+        filtered_flows = await self.filter_flows(message, flows, tracker)
+
+        flow_prompt = self.render_template(message, tracker, filtered_flows, flows)
+        log_llm(
+            logger=structlogger,
+            log_module=self.__class__.__name__,
+            log_event="llm_command_generator.predict_commands.prompt_rendered",
+            prompt=flow_prompt,
+        )
+
+        response = await self.invoke_llm(flow_prompt)
+        llm_response = LLMResponse.ensure_llm_response(response)
+        # The check for 'None' maintains compatibility with older versions
+        # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
+        # might return 'None' to indicate a failure to generate actions.
+        if llm_response is None or not llm_response.choices:
+            structlogger.warning(
+                "llm_command_generator.predict_commands.no_actions_generated",
+                event_info=(
+                    "No actions were generated by the LLM. Returning an ErrorCommand."
+                ),
+            )
+            return [ErrorCommand()]
+
+        action_list = llm_response.choices[0]
+
+        log_llm(
+            logger=structlogger,
+            log_module=self.__class__.__name__,
+            log_event="llm_command_generator.predict_commands.actions_generated",
+            action_list=action_list,
+        )
+
+        commands = self.parse_commands(action_list, tracker, flows)
+
+        self._update_message_parse_data_for_fine_tuning(message, commands, flow_prompt)
+        add_commands_to_message_parse_data(message, self.__class__.__name__, commands)
+        add_prompt_to_message_parse_data(
+            message=message,
+            component_name=self.__class__.__name__,
+            prompt_name="command_generator_prompt",
+            user_prompt=flow_prompt,
+            llm_response=llm_response,
+        )
+
+        return commands
+
+    @staticmethod
+    def _update_message_parse_data_for_fine_tuning(
+        message: Message, commands: List[Command], prompt: str
+    ) -> None:
+        from rasa.llm_fine_tuning.annotation_module import preparing_fine_tuning_data
+
+        if preparing_fine_tuning_data:
+            # Add commands and prompt to the message object in order to create
+            # prompt -> commands pairs for fine-tuning
+            message.set(
+                LLM_COMMANDS,
+                [command.as_dict() for command in commands],
+                add_to_output=True,
+            )
+            message.set(LLM_PROMPT, prompt, add_to_output=True)
+
+    @classmethod
+    def parse_commands(
+        cls, actions: Optional[str], tracker: DialogueStateTracker, flows: FlowsList
+    ) -> List[Command]:
+        """Parse the actions returned by the llm into intent and entities.
+
+        Args:
+            actions: The actions returned by the llm.
+            tracker: The tracker containing the current state of the conversation.
+            flows: the list of flows
+
+        Returns:
+            The parsed commands.
+        """
+        commands = parse_commands_using_command_parsers(actions, flows)
+        if not commands:
+            structlogger.debug(
+                f"{cls.__name__}.parse_commands",
+                message="No commands were parsed from the LLM actions.",
+                actions=actions,
+            )
+
+        return commands
+
+    ### Helper methods
+    def render_template(
+        self,
+        message: Message,
+        tracker: DialogueStateTracker,
+        startable_flows: FlowsList,
+        all_flows: FlowsList,
+    ) -> str:
+        """Render the jinja template to create the prompt for the LLM.
+
+        Args:
+            message: The current message from the user.
+            tracker: The tracker containing the current state of the conversation.
+            startable_flows: The flows startable at this point in time by the user.
+            all_flows: all flows present in the assistant
+
+        Returns:
+            The rendered prompt template.
+        """
+        # need to make this distinction here because current step of the
+        # top_calling_frame would be the call step, but we need the collect step from
+        # the called frame. If no call is active calling and called frame are the same.
+        top_calling_frame = top_flow_frame(tracker.stack)
+        top_called_frame = top_flow_frame(tracker.stack, ignore_call_frames=False)
+
+        top_flow = top_calling_frame.flow(all_flows) if top_calling_frame else None
+        current_step = top_called_frame.step(all_flows) if top_called_frame else None
+
+        flow_slots = self.prepare_current_flow_slots_for_template(
+            top_flow, current_step, tracker
+        )
+        current_slot, current_slot_description = self.prepare_current_slot_for_template(
+            current_step
+        )
+        current_slot_type = None
+        current_slot_allowed_values = None
+        if current_slot:
+            current_slot_type = (
+                slot.type_name
+                if (slot := tracker.slots.get(current_slot)) is not None
+                else None
+            )
+            current_slot_allowed_values = allowed_values_for_slot(
+                tracker.slots.get(current_slot)
+            )
+        current_conversation = tracker_as_readable_transcript(tracker)
+        latest_user_message = sanitize_message_for_prompt(message.get(TEXT))
+        current_conversation += f"\nUSER: {latest_user_message}"
+
+        inputs = {
+            "available_flows": self.prepare_flows_for_template(
+                startable_flows, tracker
+            ),
+            "current_conversation": current_conversation,
+            "flow_slots": flow_slots,
+            "current_flow": top_flow.id if top_flow is not None else None,
+            "current_slot": current_slot,
+            "current_slot_description": current_slot_description,
+            "current_slot_type": current_slot_type,
+            "current_slot_allowed_values": current_slot_allowed_values,
+            "user_message": latest_user_message,
+            "is_repeat_command_enabled": self.repeat_command_enabled,
+        }
+
+        return self.compile_template(self.prompt_template).render(**inputs)
+
+    def is_repeat_command_enabled(self) -> bool:
+        """Check for the repeat command beta feature flag."""
+        RASA_PRO_BETA_REPEAT_COMMAND_ENV_VAR_NAME = "RASA_PRO_BETA_REPEAT_COMMAND"
+        try:
+            ensure_beta_feature_is_enabled(
+                "Repeat Command",
+                env_flag=RASA_PRO_BETA_REPEAT_COMMAND_ENV_VAR_NAME,
+            )
+        except BetaNotEnabledException:
+            return False
+
+        return True
+
+    @classmethod
+    def fingerprint_addon(cls: Any, config: Dict[str, Any]) -> Optional[str]:
+        """Add a fingerprint for the graph."""
+        # Get the default prompt template based on the model name
+        default_command_prompt_template = get_default_prompt_template_based_on_model(
+            config, MODEL_PROMPT_MAPPER, DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME
+        )
+        prompt_template = get_prompt_template(
+            config.get(PROMPT_TEMPLATE_CONFIG_KEY),
+            default_command_prompt_template,
+        )
+        llm_config = resolve_model_client_config(
+            config.get(LLM_CONFIG_KEY), CompactLLMCommandGenerator.__name__
+        )
+        embedding_config = resolve_model_client_config(
+            config.get(FLOW_RETRIEVAL_KEY, {}).get(EMBEDDINGS_CONFIG_KEY),
+            FlowRetrieval.__name__,
+        )
+        return deep_container_fingerprint(
+            [prompt_template, llm_config, embedding_config]
+        )