rasa_pro-3.13.0.dev7-py3-none-any.whl → rasa_pro-3.13.0.dev8-py3-none-any.whl

This diff shows the changes between publicly available package versions released to one of the supported registries, as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release: this version of rasa-pro might be problematic.
Files changed (150)
  1. rasa/__main__.py +0 -3
  2. rasa/api.py +1 -1
  3. rasa/cli/dialogue_understanding_test.py +1 -1
  4. rasa/cli/e2e_test.py +1 -1
  5. rasa/cli/evaluate.py +1 -1
  6. rasa/cli/export.py +1 -1
  7. rasa/cli/llm_fine_tuning.py +12 -11
  8. rasa/cli/project_templates/defaults.py +133 -0
  9. rasa/cli/run.py +1 -1
  10. rasa/cli/studio/link.py +53 -0
  11. rasa/cli/studio/pull.py +78 -0
  12. rasa/cli/studio/push.py +78 -0
  13. rasa/cli/studio/studio.py +12 -0
  14. rasa/cli/studio/upload.py +8 -0
  15. rasa/cli/train.py +1 -1
  16. rasa/cli/utils.py +1 -1
  17. rasa/cli/x.py +1 -1
  18. rasa/constants.py +2 -0
  19. rasa/core/__init__.py +0 -16
  20. rasa/core/actions/action.py +5 -1
  21. rasa/core/actions/action_repeat_bot_messages.py +18 -22
  22. rasa/core/actions/action_run_slot_rejections.py +0 -1
  23. rasa/core/agent.py +16 -1
  24. rasa/core/available_endpoints.py +146 -0
  25. rasa/core/brokers/pika.py +1 -2
  26. rasa/core/channels/botframework.py +2 -2
  27. rasa/core/channels/channel.py +2 -2
  28. rasa/core/channels/hangouts.py +8 -5
  29. rasa/core/channels/mattermost.py +1 -1
  30. rasa/core/channels/rasa_chat.py +2 -4
  31. rasa/core/channels/rest.py +5 -4
  32. rasa/core/channels/studio_chat.py +3 -2
  33. rasa/core/channels/vier_cvg.py +1 -2
  34. rasa/core/channels/voice_ready/audiocodes.py +1 -8
  35. rasa/core/channels/voice_stream/audiocodes.py +7 -4
  36. rasa/core/channels/voice_stream/genesys.py +2 -2
  37. rasa/core/channels/voice_stream/twilio_media_streams.py +10 -5
  38. rasa/core/channels/voice_stream/voice_channel.py +33 -22
  39. rasa/core/http_interpreter.py +3 -7
  40. rasa/core/jobs.py +2 -1
  41. rasa/core/nlg/contextual_response_rephraser.py +34 -9
  42. rasa/core/nlg/generator.py +0 -1
  43. rasa/core/nlg/interpolator.py +2 -3
  44. rasa/core/nlg/summarize.py +39 -5
  45. rasa/core/policies/enterprise_search_policy.py +283 -62
  46. rasa/core/policies/enterprise_search_prompt_with_relevancy_check_and_citation_template.jinja2 +63 -0
  47. rasa/core/policies/flow_policy.py +1 -1
  48. rasa/core/policies/flows/flow_executor.py +96 -17
  49. rasa/core/policies/intentless_policy.py +9 -7
  50. rasa/core/processor.py +104 -51
  51. rasa/core/run.py +33 -11
  52. rasa/core/tracker_stores/tracker_store.py +1 -1
  53. rasa/core/training/interactive.py +1 -1
  54. rasa/core/utils.py +24 -97
  55. rasa/dialogue_understanding/coexistence/intent_based_router.py +2 -1
  56. rasa/dialogue_understanding/commands/can_not_handle_command.py +2 -0
  57. rasa/dialogue_understanding/commands/cancel_flow_command.py +2 -0
  58. rasa/dialogue_understanding/commands/chit_chat_answer_command.py +2 -0
  59. rasa/dialogue_understanding/commands/clarify_command.py +5 -1
  60. rasa/dialogue_understanding/commands/command_syntax_manager.py +1 -0
  61. rasa/dialogue_understanding/commands/human_handoff_command.py +2 -0
  62. rasa/dialogue_understanding/commands/knowledge_answer_command.py +4 -2
  63. rasa/dialogue_understanding/commands/repeat_bot_messages_command.py +2 -0
  64. rasa/dialogue_understanding/commands/set_slot_command.py +11 -1
  65. rasa/dialogue_understanding/commands/skip_question_command.py +2 -0
  66. rasa/dialogue_understanding/commands/start_flow_command.py +4 -0
  67. rasa/dialogue_understanding/commands/utils.py +26 -2
  68. rasa/dialogue_understanding/generator/__init__.py +7 -1
  69. rasa/dialogue_understanding/generator/command_generator.py +4 -2
  70. rasa/dialogue_understanding/generator/command_parser.py +2 -2
  71. rasa/dialogue_understanding/generator/command_parser_validator.py +63 -0
  72. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 +12 -33
  73. rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v3_gpt_4o_2024_11_20_template.jinja2 +78 -0
  74. rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +26 -461
  75. rasa/dialogue_understanding/generator/single_step/search_ready_llm_command_generator.py +147 -0
  76. rasa/dialogue_understanding/generator/single_step/single_step_based_llm_command_generator.py +477 -0
  77. rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +8 -58
  78. rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +37 -25
  79. rasa/dialogue_understanding/patterns/domain_for_patterns.py +190 -0
  80. rasa/dialogue_understanding/processor/command_processor.py +3 -3
  81. rasa/dialogue_understanding/processor/command_processor_component.py +3 -3
  82. rasa/dialogue_understanding/stack/frames/flow_stack_frame.py +17 -4
  83. rasa/dialogue_understanding/utils.py +68 -12
  84. rasa/dialogue_understanding_test/du_test_case.py +1 -1
  85. rasa/dialogue_understanding_test/du_test_runner.py +4 -22
  86. rasa/dialogue_understanding_test/test_case_simulation/test_case_tracker_simulator.py +2 -6
  87. rasa/e2e_test/e2e_test_runner.py +1 -1
  88. rasa/engine/constants.py +1 -1
  89. rasa/engine/recipes/default_recipe.py +26 -2
  90. rasa/engine/validation.py +3 -2
  91. rasa/hooks.py +0 -28
  92. rasa/llm_fine_tuning/annotation_module.py +39 -9
  93. rasa/llm_fine_tuning/conversations.py +3 -0
  94. rasa/llm_fine_tuning/llm_data_preparation_module.py +66 -49
  95. rasa/llm_fine_tuning/paraphrasing/rephrase_validator.py +52 -44
  96. rasa/llm_fine_tuning/paraphrasing_module.py +10 -12
  97. rasa/llm_fine_tuning/storage.py +4 -4
  98. rasa/llm_fine_tuning/utils.py +63 -1
  99. rasa/model_manager/model_api.py +88 -0
  100. rasa/model_manager/trainer_service.py +4 -4
  101. rasa/plugin.py +1 -11
  102. rasa/privacy/__init__.py +0 -0
  103. rasa/privacy/constants.py +83 -0
  104. rasa/privacy/event_broker_utils.py +77 -0
  105. rasa/privacy/privacy_config.py +281 -0
  106. rasa/privacy/privacy_config_schema.json +86 -0
  107. rasa/privacy/privacy_filter.py +340 -0
  108. rasa/privacy/privacy_manager.py +576 -0
  109. rasa/server.py +23 -2
  110. rasa/shared/constants.py +3 -0
  111. rasa/shared/core/constants.py +4 -3
  112. rasa/shared/core/domain.py +7 -0
  113. rasa/shared/core/events.py +37 -7
  114. rasa/shared/core/flows/flow.py +1 -2
  115. rasa/shared/core/flows/flows_yaml_schema.json +3 -0
  116. rasa/shared/core/flows/steps/collect.py +46 -2
  117. rasa/shared/core/slots.py +28 -0
  118. rasa/shared/exceptions.py +4 -0
  119. rasa/shared/utils/llm.py +161 -6
  120. rasa/shared/utils/yaml.py +32 -0
  121. rasa/studio/data_handler.py +3 -3
  122. rasa/studio/download/download.py +37 -60
  123. rasa/studio/download/flows.py +23 -31
  124. rasa/studio/link.py +200 -0
  125. rasa/studio/pull.py +94 -0
  126. rasa/studio/push.py +131 -0
  127. rasa/studio/upload.py +117 -67
  128. rasa/telemetry.py +82 -25
  129. rasa/tracing/config.py +3 -4
  130. rasa/tracing/constants.py +19 -1
  131. rasa/tracing/instrumentation/attribute_extractors.py +10 -2
  132. rasa/tracing/instrumentation/instrumentation.py +53 -2
  133. rasa/tracing/instrumentation/metrics.py +98 -15
  134. rasa/tracing/metric_instrument_provider.py +75 -3
  135. rasa/utils/common.py +1 -27
  136. rasa/utils/log_utils.py +1 -45
  137. rasa/validator.py +2 -8
  138. rasa/version.py +1 -1
  139. {rasa_pro-3.13.0.dev7.dist-info → rasa_pro-3.13.0.dev8.dist-info}/METADATA +5 -6
  140. {rasa_pro-3.13.0.dev7.dist-info → rasa_pro-3.13.0.dev8.dist-info}/RECORD +143 -129
  141. rasa/anonymization/__init__.py +0 -2
  142. rasa/anonymization/anonymisation_rule_yaml_reader.py +0 -91
  143. rasa/anonymization/anonymization_pipeline.py +0 -286
  144. rasa/anonymization/anonymization_rule_executor.py +0 -266
  145. rasa/anonymization/anonymization_rule_orchestrator.py +0 -119
  146. rasa/anonymization/schemas/config.yml +0 -47
  147. rasa/anonymization/utils.py +0 -118
  148. {rasa_pro-3.13.0.dev7.dist-info → rasa_pro-3.13.0.dev8.dist-info}/NOTICE +0 -0
  149. {rasa_pro-3.13.0.dev7.dist-info → rasa_pro-3.13.0.dev8.dist-info}/WHEEL +0 -0
  150. {rasa_pro-3.13.0.dev7.dist-info → rasa_pro-3.13.0.dev8.dist-info}/entry_points.txt +0 -0
--- /dev/null
+++ b/rasa/dialogue_understanding/generator/single_step/search_ready_llm_command_generator.py
@@ -0,0 +1,147 @@
+from typing import Any, Dict, Literal, Optional, Text
+
+import structlog
+
+from rasa.dialogue_understanding.commands.command_syntax_manager import (
+    CommandSyntaxVersion,
+)
+from rasa.dialogue_understanding.generator.constants import (
+    DEFAULT_OPENAI_MAX_GENERATED_TOKENS,
+    LLM_CONFIG_KEY,
+    MODEL_CONFIG_KEY,
+    MODEL_NAME_CLAUDE_3_5_SONNET_20240620,
+    MODEL_NAME_GPT_4O_2024_11_20,
+    OPENAI_PROVIDER,
+    PROVIDER_CONFIG_KEY,
+    TIMEOUT_CONFIG_KEY,
+)
+from rasa.dialogue_understanding.generator.single_step.single_step_based_llm_command_generator import (  # noqa: E501
+    SingleStepBasedLLMCommandGenerator,
+)
+from rasa.engine.recipes.default_recipe import DefaultV1Recipe
+from rasa.engine.storage.resource import Resource
+from rasa.engine.storage.storage import ModelStorage
+from rasa.shared.constants import (
+    ANTHROPIC_PROVIDER,
+    AWS_BEDROCK_PROVIDER,
+    AZURE_OPENAI_PROVIDER,
+    MAX_TOKENS_CONFIG_KEY,
+    PROMPT_TEMPLATE_CONFIG_KEY,
+    TEMPERATURE_CONFIG_KEY,
+)
+from rasa.shared.utils.llm import (
+    get_default_prompt_template_based_on_model,
+    get_prompt_template,
+)
+
+structlogger = structlog.get_logger()
+
+DEFAULT_LLM_CONFIG = {
+    PROVIDER_CONFIG_KEY: OPENAI_PROVIDER,
+    MODEL_CONFIG_KEY: MODEL_NAME_GPT_4O_2024_11_20,
+    TEMPERATURE_CONFIG_KEY: 0.0,
+    MAX_TOKENS_CONFIG_KEY: DEFAULT_OPENAI_MAX_GENERATED_TOKENS,
+    TIMEOUT_CONFIG_KEY: 7,
+}
+
+DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME = (
+    "command_prompt_v3_gpt_4o_2024_11_20_template.jinja2"
+)
+FALLBACK_COMMAND_PROMPT_TEMPLATE_FILE_NAME = (
+    "command_prompt_v3_gpt_4o_2024_11_20_template.jinja2"
+)
+MODEL_PROMPT_MAPPER = {
+    f"{OPENAI_PROVIDER}/{MODEL_NAME_GPT_4O_2024_11_20}": (
+        "command_prompt_v3_gpt_4o_2024_11_20_template.jinja2"
+    ),
+    f"{AZURE_OPENAI_PROVIDER}/{MODEL_NAME_GPT_4O_2024_11_20}": (
+        "command_prompt_v3_gpt_4o_2024_11_20_template.jinja2"
+    ),
+    f"{AWS_BEDROCK_PROVIDER}/anthropic."
+    f"{MODEL_NAME_CLAUDE_3_5_SONNET_20240620}-v1:0": (
+        "command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2"
+    ),
+    f"{ANTHROPIC_PROVIDER}/{MODEL_NAME_CLAUDE_3_5_SONNET_20240620}": (
+        "command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2"
+    ),
+}
+
+
+@DefaultV1Recipe.register(
+    [
+        DefaultV1Recipe.ComponentType.COMMAND_GENERATOR,
+    ],
+    is_trainable=True,
+)
+class SearchReadyLLMCommandGenerator(SingleStepBasedLLMCommandGenerator):
+    """A single step LLM-based command generator."""
+
+    def __init__(
+        self,
+        config: Dict[str, Any],
+        model_storage: ModelStorage,
+        resource: Resource,
+        prompt_template: Optional[Text] = None,
+        **kwargs: Any,
+    ) -> None:
+        super().__init__(
+            config,
+            model_storage,
+            resource,
+            prompt_template=prompt_template,
+            **kwargs,
+        )
+
+    @staticmethod
+    def get_default_llm_config() -> Dict[str, Any]:
+        """Get the default LLM config for the command generator."""
+        return DEFAULT_LLM_CONFIG
+
+    @staticmethod
+    def get_default_prompt_template_file_name() -> str:
+        """Get the default prompt template file name for the command generator."""
+        return DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME
+
+    @staticmethod
+    def get_fallback_prompt_template_file_name() -> str:
+        """Get the fallback prompt template file name for the command generator."""
+        return FALLBACK_COMMAND_PROMPT_TEMPLATE_FILE_NAME
+
+    @staticmethod
+    def get_model_prompt_mapper() -> Dict[str, str]:
+        """Get the model prompt mapper for the command generator."""
+        return MODEL_PROMPT_MAPPER
+
+    @staticmethod
+    def get_component_command_syntax_version() -> CommandSyntaxVersion:
+        return CommandSyntaxVersion.v3
+
+    @classmethod
+    def _resolve_component_prompt_template(
+        cls,
+        config: Dict[str, Any],
+        prompt_template: Optional[str] = None,
+        log_context: Optional[Literal["init", "fingerprint_addon"]] = None,
+        log_source_component: Optional[str] = "SearchReadyLLMCommandGenerator",
+    ) -> Optional[str]:
+        """Get the prompt template from the config or the default prompt template."""
+        if prompt_template is not None:
+            return prompt_template
+
+        # Get the default prompt template based on the model name.
+        default_command_prompt_template = get_default_prompt_template_based_on_model(
+            llm_config=config.get(LLM_CONFIG_KEY, {}) or {},
+            model_prompt_mapping=cls.get_model_prompt_mapper(),
+            default_prompt_path=cls.get_default_prompt_template_file_name(),
+            fallback_prompt_path=cls.get_fallback_prompt_template_file_name(),
+            log_source_component=log_source_component,
+            log_source_method=log_context,
+        )
+
+        # Return the prompt template either from the config or the default prompt.
+        return get_prompt_template(
+            config.get(PROMPT_TEMPLATE_CONFIG_KEY),
+            default_command_prompt_template,
+            log_source_component=log_source_component,
+            log_source_method=log_context,
+        )
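The new SearchReadyLLMCommandGenerator resolves its prompt template in three steps: an explicitly passed template wins, otherwise a model-specific template is looked up in MODEL_PROMPT_MAPPER under a "provider/model" key, and otherwise the default/fallback file name is used. The dependency-free sketch below reproduces that lookup order outside of Rasa; the resolve_prompt_template helper and the literal model string (assumed to be the expansion of MODEL_NAME_GPT_4O_2024_11_20) are illustrative assumptions, not part of the rasa package.

from typing import Dict, Optional

# Keys mirror MODEL_PROMPT_MAPPER above; the literal model name is an
# assumed expansion of the MODEL_NAME_* constant.
PROMPT_MAPPING = {
    "openai/gpt-4o-2024-11-20": "command_prompt_v3_gpt_4o_2024_11_20_template.jinja2",
}


def resolve_prompt_template(
    llm_config: Dict[str, str],
    mapping: Dict[str, str],
    default_path: str,
    prompt_template: Optional[str] = None,
) -> str:
    # 1. An explicitly supplied template always wins.
    if prompt_template is not None:
        return prompt_template
    # 2. Otherwise, look up a model-specific template under "provider/model".
    key = f"{llm_config.get('provider')}/{llm_config.get('model')}"
    # 3. Otherwise, fall back to the component's default template file.
    return mapping.get(key, default_path)


assert resolve_prompt_template(
    {"provider": "openai", "model": "gpt-4o-2024-11-20"},
    PROMPT_MAPPING,
    "command_prompt_v3_gpt_4o_2024_11_20_template.jinja2",
) == "command_prompt_v3_gpt_4o_2024_11_20_template.jinja2"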
--- /dev/null
+++ b/rasa/dialogue_understanding/generator/single_step/single_step_based_llm_command_generator.py
@@ -0,0 +1,477 @@
+import copy
+from abc import ABC, abstractmethod
+from typing import Any, Dict, List, Literal, Optional, Text
+
+import structlog
+
+import rasa.shared.utils.io
+from rasa.dialogue_understanding.commands import (
+    CannotHandleCommand,
+    Command,
+    ErrorCommand,
+    SetSlotCommand,
+)
+from rasa.dialogue_understanding.commands.command_syntax_manager import (
+    CommandSyntaxManager,
+    CommandSyntaxVersion,
+)
+from rasa.dialogue_understanding.generator import LLMBasedCommandGenerator
+from rasa.dialogue_understanding.generator.command_parser import (
+    parse_commands as parse_commands_using_command_parsers,
+)
+from rasa.dialogue_understanding.generator.command_parser_validator import (
+    CommandParserValidatorSingleton,
+)
+from rasa.dialogue_understanding.generator.constants import (
+    COMMAND_PROMPT_FILE_NAME,
+    FLOW_RETRIEVAL_KEY,
+    LLM_BASED_COMMAND_GENERATOR_CONFIG_FILE,
+    LLM_CONFIG_KEY,
+    USER_INPUT_CONFIG_KEY,
+)
+from rasa.dialogue_understanding.generator.flow_retrieval import FlowRetrieval
+from rasa.dialogue_understanding.stack.utils import top_flow_frame
+from rasa.dialogue_understanding.utils import (
+    add_commands_to_message_parse_data,
+    add_prompt_to_message_parse_data,
+)
+from rasa.engine.graph import ExecutionContext
+from rasa.engine.recipes.default_recipe import DefaultV1Recipe
+from rasa.engine.storage.resource import Resource
+from rasa.engine.storage.storage import ModelStorage
+from rasa.shared.constants import (
+    EMBEDDINGS_CONFIG_KEY,
+    PROMPT_TEMPLATE_CONFIG_KEY,
+    ROUTE_TO_CALM_SLOT,
+)
+from rasa.shared.core.flows import FlowsList
+from rasa.shared.core.trackers import DialogueStateTracker
+from rasa.shared.exceptions import ProviderClientAPIException
+from rasa.shared.nlu.constants import LLM_COMMANDS, LLM_PROMPT, TEXT
+from rasa.shared.nlu.training_data.message import Message
+from rasa.shared.providers.llm.llm_response import LLMResponse
+from rasa.shared.utils.constants import (
+    LOG_COMPONENT_SOURCE_METHOD_FINGERPRINT_ADDON,
+    LOG_COMPONENT_SOURCE_METHOD_INIT,
+)
+from rasa.shared.utils.io import deep_container_fingerprint
+from rasa.shared.utils.llm import (
+    allowed_values_for_slot,
+    resolve_model_client_config,
+    sanitize_message_for_prompt,
+    tracker_as_readable_transcript,
+)
+from rasa.utils.beta import BetaNotEnabledException, ensure_beta_feature_is_enabled
+from rasa.utils.log_utils import log_llm
+
+structlogger = structlog.get_logger()
+
+
+@DefaultV1Recipe.register(
+    [
+        DefaultV1Recipe.ComponentType.COMMAND_GENERATOR,
+    ],
+    is_trainable=True,
+)
+class SingleStepBasedLLMCommandGenerator(LLMBasedCommandGenerator, ABC):
+    """Abstract class for single step based LLM command generators."""
+
+    def __init__(
+        self,
+        config: Dict[str, Any],
+        model_storage: ModelStorage,
+        resource: Resource,
+        prompt_template: Optional[Text] = None,
+        **kwargs: Any,
+    ) -> None:
+        super().__init__(
+            config,
+            model_storage,
+            resource,
+            prompt_template=prompt_template,
+            **kwargs,
+        )
+
+        # Get the prompt template from the config or the default prompt template.
+        self.prompt_template = self._resolve_component_prompt_template(
+            self.config,
+            prompt_template,
+            log_context=LOG_COMPONENT_SOURCE_METHOD_INIT,
+            log_source_component=self.__class__.__name__,
+        )
+
+        # Set the command syntax version.
+        CommandSyntaxManager.set_syntax_version(
+            self.get_component_command_syntax_version()
+        )
+
+        self.trace_prompt_tokens = self.config.get("trace_prompt_tokens", False)
+        self.repeat_command_enabled = self.is_repeat_command_enabled()
+
+    ### Implementations of LLMBasedCommandGenerator parent
+    @staticmethod
+    def get_default_config() -> Dict[str, Any]:
+        """The component's default config (see parent class for full docstring)."""
+        return {
+            PROMPT_TEMPLATE_CONFIG_KEY: None,
+            USER_INPUT_CONFIG_KEY: None,
+            LLM_CONFIG_KEY: None,
+            FLOW_RETRIEVAL_KEY: FlowRetrieval.get_default_config(),
+        }
+
+    def persist(self) -> None:
+        """Persist this component to disk for future loading."""
+        self._persist_prompt_template()
+        self._persist_config()
+        if self.flow_retrieval is not None:
+            self.flow_retrieval.persist()
+
+    def _persist_prompt_template(self) -> None:
+        """Persist prompt template for future loading."""
+        with self._model_storage.write_to(self._resource) as path:
+            rasa.shared.utils.io.write_text_file(
+                self.prompt_template, path / COMMAND_PROMPT_FILE_NAME
+            )
+
+    def _persist_config(self) -> None:
+        """Persist config as a source of truth for resolved clients."""
+        with self._model_storage.write_to(self._resource) as path:
+            rasa.shared.utils.io.dump_obj_as_json_to_file(
+                path / LLM_BASED_COMMAND_GENERATOR_CONFIG_FILE, self.config
+            )
+
+    @classmethod
+    def load(
+        cls: Any,
+        config: Dict[str, Any],
+        model_storage: ModelStorage,
+        resource: Resource,
+        execution_context: ExecutionContext,
+        **kwargs: Any,
+    ) -> "SingleStepBasedLLMCommandGenerator":
+        """Loads trained component (see parent class for full docstring)."""
+        # Perform health check of the LLM API endpoint
+        llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
+        cls.perform_llm_health_check(
+            llm_config,
+            cls.get_default_llm_config(),
+            "llm_based_command_generator.load",
+            cls.__name__,
+        )
+
+        # load prompt template from the model storage.
+        prompt_template = cls.load_prompt_template_from_model_storage(
+            model_storage, resource, COMMAND_PROMPT_FILE_NAME
+        )
+
+        # init base command generator
+        command_generator = cls(config, model_storage, resource, prompt_template)
+        # load flow retrieval if enabled
+        if command_generator.enabled_flow_retrieval:
+            command_generator.flow_retrieval = cls.load_flow_retrival(
+                command_generator.config, model_storage, resource
+            )
+
+        return command_generator
+
+    async def predict_commands(
+        self,
+        message: Message,
+        flows: FlowsList,
+        tracker: Optional[DialogueStateTracker] = None,
+        **kwargs: Any,
+    ) -> List[Command]:
+        """Predict commands using the LLM.
+
+        Args:
+            message: The message from the user.
+            flows: The flows available to the user.
+            tracker: The tracker containing the current state of the conversation.
+            **kwargs: Keyword arguments for forward compatibility.
+
+        Returns:
+            The commands generated by the llm.
+        """
+        prior_commands = self._get_prior_commands(message)
+
+        if tracker is None or flows.is_empty():
+            # cannot do anything if there are no flows or no tracker
+            return prior_commands
+
+        if self._should_skip_llm_call(prior_commands, flows, tracker):
+            return prior_commands
+
+        try:
+            commands = await self._predict_commands(message, flows, tracker)
+        except ProviderClientAPIException:
+            # if command predictions resulted in API exception
+            # "predict" the ErrorCommand
+            commands = [ErrorCommand()]
+            structlogger.warning(
+                "llm_command_generator.predict_commands.api_exception",
+                event_info=(
+                    "ProviderClientAPIException occurred while predicting commands."
+                ),
+                commands=commands,
+            )
+
+        if not commands and not prior_commands:
+            # no commands are parsed or there's an invalid command
+            structlogger.warning(
+                "llm_command_generator.predict_commands",
+                message="No commands were predicted as the LLM response could "
+                "not be parsed or the LLM responded with an invalid command. "
+                "Returning a CannotHandleCommand instead.",
+            )
+            commands = [CannotHandleCommand()]
+
+        if tracker.has_coexistence_routing_slot:
+            # if coexistence feature is used, set the routing slot
+            commands += [SetSlotCommand(ROUTE_TO_CALM_SLOT, True)]
+
+        log_llm(
+            logger=structlogger,
+            log_module=self.__class__.__name__,
+            log_event="llm_command_generator.predict_commands.finished",
+            commands=commands,
+        )
+
+        domain = kwargs.get("domain")
+        commands = self._check_commands_against_slot_mappings(commands, tracker, domain)
+
+        return self._check_commands_overlap(prior_commands, commands)
+
+    async def _predict_commands(
+        self,
+        message: Message,
+        flows: FlowsList,
+        tracker: Optional[DialogueStateTracker] = None,
+    ) -> List[Command]:
+        """Predict commands using the LLM.
+
+        Args:
+            message: The message from the user.
+            flows: The flows available to the user.
+            tracker: The tracker containing the current state of the conversation.
+
+        Returns:
+            The commands generated by the llm.
+
+        Raises:
+            ProviderClientAPIException: If API calls raised an error.
+        """
+        # retrieve flows
+        filtered_flows = await self.filter_flows(message, flows, tracker)
+
+        flow_prompt = self.render_template(message, tracker, filtered_flows, flows)
+        log_llm(
+            logger=structlogger,
+            log_module=self.__class__.__name__,
+            log_event="llm_command_generator.predict_commands.prompt_rendered",
+            prompt=flow_prompt,
+        )
+
+        response = await self.invoke_llm(flow_prompt)
+        llm_response = LLMResponse.ensure_llm_response(response)
+        # The check for 'None' maintains compatibility with older versions
+        # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
+        # might return 'None' to indicate a failure to generate actions.
+        if llm_response is None or not llm_response.choices:
+            structlogger.warning(
+                "llm_command_generator.predict_commands.no_actions_generated",
+                event_info=(
+                    "No actions were generated by the LLM. Returning an ErrorCommand."
+                ),
+            )
+            return [ErrorCommand()]
+
+        action_list = llm_response.choices[0]
+
+        log_llm(
+            logger=structlogger,
+            log_module=self.__class__.__name__,
+            log_event="llm_command_generator.predict_commands.actions_generated",
+            action_list=action_list,
+        )
+
+        commands = self.parse_commands(action_list, tracker, flows)
+
+        if CommandParserValidatorSingleton.should_validate_command_parser():
+            CommandParserValidatorSingleton.validate_if_commands_are_parsed_from_llm_response(
+                commands, action_list
+            )
+
+        self._update_message_parse_data_for_fine_tuning(message, commands, flow_prompt)
+        add_commands_to_message_parse_data(message, self.__class__.__name__, commands)
+        add_prompt_to_message_parse_data(
+            message=message,
+            component_name=self.__class__.__name__,
+            prompt_name="command_generator_prompt",
+            user_prompt=flow_prompt,
+            llm_response=llm_response,
+        )
+
+        return commands
+
+    @staticmethod
+    def _update_message_parse_data_for_fine_tuning(
+        message: Message, commands: List[Command], prompt: str
+    ) -> None:
+        from rasa.llm_fine_tuning.annotation_module import preparing_fine_tuning_data
+
+        if preparing_fine_tuning_data:
+            # Add commands and prompt to the message object in order to create
+            # prompt -> commands pairs for fine-tuning
+            message.set(
+                LLM_COMMANDS,
+                [command.as_dict() for command in commands],
+                add_to_output=True,
+            )
+            message.set(LLM_PROMPT, prompt, add_to_output=True)
+
+    @classmethod
+    def parse_commands(
+        cls, actions: Optional[str], tracker: DialogueStateTracker, flows: FlowsList
+    ) -> List[Command]:
+        """Parse the actions returned by the llm into intent and entities.
+
+        Args:
+            actions: The actions returned by the llm.
+            tracker: The tracker containing the current state of the conversation.
+            flows: the list of flows
+
+        Returns:
+            The parsed commands.
+        """
+        commands = parse_commands_using_command_parsers(actions, flows)
+        if not commands:
+            structlogger.warning(
+                f"{cls.__name__}.parse_commands",
+                message="No commands were parsed from the LLM actions.",
+                actions=actions,
+            )
+
+        return commands
+
+    ### Helper methods
+    def render_template(
+        self,
+        message: Message,
+        tracker: DialogueStateTracker,
+        startable_flows: FlowsList,
+        all_flows: FlowsList,
+    ) -> str:
+        """Render the jinja template to create the prompt for the LLM.
+
+        Args:
+            message: The current message from the user.
+            tracker: The tracker containing the current state of the conversation.
+            startable_flows: The flows startable at this point in time by the user.
+            all_flows: all flows present in the assistant
+
+        Returns:
+            The rendered prompt template.
+        """
+        # need to make this distinction here because current step of the
+        # top_calling_frame would be the call step, but we need the collect step from
+        # the called frame. If no call is active calling and called frame are the same.
+        top_calling_frame = top_flow_frame(tracker.stack)
+        top_called_frame = top_flow_frame(tracker.stack, ignore_call_frames=False)
+
+        top_flow = top_calling_frame.flow(all_flows) if top_calling_frame else None
+        current_step = top_called_frame.step(all_flows) if top_called_frame else None
+
+        flow_slots = self.prepare_current_flow_slots_for_template(
+            top_flow, current_step, tracker
+        )
+        current_slot, current_slot_description = self.prepare_current_slot_for_template(
+            current_step
+        )
+        current_slot_type = None
+        current_slot_allowed_values = None
+        if current_slot:
+            current_slot_type = (
+                slot.type_name
+                if (slot := tracker.slots.get(current_slot)) is not None
+                else None
+            )
+            current_slot_allowed_values = allowed_values_for_slot(
+                tracker.slots.get(current_slot)
+            )
+        current_conversation = tracker_as_readable_transcript(tracker)
+        latest_user_message = sanitize_message_for_prompt(message.get(TEXT))
+        current_conversation += f"\nUSER: {latest_user_message}"
+
+        inputs = {
+            "available_flows": self.prepare_flows_for_template(
+                startable_flows, tracker
+            ),
+            "current_conversation": current_conversation,
+            "flow_slots": flow_slots,
+            "current_flow": top_flow.id if top_flow is not None else None,
+            "current_slot": current_slot,
+            "current_slot_description": current_slot_description,
+            "current_slot_type": current_slot_type,
+            "current_slot_allowed_values": current_slot_allowed_values,
+            "user_message": latest_user_message,
+            "is_repeat_command_enabled": self.repeat_command_enabled,
+        }
+
+        return self.compile_template(self.prompt_template).render(**inputs)
+
+    def is_repeat_command_enabled(self) -> bool:
+        """Check for the repeat command beta feature flag."""
+        RASA_PRO_BETA_REPEAT_COMMAND_ENV_VAR_NAME = "RASA_PRO_BETA_REPEAT_COMMAND"
+        try:
+            ensure_beta_feature_is_enabled(
+                "Repeat Command",
+                env_flag=RASA_PRO_BETA_REPEAT_COMMAND_ENV_VAR_NAME,
+            )
+        except BetaNotEnabledException:
+            return False
+
+        return True
+
+    @classmethod
+    def fingerprint_addon(cls: Any, config: Dict[str, Any]) -> Optional[str]:
+        """Add a fingerprint for the graph."""
+        # Get the default prompt template based on the model name
+        llm_config = resolve_model_client_config(
+            config.get(LLM_CONFIG_KEY), cls.__name__
+        )
+        embedding_config = resolve_model_client_config(
+            config.get(FLOW_RETRIEVAL_KEY, {}).get(EMBEDDINGS_CONFIG_KEY),
+            FlowRetrieval.__name__,
+        )
+
+        # Create a copy of the config to avoid modifying the original config
+        # and update the llm config with the resolved llm config.
+        _config_copy = copy.deepcopy(config)
+        _config_copy[LLM_CONFIG_KEY] = llm_config
+        prompt_template = cls._resolve_component_prompt_template(
+            _config_copy,
+            log_context=LOG_COMPONENT_SOURCE_METHOD_FINGERPRINT_ADDON,
+            log_source_component=cls.__name__,
+        )
+
+        return deep_container_fingerprint(
+            [prompt_template, llm_config, embedding_config]
+        )
+
+    @staticmethod
+    @abstractmethod
+    def get_component_command_syntax_version() -> CommandSyntaxVersion:
+        """Get the command syntax version for the command generator."""
+        pass
+
+    @classmethod
+    @abstractmethod
+    def _resolve_component_prompt_template(
+        cls: Any,
+        config: Dict[str, Any],
+        prompt_template: Optional[str] = None,
+        log_context: Optional[Literal["init", "fingerprint_addon"]] = None,
+        log_source_component: Optional[str] = "SingleStepBasedLLMCommandGenerator",
+    ) -> Optional[str]:
+        """Get the prompt template from the config or the default prompt template."""
+        pass
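SingleStepBasedLLMCommandGenerator is a template-method base: prediction, persistence, and fingerprinting are implemented once, and a concrete generator only has to supply the two abstract hooks at the end of the file (plus whatever hooks the LLMBasedCommandGenerator parent may require, such as get_default_llm_config, which SearchReadyLLMCommandGenerator above also defines). A minimal hypothetical subclass might look like the sketch below; MyCommandGenerator, its inline template, and the assumption that rasa-pro 3.13.0.dev8 is installed are all illustrative, not part of the package.

from typing import Any, Dict, Literal, Optional

from rasa.dialogue_understanding.commands.command_syntax_manager import (
    CommandSyntaxVersion,
)
from rasa.dialogue_understanding.generator.single_step.single_step_based_llm_command_generator import (  # noqa: E501
    SingleStepBasedLLMCommandGenerator,
)


class MyCommandGenerator(SingleStepBasedLLMCommandGenerator):
    """Hypothetical concrete generator; all heavy lifting is inherited."""

    @staticmethod
    def get_component_command_syntax_version() -> CommandSyntaxVersion:
        # Use the syntax version the prompt template is written for;
        # v3 is what the new SearchReadyLLMCommandGenerator selects.
        return CommandSyntaxVersion.v3

    @classmethod
    def _resolve_component_prompt_template(
        cls,
        config: Dict[str, Any],
        prompt_template: Optional[str] = None,
        log_context: Optional[Literal["init", "fingerprint_addon"]] = None,
        log_source_component: Optional[str] = "MyCommandGenerator",
    ) -> Optional[str]:
        # An explicitly supplied template wins; otherwise use a tiny inline
        # Jinja2 template (real components load a packaged .jinja2 file here,
        # as SearchReadyLLMCommandGenerator does via get_prompt_template).
        return prompt_template or "Conversation:\n{{ current_conversation }}"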