rasa-pro 3.12.0.dev12-py3-none-any.whl → 3.12.0rc1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rasa/anonymization/anonymization_rule_executor.py +16 -10
- rasa/cli/data.py +16 -0
- rasa/cli/inspect.py +20 -1
- rasa/cli/project_templates/calm/config.yml +2 -2
- rasa/cli/project_templates/calm/endpoints.yml +2 -2
- rasa/cli/shell.py +3 -3
- rasa/cli/utils.py +12 -0
- rasa/core/actions/action.py +99 -193
- rasa/core/actions/action_handle_digressions.py +142 -0
- rasa/core/actions/action_run_slot_rejections.py +16 -4
- rasa/core/actions/forms.py +10 -5
- rasa/core/channels/__init__.py +4 -0
- rasa/core/channels/studio_chat.py +19 -0
- rasa/core/channels/telegram.py +42 -24
- rasa/core/channels/voice_ready/audiocodes.py +42 -23
- rasa/core/channels/voice_ready/utils.py +1 -1
- rasa/core/channels/voice_stream/asr/asr_engine.py +10 -4
- rasa/core/channels/voice_stream/asr/azure.py +14 -1
- rasa/core/channels/voice_stream/asr/deepgram.py +20 -4
- rasa/core/channels/voice_stream/audiocodes.py +264 -0
- rasa/core/channels/voice_stream/browser_audio.py +5 -1
- rasa/core/channels/voice_stream/call_state.py +10 -1
- rasa/core/channels/voice_stream/genesys.py +335 -0
- rasa/core/channels/voice_stream/tts/azure.py +11 -2
- rasa/core/channels/voice_stream/tts/cartesia.py +29 -10
- rasa/core/channels/voice_stream/twilio_media_streams.py +2 -1
- rasa/core/channels/voice_stream/voice_channel.py +25 -3
- rasa/core/constants.py +2 -0
- rasa/core/migrate.py +2 -2
- rasa/core/nlg/contextual_response_rephraser.py +18 -1
- rasa/core/nlg/generator.py +83 -15
- rasa/core/nlg/response.py +6 -3
- rasa/core/nlg/translate.py +55 -0
- rasa/core/policies/enterprise_search_prompt_with_citation_template.jinja2 +1 -1
- rasa/core/policies/flows/flow_executor.py +47 -46
- rasa/core/processor.py +72 -9
- rasa/core/run.py +4 -3
- rasa/dialogue_understanding/commands/can_not_handle_command.py +20 -2
- rasa/dialogue_understanding/commands/cancel_flow_command.py +80 -4
- rasa/dialogue_understanding/commands/change_flow_command.py +20 -2
- rasa/dialogue_understanding/commands/chit_chat_answer_command.py +20 -2
- rasa/dialogue_understanding/commands/clarify_command.py +29 -3
- rasa/dialogue_understanding/commands/command.py +1 -16
- rasa/dialogue_understanding/commands/command_syntax_manager.py +55 -0
- rasa/dialogue_understanding/commands/correct_slots_command.py +11 -2
- rasa/dialogue_understanding/commands/handle_digressions_command.py +150 -0
- rasa/dialogue_understanding/commands/human_handoff_command.py +20 -2
- rasa/dialogue_understanding/commands/knowledge_answer_command.py +20 -2
- rasa/dialogue_understanding/commands/prompt_command.py +94 -0
- rasa/dialogue_understanding/commands/repeat_bot_messages_command.py +20 -2
- rasa/dialogue_understanding/commands/set_slot_command.py +29 -15
- rasa/dialogue_understanding/commands/skip_question_command.py +20 -2
- rasa/dialogue_understanding/commands/start_flow_command.py +61 -2
- rasa/dialogue_understanding/commands/utils.py +98 -4
- rasa/dialogue_understanding/constants.py +1 -0
- rasa/dialogue_understanding/generator/__init__.py +2 -0
- rasa/dialogue_understanding/generator/command_generator.py +110 -73
- rasa/dialogue_understanding/generator/command_parser.py +16 -13
- rasa/dialogue_understanding/generator/constants.py +3 -0
- rasa/dialogue_understanding/generator/llm_based_command_generator.py +170 -5
- rasa/dialogue_understanding/generator/llm_command_generator.py +5 -3
- rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +26 -4
- rasa/dialogue_understanding/generator/nlu_command_adapter.py +44 -3
- rasa/dialogue_understanding/generator/prompt_templates/__init__.py +0 -0
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_template.jinja2 +60 -0
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2 +77 -0
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_default.jinja2 +68 -0
- rasa/dialogue_understanding/generator/{single_step/command_prompt_template.jinja2 → prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2} +1 -1
- rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +460 -0
- rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +12 -318
- rasa/dialogue_understanding/generator/utils.py +32 -1
- rasa/dialogue_understanding/patterns/collect_information.py +1 -1
- rasa/dialogue_understanding/patterns/correction.py +13 -1
- rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +78 -2
- rasa/dialogue_understanding/patterns/handle_digressions.py +81 -0
- rasa/dialogue_understanding/patterns/validate_slot.py +65 -0
- rasa/dialogue_understanding/processor/command_processor.py +154 -28
- rasa/dialogue_understanding/utils.py +31 -0
- rasa/dialogue_understanding_test/README.md +50 -0
- rasa/dialogue_understanding_test/du_test_case.py +28 -8
- rasa/dialogue_understanding_test/du_test_result.py +13 -9
- rasa/dialogue_understanding_test/io.py +14 -0
- rasa/dialogue_understanding_test/test_case_simulation/test_case_tracker_simulator.py +3 -3
- rasa/e2e_test/utils/io.py +0 -37
- rasa/engine/graph.py +1 -0
- rasa/engine/language.py +140 -0
- rasa/engine/recipes/config_files/default_config.yml +4 -0
- rasa/engine/recipes/default_recipe.py +2 -0
- rasa/engine/recipes/graph_recipe.py +2 -0
- rasa/engine/storage/local_model_storage.py +1 -0
- rasa/engine/storage/storage.py +4 -1
- rasa/model_manager/runner_service.py +7 -4
- rasa/model_manager/socket_bridge.py +7 -6
- rasa/model_manager/warm_rasa_process.py +0 -1
- rasa/model_training.py +24 -27
- rasa/shared/constants.py +15 -13
- rasa/shared/core/constants.py +30 -3
- rasa/shared/core/domain.py +13 -20
- rasa/shared/core/events.py +13 -2
- rasa/shared/core/flows/constants.py +11 -0
- rasa/shared/core/flows/flow.py +100 -19
- rasa/shared/core/flows/flows_yaml_schema.json +69 -3
- rasa/shared/core/flows/steps/collect.py +19 -37
- rasa/shared/core/flows/utils.py +43 -4
- rasa/shared/core/flows/validation.py +1 -1
- rasa/shared/core/slot_mappings.py +350 -111
- rasa/shared/core/slots.py +154 -3
- rasa/shared/core/trackers.py +77 -2
- rasa/shared/importers/importer.py +50 -2
- rasa/shared/nlu/constants.py +1 -0
- rasa/shared/nlu/training_data/schemas/responses.yml +19 -12
- rasa/shared/providers/_configs/azure_entra_id_config.py +541 -0
- rasa/shared/providers/_configs/azure_openai_client_config.py +138 -3
- rasa/shared/providers/_configs/client_config.py +3 -1
- rasa/shared/providers/_configs/default_litellm_client_config.py +3 -1
- rasa/shared/providers/_configs/huggingface_local_embedding_client_config.py +3 -1
- rasa/shared/providers/_configs/litellm_router_client_config.py +3 -1
- rasa/shared/providers/_configs/model_group_config.py +4 -2
- rasa/shared/providers/_configs/oauth_config.py +33 -0
- rasa/shared/providers/_configs/openai_client_config.py +3 -1
- rasa/shared/providers/_configs/rasa_llm_client_config.py +3 -1
- rasa/shared/providers/_configs/self_hosted_llm_client_config.py +3 -1
- rasa/shared/providers/constants.py +6 -0
- rasa/shared/providers/embedding/azure_openai_embedding_client.py +28 -3
- rasa/shared/providers/embedding/litellm_router_embedding_client.py +3 -1
- rasa/shared/providers/llm/_base_litellm_client.py +42 -17
- rasa/shared/providers/llm/azure_openai_llm_client.py +81 -25
- rasa/shared/providers/llm/default_litellm_llm_client.py +3 -1
- rasa/shared/providers/llm/litellm_router_llm_client.py +29 -8
- rasa/shared/providers/llm/llm_client.py +23 -7
- rasa/shared/providers/llm/openai_llm_client.py +9 -3
- rasa/shared/providers/llm/rasa_llm_client.py +11 -2
- rasa/shared/providers/llm/self_hosted_llm_client.py +30 -11
- rasa/shared/providers/router/_base_litellm_router_client.py +3 -1
- rasa/shared/providers/router/router_client.py +3 -1
- rasa/shared/utils/constants.py +3 -0
- rasa/shared/utils/llm.py +31 -8
- rasa/shared/utils/pykwalify_extensions.py +24 -0
- rasa/shared/utils/schemas/domain.yml +26 -1
- rasa/telemetry.py +45 -14
- rasa/tracing/config.py +2 -0
- rasa/tracing/constants.py +12 -0
- rasa/tracing/instrumentation/instrumentation.py +36 -0
- rasa/tracing/instrumentation/metrics.py +41 -0
- rasa/tracing/metric_instrument_provider.py +40 -0
- rasa/utils/common.py +0 -1
- rasa/validator.py +561 -89
- rasa/version.py +1 -1
- {rasa_pro-3.12.0.dev12.dist-info → rasa_pro-3.12.0rc1.dist-info}/METADATA +2 -1
- {rasa_pro-3.12.0.dev12.dist-info → rasa_pro-3.12.0rc1.dist-info}/RECORD +153 -134
- {rasa_pro-3.12.0.dev12.dist-info → rasa_pro-3.12.0rc1.dist-info}/NOTICE +0 -0
- {rasa_pro-3.12.0.dev12.dist-info → rasa_pro-3.12.0rc1.dist-info}/WHEEL +0 -0
- {rasa_pro-3.12.0.dev12.dist-info → rasa_pro-3.12.0rc1.dist-info}/entry_points.txt +0 -0
rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py

```diff
@@ -1,34 +1,21 @@
 import importlib.resources
-from typing import Any, Dict, List, Optional, Text
+from typing import Any, Dict, Optional, Text
 
 import structlog
 
-import rasa.shared.utils.io
-from rasa.dialogue_understanding.commands import (
-    CannotHandleCommand,
-    Command,
-    ErrorCommand,
-    SetSlotCommand,
-)
-from rasa.dialogue_understanding.generator.command_parser import (
-    parse_commands as parse_commands_using_command_parsers,
+from rasa.dialogue_understanding.commands.command_syntax_manager import (
+    CommandSyntaxManager,
+    CommandSyntaxVersion,
 )
 from rasa.dialogue_understanding.generator.constants import (
-    DEFAULT_LLM_CONFIG,
     FLOW_RETRIEVAL_KEY,
     LLM_CONFIG_KEY,
     USER_INPUT_CONFIG_KEY,
 )
 from rasa.dialogue_understanding.generator.flow_retrieval import FlowRetrieval
-from rasa.dialogue_understanding.generator.llm_based_command_generator import (
-    LLMBasedCommandGenerator,
-)
-from rasa.dialogue_understanding.stack.utils import top_flow_frame
-from rasa.dialogue_understanding.utils import (
-    add_commands_to_message_parse_data,
-    add_prompt_to_message_parse_data,
+from rasa.dialogue_understanding.generator.single_step.compact_llm_command_generator import (  # noqa: E501
+    CompactLLMCommandGenerator,
 )
-from rasa.engine.graph import ExecutionContext
 from rasa.engine.recipes.default_recipe import DefaultV1Recipe
 from rasa.engine.storage.resource import Resource
 from rasa.engine.storage.storage import ModelStorage
@@ -36,31 +23,15 @@ from rasa.shared.constants import (
     EMBEDDINGS_CONFIG_KEY,
     PROMPT_CONFIG_KEY,
     PROMPT_TEMPLATE_CONFIG_KEY,
-    ROUTE_TO_CALM_SLOT,
 )
-from rasa.shared.core.flows import FlowsList
-from rasa.shared.core.trackers import DialogueStateTracker
-from rasa.shared.exceptions import ProviderClientAPIException
-from rasa.shared.nlu.constants import LLM_COMMANDS, LLM_PROMPT, TEXT
-from rasa.shared.nlu.training_data.message import Message
-from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.io import deep_container_fingerprint
-from rasa.shared.utils.llm import (
-    get_prompt_template,
-    resolve_model_client_config,
-    sanitize_message_for_prompt,
-    tracker_as_readable_transcript,
-)
-from rasa.utils.beta import BetaNotEnabledException, ensure_beta_feature_is_enabled
-from rasa.utils.log_utils import log_llm
-
-COMMAND_PROMPT_FILE_NAME = "command_prompt.jinja2"
+from rasa.shared.utils.llm import get_prompt_template, resolve_model_client_config
 
 DEFAULT_COMMAND_PROMPT_TEMPLATE = importlib.resources.read_text(
-    "rasa.dialogue_understanding.generator.single_step",
+    "rasa.dialogue_understanding.generator.prompt_templates",
    "command_prompt_template.jinja2",
 )
-
+
 
 structlogger = structlog.get_logger()
 
@@ -71,7 +42,7 @@ structlogger = structlog.get_logger()
     ],
     is_trainable=True,
 )
-class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
+class SingleStepLLMCommandGenerator(CompactLLMCommandGenerator):
     """A single step LLM-based command generator."""
 
     def __init__(
@@ -110,10 +81,9 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
             DEFAULT_COMMAND_PROMPT_TEMPLATE,
         )
 
-
-
+        # Set the command syntax version to v1
+        CommandSyntaxManager.set_syntax_version(CommandSyntaxVersion.v1)
 
-    ### Implementations of LLMBasedCommandGenerator parent
     @staticmethod
     def get_default_config() -> Dict[str, Any]:
         """The component's default config (see parent class for full docstring)."""
@@ -125,216 +95,6 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
             FLOW_RETRIEVAL_KEY: FlowRetrieval.get_default_config(),
         }
 
-    @classmethod
-    def load(
-        cls: Any,
-        config: Dict[str, Any],
-        model_storage: ModelStorage,
-        resource: Resource,
-        execution_context: ExecutionContext,
-        **kwargs: Any,
-    ) -> "SingleStepLLMCommandGenerator":
-        """Loads trained component (see parent class for full docstring)."""
-        # Perform health check of the LLM API endpoint
-        llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
-        cls.perform_llm_health_check(
-            llm_config,
-            DEFAULT_LLM_CONFIG,
-            "single_step_llm_command_generator.load",
-            SingleStepLLMCommandGenerator.__name__,
-        )
-
-        # load prompt template from the model storage.
-        prompt_template = cls.load_prompt_template_from_model_storage(
-            model_storage, resource, COMMAND_PROMPT_FILE_NAME
-        )
-
-        # init base command generator
-        command_generator = cls(config, model_storage, resource, prompt_template)
-        # load flow retrieval if enabled
-        if command_generator.enabled_flow_retrieval:
-            command_generator.flow_retrieval = cls.load_flow_retrival(
-                command_generator.config, model_storage, resource
-            )
-
-        return command_generator
-
-    def persist(self) -> None:
-        """Persist this component to disk for future loading."""
-        self._persist_prompt_template()
-        self._persist_config()
-        if self.flow_retrieval is not None:
-            self.flow_retrieval.persist()
-
-    def _persist_prompt_template(self) -> None:
-        """Persist prompt template for future loading."""
-        with self._model_storage.write_to(self._resource) as path:
-            rasa.shared.utils.io.write_text_file(
-                self.prompt_template, path / COMMAND_PROMPT_FILE_NAME
-            )
-
-    def _persist_config(self) -> None:
-        """Persist config as a source of truth for resolved clients."""
-        with self._model_storage.write_to(self._resource) as path:
-            rasa.shared.utils.io.dump_obj_as_json_to_file(
-                path / SINGLE_STEP_LLM_COMMAND_GENERATOR_CONFIG_FILE, self.config
-            )
-
-    async def predict_commands(
-        self,
-        message: Message,
-        flows: FlowsList,
-        tracker: Optional[DialogueStateTracker] = None,
-        **kwargs: Any,
-    ) -> List[Command]:
-        """Predict commands using the LLM.
-
-        Args:
-            message: The message from the user.
-            flows: The flows available to the user.
-            tracker: The tracker containing the current state of the conversation.
-            **kwargs: Keyword arguments for forward compatibility.
-
-        Returns:
-            The commands generated by the llm.
-        """
-        if tracker is None or flows.is_empty():
-            # cannot do anything if there are no flows or no tracker
-            return []
-
-        try:
-            commands = await self._predict_commands(message, flows, tracker)
-        except ProviderClientAPIException:
-            # if command predictions resulted in API exception
-            # "predict" the ErrorCommand
-            commands = [ErrorCommand()]
-
-        if not commands:
-            # no commands are parsed or there's an invalid command
-            structlogger.warning(
-                "single_step_llm_command_generator.predict_commands",
-                message="No commands were predicted as the LLM response could "
-                "not be parsed or the LLM responded with an invalid command."
-                "Returning a CannotHandleCommand instead.",
-            )
-            commands = [CannotHandleCommand()]
-
-        if tracker.has_coexistence_routing_slot:
-            # if coexistence feature is used, set the routing slot
-            commands += [SetSlotCommand(ROUTE_TO_CALM_SLOT, True)]
-
-        log_llm(
-            logger=structlogger,
-            log_module="SingleStepLLMCommandGenerator",
-            log_event="llm_command_generator.predict_commands.finished",
-            commands=commands,
-        )
-
-        return commands
-
-    async def _predict_commands(
-        self,
-        message: Message,
-        flows: FlowsList,
-        tracker: Optional[DialogueStateTracker] = None,
-    ) -> List[Command]:
-        """Predict commands using the LLM.
-
-        Args:
-            message: The message from the user.
-            flows: The flows available to the user.
-            tracker: The tracker containing the current state of the conversation.
-
-        Returns:
-            The commands generated by the llm.
-
-        Raises:
-            ProviderClientAPIException: If API calls raised an error.
-        """
-        # retrieve flows
-        filtered_flows = await self.filter_flows(message, flows, tracker)
-
-        flow_prompt = self.render_template(message, tracker, filtered_flows, flows)
-        log_llm(
-            logger=structlogger,
-            log_module="SingleStepLLMCommandGenerator",
-            log_event="llm_command_generator.predict_commands.prompt_rendered",
-            prompt=flow_prompt,
-        )
-
-        response = await self.invoke_llm(flow_prompt)
-        llm_response = LLMResponse.ensure_llm_response(response)
-        # The check for 'None' maintains compatibility with older versions
-        # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
-        # might return 'None' to indicate a failure to generate actions.
-        if llm_response is None or not llm_response.choices:
-            return [ErrorCommand()]
-
-        action_list = llm_response.choices[0]
-
-        log_llm(
-            logger=structlogger,
-            log_module="SingleStepLLMCommandGenerator",
-            log_event="llm_command_generator.predict_commands.actions_generated",
-            action_list=action_list,
-        )
-
-        commands = self.parse_commands(action_list, tracker, flows)
-
-        self._update_message_parse_data_for_fine_tuning(message, commands, flow_prompt)
-        add_commands_to_message_parse_data(
-            message, SingleStepLLMCommandGenerator.__name__, commands
-        )
-        add_prompt_to_message_parse_data(
-            message=message,
-            component_name=SingleStepLLMCommandGenerator.__name__,
-            prompt_name="command_generator_prompt",
-            user_prompt=flow_prompt,
-            llm_response=llm_response,
-        )
-
-        return commands
-
-    @staticmethod
-    def _update_message_parse_data_for_fine_tuning(
-        message: Message, commands: List[Command], prompt: str
-    ) -> None:
-        from rasa.llm_fine_tuning.annotation_module import preparing_fine_tuning_data
-
-        if preparing_fine_tuning_data:
-            # Add commands and prompt to the message object in order to create
-            # prompt -> commands pairs for fine-tuning
-            message.set(
-                LLM_COMMANDS,
-                [command.as_dict() for command in commands],
-                add_to_output=True,
-            )
-            message.set(LLM_PROMPT, prompt, add_to_output=True)
-
-    @classmethod
-    def parse_commands(
-        cls, actions: Optional[str], tracker: DialogueStateTracker, flows: FlowsList
-    ) -> List[Command]:
-        """Parse the actions returned by the llm into intent and entities.
-
-        Args:
-            actions: The actions returned by the llm.
-            tracker: The tracker containing the current state of the conversation.
-            flows: the list of flows
-
-        Returns:
-            The parsed commands.
-        """
-        commands = parse_commands_using_command_parsers(actions, flows)
-        if not commands:
-            structlogger.debug(
-                "single_step_llm_command_generator.parse_commands",
-                message="No commands were parsed from the LLM actions.",
-                actions=actions,
-            )
-
-        return commands
-
     @classmethod
     def fingerprint_addon(cls: Any, config: Dict[str, Any]) -> Optional[str]:
         """Add a fingerprint for the graph."""
@@ -357,69 +117,3 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
         return deep_container_fingerprint(
             [prompt_template, llm_config, embedding_config]
         )
-
-    ### Helper methods
-    def render_template(
-        self,
-        message: Message,
-        tracker: DialogueStateTracker,
-        startable_flows: FlowsList,
-        all_flows: FlowsList,
-    ) -> str:
-        """Render the jinja template to create the prompt for the LLM.
-
-        Args:
-            message: The current message from the user.
-            tracker: The tracker containing the current state of the conversation.
-            startable_flows: The flows startable at this point in time by the user.
-            all_flows: all flows present in the assistant
-
-        Returns:
-            The rendered prompt template.
-        """
-        # need to make this distinction here because current step of the
-        # top_calling_frame would be the call step, but we need the collect step from
-        # the called frame. If no call is active calling and called frame are the same.
-        top_calling_frame = top_flow_frame(tracker.stack)
-        top_called_frame = top_flow_frame(tracker.stack, ignore_call_frames=False)
-
-        top_flow = top_calling_frame.flow(all_flows) if top_calling_frame else None
-        current_step = top_called_frame.step(all_flows) if top_called_frame else None
-
-        flow_slots = self.prepare_current_flow_slots_for_template(
-            top_flow, current_step, tracker
-        )
-        current_slot, current_slot_description = self.prepare_current_slot_for_template(
-            current_step
-        )
-        current_conversation = tracker_as_readable_transcript(tracker)
-        latest_user_message = sanitize_message_for_prompt(message.get(TEXT))
-        current_conversation += f"\nUSER: {latest_user_message}"
-
-        inputs = {
-            "available_flows": self.prepare_flows_for_template(
-                startable_flows, tracker
-            ),
-            "current_conversation": current_conversation,
-            "flow_slots": flow_slots,
-            "current_flow": top_flow.id if top_flow is not None else None,
-            "current_slot": current_slot,
-            "current_slot_description": current_slot_description,
-            "user_message": latest_user_message,
-            "is_repeat_command_enabled": self.repeat_command_enabled,
-        }
-
-        return self.compile_template(self.prompt_template).render(**inputs)
-
-    def is_repeat_command_enabled(self) -> bool:
-        """Check for feature flag"""
-        RASA_PRO_BETA_REPEAT_COMMAND_ENV_VAR_NAME = "RASA_PRO_BETA_REPEAT_COMMAND"
-        try:
-            ensure_beta_feature_is_enabled(
-                "Repeat Command",
-                env_flag=RASA_PRO_BETA_REPEAT_COMMAND_ENV_VAR_NAME,
-            )
-        except BetaNotEnabledException:
-            return False
-
-        return True
```
rasa/dialogue_understanding/commands/utils.py

```diff
@@ -1,14 +1,16 @@
-from typing import Dict, Type
+from typing import Dict, List, Set, Type
 
 from rasa.dialogue_understanding.commands import (
     CancelFlowCommand,
     CannotHandleCommand,
     ChitChatAnswerCommand,
     Command,
+    CorrectSlotsCommand,
     HumanHandoffCommand,
     KnowledgeAnswerCommand,
     RestartCommand,
     SessionStartCommand,
+    SetSlotCommand,
     SkipQuestionCommand,
 )
 from rasa.dialogue_understanding.commands.user_silence_command import UserSilenceCommand
@@ -43,3 +45,32 @@ triggerable_pattern_to_command_class: Dict[str, Type[Command]] = {
     CannotHandlePatternFlowStackFrame.flow_id: CannotHandleCommand,
     RestartPatternFlowStackFrame.flow_id: RestartCommand,
 }
+
+
+def filter_slot_commands(
+    commands: List[Command], overlapping_slot_names: Set[str]
+) -> List[Command]:
+    """Filter out slot commands that set overlapping slots."""
+    filtered_commands = []
+
+    for command in commands:
+        if (
+            isinstance(command, SetSlotCommand)
+            and command.name in overlapping_slot_names
+        ):
+            continue
+
+        if isinstance(command, CorrectSlotsCommand):
+            allowed_slots = [
+                slot
+                for slot in command.corrected_slots
+                if slot.name not in overlapping_slot_names
+            ]
+            if not allowed_slots:
+                continue
+
+            command.corrected_slots = allowed_slots
+
+        filtered_commands.append(command)
+
+    return filtered_commands
```
rasa/dialogue_understanding/patterns/collect_information.py

```diff
@@ -8,7 +8,7 @@ from rasa.dialogue_understanding.stack.frames import (
     PatternFlowStackFrame,
 )
 from rasa.shared.constants import RASA_DEFAULT_FLOW_PATTERN_PREFIX
-from rasa.shared.core.flows.steps.collect import SlotRejection
+from rasa.shared.core.slots import SlotRejection
 
 FLOW_PATTERN_COLLECT_INFORMATION = (
     RASA_DEFAULT_FLOW_PATTERN_PREFIX + "collect_information"
```

rasa/dialogue_understanding/patterns/correction.py

```diff
@@ -54,6 +54,8 @@ class CorrectionPatternFlowStackFrame(PatternFlowStackFrame):
     """The ID of the flow to reset to."""
     reset_step_id: Optional[str] = None
     """The ID of the step to reset to."""
+    new_slot_values: List[Any] = field(default_factory=list)
+    """The new values for the corrected slots."""
 
     @classmethod
     def type(cls) -> str:
@@ -70,6 +72,10 @@ class CorrectionPatternFlowStackFrame(PatternFlowStackFrame):
         Returns:
             The created `DialogueStackFrame`.
         """
+        new_slot_values = [
+            val.get("value") for _, val in data["corrected_slots"].items()
+        ]
+
         return CorrectionPatternFlowStackFrame(
             frame_id=data["frame_id"],
             step_id=data["step_id"],
@@ -77,6 +83,7 @@ class CorrectionPatternFlowStackFrame(PatternFlowStackFrame):
             corrected_slots=data["corrected_slots"],
             reset_flow_id=data["reset_flow_id"],
             reset_step_id=data["reset_step_id"],
+            new_slot_values=new_slot_values,
         )
 
 
@@ -118,7 +125,12 @@ class ActionCorrectFlowSlot(action.Action):
         )
         events.extend(tracker.create_stack_updated_events(updated_stack))
 
-        events.extend(
+        events.extend(
+            [
+                SlotSet(name, value=val.get("value"), filled_by=val.get("filled_by"))
+                for name, val in top.corrected_slots.items()
+            ]
+        )
         return events
 
 
```
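These hunks imply that `corrected_slots` now carries dicts of the form `{"value": ..., "filled_by": ...}` rather than bare values: `from_dict` pulls the values out into the new `new_slot_values` field, and `ActionCorrectFlowSlot` forwards both keys into `SlotSet` events. A small illustration of that payload shape (slot names and `filled_by` values are made up):

```python
# Hypothetical corrected_slots payload; the dict-of-dicts shape is what the
# new from_dict / SlotSet code above expects.
corrected_slots = {
    "destination": {"value": "Berlin", "filled_by": "NLUCommandAdapter"},
    "num_tickets": {"value": 2, "filled_by": "SingleStepLLMCommandGenerator"},
}

# As in CorrectionPatternFlowStackFrame.from_dict:
new_slot_values = [val.get("value") for _, val in corrected_slots.items()]
assert new_slot_values == ["Berlin", 2]

# utter_corrected_previous_input (see the YAML change below) can then render
# "updating destination, num_tickets to Berlin, 2 respectively".
```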
rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml

```diff
@@ -1,6 +1,17 @@
 version: "3.1"
 responses:
 
+  utter_ask_continue_previous_flow:
+  - text: "Confirm if you would like to continue with the initial topic: {{context.interrupted_flow_id}}?"
+    metadata:
+      rephrase: True
+      template: jinja
+    buttons:
+      - title: Continue with the previous topic.
+        payload: /SetSlots(continue_previous_flow=True)
+      - title: Switch to new topic.
+        payload: /SetSlots(continue_previous_flow=False)
+
   utter_ask_rephrase:
   - text: I’m sorry I am unable to understand you, could you please rephrase?
 
@@ -9,6 +20,20 @@ responses:
     metadata:
       rephrase: True
 
+  utter_block_digressions:
+  - text: "We can look into {{ context.interrupting_flow_id }} later. Let's focus on the current topic: {{ context.interrupted_flow_id }}."
+    metadata:
+      rephrase: True
+      template: jinja
+  - text: "Let's continue with the current topic: {{ context.interrupted_flow_id }}."
+    condition:
+      - type: slot
+        name: continue_previous_flow
+        value: True
+    metadata:
+      rephrase: True
+      template: jinja
+
   utter_boolean_slot_rejection:
   - text: "Sorry, the value you provided, `{{value}}`, is not valid. Please respond with a valid value."
     metadata:
@@ -35,8 +60,14 @@ responses:
       rephrase: True
       template: jinja
 
+  utter_continue_interruption:
+  - text: "Let's continue with the chosen topic instead: {{ context.interrupting_flow_id }}."
+    metadata:
+      rephrase: True
+      template: jinja
+
   utter_corrected_previous_input:
-  - text: "Ok, I am updating {{ context.corrected_slots.keys()|join(', ') }} to {{ context.
+  - text: "Ok, I am updating {{ context.corrected_slots.keys()|join(', ') }} to {{ context.new_slot_values | join(', ') }} respectively."
     metadata:
       rephrase: True
       template: jinja
@@ -119,7 +150,10 @@ slots:
     type: float
     initial_value: 0.0
     max_value: 1000000
-
+  continue_previous_flow:
+    type: bool
+    mappings:
+      - type: from_llm
 
 flows:
   pattern_cancel_flow:
@@ -163,6 +197,7 @@ flows:
     steps:
       - action: action_clarify_flows
      - action: utter_clarification_options_rasa
+      - action: action_listen
 
   pattern_code_change:
     description: Conversation repair flow for cleaning the stack after an assistant update
@@ -212,6 +247,31 @@ flows:
           next: END
         - else: END
 
+  pattern_handle_digressions:
+    description: Conversation repair flow for handling digressions
+    name: pattern handle digressions
+    steps:
+      - noop: true
+        id: branching
+        next:
+          - if: context.ask_confirm_digressions contains context.interrupting_flow_id
+            then: continue_previous_flow
+          - if: context.block_digressions contains context.interrupting_flow_id
+            then: block_digression
+          - else: continue_digression
+      - id: continue_previous_flow
+        collect: continue_previous_flow
+        next:
+          - if: slots.continue_previous_flow
+            then: block_digression
+          - else: continue_digression
+      - id: block_digression
+        action: action_block_digression
+        next: END
+      - id: continue_digression
+        action: action_continue_digression
+        next: END
+
   pattern_human_handoff:
     description: Conversation repair flow for switching users to a human agent if their request can't be handled
     name: pattern human handoff
@@ -299,3 +359,19 @@ flows:
       - action: action_hangup
         next: END
     - else: END
+
+  pattern_validate_slot:
+    description: Flow for running validations on slots
+    name: pattern validate slot
+    steps:
+      - id: start
+        action: action_run_slot_rejections
+        next:
+          - if: "slots.{{context.validate}} is not null"
+            then: END
+          - else: ask_refill
+      - id: ask_refill
+        action: "{{context.refill_utter}}"
+      - action: "{{context.refill_action}}"
+      - action: action_listen
+        next: start
```
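For orientation, here is the branching of the new `pattern_handle_digressions` flow as plain Python (illustrative only; the real conditions are predicate expressions evaluated by the flow engine against the stack frame's context, not this code):

```python
from typing import Set


def handle_digression(
    interrupting_flow_id: str,
    ask_confirm_digressions: Set[str],
    block_digressions: Set[str],
    continue_previous_flow: bool,
) -> str:
    """Mirrors the 'branching' noop step and the collect step above."""
    if interrupting_flow_id in ask_confirm_digressions:
        # The collect step asks utter_ask_continue_previous_flow and fills
        # the continue_previous_flow bool slot via its from_llm mapping.
        return (
            "action_block_digression"
            if continue_previous_flow
            else "action_continue_digression"
        )
    if interrupting_flow_id in block_digressions:
        return "action_block_digression"
    return "action_continue_digression"


# A flow listed in block_digressions is blocked without asking:
assert (
    handle_digression("check_balance", set(), {"check_balance"}, False)
    == "action_block_digression"
)
```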
rasa/dialogue_understanding/patterns/handle_digressions.py (new file)

```diff
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import Any, Dict, Set
+
+from rasa.dialogue_understanding.stack.frames import PatternFlowStackFrame
+from rasa.shared.constants import RASA_DEFAULT_FLOW_PATTERN_PREFIX
+from rasa.shared.core.constants import (
+    KEY_ASK_CONFIRM_DIGRESSIONS,
+    KEY_BLOCK_DIGRESSIONS,
+)
+
+FLOW_PATTERN_HANDLE_DIGRESSIONS = (
+    RASA_DEFAULT_FLOW_PATTERN_PREFIX + "handle_digressions"
+)
+
+
+@dataclass
+class HandleDigressionsPatternFlowStackFrame(PatternFlowStackFrame):
+    """A pattern flow stack frame that gets added if an interruption is completed."""
+
+    flow_id: str = FLOW_PATTERN_HANDLE_DIGRESSIONS
+    """The ID of the flow."""
+    interrupting_flow_id: str = ""
+    """The ID of the flow that interrupted the active flow."""
+    interrupted_flow_id: str = ""
+    """The name of the active flow that was interrupted."""
+    interrupted_step_id: str = ""
+    """The ID of the step that was interrupted."""
+    ask_confirm_digressions: Set[str] = field(default_factory=set)
+    """The set of interrupting flow names to confirm."""
+    block_digressions: Set[str] = field(default_factory=set)
+    """The set of interrupting flow names to block."""
+
+    @classmethod
+    def type(cls) -> str:
+        """Returns the type of the frame."""
+        return FLOW_PATTERN_HANDLE_DIGRESSIONS
+
+    @staticmethod
+    def from_dict(data: Dict[str, Any]) -> HandleDigressionsPatternFlowStackFrame:
+        """Creates a `DialogueStackFrame` from a dictionary.
+
+        Args:
+            data: The dictionary to create the `DialogueStackFrame` from.
+
+        Returns:
+            The created `DialogueStackFrame`.
+        """
+        return HandleDigressionsPatternFlowStackFrame(
+            frame_id=data["frame_id"],
+            step_id=data["step_id"],
+            interrupted_step_id=data["interrupted_step_id"],
+            interrupted_flow_id=data["interrupted_flow_id"],
+            interrupting_flow_id=data["interrupting_flow_id"],
+            ask_confirm_digressions=set(data.get(KEY_ASK_CONFIRM_DIGRESSIONS, [])),
+            # This attribute must be converted to a set to enable usage
+            # of subset `contains` pypred operator in the default pattern
+            # conditional branching
+            block_digressions=set(data.get(KEY_BLOCK_DIGRESSIONS, [])),
+        )
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, HandleDigressionsPatternFlowStackFrame):
+            return False
+        return (
+            self.flow_id == other.flow_id
+            and self.interrupted_step_id == other.interrupted_step_id
+            and self.interrupted_flow_id == other.interrupted_flow_id
+            and self.interrupting_flow_id == other.interrupting_flow_id
+            and self.ask_confirm_digressions == other.ask_confirm_digressions
+            and self.block_digressions == other.block_digressions
+        )
+
+    def as_dict(self) -> Dict[str, Any]:
+        """Returns the frame as a dictionary."""
+        data = super().as_dict()
+        # converting back to list to avoid serialization issues
+        data[KEY_ASK_CONFIRM_DIGRESSIONS] = list(self.ask_confirm_digressions)
+        data[KEY_BLOCK_DIGRESSIONS] = list(self.block_digressions)
+        return data
```