rasa-pro 3.12.0.dev13__py3-none-any.whl → 3.12.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of rasa-pro has been flagged as potentially problematic.
- rasa/anonymization/anonymization_rule_executor.py +16 -10
- rasa/cli/data.py +16 -0
- rasa/cli/project_templates/calm/config.yml +2 -2
- rasa/cli/project_templates/calm/endpoints.yml +2 -2
- rasa/cli/utils.py +12 -0
- rasa/core/actions/action.py +84 -191
- rasa/core/actions/action_run_slot_rejections.py +16 -4
- rasa/core/channels/__init__.py +2 -0
- rasa/core/channels/studio_chat.py +19 -0
- rasa/core/channels/telegram.py +42 -24
- rasa/core/channels/voice_ready/utils.py +1 -1
- rasa/core/channels/voice_stream/asr/asr_engine.py +10 -4
- rasa/core/channels/voice_stream/asr/azure.py +14 -1
- rasa/core/channels/voice_stream/asr/deepgram.py +20 -4
- rasa/core/channels/voice_stream/audiocodes.py +264 -0
- rasa/core/channels/voice_stream/browser_audio.py +4 -1
- rasa/core/channels/voice_stream/call_state.py +3 -0
- rasa/core/channels/voice_stream/genesys.py +6 -2
- rasa/core/channels/voice_stream/tts/azure.py +9 -1
- rasa/core/channels/voice_stream/tts/cartesia.py +14 -8
- rasa/core/channels/voice_stream/voice_channel.py +23 -2
- rasa/core/constants.py +2 -0
- rasa/core/nlg/contextual_response_rephraser.py +18 -1
- rasa/core/nlg/generator.py +83 -15
- rasa/core/nlg/response.py +6 -3
- rasa/core/nlg/translate.py +55 -0
- rasa/core/policies/enterprise_search_prompt_with_citation_template.jinja2 +1 -1
- rasa/core/policies/flows/flow_executor.py +12 -5
- rasa/core/processor.py +72 -9
- rasa/dialogue_understanding/commands/can_not_handle_command.py +20 -2
- rasa/dialogue_understanding/commands/cancel_flow_command.py +24 -6
- rasa/dialogue_understanding/commands/change_flow_command.py +20 -2
- rasa/dialogue_understanding/commands/chit_chat_answer_command.py +20 -2
- rasa/dialogue_understanding/commands/clarify_command.py +29 -3
- rasa/dialogue_understanding/commands/command.py +1 -16
- rasa/dialogue_understanding/commands/command_syntax_manager.py +55 -0
- rasa/dialogue_understanding/commands/human_handoff_command.py +20 -2
- rasa/dialogue_understanding/commands/knowledge_answer_command.py +20 -2
- rasa/dialogue_understanding/commands/prompt_command.py +94 -0
- rasa/dialogue_understanding/commands/repeat_bot_messages_command.py +20 -2
- rasa/dialogue_understanding/commands/set_slot_command.py +24 -2
- rasa/dialogue_understanding/commands/skip_question_command.py +20 -2
- rasa/dialogue_understanding/commands/start_flow_command.py +20 -2
- rasa/dialogue_understanding/commands/utils.py +98 -4
- rasa/dialogue_understanding/generator/__init__.py +2 -0
- rasa/dialogue_understanding/generator/command_parser.py +15 -12
- rasa/dialogue_understanding/generator/constants.py +3 -0
- rasa/dialogue_understanding/generator/llm_based_command_generator.py +12 -5
- rasa/dialogue_understanding/generator/llm_command_generator.py +5 -3
- rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +16 -2
- rasa/dialogue_understanding/generator/prompt_templates/__init__.py +0 -0
- rasa/dialogue_understanding/generator/{single_step → prompt_templates}/command_prompt_template.jinja2 +2 -0
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2 +77 -0
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_default.jinja2 +68 -0
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 +84 -0
- rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +460 -0
- rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +12 -310
- rasa/dialogue_understanding/patterns/collect_information.py +1 -1
- rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +16 -0
- rasa/dialogue_understanding/patterns/validate_slot.py +65 -0
- rasa/dialogue_understanding/processor/command_processor.py +39 -0
- rasa/dialogue_understanding_test/du_test_case.py +28 -8
- rasa/dialogue_understanding_test/du_test_result.py +13 -9
- rasa/dialogue_understanding_test/io.py +14 -0
- rasa/e2e_test/utils/io.py +0 -37
- rasa/engine/graph.py +1 -0
- rasa/engine/language.py +140 -0
- rasa/engine/recipes/config_files/default_config.yml +4 -0
- rasa/engine/recipes/default_recipe.py +2 -0
- rasa/engine/recipes/graph_recipe.py +2 -0
- rasa/engine/storage/local_model_storage.py +1 -0
- rasa/engine/storage/storage.py +4 -1
- rasa/model_manager/runner_service.py +7 -4
- rasa/model_manager/socket_bridge.py +7 -6
- rasa/shared/constants.py +15 -13
- rasa/shared/core/constants.py +2 -0
- rasa/shared/core/flows/constants.py +11 -0
- rasa/shared/core/flows/flow.py +83 -19
- rasa/shared/core/flows/flows_yaml_schema.json +31 -3
- rasa/shared/core/flows/steps/collect.py +1 -36
- rasa/shared/core/flows/utils.py +28 -4
- rasa/shared/core/flows/validation.py +1 -1
- rasa/shared/core/slot_mappings.py +208 -5
- rasa/shared/core/slots.py +131 -1
- rasa/shared/core/trackers.py +74 -1
- rasa/shared/importers/importer.py +50 -2
- rasa/shared/nlu/training_data/schemas/responses.yml +19 -12
- rasa/shared/providers/_configs/azure_entra_id_config.py +541 -0
- rasa/shared/providers/_configs/azure_openai_client_config.py +138 -3
- rasa/shared/providers/_configs/client_config.py +3 -1
- rasa/shared/providers/_configs/default_litellm_client_config.py +3 -1
- rasa/shared/providers/_configs/huggingface_local_embedding_client_config.py +3 -1
- rasa/shared/providers/_configs/litellm_router_client_config.py +3 -1
- rasa/shared/providers/_configs/model_group_config.py +4 -2
- rasa/shared/providers/_configs/oauth_config.py +33 -0
- rasa/shared/providers/_configs/openai_client_config.py +3 -1
- rasa/shared/providers/_configs/rasa_llm_client_config.py +3 -1
- rasa/shared/providers/_configs/self_hosted_llm_client_config.py +3 -1
- rasa/shared/providers/constants.py +6 -0
- rasa/shared/providers/embedding/azure_openai_embedding_client.py +28 -3
- rasa/shared/providers/embedding/litellm_router_embedding_client.py +3 -1
- rasa/shared/providers/llm/_base_litellm_client.py +42 -17
- rasa/shared/providers/llm/azure_openai_llm_client.py +81 -25
- rasa/shared/providers/llm/default_litellm_llm_client.py +3 -1
- rasa/shared/providers/llm/litellm_router_llm_client.py +29 -8
- rasa/shared/providers/llm/llm_client.py +23 -7
- rasa/shared/providers/llm/openai_llm_client.py +9 -3
- rasa/shared/providers/llm/rasa_llm_client.py +11 -2
- rasa/shared/providers/llm/self_hosted_llm_client.py +30 -11
- rasa/shared/providers/router/_base_litellm_router_client.py +3 -1
- rasa/shared/providers/router/router_client.py +3 -1
- rasa/shared/utils/constants.py +3 -0
- rasa/shared/utils/llm.py +30 -7
- rasa/shared/utils/pykwalify_extensions.py +24 -0
- rasa/shared/utils/schemas/domain.yml +26 -0
- rasa/telemetry.py +2 -1
- rasa/tracing/config.py +2 -0
- rasa/tracing/constants.py +12 -0
- rasa/tracing/instrumentation/instrumentation.py +36 -0
- rasa/tracing/instrumentation/metrics.py +41 -0
- rasa/tracing/metric_instrument_provider.py +40 -0
- rasa/validator.py +372 -7
- rasa/version.py +1 -1
- {rasa_pro-3.12.0.dev13.dist-info → rasa_pro-3.12.0rc1.dist-info}/METADATA +2 -1
- {rasa_pro-3.12.0.dev13.dist-info → rasa_pro-3.12.0rc1.dist-info}/RECORD +128 -113
- {rasa_pro-3.12.0.dev13.dist-info → rasa_pro-3.12.0rc1.dist-info}/NOTICE +0 -0
- {rasa_pro-3.12.0.dev13.dist-info → rasa_pro-3.12.0rc1.dist-info}/WHEEL +0 -0
- {rasa_pro-3.12.0.dev13.dist-info → rasa_pro-3.12.0rc1.dist-info}/entry_points.txt +0 -0
rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py

@@ -1,34 +1,21 @@
 import importlib.resources
-from typing import Any, Dict, List, Optional, Text
+from typing import Any, Dict, Optional, Text
 
 import structlog
 
-import rasa.shared.utils.io
-from rasa.dialogue_understanding.commands import (
-    CannotHandleCommand,
-    Command,
-    ErrorCommand,
-    SetSlotCommand,
-)
-from rasa.dialogue_understanding.generator.command_parser import (
-    parse_commands as parse_commands_using_command_parsers,
+from rasa.dialogue_understanding.commands.command_syntax_manager import (
+    CommandSyntaxManager,
+    CommandSyntaxVersion,
 )
 from rasa.dialogue_understanding.generator.constants import (
-    DEFAULT_LLM_CONFIG,
     FLOW_RETRIEVAL_KEY,
     LLM_CONFIG_KEY,
     USER_INPUT_CONFIG_KEY,
 )
 from rasa.dialogue_understanding.generator.flow_retrieval import FlowRetrieval
-from rasa.dialogue_understanding.generator.llm_based_command_generator import (
-    LLMBasedCommandGenerator,
-)
-from rasa.dialogue_understanding.stack.utils import top_flow_frame
-from rasa.dialogue_understanding.utils import (
-    add_commands_to_message_parse_data,
-    add_prompt_to_message_parse_data,
+from rasa.dialogue_understanding.generator.single_step.compact_llm_command_generator import (  # noqa: E501
+    CompactLLMCommandGenerator,
 )
-from rasa.engine.graph import ExecutionContext
 from rasa.engine.recipes.default_recipe import DefaultV1Recipe
 from rasa.engine.storage.resource import Resource
 from rasa.engine.storage.storage import ModelStorage
@@ -36,30 +23,15 @@ from rasa.shared.constants import (
     EMBEDDINGS_CONFIG_KEY,
     PROMPT_CONFIG_KEY,
     PROMPT_TEMPLATE_CONFIG_KEY,
-    ROUTE_TO_CALM_SLOT,
 )
-from rasa.shared.core.flows import FlowsList
-from rasa.shared.core.trackers import DialogueStateTracker
-from rasa.shared.exceptions import ProviderClientAPIException
-from rasa.shared.nlu.constants import LLM_COMMANDS, LLM_PROMPT, TEXT
-from rasa.shared.nlu.training_data.message import Message
-from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.io import deep_container_fingerprint
-from rasa.shared.utils.llm import (
-    get_prompt_template,
-    resolve_model_client_config,
-    sanitize_message_for_prompt,
-    tracker_as_readable_transcript,
-)
-from rasa.utils.log_utils import log_llm
-
-COMMAND_PROMPT_FILE_NAME = "command_prompt.jinja2"
+from rasa.shared.utils.llm import get_prompt_template, resolve_model_client_config
 
 DEFAULT_COMMAND_PROMPT_TEMPLATE = importlib.resources.read_text(
-    "rasa.dialogue_understanding.generator.single_step",
+    "rasa.dialogue_understanding.generator.prompt_templates",
    "command_prompt_template.jinja2",
 )
-
+
 
 structlogger = structlog.get_logger()
 
@@ -70,7 +42,7 @@ structlogger = structlog.get_logger()
     ],
     is_trainable=True,
 )
-class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
+class SingleStepLLMCommandGenerator(CompactLLMCommandGenerator):
     """A single step LLM-based command generator."""
 
     def __init__(
@@ -109,9 +81,9 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
             DEFAULT_COMMAND_PROMPT_TEMPLATE,
         )
 
-
+        # Set the command syntax version to v1
+        CommandSyntaxManager.set_syntax_version(CommandSyntaxVersion.v1)
 
-    ### Implementations of LLMBasedCommandGenerator parent
     @staticmethod
     def get_default_config() -> Dict[str, Any]:
         """The component's default config (see parent class for full docstring)."""
@@ -123,224 +95,6 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
             FLOW_RETRIEVAL_KEY: FlowRetrieval.get_default_config(),
         }
 
-    @classmethod
-    def load(
-        cls: Any,
-        config: Dict[str, Any],
-        model_storage: ModelStorage,
-        resource: Resource,
-        execution_context: ExecutionContext,
-        **kwargs: Any,
-    ) -> "SingleStepLLMCommandGenerator":
-        """Loads trained component (see parent class for full docstring)."""
-        # Perform health check of the LLM API endpoint
-        llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
-        cls.perform_llm_health_check(
-            llm_config,
-            DEFAULT_LLM_CONFIG,
-            "single_step_llm_command_generator.load",
-            SingleStepLLMCommandGenerator.__name__,
-        )
-
-        # load prompt template from the model storage.
-        prompt_template = cls.load_prompt_template_from_model_storage(
-            model_storage, resource, COMMAND_PROMPT_FILE_NAME
-        )
-
-        # init base command generator
-        command_generator = cls(config, model_storage, resource, prompt_template)
-        # load flow retrieval if enabled
-        if command_generator.enabled_flow_retrieval:
-            command_generator.flow_retrieval = cls.load_flow_retrival(
-                command_generator.config, model_storage, resource
-            )
-
-        return command_generator
-
-    def persist(self) -> None:
-        """Persist this component to disk for future loading."""
-        self._persist_prompt_template()
-        self._persist_config()
-        if self.flow_retrieval is not None:
-            self.flow_retrieval.persist()
-
-    def _persist_prompt_template(self) -> None:
-        """Persist prompt template for future loading."""
-        with self._model_storage.write_to(self._resource) as path:
-            rasa.shared.utils.io.write_text_file(
-                self.prompt_template, path / COMMAND_PROMPT_FILE_NAME
-            )
-
-    def _persist_config(self) -> None:
-        """Persist config as a source of truth for resolved clients."""
-        with self._model_storage.write_to(self._resource) as path:
-            rasa.shared.utils.io.dump_obj_as_json_to_file(
-                path / SINGLE_STEP_LLM_COMMAND_GENERATOR_CONFIG_FILE, self.config
-            )
-
-    async def predict_commands(
-        self,
-        message: Message,
-        flows: FlowsList,
-        tracker: Optional[DialogueStateTracker] = None,
-        **kwargs: Any,
-    ) -> List[Command]:
-        """Predict commands using the LLM.
-
-        Args:
-            message: The message from the user.
-            flows: The flows available to the user.
-            tracker: The tracker containing the current state of the conversation.
-            **kwargs: Keyword arguments for forward compatibility.
-
-        Returns:
-            The commands generated by the llm.
-        """
-        prior_commands = self._get_prior_commands(message)
-
-        if tracker is None or flows.is_empty():
-            # cannot do anything if there are no flows or no tracker
-            return prior_commands
-
-        if self._should_skip_llm_call(prior_commands, flows, tracker):
-            return prior_commands
-
-        try:
-            commands = await self._predict_commands(message, flows, tracker)
-        except ProviderClientAPIException:
-            # if command predictions resulted in API exception
-            # "predict" the ErrorCommand
-            commands = [ErrorCommand()]
-
-        if not commands and not prior_commands:
-            # no commands are parsed or there's an invalid command
-            structlogger.warning(
-                "single_step_llm_command_generator.predict_commands",
-                message="No commands were predicted as the LLM response could "
-                "not be parsed or the LLM responded with an invalid command."
-                "Returning a CannotHandleCommand instead.",
-            )
-            commands = [CannotHandleCommand()]
-
-        if tracker.has_coexistence_routing_slot:
-            # if coexistence feature is used, set the routing slot
-            commands += [SetSlotCommand(ROUTE_TO_CALM_SLOT, True)]
-
-        log_llm(
-            logger=structlogger,
-            log_module="SingleStepLLMCommandGenerator",
-            log_event="llm_command_generator.predict_commands.finished",
-            commands=commands,
-        )
-
-        domain = kwargs.get("domain")
-        commands = self._check_commands_against_slot_mappings(commands, tracker, domain)
-
-        return self._check_commands_overlap(prior_commands, commands)
-
-    async def _predict_commands(
-        self,
-        message: Message,
-        flows: FlowsList,
-        tracker: Optional[DialogueStateTracker] = None,
-    ) -> List[Command]:
-        """Predict commands using the LLM.
-
-        Args:
-            message: The message from the user.
-            flows: The flows available to the user.
-            tracker: The tracker containing the current state of the conversation.
-
-        Returns:
-            The commands generated by the llm.
-
-        Raises:
-            ProviderClientAPIException: If API calls raised an error.
-        """
-        # retrieve flows
-        filtered_flows = await self.filter_flows(message, flows, tracker)
-
-        flow_prompt = self.render_template(message, tracker, filtered_flows, flows)
-        log_llm(
-            logger=structlogger,
-            log_module="SingleStepLLMCommandGenerator",
-            log_event="llm_command_generator.predict_commands.prompt_rendered",
-            prompt=flow_prompt,
-        )
-
-        response = await self.invoke_llm(flow_prompt)
-        llm_response = LLMResponse.ensure_llm_response(response)
-        # The check for 'None' maintains compatibility with older versions
-        # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
-        # might return 'None' to indicate a failure to generate actions.
-        if llm_response is None or not llm_response.choices:
-            return [ErrorCommand()]
-
-        action_list = llm_response.choices[0]
-
-        log_llm(
-            logger=structlogger,
-            log_module="SingleStepLLMCommandGenerator",
-            log_event="llm_command_generator.predict_commands.actions_generated",
-            action_list=action_list,
-        )
-
-        commands = self.parse_commands(action_list, tracker, flows)
-
-        self._update_message_parse_data_for_fine_tuning(message, commands, flow_prompt)
-        add_commands_to_message_parse_data(
-            message, SingleStepLLMCommandGenerator.__name__, commands
-        )
-        add_prompt_to_message_parse_data(
-            message=message,
-            component_name=SingleStepLLMCommandGenerator.__name__,
-            prompt_name="command_generator_prompt",
-            user_prompt=flow_prompt,
-            llm_response=llm_response,
-        )
-
-        return commands
-
-    @staticmethod
-    def _update_message_parse_data_for_fine_tuning(
-        message: Message, commands: List[Command], prompt: str
-    ) -> None:
-        from rasa.llm_fine_tuning.annotation_module import preparing_fine_tuning_data
-
-        if preparing_fine_tuning_data:
-            # Add commands and prompt to the message object in order to create
-            # prompt -> commands pairs for fine-tuning
-            message.set(
-                LLM_COMMANDS,
-                [command.as_dict() for command in commands],
-                add_to_output=True,
-            )
-            message.set(LLM_PROMPT, prompt, add_to_output=True)
-
-    @classmethod
-    def parse_commands(
-        cls, actions: Optional[str], tracker: DialogueStateTracker, flows: FlowsList
-    ) -> List[Command]:
-        """Parse the actions returned by the llm into intent and entities.
-
-        Args:
-            actions: The actions returned by the llm.
-            tracker: The tracker containing the current state of the conversation.
-            flows: the list of flows
-
-        Returns:
-            The parsed commands.
-        """
-        commands = parse_commands_using_command_parsers(actions, flows)
-        if not commands:
-            structlogger.debug(
-                "single_step_llm_command_generator.parse_commands",
-                message="No commands were parsed from the LLM actions.",
-                actions=actions,
-            )
-
-        return commands
-
     @classmethod
     def fingerprint_addon(cls: Any, config: Dict[str, Any]) -> Optional[str]:
         """Add a fingerprint for the graph."""
@@ -363,55 +117,3 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
         return deep_container_fingerprint(
             [prompt_template, llm_config, embedding_config]
         )
-
-    ### Helper methods
-    def render_template(
-        self,
-        message: Message,
-        tracker: DialogueStateTracker,
-        startable_flows: FlowsList,
-        all_flows: FlowsList,
-    ) -> str:
-        """Render the jinja template to create the prompt for the LLM.
-
-        Args:
-            message: The current message from the user.
-            tracker: The tracker containing the current state of the conversation.
-            startable_flows: The flows startable at this point in time by the user.
-            all_flows: all flows present in the assistant
-
-        Returns:
-            The rendered prompt template.
-        """
-        # need to make this distinction here because current step of the
-        # top_calling_frame would be the call step, but we need the collect step from
-        # the called frame. If no call is active calling and called frame are the same.
-        top_calling_frame = top_flow_frame(tracker.stack)
-        top_called_frame = top_flow_frame(tracker.stack, ignore_call_frames=False)
-
-        top_flow = top_calling_frame.flow(all_flows) if top_calling_frame else None
-        current_step = top_called_frame.step(all_flows) if top_called_frame else None
-
-        flow_slots = self.prepare_current_flow_slots_for_template(
-            top_flow, current_step, tracker
-        )
-        current_slot, current_slot_description = self.prepare_current_slot_for_template(
-            current_step
-        )
-        current_conversation = tracker_as_readable_transcript(tracker)
-        latest_user_message = sanitize_message_for_prompt(message.get(TEXT))
-        current_conversation += f"\nUSER: {latest_user_message}"
-
-        inputs = {
-            "available_flows": self.prepare_flows_for_template(
-                startable_flows, tracker
-            ),
-            "current_conversation": current_conversation,
-            "flow_slots": flow_slots,
-            "current_flow": top_flow.id if top_flow is not None else None,
-            "current_slot": current_slot,
-            "current_slot_description": current_slot_description,
-            "user_message": latest_user_message,
-        }
-
-        return self.compile_template(self.prompt_template).render(**inputs)
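Net effect of this file: SingleStepLLMCommandGenerator shrinks from a full implementation to a thin subclass of the new CompactLLMCommandGenerator, keeping only its default config, its fingerprint addon, and a pin of the command DSL to the legacy v1 syntax. A minimal sketch of that version-pinning pattern, under assumed and simplified names (only set_syntax_version and the v1 member are confirmed by the diff; the v2 default is inferred from the new command_prompt_v2_*.jinja2 templates in the file list):

from enum import Enum


class CommandSyntaxVersion(Enum):
    # v1 is confirmed by the diff; v2 is implied by the new
    # command_prompt_v2_*.jinja2 prompt templates listed above.
    v1 = "v1"
    v2 = "v2"


class CommandSyntaxManager:
    """Process-wide registry for the DSL the command parser should expect."""

    _version: CommandSyntaxVersion = CommandSyntaxVersion.v2

    @classmethod
    def set_syntax_version(cls, version: CommandSyntaxVersion) -> None:
        cls._version = version

    @classmethod
    def get_syntax_version(cls) -> CommandSyntaxVersion:
        return cls._version


class CompactLLMCommandGenerator:
    """Stand-in for the new base class, which owns prompting and parsing."""


class SingleStepLLMCommandGenerator(CompactLLMCommandGenerator):
    """Stand-in for the slimmed-down subclass."""

    def __init__(self) -> None:
        # Keep pre-3.12 prompts and parsers working by forcing the old DSL.
        CommandSyntaxManager.set_syntax_version(CommandSyntaxVersion.v1)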
rasa/dialogue_understanding/patterns/collect_information.py

@@ -8,7 +8,7 @@ from rasa.dialogue_understanding.stack.frames import (
     PatternFlowStackFrame,
 )
 from rasa.shared.constants import RASA_DEFAULT_FLOW_PATTERN_PREFIX
-from rasa.shared.core.flows.steps.collect import SlotRejection
+from rasa.shared.core.slots import SlotRejection
 
 FLOW_PATTERN_COLLECT_INFORMATION = (
     RASA_DEFAULT_FLOW_PATTERN_PREFIX + "collect_information"
rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml

@@ -359,3 +359,19 @@
             - action: action_hangup
               next: END
         - else: END
+
+  pattern_validate_slot:
+    description: Flow for running validations on slots
+    name: pattern validate slot
+    steps:
+      - id: start
+        action: action_run_slot_rejections
+        next:
+          - if: "slots.{{context.validate}} is not null"
+            then: END
+          - else: ask_refill
+      - id: ask_refill
+        action: "{{context.refill_utter}}"
+      - action: "{{context.refill_action}}"
+      - action: action_listen
+        next: start
rasa/dialogue_understanding/patterns/validate_slot.py (new file)

@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from typing import Any, Dict, List
+
+from rasa.dialogue_understanding.stack.frames import (
+    PatternFlowStackFrame,
+)
+from rasa.shared.constants import (
+    RASA_DEFAULT_FLOW_PATTERN_PREFIX,
+    REFILL_UTTER,
+    REJECTIONS,
+)
+from rasa.shared.core.slots import SlotRejection
+
+FLOW_PATTERN_VALIDATE_SLOT = RASA_DEFAULT_FLOW_PATTERN_PREFIX + "validate_slot"
+
+
+@dataclass
+class ValidateSlotPatternFlowStackFrame(PatternFlowStackFrame):
+    """A pattern flow stack frame to validate slots."""
+
+    flow_id: str = FLOW_PATTERN_VALIDATE_SLOT
+    """The ID of the flow."""
+    validate: str = ""
+    """The slot that should be validated."""
+    refill_utter: str = ""
+    """The utter action that should be executed to ask the user to refill the
+    information."""
+    refill_action: str = ""
+    """The action that should be executed to ask the user to refill the
+    information."""
+    rejections: List[SlotRejection] = field(default_factory=list)
+    """The predicate check that should be applied to the filled slot.
+    If a predicate check fails, its `utter` action indicated under rejections
+    will be executed.
+    """
+
+    @classmethod
+    def type(cls) -> str:
+        """Returns the type of the frame."""
+        return FLOW_PATTERN_VALIDATE_SLOT
+
+    @staticmethod
+    def from_dict(data: Dict[str, Any]) -> ValidateSlotPatternFlowStackFrame:
+        """Creates a `DialogueStackFrame` from a dictionary.
+
+        Args:
+            data: The dictionary to create the `DialogueStackFrame` from.
+
+        Returns:
+            The created `DialogueStackFrame`.
+        """
+        rejections = [
+            SlotRejection.from_dict(rejection) for rejection in data.get(REJECTIONS, [])
+        ]
+
+        return ValidateSlotPatternFlowStackFrame(
+            frame_id=data["frame_id"],
+            step_id=data["step_id"],
+            validate=data["validate"],
+            refill_action=data["refill_action"],
+            refill_utter=data[REFILL_UTTER],
+            rejections=rejections,
+        )
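For orientation, a hedged usage sketch: the frame's fields are what the pattern_validate_slot flow above reads back as {{context.validate}}, {{context.refill_utter}}, and {{context.refill_action}}. The dictionary keys follow from_dict above; the concrete values of REFILL_UTTER and REJECTIONS (assumed here to be "refill_utter" and "rejections") and the SlotRejection payload shape (assumed to use "if"/"utter", mirroring collect-step rejections) are not shown in this diff:

from rasa.dialogue_understanding.patterns.validate_slot import (
    ValidateSlotPatternFlowStackFrame,
)

frame = ValidateSlotPatternFlowStackFrame.from_dict(
    {
        "frame_id": "a1b2c3",
        "step_id": "start",
        "validate": "booking_date",
        "refill_utter": "utter_ask_booking_date",
        "refill_action": "action_ask_booking_date",
        "rejections": [
            {"if": "slots.booking_date < '2025-01-01'", "utter": "utter_date_in_past"},
        ],
    }
)

# The default flow_id resolves to "pattern_validate_slot", so pushing this
# frame onto the dialogue stack triggers the pattern flow defined above.
assert frame.flow_id == "pattern_validate_slot"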
rasa/dialogue_understanding/processor/command_processor.py

@@ -22,6 +22,9 @@ from rasa.dialogue_understanding.commands.handle_digressions_command import (
     HandleDigressionsCommand,
 )
 from rasa.dialogue_understanding.commands.set_slot_command import SetSlotExtractor
+from rasa.dialogue_understanding.commands.utils import (
+    create_validate_frames_from_slot_set_events,
+)
 from rasa.dialogue_understanding.patterns.chitchat import FLOW_PATTERN_CHITCHAT
 from rasa.dialogue_understanding.patterns.collect_information import (
     CollectInformationPatternFlowStackFrame,
@@ -29,6 +32,9 @@ from rasa.dialogue_understanding.patterns.collect_information import (
 from rasa.dialogue_understanding.patterns.correction import (
     CorrectionPatternFlowStackFrame,
 )
+from rasa.dialogue_understanding.patterns.validate_slot import (
+    ValidateSlotPatternFlowStackFrame,
+)
 from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack
 from rasa.dialogue_understanding.stack.frames import (
     BaseFlowStackFrame,
@@ -234,18 +240,51 @@ def execute_commands(
     # and then pushing the commands onto the stack in the reversed order.
     reversed_commands = list(reversed(commands))
 
+    # we need to keep track of the ValidateSlotPatternFlowStackFrame that
+    # should be pushed onto the stack before executing the StartFlowCommands.
+    # This is necessary to make sure that slots filled before the start of a
+    # flow can be immediately validated without waiting till the flow is started
+    # and completed.
+    stack_frames_to_follow_commands: List[ValidateSlotPatternFlowStackFrame] = []
+
     validate_state_of_commands(commands)
 
     for command in reversed_commands:
         new_events = command.run_command_on_tracker(
             tracker, all_flows, original_tracker
         )
+
+        _, stack_frames_to_follow_commands = (
+            create_validate_frames_from_slot_set_events(
+                tracker, new_events, stack_frames_to_follow_commands
+            )
+        )
+
         events.extend(new_events)
         tracker.update_with_events(new_events)
 
+    new_events = push_stack_frames_to_follow_commands(
+        tracker, stack_frames_to_follow_commands
+    )
+    events.extend(new_events)
+
     return remove_duplicated_set_slots(events)
 
 
+def push_stack_frames_to_follow_commands(
+    tracker: DialogueStateTracker, stack_frames: List
+) -> List[Event]:
+    """Push stack frames to follow commands."""
+    new_events = []
+
+    for frame in stack_frames:
+        stack = tracker.stack
+        stack.push(frame)
+        new_events.extend(tracker.create_stack_updated_events(stack))
+        tracker.update_with_events(new_events)
+    return new_events
+
+
 def remove_duplicated_set_slots(events: List[Event]) -> List[Event]:
     """Removes duplicated set slot events.
 
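The execute_commands change is a collect-then-push pattern: validate-slot frames discovered while commands run are buffered in stack_frames_to_follow_commands and pushed only after every command has updated the tracker, so validation lands on the final stack state. A toy model of that control flow (simplified types and dict-shaped commands; this is not the Rasa API):

from dataclasses import dataclass, field
from typing import Dict, List


@dataclass
class Tracker:
    stack: List[str] = field(default_factory=list)  # top of stack = end of list
    slots: Dict[str, str] = field(default_factory=dict)


def execute_commands_toy(tracker: Tracker, commands: List[Dict[str, str]]) -> None:
    pending: List[str] = []  # validate frames, pushed after all commands ran

    for cmd in reversed(commands):
        if cmd["type"] == "set_slot":
            tracker.slots[cmd["name"]] = cmd["value"]
            # Buffer the validation instead of pushing it immediately:
            # a later StartFlow command may still change the stack.
            pending.append(f"validate:{cmd['name']}")
        elif cmd["type"] == "start_flow":
            tracker.stack.append(f"flow:{cmd['name']}")

    # Push buffered frames last; they end up on top and therefore run first.
    tracker.stack.extend(pending)


tracker = Tracker()
execute_commands_toy(
    tracker,
    [
        {"type": "start_flow", "name": "book_table"},
        {"type": "set_slot", "name": "party_size", "value": "4"},
    ],
)
assert tracker.stack == ["flow:book_table", "validate:party_size"]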
rasa/dialogue_understanding_test/du_test_case.py

@@ -2,7 +2,7 @@ from typing import Any, Dict, Iterator, List, Optional, Tuple
 
 from pydantic import BaseModel, Field
 
-from rasa.dialogue_understanding.commands import Command
+from rasa.dialogue_understanding.commands.prompt_command import PromptCommand
 from rasa.dialogue_understanding.generator.command_parser import parse_commands
 from rasa.dialogue_understanding_test.command_comparison import are_command_lists_equal
 from rasa.dialogue_understanding_test.constants import (
@@ -30,6 +30,7 @@ from rasa.shared.nlu.constants import (
 KEY_USAGE = "usage"
 KEY_PROMPT_TOKENS = "prompt_tokens"
 KEY_COMPLETION_TOKENS = "completion_tokens"
+KEY_CHOICES = "choices"
 
 
 class DialogueUnderstandingOutput(BaseModel):
@@ -65,11 +66,18 @@ class DialogueUnderstandingOutput(BaseModel):
     """
 
     # Dict with component name as key and list of commands as value
-    commands: Dict[str, List[Command]]
+    commands: Dict[str, List[PromptCommand]]
     # List of prompts
     prompts: Optional[List[Dict[str, Any]]] = None
 
-    def get_predicted_commands(self) -> List[Command]:
+    class Config:
+        """Skip validation for PromptCommand protocol as pydantic does not know how to
+        serialize or handle instances of a protocol.
+        """
+
+        arbitrary_types_allowed = True
+
+    def get_predicted_commands(self) -> List[PromptCommand]:
         """Get all commands from the output."""
         return [
             command
@@ -144,6 +152,11 @@ class DialogueUnderstandingOutput(BaseModel):
                 KEY_COMPLETION_TOKENS
             )
 
+            choices = prompt_data.get(KEY_LLM_RESPONSE_METADATA, {}).get(KEY_CHOICES)
+            if choices and len(choices) > 0:
+                # Add the action list returned by the LLM to the prompt_info
+                prompt_info[KEY_CHOICES] = choices[0]
+
             data[component_name].append(prompt_info)
 
         return data
@@ -155,9 +168,16 @@ class DialogueUnderstandingTestStep(BaseModel):
     template: Optional[str] = None
     line: Optional[int] = None
     metadata_name: Optional[str] = None
-    commands: Optional[List[Command]] = None
+    commands: Optional[List[PromptCommand]] = None
     dialogue_understanding_output: Optional[DialogueUnderstandingOutput] = None
 
+    class Config:
+        """Skip validation for PromptCommand protocol as pydantic does not know how to
+        serialize or handle instances of a protocol.
+        """
+
+        arbitrary_types_allowed = True
+
     def as_dict(self) -> Dict[str, Any]:
         if self.actor == ACTOR_USER:
             if self.commands:
@@ -178,7 +198,7 @@ class DialogueUnderstandingTestStep(BaseModel):
     def from_dict(
         step: Dict[str, Any],
         flows: FlowsList,
-        custom_command_classes: List[Command] = [],
+        custom_command_classes: List[PromptCommand] = [],
         remove_default_commands: List[str] = [],
     ) -> "DialogueUnderstandingTestStep":
         """Creates a DialogueUnderstandingTestStep from a dictionary.
@@ -224,7 +244,7 @@ class DialogueUnderstandingTestStep(BaseModel):
             commands=commands,
         )
 
-    def get_predicted_commands(self) -> List[Command]:
+    def get_predicted_commands(self) -> List[PromptCommand]:
         """Get all predicted commands from the test case."""
         if self.dialogue_understanding_output is None:
             return []
@@ -314,7 +334,7 @@ class DialogueUnderstandingTestCase(BaseModel):
         input_test_case: Dict[str, Any],
         flows: FlowsList,
         file: Optional[str] = None,
-        custom_command_classes: List[Command] = [],
+        custom_command_classes: List[PromptCommand] = [],
         remove_default_commands: List[str] = [],
     ) -> "DialogueUnderstandingTestCase":
         """Creates a DialogueUnderstandingTestCase from a dictionary.
@@ -361,7 +381,7 @@ class DialogueUnderstandingTestCase(BaseModel):
 
         return [step.to_str() for step in steps]
 
-    def get_expected_commands(self) -> List[Command]:
+    def get_expected_commands(self) -> List[PromptCommand]:
         """Get all commands from the test steps."""
         return [
             command
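The two Config blocks above exist because PromptCommand (see rasa/dialogue_understanding/commands/prompt_command.py in the file list) is a typing.Protocol, and pydantic cannot synthesize a validator or serializer for protocol-typed fields. A self-contained illustration of the failure mode and the escape hatch, using a pydantic v1-style Config as in the diff and a hypothetical protocol method (to_dsl) rather than the real interface:

from typing import List, Protocol, runtime_checkable

from pydantic import BaseModel


@runtime_checkable  # lets pydantic's isinstance() check work on the protocol
class PromptCommand(Protocol):
    def to_dsl(self) -> str: ...


class StartFlow:
    """Duck-typed command: satisfies PromptCommand without inheriting from it."""

    def __init__(self, flow: str) -> None:
        self.flow = flow

    def to_dsl(self) -> str:
        return f"StartFlow({self.flow})"


class Output(BaseModel):
    commands: List[PromptCommand]

    class Config:
        # Without this flag, pydantic raises at class-definition time
        # because it has no built-in validator for the Protocol type.
        arbitrary_types_allowed = True


out = Output(commands=[StartFlow("transfer_money")])
assert out.commands[0].to_dsl() == "StartFlow(transfer_money)"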