rasa-pro 3.9.17__py3-none-any.whl → 3.10.3__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.
Potentially problematic release: this version of rasa-pro might be problematic.
- README.md +5 -37
- rasa/__init__.py +1 -2
- rasa/__main__.py +5 -0
- rasa/anonymization/anonymization_rule_executor.py +2 -2
- rasa/api.py +26 -22
- rasa/cli/arguments/data.py +27 -2
- rasa/cli/arguments/default_arguments.py +25 -3
- rasa/cli/arguments/run.py +9 -9
- rasa/cli/arguments/train.py +2 -0
- rasa/cli/data.py +70 -8
- rasa/cli/e2e_test.py +108 -433
- rasa/cli/interactive.py +1 -0
- rasa/cli/llm_fine_tuning.py +395 -0
- rasa/cli/project_templates/calm/endpoints.yml +1 -1
- rasa/cli/project_templates/tutorial/endpoints.yml +1 -1
- rasa/cli/run.py +14 -13
- rasa/cli/scaffold.py +10 -8
- rasa/cli/train.py +8 -7
- rasa/cli/utils.py +15 -0
- rasa/constants.py +7 -1
- rasa/core/actions/action.py +98 -49
- rasa/core/actions/action_run_slot_rejections.py +4 -1
- rasa/core/actions/custom_action_executor.py +9 -6
- rasa/core/actions/direct_custom_actions_executor.py +80 -0
- rasa/core/actions/e2e_stub_custom_action_executor.py +68 -0
- rasa/core/actions/grpc_custom_action_executor.py +2 -2
- rasa/core/actions/http_custom_action_executor.py +6 -5
- rasa/core/agent.py +21 -17
- rasa/core/channels/__init__.py +2 -0
- rasa/core/channels/audiocodes.py +1 -16
- rasa/core/channels/voice_aware/__init__.py +0 -0
- rasa/core/channels/voice_aware/jambonz.py +103 -0
- rasa/core/channels/voice_aware/jambonz_protocol.py +344 -0
- rasa/core/channels/voice_aware/utils.py +20 -0
- rasa/core/channels/voice_native/__init__.py +0 -0
- rasa/core/constants.py +6 -1
- rasa/core/featurizers/single_state_featurizer.py +1 -22
- rasa/core/featurizers/tracker_featurizers.py +18 -115
- rasa/core/information_retrieval/faiss.py +7 -4
- rasa/core/information_retrieval/information_retrieval.py +8 -0
- rasa/core/information_retrieval/milvus.py +9 -2
- rasa/core/information_retrieval/qdrant.py +1 -1
- rasa/core/nlg/contextual_response_rephraser.py +32 -10
- rasa/core/nlg/summarize.py +4 -3
- rasa/core/policies/enterprise_search_policy.py +100 -44
- rasa/core/policies/flows/flow_executor.py +155 -98
- rasa/core/policies/intentless_policy.py +52 -28
- rasa/core/policies/ted_policy.py +33 -58
- rasa/core/policies/unexpected_intent_policy.py +7 -15
- rasa/core/processor.py +15 -46
- rasa/core/run.py +5 -4
- rasa/core/tracker_store.py +8 -4
- rasa/core/utils.py +45 -56
- rasa/dialogue_understanding/coexistence/llm_based_router.py +45 -12
- rasa/dialogue_understanding/commands/__init__.py +4 -0
- rasa/dialogue_understanding/commands/change_flow_command.py +0 -6
- rasa/dialogue_understanding/commands/session_start_command.py +59 -0
- rasa/dialogue_understanding/commands/set_slot_command.py +1 -5
- rasa/dialogue_understanding/commands/utils.py +38 -0
- rasa/dialogue_understanding/generator/constants.py +10 -3
- rasa/dialogue_understanding/generator/flow_retrieval.py +14 -5
- rasa/dialogue_understanding/generator/llm_based_command_generator.py +12 -2
- rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +106 -87
- rasa/dialogue_understanding/generator/nlu_command_adapter.py +28 -6
- rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +90 -37
- rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +15 -15
- rasa/dialogue_understanding/patterns/session_start.py +37 -0
- rasa/dialogue_understanding/processor/command_processor.py +13 -14
- rasa/e2e_test/aggregate_test_stats_calculator.py +124 -0
- rasa/e2e_test/assertions.py +1181 -0
- rasa/e2e_test/assertions_schema.yml +106 -0
- rasa/e2e_test/constants.py +20 -0
- rasa/e2e_test/e2e_config.py +220 -0
- rasa/e2e_test/e2e_config_schema.yml +26 -0
- rasa/e2e_test/e2e_test_case.py +131 -8
- rasa/e2e_test/e2e_test_converter.py +363 -0
- rasa/e2e_test/e2e_test_converter_prompt.jinja2 +70 -0
- rasa/e2e_test/e2e_test_coverage_report.py +364 -0
- rasa/e2e_test/e2e_test_result.py +26 -6
- rasa/e2e_test/e2e_test_runner.py +498 -73
- rasa/e2e_test/e2e_test_schema.yml +96 -0
- rasa/e2e_test/pykwalify_extensions.py +39 -0
- rasa/e2e_test/stub_custom_action.py +70 -0
- rasa/e2e_test/utils/__init__.py +0 -0
- rasa/e2e_test/utils/e2e_yaml_utils.py +55 -0
- rasa/e2e_test/utils/io.py +596 -0
- rasa/e2e_test/utils/validation.py +80 -0
- rasa/engine/recipes/default_components.py +0 -2
- rasa/engine/storage/local_model_storage.py +0 -1
- rasa/env.py +9 -0
- rasa/llm_fine_tuning/__init__.py +0 -0
- rasa/llm_fine_tuning/annotation_module.py +241 -0
- rasa/llm_fine_tuning/conversations.py +144 -0
- rasa/llm_fine_tuning/llm_data_preparation_module.py +178 -0
- rasa/llm_fine_tuning/notebooks/unsloth_finetuning.ipynb +407 -0
- rasa/llm_fine_tuning/paraphrasing/__init__.py +0 -0
- rasa/llm_fine_tuning/paraphrasing/conversation_rephraser.py +281 -0
- rasa/llm_fine_tuning/paraphrasing/default_rephrase_prompt_template.jina2 +44 -0
- rasa/llm_fine_tuning/paraphrasing/rephrase_validator.py +121 -0
- rasa/llm_fine_tuning/paraphrasing/rephrased_user_message.py +10 -0
- rasa/llm_fine_tuning/paraphrasing_module.py +128 -0
- rasa/llm_fine_tuning/storage.py +174 -0
- rasa/llm_fine_tuning/train_test_split_module.py +441 -0
- rasa/model_training.py +48 -16
- rasa/nlu/classifiers/diet_classifier.py +25 -38
- rasa/nlu/classifiers/logistic_regression_classifier.py +9 -44
- rasa/nlu/classifiers/sklearn_intent_classifier.py +16 -37
- rasa/nlu/extractors/crf_entity_extractor.py +50 -93
- rasa/nlu/featurizers/sparse_featurizer/count_vectors_featurizer.py +45 -78
- rasa/nlu/featurizers/sparse_featurizer/lexical_syntactic_featurizer.py +17 -52
- rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py +3 -5
- rasa/nlu/persistor.py +129 -32
- rasa/server.py +45 -10
- rasa/shared/constants.py +63 -15
- rasa/shared/core/domain.py +15 -12
- rasa/shared/core/events.py +28 -2
- rasa/shared/core/flows/flow.py +208 -13
- rasa/shared/core/flows/flow_path.py +84 -0
- rasa/shared/core/flows/flows_list.py +28 -10
- rasa/shared/core/flows/flows_yaml_schema.json +269 -193
- rasa/shared/core/flows/validation.py +112 -25
- rasa/shared/core/flows/yaml_flows_io.py +149 -10
- rasa/shared/core/trackers.py +6 -0
- rasa/shared/core/training_data/visualization.html +2 -2
- rasa/shared/exceptions.py +4 -0
- rasa/shared/importers/importer.py +60 -11
- rasa/shared/importers/remote_importer.py +196 -0
- rasa/shared/nlu/constants.py +2 -0
- rasa/shared/nlu/training_data/features.py +2 -120
- rasa/shared/providers/_configs/__init__.py +0 -0
- rasa/shared/providers/_configs/azure_openai_client_config.py +181 -0
- rasa/shared/providers/_configs/client_config.py +57 -0
- rasa/shared/providers/_configs/default_litellm_client_config.py +130 -0
- rasa/shared/providers/_configs/huggingface_local_embedding_client_config.py +234 -0
- rasa/shared/providers/_configs/openai_client_config.py +175 -0
- rasa/shared/providers/_configs/self_hosted_llm_client_config.py +171 -0
- rasa/shared/providers/_configs/utils.py +101 -0
- rasa/shared/providers/_ssl_verification_utils.py +124 -0
- rasa/shared/providers/embedding/__init__.py +0 -0
- rasa/shared/providers/embedding/_base_litellm_embedding_client.py +254 -0
- rasa/shared/providers/embedding/_langchain_embedding_client_adapter.py +74 -0
- rasa/shared/providers/embedding/azure_openai_embedding_client.py +277 -0
- rasa/shared/providers/embedding/default_litellm_embedding_client.py +102 -0
- rasa/shared/providers/embedding/embedding_client.py +90 -0
- rasa/shared/providers/embedding/embedding_response.py +41 -0
- rasa/shared/providers/embedding/huggingface_local_embedding_client.py +191 -0
- rasa/shared/providers/embedding/openai_embedding_client.py +172 -0
- rasa/shared/providers/llm/__init__.py +0 -0
- rasa/shared/providers/llm/_base_litellm_client.py +227 -0
- rasa/shared/providers/llm/azure_openai_llm_client.py +338 -0
- rasa/shared/providers/llm/default_litellm_llm_client.py +84 -0
- rasa/shared/providers/llm/llm_client.py +76 -0
- rasa/shared/providers/llm/llm_response.py +50 -0
- rasa/shared/providers/llm/openai_llm_client.py +155 -0
- rasa/shared/providers/llm/self_hosted_llm_client.py +169 -0
- rasa/shared/providers/mappings.py +75 -0
- rasa/shared/utils/cli.py +30 -0
- rasa/shared/utils/io.py +65 -3
- rasa/shared/utils/llm.py +223 -200
- rasa/shared/utils/yaml.py +122 -7
- rasa/studio/download.py +19 -13
- rasa/studio/train.py +2 -3
- rasa/studio/upload.py +2 -3
- rasa/telemetry.py +113 -58
- rasa/tracing/config.py +2 -3
- rasa/tracing/instrumentation/attribute_extractors.py +29 -17
- rasa/tracing/instrumentation/instrumentation.py +4 -47
- rasa/utils/common.py +18 -19
- rasa/utils/endpoints.py +7 -4
- rasa/utils/io.py +66 -0
- rasa/utils/json_utils.py +60 -0
- rasa/utils/licensing.py +9 -1
- rasa/utils/ml_utils.py +4 -2
- rasa/utils/tensorflow/model_data.py +193 -2
- rasa/validator.py +195 -1
- rasa/version.py +1 -1
- {rasa_pro-3.9.17.dist-info → rasa_pro-3.10.3.dist-info}/METADATA +25 -51
- {rasa_pro-3.9.17.dist-info → rasa_pro-3.10.3.dist-info}/RECORD +183 -119
- rasa/nlu/classifiers/llm_intent_classifier.py +0 -519
- rasa/shared/providers/openai/clients.py +0 -43
- rasa/shared/providers/openai/session_handler.py +0 -110
- rasa/utils/tensorflow/feature_array.py +0 -366
- /rasa/{shared/providers/openai → cli/project_templates/tutorial/actions}/__init__.py +0 -0
- /rasa/cli/project_templates/tutorial/{actions.py → actions/actions.py} +0 -0
- {rasa_pro-3.9.17.dist-info → rasa_pro-3.10.3.dist-info}/NOTICE +0 -0
- {rasa_pro-3.9.17.dist-info → rasa_pro-3.10.3.dist-info}/WHEEL +0 -0
- {rasa_pro-3.9.17.dist-info → rasa_pro-3.10.3.dist-info}/entry_points.txt +0 -0
rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py

@@ -179,101 +179,29 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             return []
 
         try:
-            # retrieve relevant flows
-            filtered_flows = await self.filter_flows(message, flows, tracker)
-
-            # 1st step: Handle active flow
-            if tracker.has_active_user_flow:
-                commands_from_active_flow = (
-                    await self._predict_commands_for_active_flow(
-                        message,
-                        tracker,
-                        available_flows=filtered_flows,
-                        all_flows=flows,
-                    )
-                )
-            else:
-                commands_from_active_flow = []
-
-            # 2nd step: Check if we need to switch to another flow
-            contains_change_flow_command = any(
-                isinstance(command, ChangeFlowCommand)
-                for command in commands_from_active_flow
+            commands = await self._predict_commands_with_multi_step(
+                message, flows, tracker
             )
-            should_change_flows = (
-                not commands_from_active_flow or contains_change_flow_command
-            )
-
-            if should_change_flows:
-                commands_for_handling_flows = (
-                    await self._predict_commands_for_handling_flows(
-                        message,
-                        tracker,
-                        available_flows=filtered_flows,
-                        all_flows=flows,
-                    )
-                )
-            else:
-                commands_for_handling_flows = []
-
-            if contains_change_flow_command:
-                commands_from_active_flow.pop(
-                    commands_from_active_flow.index(ChangeFlowCommand())
-                )
+            commands = self._clean_up_commands(commands)
+        except ProviderClientAPIException:
+            # if any step resulted in API exception, the command prediction cannot
+            # be completed, "predict" the ErrorCommand
+            commands = [ErrorCommand()]
 
-            # 3rd step: Fill slots for started flows
-            newly_started_flows = FlowsList(
-                [
-                    flow
-                    for command in commands_for_handling_flows
-                    if (
-                        isinstance(command, StartFlowCommand)
-                        and (flow := filtered_flows.flow_by_id(command.flow))
-                        is not None
-                    )
-                ]
-            )
+        if not commands:
+            # if for any reason the final list of commands is empty,
+            # "predict" CannotHandle
+            commands = [CannotHandleCommand()]
 
-            commands_for_newly_started_flows = (
-                await self._predict_commands_for_newly_started_flows(
-                    message,
-                    tracker,
-                    newly_started_flows=newly_started_flows,
-                    all_flows=flows,
-                )
-            )
+        if tracker.has_coexistence_routing_slot:
+            # if coexistence feature is used, set the routing slot
+            commands += [SetSlotCommand(ROUTE_TO_CALM_SLOT, True)]
 
-            # if any step resulted in API exception,
-            # the command prediction cannot be completed,
-            # raise ErrorCommand
-        except ProviderClientAPIException:
-            return [ErrorCommand()]
-
-        # concatenate predicted commands
-        commands = list(
-            set(
-                commands_from_active_flow
-                + commands_for_handling_flows
-                + commands_for_newly_started_flows
-            )
-        )
-        commands = self._clean_up_commands(commands)
         structlogger.debug(
-            "multi_step_llm_command_generator
+            "multi_step_llm_command_generator.predict_commands.finished",
             commands=commands,
         )
 
-        # if for any reason the final list of commands is empty,
-        # return CannotHandle
-        if not commands:
-            # if action_list is None, we couldn't get any response from the LLM
-            commands = [CannotHandleCommand()]
-        else:
-            # if the LLM command generator predicted valid commands and the
-            # coexistence feature is used, set the routing slot
-            if tracker.has_coexistence_routing_slot:
-                commands += [SetSlotCommand(ROUTE_TO_CALM_SLOT, True)]
-
         return commands
 
     @classmethod
@@ -440,6 +368,97 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
         file_path = path / file_name
         rasa.shared.utils.io.write_text_file(template, file_path)
 
+    async def _predict_commands_with_multi_step(
+        self,
+        message: Message,
+        flows: FlowsList,
+        tracker: DialogueStateTracker,
+    ) -> List[Command]:
+        """Predict commands using the LLM.
+
+        Args:
+            message: The message from the user.
+            flows: The flows available to the user.
+            tracker: The tracker containing the current state of the conversation.
+
+        Returns:
+            The commands generated by the llm.
+
+        Raises:
+            ProviderClientAPIException: If API calls raised an error.
+        """
+        # retrieve relevant flows
+        filtered_flows = await self.filter_flows(message, flows, tracker)
+
+        # 1st step: Handle active flow
+        if tracker.has_active_user_flow:
+            commands_from_active_flow = await self._predict_commands_for_active_flow(
+                message,
+                tracker,
+                available_flows=filtered_flows,
+                all_flows=flows,
+            )
+        else:
+            commands_from_active_flow = []
+
+        # 2nd step: Check if we need to switch to another flow
+        contains_change_flow_command = any(
+            isinstance(command, ChangeFlowCommand)
+            for command in commands_from_active_flow
+        )
+        should_change_flows = (
+            not commands_from_active_flow or contains_change_flow_command
+        )
+
+        if should_change_flows:
+            commands_for_handling_flows = (
+                await self._predict_commands_for_handling_flows(
+                    message,
+                    tracker,
+                    available_flows=filtered_flows,
+                    all_flows=flows,
+                )
+            )
+        else:
+            commands_for_handling_flows = []
+
+        if contains_change_flow_command:
+            commands_from_active_flow.pop(
+                commands_from_active_flow.index(ChangeFlowCommand())
+            )
+
+        # 3rd step: Fill slots for started flows
+        newly_started_flows = FlowsList(
+            [
+                flow
+                for command in commands_for_handling_flows
+                if (
+                    isinstance(command, StartFlowCommand)
+                    and (flow := filtered_flows.flow_by_id(command.flow)) is not None
+                )
+            ]
+        )
+
+        commands_for_newly_started_flows = (
+            await self._predict_commands_for_newly_started_flows(
+                message,
+                tracker,
+                newly_started_flows=newly_started_flows,
+                all_flows=flows,
+            )
+        )
+
+        # concatenate predicted commands
+        commands = list(
+            set(
+                commands_from_active_flow
+                + commands_for_handling_flows
+                + commands_for_newly_started_flows
+            )
+        )
+
+        return commands
+
     async def _predict_commands_for_active_flow(
         self,
         message: Message,
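Net effect of the two hunks above: the three-step pipeline (handle the active flow, decide whether to switch flows, fill slots for newly started flows) moves into the private helper _predict_commands_with_multi_step, and predict_commands shrinks to error handling around a single call. A minimal stand-alone sketch of that outer contract, with plain strings standing in for rasa's Command objects:

import asyncio
from typing import Awaitable, Callable, List


class ProviderClientAPIException(Exception):
    """Stand-in for rasa.shared.exceptions.ProviderClientAPIException."""


async def predict_commands(
    run_pipeline: Callable[[], Awaitable[List[str]]],
    has_coexistence_routing_slot: bool,
) -> List[str]:
    try:
        # the helper runs the 3-step LLM pipeline and cleans up its output
        commands = await run_pipeline()
    except ProviderClientAPIException:
        commands = ["ErrorCommand"]  # API failure is "predicted" as an error
    if not commands:
        commands = ["CannotHandleCommand"]  # empty prediction -> cannot handle
    if has_coexistence_routing_slot:
        commands.append("SetSlot(ROUTE_TO_CALM_SLOT, True)")  # coexistence routing
    return commands


async def _demo() -> None:
    async def failing_pipeline() -> List[str]:
        raise ProviderClientAPIException()

    assert await predict_commands(failing_pipeline, False) == ["ErrorCommand"]


asyncio.run(_demo())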
rasa/dialogue_understanding/generator/nlu_command_adapter.py

@@ -2,13 +2,15 @@ from typing import Dict, Text, Any, Optional, List
 
 import structlog
 
-
 from rasa.dialogue_understanding.commands import (
     Command,
     StartFlowCommand,
     SetSlotCommand,
 )
 from rasa.dialogue_understanding.commands.set_slot_command import SetSlotExtractor
+from rasa.dialogue_understanding.commands.utils import (
+    triggerable_pattern_to_command_class,
+)
 from rasa.dialogue_understanding.generator import CommandGenerator
 from rasa.engine.graph import GraphComponent, ExecutionContext
 from rasa.engine.recipes.default_recipe import DefaultV1Recipe
@@ -25,6 +27,7 @@ from rasa.shared.core.trackers import DialogueStateTracker
 from rasa.shared.nlu.constants import ENTITIES, INTENT
 from rasa.shared.nlu.training_data.message import Message
 from rasa.shared.nlu.training_data.training_data import TrainingData
+from rasa.utils.log_utils import log_llm
 
 structlogger = structlog.get_logger()
 
@@ -126,13 +129,22 @@ class NLUCommandAdapter(GraphComponent, CommandGenerator):
             clean_up_commands,
         )
 
-
+        log_llm(
+            logger=structlogger,
+            log_module="NLUCommandAdapter",
+            log_event="nlu_command_adapter.predict_commands.finished",
+            commands=commands,
+        )
+
         if commands:
             commands = clean_up_commands(
                 commands, tracker, flows, self._execution_context
             )
-
-
+            log_llm(
+                logger=structlogger,
+                log_module="NLUCommandAdapter",
+                log_event="nlu_command_adapter.clean_commands",
+                commands=commands,
             )
 
         return commands
@@ -162,7 +174,12 @@ class NLUCommandAdapter(GraphComponent, CommandGenerator):
 
         for flow in flows:
             if flow.nlu_triggers and flow.nlu_triggers.is_triggered(message):
-                commands.append(StartFlowCommand(flow.id))
+                if flow.is_rasa_default_flow:
+                    pattern_command = triggerable_pattern_to_command_class.get(flow.id)
+                    if pattern_command:
+                        commands.append(pattern_command())
+                else:
+                    commands.append(StartFlowCommand(flow.id))
 
                 # there should be just one flow that can be triggered by the predicted intent
                 # this is checked when loading the flows
@@ -180,7 +197,12 @@ class NLUCommandAdapter(GraphComponent, CommandGenerator):
         set_slot_commands = _issue_set_slot_commands(message, tracker, flows, domain)
         commands.extend(set_slot_commands)
 
-
+        log_llm(
+            logger=structlogger,
+            log_module="NLUCommandAdapter",
+            log_event="nlu_command_adapter.predict_commands",
+            commands=commands,
+        )
 
         return commands
 
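The behavioral change here is in the trigger-intent branch (hunk @@ -162,7 +174,12 @@): flows that are Rasa default patterns no longer get a plain StartFlowCommand; the new triggerable_pattern_to_command_class mapping (added in rasa/dialogue_understanding/commands/utils.py, whose contents are not shown in this excerpt) resolves a pattern-specific command class instead. A sketch of that dispatch, under assumed mapping contents:

from typing import Dict, Optional, Type


class Command:
    """Stand-in for rasa's Command base class."""


class SessionStartCommand(Command):
    """New in 3.10 per the file list (commands/session_start_command.py)."""


class StartFlowCommand(Command):
    def __init__(self, flow_id: str) -> None:
        self.flow_id = flow_id


# Assumed shape of the new mapping: default-pattern flow id -> command class.
triggerable_pattern_to_command_class: Dict[str, Type[Command]] = {
    "pattern_session_start": SessionStartCommand,  # assumed entry
}


def command_for_triggered_flow(flow_id: str, is_default_flow: bool) -> Optional[Command]:
    """Mirrors the new branch: patterns map to their own command class,
    ordinary flows still get StartFlowCommand."""
    if is_default_flow:
        pattern_command = triggerable_pattern_to_command_class.get(flow_id)
        return pattern_command() if pattern_command else None
    return StartFlowCommand(flow_id)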
rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py

@@ -1,8 +1,9 @@
 import importlib.resources
 import re
-import structlog
 from typing import Dict, Any, List, Optional, Text
 
+import structlog
+
 import rasa.shared.utils.io
 from rasa.dialogue_understanding.commands import (
     Command,
@@ -16,9 +17,6 @@ from rasa.dialogue_understanding.commands import (
     ClarifyCommand,
     CannotHandleCommand,
 )
-from rasa.dialogue_understanding.generator.llm_based_command_generator import (
-    LLMBasedCommandGenerator,
-)
 from rasa.dialogue_understanding.generator.constants import (
     LLM_CONFIG_KEY,
     USER_INPUT_CONFIG_KEY,
@@ -27,17 +25,24 @@ from rasa.dialogue_understanding.generator.constants import (
 from rasa.dialogue_understanding.generator.flow_retrieval import (
     FlowRetrieval,
 )
+from rasa.dialogue_understanding.generator.llm_based_command_generator import (
+    LLMBasedCommandGenerator,
+)
 from rasa.dialogue_understanding.stack.utils import top_flow_frame
 from rasa.engine.graph import ExecutionContext
 from rasa.engine.recipes.default_recipe import DefaultV1Recipe
 from rasa.engine.storage.resource import Resource
 from rasa.engine.storage.storage import ModelStorage
-from rasa.shared.constants import
+from rasa.shared.constants import (
+    ROUTE_TO_CALM_SLOT,
+    PROMPT_CONFIG_KEY,
+    PROMPT_TEMPLATE_CONFIG_KEY,
+)
 from rasa.shared.core.flows import FlowsList
 from rasa.shared.core.trackers import DialogueStateTracker
-from rasa.shared.nlu.constants import TEXT
-from rasa.shared.nlu.training_data.message import Message
 from rasa.shared.exceptions import ProviderClientAPIException
+from rasa.shared.nlu.constants import TEXT, LLM_COMMANDS, LLM_PROMPT
+from rasa.shared.nlu.training_data.message import Message
 from rasa.shared.utils.io import deep_container_fingerprint
 from rasa.shared.utils.llm import (
     get_prompt_template,
@@ -82,7 +87,7 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
         )
 
         # Set the prompt template
-        if config.get(
+        if config.get(PROMPT_CONFIG_KEY):
             structlogger.warning(
                 "single_step_llm_command_generator.init",
                 event_info=(
@@ -91,7 +96,11 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
                     "Please use the config parameter 'prompt_template' instead. "
                 ),
             )
-        config_prompt =
+        config_prompt = (
+            config.get(PROMPT_CONFIG_KEY)
+            or config.get(PROMPT_TEMPLATE_CONFIG_KEY)
+            or None
+        )
         self.prompt_template = prompt_template or get_prompt_template(
             config_prompt,
             DEFAULT_COMMAND_PROMPT_TEMPLATE,
@@ -104,8 +113,8 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
     def get_default_config() -> Dict[str, Any]:
         """The component's default config (see parent class for full docstring)."""
         return {
-
-
+            PROMPT_CONFIG_KEY: None,  # Legacy
+            PROMPT_TEMPLATE_CONFIG_KEY: None,
             USER_INPUT_CONFIG_KEY: None,
             LLM_CONFIG_KEY: None,
             FLOW_RETRIEVAL_KEY: FlowRetrieval.get_default_config(),
@@ -167,11 +176,51 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
             # cannot do anything if there are no flows or no tracker
             return []
 
-        # retrieve flows
         try:
-            filtered_flows = await self.filter_flows(message, flows, tracker)
+            commands = await self._predict_commands(message, flows, tracker)
         except ProviderClientAPIException:
-            return [ErrorCommand()]
+            # if command predictions resulted in API exception
+            # "predict" the ErrorCommand
+            commands = [ErrorCommand()]
+
+        if not commands:
+            # no commands are parsed or there's an invalid command
+            commands = [CannotHandleCommand()]
+
+        if tracker.has_coexistence_routing_slot:
+            # if coexistence feature is used, set the routing slot
+            commands += [SetSlotCommand(ROUTE_TO_CALM_SLOT, True)]
+
+        log_llm(
+            logger=structlogger,
+            log_module="SingleStepLLMCommandGenerator",
+            log_event="llm_command_generator.predict_commands.finished",
+            commands=commands,
+        )
+
+        return commands
+
+    async def _predict_commands(
+        self,
+        message: Message,
+        flows: FlowsList,
+        tracker: Optional[DialogueStateTracker] = None,
+    ) -> List[Command]:
+        """Predict commands using the LLM.
+
+        Args:
+            message: The message from the user.
+            flows: The flows available to the user.
+            tracker: The tracker containing the current state of the conversation.
+
+        Returns:
+            The commands generated by the llm.
+
+        Raises:
+            ProviderClientAPIException: If API calls raised an error.
+        """
+        # retrieve flows
+        filtered_flows = await self.filter_flows(message, flows, tracker)
 
         flow_prompt = self.render_template(message, tracker, filtered_flows, flows)
         log_llm(
@@ -181,14 +230,11 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=flow_prompt,
         )
 
-
-
-
-
-
-            if action_list is None:
-                return [ErrorCommand()]
-        except ProviderClientAPIException:
+        action_list = await self.invoke_llm(flow_prompt)
+        # The check for 'None' maintains compatibility with older versions
+        # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
+        # might return 'None' to indicate a failure to generate actions.
+        if action_list is None:
             return [ErrorCommand()]
 
         log_llm(
@@ -200,23 +246,26 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
 
         commands = self.parse_commands(action_list, tracker, flows)
 
-        if not commands:
-            # no commands are parsed or there's an invalid command
-            commands = [CannotHandleCommand()]
-        else:
-            # if the LLM command generator predicted valid commands and the
-            # coexistence feature is used, set the routing slot
-            if tracker.has_coexistence_routing_slot:
-                commands += [SetSlotCommand(ROUTE_TO_CALM_SLOT, True)]
+        self._update_message_parse_data_for_fine_tuning(message, commands, flow_prompt)
 
-        log_llm(
-            logger=structlogger,
-            log_module="SingleStepLLMCommandGenerator",
-            log_event="llm_command_generator.predict_commands.finished",
-            commands=commands,
-        )
         return commands
 
+    @staticmethod
+    def _update_message_parse_data_for_fine_tuning(
+        message: Message, commands: List[Command], prompt: str
+    ) -> None:
+        from rasa.llm_fine_tuning.annotation_module import preparing_fine_tuning_data
+
+        if preparing_fine_tuning_data:
+            # Add commands and prompt to the message object in order to create
+            # prompt -> commands pairs for fine-tuning
+            message.set(
+                LLM_COMMANDS,
+                [command.as_dict() for command in commands],
+                add_to_output=True,
+            )
+            message.set(LLM_PROMPT, prompt, add_to_output=True)
+
     @classmethod
     def parse_commands(
         cls, actions: Optional[str], tracker: DialogueStateTracker, flows: FlowsList
@@ -285,7 +334,11 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
     @classmethod
     def fingerprint_addon(cls: Any, config: Dict[str, Any]) -> Optional[str]:
         """Add a fingerprint of the knowledge base for the graph."""
-        config_prompt =
+        config_prompt = (
+            config.get(PROMPT_CONFIG_KEY)
+            or config.get(PROMPT_TEMPLATE_CONFIG_KEY)
+            or None
+        )
         prompt_template = get_prompt_template(
             config_prompt,
             DEFAULT_COMMAND_PROMPT_TEMPLATE,
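The new _update_message_parse_data_for_fine_tuning hook stores the rendered prompt and the predicted commands on each Message whenever fine-tuning data preparation is active (see the new rasa/llm_fine_tuning/ package and rasa/cli/llm_fine_tuning.py in the file list). A hypothetical downstream consumer could pair them up like this; the string values of LLM_COMMANDS and LLM_PROMPT are assumptions, the real ones live in rasa/shared/nlu/constants.py:

from typing import Any, Dict, List, Tuple

LLM_COMMANDS = "llm_commands"  # assumed value
LLM_PROMPT = "llm_prompt"  # assumed value


def to_prompt_command_pairs(
    annotated_parse_data: List[Dict[str, Any]],
) -> List[Tuple[str, List[Dict[str, Any]]]]:
    """Turn annotated parse data into (prompt, commands) fine-tuning pairs."""
    return [
        (data[LLM_PROMPT], data[LLM_COMMANDS])
        for data in annotated_parse_data
        if LLM_PROMPT in data and LLM_COMMANDS in data
    ]


# Example: one annotated message yields one training pair.
pairs = to_prompt_command_pairs(
    [{LLM_PROMPT: "…rendered prompt…", LLM_COMMANDS: [{"command": "start flow"}]}]
)
assert pairs[0][1] == [{"command": "start flow"}]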
rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml

@@ -55,7 +55,7 @@ responses:
         template: jinja
 
   utter_free_chitchat_response:
-    - text:
+    - text: placeholder_this_utterance_needs_the_rephraser
       metadata:
         rephrase: True
         rephrase_prompt: |
@@ -160,9 +160,9 @@ flows:
         action: action_run_slot_rejections
       - action: validate_{{context.collect}}
         next:
-
-
-
+          - if: "slots.{{context.collect}} is not null"
+            then: END
+          - else: ask_collect
       - id: ask_collect
         action: "{{context.utter}}"
       - action: "{{context.collect_action}}"
@@ -205,17 +205,17 @@ flows:
     steps:
       - noop: true
        next:
-
-
-
-
-
-
-
-
-
-
-
+          - if: "'{{context.error_type}}' = 'rasa_internal_error_user_input_too_long'"
+            then:
+              - action: utter_user_input_too_long_error_rasa
+                next: END
+          - if: "'{{context.error_type}}' = 'rasa_internal_error_user_input_empty'"
+            then:
+              - action: utter_user_input_empty_error_rasa
+                next: END
+          - else:
+              - action: utter_internal_error_rasa
+                next: END
 
 
   pattern_restart:
rasa/dialogue_understanding/patterns/session_start.py (new file)

@@ -0,0 +1,37 @@
+from __future__ import annotations
+from dataclasses import dataclass
+from typing import Any, Dict
+
+from rasa.dialogue_understanding.stack.frames import PatternFlowStackFrame
+from rasa.shared.constants import RASA_DEFAULT_FLOW_PATTERN_PREFIX
+
+
+FLOW_PATTERN_SESSION_START = RASA_DEFAULT_FLOW_PATTERN_PREFIX + "session_start"
+
+
+@dataclass
+class SessionStartPatternFlowStackFrame(PatternFlowStackFrame):
+    """A flow stack frame that can get added at the beginning of the conversation."""
+
+    flow_id: str = FLOW_PATTERN_SESSION_START
+    """The ID of the flow."""
+
+    @classmethod
+    def type(cls) -> str:
+        """Returns the type of the frame."""
+        return FLOW_PATTERN_SESSION_START
+
+    @staticmethod
+    def from_dict(data: Dict[str, Any]) -> SessionStartPatternFlowStackFrame:
+        """Creates a `DialogueStackFrame` from a dictionary.
+
+        Args:
+            data: The dictionary to create the `DialogueStackFrame` from.
+
+        Returns:
+            The created `DialogueStackFrame`.
+        """
+        return SessionStartPatternFlowStackFrame(
+            frame_id=data["frame_id"],
+            step_id=data["step_id"],
+        )
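A quick round-trip of the new frame, assuming rasa-pro 3.10 is installed; RASA_DEFAULT_FLOW_PATTERN_PREFIX is "pattern_" in current releases, so the frame's flow id is expected to be "pattern_session_start":

from rasa.dialogue_understanding.patterns.session_start import (
    FLOW_PATTERN_SESSION_START,
    SessionStartPatternFlowStackFrame,
)

# from_dict only reads the two keys shown in the hunk above
frame = SessionStartPatternFlowStackFrame.from_dict(
    {"frame_id": "some-frame-id", "step_id": "START"}
)
assert frame.flow_id == FLOW_PATTERN_SESSION_START
assert SessionStartPatternFlowStackFrame.type() == FLOW_PATTERN_SESSION_START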
rasa/dialogue_understanding/processor/command_processor.py

@@ -487,23 +487,22 @@ def clean_up_slot_command(
     stack = tracker.stack
 
     resulting_commands = commands_so_far[:]
+    if command.name in slots_so_far and command.name != ROUTE_TO_CALM_SLOT:
+        slot = tracker.slots.get(command.name)
+        if slot is None:
+            structlogger.debug(
+                "command_processor.clean_up_slot_command.skip_command_slot_not_in_domain",
+                command=command,
+            )
+            return resulting_commands
 
-    slot = tracker.slots.get(command.name)
-    if slot is None:
-        structlogger.debug(
-            "command_processor.clean_up_slot_command.skip_command_slot_not_in_domain",
-            command=command,
-        )
-        return resulting_commands
-
-    if not should_slot_be_set(slot, command):
-        cannot_handle = CannotHandleCommand(reason=CANNOT_HANDLE_REASON)
-        if cannot_handle not in resulting_commands:
-            resulting_commands.append(cannot_handle)
+        if not should_slot_be_set(slot, command):
+            cannot_handle = CannotHandleCommand(reason=CANNOT_HANDLE_REASON)
+            if cannot_handle not in resulting_commands:
+                resulting_commands.append(cannot_handle)
 
-
+            return resulting_commands
 
-    if command.name in slots_so_far and command.name != ROUTE_TO_CALM_SLOT:
         current_collect_info = get_current_collect_step(stack, all_flows)
 
         if current_collect_info and current_collect_info.collect == command.name: