rasa-pro 3.13.0.dev1__py3-none-any.whl → 3.13.0.dev3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of rasa-pro might be problematic.
- rasa/cli/run.py +10 -6
- rasa/cli/utils.py +7 -0
- rasa/core/actions/action.py +0 -6
- rasa/core/channels/channel.py +30 -0
- rasa/core/channels/voice_ready/audiocodes.py +52 -17
- rasa/core/channels/voice_ready/jambonz.py +25 -5
- rasa/core/channels/voice_ready/jambonz_protocol.py +4 -0
- rasa/core/channels/voice_stream/audiocodes.py +53 -9
- rasa/core/channels/voice_stream/genesys.py +146 -16
- rasa/core/information_retrieval/faiss.py +6 -62
- rasa/core/nlg/contextual_response_rephraser.py +3 -0
- rasa/core/policies/enterprise_search_policy.py +10 -1
- rasa/core/policies/flows/flow_executor.py +3 -38
- rasa/core/policies/intentless_policy.py +3 -0
- rasa/core/processor.py +27 -6
- rasa/core/utils.py +53 -0
- rasa/dialogue_understanding/coexistence/llm_based_router.py +8 -0
- rasa/dialogue_understanding/commands/cancel_flow_command.py +4 -59
- rasa/dialogue_understanding/commands/knowledge_answer_command.py +2 -2
- rasa/dialogue_understanding/commands/start_flow_command.py +0 -41
- rasa/dialogue_understanding/generator/command_generator.py +67 -0
- rasa/dialogue_understanding/generator/flow_retrieval.py +1 -4
- rasa/dialogue_understanding/generator/llm_based_command_generator.py +2 -12
- rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +13 -0
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2 +1 -1
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 +2 -5
- rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +22 -10
- rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +27 -12
- rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +0 -61
- rasa/dialogue_understanding/processor/command_processor.py +7 -65
- rasa/dialogue_understanding/stack/utils.py +0 -38
- rasa/e2e_test/utils/validation.py +3 -3
- rasa/llm_fine_tuning/paraphrasing/conversation_rephraser.py +3 -0
- rasa/shared/core/constants.py +0 -8
- rasa/shared/core/domain.py +12 -3
- rasa/shared/core/flows/flow.py +0 -17
- rasa/shared/core/flows/flows_yaml_schema.json +3 -38
- rasa/shared/core/flows/steps/collect.py +5 -18
- rasa/shared/core/flows/utils.py +1 -16
- rasa/shared/core/slot_mappings.py +11 -5
- rasa/shared/nlu/constants.py +0 -1
- rasa/shared/utils/common.py +11 -1
- rasa/shared/utils/constants.py +3 -0
- rasa/shared/utils/llm.py +69 -23
- rasa/validator.py +1 -123
- rasa/version.py +1 -1
- {rasa_pro-3.13.0.dev1.dist-info → rasa_pro-3.13.0.dev3.dist-info}/METADATA +2 -2
- {rasa_pro-3.13.0.dev1.dist-info → rasa_pro-3.13.0.dev3.dist-info}/RECORD +51 -54
- rasa/core/actions/action_handle_digressions.py +0 -164
- rasa/dialogue_understanding/commands/handle_digressions_command.py +0 -144
- rasa/dialogue_understanding/patterns/handle_digressions.py +0 -81
- {rasa_pro-3.13.0.dev1.dist-info → rasa_pro-3.13.0.dev3.dist-info}/NOTICE +0 -0
- {rasa_pro-3.13.0.dev1.dist-info → rasa_pro-3.13.0.dev3.dist-info}/WHEEL +0 -0
- {rasa_pro-3.13.0.dev1.dist-info → rasa_pro-3.13.0.dev3.dist-info}/entry_points.txt +0 -0
rasa/dialogue_understanding/generator/llm_based_command_generator.py
CHANGED

@@ -12,9 +12,6 @@ from rasa.dialogue_understanding.commands import (
     SetSlotCommand,
     StartFlowCommand,
 )
-from rasa.dialogue_understanding.commands.handle_digressions_command import (
-    HandleDigressionsCommand,
-)
 from rasa.dialogue_understanding.constants import KEY_MINIMIZE_NUM_CALLS
 from rasa.dialogue_understanding.generator import CommandGenerator
 from rasa.dialogue_understanding.generator._jinja_filters import to_json_escaped_string
@@ -609,16 +606,9 @@ class LLMBasedCommandGenerator(
     ) -> bool:
         """Check if the LLM current commands should be merged with the prior commands.

-        This can be done if there are no prior start flow commands
-        no prior handle digressions commands.
+        This can be done if there are no prior start flow commands.
         """
-        prior_handle_digressions = [
-            command
-            for command in prior_commands
-            if isinstance(command, HandleDigressionsCommand)
-        ]
-
-        return not prior_start_flow_names and not prior_handle_digressions
+        return not prior_start_flow_names

     def _check_start_flow_command_overlap(
         self,
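Taken together with the deleted files in the listing above (handle_digressions_command.py, action_handle_digressions.py, patterns/handle_digressions.py), this hunk removes the generator's last references to digression handling, so the merge check only has to guard against prior start-flow commands. A minimal sketch of the resulting predicate, assuming the caller still collects the prior start-flow names as before:

# Minimal sketch; the real method lives on LLMBasedCommandGenerator.
from typing import List

def can_merge_with_prior_commands(prior_start_flow_names: List[str]) -> bool:
    # Before this release the check also required that no
    # HandleDigressionsCommand was among the prior commands; that command
    # type no longer exists.
    return not prior_start_flow_names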
rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py
CHANGED

@@ -52,6 +52,10 @@ from rasa.shared.exceptions import ProviderClientAPIException
 from rasa.shared.nlu.constants import TEXT
 from rasa.shared.nlu.training_data.message import Message
 from rasa.shared.providers.llm.llm_response import LLMResponse
+from rasa.shared.utils.constants import (
+    LOG_COMPONENT_SOURCE_METHOD_FINGERPRINT_ADDON,
+    LOG_COMPONENT_SOURCE_METHOD_INIT,
+)
 from rasa.shared.utils.io import deep_container_fingerprint, raise_deprecation_warning
 from rasa.shared.utils.llm import (
     allowed_values_for_slot,
@@ -330,6 +334,8 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
         return get_prompt_template(
             config.get("prompt_templates", {}).get(key, {}).get(FILE_PATH_KEY),
             default_value,
+            log_source_component=MultiStepLLMCommandGenerator.__name__,
+            log_source_method=LOG_COMPONENT_SOURCE_METHOD_INIT,
         )

     @classmethod
@@ -786,17 +792,24 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
     @classmethod
     def fingerprint_addon(cls, config: Dict[str, Any]) -> Optional[str]:
         """Add a fingerprint for the graph."""
+        get_prompt_template_log_params = {
+            "log_source_component": MultiStepLLMCommandGenerator.__name__,
+            "log_source_method": LOG_COMPONENT_SOURCE_METHOD_FINGERPRINT_ADDON,
+        }
+
         handle_flows_template = get_prompt_template(
             config.get("prompt_templates", {})
             .get(HANDLE_FLOWS_KEY, {})
             .get(FILE_PATH_KEY),
             DEFAULT_HANDLE_FLOWS_TEMPLATE,
+            **get_prompt_template_log_params,
         )
         fill_slots_template = get_prompt_template(
             config.get("prompt_templates", {})
             .get(FILL_SLOTS_KEY, {})
             .get(FILE_PATH_KEY),
             DEFAULT_FILL_SLOTS_TEMPLATE,
+            **get_prompt_template_log_params,
         )

         llm_config = resolve_model_client_config(
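This file and the two single-step generators below share one recurring change: prompt-template lookups now pass a log_source_component and log_source_method, so template-loading log lines record which component requested a template and from which code path. A sketch of the two new constants, whose values can be inferred from the Literal["init", "fingerprint_addon"] type hint that appears later in this diff (their real definitions in rasa/shared/utils/constants.py are not part of the shown hunks):

# Assumed definitions, reconstructed from the Literal type hint in this diff;
# rasa/shared/utils/constants.py itself is not shown here.
LOG_COMPONENT_SOURCE_METHOD_INIT = "init"
LOG_COMPONENT_SOURCE_METHOD_FINGERPRINT_ADDON = "fingerprint_addon"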
rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2
CHANGED

@@ -8,7 +8,7 @@ Your task is to analyze the current conversation context and generate a list of
 * `set slot slot_name slot_value`: Slot setting. For example, `set slot transfer_money_recipient Freddy`. Can be used to correct and change previously set values.
 * `cancel flow`: Cancelling the current flow.
 * `disambiguate flows flow_name1 flow_name2 ... flow_name_n`: Disambiguate which flow should be started when user input is ambiguous by listing the potential flows as options. For example, `disambiguate flows list_contacts add_contact remove_contact ...` if the user just wrote "contacts".
-* `
+* `provide info`: Responding to the user's questions by supplying relevant information, such as answering FAQs or explaining services.
 * `offtopic reply`: Responding to casual or social user messages that are unrelated to any flows, engaging in friendly conversation and addressing off-topic remarks.
 * `hand over`: Handing over to a human, in case the user seems frustrated or explicitly asks to speak to one.

rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2
CHANGED

@@ -16,7 +16,7 @@ Use the following structured data:
 * `set slot slot_name slot_value`: Slot setting. For example, `set slot transfer_money_recipient Freddy`. Can be used to correct and change previously set values.
 * `cancel flow`: Cancelling the current flow.
 * `disambiguate flows flow_name1 flow_name2 ... flow_name_n`: Disambiguate which flow should be started when user input is ambiguous by listing the potential flows as options. For example, `disambiguate flows list_contacts add_contact remove_contact ...` if the user just wrote "contacts".
-* `
+* `provide info`: Responding to the user's questions by supplying relevant information, such as answering FAQs or explaining services.
 * `offtopic reply`: Responding to casual or social user messages that are unrelated to any flows, engaging in friendly conversation and addressing off-topic remarks.
 * `hand over`: Handing over to a human, in case the user seems frustrated or explicitly asks to speak to one.

@@ -27,11 +27,8 @@ Use the following structured data:
 * For categorical slots try to match the user message with allowed slot values. Use "other" if you cannot match it.
 * Set the boolean slots based on the user response. Map positive responses to `True`, and negative to `False`.
 * Extract text slot values exactly as provided by the user. Avoid assumptions, format changes, or partial extractions.
-* Use clarification in ambiguous cases.
-* Use `disambiguate flows` only when multiple flows could fit the same message (e.g., "card" could mean `block_card` or `replace_card`).
-* A user asking a question does not automatically imply that they want `search and reply`. The objective is to help them complete a business process if its possible to do so via a flow.
-* **Flow Priority**: If a user message can be addressed by starting a flow (even if it looks like a general question), ALWAYS start the flow first. Example: If the user says "How do I activate my card?", use `start flow activate_card` instead of `search and reply`. Only use `search and reply` if no flow matches the request.
 * Only use information provided by the user.
+* Use clarification in ambiguous cases.
 * Multiple flows can be started. If a user wants to digress into a second flow, you do not need to cancel the current flow.
 * Do not cancel the flow unless the user explicitly requests it.
 * Strictly adhere to the provided action format.
rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py
CHANGED

@@ -1,5 +1,5 @@
 import copy
-from typing import Any, Dict, List, Optional, Text
+from typing import Any, Dict, List, Literal, Optional, Text

 import structlog

@@ -58,6 +58,10 @@ from rasa.shared.exceptions import ProviderClientAPIException
 from rasa.shared.nlu.constants import LLM_COMMANDS, LLM_PROMPT, TEXT
 from rasa.shared.nlu.training_data.message import Message
 from rasa.shared.providers.llm.llm_response import LLMResponse
+from rasa.shared.utils.constants import (
+    LOG_COMPONENT_SOURCE_METHOD_FINGERPRINT_ADDON,
+    LOG_COMPONENT_SOURCE_METHOD_INIT,
+)
 from rasa.shared.utils.io import deep_container_fingerprint
 from rasa.shared.utils.llm import (
     allowed_values_for_slot,
@@ -187,8 +191,8 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
         )

         # Get the prompt template from the config or the default prompt template.
-        self.prompt_template = self.resolve_component_prompt_template(
-            self.config, prompt_template
+        self.prompt_template = self._resolve_component_prompt_template(
+            self.config, prompt_template, log_context=LOG_COMPONENT_SOURCE_METHOD_INIT
         )

         # Set the command syntax version to v2
@@ -539,7 +543,9 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
         # and update the llm config with the resolved llm config.
         _config_copy = copy.deepcopy(config)
         _config_copy[LLM_CONFIG_KEY] = llm_config
-        prompt_template = cls.resolve_component_prompt_template(_config_copy)
+        prompt_template = cls._resolve_component_prompt_template(
+            _config_copy, log_context=LOG_COMPONENT_SOURCE_METHOD_FINGERPRINT_ADDON
+        )

         return deep_container_fingerprint(
             [prompt_template, llm_config, embedding_config]
@@ -555,20 +561,26 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
         return CommandSyntaxVersion.v2

     @staticmethod
-    def resolve_component_prompt_template(
-        config: Dict[str, Any],
+    def _resolve_component_prompt_template(
+        config: Dict[str, Any],
+        prompt_template: Optional[str] = None,
+        log_context: Optional[Literal["init", "fingerprint_addon"]] = None,
     ) -> Optional[str]:
         """Get the prompt template from the config or the default prompt template."""
         # Get the default prompt template based on the model name.
         default_command_prompt_template = get_default_prompt_template_based_on_model(
-            config.get(LLM_CONFIG_KEY, {}) or {},
-            MODEL_PROMPT_MAPPER,
-            DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME,
-            FALLBACK_COMMAND_PROMPT_TEMPLATE_FILE_NAME,
+            llm_config=config.get(LLM_CONFIG_KEY, {}) or {},
+            model_prompt_mapping=MODEL_PROMPT_MAPPER,
+            default_prompt_path=DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME,
+            fallback_prompt_path=FALLBACK_COMMAND_PROMPT_TEMPLATE_FILE_NAME,
+            log_source_component=CompactLLMCommandGenerator.__name__,
+            log_source_method=log_context,
         )

         # Return the prompt template either from the config or the default prompt.
         return prompt_template or get_prompt_template(
             config.get(PROMPT_TEMPLATE_CONFIG_KEY),
             default_command_prompt_template,
+            log_source_component=CompactLLMCommandGenerator.__name__,
+            log_source_method=log_context,
         )
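The renamed _resolve_component_prompt_template gives the compact generator a single place that decides which prompt text wins. A self-contained sketch of that resolution order; the helper body and the literal config keys are stand-ins, not rasa APIs (assuming LLM_CONFIG_KEY is "llm" and PROMPT_TEMPLATE_CONFIG_KEY is "prompt_template"):

from typing import Any, Dict, Optional

def _default_for_model(config: Dict[str, Any]) -> str:
    # Stand-in for get_default_prompt_template_based_on_model(): pick a
    # model-specific default prompt, else a generic fallback.
    model = (config.get("llm") or {}).get("model")
    return f"<default template for {model}>" if model else "<fallback template>"

def resolve_prompt_template(
    config: Dict[str, Any], prompt_template: Optional[str] = None
) -> str:
    # 1. A template passed in directly (e.g. restored when a trained model
    #    is loaded) wins outright.
    if prompt_template is not None:
        return prompt_template
    # 2. A user-configured template path overrides the model-based default.
    configured = config.get("prompt_template")
    if configured is not None:
        return f"<template read from {configured}>"
    # 3. Otherwise fall back to the default chosen for the configured model.
    return _default_for_model(config)

print(resolve_prompt_template({"llm": {"model": "gpt-4o"}}))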
rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py
CHANGED

@@ -1,5 +1,5 @@
 import importlib.resources
-from typing import Any, Dict, Optional, Text
+from typing import Any, Dict, Literal, Optional, Text

 import structlog

@@ -25,8 +25,12 @@ from rasa.shared.constants import (
     PROMPT_CONFIG_KEY,
     PROMPT_TEMPLATE_CONFIG_KEY,
 )
+from rasa.shared.utils.constants import LOG_COMPONENT_SOURCE_METHOD_FINGERPRINT_ADDON
 from rasa.shared.utils.io import deep_container_fingerprint
-from rasa.shared.utils.llm import get_prompt_template, resolve_model_client_config
+from rasa.shared.utils.llm import (
+    get_prompt_template,
+    resolve_model_client_config,
+)

 DEFAULT_COMMAND_PROMPT_TEMPLATE = importlib.resources.read_text(
     "rasa.dialogue_understanding.generator.prompt_templates",
@@ -72,9 +76,6 @@ class SingleStepLLMCommandGenerator(CompactLLMCommandGenerator):
                 "Please use the config parameter 'prompt_template' instead. "
             ),
         )
-        self.prompt_template = self.resolve_component_prompt_template(
-            config, prompt_template
-        )

         # Set the command syntax version to v1
         CommandSyntaxManager.set_syntax_version(
@@ -95,7 +96,9 @@ class SingleStepLLMCommandGenerator(CompactLLMCommandGenerator):
     @classmethod
     def fingerprint_addon(cls: Any, config: Dict[str, Any]) -> Optional[str]:
         """Add a fingerprint for the graph."""
-        prompt_template = cls.resolve_component_prompt_template(config)
+        prompt_template = cls._resolve_component_prompt_template(
+            config, log_context=LOG_COMPONENT_SOURCE_METHOD_FINGERPRINT_ADDON
+        )
         llm_config = resolve_model_client_config(
             config.get(LLM_CONFIG_KEY), SingleStepLLMCommandGenerator.__name__
         )
@@ -117,17 +120,29 @@ class SingleStepLLMCommandGenerator(CompactLLMCommandGenerator):
         return CommandSyntaxVersion.v1

     @staticmethod
-    def resolve_component_prompt_template(
-        config: Dict[str, Any],
+    def _resolve_component_prompt_template(
+        config: Dict[str, Any],
+        prompt_template: Optional[str] = None,
+        log_context: Optional[Literal["init", "fingerprint_addon"]] = None,
     ) -> Optional[str]:
         """Get the prompt template from the config or the default prompt template."""
-        #
-        prompt_template = (
+        # Case when model is being loaded
+        if prompt_template is not None:
+            return prompt_template
+
+        # The prompt can be configured in the config via the "prompt" (deprecated) or
+        # "prompt_template" properties
+        prompt_template_path = (
             config.get(PROMPT_CONFIG_KEY)
             or config.get(PROMPT_TEMPLATE_CONFIG_KEY)
             or None
         )
-        return get_prompt_template(
-            prompt_template,
+
+        # Try to load the template from the given path or fallback to the default for
+        # the component
+        return get_prompt_template(
+            prompt_template_path,
             DEFAULT_COMMAND_PROMPT_TEMPLATE,
+            log_source_component=SingleStepLLMCommandGenerator.__name__,
+            log_source_method=log_context,
         )
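Note the lookup order preserved in the last hunk: the deprecated "prompt" key still takes precedence over "prompt_template" when both are set. A tiny sketch of that precedence (the literal key strings are assumed values of PROMPT_CONFIG_KEY and PROMPT_TEMPLATE_CONFIG_KEY; the deprecation warning earlier in this file confirms 'prompt_template' is the supported name):

from typing import Any, Dict, Optional

def configured_template_path(config: Dict[str, Any]) -> Optional[str]:
    # The deprecated key wins if both are present.
    return config.get("prompt") or config.get("prompt_template") or None

assert configured_template_path(
    {"prompt": "a.jinja2", "prompt_template": "b.jinja2"}
) == "a.jinja2"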
rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml
CHANGED

@@ -1,17 +1,6 @@
 version: "3.1"
 responses:

-  utter_ask_continue_previous_flow:
-  - text: "Confirm if you would like to continue with the initial topic: {{context.interrupted_flow_id}}?"
-    metadata:
-      rephrase: True
-      template: jinja
-    buttons:
-      - title: Continue with the previous topic.
-        payload: /SetSlots(continue_previous_flow=True)
-      - title: Switch to new topic.
-        payload: /SetSlots(continue_previous_flow=False)
-
   utter_ask_rephrase:
   - text: I’m sorry I am unable to understand you, could you please rephrase?

@@ -20,20 +9,6 @@ responses:
     metadata:
       rephrase: True

-  utter_block_digressions:
-  - text: "We can look into {{ context.interrupting_flow_id }} later. Let's focus on the current topic: {{ context.interrupted_flow_id }}."
-    metadata:
-      rephrase: True
-      template: jinja
-  - text: "Let's continue with the current topic: {{ context.interrupted_flow_id }}."
-    condition:
-      - type: slot
-        name: continue_previous_flow
-        value: True
-    metadata:
-      rephrase: True
-      template: jinja
-
   utter_boolean_slot_rejection:
   - text: "Sorry, the value you provided, `{{value}}`, is not valid. Please respond with a valid value."
     metadata:
@@ -60,12 +35,6 @@ responses:
       rephrase: True
       template: jinja

-  utter_continue_interruption:
-  - text: "Let's continue with the chosen topic instead: {{ context.interrupting_flow_id }}."
-    metadata:
-      rephrase: True
-      template: jinja
-
   utter_corrected_previous_input:
   - text: "Ok, I am updating {{ context.corrected_slots.keys()|join(', ') }} to {{ context.new_slot_values | join(', ') }} respectively."
     metadata:
@@ -150,10 +119,6 @@ slots:
     type: float
     initial_value: 0.0
     max_value: 1000000
-  continue_previous_flow:
-    type: bool
-    mappings:
-      - type: from_llm

 flows:
   pattern_cancel_flow:
@@ -197,7 +162,6 @@ flows:
     steps:
       - action: action_clarify_flows
      - action: utter_clarification_options_rasa
-      - action: action_listen

   pattern_code_change:
     description: Conversation repair flow for cleaning the stack after an assistant update
@@ -247,31 +211,6 @@
         next: END
       - else: END

-  pattern_handle_digressions:
-    description: Conversation repair flow for handling digressions
-    name: pattern handle digressions
-    steps:
-      - noop: true
-        id: branching
-        next:
-          - if: context.ask_confirm_digressions contains context.interrupting_flow_id
-            then: continue_previous_flow
-          - if: context.block_digressions contains context.interrupting_flow_id
-            then: block_digression
-          - else: continue_digression
-      - id: continue_previous_flow
-        collect: continue_previous_flow
-        next:
-          - if: slots.continue_previous_flow
-            then: block_digression
-          - else: continue_digression
-      - id: block_digression
-        action: action_block_digression
-        next: END
-      - id: continue_digression
-        action: action_continue_digression
-        next: END
-
   pattern_human_handoff:
     description: Conversation repair flow for switching users to a human agent if their request can't be handled
     name: pattern human handoff
rasa/dialogue_understanding/processor/command_processor.py
CHANGED

@@ -18,9 +18,6 @@ from rasa.dialogue_understanding.commands import (
 from rasa.dialogue_understanding.commands.handle_code_change_command import (
     HandleCodeChangeCommand,
 )
-from rasa.dialogue_understanding.commands.handle_digressions_command import (
-    HandleDigressionsCommand,
-)
 from rasa.dialogue_understanding.commands.set_slot_command import SetSlotExtractor
 from rasa.dialogue_understanding.commands.utils import (
     create_validate_frames_from_slot_set_events,
@@ -454,21 +451,7 @@ def clean_up_commands(
             )
             continue

-        if should_add_handle_digressions_command(tracker, all_flows, top_flow_id):
-            handle_digression_command = HandleDigressionsCommand(flow=command.flow)
-            if handle_digression_command in clean_commands:
-                structlogger.debug(
-                    "command_processor.clean_up_commands.skip_handle_digressions.command_already_present",
-                    command=handle_digression_command,
-                )
-                continue
-            clean_commands.append(handle_digression_command)
-            structlogger.debug(
-                "command_processor.clean_up_commands.push_handle_digressions",
-                command=command,
-            )
-        else:
-            clean_commands.append(command)
+        clean_commands.append(command)

         # handle chitchat command differently from other free-form answer commands
         elif isinstance(command, ChitChatAnswerCommand):
@@ -503,21 +486,9 @@ def clean_up_commands(
     # when coexistence is enabled, by default there will be a SetSlotCommand
     # for the ROUTE_TO_CALM_SLOT slot.
     if tracker.has_coexistence_routing_slot and len(clean_commands) > 2:
-        clean_commands = filter_cannot_handle_command_for_skipped_slots(clean_commands)
+        clean_commands = filter_cannot_handle_command(clean_commands)
     elif not tracker.has_coexistence_routing_slot and len(clean_commands) > 1:
-        clean_commands = filter_cannot_handle_command_for_skipped_slots(clean_commands)
-
-    # remove cancel flow when there is a handle digression command
-    # otherwise the cancel command will cancel the active flow which defined a specific
-    # behavior for the digression
-    if contains_command(clean_commands, HandleDigressionsCommand) and contains_command(
-        clean_commands, CancelFlowCommand
-    ):
-        clean_commands = [
-            command
-            for command in clean_commands
-            if not isinstance(command, CancelFlowCommand)
-        ]
+        clean_commands = filter_cannot_handle_command(clean_commands)

     clean_commands = ensure_max_number_of_command_type(
         clean_commands, RepeatBotMessagesCommand, 1
@@ -857,12 +828,12 @@ def should_slot_be_set(
     return True


-def filter_cannot_handle_command_for_skipped_slots(
+def filter_cannot_handle_command(
     clean_commands: List[Command],
 ) -> List[Command]:
-    """Filter out a 'cannot handle' command
+    """Filter out a 'cannot handle' command.

-    This is used to filter out a 'cannot handle' command
+    This is used to filter out a 'cannot handle' command
     in case other commands are present.

     Returns:
@@ -871,34 +842,5 @@ def filter_cannot_handle_command_for_skipped_slots(
     return [
         command
         for command in clean_commands
-        if not (
-            isinstance(command, CannotHandleCommand)
-            and command.reason
-            and CANNOT_HANDLE_REASON == command.reason
-        )
+        if not isinstance(command, CannotHandleCommand)
     ]
-
-
-def should_add_handle_digressions_command(
-    tracker: DialogueStateTracker, all_flows: FlowsList, top_flow_id: str
-) -> bool:
-    """Check if a handle digressions command should be added to the commands.
-
-    The command should replace a StartFlow command only if we are at a collect step of
-    a flow and a new flow is predicted by the command generator to start.
-    """
-    current_flow = all_flows.flow_by_id(top_flow_id)
-    current_flow_condition = current_flow and (
-        current_flow.ask_confirm_digressions or current_flow.block_digressions
-    )
-
-    collect_info = get_current_collect_step(tracker.stack, all_flows)
-
-    if collect_info and (
-        collect_info.ask_confirm_digressions
-        or collect_info.block_digressions
-        or current_flow_condition
-    ):
-        return True
-
-    return False
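The renamed filter is also broader: it now drops every CannotHandleCommand when other commands are present, not only those carrying the old skipped-slots reason (the one-line removal in rasa/shared/nlu/constants.py in the listing is likely the matching CANNOT_HANDLE_REASON constant). A runnable sketch with a stub command class; the real hierarchy lives in rasa.dialogue_understanding.commands:

from dataclasses import dataclass
from typing import List, Optional

@dataclass
class CannotHandleCommand:
    reason: Optional[str] = None

def filter_cannot_handle_command(commands: List[object]) -> List[object]:
    # Matches the new list comprehension in the diff: no reason check.
    return [c for c in commands if not isinstance(c, CannotHandleCommand)]

mixed = [CannotHandleCommand("any_reason"), "set slot", "start flow"]
assert filter_cannot_handle_command(mixed) == ["set slot", "start flow"]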
rasa/dialogue_understanding/stack/utils.py
CHANGED

@@ -4,9 +4,6 @@ from typing import List, Optional, Set, Tuple
 from rasa.dialogue_understanding.patterns.collect_information import (
     CollectInformationPatternFlowStackFrame,
 )
-from rasa.dialogue_understanding.patterns.continue_interrupted import (
-    ContinueInterruptedPatternFlowStackFrame,
-)
 from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack
 from rasa.dialogue_understanding.stack.frames import (
     BaseFlowStackFrame,
@@ -221,38 +218,3 @@ def get_collect_steps_excluding_ask_before_filling_for_active_flow(
         for step in active_flow.get_collect_steps()
         if not step.ask_before_filling
     )
-
-
-def remove_digression_from_stack(stack: DialogueStack, flow_id: str) -> DialogueStack:
-    """Remove a specific flow frame from the stack and other frames that reference it.
-
-    The main use-case is to prevent duplicate digressions from being added to the stack.
-
-    Args:
-        stack: The dialogue stack.
-        flow_id: The flow to remove.
-
-    Returns:
-        The updated dialogue stack.
-    """
-    updated_stack = stack.copy()
-    original_frames = updated_stack.frames[:]
-    found_digression_index = -1
-    for index, frame in enumerate(original_frames):
-        if isinstance(frame, BaseFlowStackFrame) and frame.flow_id == flow_id:
-            updated_stack.frames.pop(index)
-            found_digression_index = index
-
-        # we also need to remove the `ContinueInterruptedPatternFlowStackFrame`
-        elif (
-            isinstance(frame, ContinueInterruptedPatternFlowStackFrame)
-            and frame.previous_flow_name == flow_id
-            and found_digression_index + 1 == index
-        ):
-            # we know that this frame is always added after the digressing flow frame
-            # that was blocked previously by action_block_digressions,
-            # so this check would occur after the digressing flow was popped.
-            # Therefore, we need to update the index dynamically before popping.
-            updated_stack.frames.pop(index - 1)
-
-    return updated_stack
rasa/e2e_test/utils/validation.py
CHANGED

@@ -7,6 +7,7 @@ import structlog
 import rasa.shared.utils.io
 from rasa.e2e_test.constants import SCHEMA_FILE_PATH
 from rasa.e2e_test.e2e_test_case import Fixture, Metadata
+from rasa.exceptions import ModelNotFound
 from rasa.shared.utils.yaml import read_schema_file

 if TYPE_CHECKING:
@@ -152,10 +153,9 @@ def validate_model_path(model_path: Optional[str], parameter: str, default: str)
         return model_path

     if model_path and not Path(model_path).exists():
-        rasa.shared.utils.io.raise_warning(
+        raise ModelNotFound(
             f"The provided model path '{model_path}' could not be found. "
-
-            UserWarning,
+            "Provide an existing model path."
         )

     elif model_path is None:
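The behavioral change here is that a bad model path is now a hard error: validate_model_path raises the newly imported ModelNotFound instead of emitting a UserWarning. A sketch of the caller-side impact, with illustrative argument values (the signature matches the hunk header above):

from rasa.exceptions import ModelNotFound
from rasa.e2e_test.utils.validation import validate_model_path

try:
    validate_model_path("does/not/exist.tar.gz", "model", "models/")
except ModelNotFound as err:
    print(f"e2e test run aborted: {err}")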
rasa/llm_fine_tuning/paraphrasing/conversation_rephraser.py
CHANGED

@@ -19,6 +19,7 @@ from rasa.shared.constants import (
 )
 from rasa.shared.exceptions import ProviderClientAPIException
 from rasa.shared.providers.mappings import OPENAI_PROVIDER
+from rasa.shared.utils.constants import LOG_COMPONENT_SOURCE_METHOD_INIT
 from rasa.shared.utils.llm import (
     USER,
     get_prompt_template,
@@ -54,6 +55,8 @@ class ConversationRephraser:
         self.prompt_template = get_prompt_template(
             self.config.get(PROMPT_TEMPLATE_CONFIG_KEY),
             DEFAULT_REPHRASING_PROMPT_TEMPLATE,
+            log_source_component=ConversationRephraser.__name__,
+            log_source_method=LOG_COMPONENT_SOURCE_METHOD_INIT,
         )

     @staticmethod
rasa/shared/core/constants.py
CHANGED

@@ -52,8 +52,6 @@ ACTION_TRIGGER_CHITCHAT = "action_trigger_chitchat"
 ACTION_RESET_ROUTING = "action_reset_routing"
 ACTION_HANGUP = "action_hangup"
 ACTION_REPEAT_BOT_MESSAGES = "action_repeat_bot_messages"
-ACTION_BLOCK_DIGRESSION = "action_block_digression"
-ACTION_CONTINUE_DIGRESSION = "action_continue_digression"

 ACTION_METADATA_EXECUTION_SUCCESS = "execution_success"
 ACTION_METADATA_EXECUTION_ERROR_MESSAGE = "execution_error_message"
@@ -84,8 +82,6 @@ DEFAULT_ACTION_NAMES = [
     ACTION_RESET_ROUTING,
     ACTION_HANGUP,
     ACTION_REPEAT_BOT_MESSAGES,
-    ACTION_BLOCK_DIGRESSION,
-    ACTION_CONTINUE_DIGRESSION,
 ]

 ACTION_SHOULD_SEND_DOMAIN = "send_domain"
@@ -205,8 +201,4 @@ CLASSIFIER_NAME_FALLBACK = "FallbackClassifier"

 POLICIES_THAT_EXTRACT_ENTITIES = {"TEDPolicy"}

-# digression constants
-KEY_ASK_CONFIRM_DIGRESSIONS = "ask_confirm_digressions"
-KEY_BLOCK_DIGRESSIONS = "block_digressions"
-
 ERROR_CODE_KEY = "error_code"
rasa/shared/core/domain.py
CHANGED

@@ -1678,6 +1678,14 @@ class Domain:
         """Write domain to a file."""
         as_yaml = self.as_yaml()
         rasa.shared.utils.io.write_text_file(as_yaml, filename)
+        # run the check again on the written domain to catch any errors
+        # that may have been missed in the user defined domain files
+        structlogger.info(
+            "domain.persist.domain_written_to_file",
+            event_info="The entire domain content has been written to file.",
+            filename=filename,
+        )
+        Domain.is_domain_file(filename)

     def as_yaml(self) -> Text:
         """Dump the `Domain` object as a YAML string.
@@ -1972,17 +1980,18 @@ class Domain:

         try:
             content = read_yaml_file(filename, expand_env_vars=cls.expand_env_vars)
-        except (RasaException, YamlSyntaxException):
-            structlogger.warning(
+        except (RasaException, YamlSyntaxException) as error:
+            structlogger.error(
                 "domain.cannot_load_domain_file",
                 file=filename,
+                error=error,
                 event_info=(
                     f"The file {filename} could not be loaded as domain file. "
                     f"You can use https://yamlchecker.com/ to validate "
                     f"the YAML syntax of your file."
                 ),
             )
-            return False
+            raise RasaException(f"Domain could not be loaded: {error}")

         return any(key in content for key in ALL_DOMAIN_KEYS)
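Two related tightenings in domain.py: persist() now re-validates the file it just wrote via Domain.is_domain_file, and is_domain_file itself raises a RasaException for an unloadable file where it previously only logged a warning. A sketch of the stricter caller contract, with an illustrative file name:

from rasa.shared.core.domain import Domain
from rasa.shared.exceptions import RasaException

try:
    Domain.is_domain_file("broken_domain.yml")
except RasaException as err:
    print(f"domain could not be loaded: {err}")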
rasa/shared/core/flows/flow.py
CHANGED

@@ -13,10 +13,6 @@ from pypred import Predicate
 import rasa.shared.utils.io
 from rasa.engine.language import Language
 from rasa.shared.constants import RASA_DEFAULT_FLOW_PATTERN_PREFIX
-from rasa.shared.core.constants import (
-    KEY_ASK_CONFIRM_DIGRESSIONS,
-    KEY_BLOCK_DIGRESSIONS,
-)
 from rasa.shared.core.flows.constants import (
     KEY_ALWAYS_INCLUDE_IN_PROMPT,
     KEY_DESCRIPTION,
@@ -52,7 +48,6 @@ from rasa.shared.core.flows.steps.constants import (
     START_STEP,
 )
 from rasa.shared.core.flows.steps.continuation import ContinueFlowStep
-from rasa.shared.core.flows.utils import extract_digression_prop
 from rasa.shared.core.slots import Slot

 structlogger = structlog.get_logger()
@@ -94,10 +89,6 @@ class Flow:
     """The path to the file where the flow is stored."""
     persisted_slots: List[str] = field(default_factory=list)
     """The list of slots that should be persisted after the flow ends."""
-    ask_confirm_digressions: List[str] = field(default_factory=list)
-    """The flow ids for which the assistant should ask for confirmation."""
-    block_digressions: List[str] = field(default_factory=list)
-    """The flow ids that the assistant should block from digressing to."""
     run_pattern_completed: bool = True
     """Whether the pattern_completed flow should be run after the flow ends."""

@@ -138,10 +129,6 @@ class Flow:
             # data. When the model is trained, take the provided file_path.
             file_path=data.get(KEY_FILE_PATH) if KEY_FILE_PATH in data else file_path,
             persisted_slots=data.get(KEY_PERSISTED_SLOTS, []),
-            ask_confirm_digressions=extract_digression_prop(
-                KEY_ASK_CONFIRM_DIGRESSIONS, data
-            ),
-            block_digressions=extract_digression_prop(KEY_BLOCK_DIGRESSIONS, data),
             run_pattern_completed=data.get(KEY_RUN_PATTERN_COMPLETED, True),
             translation=extract_translations(
                 translation_data=data.get(KEY_TRANSLATION, {})
@@ -220,10 +207,6 @@ class Flow:
         data[KEY_FILE_PATH] = self.file_path
         if self.persisted_slots:
             data[KEY_PERSISTED_SLOTS] = self.persisted_slots
-        if self.ask_confirm_digressions:
-            data[KEY_ASK_CONFIRM_DIGRESSIONS] = self.ask_confirm_digressions
-        if self.block_digressions:
-            data[KEY_BLOCK_DIGRESSIONS] = self.block_digressions
         if self.run_pattern_completed is not None:
             data["run_pattern_completed"] = self.run_pattern_completed
         if self.translation: