rasa-pro 3.12.0rc2__py3-none-any.whl → 3.12.0rc3__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
- rasa/cli/dialogue_understanding_test.py +5 -8
- rasa/cli/llm_fine_tuning.py +47 -12
- rasa/core/channels/voice_stream/asr/asr_event.py +5 -0
- rasa/core/channels/voice_stream/audiocodes.py +19 -6
- rasa/core/channels/voice_stream/call_state.py +3 -9
- rasa/core/channels/voice_stream/genesys.py +40 -55
- rasa/core/channels/voice_stream/voice_channel.py +61 -39
- rasa/core/tracker_store.py +123 -34
- rasa/dialogue_understanding/commands/set_slot_command.py +1 -0
- rasa/dialogue_understanding/commands/utils.py +1 -4
- rasa/dialogue_understanding/generator/command_parser.py +41 -0
- rasa/dialogue_understanding/generator/constants.py +7 -2
- rasa/dialogue_understanding/generator/llm_based_command_generator.py +9 -2
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2 +29 -48
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_fallback_other_models_template.jinja2 +57 -0
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 +23 -50
- rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +76 -24
- rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +32 -18
- rasa/dialogue_understanding/processor/command_processor.py +39 -19
- rasa/dialogue_understanding/stack/utils.py +11 -6
- rasa/engine/language.py +67 -25
- rasa/llm_fine_tuning/conversations.py +3 -31
- rasa/llm_fine_tuning/llm_data_preparation_module.py +5 -3
- rasa/llm_fine_tuning/paraphrasing/rephrase_validator.py +18 -13
- rasa/llm_fine_tuning/paraphrasing_module.py +6 -2
- rasa/llm_fine_tuning/train_test_split_module.py +27 -27
- rasa/llm_fine_tuning/utils.py +7 -0
- rasa/shared/constants.py +4 -0
- rasa/shared/core/domain.py +2 -0
- rasa/shared/providers/_configs/azure_entra_id_config.py +8 -8
- rasa/shared/providers/llm/litellm_router_llm_client.py +1 -0
- rasa/shared/providers/router/_base_litellm_router_client.py +38 -7
- rasa/shared/utils/llm.py +69 -13
- rasa/telemetry.py +13 -3
- rasa/tracing/instrumentation/attribute_extractors.py +2 -5
- rasa/validator.py +2 -2
- rasa/version.py +1 -1
- {rasa_pro-3.12.0rc2.dist-info → rasa_pro-3.12.0rc3.dist-info}/METADATA +1 -1
- {rasa_pro-3.12.0rc2.dist-info → rasa_pro-3.12.0rc3.dist-info}/RECORD +42 -41
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_default.jinja2 +0 -68
- {rasa_pro-3.12.0rc2.dist-info → rasa_pro-3.12.0rc3.dist-info}/NOTICE +0 -0
- {rasa_pro-3.12.0rc2.dist-info → rasa_pro-3.12.0rc3.dist-info}/WHEEL +0 -0
- {rasa_pro-3.12.0rc2.dist-info → rasa_pro-3.12.0rc3.dist-info}/entry_points.txt +0 -0
rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2
CHANGED

@@ -1,73 +1,46 @@
 ## Task Description
 Your task is to analyze the current conversation context and generate a list of actions to start new business processes that we call flows, to extract slots, or respond to small talk and knowledge requests.
 
-
+---
+
+## Available Flows and Slots
+Use the following structured data:
+```json
+{"flows":[{% for flow in available_flows %}{"name":"{{ flow.name }}","description":"{{ flow.description }}"{% if flow.slots %},"slots":[{% for slot in flow.slots %}{"name":"{{ slot.name }}"{% if slot.description %},"description":"{{ slot.description }}"{% endif %}{% if slot.allowed_values %},"allowed_values":{{ slot.allowed_values }}{% endif %}}{% if not loop.last %},{% endif %}{% endfor %}]{% endif %}}{% if not loop.last %},{% endif %}{% endfor %}]}
+```
+
+---
 
 ## Available Actions:
-* `start flow flow_name`: Starting a flow. For example, `start flow transfer_money` or `start flow list_contacts
-* `set slot slot_name slot_value`: Slot setting. For example, `set slot transfer_money_recipient Freddy`. Can be used to correct and change previously set values
-* `cancel flow`: Cancelling the current flow
+* `start flow flow_name`: Starting a flow. For example, `start flow transfer_money` or `start flow list_contacts`.
+* `set slot slot_name slot_value`: Slot setting. For example, `set slot transfer_money_recipient Freddy`. Can be used to correct and change previously set values.
+* `cancel flow`: Cancelling the current flow.
 * `disambiguate flows flow_name1 flow_name2 ... flow_name_n`: Disambiguate which flow should be started when user input is ambiguous by listing the potential flows as options. For example, `disambiguate flows list_contacts add_contact remove_contact ...` if the user just wrote "contacts".
-* `provide info`: Responding to the user's questions by supplying relevant information, such as answering FAQs or explaining services
+* `provide info`: Responding to the user's questions by supplying relevant information, such as answering FAQs or explaining services.
 * `offtopic reply`: Responding to casual or social user messages that are unrelated to any flows, engaging in friendly conversation and addressing off-topic remarks.
-* `hand over`: Handing over to a human, in case the user seems frustrated or explicitly asks to speak to one
+* `hand over`: Handing over to a human, in case the user seems frustrated or explicitly asks to speak to one.
 
-
+---
 
 ## General Tips
 * Do not fill slots with abstract values or placeholders.
+* For categorical slots try to match the user message with allowed slot values. Use "other" if you cannot match it.
+* Set the boolean slots based on the user response. Map positive responses to `True`, and negative to `False`.
+* Extract text slot values exactly as provided by the user. Avoid assumptions, format changes, or partial extractions.
 * Only use information provided by the user.
 * Use clarification in ambiguous cases.
 * Multiple flows can be started. If a user wants to digress into a second flow, you do not need to cancel the current flow.
+* Do not cancel the flow unless the user explicitly requests it.
 * Strictly adhere to the provided action format.
-* For categorical slots try to match the user message with potential slot values. Use "other" if you cannot match it
 * Focus on the last message and take it one step at a time.
 * Use the previous conversation steps only to aid understanding.
 
-
-
-## Available Flows and Slots
-Use the following structured date:
-```json
-{
-"flows": [
-{% for flow in available_flows %}{
-"name": "{{ flow.name }}",
-"description": "{{ flow.description }}"{% if flow.slots %},
-"slots": [{% for slot in flow.slots %}
-{
-"name": "{{ slot.name }}"{% if slot.description %},
-"description": "{{ slot.description }}"{% endif %}{% if slot.allowed_values %},
-"allowed_values": {{ slot.allowed_values }}{% endif %}
-}{% if not loop.last %},{% endif %}{% endfor %}
-]{% endif %}
-}{% if not loop.last %},
-{% endif %}{% endfor %}
-]
-}
-```
-
---
+---
 
 ## Current State
-{% if current_flow != None %}Use the following structured
+{% if current_flow != None %}Use the following structured data:
 ```json
-{
-"active_flow": "{{ current_flow }}",
-"current_step": {
-"requested_slot": "{{ current_slot }}",
-"requested_slot_description": "{{ current_slot_description }}"
-},
-"slots": [{% for slot in flow_slots %}
-{
-"name": "{{ slot.name }}",
-"value": "{{ slot.value }}",
-"type": "{{ slot.type }}"{% if slot.description %},
-"description": "{{ slot.description }}"{% endif %}{% if slot.allowed_values %},
-"allowed_values": "{{ slot.allowed_values }}"{% endif %}
-}{% if not loop.last %},{% endif %}{% endfor %}
-]
-}
+{"active_flow":"{{ current_flow }}","current_step":{"requested_slot":"{{ current_slot }}","requested_slot_description":"{{ current_slot_description }}"},"slots":[{% for slot in flow_slots %}{"name":"{{ slot.name }}","value":"{{ slot.value }}","type":"{{ slot.type }}"{% if slot.description %},"description":"{{ slot.description }}"{% endif %}{% if slot.allowed_values %},"allowed_values":"{{ slot.allowed_values }}"{% endif %}}{% if not loop.last %},{% endif %}{% endfor %}]}
 ```{% else %}
 You are currently not inside any flow.{% endif %}
 
@@ -79,6 +52,6 @@ You are currently not inside any flow.{% endif %}
 ---
 
 ## Task
-Create an action list with one action per line in response to the
+Create an action list with one action per line in response to the user's last message: """{{ user_message }}""".
 
 Your action list:
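The rewritten template collapses the pretty-printed JSON blocks into single-line Jinja2 expressions, so the rendered JSON fences no longer carry the stray newlines and indentation the old multi-line loops produced. A minimal rendering sketch of the compacted flows block (the `Flow` class and the flow data are hypothetical stand-ins, not Rasa types, and slots are omitted for brevity):

```python
# Sketch: rendering the compacted "Available Flows and Slots" block.
# The Flow class below is a hypothetical stand-in, not a Rasa type.
from jinja2 import Template

FLOWS_TEMPLATE = (
    '{"flows":[{% for flow in available_flows %}'
    '{"name":"{{ flow.name }}","description":"{{ flow.description }}"}'
    "{% if not loop.last %},{% endif %}{% endfor %}]}"
)


class Flow:
    def __init__(self, name: str, description: str) -> None:
        self.name = name
        self.description = description


flows = [Flow("transfer_money", "Send money to a contact")]
print(Template(FLOWS_TEMPLATE).render(available_flows=flows))
# {"flows":[{"name":"transfer_money","description":"Send money to a contact"}]}
```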
rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py
CHANGED

@@ -1,3 +1,4 @@
+import copy
 from typing import Any, Dict, List, Optional, Text
 
 import structlog
@@ -19,10 +20,16 @@ from rasa.dialogue_understanding.generator.command_parser import (
 )
 from rasa.dialogue_understanding.generator.constants import (
     COMMAND_PROMPT_FILE_NAME,
-
+    DEFAULT_OPENAI_MAX_GENERATED_TOKENS,
     FLOW_RETRIEVAL_KEY,
     LLM_BASED_COMMAND_GENERATOR_CONFIG_FILE,
     LLM_CONFIG_KEY,
+    MODEL_CONFIG_KEY,
+    MODEL_NAME_CLAUDE_3_5_SONNET_20240620,
+    MODEL_NAME_GPT_4O_2024_11_20,
+    OPENAI_PROVIDER,
+    PROVIDER_CONFIG_KEY,
+    TIMEOUT_CONFIG_KEY,
     USER_INPUT_CONFIG_KEY,
 )
 from rasa.dialogue_understanding.generator.flow_retrieval import FlowRetrieval
@@ -36,9 +43,14 @@ from rasa.engine.recipes.default_recipe import DefaultV1Recipe
 from rasa.engine.storage.resource import Resource
 from rasa.engine.storage.storage import ModelStorage
 from rasa.shared.constants import (
+    ANTHROPIC_PROVIDER,
+    AWS_BEDROCK_PROVIDER,
+    AZURE_OPENAI_PROVIDER,
     EMBEDDINGS_CONFIG_KEY,
+    MAX_TOKENS_CONFIG_KEY,
     PROMPT_TEMPLATE_CONFIG_KEY,
     ROUTE_TO_CALM_SLOT,
+    TEMPERATURE_CONFIG_KEY,
 )
 from rasa.shared.core.flows import FlowsList
 from rasa.shared.core.trackers import DialogueStateTracker
@@ -61,18 +73,38 @@ from rasa.utils.log_utils import log_llm
 structlogger = structlog.get_logger()
 
 
+DEFAULT_LLM_CONFIG = {
+    PROVIDER_CONFIG_KEY: OPENAI_PROVIDER,
+    MODEL_CONFIG_KEY: MODEL_NAME_GPT_4O_2024_11_20,
+    TEMPERATURE_CONFIG_KEY: 0.0,
+    MAX_TOKENS_CONFIG_KEY: DEFAULT_OPENAI_MAX_GENERATED_TOKENS,
+    TIMEOUT_CONFIG_KEY: 7,
+}
+
 MODEL_PROMPT_MAPPER = {
-    "
-
-
+    f"{OPENAI_PROVIDER}/{MODEL_NAME_GPT_4O_2024_11_20}": (
+        "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2"
+    ),
+    f"{AZURE_OPENAI_PROVIDER}/{MODEL_NAME_GPT_4O_2024_11_20}": (
+        "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2"
+    ),
+    f"{AWS_BEDROCK_PROVIDER}/anthropic.{MODEL_NAME_CLAUDE_3_5_SONNET_20240620}-v1:0": (
         "command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2"
     ),
-    "
+    f"{ANTHROPIC_PROVIDER}/{MODEL_NAME_CLAUDE_3_5_SONNET_20240620}": (
         "command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2"
    ),
 }
 
-
+# When model is not configured, then we use the default prompt template
+DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME = (
+    "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2"
+)
+# When the configured model is not found in the model prompt mapper, then we use the
+# fallback prompt template
+FALLBACK_COMMAND_PROMPT_TEMPLATE_FILE_NAME = (
+    "command_prompt_v2_fallback_other_models_template.jinja2"
+)
 
 
 class CommandParserValidatorSingleton:
@@ -154,25 +186,19 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
             **kwargs,
         )
 
-        # Get the
-
-        self.config
-            MODEL_PROMPT_MAPPER,
-            DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME,
+        # Get the prompt template from the config or the default prompt template.
+        self.prompt_template = self.resolve_component_prompt_template(
+            self.config, prompt_template
         )
 
-        # Set the
-
-        self.
-            default_command_prompt_template,
+        # Set the command syntax version to v2
+        CommandSyntaxManager.set_syntax_version(
+            self.get_component_command_syntax_version()
        )
 
         self.trace_prompt_tokens = self.config.get("trace_prompt_tokens", False)
         self.repeat_command_enabled = self.is_repeat_command_enabled()
 
-        # Set the command syntax version to v2
-        CommandSyntaxManager.set_syntax_version(CommandSyntaxVersion.v2)
-
     ### Implementations of LLMBasedCommandGenerator parent
     @staticmethod
     def get_default_config() -> Dict[str, Any]:
@@ -219,7 +245,7 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
         llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
         cls.perform_llm_health_check(
             llm_config,
-
+            cls.get_default_llm_config(),
             "compact_llm_command_generator.load",
             cls.__name__,
         )
@@ -508,15 +534,41 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
             config.get(FLOW_RETRIEVAL_KEY, {}).get(EMBEDDINGS_CONFIG_KEY),
             FlowRetrieval.__name__,
         )
+
+        # Create a copy of the config to avoid modifying the original config
+        # and update the llm config with the resolved llm config.
+        _config_copy = copy.deepcopy(config)
+        _config_copy[LLM_CONFIG_KEY] = llm_config
+        prompt_template = cls.resolve_component_prompt_template(_config_copy)
+
+        return deep_container_fingerprint(
+            [prompt_template, llm_config, embedding_config]
+        )
+
+    @staticmethod
+    def get_default_llm_config() -> Dict[str, Any]:
+        """Get the default LLM config for the command generator."""
+        return DEFAULT_LLM_CONFIG
+
+    @staticmethod
+    def get_component_command_syntax_version() -> CommandSyntaxVersion:
+        return CommandSyntaxVersion.v2
+
+    @staticmethod
+    def resolve_component_prompt_template(
+        config: Dict[str, Any], prompt_template: Optional[str] = None
+    ) -> Optional[str]:
+        """Get the prompt template from the config or the default prompt template."""
+        # Get the default prompt template based on the model name.
         default_command_prompt_template = get_default_prompt_template_based_on_model(
-
+            config.get(LLM_CONFIG_KEY, {}) or {},
             MODEL_PROMPT_MAPPER,
             DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME,
+            FALLBACK_COMMAND_PROMPT_TEMPLATE_FILE_NAME,
         )
-
+
+        # Return the prompt template either from the config or the default prompt.
+        return prompt_template or get_prompt_template(
             config.get(PROMPT_TEMPLATE_CONFIG_KEY),
             default_command_prompt_template,
         )
-        return deep_container_fingerprint(
-            [prompt_template, llm_config, embedding_config]
-        )
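The resolution order introduced here is: an explicit `prompt_template` argument, then the `prompt_template` config key, then a per-model template from `MODEL_PROMPT_MAPPER`, with `DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME` used when no model is configured and `FALLBACK_COMMAND_PROMPT_TEMPLATE_FILE_NAME` when the configured model is not in the mapper. A sketch of that lookup, assuming the mapper keys are `"<provider>/<model>"` strings as above; the helper name and the exact signature of Rasa's `get_default_prompt_template_based_on_model` may differ:

```python
# Sketch of the per-model template lookup; illustrates only the
# default/fallback split, not Rasa's real helper.
from typing import Dict


def pick_template(
    llm_config: Dict[str, str],
    mapper: Dict[str, str],
    default_template: str,
    fallback_template: str,
) -> str:
    provider = llm_config.get("provider")
    model = llm_config.get("model")
    if not provider or not model:
        # No model configured: use the default (GPT-4o) template.
        return default_template
    # Configured but unknown model: use the model-agnostic fallback template.
    return mapper.get(f"{provider}/{model}", fallback_template)


pick_template({}, {}, "default.jinja2", "fallback.jinja2")
# -> 'default.jinja2'
pick_template(
    {"provider": "openai", "model": "gpt-3.5-turbo"},
    {},
    "default.jinja2",
    "fallback.jinja2",
)
# -> 'fallback.jinja2' (model not in the mapper)
```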
rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py
CHANGED

@@ -8,6 +8,7 @@ from rasa.dialogue_understanding.commands.command_syntax_manager import (
     CommandSyntaxVersion,
 )
 from rasa.dialogue_understanding.generator.constants import (
+    DEFAULT_LLM_CONFIG,
     FLOW_RETRIEVAL_KEY,
     LLM_CONFIG_KEY,
     USER_INPUT_CONFIG_KEY,
@@ -71,18 +72,14 @@ class SingleStepLLMCommandGenerator(CompactLLMCommandGenerator):
                 "Please use the config parameter 'prompt_template' instead. "
             ),
         )
-        config_prompt = (
-            config.get(PROMPT_CONFIG_KEY)
-            or config.get(PROMPT_TEMPLATE_CONFIG_KEY)
-            or None
-        )
-        self.prompt_template = prompt_template or get_prompt_template(
-            config_prompt,
-            DEFAULT_COMMAND_PROMPT_TEMPLATE,
+        self.prompt_template = self.resolve_component_prompt_template(
+            config, prompt_template
         )
 
         # Set the command syntax version to v1
-        CommandSyntaxManager.set_syntax_version(CommandSyntaxVersion.v1)
+        CommandSyntaxManager.set_syntax_version(
+            self.get_component_command_syntax_version()
+        )
 
     @staticmethod
     def get_default_config() -> Dict[str, Any]:
@@ -98,15 +95,7 @@ class SingleStepLLMCommandGenerator(CompactLLMCommandGenerator):
     @classmethod
     def fingerprint_addon(cls: Any, config: Dict[str, Any]) -> Optional[str]:
         """Add a fingerprint for the graph."""
-        config_prompt = (
-            config.get(PROMPT_CONFIG_KEY)
-            or config.get(PROMPT_TEMPLATE_CONFIG_KEY)
-            or None
-        )
-        prompt_template = get_prompt_template(
-            config_prompt,
-            DEFAULT_COMMAND_PROMPT_TEMPLATE,
-        )
+        prompt_template = cls.resolve_component_prompt_template(config)
         llm_config = resolve_model_client_config(
             config.get(LLM_CONFIG_KEY), SingleStepLLMCommandGenerator.__name__
         )
@@ -117,3 +106,28 @@ class SingleStepLLMCommandGenerator(CompactLLMCommandGenerator):
         return deep_container_fingerprint(
             [prompt_template, llm_config, embedding_config]
         )
+
+    @staticmethod
+    def get_default_llm_config() -> Dict[str, Any]:
+        """Get the default LLM config for the command generator."""
+        return DEFAULT_LLM_CONFIG
+
+    @staticmethod
+    def get_component_command_syntax_version() -> CommandSyntaxVersion:
+        return CommandSyntaxVersion.v1
+
+    @staticmethod
+    def resolve_component_prompt_template(
+        config: Dict[str, Any], prompt_template: Optional[str] = None
+    ) -> Optional[str]:
+        """Get the prompt template from the config or the default prompt template."""
+        # Get the default prompt template based on the model name.
+        config_prompt = (
+            config.get(PROMPT_CONFIG_KEY)
+            or config.get(PROMPT_TEMPLATE_CONFIG_KEY)
+            or None
+        )
+        return prompt_template or get_prompt_template(
+            config_prompt,
+            DEFAULT_COMMAND_PROMPT_TEMPLATE,
+        )
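With this change `SingleStepLLMCommandGenerator` no longer duplicates the constructor logic of `CompactLLMCommandGenerator`: both run the shared `__init__` and only the hooks that differ (`get_component_command_syntax_version`, `resolve_component_prompt_template`, `get_default_llm_config`) are overridden. A toy sketch of the template-method pattern involved, with illustrative class names rather than Rasa's:

```python
# Toy sketch: the base constructor drives behavior through an overridable
# hook, so a subclass only swaps the hook, not the constructor.
from enum import Enum


class SyntaxVersion(Enum):
    v1 = 1
    v2 = 2


class BaseGenerator:
    def __init__(self) -> None:
        # Resolves polymorphically to the subclass's hook.
        self.syntax = self.get_component_command_syntax_version()

    @staticmethod
    def get_component_command_syntax_version() -> SyntaxVersion:
        return SyntaxVersion.v2


class LegacyGenerator(BaseGenerator):
    @staticmethod
    def get_component_command_syntax_version() -> SyntaxVersion:
        return SyntaxVersion.v1


assert BaseGenerator().syntax is SyntaxVersion.v2
assert LegacyGenerator().syntax is SyntaxVersion.v1
```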
rasa/dialogue_understanding/processor/command_processor.py
CHANGED

@@ -42,6 +42,7 @@ from rasa.dialogue_understanding.stack.frames import (
 from rasa.dialogue_understanding.stack.utils import (
     filled_slots_for_active_flow,
     top_flow_frame,
+    top_user_flow_frame,
 )
 from rasa.engine.graph import ExecutionContext
 from rasa.shared.constants import (
@@ -430,28 +431,22 @@ def clean_up_commands(
                 command=command,
             )
 
-        elif isinstance(command, StartFlowCommand)
-
-
-            structlogger.debug(
-                "command_processor.clean_up_commands.skip_command_flow_already_active",
-                command=command,
+        elif isinstance(command, StartFlowCommand):
+            top_user_frame = top_user_flow_frame(
+                tracker.stack, ignore_call_and_link_frames=False
             )
+            top_flow_id = top_user_frame.flow_id if top_user_frame else ""
 
-
-
-
-
-
-
-
-
+            if top_flow_id == command.flow:
+                # drop a start flow command if the starting flow is equal
+                # to the currently active flow
+                structlogger.debug(
+                    "command_processor.clean_up_commands.skip_command_flow_already_active",
+                    command=command,
+                )
+                continue
 
-            if collect_info and (
-                collect_info.ask_confirm_digressions
-                or collect_info.block_digressions
-                or current_flow_condition
-            ):
+            if should_add_handle_digressions_command(tracker, all_flows, top_flow_id):
                 clean_commands.append(HandleDigressionsCommand(flow=command.flow))
                 structlogger.debug(
                     "command_processor.clean_up_commands.push_handle_digressions",
@@ -848,3 +843,28 @@ def filter_cannot_handle_command_for_skipped_slots(
             and CANNOT_HANDLE_REASON == command.reason
         )
     ]
+
+
+def should_add_handle_digressions_command(
+    tracker: DialogueStateTracker, all_flows: FlowsList, top_flow_id: str
+) -> bool:
+    """Check if a handle digressions command should be added to the commands.
+
+    The command should replace a StartFlow command only if we are at a collect step of
+    a flow and a new flow is predicted by the command generator to start.
+    """
+    current_flow = all_flows.flow_by_id(top_flow_id)
+    current_flow_condition = current_flow and (
+        current_flow.ask_confirm_digressions or current_flow.block_digressions
+    )
+
+    collect_info = get_current_collect_step(tracker.stack, all_flows)
+
+    if collect_info and (
+        collect_info.ask_confirm_digressions
+        or collect_info.block_digressions
+        or current_flow_condition
+    ):
+        return True
+
+    return False
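The inlined skip logic reduces to: look up the user flow on top of the stack (now including `call` and `link` frames) and drop any `StartFlowCommand` that targets it. A simplified stand-alone sketch of that guard; the dataclass is a stand-in for Rasa's command class:

```python
# Toy sketch of the StartFlow dedup guard: a StartFlowCommand targeting the
# flow already on top of the stack is dropped instead of re-queued.
from dataclasses import dataclass
from typing import List


@dataclass
class StartFlowCommand:
    flow: str


def clean_start_flow_commands(
    commands: List[StartFlowCommand], top_flow_id: str
) -> List[StartFlowCommand]:
    kept = []
    for command in commands:
        if command.flow == top_flow_id:
            continue  # flow already active: skip the command
        kept.append(command)
    return kept


cmds = [StartFlowCommand("transfer_money"), StartFlowCommand("list_contacts")]
assert clean_start_flow_commands(cmds, "transfer_money") == [
    StartFlowCommand("list_contacts")
]
```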
rasa/dialogue_understanding/stack/utils.py
CHANGED

@@ -63,7 +63,9 @@ def top_flow_frame(
         return None
 
 
-def top_user_flow_frame(dialogue_stack: DialogueStack) -> Optional[UserFlowStackFrame]:
+def top_user_flow_frame(
+    dialogue_stack: DialogueStack, ignore_call_and_link_frames: bool = True
+) -> Optional[UserFlowStackFrame]:
     """Returns the topmost user flow frame from the tracker.
 
     User flows are flows that are created by developers of an assistant and
@@ -75,16 +77,19 @@ def top_user_flow_frame(
 
     Args:
         dialogue_stack: The dialogue stack to use.
+        ignore_call_and_link_frames: Whether to ignore user frames of type `call`
+            and `link`. By default, these frames are ignored.
 
     Returns:
         The topmost user flow frame from the tracker.
     """
     for frame in reversed(dialogue_stack.frames):
-        if (
-
-
-
-
+        if isinstance(frame, UserFlowStackFrame):
+            if ignore_call_and_link_frames and (
+                frame.frame_type == FlowStackFrameType.CALL
+                or frame.frame_type == FlowStackFrameType.LINK
+            ):
+                continue
             return frame
     return None
 
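A simplified re-implementation showing what the new keyword argument changes: with the default `ignore_call_and_link_frames=True` the traversal skips `call`/`link` frames, while `False` returns whichever user frame is literally on top. The frame class is a stand-in for Rasa's, and non-user frames are omitted:

```python
# Simplified sketch of the new traversal, mirroring the diff above.
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class UserFrame:
    flow_id: str
    frame_type: str  # "regular", "call", or "link"


def top_user_flow_frame(
    frames: List[UserFrame], ignore_call_and_link_frames: bool = True
) -> Optional[UserFrame]:
    for frame in reversed(frames):
        if ignore_call_and_link_frames and frame.frame_type in ("call", "link"):
            continue
        return frame
    return None


stack = [UserFrame("transfer_money", "regular"), UserFrame("check_balance", "call")]
assert top_user_flow_frame(stack).flow_id == "transfer_money"
assert top_user_flow_frame(stack, False).flow_id == "check_balance"
```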
rasa/engine/language.py
CHANGED

@@ -2,6 +2,8 @@ from dataclasses import dataclass
 from typing import Any, Dict, Text
 
 from langcodes import Language as LangcodesLanguage
+from langcodes import standardize_tag
+from langcodes.tag_parser import LanguageTagError
 
 from rasa.shared.exceptions import RasaException
 
@@ -30,9 +32,14 @@ class Language:
         Raises:
             RasaException: If the language code or custom language code is invalid.
         """
-
-
+        if cls.is_custom_language_code(language_code):
+            cls.validate_custom_language_code(language_code)
+        elif not cls.is_language_code_bcp_47_standard(language_code):
+            raise RasaException(
+                f"Language '{language_code}' is not a BCP 47 standard language code."
+            )
 
+        language = LangcodesLanguage.get(language_code)
         return cls(
             code=language_code,
             label=cls.get_language_label(language),
@@ -40,7 +47,38 @@ class Language:
         )
 
     @staticmethod
-    def
+    def is_language_code_bcp_47_standard(language_code: str) -> bool:
+        """Checks if a language code is a BCP 47 standard language code.
+
+        Args:
+            language_code: The language code to check.
+
+        Returns:
+            `True` if the language code is a BCP 47 standard, `False` otherwise.
+        """
+        try:
+            standardized_language_code = standardize_tag(language_code)
+            return (
+                standardized_language_code == language_code
+                and LangcodesLanguage.get(language_code).is_valid()
+            )
+        except LanguageTagError:
+            return False
+
+    @staticmethod
+    def is_custom_language_code(language_code: str) -> bool:
+        """Checks if a language code is a custom language code.
+
+        Args:
+            language_code: The language code to check.
+
+        Returns:
+            `True` if the language code is a custom language code, `False` otherwise.
+        """
+        return language_code.startswith(CUSTOM_LANGUAGE_CODE_PREFIX)
+
+    @classmethod
+    def get_language_label(cls, language: LangcodesLanguage) -> str:
         """Gets the display name of a language.
 
         For custom languages (in the format "x-<base_lang>-<custom_label>"),
@@ -55,11 +93,11 @@ class Language:
         """
         language_code = str(language)
 
-        if language_code.startswith(CUSTOM_LANGUAGE_CODE_PREFIX):
+        if cls.is_custom_language_code(language_code):
             # If it's a custom language, derive the label from the base language code.
-
-            base_language_code =
-            base_language = LangcodesLanguage.
+            without_prefix = language_code[len(CUSTOM_LANGUAGE_CODE_PREFIX) :]
+            base_language_code, _ = without_prefix.rsplit("-", 1)
+            base_language = LangcodesLanguage.get(base_language_code)
             return base_language.display_name()
         else:
             return language.display_name()
@@ -79,15 +117,15 @@ class Language:
 
         language_code = str(language)
         if language_code.startswith(CUSTOM_LANGUAGE_CODE_PREFIX):
-            cls.
+            cls.validate_custom_language_code(language_code)
 
-    @
-    def
+    @classmethod
+    def validate_custom_language_code(cls, custom_language_code: str) -> None:
         """Validates a custom language code.
 
         A valid custom language code should adhere to the format:
         "x-<existing_language_code>-<custom_label>"
-        Example: x-en-formal
+        Example: x-en-formal or x-en-US-formal.
 
         Args:
             custom_language_code: The custom language code to validate.
@@ -102,29 +140,33 @@ class Language:
                 f"start with '{CUSTOM_LANGUAGE_CODE_PREFIX}'."
             )
 
-        #
-
-        if
+        # Remove the custom prefix.
+        without_prefix = custom_language_code[len(CUSTOM_LANGUAGE_CODE_PREFIX) :]
+        if "-" not in without_prefix:
             raise RasaException(
                 f"Custom language '{custom_language_code}' must be in the format "
                 f"'{CUSTOM_LANGUAGE_CODE_PREFIX}<language_code>-<custom_label>'."
             )
 
-
-
-        base_language = LangcodesLanguage.make(base_language_code)
-        if not base_language.is_valid():
+        base_language_code, custom_label = without_prefix.rsplit("-", 1)
+        if not base_language_code:
             raise RasaException(
-                f"Base language '{
-                f"
+                f"Base language in '{custom_language_code}' cannot be empty. "
+                f"Expected custom language code format is "
+                f"'{CUSTOM_LANGUAGE_CODE_PREFIX}<language_code>-<custom_label>'."
             )
-
-        # Ensure the custom label is not empty.
-        custom_label = parts[2]
         if not custom_label:
             raise RasaException(
-                f"Custom label in
-                f"
+                f"Custom label in '{custom_language_code}' cannot be empty."
+                f"Expected custom language code format is "
+                f"'{CUSTOM_LANGUAGE_CODE_PREFIX}<language_code>-<custom_label>'."
+            )
+
+        # Validate the base language code using langcodes.
+        if not cls.is_language_code_bcp_47_standard(base_language_code):
+            raise RasaException(
+                f"Base language '{base_language_code}' in custom language "
+                f"'{custom_language_code}' is not a valid language code."
            )
 
     def as_dict(self) -> Dict[Text, Any]:
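For reference, the new BCP 47 check accepts a tag only when `standardize_tag` leaves it unchanged and `langcodes` considers it valid, so aliases and underscore-separated tags are rejected even though `langcodes` can parse them. A short sketch, assuming langcodes 3.x (the `standardize_tag('eng_US')` example follows the langcodes documentation):

```python
# Sketch of what is_language_code_bcp_47_standard accepts and rejects.
from langcodes import Language, standardize_tag

standardize_tag("en-US")   # 'en-US' -> already canonical, passes the equality check
standardize_tag("eng_US")  # 'en-US' -> differs from the input, so 'eng_US' fails
Language.get("en").is_valid()  # True

# Custom codes bypass the BCP 47 check and go through
# validate_custom_language_code instead:
#   "x-en-formal"     -> valid (base "en", label "formal")
#   "x-en-US-formal"  -> valid (base "en-US", label "formal")
#   "x--formal"       -> RasaException: base language cannot be empty
```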
rasa/llm_fine_tuning/conversations.py
CHANGED

@@ -1,17 +1,7 @@
 from dataclasses import dataclass, field
 from typing import Any, Dict, Iterator, List, Optional, Union
 
-from rasa.dialogue_understanding.commands import (
-    CancelFlowCommand,
-    ChitChatAnswerCommand,
-    ClarifyCommand,
-    Command,
-    HumanHandoffCommand,
-    KnowledgeAnswerCommand,
-    SetSlotCommand,
-    SkipQuestionCommand,
-)
+from rasa.dialogue_understanding.commands.prompt_command import PromptCommand
 from rasa.e2e_test.e2e_test_case import TestCase, TestStep
 from rasa.shared.core.constants import USER
 
@@ -19,7 +9,7 @@ from rasa.shared.core.constants import USER
 @dataclass
 class ConversationStep:
     original_test_step: TestStep
-    llm_commands: List[Command]
+    llm_commands: List[PromptCommand]
     llm_prompt: str
     failed_rephrasings: List[str] = field(default_factory=list)
     passed_rephrasings: List[str] = field(default_factory=list)
@@ -38,25 +28,7 @@ class ConversationStep:
         return data
 
     def _commands_to_str(self) -> List[str]:
-
-        for command in self.llm_commands:
-            if isinstance(command, StartFlowCommand):
-                output.append(f"StartFlow({command.flow})")
-            elif isinstance(command, SetSlotCommand):
-                output.append(f"SetSlot({command.name}, {command.value})")
-            elif isinstance(command, ClarifyCommand):
-                output.append(f"Clarify({', '.join(command.options)})")
-            elif isinstance(command, CancelFlowCommand):
-                output.append("CancelFlow()")
-            elif isinstance(command, ChitChatAnswerCommand):
-                output.append("ChitChat()")
-            elif isinstance(command, SkipQuestionCommand):
-                output.append("SkipQuestion()")
-            elif isinstance(command, KnowledgeAnswerCommand):
-                output.append("SearchAndReply()")
-            elif isinstance(command, HumanHandoffCommand):
-                output.append("HumanHandoff()")
-        return output
+        return [command.to_dsl() for command in self.llm_commands]
 
     def commands_as_string(self) -> str:
         return "\n".join(self._commands_to_str())
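The `isinstance` ladder disappears because each command now renders itself via `to_dsl()`. A minimal sketch of that polymorphism; the `PromptCommand` protocol and the output strings below are illustrative (the real interface lives in `rasa.dialogue_understanding.commands.prompt_command`, and its DSL format may differ):

```python
# Sketch: commands render themselves, so the caller needs no type dispatch.
from dataclasses import dataclass
from typing import List, Protocol


class PromptCommand(Protocol):
    def to_dsl(self) -> str: ...


@dataclass
class StartFlow:
    flow: str

    def to_dsl(self) -> str:
        return f"StartFlow({self.flow})"


@dataclass
class SetSlot:
    name: str
    value: str

    def to_dsl(self) -> str:
        return f"SetSlot({self.name}, {self.value})"


def commands_to_str(commands: List[PromptCommand]) -> List[str]:
    return [command.to_dsl() for command in commands]


print(commands_to_str([StartFlow("transfer_money"), SetSlot("recipient", "Freddy")]))
# ['StartFlow(transfer_money)', 'SetSlot(recipient, Freddy)']
```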