rasa-pro 3.12.0rc2__py3-none-any.whl → 3.12.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of rasa-pro might be problematic.
- rasa/cli/dialogue_understanding_test.py +5 -8
- rasa/cli/llm_fine_tuning.py +47 -12
- rasa/cli/train.py +3 -0
- rasa/cli/utils.py +6 -0
- rasa/core/channels/development_inspector.py +77 -21
- rasa/core/channels/inspector/dist/assets/{arc-f0f8bd46.js → arc-9f1365dc.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{blockDiagram-38ab4fdb-7162c77d.js → blockDiagram-38ab4fdb-e0f81b12.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{c4Diagram-3d4e48cf-b1d0d098.js → c4Diagram-3d4e48cf-9deaee1c.js} +1 -1
- rasa/core/channels/inspector/dist/assets/channel-44956714.js +1 -0
- rasa/core/channels/inspector/dist/assets/{classDiagram-70f12bd4-807a1b27.js → classDiagram-70f12bd4-20450a96.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{classDiagram-v2-f2320105-5238dcdb.js → classDiagram-v2-f2320105-749d2abf.js} +1 -1
- rasa/core/channels/inspector/dist/assets/clone-a9475142.js +1 -0
- rasa/core/channels/inspector/dist/assets/{createText-2e5e7dd3-75dfaa67.js → createText-2e5e7dd3-bef0b38c.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{edges-e0da2a9e-df20501d.js → edges-e0da2a9e-943801a7.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{erDiagram-9861fffd-13cf4797.js → erDiagram-9861fffd-d523a948.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{flowDb-956e92f1-a4991264.js → flowDb-956e92f1-54e4cf19.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{flowDiagram-66a62f08-ccecf773.js → flowDiagram-66a62f08-48bfbbe8.js} +1 -1
- rasa/core/channels/inspector/dist/assets/flowDiagram-v2-96b9c2cf-43fa749a.js +1 -0
- rasa/core/channels/inspector/dist/assets/{flowchart-elk-definition-4a651766-b5801783.js → flowchart-elk-definition-4a651766-17c30827.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{ganttDiagram-c361ad54-161e079a.js → ganttDiagram-c361ad54-43086f2d.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{gitGraphDiagram-72cf32ee-f38e86a4.js → gitGraphDiagram-72cf32ee-5c8b693e.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{graph-be6ef5d8.js → graph-41a90d26.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{index-3862675e-d9ce8994.js → index-3862675e-b43eeae9.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{index-7794b245.js → index-e8affe45.js} +155 -155
- rasa/core/channels/inspector/dist/assets/{infoDiagram-f8f76790-5000a3dc.js → infoDiagram-f8f76790-0b20676b.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{journeyDiagram-49397b02-8ef0a17a.js → journeyDiagram-49397b02-39bce7b5.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{layout-d649bc98.js → layout-dc8eeea4.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{line-95add810.js → line-c4d2e756.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{linear-f6025094.js → linear-86f6f2d9.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{mindmap-definition-fc14e90a-2e8531c4.js → mindmap-definition-fc14e90a-4216f771.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{pieDiagram-8a3498a8-918adfdb.js → pieDiagram-8a3498a8-1a0cfa96.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{quadrantDiagram-120e2f19-cbd01797.js → quadrantDiagram-120e2f19-f91e67cf.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{requirementDiagram-deff3bca-6a8b877b.js → requirementDiagram-deff3bca-d4046bed.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{sankeyDiagram-04a897e0-c377c3fe.js → sankeyDiagram-04a897e0-2cf6d1d7.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{sequenceDiagram-704730f1-ab9e9b7f.js → sequenceDiagram-704730f1-751ac4f5.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{stateDiagram-587899a1-5e6ae67d.js → stateDiagram-587899a1-f734f4d4.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{stateDiagram-v2-d93cdb3a-40643476.js → stateDiagram-v2-d93cdb3a-91c65710.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{styles-6aaf32cf-afb8d108.js → styles-6aaf32cf-e0cff7be.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{styles-9a916d00-7edc9423.js → styles-9a916d00-c8029e5d.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{styles-c10674c1-c1d8f7e9.js → styles-c10674c1-114f312a.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{svgDrawCommon-08f97a94-f494b2ef.js → svgDrawCommon-08f97a94-b7b9dc00.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{timeline-definition-85554ec2-11c7cdd0.js → timeline-definition-85554ec2-9536d189.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{xychartDiagram-e933f94c-3f191ec1.js → xychartDiagram-e933f94c-bf3b0f36.js} +1 -1
- rasa/core/channels/inspector/dist/index.html +1 -1
- rasa/core/channels/inspector/package.json +1 -0
- rasa/core/channels/inspector/src/App.tsx +15 -2
- rasa/core/channels/inspector/src/components/RasaLogo.tsx +31 -0
- rasa/core/channels/inspector/src/components/RecruitmentPanel.tsx +68 -0
- rasa/core/channels/inspector/src/components/Welcome.tsx +19 -13
- rasa/core/channels/inspector/yarn.lock +5 -0
- rasa/core/channels/voice_ready/audiocodes.py +2 -2
- rasa/core/channels/voice_stream/asr/asr_event.py +5 -0
- rasa/core/channels/voice_stream/audiocodes.py +63 -35
- rasa/core/channels/voice_stream/call_state.py +3 -9
- rasa/core/channels/voice_stream/genesys.py +40 -55
- rasa/core/channels/voice_stream/voice_channel.py +61 -39
- rasa/core/tracker_store.py +123 -34
- rasa/dialogue_understanding/commands/set_slot_command.py +1 -0
- rasa/dialogue_understanding/commands/utils.py +1 -4
- rasa/dialogue_understanding/generator/command_parser.py +41 -0
- rasa/dialogue_understanding/generator/constants.py +7 -2
- rasa/dialogue_understanding/generator/llm_based_command_generator.py +33 -3
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2 +29 -48
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 +23 -50
- rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +76 -24
- rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +32 -18
- rasa/dialogue_understanding/processor/command_processor.py +59 -20
- rasa/dialogue_understanding/stack/utils.py +11 -6
- rasa/engine/language.py +67 -25
- rasa/engine/validation.py +2 -0
- rasa/llm_fine_tuning/conversations.py +3 -31
- rasa/llm_fine_tuning/llm_data_preparation_module.py +5 -3
- rasa/llm_fine_tuning/paraphrasing/rephrase_validator.py +18 -13
- rasa/llm_fine_tuning/paraphrasing_module.py +6 -2
- rasa/llm_fine_tuning/train_test_split_module.py +27 -27
- rasa/llm_fine_tuning/utils.py +7 -0
- rasa/model_training.py +3 -1
- rasa/server.py +1 -0
- rasa/shared/constants.py +4 -0
- rasa/shared/core/domain.py +6 -0
- rasa/shared/importers/importer.py +9 -1
- rasa/shared/providers/_configs/azure_entra_id_config.py +8 -8
- rasa/shared/providers/llm/litellm_router_llm_client.py +1 -0
- rasa/shared/providers/router/_base_litellm_router_client.py +38 -7
- rasa/shared/utils/common.py +14 -0
- rasa/shared/utils/llm.py +69 -13
- rasa/telemetry.py +13 -3
- rasa/tracing/instrumentation/attribute_extractors.py +2 -5
- rasa/validator.py +4 -4
- rasa/version.py +1 -1
- {rasa_pro-3.12.0rc2.dist-info → rasa_pro-3.12.1.dist-info}/METADATA +2 -2
- {rasa_pro-3.12.0rc2.dist-info → rasa_pro-3.12.1.dist-info}/RECORD +95 -94
- rasa/core/channels/inspector/dist/assets/channel-e265ea59.js +0 -1
- rasa/core/channels/inspector/dist/assets/clone-21f8a43d.js +0 -1
- rasa/core/channels/inspector/dist/assets/flowDiagram-v2-96b9c2cf-5c8ce12d.js +0 -1
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_default.jinja2 +0 -68
- {rasa_pro-3.12.0rc2.dist-info → rasa_pro-3.12.1.dist-info}/NOTICE +0 -0
- {rasa_pro-3.12.0rc2.dist-info → rasa_pro-3.12.1.dist-info}/WHEEL +0 -0
- {rasa_pro-3.12.0rc2.dist-info → rasa_pro-3.12.1.dist-info}/entry_points.txt +0 -0
rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2

@@ -1,77 +1,58 @@
+## Task Description
 Your task is to analyze the current conversation context and generate a list of actions to start new business processes that we call flows, to extract slots, or respond to small talk and knowledge requests.

+--
+
 ## Available Actions:
-* `start flow flow_name`: Starting a flow. For example, `start flow transfer_money` or `start flow list_contacts
-* `set slot slot_name slot_value`: Slot setting. For example, `set slot transfer_money_recipient Freddy`. Can be used to correct and change previously set values
-* `cancel flow`: Cancelling the current flow
+* `start flow flow_name`: Starting a flow. For example, `start flow transfer_money` or `start flow list_contacts`.
+* `set slot slot_name slot_value`: Slot setting. For example, `set slot transfer_money_recipient Freddy`. Can be used to correct and change previously set values.
+* `cancel flow`: Cancelling the current flow.
 * `disambiguate flows flow_name1 flow_name2 ... flow_name_n`: Disambiguate which flow should be started when user input is ambiguous by listing the potential flows as options. For example, `disambiguate flows list_contacts add_contact remove_contact ...` if the user just wrote "contacts".
-* `provide info`: Responding to the user's questions by supplying relevant information, such as answering FAQs or explaining services
+* `provide info`: Responding to the user's questions by supplying relevant information, such as answering FAQs or explaining services.
 * `offtopic reply`: Responding to casual or social user messages that are unrelated to any flows, engaging in friendly conversation and addressing off-topic remarks.
-* `hand over`: Handing over to a human, in case the user seems frustrated or explicitly asks to speak to one
+* `hand over`: Handing over to a human, in case the user seems frustrated or explicitly asks to speak to one.

+--

 ## General Tips
 * Do not fill slots with abstract values or placeholders.
+* For categorical slots try to match the user message with allowed slot values. Use "other" if you cannot match it.
+* Set the boolean slots based on the user response. Map positive responses to `True`, and negative to `False`.
+* Always refer to the slot description to determine what information should be extracted and how it should be formatted.
+* For text slots, extract values exactly as provided by the user unless the slot description specifies otherwise. Preserve formatting and avoid rewording, truncation, or making assumptions.
 * Only use information provided by the user.
 * Use clarification in ambiguous cases.
 * Multiple flows can be started. If a user wants to digress into a second flow, you do not need to cancel the current flow.
+* Do not cancel the flow unless the user explicitly requests it.
 * Strictly adhere to the provided action format.
-* For categorical slots try to match the user message with potential slot values. Use "other" if you cannot match it
 * Focus on the last message and take it one step at a time.
 * Use the previous conversation steps only to aid understanding.

+--

-## Available Flows
-Use the following structured
-```
-
-{% for flow in available_flows %}<flow>
-<name>{{ flow.name }}</name>
-<description>{{ flow.description }}</description>
-<slots>{% for slot in flow.slots %}
-<slot>
-<name>{{ slot.name }}</name>
-<description>{{ slot.description }}</description>
-<allowed_values>{{ slot.allowed_values }}</allowed_values>
-</slot>{% endfor %}
-</slots>
-</flow>
-{% endfor %}
-</flows>
+## Available Flows and Slots
+Use the following structured data:
+```json
+{"flows":[{% for flow in available_flows %}{"name":"{{ flow.name }}","description":"{{ flow.description }}"{% if flow.slots %},"slots":[{% for slot in flow.slots %}{"name":"{{ slot.name }}"{% if slot.description %},"description":"{{ slot.description }}"{% endif %}{% if slot.allowed_values %},"allowed_values":{{ slot.allowed_values }}{% endif %}}{% if not loop.last %},{% endif %}{% endfor %}]{% endif %}}{% if not loop.last %},{% endif %}{% endfor %}]}
 ```

+--
+
 ## Current State
-{% if current_flow != None %}
-
-
-
-
-<current_step>
-<requested_slot>{{ current_slot }}</requested_slot>
-<requested_slot_description>{{ current_slot_description }}</requested_slot_description>
-</current_step>
-<slots>
-{% for slot in flow_slots %}<slot>
-<name>{{ slot.name }}</name>
-<value>{{ slot.value }}</value>
-<type>{{ slot.type }}</type>
-<description>{{ slot.description }}</description>{% if slot.allowed_values %}
-<allowed_values>{{ slot.allowed_values }}</allowed_values>{% endif %}
-</slot>
-{% endfor %}
-</slots>
-</current_state>
-```
-{% else %}
-You are currently not inside any flow.
-{% endif %}
+{% if current_flow != None %}Use the following structured data:
+```json
+{"active_flow":"{{ current_flow }}","current_step":{"requested_slot":"{{ current_slot }}","requested_slot_description":"{{ current_slot_description }}"},"slots":[{% for slot in flow_slots %}{"name":"{{ slot.name }}","value":"{{ slot.value }}","type":"{{ slot.type }}"{% if slot.description %},"description":"{{ slot.description }}"{% endif %}{% if slot.allowed_values %},"allowed_values":"{{ slot.allowed_values }}"{% endif %}}{% if not loop.last %},{% endif %}{% endfor %}]}
+```{% else %}
+You are currently not inside any flow.{% endif %}

+---

 ## Conversation History
 {{ current_conversation }}

+---

 ## Task
-Create an action list with one action per line in response to the
+Create an action list with one action per line in response to the user's last message: """{{ user_message }}""".

 Your action list:
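The hunk above replaces the XML-style flow listing and current-state markup of the Claude 3.5 Sonnet command prompt with single-line JSON rendered by Jinja2. As a rough illustration of what the model now sees, here is a minimal sketch (not part of the package): only the template string is copied from the diff, while the `jinja2` call and the sample flow objects are invented for the example.

```python
# A minimal sketch of what the new single-line "Available Flows and Slots" block
# renders to. Only FLOWS_TEMPLATE is copied from the diff; the jinja2 usage and the
# flow/slot objects below are invented for illustration.
from types import SimpleNamespace

from jinja2 import Template

FLOWS_TEMPLATE = (
    '{"flows":[{% for flow in available_flows %}{"name":"{{ flow.name }}",'
    '"description":"{{ flow.description }}"{% if flow.slots %},"slots":['
    '{% for slot in flow.slots %}{"name":"{{ slot.name }}"'
    '{% if slot.description %},"description":"{{ slot.description }}"{% endif %}'
    '{% if slot.allowed_values %},"allowed_values":{{ slot.allowed_values }}{% endif %}}'
    '{% if not loop.last %},{% endif %}{% endfor %}]{% endif %}}'
    '{% if not loop.last %},{% endif %}{% endfor %}]}'
)

# Hypothetical flows, shaped like the objects the template iterates over.
flows = [
    SimpleNamespace(
        name="transfer_money",
        description="Send money to a contact",
        slots=[
            SimpleNamespace(
                name="transfer_money_recipient",
                description="Name of the recipient",
                allowed_values=None,
            )
        ],
    ),
    SimpleNamespace(name="list_contacts", description="List saved contacts", slots=[]),
]

# Renders one compact JSON object, e.g. (wrapped here for readability):
# {"flows":[{"name":"transfer_money","description":"Send money to a contact",
#  "slots":[{"name":"transfer_money_recipient","description":"Name of the recipient"}]},
#  {"name":"list_contacts","description":"List saved contacts"}]}
print(Template(FLOWS_TEMPLATE).render(available_flows=flows))
```

Compared with the removed XML-style markup, each flow now collapses to a single JSON object, so the same information takes up noticeably less of the prompt.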
rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2

@@ -1,73 +1,46 @@
 ## Task Description
 Your task is to analyze the current conversation context and generate a list of actions to start new business processes that we call flows, to extract slots, or respond to small talk and knowledge requests.

-
+---
+
+## Available Flows and Slots
+Use the following structured data:
+```json
+{"flows":[{% for flow in available_flows %}{"name":"{{ flow.name }}","description":"{{ flow.description }}"{% if flow.slots %},"slots":[{% for slot in flow.slots %}{"name":"{{ slot.name }}"{% if slot.description %},"description":"{{ slot.description }}"{% endif %}{% if slot.allowed_values %},"allowed_values":{{ slot.allowed_values }}{% endif %}}{% if not loop.last %},{% endif %}{% endfor %}]{% endif %}}{% if not loop.last %},{% endif %}{% endfor %}]}
+```
+
+---

 ## Available Actions:
-* `start flow flow_name`: Starting a flow. For example, `start flow transfer_money` or `start flow list_contacts
-* `set slot slot_name slot_value`: Slot setting. For example, `set slot transfer_money_recipient Freddy`. Can be used to correct and change previously set values
-* `cancel flow`: Cancelling the current flow
+* `start flow flow_name`: Starting a flow. For example, `start flow transfer_money` or `start flow list_contacts`.
+* `set slot slot_name slot_value`: Slot setting. For example, `set slot transfer_money_recipient Freddy`. Can be used to correct and change previously set values.
+* `cancel flow`: Cancelling the current flow.
 * `disambiguate flows flow_name1 flow_name2 ... flow_name_n`: Disambiguate which flow should be started when user input is ambiguous by listing the potential flows as options. For example, `disambiguate flows list_contacts add_contact remove_contact ...` if the user just wrote "contacts".
-* `provide info`: Responding to the user's questions by supplying relevant information, such as answering FAQs or explaining services
+* `provide info`: Responding to the user's questions by supplying relevant information, such as answering FAQs or explaining services.
 * `offtopic reply`: Responding to casual or social user messages that are unrelated to any flows, engaging in friendly conversation and addressing off-topic remarks.
-* `hand over`: Handing over to a human, in case the user seems frustrated or explicitly asks to speak to one
+* `hand over`: Handing over to a human, in case the user seems frustrated or explicitly asks to speak to one.

-
+---

 ## General Tips
 * Do not fill slots with abstract values or placeholders.
+* For categorical slots try to match the user message with allowed slot values. Use "other" if you cannot match it.
+* Set the boolean slots based on the user response. Map positive responses to `True`, and negative to `False`.
+* Extract text slot values exactly as provided by the user. Avoid assumptions, format changes, or partial extractions.
 * Only use information provided by the user.
 * Use clarification in ambiguous cases.
 * Multiple flows can be started. If a user wants to digress into a second flow, you do not need to cancel the current flow.
+* Do not cancel the flow unless the user explicitly requests it.
 * Strictly adhere to the provided action format.
-* For categorical slots try to match the user message with potential slot values. Use "other" if you cannot match it
 * Focus on the last message and take it one step at a time.
 * Use the previous conversation steps only to aid understanding.

-
-
-## Available Flows and Slots
-Use the following structured date:
-```json
-{
-"flows": [
-{% for flow in available_flows %}{
-"name": "{{ flow.name }}",
-"description": "{{ flow.description }}"{% if flow.slots %},
-"slots": [{% for slot in flow.slots %}
-{
-"name": "{{ slot.name }}"{% if slot.description %},
-"description": "{{ slot.description }}"{% endif %}{% if slot.allowed_values %},
-"allowed_values": {{ slot.allowed_values }}{% endif %}
-}{% if not loop.last %},{% endif %}{% endfor %}
-]{% endif %}
-}{% if not loop.last %},
-{% endif %}{% endfor %}
-]
-}
-```
-
---
+---

 ## Current State
-{% if current_flow != None %}Use the following structured
+{% if current_flow != None %}Use the following structured data:
 ```json
-{
-"active_flow": "{{ current_flow }}",
-"current_step": {
-"requested_slot": "{{ current_slot }}",
-"requested_slot_description": "{{ current_slot_description }}"
-},
-"slots": [{% for slot in flow_slots %}
-{
-"name": "{{ slot.name }}",
-"value": "{{ slot.value }}",
-"type": "{{ slot.type }}"{% if slot.description %},
-"description": "{{ slot.description }}"{% endif %}{% if slot.allowed_values %},
-"allowed_values": "{{ slot.allowed_values }}"{% endif %}
-}{% if not loop.last %},{% endif %}{% endfor %}
-]
-}
+{"active_flow":"{{ current_flow }}","current_step":{"requested_slot":"{{ current_slot }}","requested_slot_description":"{{ current_slot_description }}"},"slots":[{% for slot in flow_slots %}{"name":"{{ slot.name }}","value":"{{ slot.value }}","type":"{{ slot.type }}"{% if slot.description %},"description":"{{ slot.description }}"{% endif %}{% if slot.allowed_values %},"allowed_values":"{{ slot.allowed_values }}"{% endif %}}{% if not loop.last %},{% endif %}{% endfor %}]}
 ```{% else %}
 You are currently not inside any flow.{% endif %}

@@ -79,6 +52,6 @@ You are currently not inside any flow.{% endif %}
 ---

 ## Task
-Create an action list with one action per line in response to the
+Create an action list with one action per line in response to the user's last message: """{{ user_message }}""".

 Your action list:
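The GPT-4o template gets the same single-line JSON treatment and additionally moves "Available Flows and Slots" ahead of the action list. Complementing the previous sketch, the snippet below (again an illustration, not shipped code) renders the new one-line "Current State" block; the slot data is invented, only the template string comes from the diff.

```python
# A minimal sketch of the new single-line "Current State" block. Only
# CURRENT_STATE_TEMPLATE is copied from the diff; the slot data is invented.
from types import SimpleNamespace

from jinja2 import Template

CURRENT_STATE_TEMPLATE = (
    '{"active_flow":"{{ current_flow }}","current_step":{"requested_slot":"{{ current_slot }}",'
    '"requested_slot_description":"{{ current_slot_description }}"},"slots":['
    '{% for slot in flow_slots %}{"name":"{{ slot.name }}","value":"{{ slot.value }}",'
    '"type":"{{ slot.type }}"{% if slot.description %},"description":"{{ slot.description }}"{% endif %}'
    '{% if slot.allowed_values %},"allowed_values":"{{ slot.allowed_values }}"{% endif %}}'
    '{% if not loop.last %},{% endif %}{% endfor %}]}'
)

rendered = Template(CURRENT_STATE_TEMPLATE).render(
    current_flow="transfer_money",
    current_slot="transfer_money_recipient",
    current_slot_description="Name of the recipient",
    flow_slots=[
        SimpleNamespace(
            name="transfer_money_recipient",
            value="Freddy",
            type="text",
            description="Name of the recipient",
            allowed_values=None,
        )
    ],
)
# -> {"active_flow":"transfer_money","current_step":{"requested_slot":"transfer_money_recipient",
#    "requested_slot_description":"Name of the recipient"},"slots":[{"name":"transfer_money_recipient",
#    "value":"Freddy","type":"text","description":"Name of the recipient"}]}
print(rendered)
```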
rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py

@@ -1,3 +1,4 @@
+import copy
 from typing import Any, Dict, List, Optional, Text

 import structlog
@@ -19,10 +20,16 @@ from rasa.dialogue_understanding.generator.command_parser import (
 )
 from rasa.dialogue_understanding.generator.constants import (
     COMMAND_PROMPT_FILE_NAME,
-
+    DEFAULT_OPENAI_MAX_GENERATED_TOKENS,
     FLOW_RETRIEVAL_KEY,
     LLM_BASED_COMMAND_GENERATOR_CONFIG_FILE,
     LLM_CONFIG_KEY,
+    MODEL_CONFIG_KEY,
+    MODEL_NAME_CLAUDE_3_5_SONNET_20240620,
+    MODEL_NAME_GPT_4O_2024_11_20,
+    OPENAI_PROVIDER,
+    PROVIDER_CONFIG_KEY,
+    TIMEOUT_CONFIG_KEY,
     USER_INPUT_CONFIG_KEY,
 )
 from rasa.dialogue_understanding.generator.flow_retrieval import FlowRetrieval
@@ -36,9 +43,14 @@ from rasa.engine.recipes.default_recipe import DefaultV1Recipe
 from rasa.engine.storage.resource import Resource
 from rasa.engine.storage.storage import ModelStorage
 from rasa.shared.constants import (
+    ANTHROPIC_PROVIDER,
+    AWS_BEDROCK_PROVIDER,
+    AZURE_OPENAI_PROVIDER,
     EMBEDDINGS_CONFIG_KEY,
+    MAX_TOKENS_CONFIG_KEY,
     PROMPT_TEMPLATE_CONFIG_KEY,
     ROUTE_TO_CALM_SLOT,
+    TEMPERATURE_CONFIG_KEY,
 )
 from rasa.shared.core.flows import FlowsList
 from rasa.shared.core.trackers import DialogueStateTracker
@@ -61,18 +73,38 @@ from rasa.utils.log_utils import log_llm
 structlogger = structlog.get_logger()


+DEFAULT_LLM_CONFIG = {
+    PROVIDER_CONFIG_KEY: OPENAI_PROVIDER,
+    MODEL_CONFIG_KEY: MODEL_NAME_GPT_4O_2024_11_20,
+    TEMPERATURE_CONFIG_KEY: 0.0,
+    MAX_TOKENS_CONFIG_KEY: DEFAULT_OPENAI_MAX_GENERATED_TOKENS,
+    TIMEOUT_CONFIG_KEY: 7,
+}
+
 MODEL_PROMPT_MAPPER = {
-    "
-
-
+    f"{OPENAI_PROVIDER}/{MODEL_NAME_GPT_4O_2024_11_20}": (
+        "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2"
+    ),
+    f"{AZURE_OPENAI_PROVIDER}/{MODEL_NAME_GPT_4O_2024_11_20}": (
+        "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2"
+    ),
+    f"{AWS_BEDROCK_PROVIDER}/anthropic.{MODEL_NAME_CLAUDE_3_5_SONNET_20240620}-v1:0": (
         "command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2"
     ),
-    "
+    f"{ANTHROPIC_PROVIDER}/{MODEL_NAME_CLAUDE_3_5_SONNET_20240620}": (
         "command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2"
     ),
 }

-
+# When model is not configured, then we use the default prompt template
+DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME = (
+    "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2"
+)
+# When the configured model is not found in the model prompt mapper, then we use the
+# fallback prompt template
+FALLBACK_COMMAND_PROMPT_TEMPLATE_FILE_NAME = (
+    "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2"
+)


 class CommandParserValidatorSingleton:
@@ -154,25 +186,19 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
             **kwargs,
         )

-        # Get the
-
-            self.config
-            MODEL_PROMPT_MAPPER,
-            DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME,
+        # Get the prompt template from the config or the default prompt template.
+        self.prompt_template = self.resolve_component_prompt_template(
+            self.config, prompt_template
         )

-        # Set the
-
-        self.
-            default_command_prompt_template,
+        # Set the command syntax version to v2
+        CommandSyntaxManager.set_syntax_version(
+            self.get_component_command_syntax_version()
         )

         self.trace_prompt_tokens = self.config.get("trace_prompt_tokens", False)
         self.repeat_command_enabled = self.is_repeat_command_enabled()

-        # Set the command syntax version to v2
-        CommandSyntaxManager.set_syntax_version(CommandSyntaxVersion.v2)
-
     ### Implementations of LLMBasedCommandGenerator parent
     @staticmethod
     def get_default_config() -> Dict[str, Any]:
@@ -219,7 +245,7 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
         llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
         cls.perform_llm_health_check(
             llm_config,
-
+            cls.get_default_llm_config(),
             "compact_llm_command_generator.load",
             cls.__name__,
         )
@@ -508,15 +534,41 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
             config.get(FLOW_RETRIEVAL_KEY, {}).get(EMBEDDINGS_CONFIG_KEY),
             FlowRetrieval.__name__,
         )
+
+        # Create a copy of the config to avoid modifying the original config
+        # and update the llm config with the resolved llm config.
+        _config_copy = copy.deepcopy(config)
+        _config_copy[LLM_CONFIG_KEY] = llm_config
+        prompt_template = cls.resolve_component_prompt_template(_config_copy)
+
+        return deep_container_fingerprint(
+            [prompt_template, llm_config, embedding_config]
+        )
+
+    @staticmethod
+    def get_default_llm_config() -> Dict[str, Any]:
+        """Get the default LLM config for the command generator."""
+        return DEFAULT_LLM_CONFIG
+
+    @staticmethod
+    def get_component_command_syntax_version() -> CommandSyntaxVersion:
+        return CommandSyntaxVersion.v2
+
+    @staticmethod
+    def resolve_component_prompt_template(
+        config: Dict[str, Any], prompt_template: Optional[str] = None
+    ) -> Optional[str]:
+        """Get the prompt template from the config or the default prompt template."""
+        # Get the default prompt template based on the model name.
         default_command_prompt_template = get_default_prompt_template_based_on_model(
-
+            config.get(LLM_CONFIG_KEY, {}) or {},
             MODEL_PROMPT_MAPPER,
             DEFAULT_COMMAND_PROMPT_TEMPLATE_FILE_NAME,
+            FALLBACK_COMMAND_PROMPT_TEMPLATE_FILE_NAME,
         )
-
+
+        # Return the prompt template either from the config or the default prompt.
+        return prompt_template or get_prompt_template(
             config.get(PROMPT_TEMPLATE_CONFIG_KEY),
             default_command_prompt_template,
         )
-        return deep_container_fingerprint(
-            [prompt_template, llm_config, embedding_config]
-        )
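Besides the new `DEFAULT_LLM_CONFIG`, the generator now selects its prompt template through `MODEL_PROMPT_MAPPER`, keyed by a provider/model pair, with a default template when no model is configured and a fallback template when the configured model is unknown. The standalone sketch below mimics that lookup; it is not the rasa implementation, and the literal provider and model strings are assumptions standing in for rasa's constants (`OPENAI_PROVIDER`, `MODEL_NAME_GPT_4O_2024_11_20`, and so on).

```python
# A standalone sketch of the provider/model -> prompt template lookup that
# MODEL_PROMPT_MAPPER enables. Not the rasa implementation; the literal
# provider/model strings are assumed stand-ins for rasa's constants.
from typing import Any, Dict, Optional

MODEL_PROMPT_MAPPER = {
    "openai/gpt-4o-2024-11-20": "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2",
    "azure/gpt-4o-2024-11-20": "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2",
    "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0": (
        "command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2"
    ),
    "anthropic/claude-3-5-sonnet-20240620": (
        "command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2"
    ),
}

# No model configured -> default template; unmapped model -> fallback template.
DEFAULT_TEMPLATE = "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2"
FALLBACK_TEMPLATE = "command_prompt_v2_gpt_4o_2024_11_20_template.jinja2"


def pick_prompt_template(llm_config: Optional[Dict[str, Any]]) -> str:
    """Pick the bundled prompt template for the configured provider/model pair."""
    if not llm_config or "model" not in llm_config:
        return DEFAULT_TEMPLATE
    key = f"{llm_config.get('provider', 'openai')}/{llm_config['model']}"
    return MODEL_PROMPT_MAPPER.get(key, FALLBACK_TEMPLATE)


print(pick_prompt_template({"provider": "anthropic", "model": "claude-3-5-sonnet-20240620"}))
# -> command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2
print(pick_prompt_template({"provider": "openai", "model": "gpt-3.5-turbo"}))
# -> command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 (fallback)
```

In this release both the default and the fallback constants point at the GPT-4o template, so any unmapped configuration ends up with that prompt.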
rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py

@@ -8,6 +8,7 @@ from rasa.dialogue_understanding.commands.command_syntax_manager import (
     CommandSyntaxVersion,
 )
 from rasa.dialogue_understanding.generator.constants import (
+    DEFAULT_LLM_CONFIG,
     FLOW_RETRIEVAL_KEY,
     LLM_CONFIG_KEY,
     USER_INPUT_CONFIG_KEY,
@@ -71,18 +72,14 @@ class SingleStepLLMCommandGenerator(CompactLLMCommandGenerator):
                     "Please use the config parameter 'prompt_template' instead. "
                 ),
             )
-
-            config
-            or config.get(PROMPT_TEMPLATE_CONFIG_KEY)
-            or None
-        )
-        self.prompt_template = prompt_template or get_prompt_template(
-            config_prompt,
-            DEFAULT_COMMAND_PROMPT_TEMPLATE,
+        self.prompt_template = self.resolve_component_prompt_template(
+            config, prompt_template
         )

         # Set the command syntax version to v1
-        CommandSyntaxManager.set_syntax_version(
+        CommandSyntaxManager.set_syntax_version(
+            self.get_component_command_syntax_version()
+        )

     @staticmethod
     def get_default_config() -> Dict[str, Any]:
@@ -98,15 +95,7 @@ class SingleStepLLMCommandGenerator(CompactLLMCommandGenerator):
     @classmethod
     def fingerprint_addon(cls: Any, config: Dict[str, Any]) -> Optional[str]:
         """Add a fingerprint for the graph."""
-
-            config.get(PROMPT_CONFIG_KEY)
-            or config.get(PROMPT_TEMPLATE_CONFIG_KEY)
-            or None
-        )
-        prompt_template = get_prompt_template(
-            config_prompt,
-            DEFAULT_COMMAND_PROMPT_TEMPLATE,
-        )
+        prompt_template = cls.resolve_component_prompt_template(config)
         llm_config = resolve_model_client_config(
             config.get(LLM_CONFIG_KEY), SingleStepLLMCommandGenerator.__name__
         )
@@ -117,3 +106,28 @@ class SingleStepLLMCommandGenerator(CompactLLMCommandGenerator):
         return deep_container_fingerprint(
             [prompt_template, llm_config, embedding_config]
         )
+
+    @staticmethod
+    def get_default_llm_config() -> Dict[str, Any]:
+        """Get the default LLM config for the command generator."""
+        return DEFAULT_LLM_CONFIG
+
+    @staticmethod
+    def get_component_command_syntax_version() -> CommandSyntaxVersion:
+        return CommandSyntaxVersion.v1
+
+    @staticmethod
+    def resolve_component_prompt_template(
+        config: Dict[str, Any], prompt_template: Optional[str] = None
+    ) -> Optional[str]:
+        """Get the prompt template from the config or the default prompt template."""
+        # Get the default prompt template based on the model name.
+        config_prompt = (
+            config.get(PROMPT_CONFIG_KEY)
+            or config.get(PROMPT_TEMPLATE_CONFIG_KEY)
+            or None
+        )
+        return prompt_template or get_prompt_template(
+            config_prompt,
+            DEFAULT_COMMAND_PROMPT_TEMPLATE,
+        )
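`SingleStepLLMCommandGenerator` now reuses the parent constructor and only overrides three hooks: `get_default_llm_config`, `get_component_command_syntax_version`, and `resolve_component_prompt_template`. The toy classes below (not the rasa classes, and with assumed config keys) illustrate that hook-override pattern.

```python
# A toy sketch of the refactoring pattern (not the actual rasa classes): the parent
# constructor delegates to small overridable hooks, so the subclass only swaps the
# command syntax version and the prompt-template resolution instead of duplicating
# the constructor. The config keys "prompt" and "prompt_template" are assumptions.
from typing import Any, Dict, Optional


class CompactGeneratorSketch:
    def __init__(self, config: Dict[str, Any], prompt_template: Optional[str] = None):
        # Both calls dispatch to the subclass override when one exists.
        self.prompt_template = self.resolve_component_prompt_template(config, prompt_template)
        self.syntax_version = self.get_component_command_syntax_version()

    @staticmethod
    def get_component_command_syntax_version() -> str:
        return "v2"

    @staticmethod
    def resolve_component_prompt_template(
        config: Dict[str, Any], prompt_template: Optional[str] = None
    ) -> Optional[str]:
        return prompt_template or config.get("prompt_template") or "default_v2_template"


class SingleStepGeneratorSketch(CompactGeneratorSketch):
    @staticmethod
    def get_component_command_syntax_version() -> str:
        return "v1"

    @staticmethod
    def resolve_component_prompt_template(
        config: Dict[str, Any], prompt_template: Optional[str] = None
    ) -> Optional[str]:
        # The single-step generator still honours the deprecated "prompt" key.
        return (
            prompt_template
            or config.get("prompt")
            or config.get("prompt_template")
            or "default_v1_template"
        )


print(SingleStepGeneratorSketch({}).syntax_version)                     # -> v1
print(SingleStepGeneratorSketch({"prompt": "custom"}).prompt_template)  # -> custom
```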
rasa/dialogue_understanding/processor/command_processor.py

@@ -42,6 +42,7 @@ from rasa.dialogue_understanding.stack.frames import (
 from rasa.dialogue_understanding.stack.utils import (
     filled_slots_for_active_flow,
     top_flow_frame,
+    top_user_flow_frame,
 )
 from rasa.engine.graph import ExecutionContext
 from rasa.shared.constants import (
@@ -430,29 +431,30 @@ def clean_up_commands(
                 command=command,
             )

-        elif isinstance(command, StartFlowCommand)
-
-
-            structlogger.debug(
-                "command_processor.clean_up_commands.skip_command_flow_already_active",
-                command=command,
+        elif isinstance(command, StartFlowCommand):
+            top_user_frame = top_user_flow_frame(
+                tracker.stack, ignore_call_and_link_frames=False
             )
+            top_flow_id = top_user_frame.flow_id if top_user_frame else ""

-
-
-
-
-
-
-
-
+            if top_flow_id == command.flow:
+                # drop a start flow command if the starting flow is equal
+                # to the currently active flow
+                structlogger.debug(
+                    "command_processor.clean_up_commands.skip_command_flow_already_active",
+                    command=command,
+                )
+                continue

-            if
-
-
-
-
-
+            if should_add_handle_digressions_command(tracker, all_flows, top_flow_id):
+                handle_digression_command = HandleDigressionsCommand(flow=command.flow)
+                if handle_digression_command in clean_commands:
+                    structlogger.debug(
+                        "command_processor.clean_up_commands.skip_handle_digressions.command_already_present",
+                        command=handle_digression_command,
+                    )
+                    continue
+                clean_commands.append(handle_digression_command)
                 structlogger.debug(
                     "command_processor.clean_up_commands.push_handle_digressions",
                     command=command,
@@ -492,6 +494,18 @@ def clean_up_commands(
     elif not tracker.has_coexistence_routing_slot and len(clean_commands) > 1:
         clean_commands = filter_cannot_handle_command_for_skipped_slots(clean_commands)

+    # remove cancel flow when there is a handle digression command
+    # otherwise the cancel command will cancel the active flow which defined a specific
+    # behavior for the digression
+    if contains_command(clean_commands, HandleDigressionsCommand) and contains_command(
+        clean_commands, CancelFlowCommand
+    ):
+        clean_commands = [
+            command
+            for command in clean_commands
+            if not isinstance(command, CancelFlowCommand)
+        ]
+
     clean_commands = ensure_max_number_of_command_type(
         clean_commands, RepeatBotMessagesCommand, 1
     )
@@ -848,3 +862,28 @@ def filter_cannot_handle_command_for_skipped_slots(
             and CANNOT_HANDLE_REASON == command.reason
         )
     ]
+
+
+def should_add_handle_digressions_command(
+    tracker: DialogueStateTracker, all_flows: FlowsList, top_flow_id: str
+) -> bool:
+    """Check if a handle digressions command should be added to the commands.
+
+    The command should replace a StartFlow command only if we are at a collect step of
+    a flow and a new flow is predicted by the command generator to start.
+    """
+    current_flow = all_flows.flow_by_id(top_flow_id)
+    current_flow_condition = current_flow and (
+        current_flow.ask_confirm_digressions or current_flow.block_digressions
+    )
+
+    collect_info = get_current_collect_step(tracker.stack, all_flows)
+
+    if collect_info and (
+        collect_info.ask_confirm_digressions
+        or collect_info.block_digressions
+        or current_flow_condition
+    ):
+        return True
+
+    return False
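`clean_up_commands` now turns a `StartFlowCommand` into a `HandleDigressionsCommand` when the active collect step or flow asks to confirm or block digressions, and it drops any `CancelFlowCommand` once a `HandleDigressionsCommand` is present, so the digression behaviour of the active flow is not short-circuited by a cancellation. The sketch below reproduces only that final filtering rule, using simplified stand-in command classes rather than the rasa ones.

```python
# A simplified, self-contained sketch of the new clean-up rule (stand-in classes,
# not the rasa commands): when a HandleDigressions command survives clean-up, any
# CancelFlow command is dropped so it cannot cancel the flow whose digression
# behaviour was just triggered.
from dataclasses import dataclass
from typing import List


@dataclass(frozen=True)
class CancelFlowCommand:
    pass


@dataclass(frozen=True)
class HandleDigressionsCommand:
    flow: str


def contains_command(commands: List[object], command_type: type) -> bool:
    return any(isinstance(command, command_type) for command in commands)


def drop_cancel_flow_if_digressions_are_handled(commands: List[object]) -> List[object]:
    if contains_command(commands, HandleDigressionsCommand) and contains_command(
        commands, CancelFlowCommand
    ):
        return [c for c in commands if not isinstance(c, CancelFlowCommand)]
    return commands


print(drop_cancel_flow_if_digressions_are_handled(
    [HandleDigressionsCommand(flow="add_contact"), CancelFlowCommand()]
))
# -> [HandleDigressionsCommand(flow='add_contact')]
```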
rasa/dialogue_understanding/stack/utils.py

@@ -63,7 +63,9 @@ def top_flow_frame(
     return None


-def top_user_flow_frame(
+def top_user_flow_frame(
+    dialogue_stack: DialogueStack, ignore_call_and_link_frames: bool = True
+) -> Optional[UserFlowStackFrame]:
     """Returns the topmost user flow frame from the tracker.

     User flows are flows that are created by developers of an assistant and
@@ -75,16 +77,19 @@ def top_user_flow_frame(dialogue_stack: DialogueStack) -> Optional[UserFlowStack

     Args:
         dialogue_stack: The dialogue stack to use.
+        ignore_call_and_link_frames: Whether to ignore user frames of type `call`
+            and `link`. By default, these frames are ignored.

     Returns:
         The topmost user flow frame from the tracker.
     """
     for frame in reversed(dialogue_stack.frames):
-        if (
-
-
-
-
+        if isinstance(frame, UserFlowStackFrame):
+            if ignore_call_and_link_frames and (
+                frame.frame_type == FlowStackFrameType.CALL
+                or frame.frame_type == FlowStackFrameType.LINK
+            ):
+                continue
             return frame
     return None

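`top_user_flow_frame` gains an `ignore_call_and_link_frames` flag so callers such as `clean_up_commands` (which passes `ignore_call_and_link_frames=False`) can also see user frames pushed by `call` or `link` steps. A self-contained sketch of that lookup, with stand-in frame classes rather than the rasa ones:

```python
# A self-contained sketch of the updated lookup (stand-in classes, not the rasa
# ones): walk the stack top-down and, unless ignore_call_and_link_frames is False,
# skip user frames that were pushed by `call` or `link` steps.
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional


class FrameType(Enum):
    REGULAR = "regular"
    CALL = "call"
    LINK = "link"


@dataclass
class UserFlowFrame:
    flow_id: str
    frame_type: FrameType = FrameType.REGULAR


def top_user_flow_frame(
    frames: List[UserFlowFrame], ignore_call_and_link_frames: bool = True
) -> Optional[UserFlowFrame]:
    for frame in reversed(frames):
        if ignore_call_and_link_frames and frame.frame_type in (
            FrameType.CALL,
            FrameType.LINK,
        ):
            continue
        return frame
    return None


stack = [
    UserFlowFrame(flow_id="transfer_money"),
    UserFlowFrame(flow_id="check_balance", frame_type=FrameType.CALL),
]
print(top_user_flow_frame(stack).flow_id)  # -> transfer_money (call frame skipped)
print(top_user_flow_frame(stack, ignore_call_and_link_frames=False).flow_id)  # -> check_balance
```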