rasa-pro 3.12.6.dev2__py3-none-any.whl → 3.13.0.dev2__py3-none-any.whl
This diff compares the contents of two publicly released versions of the rasa-pro package as published to a public registry. It is provided for informational purposes only.
- rasa/__init__.py +0 -6
- rasa/cli/scaffold.py +1 -1
- rasa/core/actions/action.py +38 -34
- rasa/core/actions/action_run_slot_rejections.py +1 -1
- rasa/core/channels/studio_chat.py +16 -43
- rasa/core/channels/voice_ready/audiocodes.py +46 -17
- rasa/core/information_retrieval/faiss.py +68 -7
- rasa/core/information_retrieval/information_retrieval.py +40 -2
- rasa/core/information_retrieval/milvus.py +7 -2
- rasa/core/information_retrieval/qdrant.py +7 -2
- rasa/core/nlg/contextual_response_rephraser.py +11 -27
- rasa/core/nlg/generator.py +5 -21
- rasa/core/nlg/response.py +6 -43
- rasa/core/nlg/summarize.py +1 -15
- rasa/core/nlg/translate.py +0 -8
- rasa/core/policies/enterprise_search_policy.py +64 -316
- rasa/core/policies/flows/flow_executor.py +3 -38
- rasa/core/policies/intentless_policy.py +4 -17
- rasa/core/policies/policy.py +0 -2
- rasa/core/processor.py +27 -6
- rasa/core/utils.py +53 -0
- rasa/dialogue_understanding/coexistence/llm_based_router.py +4 -18
- rasa/dialogue_understanding/commands/cancel_flow_command.py +4 -59
- rasa/dialogue_understanding/commands/knowledge_answer_command.py +2 -2
- rasa/dialogue_understanding/commands/start_flow_command.py +0 -41
- rasa/dialogue_understanding/generator/command_generator.py +67 -0
- rasa/dialogue_understanding/generator/command_parser.py +1 -1
- rasa/dialogue_understanding/generator/llm_based_command_generator.py +7 -23
- rasa/dialogue_understanding/generator/llm_command_generator.py +1 -3
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_template.jinja2 +1 -1
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2 +1 -1
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 +24 -2
- rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +8 -12
- rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +0 -61
- rasa/dialogue_understanding/processor/command_processor.py +7 -65
- rasa/dialogue_understanding/stack/utils.py +0 -38
- rasa/dialogue_understanding_test/command_metric_calculation.py +7 -40
- rasa/dialogue_understanding_test/command_metrics.py +38 -0
- rasa/dialogue_understanding_test/du_test_case.py +58 -25
- rasa/dialogue_understanding_test/du_test_result.py +228 -132
- rasa/dialogue_understanding_test/du_test_runner.py +10 -1
- rasa/dialogue_understanding_test/io.py +48 -16
- rasa/document_retrieval/__init__.py +0 -0
- rasa/document_retrieval/constants.py +32 -0
- rasa/document_retrieval/document_post_processor.py +351 -0
- rasa/document_retrieval/document_post_processor_prompt_template.jinja2 +0 -0
- rasa/document_retrieval/document_retriever.py +333 -0
- rasa/document_retrieval/knowledge_base_connectors/__init__.py +0 -0
- rasa/document_retrieval/knowledge_base_connectors/api_connector.py +39 -0
- rasa/document_retrieval/knowledge_base_connectors/knowledge_base_connector.py +34 -0
- rasa/document_retrieval/knowledge_base_connectors/vector_store_connector.py +226 -0
- rasa/document_retrieval/query_rewriter.py +234 -0
- rasa/document_retrieval/query_rewriter_prompt_template.jinja2 +8 -0
- rasa/engine/recipes/default_components.py +2 -0
- rasa/hooks.py +0 -55
- rasa/model_manager/model_api.py +1 -1
- rasa/model_manager/socket_bridge.py +0 -7
- rasa/shared/constants.py +0 -5
- rasa/shared/core/constants.py +0 -8
- rasa/shared/core/domain.py +12 -3
- rasa/shared/core/flows/flow.py +0 -17
- rasa/shared/core/flows/flows_yaml_schema.json +3 -38
- rasa/shared/core/flows/steps/collect.py +5 -18
- rasa/shared/core/flows/utils.py +1 -16
- rasa/shared/core/slot_mappings.py +11 -5
- rasa/shared/core/slots.py +1 -1
- rasa/shared/core/trackers.py +4 -10
- rasa/shared/nlu/constants.py +0 -1
- rasa/shared/providers/constants.py +0 -9
- rasa/shared/providers/llm/_base_litellm_client.py +4 -14
- rasa/shared/providers/llm/default_litellm_llm_client.py +2 -2
- rasa/shared/providers/llm/litellm_router_llm_client.py +7 -17
- rasa/shared/providers/llm/llm_client.py +15 -24
- rasa/shared/providers/llm/self_hosted_llm_client.py +2 -10
- rasa/shared/utils/common.py +11 -1
- rasa/shared/utils/health_check/health_check.py +1 -7
- rasa/shared/utils/llm.py +1 -1
- rasa/tracing/instrumentation/attribute_extractors.py +50 -17
- rasa/tracing/instrumentation/instrumentation.py +12 -12
- rasa/tracing/instrumentation/intentless_policy_instrumentation.py +1 -2
- rasa/utils/licensing.py +0 -15
- rasa/validator.py +1 -123
- rasa/version.py +1 -1
- {rasa_pro-3.12.6.dev2.dist-info → rasa_pro-3.13.0.dev2.dist-info}/METADATA +2 -3
- {rasa_pro-3.12.6.dev2.dist-info → rasa_pro-3.13.0.dev2.dist-info}/RECORD +88 -80
- rasa/core/actions/action_handle_digressions.py +0 -164
- rasa/dialogue_understanding/commands/handle_digressions_command.py +0 -144
- rasa/dialogue_understanding/patterns/handle_digressions.py +0 -81
- rasa/monkey_patches.py +0 -91
- {rasa_pro-3.12.6.dev2.dist-info → rasa_pro-3.13.0.dev2.dist-info}/NOTICE +0 -0
- {rasa_pro-3.12.6.dev2.dist-info → rasa_pro-3.13.0.dev2.dist-info}/WHEEL +0 -0
- {rasa_pro-3.12.6.dev2.dist-info → rasa_pro-3.13.0.dev2.dist-info}/entry_points.txt +0 -0
--- a/rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2
+++ b/rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2
@@ -16,7 +16,9 @@ Use the following structured data:
 * `set slot slot_name slot_value`: Slot setting. For example, `set slot transfer_money_recipient Freddy`. Can be used to correct and change previously set values.
 * `cancel flow`: Cancelling the current flow.
 * `disambiguate flows flow_name1 flow_name2 ... flow_name_n`: Disambiguate which flow should be started when user input is ambiguous by listing the potential flows as options. For example, `disambiguate flows list_contacts add_contact remove_contact ...` if the user just wrote "contacts".
-
+{%- if relevant_documents.results %}
+* `search and reply`: Responding to the user's message by using the relevant FAQs (included in this prompt) retrieved from the knowledge base.
+{%- endif %}
 * `offtopic reply`: Responding to casual or social user messages that are unrelated to any flows, engaging in friendly conversation and addressing off-topic remarks.
 * `hand over`: Handing over to a human, in case the user seems frustrated or explicitly asks to speak to one.
 
@@ -27,16 +29,36 @@ Use the following structured data:
 * For categorical slots try to match the user message with allowed slot values. Use "other" if you cannot match it.
 * Set the boolean slots based on the user response. Map positive responses to `True`, and negative to `False`.
 * Extract text slot values exactly as provided by the user. Avoid assumptions, format changes, or partial extractions.
-* Only use information provided by the user.
 * Use clarification in ambiguous cases.
+* Use `disambiguate flows` only when multiple flows could fit the same message (e.g., "card" could mean `block_card` or `replace_card`).
+{%- if relevant_documents.results %}
+* A user asking a question does not automatically imply that they want `search and reply`. The objective is to help them complete a business process if its possible to do so via a flow.
+* **Flow Priority**: If a user message can be addressed by starting a flow (even if it looks like a general question), ALWAYS start the flow first. Example: If the user says "How do I activate my card?", use `start flow activate_card` instead of `search and reply`. Only use `search and reply` if no flow matches the request.
+{%- endif %}
+* Only use information provided by the user.
 * Multiple flows can be started. If a user wants to digress into a second flow, you do not need to cancel the current flow.
 * Do not cancel the flow unless the user explicitly requests it.
 * Strictly adhere to the provided action format.
 * Focus on the last message and take it one step at a time.
 * Use the previous conversation steps only to aid understanding.
 
+{%- if relevant_documents.results %}
+
+---
+
+## Relevant FAQs from the knowledge base
+```json
+{"documents":[{% for document in relevant_documents.results %}{"Q":"{{ document.text }}","A":"{{ document.metadata.answer }}"},{% endfor %}]}
+```
+
+---
+
+{% else %}
+
 ---
 
+{% endif -%}
+
 ## Current State
 {% if current_flow != None %}Use the following structured data:
 ```json
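Note: the net effect of the template change is easiest to see by rendering the new block directly. A minimal sketch, using plain jinja2 and hypothetical Doc/Results stand-ins for the real search-result objects; only the names relevant_documents.results, document.text, and document.metadata.answer are taken from the template above.

from dataclasses import dataclass, field
from typing import Any, Dict, List

from jinja2 import Template

@dataclass
class Doc:  # hypothetical stand-in for one retrieved document
    text: str
    metadata: Dict[str, Any] = field(default_factory=dict)

@dataclass
class Results:  # hypothetical stand-in for the search result list
    results: List[Doc]

FAQ_BLOCK = (
    "{%- if relevant_documents.results %}\n"
    "## Relevant FAQs from the knowledge base\n"
    '{"documents":[{% for document in relevant_documents.results %}'
    '{"Q":"{{ document.text }}","A":"{{ document.metadata.answer }}"},'
    "{% endfor %}]}\n"
    "{%- endif %}"
)

docs = Results([Doc("How do I block my card?", {"answer": "Use the app."})])
print(Template(FAQ_BLOCK).render(relevant_documents=docs))
# -> the FAQ section, each document serialized as {"Q": ..., "A": ...}
print(Template(FAQ_BLOCK).render(relevant_documents=Results([])))
# -> empty string: with no results the whole block is omitted from the prompt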
--- a/rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py
+++ b/rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py
@@ -4,6 +4,7 @@ from typing import Any, Dict, List, Optional, Text
 import structlog
 
 import rasa.shared.utils.io
+from rasa.core.information_retrieval import SearchResultList
 from rasa.dialogue_understanding.commands import (
     CannotHandleCommand,
     Command,
@@ -38,6 +39,7 @@ from rasa.dialogue_understanding.utils import (
     add_commands_to_message_parse_data,
     add_prompt_to_message_parse_data,
 )
+from rasa.document_retrieval.constants import POST_PROCESSED_DOCUMENTS_KEY
 from rasa.engine.graph import ExecutionContext
 from rasa.engine.recipes.default_recipe import DefaultV1Recipe
 from rasa.engine.storage.resource import Resource
@@ -47,10 +49,6 @@ from rasa.shared.constants import (
     AWS_BEDROCK_PROVIDER,
     AZURE_OPENAI_PROVIDER,
     EMBEDDINGS_CONFIG_KEY,
-    LANGFUSE_CUSTOM_METADATA_DICT,
-    LANGFUSE_METADATA_SESSION_ID,
-    LANGFUSE_METADATA_USER_ID,
-    LANGFUSE_TAGS,
     MAX_TOKENS_CONFIG_KEY,
     PROMPT_TEMPLATE_CONFIG_KEY,
     ROUTE_TO_CALM_SLOT,
@@ -366,14 +364,7 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=flow_prompt,
         )
 
-        metadata = {
-            LANGFUSE_METADATA_USER_ID: self.user_id,
-            LANGFUSE_METADATA_SESSION_ID: tracker.sender_id if tracker else "",
-            LANGFUSE_CUSTOM_METADATA_DICT: {"component": self.__class__.__name__},
-            LANGFUSE_TAGS: [self.__class__.__name__],
-        }
-
-        response = await self.invoke_llm(flow_prompt, metadata)
+        response = await self.invoke_llm(flow_prompt)
         llm_response = LLMResponse.ensure_llm_response(response)
         # The check for 'None' maintains compatibility with older versions
         # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
@@ -504,6 +495,10 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
         latest_user_message = sanitize_message_for_prompt(message.get(TEXT))
         current_conversation += f"\nUSER: {latest_user_message}"
 
+        relevant_documents = SearchResultList.from_dict(
+            message.get(POST_PROCESSED_DOCUMENTS_KEY, [])
+        )
+
         inputs = {
             "available_flows": self.prepare_flows_for_template(
                 startable_flows, tracker
@@ -517,6 +512,7 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
             "current_slot_allowed_values": current_slot_allowed_values,
             "user_message": latest_user_message,
             "is_repeat_command_enabled": self.repeat_command_enabled,
+            "relevant_documents": relevant_documents,
         }
 
         return self.compile_template(self.prompt_template).render(**inputs)
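Note: a hedged sketch of the new data flow. An upstream document-retrieval component stores its post-processed results in the message parse data; the command generator rehydrates them and hands them to the prompt template. Only the from_dict call and the POST_PROCESSED_DOCUMENTS_KEY name come from the diff; the Message and SearchResultList stand-ins below, and the key's string value, are assumptions for illustration.

from typing import Any, Dict, List, Optional

POST_PROCESSED_DOCUMENTS_KEY = "post_processed_documents"  # assumed value

class Message:  # simplified stand-in for Rasa's Message
    def __init__(self, data: Optional[Dict[str, Any]] = None) -> None:
        self.data = data or {}

    def get(self, key: str, default: Any = None) -> Any:
        return self.data.get(key, default)

class SearchResultList:  # stand-in exposing only what the template needs
    def __init__(self, results: List[Any]) -> None:
        self.results = results

    @classmethod
    def from_dict(cls, data: Any) -> "SearchResultList":
        # An empty payload yields a falsy .results, so the template's
        # `{%- if relevant_documents.results %}` guard drops the FAQ section.
        return cls(list(data) if data else [])

message = Message({POST_PROCESSED_DOCUMENTS_KEY: [{"Q": "...", "A": "..."}]})
relevant_documents = SearchResultList.from_dict(
    message.get(POST_PROCESSED_DOCUMENTS_KEY, [])
)
inputs = {"relevant_documents": relevant_documents}  # merged into render(**inputs)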
--- a/rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml
+++ b/rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml
@@ -1,17 +1,6 @@
 version: "3.1"
 responses:
 
-  utter_ask_continue_previous_flow:
-  - text: "Confirm if you would like to continue with the initial topic: {{context.interrupted_flow_id}}?"
-    metadata:
-      rephrase: True
-      template: jinja
-    buttons:
-    - title: Continue with the previous topic.
-      payload: /SetSlots(continue_previous_flow=True)
-    - title: Switch to new topic.
-      payload: /SetSlots(continue_previous_flow=False)
-
   utter_ask_rephrase:
   - text: I’m sorry I am unable to understand you, could you please rephrase?
 
@@ -20,20 +9,6 @@ responses:
     metadata:
       rephrase: True
 
-  utter_block_digressions:
-  - text: "We can look into {{ context.interrupting_flow_id }} later. Let's focus on the current topic: {{ context.interrupted_flow_id }}."
-    metadata:
-      rephrase: True
-      template: jinja
-  - text: "Let's continue with the current topic: {{ context.interrupted_flow_id }}."
-    condition:
-    - type: slot
-      name: continue_previous_flow
-      value: True
-    metadata:
-      rephrase: True
-      template: jinja
-
   utter_boolean_slot_rejection:
   - text: "Sorry, the value you provided, `{{value}}`, is not valid. Please respond with a valid value."
     metadata:
@@ -60,12 +35,6 @@ responses:
      rephrase: True
      template: jinja
 
-  utter_continue_interruption:
-  - text: "Let's continue with the chosen topic instead: {{ context.interrupting_flow_id }}."
-    metadata:
-      rephrase: True
-      template: jinja
-
   utter_corrected_previous_input:
   - text: "Ok, I am updating {{ context.corrected_slots.keys()|join(', ') }} to {{ context.new_slot_values | join(', ') }} respectively."
     metadata:
@@ -150,10 +119,6 @@ slots:
    type: float
    initial_value: 0.0
    max_value: 1000000
-  continue_previous_flow:
-    type: bool
-    mappings:
-    - type: from_llm
 
 flows:
   pattern_cancel_flow:
@@ -197,7 +162,6 @@ flows:
     steps:
       - action: action_clarify_flows
       - action: utter_clarification_options_rasa
-      - action: action_listen
 
   pattern_code_change:
     description: Conversation repair flow for cleaning the stack after an assistant update
@@ -247,31 +211,6 @@ flows:
                next: END
          - else: END
 
-  pattern_handle_digressions:
-    description: Conversation repair flow for handling digressions
-    name: pattern handle digressions
-    steps:
-      - noop: true
-        id: branching
-        next:
-          - if: context.ask_confirm_digressions contains context.interrupting_flow_id
-            then: continue_previous_flow
-          - if: context.block_digressions contains context.interrupting_flow_id
-            then: block_digression
-          - else: continue_digression
-      - id: continue_previous_flow
-        collect: continue_previous_flow
-        next:
-          - if: slots.continue_previous_flow
-            then: block_digression
-          - else: continue_digression
-      - id: block_digression
-        action: action_block_digression
-        next: END
-      - id: continue_digression
-        action: action_continue_digression
-        next: END
-
   pattern_human_handoff:
     description: Conversation repair flow for switching users to a human agent if their request can't be handled
     name: pattern human handoff
--- a/rasa/dialogue_understanding/processor/command_processor.py
+++ b/rasa/dialogue_understanding/processor/command_processor.py
@@ -18,9 +18,6 @@ from rasa.dialogue_understanding.commands import (
 from rasa.dialogue_understanding.commands.handle_code_change_command import (
     HandleCodeChangeCommand,
 )
-from rasa.dialogue_understanding.commands.handle_digressions_command import (
-    HandleDigressionsCommand,
-)
 from rasa.dialogue_understanding.commands.set_slot_command import SetSlotExtractor
 from rasa.dialogue_understanding.commands.utils import (
     create_validate_frames_from_slot_set_events,
@@ -454,21 +451,7 @@ def clean_up_commands(
             )
             continue
 
-
-            handle_digression_command = HandleDigressionsCommand(flow=command.flow)
-            if handle_digression_command in clean_commands:
-                structlogger.debug(
-                    "command_processor.clean_up_commands.skip_handle_digressions.command_already_present",
-                    command=handle_digression_command,
-                )
-                continue
-            clean_commands.append(handle_digression_command)
-            structlogger.debug(
-                "command_processor.clean_up_commands.push_handle_digressions",
-                command=command,
-            )
-        else:
-            clean_commands.append(command)
+        clean_commands.append(command)
 
         # handle chitchat command differently from other free-form answer commands
         elif isinstance(command, ChitChatAnswerCommand):
@@ -503,21 +486,9 @@ def clean_up_commands(
     # when coexistence is enabled, by default there will be a SetSlotCommand
     # for the ROUTE_TO_CALM_SLOT slot.
     if tracker.has_coexistence_routing_slot and len(clean_commands) > 2:
-        clean_commands = filter_cannot_handle_command_for_skipped_slots(clean_commands)
+        clean_commands = filter_cannot_handle_command(clean_commands)
     elif not tracker.has_coexistence_routing_slot and len(clean_commands) > 1:
-        clean_commands = filter_cannot_handle_command_for_skipped_slots(clean_commands)
-
-    # remove cancel flow when there is a handle digression command
-    # otherwise the cancel command will cancel the active flow which defined a specific
-    # behavior for the digression
-    if contains_command(clean_commands, HandleDigressionsCommand) and contains_command(
-        clean_commands, CancelFlowCommand
-    ):
-        clean_commands = [
-            command
-            for command in clean_commands
-            if not isinstance(command, CancelFlowCommand)
-        ]
+        clean_commands = filter_cannot_handle_command(clean_commands)
 
     clean_commands = ensure_max_number_of_command_type(
         clean_commands, RepeatBotMessagesCommand, 1
@@ -857,12 +828,12 @@ def should_slot_be_set(
     return True
 
 
-def filter_cannot_handle_command_for_skipped_slots(
+def filter_cannot_handle_command(
     clean_commands: List[Command],
 ) -> List[Command]:
-    """Filter out a 'cannot handle' command
+    """Filter out a 'cannot handle' command.
 
-    This is used to filter out a 'cannot handle' command
+    This is used to filter out a 'cannot handle' command
     in case other commands are present.
 
     Returns:
@@ -871,34 +842,5 @@ def filter_cannot_handle_command_for_skipped_slots(
     return [
         command
         for command in clean_commands
-        if not (
-            isinstance(command, CannotHandleCommand)
-            and command.reason
-            and CANNOT_HANDLE_REASON == command.reason
-        )
+        if not isinstance(command, CannotHandleCommand)
     ]
-
-
-def should_add_handle_digressions_command(
-    tracker: DialogueStateTracker, all_flows: FlowsList, top_flow_id: str
-) -> bool:
-    """Check if a handle digressions command should be added to the commands.
-
-    The command should replace a StartFlow command only if we are at a collect step of
-    a flow and a new flow is predicted by the command generator to start.
-    """
-    current_flow = all_flows.flow_by_id(top_flow_id)
-    current_flow_condition = current_flow and (
-        current_flow.ask_confirm_digressions or current_flow.block_digressions
-    )
-
-    collect_info = get_current_collect_step(tracker.stack, all_flows)
-
-    if collect_info and (
-        collect_info.ask_confirm_digressions
-        or collect_info.block_digressions
-        or current_flow_condition
-    ):
-        return True
-
-    return False
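Note: the behavioral change in the last two hunks, in one sketch. The old helper only dropped CannotHandleCommand entries whose reason matched CANNOT_HANDLE_REASON; the renamed filter_cannot_handle_command drops every CannotHandleCommand once other commands are present. The command classes below are illustrative stand-ins, not the real Rasa ones.

from dataclasses import dataclass
from typing import List, Optional

class Command:
    pass

@dataclass
class CannotHandleCommand(Command):
    reason: Optional[str] = None

@dataclass
class StartFlowCommand(Command):
    flow: str = ""

def filter_cannot_handle_command(clean_commands: List[Command]) -> List[Command]:
    # new behavior: no reason check, any CannotHandleCommand is removed
    return [c for c in clean_commands if not isinstance(c, CannotHandleCommand)]

commands: List[Command] = [
    StartFlowCommand(flow="transfer_money"),
    CannotHandleCommand(reason=None),  # previously kept unless its reason matched
]
assert filter_cannot_handle_command(commands) == [
    StartFlowCommand(flow="transfer_money")
]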
--- a/rasa/dialogue_understanding/stack/utils.py
+++ b/rasa/dialogue_understanding/stack/utils.py
@@ -4,9 +4,6 @@ from typing import List, Optional, Set, Tuple
 from rasa.dialogue_understanding.patterns.collect_information import (
     CollectInformationPatternFlowStackFrame,
 )
-from rasa.dialogue_understanding.patterns.continue_interrupted import (
-    ContinueInterruptedPatternFlowStackFrame,
-)
 from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack
 from rasa.dialogue_understanding.stack.frames import (
     BaseFlowStackFrame,
@@ -221,38 +218,3 @@ def get_collect_steps_excluding_ask_before_filling_for_active_flow(
         for step in active_flow.get_collect_steps()
         if not step.ask_before_filling
     )
-
-
-def remove_digression_from_stack(stack: DialogueStack, flow_id: str) -> DialogueStack:
-    """Remove a specific flow frame from the stack and other frames that reference it.
-
-    The main use-case is to prevent duplicate digressions from being added to the stack.
-
-    Args:
-        stack: The dialogue stack.
-        flow_id: The flow to remove.
-
-    Returns:
-        The updated dialogue stack.
-    """
-    updated_stack = stack.copy()
-    original_frames = updated_stack.frames[:]
-    found_digression_index = -1
-    for index, frame in enumerate(original_frames):
-        if isinstance(frame, BaseFlowStackFrame) and frame.flow_id == flow_id:
-            updated_stack.frames.pop(index)
-            found_digression_index = index
-
-        # we also need to remove the `ContinueInterruptedPatternFlowStackFrame`
-        elif (
-            isinstance(frame, ContinueInterruptedPatternFlowStackFrame)
-            and frame.previous_flow_name == flow_id
-            and found_digression_index + 1 == index
-        ):
-            # we know that this frame is always added after the digressing flow frame
-            # that was blocked previously by action_block_digressions,
-            # so this check would occur after the digressing flow was popped.
-            # Therefore, we need to update the index dynamically before popping.
-            updated_stack.frames.pop(index - 1)
-
-    return updated_stack
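Note on the removed pop(index - 1): the loop iterates a copy of the frames while mutating the live list, so once the digressing frame is popped, every later frame shifts left by one. A generic illustration of that index arithmetic (plain strings stand in for stack frames):

frames = ["collect", "digression", "continue_interrupted", "main_flow"]
idx = frames.index("digression")
frames.pop(idx)                      # remove the digressing flow frame
assert frames[idx] == "continue_interrupted"  # successor slid into its slot
frames.pop((idx + 1) - 1)            # i.e. pop(index - 1) in the removed code
assert frames == ["collect", "main_flow"]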
--- a/rasa/dialogue_understanding_test/command_metric_calculation.py
+++ b/rasa/dialogue_understanding_test/command_metric_calculation.py
@@ -1,54 +1,21 @@
+import typing
 from collections import defaultdict
 from typing import Dict, List
 
-from pydantic import BaseModel
-
 from rasa.dialogue_understanding.commands import Command
 from rasa.dialogue_understanding_test.command_comparison import (
     is_command_present_in_list,
 )
-from rasa.dialogue_understanding_test.du_test_result import (
-    DialogueUnderstandingTestResult,
-)
-
-
-class CommandMetrics(BaseModel):
-    tp: int
-    fp: int
-    fn: int
-    total_count: int
-
-    @staticmethod
-    def _safe_divide(numerator: float, denominator: float) -> float:
-        """Safely perform division, returning 0.0 if the denominator is zero."""
-        return numerator / denominator if denominator > 0 else 0.0
+from rasa.dialogue_understanding_test.command_metrics import CommandMetrics
 
-    def get_precision(self) -> float:
-        return self._safe_divide(self.tp, self.tp + self.fp)
-
-    def get_recall(self) -> float:
-        return self._safe_divide(self.tp, self.tp + self.fn)
-
-    def get_f1_score(self) -> float:
-        precision = self.get_precision()
-        recall = self.get_recall()
-
-        return self._safe_divide(2 * precision * recall, precision + recall)
-
-    def as_dict(self) -> Dict[str, float]:
-        return {
-            "tp": self.tp,
-            "fp": self.fp,
-            "fn": self.fn,
-            "precision": self.get_precision(),
-            "recall": self.get_recall(),
-            "f1_score": self.get_f1_score(),
-            "total_count": self.total_count,
-        }
+if typing.TYPE_CHECKING:
+    from rasa.dialogue_understanding_test.du_test_result import (
+        DialogueUnderstandingTestResult,
+    )
 
 
 def calculate_command_metrics(
-    test_results: List[DialogueUnderstandingTestResult],
+    test_results: List["DialogueUnderstandingTestResult"],
 ) -> Dict[str, CommandMetrics]:
     """Calculate the command metrics for the test result."""
    metrics: Dict[str, CommandMetrics] = defaultdict(
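Note: the import reshuffle above is the standard TYPE_CHECKING pattern for avoiding a runtime import cycle between the two test modules. The type is imported only for static analysis and referenced as a string ("forward reference") in the annotation. The general shape, runnable on its own:

import typing
from typing import List

if typing.TYPE_CHECKING:
    # evaluated by type checkers only, never at runtime, so no import cycle
    from rasa.dialogue_understanding_test.du_test_result import (
        DialogueUnderstandingTestResult,
    )

def calculate_command_metrics(
    test_results: List["DialogueUnderstandingTestResult"],  # forward reference
) -> int:
    return len(test_results)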
--- /dev/null
+++ b/rasa/dialogue_understanding_test/command_metrics.py
@@ -0,0 +1,38 @@
+from typing import Dict
+
+from pydantic import BaseModel
+
+
+class CommandMetrics(BaseModel):
+    tp: int
+    fp: int
+    fn: int
+    total_count: int
+
+    @staticmethod
+    def _safe_divide(numerator: float, denominator: float) -> float:
+        """Safely perform division, returning 0.0 if the denominator is zero."""
+        return numerator / denominator if denominator > 0 else 0.0
+
+    def get_precision(self) -> float:
+        return self._safe_divide(self.tp, self.tp + self.fp)
+
+    def get_recall(self) -> float:
+        return self._safe_divide(self.tp, self.tp + self.fn)
+
+    def get_f1_score(self) -> float:
+        precision = self.get_precision()
+        recall = self.get_recall()
+
+        return self._safe_divide(2 * precision * recall, precision + recall)
+
+    def as_dict(self) -> Dict[str, float]:
+        return {
+            "tp": self.tp,
+            "fp": self.fp,
+            "fn": self.fn,
+            "precision": self.get_precision(),
+            "recall": self.get_recall(),
+            "f1_score": self.get_f1_score(),
+            "total_count": self.total_count,
+        }
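A worked example against the class exactly as added above (import path as used by command_metric_calculation.py, assuming the wheel is installed): with 8 true positives, 2 false positives, and 4 false negatives, precision is 8/10, recall is 8/12, and F1 is their harmonic mean.

from rasa.dialogue_understanding_test.command_metrics import CommandMetrics

m = CommandMetrics(tp=8, fp=2, fn=4, total_count=14)
assert m.get_precision() == 8 / 10           # 0.8
assert m.get_recall() == 8 / 12              # ~0.667
assert round(m.get_f1_score(), 4) == 0.7273  # 2PR / (P + R)
# _safe_divide keeps the degenerate all-zero case well-defined:
assert CommandMetrics(tp=0, fp=0, fn=0, total_count=0).as_dict()["f1_score"] == 0.0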
--- a/rasa/dialogue_understanding_test/du_test_result.py
+++ b/rasa/dialogue_understanding_test/du_test_result.py
@@ -1,7 +1,11 @@
+from collections import defaultdict
 from typing import Any, Dict, Iterator, List, Optional, Tuple
 
 from pydantic import BaseModel, Field
 
+from rasa.core import IntentlessPolicy
+from rasa.core.nlg.contextual_response_rephraser import ContextualResponseRephraser
+from rasa.core.policies.enterprise_search_policy import EnterpriseSearchPolicy
 from rasa.dialogue_understanding.commands.prompt_command import PromptCommand
 from rasa.dialogue_understanding.generator.command_parser import parse_commands
 from rasa.dialogue_understanding_test.command_comparison import are_command_lists_equal
@@ -69,6 +73,8 @@ class DialogueUnderstandingOutput(BaseModel):
     commands: Dict[str, List[PromptCommand]]
     # List of prompts
     prompts: Optional[List[Dict[str, Any]]] = None
+    # Latency of the full message roundtrip
+    latency: Optional[float] = None
 
     class Config:
         """Skip validation for PromptCommand protocol as pydantic does not know how to
@@ -88,27 +94,41 @@ class DialogueUnderstandingOutput(BaseModel):
     def get_component_names_that_predicted_commands_or_have_llm_response(
         self,
     ) -> List[str]:
-        """Get all component names
+        """Get all relevant component names.
+
+        Components are relevant if they have predicted commands or received a
         non-empty response from LLM.
         """
+        # Exclude components that are not related to Dialogue Understanding
+        component_names_to_exclude = [
+            EnterpriseSearchPolicy.__name__,
+            IntentlessPolicy.__name__,
+            ContextualResponseRephraser.__name__,
+        ]
+
         component_names_that_predicted_commands = (
             [
                 component_name
                 for component_name, predicted_commands in self.commands.items()
                 if predicted_commands
+                and component_name not in component_names_to_exclude
             ]
             if self.commands
             else []
         )
+
         components_with_prompts = (
             [
                 str(prompt.get(KEY_COMPONENT_NAME, None))
                 for prompt in self.prompts
                 if prompt.get(KEY_LLM_RESPONSE_METADATA, None)
+                and prompt.get(KEY_COMPONENT_NAME, None)
+                not in component_names_to_exclude
             ]
             if self.prompts
             else []
         )
+
         return list(
             set(component_names_that_predicted_commands + components_with_prompts)
         )
@@ -290,41 +310,54 @@ class DialogueUnderstandingTestStep(BaseModel):
 
         return ""
 
-    def get_latencies(self) -> List[float]:
+    def get_latencies(self) -> Dict[str, List[float]]:
         if self.dialogue_understanding_output is None:
-            return
+            return {}
 
-
+        component_name_to_prompt_info = (
+            self.dialogue_understanding_output.get_component_name_to_prompt_info()
+        )
 
-
-
-        for
-
-        ]
+        latencies = defaultdict(list)
+        for component_name, prompt_info_list in component_name_to_prompt_info.items():
+            for prompt_info in prompt_info_list:
+                latencies[component_name].append(prompt_info.get(KEY_LATENCY, 0.0))
 
-
+        return latencies
+
+    def get_completion_tokens(self) -> Dict[str, List[float]]:
         if self.dialogue_understanding_output is None:
-            return
+            return {}
 
-
+        component_name_to_prompt_info = (
+            self.dialogue_understanding_output.get_component_name_to_prompt_info()
+        )
 
-
-
-        for
-
-
+        completion_tokens = defaultdict(list)
+        for component_name, prompt_info_list in component_name_to_prompt_info.items():
+            for prompt_info in prompt_info_list:
+                completion_tokens[component_name].append(
+                    prompt_info.get(KEY_COMPLETION_TOKENS, 0.0)
+                )
+
+        return completion_tokens
 
-    def get_prompt_tokens(self) -> List[
+    def get_prompt_tokens(self) -> Dict[str, List[float]]:
         if self.dialogue_understanding_output is None:
-            return
+            return {}
 
-
+        component_name_to_prompt_info = (
+            self.dialogue_understanding_output.get_component_name_to_prompt_info()
+        )
 
-
-
-        for
-
-
+        prompt_tokens = defaultdict(list)
+        for component_name, prompt_info_list in component_name_to_prompt_info.items():
+            for prompt_info in prompt_info_list:
+                prompt_tokens[component_name].append(
+                    prompt_info.get(KEY_PROMPT_TOKENS, 0.0)
+                )
+
+        return prompt_tokens
 
 
 class DialogueUnderstandingTestCase(BaseModel):
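Note: the three accessors now share one aggregation shape, per-component lists keyed by component name instead of a single flat list. A minimal sketch of the defaultdict pattern used by get_latencies above; the KEY_LATENCY string value is an assumption for illustration.

from collections import defaultdict
from typing import Dict, List

KEY_LATENCY = "latency"  # assumed constant value

component_name_to_prompt_info = {
    "CompactLLMCommandGenerator": [{"latency": 0.41}, {"latency": 0.39}],
    "NLUCommandAdapter": [{}],  # a missing key falls back to 0.0
}

latencies: Dict[str, List[float]] = defaultdict(list)
for component_name, prompt_info_list in component_name_to_prompt_info.items():
    for prompt_info in prompt_info_list:
        latencies[component_name].append(prompt_info.get(KEY_LATENCY, 0.0))

assert latencies["CompactLLMCommandGenerator"] == [0.41, 0.39]
assert latencies["NLUCommandAdapter"] == [0.0]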