rasa-pro 3.12.6.dev2__py3-none-any.whl → 3.13.0.dev2__py3-none-any.whl
This diff compares the contents of publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of rasa-pro might be problematic.
- rasa/__init__.py +0 -6
- rasa/cli/scaffold.py +1 -1
- rasa/core/actions/action.py +38 -34
- rasa/core/actions/action_run_slot_rejections.py +1 -1
- rasa/core/channels/studio_chat.py +16 -43
- rasa/core/channels/voice_ready/audiocodes.py +46 -17
- rasa/core/information_retrieval/faiss.py +68 -7
- rasa/core/information_retrieval/information_retrieval.py +40 -2
- rasa/core/information_retrieval/milvus.py +7 -2
- rasa/core/information_retrieval/qdrant.py +7 -2
- rasa/core/nlg/contextual_response_rephraser.py +11 -27
- rasa/core/nlg/generator.py +5 -21
- rasa/core/nlg/response.py +6 -43
- rasa/core/nlg/summarize.py +1 -15
- rasa/core/nlg/translate.py +0 -8
- rasa/core/policies/enterprise_search_policy.py +64 -316
- rasa/core/policies/flows/flow_executor.py +3 -38
- rasa/core/policies/intentless_policy.py +4 -17
- rasa/core/policies/policy.py +0 -2
- rasa/core/processor.py +27 -6
- rasa/core/utils.py +53 -0
- rasa/dialogue_understanding/coexistence/llm_based_router.py +4 -18
- rasa/dialogue_understanding/commands/cancel_flow_command.py +4 -59
- rasa/dialogue_understanding/commands/knowledge_answer_command.py +2 -2
- rasa/dialogue_understanding/commands/start_flow_command.py +0 -41
- rasa/dialogue_understanding/generator/command_generator.py +67 -0
- rasa/dialogue_understanding/generator/command_parser.py +1 -1
- rasa/dialogue_understanding/generator/llm_based_command_generator.py +7 -23
- rasa/dialogue_understanding/generator/llm_command_generator.py +1 -3
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_template.jinja2 +1 -1
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_claude_3_5_sonnet_20240620_template.jinja2 +1 -1
- rasa/dialogue_understanding/generator/prompt_templates/command_prompt_v2_gpt_4o_2024_11_20_template.jinja2 +24 -2
- rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +8 -12
- rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +0 -61
- rasa/dialogue_understanding/processor/command_processor.py +7 -65
- rasa/dialogue_understanding/stack/utils.py +0 -38
- rasa/dialogue_understanding_test/command_metric_calculation.py +7 -40
- rasa/dialogue_understanding_test/command_metrics.py +38 -0
- rasa/dialogue_understanding_test/du_test_case.py +58 -25
- rasa/dialogue_understanding_test/du_test_result.py +228 -132
- rasa/dialogue_understanding_test/du_test_runner.py +10 -1
- rasa/dialogue_understanding_test/io.py +48 -16
- rasa/document_retrieval/__init__.py +0 -0
- rasa/document_retrieval/constants.py +32 -0
- rasa/document_retrieval/document_post_processor.py +351 -0
- rasa/document_retrieval/document_post_processor_prompt_template.jinja2 +0 -0
- rasa/document_retrieval/document_retriever.py +333 -0
- rasa/document_retrieval/knowledge_base_connectors/__init__.py +0 -0
- rasa/document_retrieval/knowledge_base_connectors/api_connector.py +39 -0
- rasa/document_retrieval/knowledge_base_connectors/knowledge_base_connector.py +34 -0
- rasa/document_retrieval/knowledge_base_connectors/vector_store_connector.py +226 -0
- rasa/document_retrieval/query_rewriter.py +234 -0
- rasa/document_retrieval/query_rewriter_prompt_template.jinja2 +8 -0
- rasa/engine/recipes/default_components.py +2 -0
- rasa/hooks.py +0 -55
- rasa/model_manager/model_api.py +1 -1
- rasa/model_manager/socket_bridge.py +0 -7
- rasa/shared/constants.py +0 -5
- rasa/shared/core/constants.py +0 -8
- rasa/shared/core/domain.py +12 -3
- rasa/shared/core/flows/flow.py +0 -17
- rasa/shared/core/flows/flows_yaml_schema.json +3 -38
- rasa/shared/core/flows/steps/collect.py +5 -18
- rasa/shared/core/flows/utils.py +1 -16
- rasa/shared/core/slot_mappings.py +11 -5
- rasa/shared/core/slots.py +1 -1
- rasa/shared/core/trackers.py +4 -10
- rasa/shared/nlu/constants.py +0 -1
- rasa/shared/providers/constants.py +0 -9
- rasa/shared/providers/llm/_base_litellm_client.py +4 -14
- rasa/shared/providers/llm/default_litellm_llm_client.py +2 -2
- rasa/shared/providers/llm/litellm_router_llm_client.py +7 -17
- rasa/shared/providers/llm/llm_client.py +15 -24
- rasa/shared/providers/llm/self_hosted_llm_client.py +2 -10
- rasa/shared/utils/common.py +11 -1
- rasa/shared/utils/health_check/health_check.py +1 -7
- rasa/shared/utils/llm.py +1 -1
- rasa/tracing/instrumentation/attribute_extractors.py +50 -17
- rasa/tracing/instrumentation/instrumentation.py +12 -12
- rasa/tracing/instrumentation/intentless_policy_instrumentation.py +1 -2
- rasa/utils/licensing.py +0 -15
- rasa/validator.py +1 -123
- rasa/version.py +1 -1
- {rasa_pro-3.12.6.dev2.dist-info → rasa_pro-3.13.0.dev2.dist-info}/METADATA +2 -3
- {rasa_pro-3.12.6.dev2.dist-info → rasa_pro-3.13.0.dev2.dist-info}/RECORD +88 -80
- rasa/core/actions/action_handle_digressions.py +0 -164
- rasa/dialogue_understanding/commands/handle_digressions_command.py +0 -144
- rasa/dialogue_understanding/patterns/handle_digressions.py +0 -81
- rasa/monkey_patches.py +0 -91
- {rasa_pro-3.12.6.dev2.dist-info → rasa_pro-3.13.0.dev2.dist-info}/NOTICE +0 -0
- {rasa_pro-3.12.6.dev2.dist-info → rasa_pro-3.13.0.dev2.dist-info}/WHEEL +0 -0
- {rasa_pro-3.12.6.dev2.dist-info → rasa_pro-3.13.0.dev2.dist-info}/entry_points.txt +0 -0
rasa/dialogue_understanding_test/du_test_result.py (+228 -132)

@@ -1,11 +1,14 @@
 import copy
-import typing
+from collections import defaultdict
 from typing import Any, Dict, List, Optional, Text

 import numpy as np
 from pydantic import BaseModel

 from rasa.dialogue_understanding.commands.prompt_command import PromptCommand
+from rasa.dialogue_understanding_test.command_metrics import (
+    CommandMetrics,
+)
 from rasa.dialogue_understanding_test.du_test_case import (
     DialogueUnderstandingTestCase,
     DialogueUnderstandingTestStep,
@@ -13,26 +16,40 @@ from rasa.dialogue_understanding_test.du_test_case import (
 from rasa.dialogue_understanding_test.utils import get_command_comparison
 from rasa.shared.nlu.constants import KEY_SYSTEM_PROMPT, KEY_USER_PROMPT

-if typing.TYPE_CHECKING:
-    from rasa.dialogue_understanding_test.command_metric_calculation import (
-        CommandMetrics,
-    )
-
 KEY_TEST_CASES_ACCURACY = "test_cases"
 KEY_USER_UTTERANCES_ACCURACY = "user_utterances"

+KEY_COMMANDS_F1_MACRO = "macro"
+KEY_COMMANDS_F1_MICRO = "micro"
+KEY_COMMANDS_F1_WEIGHTED = "weighted_average"
+
+OUTPUT_DUT_ACCURACY = "accuracy"
+OUTPUT_DUT_ACCURACY_TEST_CASES = "test_cases"
+OUTPUT_DUT_ACCURACY_USER_UTTERANCES = "user_utterances"
+
+OUTPUT_COMMANDS_F1 = "f1_score"
+OUTPUT_COMMANDS_F1_MACRO = "macro"
+OUTPUT_COMMANDS_F1_MICRO = "micro"
+OUTPUT_COMMANDS_F1_WEIGHTED = "weighted_average"
+
 OUTPUT_NUMBER_OF_FAILED_TESTS = "number_of_failed_tests"
 OUTPUT_NUMBER_OF_PASSED_TESTS = "number_of_passed_tests"
-OUTPUT_TEST_CASES_ACCURACY = "test_cases_accuracy"
-OUTPUT_USER_UTTERANCES_ACCURACY = "user_utterances_accuracy"
 OUTPUT_NUMBER_OF_PASSED_USER_UTTERANCES = "number_of_passed_user_utterances"
 OUTPUT_NUMBER_OF_FAILED_USER_UTTERANCES = "number_of_failed_user_utterances"
+OUTPUT_NAMES_OF_FAILED_TESTS = "names_of_failed_tests"
+OUTPUT_NAMES_OF_PASSED_TESTS = "names_of_passed_tests"
+OUTPUT_FAILED_TEST_STEPS = "failed_test_steps"
+OUTPUT_TEST_CASES_ACCURACY = "test_cases_accuracy"
+OUTPUT_USER_UTTERANCES_ACCURACY = "user_utterances_accuracy"
 OUTPUT_COMMAND_METRICS = "command_metrics"
+OUTPUT_COMMANDS_F1_MACRO_INSTRUMENTATION_ATTR = "commands_f1_macro"
+OUTPUT_COMMANDS_F1_MICRO_INSTRUMENTATION_ATTR = "commands_f1_micro"
+OUTPUT_COMMANDS_F1_WEIGHTED_INSTRUMENTATION_ATTR = "commands_f1_weighted_average"
+
 OUTPUT_LATENCY_METRICS = "latency"
 OUTPUT_COMPLETION_TOKEN_METRICS = "completion_token"
 OUTPUT_PROMPT_TOKEN_METRICS = "prompt_token"
-
-OUTPUT_NAMES_OF_PASSED_TESTS = "names_of_passed_tests"
+
 OUTPUT_LLM_COMMAND_GENERATOR_CONFIG = "llm_command_generator_config"


@@ -60,6 +77,7 @@ class FailedTestStep(BaseModel):
     expected_commands: List[PromptCommand]
     predicted_commands: Dict[str, List[PromptCommand]]
     conversation_with_diff: List[str]
+    conversation_until_failed_user_utterance: List[str]

     class Config:
         """Skip validation for PromptCommand protocol as pydantic does not know how to
@@ -90,10 +108,12 @@ class FailedTestStep(BaseModel):
         )

         step_index = test_case.steps.index(step)
-
-        conversation_with_diff = test_case.to_readable_conversation(
+        conversation_until_failed_user_utterance = test_case.to_readable_conversation(
             until_step=step_index + 1
-        )
+        )
+        conversation_with_diff = (
+            conversation_until_failed_user_utterance + get_command_comparison(step)
+        )

         return cls(
             file=file_path,
@@ -106,12 +126,14 @@ class FailedTestStep(BaseModel):
             expected_commands=step.commands or [],
             predicted_commands=predicted_commands,
             conversation_with_diff=conversation_with_diff,
+            conversation_until_failed_user_utterance=conversation_until_failed_user_utterance,
         )

     def to_dict(self, output_prompt: bool) -> Dict[str, Any]:
         step_info = {
             "file": self.file,
             "test_case": self.test_case_name,
+            "conversation": self.conversation_until_failed_user_utterance,
             "failed_user_utterance": self.failed_user_utterance,
             "error_line": self.error_line,
             "pass_status": self.pass_status,
@@ -155,25 +177,32 @@ class DialogueUnderstandingTestSuiteResult:
             KEY_TEST_CASES_ACCURACY: 0.0,
             KEY_USER_UTTERANCES_ACCURACY: 0.0,
         }
+        self.f1_score = {
+            KEY_COMMANDS_F1_MACRO: 0.0,
+            KEY_COMMANDS_F1_MICRO: 0.0,
+            KEY_COMMANDS_F1_WEIGHTED: 0.0,
+        }
         self.number_of_passed_tests = 0
         self.number_of_failed_tests = 0
         self.number_of_passed_user_utterances = 0
         self.number_of_failed_user_utterances = 0
-        self.command_metrics: Optional[Dict[str,
+        self.command_metrics: Optional[Dict[str, CommandMetrics]] = None
         self.names_of_failed_tests: List[str] = []
         self.names_of_passed_tests: List[str] = []
         self.failed_test_steps: List[FailedTestStep] = []
         self.llm_config: Optional[Dict[str, Any]] = None
-
-
-        self.
+        # The performance metrics distribution per component
+        # For example: {"command_generator": {"p50": x, ...}, ...}
+        self.latency_metrics: Dict[str, Dict[str, float]] = {}
+        self.prompt_token_metrics: Dict[str, Dict[str, float]] = {}
+        self.completion_token_metrics: Dict[str, Dict[str, float]] = {}

     @classmethod
     def from_results(
         cls,
         failing_test_results: List[DialogueUnderstandingTestResult],
         passing_test_results: List[DialogueUnderstandingTestResult],
-        command_metrics: Dict[str,
+        command_metrics: Dict[str, CommandMetrics],
         llm_config: Optional[Dict[str, Any]],
     ) -> "DialogueUnderstandingTestSuiteResult":
         """Create a DialogueUnderstandingTestSuiteResult object from the test results.
@@ -207,6 +236,16 @@ class DialogueUnderstandingTestSuiteResult:

         instance.command_metrics = command_metrics

+        instance.f1_score[KEY_COMMANDS_F1_MACRO] = cls.calculate_f1_macro(
+            command_metrics
+        )
+        instance.f1_score[KEY_COMMANDS_F1_MICRO] = cls.calculate_f1_micro(
+            command_metrics
+        )
+        instance.f1_score[KEY_COMMANDS_F1_WEIGHTED] = cls.calculate_f1_weighted(
+            command_metrics
+        )
+
         instance.names_of_passed_tests = [
             passing_test_result.test_case.full_name()
             for passing_test_result in passing_test_results
@@ -234,131 +273,34 @@ class DialogueUnderstandingTestSuiteResult:

         return instance

-    def _set_user_utterance_metrics(
-        self,
-        failing_test_results: List[DialogueUnderstandingTestResult],
-        passing_test_results: List[DialogueUnderstandingTestResult],
-    ) -> None:
-        # Create list of booleans indicating whether each user utterance
-        # passed or failed
-        user_utterances_status = [
-            step.has_passed()
-            for test in failing_test_results + passing_test_results
-            for step in test.test_case.iterate_over_user_steps()
-        ]
-        # Calculate number of passed and failed user utterances
-        self.number_of_passed_user_utterances = sum(user_utterances_status)
-        self.number_of_failed_user_utterances = (
-            len(user_utterances_status) - self.number_of_passed_user_utterances
-        )
-        # Calculate user utterance accuracy
-        self.accuracy[KEY_USER_UTTERANCES_ACCURACY] = (
-            self.number_of_passed_user_utterances
-            / (
-                self.number_of_failed_user_utterances
-                + self.number_of_passed_user_utterances
-            )
-        )
-
-    @staticmethod
-    def _create_failed_steps_from_results(
-        failing_test_results: List["DialogueUnderstandingTestResult"],
-    ) -> List[FailedTestStep]:
-        """Create list of FailedTestStep objects from failing test results.
-
-        Given a list of failing DialogueUnderstandingTestResult objects,
-        create and return a list of FailedTestStep objects for each failing user step.
-
-        Args:
-            failing_test_results: Results of failing Dialogue Understanding tests.
-
-        Returns:
-            List of aggregated FailedTestStep objects for logging to console and file.
-        """
-        failed_test_steps: List[FailedTestStep] = []
-
-        for result in failing_test_results:
-            test_case = result.test_case
-            for step in test_case.failed_user_steps():
-                failed_test_steps.append(
-                    FailedTestStep.from_dialogue_understanding_test_step(
-                        step, test_case
-                    )
-                )
-
-        return failed_test_steps
-
-    @staticmethod
-    def _calculate_percentiles(values: List[float]) -> Dict[str, float]:
-        return {
-            "p50": float(np.percentile(values, 50)) if values else 0.0,
-            "p90": float(np.percentile(values, 90)) if values else 0.0,
-            "p99": float(np.percentile(values, 99)) if values else 0.0,
-        }
-
-    @classmethod
-    def get_latency_metrics(
-        cls,
-        failing_test_results: List["DialogueUnderstandingTestResult"],
-        passing_test_results: List["DialogueUnderstandingTestResult"],
-    ) -> Dict[str, float]:
-        latencies = [
-            latency
-            for result in failing_test_results + passing_test_results
-            for step in result.test_case.steps
-            for latency in step.get_latencies()
-        ]
-
-        return cls._calculate_percentiles(latencies)
-
-    @classmethod
-    def get_prompt_token_metrics(
-        cls,
-        failing_test_results: List["DialogueUnderstandingTestResult"],
-        passing_test_results: List["DialogueUnderstandingTestResult"],
-    ) -> Dict[str, float]:
-        tokens = [
-            token_count
-            for result in failing_test_results + passing_test_results
-            for step in result.test_case.steps
-            for token_count in step.get_prompt_tokens()
-        ]
-
-        return cls._calculate_percentiles(tokens)
-
-    @classmethod
-    def get_completion_token_metrics(
-        cls,
-        failing_test_results: List["DialogueUnderstandingTestResult"],
-        passing_test_results: List["DialogueUnderstandingTestResult"],
-    ) -> Dict[str, float]:
-        tokens = [
-            token_count
-            for result in failing_test_results + passing_test_results
-            for step in result.test_case.steps
-            for token_count in step.get_completion_tokens()
-        ]
-
-        return cls._calculate_percentiles(tokens)
-
     def to_dict(self, output_prompt: bool = False) -> Dict[str, Any]:
         """Builds a dictionary for writing test results to a YML file.

         Args:
             output_prompt: Whether to log the prompt or not.
         """
-        # 1. Accuracy block
         result_dict: Dict[Text, Any] = {
-
-
-
+            # Accuracy block
+            OUTPUT_DUT_ACCURACY: {
+                OUTPUT_DUT_ACCURACY_TEST_CASES: self.accuracy[KEY_TEST_CASES_ACCURACY],
+                OUTPUT_DUT_ACCURACY_USER_UTTERANCES: self.accuracy[
+                    KEY_USER_UTTERANCES_ACCURACY
+                ],
+            },
+            # F1 block
+            OUTPUT_COMMANDS_F1: {
+                OUTPUT_COMMANDS_F1_MACRO: self.f1_score[KEY_COMMANDS_F1_MACRO],
+                OUTPUT_COMMANDS_F1_MICRO: self.f1_score[KEY_COMMANDS_F1_MICRO],
+                OUTPUT_COMMANDS_F1_WEIGHTED: self.f1_score[KEY_COMMANDS_F1_WEIGHTED],
             },
+            # Other metrics block
             OUTPUT_NUMBER_OF_PASSED_TESTS: self.number_of_passed_tests,
             OUTPUT_NUMBER_OF_FAILED_TESTS: self.number_of_failed_tests,
             OUTPUT_NUMBER_OF_PASSED_USER_UTTERANCES: self.number_of_passed_user_utterances,  # noqa: E501
             OUTPUT_NUMBER_OF_FAILED_USER_UTTERANCES: self.number_of_failed_user_utterances,  # noqa: E501
         }

+        # Command metrics block
         cmd_metrics_output = {}
         if self.command_metrics:
             if isinstance(self.command_metrics, dict):
@@ -366,25 +308,179 @@ class DialogueUnderstandingTestSuiteResult:
                     cmd_metrics_output[cmd_name] = metrics_obj.as_dict()
             else:
                 pass
-
         result_dict[OUTPUT_COMMAND_METRICS] = cmd_metrics_output

+        # Latency and tokens metrics block
         result_dict[OUTPUT_LATENCY_METRICS] = self.latency_metrics
         result_dict[OUTPUT_PROMPT_TOKEN_METRICS] = self.prompt_token_metrics
         result_dict[OUTPUT_COMPLETION_TOKEN_METRICS] = self.completion_token_metrics

+        # Passed and failed test names block
         result_dict[OUTPUT_NAMES_OF_PASSED_TESTS] = self.names_of_passed_tests
         result_dict[OUTPUT_NAMES_OF_FAILED_TESTS] = self.names_of_failed_tests

+        # Failed test steps block
         failed_steps_list = []
         for failed_test_step in self.failed_test_steps:
            failed_steps_list.append(
                failed_test_step.to_dict(output_prompt=output_prompt)
            )
+        result_dict[OUTPUT_FAILED_TEST_STEPS] = failed_steps_list

-
-
+        # LLM config block
         if self.llm_config:
             result_dict[OUTPUT_LLM_COMMAND_GENERATOR_CONFIG] = self.llm_config

         return result_dict
+
+    @staticmethod
+    def calculate_f1_macro(command_metrics: Dict[str, CommandMetrics]) -> float:
+        f1_scores = [metrics.get_f1_score() for metrics in command_metrics.values()]
+        return sum(f1_scores) / len(f1_scores)
+
+    @staticmethod
+    def calculate_f1_micro(command_metrics: Dict[str, CommandMetrics]) -> float:
+        combined_metrics = CommandMetrics(
+            tp=sum([metrics.tp for metrics in command_metrics.values()]),
+            fp=sum([metrics.fp for metrics in command_metrics.values()]),
+            fn=sum([metrics.fn for metrics in command_metrics.values()]),
+            total_count=sum(m.total_count for m in command_metrics.values()),
+        )
+        return combined_metrics.get_f1_score()
+
+    @staticmethod
+    def calculate_f1_weighted(command_metrics: Dict[str, CommandMetrics]) -> float:
+        class_counts = []
+        f1_scores = []
+        for metrics in command_metrics.values():
+            class_counts.append(metrics.total_count)
+            f1_scores.append(metrics.get_f1_score())
+
+        total_count = sum(class_counts)
+        weighted_f1 = sum(
+            (count / total_count) * f1 for f1, count in zip(f1_scores, class_counts)
+        )
+        return weighted_f1
+
+    @classmethod
+    def get_latency_metrics(
+        cls,
+        failing_test_results: List["DialogueUnderstandingTestResult"],
+        passing_test_results: List["DialogueUnderstandingTestResult"],
+    ) -> Dict[str, Dict[str, float]]:
+        latencies = defaultdict(list)
+
+        for result in failing_test_results + passing_test_results:
+            for step in result.test_case.steps:
+                if (
+                    step.dialogue_understanding_output
+                    and step.dialogue_understanding_output.latency
+                ):
+                    latencies["total"].append(
+                        step.dialogue_understanding_output.latency
+                    )
+                for component_name, latency in step.get_latencies().items():
+                    latencies[component_name].extend(latency)
+
+        return {
+            component_name: cls._calculate_percentiles(latency_list)
+            for component_name, latency_list in latencies.items()
+        }
+
+    @classmethod
+    def get_prompt_token_metrics(
+        cls,
+        failing_test_results: List["DialogueUnderstandingTestResult"],
+        passing_test_results: List["DialogueUnderstandingTestResult"],
+    ) -> Dict[str, Dict[str, float]]:
+        tokens = defaultdict(list)
+
+        for result in failing_test_results + passing_test_results:
+            for step in result.test_case.steps:
+                for component_name, token_count in step.get_prompt_tokens().items():
+                    tokens[component_name].extend(token_count)
+
+        return {
+            component_name: cls._calculate_percentiles(latency_list)
+            for component_name, latency_list in tokens.items()
+        }
+
+    @classmethod
+    def get_completion_token_metrics(
+        cls,
+        failing_test_results: List["DialogueUnderstandingTestResult"],
+        passing_test_results: List["DialogueUnderstandingTestResult"],
+    ) -> Dict[str, Dict[str, float]]:
+        tokens = defaultdict(list)
+
+        for result in failing_test_results + passing_test_results:
+            for step in result.test_case.steps:
+                for component_name, token_count in step.get_completion_tokens().items():
+                    tokens[component_name].extend(token_count)
+
+        return {
+            component_name: cls._calculate_percentiles(latency_list)
+            for component_name, latency_list in tokens.items()
+        }
+
+    @staticmethod
+    def _calculate_percentiles(values: List[float]) -> Dict[str, float]:
+        return {
+            "p50": float(np.percentile(values, 50)) if values else 0.0,
+            "p90": float(np.percentile(values, 90)) if values else 0.0,
+            "p99": float(np.percentile(values, 99)) if values else 0.0,
+        }
+
+    @staticmethod
+    def _create_failed_steps_from_results(
+        failing_test_results: List["DialogueUnderstandingTestResult"],
+    ) -> List[FailedTestStep]:
+        """Create list of FailedTestStep objects from failing test results.
+
+        Given a list of failing DialogueUnderstandingTestResult objects,
+        create and return a list of FailedTestStep objects for each failing user step.
+
+        Args:
+            failing_test_results: Results of failing Dialogue Understanding tests.
+
+        Returns:
+            List of aggregated FailedTestStep objects for logging to console and file.
+        """
+        failed_test_steps: List[FailedTestStep] = []
+
+        for result in failing_test_results:
+            test_case = result.test_case
+            for step in test_case.failed_user_steps():
+                failed_test_steps.append(
+                    FailedTestStep.from_dialogue_understanding_test_step(
+                        step, test_case
+                    )
+                )
+
+        return failed_test_steps
+
+    def _set_user_utterance_metrics(
+        self,
+        failing_test_results: List[DialogueUnderstandingTestResult],
+        passing_test_results: List[DialogueUnderstandingTestResult],
+    ) -> None:
+        # Create list of booleans indicating whether each user utterance
+        # passed or failed
+        user_utterances_status = [
+            step.has_passed()
+            for test in failing_test_results + passing_test_results
+            for step in test.test_case.iterate_over_user_steps()
+        ]
+        # Calculate number of passed and failed user utterances
+        self.number_of_passed_user_utterances = sum(user_utterances_status)
+        self.number_of_failed_user_utterances = (
+            len(user_utterances_status) - self.number_of_passed_user_utterances
+        )
+        # Calculate user utterance accuracy
+        self.accuracy[KEY_USER_UTTERANCES_ACCURACY] = (
+            self.number_of_passed_user_utterances
+            / (
+                self.number_of_failed_user_utterances
+                + self.number_of_passed_user_utterances
+            )
+        )
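The three aggregation modes added above differ only in how the per-command counts are combined: macro averages each command's F1 equally, micro pools the raw tp/fp/fn counts before computing a single F1, and weighted_average scales each command's F1 by how often that command occurs. The standalone sketch below reproduces that arithmetic; the Metrics dataclass and its get_f1_score() (using the usual F1 = 2*tp / (2*tp + fp + fn)) are stand-ins for the package's CommandMetrics class from the new command_metrics.py, which is not shown in this diff, and the counts are invented.

from dataclasses import dataclass
from typing import Dict


@dataclass
class Metrics:
    # stand-in for CommandMetrics; get_f1_score() uses the standard
    # definition F1 = 2*tp / (2*tp + fp + fn)
    tp: int
    fp: int
    fn: int
    total_count: int

    def get_f1_score(self) -> float:
        denominator = 2 * self.tp + self.fp + self.fn
        return 2 * self.tp / denominator if denominator else 0.0


per_command: Dict[str, Metrics] = {
    "StartFlow": Metrics(tp=8, fp=1, fn=1, total_count=9),
    "SetSlot": Metrics(tp=3, fp=2, fn=3, total_count=6),
}

# macro: unweighted mean of per-command F1 scores
macro = sum(m.get_f1_score() for m in per_command.values()) / len(per_command)

# micro: pool tp/fp/fn across commands, then compute a single F1
pooled = Metrics(
    tp=sum(m.tp for m in per_command.values()),
    fp=sum(m.fp for m in per_command.values()),
    fn=sum(m.fn for m in per_command.values()),
    total_count=sum(m.total_count for m in per_command.values()),
)
micro = pooled.get_f1_score()

# weighted average: per-command F1 weighted by how often the command occurs
total = sum(m.total_count for m in per_command.values())
weighted = sum(
    (m.total_count / total) * m.get_f1_score() for m in per_command.values()
)

print(f"macro={macro:.4f} micro={micro:.4f} weighted={weighted:.4f}")

With these invented counts, macro is about 0.717, micro about 0.759, and weighted about 0.752: micro and weighted pull toward the more frequent StartFlow command, while macro treats both commands equally.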
rasa/dialogue_understanding_test/du_test_runner.py (+10 -1)

@@ -1,4 +1,5 @@
 import asyncio
+import time
 from typing import Any, Dict, List, Optional, Text

 import structlog
@@ -184,8 +185,12 @@ class DialogueUnderstandingTestRunner:
                 user_uttered_event_indices[user_step_index],
             )

+            # Total latency of a message roundtrip
+            latency = None
+
             # send the user message
             try:
+                start = time.time()
                 await self._send_user_message(
                     step_sender_id,
                     test_case,
@@ -193,6 +198,8 @@
                     metadata,
                     output_channel=output_channel,
                 )
+                end = time.time()
+                latency = end - start
             except Exception as e:
                 structlogger.error(
                     "dialogue_understanding_test_runner.send_user_message.failed",
@@ -210,7 +217,7 @@
             # get the dialogue understanding output
             tracker = await self.agent.tracker_store.retrieve(step_sender_id)
             dialogue_understanding_output = self.get_dialogue_understanding_output(
-                tracker, user_uttered_event_indices[user_step_index]
+                tracker, user_uttered_event_indices[user_step_index], latency
             )
             user_step.dialogue_understanding_output = dialogue_understanding_output

@@ -224,6 +231,7 @@
         self,
         tracker: DialogueStateTracker,
         index_user_uttered_event: int,
+        latency: Optional[float] = None,
     ) -> Optional[DialogueUnderstandingOutput]:
         """Returns the dialogue understanding output.

@@ -259,6 +267,7 @@
         return DialogueUnderstandingOutput(
             commands=commands,
             prompts=user_uttered_event.parse_data.get(PROMPTS, []),
+            latency=latency,
         )

     @staticmethod
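The runner change above brackets each _send_user_message call with time.time() and hands the difference to the DialogueUnderstandingOutput as the roundtrip latency; du_test_result.py then reduces the collected values per component to p50/p90/p99 with np.percentile. Below is a minimal sketch of that measure-then-aggregate pattern, independent of the Rasa classes; fake_roundtrip and the resulting numbers are invented.

import time
from typing import Dict, List

import numpy as np


def percentiles(values: List[float]) -> Dict[str, float]:
    # same p50/p90/p99 reduction as _calculate_percentiles in du_test_result.py
    return {
        "p50": float(np.percentile(values, 50)) if values else 0.0,
        "p90": float(np.percentile(values, 90)) if values else 0.0,
        "p99": float(np.percentile(values, 99)) if values else 0.0,
    }


def fake_roundtrip() -> str:
    # stand-in for sending a user message and waiting for the bot to respond
    time.sleep(0.05)
    return "ok"


latencies: List[float] = []
for _ in range(5):
    # wall-clock bracket, mirroring the start/end pair around _send_user_message
    start = time.time()
    fake_roundtrip()
    latencies.append(time.time() - start)

print(percentiles(latencies))

time.time() is wall-clock time; time.perf_counter() would be the monotonic alternative, but either feeds the same percentile reduction.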
rasa/dialogue_understanding_test/io.py (+48 -16)

@@ -5,7 +5,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Union
 import rich

 import rasa.shared.data
-from rasa.dialogue_understanding_test.
+from rasa.dialogue_understanding_test.command_metrics import CommandMetrics
 from rasa.dialogue_understanding_test.constants import SCHEMA_FILE_PATH
 from rasa.dialogue_understanding_test.du_test_case import (
     KEY_CHOICES,
@@ -13,6 +13,9 @@ from rasa.dialogue_understanding_test.du_test_case import (
     KEY_PROMPT_TOKENS,
 )
 from rasa.dialogue_understanding_test.du_test_result import (
+    KEY_COMMANDS_F1_MACRO,
+    KEY_COMMANDS_F1_MICRO,
+    KEY_COMMANDS_F1_WEIGHTED,
     DialogueUnderstandingTestSuiteResult,
     FailedTestStep,
 )
@@ -274,6 +277,7 @@ def print_test_results(
     # print failed test steps
     print_failed_cases(test_suite_result, output_prompt=output_prompt)

+    print_f1_summary(test_suite_result)
     print_command_summary(test_suite_result.command_metrics)
     print_latency_and_token_metrics(test_suite_result)
     print_final_line(test_suite_result)
@@ -325,14 +329,19 @@ def print_prompt(step: FailedTestStep) -> None:
             rich.print(
                 f"[bold] prompt name [/bold]: {prompt_data[KEY_PROMPT_NAME]}"
             )
-
-
-
-
-
-
-
-
+            if KEY_PROMPT_TOKENS in prompt_data:
+                rich.print(
+                    f"[bold] prompt tokens [/bold]: {prompt_data[KEY_PROMPT_TOKENS]}"  # noqa: E501
+                )
+            if KEY_COMPLETION_TOKENS in prompt_data:
+                rich.print(
+                    f"[bold] completion tokens[/bold]: "
+                    f"{prompt_data[KEY_COMPLETION_TOKENS]}"
+                )
+            if KEY_LATENCY in prompt_data:
+                rich.print(
+                    f"[bold] latency [/bold]: {prompt_data[KEY_LATENCY]}"
+                )
             if KEY_SYSTEM_PROMPT in prompt_data:
                 rich.print(
                     f"[bold] system prompt [/bold]: "
@@ -350,11 +359,26 @@ def print_llm_output(step: FailedTestStep) -> None:
     for component, component_prompts in step.prompts.items():
         for prompt_data in component_prompts:
             if KEY_CHOICES in prompt_data:
-                rich.print("\n[red3]--
+                rich.print(f"\n[red3]-- LLM ouptut for {component} --[/red3]")
                 rich.print(prompt_data.get(KEY_CHOICES))
                 rich.print("[red3]-------------[/red3]")


+def print_f1_summary(result: DialogueUnderstandingTestSuiteResult) -> None:
+    """Print the f1 summary."""
+    print()
+    rasa.shared.utils.cli.print_info(rasa.shared.utils.cli.pad("COMMANDS F1"))
+    rasa.shared.utils.cli.print_info(
+        f"macro : {result.f1_score[KEY_COMMANDS_F1_MACRO]:.8f}"
+    )
+    rasa.shared.utils.cli.print_info(
+        f"micro : {result.f1_score[KEY_COMMANDS_F1_MICRO]:.8f}"
+    )
+    rasa.shared.utils.cli.print_info(
+        f"weighted average: {result.f1_score[KEY_COMMANDS_F1_WEIGHTED]:.8f}"
+    )
+
+
 def print_command_summary(metrics: Dict[str, CommandMetrics]) -> None:
     """Print the command summary.

@@ -390,16 +414,24 @@ def print_latency_and_token_metrics(
     """Print the latency and token metrics."""
     print()
     rasa.shared.utils.cli.print_info(rasa.shared.utils.cli.pad("LATENCY METRICS"))
-    for
-    rasa.shared.utils.cli.print_info(f"{
+    for component, latency_metric in result.latency_metrics.items():
+        rasa.shared.utils.cli.print_info(f"--- {component} ---")
+        for key, value in latency_metric.items():
+            rasa.shared.utils.cli.print_info(f"{key}: {value:.8f}")
+
     rasa.shared.utils.cli.print_info(rasa.shared.utils.cli.pad("PROMPT TOKEN METRICS"))
-    for
-    rasa.shared.utils.cli.print_info(f"{
+    for component, prompt_token_metric in result.prompt_token_metrics.items():
+        rasa.shared.utils.cli.print_info(f"--- {component} ---")
+        for key, value in prompt_token_metric.items():
+            rasa.shared.utils.cli.print_info(f"{key}: {value:.2f}")
+
     rasa.shared.utils.cli.print_info(
         rasa.shared.utils.cli.pad("COMPLETION TOKEN METRICS")
     )
-    for
-    rasa.shared.utils.cli.print_info(f"{
+    for component, completion_token_metric in result.completion_token_metrics.items():
+        rasa.shared.utils.cli.print_info(f"--- {component} ---")
+        for key, value in completion_token_metric.items():
+            rasa.shared.utils.cli.print_info(f"{key}: {value:.2f}")


 def print_final_line(test_suite_result: DialogueUnderstandingTestSuiteResult) -> None:
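The new printers only iterate over the f1_score mapping and the per-component percentile dictionaries that du_test_result.py now populates. The snippet below illustrates the shape they consume; the SimpleNamespace stand-in and every number in it are invented, not values produced by Rasa.

from types import SimpleNamespace

# invented stand-in shaped like DialogueUnderstandingTestSuiteResult
result = SimpleNamespace(
    f1_score={"macro": 0.72, "micro": 0.76, "weighted_average": 0.75},
    latency_metrics={"command_generator": {"p50": 0.47, "p90": 0.91, "p99": 1.08}},
    prompt_token_metrics={"command_generator": {"p50": 1450.0, "p90": 1890.0, "p99": 2010.0}},
    completion_token_metrics={"command_generator": {"p50": 12.0, "p90": 18.0, "p99": 25.0}},
)

# the same component -> {percentile: value} loop used by print_latency_and_token_metrics
for component, metric in result.latency_metrics.items():
    print(f"--- {component} ---")
    for key, value in metric.items():
        print(f"{key}: {value:.8f}")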
rasa/document_retrieval/__init__.py: file without changes
rasa/document_retrieval/constants.py (+32 -0)

@@ -0,0 +1,32 @@
+# keys for storing information in the message object
+from rasa.shared.constants import OPENAI_PROVIDER, PROVIDER_CONFIG_KEY
+from rasa.shared.utils.llm import DEFAULT_OPENAI_EMBEDDING_MODEL_NAME
+
+SEARCH_QUERY_KEY = "search_query"
+RETRIEVED_DOCUMENTS_KEY = "retrieved_documents"
+POST_PROCESSED_DOCUMENTS_KEY = "post_processed_documents"
+
+# config keys
+THRESHOLD_CONFIG_KEY = "threshold"
+K_CONFIG_KEY = "k"
+VECTOR_STORE_TYPE_CONFIG_KEY = "type"
+VECTOR_STORE_CONFIG_KEY = "vector_store"
+CONNECTOR_CONFIG_KEY = "connector"
+SOURCE_PROPERTY = "source"
+POST_PROCESSING_CONFIG_KEY = "post_processing"
+QUERY_REWRITING_CONFIG_KEY = "query_rewriting"
+USE_LLM_PROPERTY = "use_generative_llm"
+
+# default values
+DEFAULT_THRESHOLD = 0.0
+DEFAULT_K = 3
+DEFAULT_VECTOR_STORE_TYPE = "faiss"
+DEFAULT_EMBEDDINGS_CONFIG = {
+    PROVIDER_CONFIG_KEY: OPENAI_PROVIDER,
+    "model": DEFAULT_OPENAI_EMBEDDING_MODEL_NAME,
+}
+DEFAULT_VECTOR_STORE = {
+    VECTOR_STORE_TYPE_CONFIG_KEY: DEFAULT_VECTOR_STORE_TYPE,
+    SOURCE_PROPERTY: "./docs",
+    THRESHOLD_CONFIG_KEY: DEFAULT_THRESHOLD,
+}