rasa-pro 3.11.2__py3-none-any.whl → 3.11.3a1.dev1__py3-none-any.whl
This diff compares the contents of publicly available package versions as they were released to their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of rasa-pro might be problematic.
- rasa/cli/studio/actions.py +48 -0
- rasa/cli/studio/studio.py +2 -0
- rasa/core/actions/direct_custom_actions_executor.py +31 -2
- rasa/core/channels/development_inspector.py +3 -0
- rasa/core/channels/socketio.py +5 -0
- rasa/dialogue_understanding/generator/command_generator.py +126 -5
- rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +28 -1
- rasa/dialogue_understanding/generator/nlu_command_adapter.py +3 -0
- rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +9 -1
- rasa/dialogue_understanding/utils.py +14 -0
- rasa/dialogue_understanding_test/__init__.py +0 -0
- rasa/dialogue_understanding_test/constants.py +15 -0
- rasa/dialogue_understanding_test/du_test_case.py +118 -0
- rasa/dialogue_understanding_test/du_test_result.py +11 -0
- rasa/e2e_test/e2e_test_case.py +2 -1
- rasa/e2e_test/utils/io.py +1 -1
- rasa/e2e_test/utils/validation.py +100 -2
- rasa/engine/recipes/default_recipe.py +63 -49
- rasa/engine/recipes/graph_recipe.py +8 -7
- rasa/model_manager/runner_service.py +1 -0
- rasa/model_manager/socket_bridge.py +29 -7
- rasa/model_training.py +2 -1
- rasa/shared/constants.py +17 -5
- rasa/shared/nlu/constants.py +4 -0
- rasa/studio/actions.py +147 -0
- rasa/studio/upload.py +19 -5
- rasa/telemetry.py +40 -33
- rasa/tracing/instrumentation/attribute_extractors.py +10 -9
- rasa/validator.py +32 -29
- rasa/version.py +1 -1
- {rasa_pro-3.11.2.dist-info → rasa_pro-3.11.3a1.dev1.dist-info}/METADATA +4 -4
- {rasa_pro-3.11.2.dist-info → rasa_pro-3.11.3a1.dev1.dist-info}/RECORD +35 -28
- {rasa_pro-3.11.2.dist-info → rasa_pro-3.11.3a1.dev1.dist-info}/NOTICE +0 -0
- {rasa_pro-3.11.2.dist-info → rasa_pro-3.11.3a1.dev1.dist-info}/WHEEL +0 -0
- {rasa_pro-3.11.2.dist-info → rasa_pro-3.11.3a1.dev1.dist-info}/entry_points.txt +0 -0
rasa/cli/studio/actions.py
ADDED
@@ -0,0 +1,48 @@
+import argparse
+from typing import List
+
+from rasa.cli import SubParsersAction
+from rasa.shared.constants import DEFAULT_ACTIONS_PATH
+
+from rasa.studio.actions import handle_actions
+from rasa_sdk.cli.arguments import action_arg
+
+
+def add_subparser(
+    subparsers: SubParsersAction, parents: List[argparse.ArgumentParser]
+) -> None:
+    """Add the studio actions parser.
+
+    Args:
+        subparsers: subparser we are going to attach to
+        parents: Parent parsers, needed to ensure tree structure in argparse
+    """
+    actions_parser = subparsers.add_parser(
+        "actions",
+        parents=parents,
+        conflict_handler="resolve",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        help=("Run Rasa actions server locally and connect it to studio."),
+    )
+
+    actions_parser.set_defaults(func=handle_actions)
+
+    set_studio_actions_arguments(actions_parser)
+
+
+def set_studio_actions_arguments(parser: argparse.ArgumentParser) -> None:
+    """Add arguments for running `rasa studio download`."""
+    parser.add_argument(
+        "assistant_name",
+        default=None,
+        nargs=1,
+        type=str,
+        help="Name of the assistant on Rasa Studio",
+    )
+
+    parser.add_argument(
+        "--actions",
+        type=action_arg,
+        default=DEFAULT_ACTIONS_PATH,
+        help="name of action package to be loaded",
+    )
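The new module wires an `actions` subcommand into the `rasa studio` CLI (registered in `studio.py` below), so the release presumably exposes something like `rasa studio actions <assistant_name> [--actions <package>]`. A minimal, self-contained sketch of the same argparse registration pattern, using stand-in names rather than the real Rasa parsers and handler:

```python
# Illustrative sketch only: mirrors the registration pattern above with
# stand-in names; it does not import rasa or rasa_sdk.
import argparse
from typing import List


def handle_actions(args: argparse.Namespace) -> None:
    # Stand-in for rasa.studio.actions.handle_actions
    print(f"Would start an action server for {args.assistant_name[0]!r} "
          f"using the {args.actions!r} package.")


def add_actions_subparser(
    subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
) -> None:
    parser = subparsers.add_parser(
        "actions",
        parents=parents,
        conflict_handler="resolve",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        help="Run a local actions server and connect it to Studio.",
    )
    parser.add_argument("assistant_name", nargs=1, type=str)
    parser.add_argument("--actions", default="actions")
    parser.set_defaults(func=handle_actions)


root = argparse.ArgumentParser(prog="rasa studio")
subparsers = root.add_subparsers()
add_actions_subparser(subparsers, parents=[])

args = root.parse_args(["actions", "my_assistant", "--actions", "actions"])
args.func(args)
```

Running the sketch dispatches to the stand-in `handle_actions` with the parsed assistant name and actions package, mirroring how `set_defaults(func=handle_actions)` routes the real subcommand.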
rasa/cli/studio/studio.py
CHANGED
@@ -9,6 +9,7 @@ import rasa.shared.utils.cli
 import rasa.cli.studio.download
 import rasa.cli.studio.train
 import rasa.cli.studio.upload
+import rasa.cli.studio.actions
 from rasa.studio.auth import StudioAuth
 from rasa.studio.config import StudioConfig

@@ -33,6 +34,7 @@ def add_subparser(
     rasa.cli.studio.train.add_subparser(studio_subparsers, parents)
     rasa.cli.studio.upload.add_subparser(studio_subparsers, parents)
     rasa.cli.studio.download.add_subparser(studio_subparsers, parents)
+    rasa.cli.studio.actions.add_subparser(studio_subparsers, parents)

     _add_config_subparser(studio_subparsers, parents)
     _add_login_subparser(studio_subparsers, parents)
rasa/core/actions/direct_custom_actions_executor.py
CHANGED
@@ -1,6 +1,8 @@
+from functools import lru_cache
 from importlib.util import find_spec
 from typing import (
     Any,
+    ClassVar,
     Dict,
     Text,
 )

@@ -21,6 +23,8 @@ structlogger = structlog.get_logger(__name__)


 class DirectCustomActionExecutor(CustomActionExecutor):
+    _actions_module_registered: ClassVar[bool] = False
+
     def __init__(self, action_name: str, action_endpoint: EndpointConfig):
         """Initializes the direct custom action executor.

@@ -30,9 +34,34 @@ class DirectCustomActionExecutor(CustomActionExecutor):
         """
         self.action_name = action_name
         self.action_endpoint = action_endpoint
-        self.action_executor =
+        self.action_executor = self._create_action_executor()
+        self.register_actions_from_a_module()
+        self.action_executor.reload()
+
+    @staticmethod
+    @lru_cache(maxsize=1)
+    def _create_action_executor() -> ActionExecutor:
+        """Creates and returns a cached ActionExecutor instance.
+
+        Returns:
+            ActionExecutor: The cached ActionExecutor instance.
+        """
+        return ActionExecutor()

     def register_actions_from_a_module(self) -> None:
+        """Registers actions from the specified module if not already registered.
+
+        This method checks if the actions module has already been registered to prevent
+        duplicate registrations. If not registered, it attempts to register the actions
+        module specified in the action endpoint configuration. If the module does not
+        exist, it raises a RasaException.
+
+        Raises:
+            RasaException: If the actions module specified does not exist.
+        """
+        if DirectCustomActionExecutor._actions_module_registered:
+            return
+
         module_name = self.action_endpoint.actions_module
         if not find_spec(module_name):
             raise RasaException(
@@ -42,6 +71,7 @@ class DirectCustomActionExecutor(CustomActionExecutor):
             )

         self.action_executor.register_package(module_name)
+        DirectCustomActionExecutor._actions_module_registered = True

     async def run(
         self,
@@ -63,7 +93,6 @@ class DirectCustomActionExecutor(CustomActionExecutor):
             "action.direct_custom_action_executor.run",
             action_name=self.action_name,
         )
-        self.register_actions_from_a_module()

         tracker_state = tracker.current_state(EventVerbosity.ALL)
         action_call = {
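The executor change replaces per-call registration with process-wide sharing: an `lru_cache(maxsize=1)` factory hands every `DirectCustomActionExecutor` the same `ActionExecutor`, a `ClassVar` flag ensures the actions package is registered only once, and registration moves from `run()` into `__init__`. A minimal sketch of that sharing pattern with stand-in classes (not the real rasa / rasa_sdk types):

```python
# Minimal sketch of the sharing pattern above, using a stand-in executor class.
from functools import lru_cache
from typing import ClassVar


class FakeActionExecutor:
    def register_package(self, module_name: str) -> None:
        print(f"registering {module_name}")

    def reload(self) -> None:
        print("reloading actions")


class Executor:
    _actions_module_registered: ClassVar[bool] = False

    def __init__(self, actions_module: str) -> None:
        # every instance gets the same cached FakeActionExecutor
        self.action_executor = self._create_action_executor()
        self._register_once(actions_module)
        self.action_executor.reload()

    @staticmethod
    @lru_cache(maxsize=1)
    def _create_action_executor() -> FakeActionExecutor:
        return FakeActionExecutor()

    def _register_once(self, actions_module: str) -> None:
        if Executor._actions_module_registered:
            return
        self.action_executor.register_package(actions_module)
        Executor._actions_module_registered = True


a, b = Executor("actions"), Executor("actions")
assert a.action_executor is b.action_executor  # shared, cached instance
# "registering actions" is printed only once; "reloading actions" twice
```

The pattern trades per-instance isolation for process-wide state: every executor in the process shares one registration, which avoids re-registering the actions package on every action run.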
rasa/core/channels/development_inspector.py
CHANGED
@@ -88,6 +88,9 @@ class DevelopmentInspectProxy(InputChannel):
         """Called when a tracker has been updated."""
         if self.tracker_stream:
             tracker_dump = await self.get_tracker_state(sender_id)
+            # check if the underlying channel has an on_new_tracker_dump hook
+            if hasattr(self.underlying, "on_new_tracker_dump"):
+                await self.underlying.on_new_tracker_dump(sender_id, tracker_dump)
             await self.tracker_stream.broadcast(tracker_dump)

     async def on_message_proxy(
rasa/core/channels/socketio.py
CHANGED
@@ -212,6 +212,10 @@ class SocketIOInput(InputChannel):
             return None
         return SocketIOOutput(self.sio, self.bot_message_evt)

+    async def on_new_tracker_dump(self, sender_id: str, tracker_dump: str):
+        if self.sio:
+            await self.sio.emit("tracker", tracker_dump, room=sender_id)
+
     def blueprint(
         self, on_new_message: Callable[[UserMessage], Awaitable[Any]]
     ) -> Blueprint:
@@ -289,6 +293,7 @@ class SocketIOInput(InputChannel):
             metadata = data.get(self.metadata_key, {})
             if isinstance(metadata, Text):
                 metadata = json.loads(metadata)
+
             message = UserMessage(
                 data.get("message", ""),
                 output_channel,
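Together, the `development_inspector.py` and `socketio.py` changes add an optional, duck-typed `on_new_tracker_dump` hook: the inspector proxy forwards tracker dumps to the underlying channel only if that channel defines the hook, and `SocketIOInput` implements it by emitting a `tracker` event to the sender's room. A small sketch of the `hasattr`-based dispatch with stand-in channel classes:

```python
# Sketch of the optional-hook pattern above with stand-in channel classes;
# the real classes live in rasa.core.channels.
import asyncio


class PlainChannel:
    """A channel without the optional hook."""


class SocketLikeChannel:
    async def on_new_tracker_dump(self, sender_id: str, tracker_dump: str) -> None:
        # the real SocketIOInput emits a "tracker" event to the sender's room here
        print(f"emit 'tracker' to room {sender_id}: {tracker_dump[:20]}...")


class InspectorProxy:
    def __init__(self, underlying) -> None:
        self.underlying = underlying

    async def on_tracker_updated(self, sender_id: str, tracker_dump: str) -> None:
        # forward only if the wrapped channel opts in to the hook
        if hasattr(self.underlying, "on_new_tracker_dump"):
            await self.underlying.on_new_tracker_dump(sender_id, tracker_dump)


async def main() -> None:
    dump = '{"sender_id": "alice", "events": []}'
    await InspectorProxy(PlainChannel()).on_tracker_updated("alice", dump)       # no-op
    await InspectorProxy(SocketLikeChannel()).on_tracker_updated("alice", dump)  # emits


asyncio.run(main())
```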
rasa/dialogue_understanding/generator/command_generator.py
CHANGED
@@ -10,18 +10,25 @@ from rasa.dialogue_understanding.commands (
     ErrorCommand,
 )
 from rasa.dialogue_understanding.commands.set_slot_command import SetSlotExtractor
+from rasa.shared.constants import (
+    RASA_PATTERN_INTERNAL_ERROR_USER_INPUT_TOO_LONG,
+    RASA_PATTERN_INTERNAL_ERROR_USER_INPUT_EMPTY,
+)
 from rasa.shared.core.constants import SlotMappingType
 from rasa.shared.core.domain import Domain
 from rasa.shared.core.flows import FlowsList
 from rasa.shared.core.slot_mappings import SlotFillingManager
 from rasa.shared.core.trackers import DialogueStateTracker
+from rasa.shared.nlu.constants import (
+    COMMANDS,
+    TEXT,
+    PREDICTED_COMMANDS,
+    PROMPTS,
+    KEY_USER_PROMPT,
+    KEY_SYSTEM_PROMPT,
+)
 from rasa.shared.nlu.training_data.message import Message
-from rasa.shared.nlu.constants import COMMANDS, TEXT
 from rasa.shared.utils.llm import DEFAULT_MAX_USER_INPUT_CHARACTERS
-from rasa.shared.constants import (
-    RASA_PATTERN_INTERNAL_ERROR_USER_INPUT_TOO_LONG,
-    RASA_PATTERN_INTERNAL_ERROR_USER_INPUT_EMPTY,
-)

 structlogger = structlog.get_logger()

@@ -193,6 +200,7 @@ class CommandGenerator:
             flows: The flows to use for command prediction.
             tracker: The tracker containing the conversation history up to now.
             **kwargs: Keyword arguments for forward compatibility.
+
         Returns:
             The predicted commands.
         """
@@ -341,3 +349,116 @@ class CommandGenerator:
         ]

         return filtered_commands
+
+    @staticmethod
+    def _add_commands_to_message_parse_data(
+        message: Message, component_name: str, commands: List[Command]
+    ) -> None:
+        """Add commands to the message parse data.
+
+        Commands are only added in case the flag 'record_commands_and_prompts' is set.
+        Example of predicted commands in the message parse data:
+            Message(data={
+                PREDICTED_COMMANDS: {
+                    "MultiStepLLMCommandGenerator": [
+                        {"command": "set_slot", "name": "slot_name", "value": "slot_value"},
+                    ],
+                    "NLUCommandAdapter": [
+                        {"command": "start_flow", "name": "test_flow"},
+                    ]
+                }
+            })
+        """
+        from rasa.dialogue_understanding.utils import record_commands_and_prompts
+
+        # only set commands if the flag "record_commands_and_prompts" is set to True
+        if not record_commands_and_prompts:
+            return
+
+        commands_as_dict = [command.as_dict() for command in commands]
+
+        if message.get(PREDICTED_COMMANDS) is not None:
+            predicted_commands = message.get(PREDICTED_COMMANDS)
+            if component_name in predicted_commands:
+                predicted_commands[component_name].extend(commands_as_dict)
+            else:
+                predicted_commands[component_name] = commands_as_dict
+        else:
+            predicted_commands = {component_name: commands_as_dict}
+
+        message.set(
+            PREDICTED_COMMANDS,
+            predicted_commands,
+            add_to_output=True,
+        )
+
+    @staticmethod
+    def _add_prompt_to_message_parse_data(
+        message: Message,
+        component_name: str,
+        prompt_name: str,
+        user_prompt: str,
+        system_prompt: Optional[str] = None,
+    ) -> None:
+        """Add prompt to the message parse data.
+
+        Prompt is only added in case the flag 'record_commands_and_prompts' is set.
+        Example of prompts in the message parse data:
+            Message(data={
+                PROMPTS: {
+                    "MultiStepLLMCommandGenerator": [
+                        (
+                            "fill_slots_prompt",
+                            {
+                                "user_prompt": <prompt content>",
+                                "system_prompt": <prompt content>"
+                            }
+                        ),
+                        (
+                            "handle_flows_prompt",
+                            {
+                                "user_prompt": <prompt content>",
+                                "system_prompt": <prompt content>"
+                            }
+                        ),
+                    ],
+                    "SingleStepLLMCommandGenerator": [
+                        (
+                            "prompt_template",
+                            {
+                                "user_prompt": <prompt content>",
+                                "system_prompt": <prompt content>"
+                            }
+                        ),
+                    ]
+                }
+            })
+        """
+        from rasa.dialogue_understanding.utils import record_commands_and_prompts
+
+        # only set prompt if the flag "record_commands_and_prompts" is set to True
+        if not record_commands_and_prompts:
+            return
+
+        prompt_tuple = (
+            prompt_name,
+            {
+                KEY_USER_PROMPT: user_prompt,
+                **({KEY_SYSTEM_PROMPT: system_prompt} if system_prompt else {}),
+            },
+        )
+
+        if message.get(PROMPTS) is not None:
+            prompts = message.get(PROMPTS)
+            if component_name in prompts:
+                prompts[component_name].append(prompt_tuple)
+            else:
+                prompts[component_name] = [prompt_tuple]
+        else:
+            prompts = {component_name: [prompt_tuple]}
+
+        message.set(
+            PROMPTS,
+            prompts,
+            add_to_output=True,
+        )
rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py
CHANGED
@@ -144,7 +144,6 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
         **kwargs: Any,
     ) -> "MultiStepLLMCommandGenerator":
         """Loads trained component (see parent class for full docstring)."""
-
         # Perform health check of the LLM client config
         llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
         cls.perform_llm_health_check(
@@ -200,6 +199,9 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
                 message, flows, tracker
            )
             commands = self._clean_up_commands(commands)
+            self._add_commands_to_message_parse_data(
+                message, MultiStepLLMCommandGenerator.__name__, commands
+            )
         except ProviderClientAPIException:
             # if any step resulted in API exception, the command prediction cannot
             # be completed, "predict" the ErrorCommand
@@ -542,6 +544,15 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
         )

         commands = self.parse_commands(actions, tracker, available_flows)
+
+        if commands:
+            self._add_prompt_to_message_parse_data(
+                message,
+                MultiStepLLMCommandGenerator.__name__,
+                "fill_slots_for_active_flow_prompt",
+                prompt,
+            )
+
         return commands

     async def _predict_commands_for_handling_flows(
@@ -585,6 +596,14 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
         # filter out flows that are already started and active
         commands = self._filter_redundant_start_flow_commands(tracker, commands)

+        if commands:
+            self._add_prompt_to_message_parse_data(
+                message,
+                MultiStepLLMCommandGenerator.__name__,
+                "handle_flows_prompt",
+                prompt,
+            )
+
         return commands

     @staticmethod
@@ -674,6 +693,14 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             commands=commands,
         )

+        if commands:
+            self._add_prompt_to_message_parse_data(
+                message,
+                MultiStepLLMCommandGenerator.__name__,
+                "fill_slots_for_new_flow_prompt",
+                prompt,
+            )
+
         return commands

     def _prepare_inputs(
rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py
CHANGED
@@ -137,7 +137,6 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
         **kwargs: Any,
     ) -> "SingleStepLLMCommandGenerator":
         """Loads trained component (see parent class for full docstring)."""
-
         # Perform health check of the LLM API endpoint
         llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
         cls.perform_llm_health_check(
@@ -282,6 +281,15 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
         commands = self.parse_commands(action_list, tracker, flows)

         self._update_message_parse_data_for_fine_tuning(message, commands, flow_prompt)
+        self._add_commands_to_message_parse_data(
+            message, SingleStepLLMCommandGenerator.__name__, commands
+        )
+        self._add_prompt_to_message_parse_data(
+            message,
+            SingleStepLLMCommandGenerator.__name__,
+            "command_generator_prompt",
+            flow_prompt,
+        )

         return commands

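The generator changes above make each command generator attach its predicted commands, and the prompts it used, to the parsed message, keyed by component name, but only while the module-level `record_commands_and_prompts` flag is on. A simplified, self-contained sketch of that accumulation logic with a stand-in message class (the real code stores the data under the `PREDICTED_COMMANDS` and `PROMPTS` keys of rasa's `Message` and uses explicit branching rather than `setdefault`):

```python
# Stand-in sketch of the accumulation logic above; not the real rasa Message API.
from typing import Any, Dict, List

record_commands_and_prompts = False  # module-level switch, off by default


class FakeMessage:
    def __init__(self) -> None:
        self.data: Dict[str, Any] = {}

    def get(self, key: str) -> Any:
        return self.data.get(key)

    def set(self, key: str, value: Any) -> None:
        self.data[key] = value


def add_commands(message: FakeMessage, component: str, commands: List[Dict]) -> None:
    if not record_commands_and_prompts:
        return  # recording disabled: parse data stays untouched
    predicted = message.get("predicted_commands") or {}
    predicted.setdefault(component, []).extend(commands)
    message.set("predicted_commands", predicted)


msg = FakeMessage()
add_commands(msg, "SingleStepLLMCommandGenerator", [{"command": "start_flow", "flow": "greet"}])
print(msg.data)  # {} -- flag is off

record_commands_and_prompts = True
add_commands(msg, "SingleStepLLMCommandGenerator", [{"command": "start_flow", "flow": "greet"}])
add_commands(msg, "NLUCommandAdapter", [{"command": "set_slot", "name": "x", "value": 1}])
print(msg.data["predicted_commands"])  # commands grouped per component name
```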
rasa/dialogue_understanding/utils.py
ADDED
@@ -0,0 +1,14 @@
+from contextlib import contextmanager
+from typing import Generator
+
+record_commands_and_prompts = False
+
+
+@contextmanager
+def set_record_commands_and_prompts() -> Generator:
+    global record_commands_and_prompts
+    record_commands_and_prompts = True
+    try:
+        yield
+    finally:
+        record_commands_and_prompts = False
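A hedged usage sketch of the new context manager, assuming this dev build of rasa-pro is installed: it temporarily flips the module-level flag and restores it even if the body raises, which is presumably how the new dialogue understanding test tooling enables recording.

```python
# Usage sketch; reads the flag through the module so the rebinding is visible.
from rasa.dialogue_understanding import utils
from rasa.dialogue_understanding.utils import set_record_commands_and_prompts

print(utils.record_commands_and_prompts)      # False

with set_record_commands_and_prompts():
    # inside the block, command generators record prompts/commands on the Message
    print(utils.record_commands_and_prompts)  # True

print(utils.record_commands_and_prompts)      # False again, even if the body raised
```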
rasa/dialogue_understanding_test/__init__.py
ADDED
File without changes
rasa/dialogue_understanding_test/constants.py
ADDED
@@ -0,0 +1,15 @@
+import rasa.e2e_test.constants as constants
+
+KEY_FIXTURES = constants.KEY_FIXTURES
+KEY_METADATA = constants.KEY_METADATA
+KEY_STUB_CUSTOM_ACTIONS = constants.KEY_STUB_CUSTOM_ACTIONS
+KEY_USER_INPUT = constants.KEY_USER_INPUT
+KEY_BOT_INPUT = constants.KEY_BOT_INPUT
+KEY_BOT_UTTERED = constants.KEY_BOT_UTTERED
+KEY_STEPS = constants.KEY_STEPS
+KEY_TEST_CASE = constants.KEY_TEST_CASE
+KEY_TEST_CASES = constants.KEY_TEST_CASES
+KEY_COMMANDS = "commands"
+
+ACTOR_USER = "user"
+ACTOR_BOT = "bot"
rasa/dialogue_understanding_test/du_test_case.py
ADDED
@@ -0,0 +1,118 @@
+from typing import List, Optional, Dict, Any
+
+from pydantic import BaseModel, ConfigDict, Field
+
+from rasa.dialogue_understanding.commands import Command
+from rasa.dialogue_understanding_test.constants import (
+    ACTOR_USER,
+    KEY_COMMANDS,
+    ACTOR_BOT,
+    KEY_TEST_CASE,
+    KEY_STEPS,
+    KEY_FIXTURES,
+    KEY_METADATA,
+    KEY_USER_INPUT,
+    KEY_BOT_INPUT,
+    KEY_BOT_UTTERED,
+)
+
+
+class DialogueUnderstandingOutput(BaseModel):
+    """Output containing prompts and generated commands by component.
+
+    Example of commands:
+    {
+        "MultiStepLLMCommandGenerator": [
+            {"command": "set_slot", "name": "slot_name", "value": "slot_value"},
+        ],
+        "NLUCommandAdapter": [
+            {"command": "start_flow", "name": "test_flow"},
+        ]
+    }
+
+    Example of prompts:
+    {
+        "MultiStepLLMCommandGenerator": [
+            (
+                "fill_slots_prompt",
+                {
+                    "user_prompt": "<prompt content>",
+                    "system_prompt": "<prompt content>"
+                }
+            ),
+            (
+                "handle_flows_prompt",
+                {
+                    "user_prompt": "<prompt content>",
+                    "system_prompt": "<prompt content>"
+                }
+            ),
+        ],
+    }
+    """
+
+    prompts: Dict[str, tuple[str, Dict[str, str]]]
+    commands: Dict[str, List[Command]]
+
+    model_config = ConfigDict(frozen=True)
+
+    def get_component_data(
+        self, component_name: str
+    ) -> tuple[Optional[tuple[str, Dict[str, str]]], List[Command]]:
+        """Get both the prompts and commands for a specific component."""
+        return self.prompts.get(component_name), self.commands.get(component_name, [])
+
+
+class DialogueUnderstandingTestStep(BaseModel):
+    actor: str
+    text: Optional[str] = None
+    template: Optional[str] = None
+    line: Optional[int] = None
+    metadata_name: Optional[str] = None
+    commands: Optional[List[Command]] = None
+    dialogue_understanding_output: Optional[DialogueUnderstandingOutput] = None
+
+    def as_dict(self) -> Dict[str, Any]:
+        if self.actor == ACTOR_USER:
+            if self.commands:
+                return {
+                    KEY_USER_INPUT: self.text,
+                    # TODO: The command should be converted into our DSL
+                    KEY_COMMANDS: [command.as_dict() for command in self.commands],
+                }
+            return {ACTOR_USER: self.text}
+        elif self.actor == ACTOR_BOT:
+            if self.text is not None:
+                return {KEY_BOT_INPUT: self.text}
+            elif self.template is not None:
+                return {KEY_BOT_UTTERED: self.template}
+
+        return {}
+
+
+class DialogueUnderstandingTestCase(BaseModel):
+    name: str
+    steps: list[DialogueUnderstandingTestStep] = Field(min_length=1)
+    file: Optional[str] = None
+    line: Optional[int] = None
+    fixture_names: Optional[List[str]] = None
+    metadata_name: Optional[str] = None
+
+    def full_name(self) -> str:
+        return f"{self.file}::{self.name}"
+
+    def as_dict(self) -> Dict[str, Any]:
+        result = {
+            KEY_TEST_CASE: self.name,
+            KEY_STEPS: [step.as_dict() for step in self.steps],
+        }
+        if self.fixture_names:
+            result[KEY_FIXTURES] = self.fixture_names
+        if self.metadata_name:
+            result[KEY_METADATA] = self.metadata_name
+        return result
+
+
+# Update forward references
+DialogueUnderstandingTestStep.model_rebuild()
+DialogueUnderstandingTestCase.model_rebuild()
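A hedged usage sketch of the new Pydantic models, assuming this dev build of rasa-pro is installed; the test case, file path, and fixture names below are invented for illustration:

```python
from rasa.dialogue_understanding_test.du_test_case import (
    DialogueUnderstandingTestCase,
    DialogueUnderstandingTestStep,
)

test_case = DialogueUnderstandingTestCase(
    name="user_books_a_table",                 # hypothetical test case
    file="tests/du/booking.yml",               # hypothetical file path
    steps=[
        DialogueUnderstandingTestStep(actor="user", text="I want to book a table"),
        DialogueUnderstandingTestStep(actor="bot", template="utter_ask_num_people"),
    ],
    fixture_names=["premium_user"],
)

print(test_case.full_name())   # tests/du/booking.yml::user_books_a_table
print(test_case.as_dict())     # dict using the shared e2e keys (test case, steps, fixtures)
```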
rasa/dialogue_understanding_test/du_test_result.py
ADDED
@@ -0,0 +1,11 @@
+from typing import Optional
+
+from pydantic import BaseModel
+
+from rasa.dialogue_understanding_test.du_test_case import DialogueUnderstandingTestCase
+
+
+class DialogueUnderstandingTestResult(BaseModel):
+    test_case: DialogueUnderstandingTestCase
+    passed: bool
+    error_line: Optional[int] = None
rasa/e2e_test/e2e_test_case.py
CHANGED
@@ -5,6 +5,7 @@ from typing import Any, Dict, List, Optional, Text, Union

 import structlog

+from rasa.dialogue_understanding_test.du_test_case import DialogueUnderstandingTestCase
 from rasa.e2e_test.assertions import Assertion
 from rasa.e2e_test.constants import (
     KEY_ASSERTIONS,
@@ -551,7 +552,7 @@ class Metadata:
 class TestSuite:
     """Class for representing all top level test suite keys."""

-    test_cases: List[TestCase]
+    test_cases: List[Union[TestCase, DialogueUnderstandingTestCase]]
     fixtures: List[Fixture]
     metadata: List[Metadata]
     stub_custom_actions: Dict[Text, StubCustomAction]
rasa/e2e_test/utils/io.py
CHANGED
@@ -404,7 +404,7 @@ def read_test_cases(path: str) -> TestSuite:
         stub_data=stub_data,
     )

-    validate_test_case(test_case_name, input_test_cases)
+    validate_test_case(test_case_name, input_test_cases, fixtures, metadata)
     try:
         if stub_custom_actions:
             ensure_beta_feature_is_enabled(