rasa-pro 3.12.18.dev1__py3-none-any.whl → 3.12.20__py3-none-any.whl
This diff compares the contents of two package versions as publicly released to a supported registry. It is provided for informational purposes only and reflects the changes exactly as they appear in the public registry.
Note: this release of rasa-pro has been flagged as potentially problematic.
- rasa/__init__.py +0 -6
- rasa/core/actions/action.py +1 -4
- rasa/core/channels/voice_stream/asr/azure.py +9 -0
- rasa/core/channels/voice_stream/twilio_media_streams.py +7 -0
- rasa/core/channels/voice_stream/voice_channel.py +40 -9
- rasa/core/policies/intentless_policy.py +1 -3
- rasa/core/processor.py +50 -5
- rasa/core/utils.py +11 -2
- rasa/dialogue_understanding/coexistence/llm_based_router.py +1 -0
- rasa/dialogue_understanding/commands/__init__.py +4 -0
- rasa/dialogue_understanding/commands/cancel_flow_command.py +3 -1
- rasa/dialogue_understanding/commands/set_slot_command.py +6 -0
- rasa/dialogue_understanding/commands/utils.py +26 -2
- rasa/dialogue_understanding/generator/command_generator.py +11 -1
- rasa/dialogue_understanding/generator/llm_based_command_generator.py +4 -15
- rasa/dialogue_understanding/generator/llm_command_generator.py +1 -3
- rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +4 -44
- rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +1 -14
- rasa/dialogue_understanding/processor/command_processor.py +5 -5
- rasa/dialogue_understanding/stack/frames/flow_stack_frame.py +17 -4
- rasa/dialogue_understanding/stack/utils.py +3 -1
- rasa/dialogue_understanding/utils.py +68 -12
- rasa/dialogue_understanding_test/du_test_schema.yml +3 -3
- rasa/e2e_test/e2e_test_coverage_report.py +1 -1
- rasa/e2e_test/e2e_test_schema.yml +3 -3
- rasa/hooks.py +0 -55
- rasa/llm_fine_tuning/utils.py +2 -4
- rasa/shared/constants.py +0 -5
- rasa/shared/core/flows/constants.py +2 -0
- rasa/shared/core/flows/flow.py +129 -13
- rasa/shared/core/flows/flows_list.py +18 -1
- rasa/shared/core/flows/steps/link.py +7 -2
- rasa/shared/providers/constants.py +0 -9
- rasa/shared/providers/llm/_base_litellm_client.py +4 -14
- rasa/shared/providers/llm/litellm_router_llm_client.py +7 -17
- rasa/shared/providers/llm/llm_client.py +15 -24
- rasa/shared/providers/llm/self_hosted_llm_client.py +2 -10
- rasa/tracing/instrumentation/attribute_extractors.py +2 -2
- rasa/version.py +1 -1
- {rasa_pro-3.12.18.dev1.dist-info → rasa_pro-3.12.20.dist-info}/METADATA +1 -2
- {rasa_pro-3.12.18.dev1.dist-info → rasa_pro-3.12.20.dist-info}/RECORD +44 -45
- rasa/monkey_patches.py +0 -91
- {rasa_pro-3.12.18.dev1.dist-info → rasa_pro-3.12.20.dist-info}/NOTICE +0 -0
- {rasa_pro-3.12.18.dev1.dist-info → rasa_pro-3.12.20.dist-info}/WHEEL +0 -0
- {rasa_pro-3.12.18.dev1.dist-info → rasa_pro-3.12.20.dist-info}/entry_points.txt +0 -0
rasa/__init__.py
CHANGED
@@ -5,11 +5,5 @@ from rasa import version
 # define the version before the other imports since these need it
 __version__ = version.__version__
 
-from litellm.integrations.langfuse.langfuse import LangFuseLogger
-
-from rasa.monkey_patches import litellm_langfuse_logger_init_fixed
-
-# Monkey-patch the init method as early as possible before the class is used
-LangFuseLogger.__init__ = litellm_langfuse_logger_init_fixed  # type: ignore
 
 logging.getLogger(__name__).addHandler(logging.NullHandler())
rasa/core/actions/action.py
CHANGED
@@ -1137,15 +1137,12 @@ class ActionSendText(Action):
         tracker: "DialogueStateTracker",
         domain: "Domain",
         metadata: Optional[Dict[Text, Any]] = None,
-        create_bot_uttered_event: bool = True,
     ) -> List[Event]:
         """Runs action. Please see parent class for the full docstring."""
         fallback = {"text": ""}
         metadata_copy = copy.deepcopy(metadata) if metadata else {}
         message = metadata_copy.get("message", fallback)
-
-        return [create_bot_utterance(message)]
-        return []
+        return [create_bot_utterance(message)]
 
 
 class ActionExtractSlots(Action):
rasa/core/channels/voice_stream/asr/azure.py
CHANGED

@@ -3,6 +3,8 @@ import os
 from dataclasses import dataclass
 from typing import Any, AsyncIterator, Dict, Optional
 
+import structlog
+
 from rasa.core.channels.voice_stream.asr.asr_engine import ASREngine, ASREngineConfig
 from rasa.core.channels.voice_stream.asr.asr_event import (
     ASREvent,
@@ -13,6 +15,8 @@ from rasa.core.channels.voice_stream.audio_bytes import HERTZ, RasaAudioBytes
 from rasa.shared.constants import AZURE_SPEECH_API_KEY_ENV_VAR
 from rasa.shared.exceptions import ConnectionException
 
+logger = structlog.get_logger(__name__)
+
 
 @dataclass
 class AzureASRConfig(ASREngineConfig):
@@ -61,6 +65,11 @@ class AzureASR(ASREngine[AzureASRConfig]):
             and self.config.speech_endpoint is None
         ):
             self.config.speech_region = "eastus"
+            logger.warning(
+                "voice_channel.asr.azure.no_region",
+                message="No speech region configured, using 'eastus' as default",
+                region="eastus",
+            )
         speech_config = speechsdk.SpeechConfig(
             subscription=os.environ[AZURE_SPEECH_API_KEY_ENV_VAR],
             region=self.config.speech_region,
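The new warning uses the structlog convention seen throughout these changes: a machine-readable event name as the positional argument, with keyword arguments emitted as structured fields. A standalone sketch of the same call, runnable with structlog installed:

# Standalone illustration of the logging call added above.
import structlog

logger = structlog.get_logger(__name__)
logger.warning(
    "voice_channel.asr.azure.no_region",
    message="No speech region configured, using 'eastus' as default",
    region="eastus",
)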
rasa/core/channels/voice_stream/twilio_media_streams.py
CHANGED

@@ -135,6 +135,13 @@ class TwilioMediaStreamsInputChannel(VoiceInputChannel):
     def name(cls) -> str:
         return "twilio_media_streams"
 
+    def get_sender_id(self, call_parameters: CallParameters) -> str:
+        """Get the sender ID for the channel.
+
+        Twilio Media Streams uses the Stream ID as Sender ID because
+        it is required in OutputChannel.send_text_message to send messages."""
+        return call_parameters.stream_id  # type: ignore[return-value]
+
     def channel_bytes_to_rasa_audio_bytes(self, input_bytes: bytes) -> RasaAudioBytes:
         return RasaAudioBytes(base64.b64decode(input_bytes))
 
rasa/core/channels/voice_stream/voice_channel.py
CHANGED

@@ -288,6 +288,17 @@ class VoiceInputChannel(InputChannel):
         self.monitor_silence = monitor_silence
         self.tts_cache = TTSCache(tts_config.get("cache_size", 1000))
 
+        logger.info(
+            "voice_channel.initialized",
+            server_url=self.server_url,
+            asr_config=self.asr_config,
+            tts_config=self.tts_config,
+        )
+
+    def get_sender_id(self, call_parameters: CallParameters) -> str:
+        """Get the sender ID for the channel."""
+        return call_parameters.call_id
+
     async def monitor_silence_timeout(self, asr_event_queue: asyncio.Queue) -> None:
         timeout = call_state.silence_timeout
         if not timeout:
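get_sender_id is a new extension hook: the base VoiceInputChannel uses the call ID, while TwilioMediaStreamsInputChannel (above) returns the stream SID its output channel needs. A sketch of a custom channel overriding the hook, assuming rasa-pro is installed; the subclass name is illustrative:

# Hypothetical subclass overriding the new hook.
from rasa.core.channels.voice_stream.voice_channel import VoiceInputChannel

class MyVoiceChannel(VoiceInputChannel):
    def get_sender_id(self, call_parameters) -> str:
        # Return whichever identifier this channel's OutputChannel needs
        # to address messages; the base implementation uses call_id.
        return call_parameters.call_id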
@@ -334,9 +345,9 @@
     ) -> None:
         output_channel = self.create_output_channel(channel_websocket, tts_engine)
         message = UserMessage(
-            "/session_start",
-            output_channel,
-            call_parameters
+            text="/session_start",
+            output_channel=output_channel,
+            sender_id=self.get_sender_id(call_parameters),
             input_channel=self.name(),
             metadata=asdict(call_parameters),
         )
@@ -393,6 +404,9 @@
                 await asr_engine.send_audio_chunks(channel_action.audio_bytes)
             elif isinstance(channel_action, EndConversationAction):
                 # end stream event came from the other side
+                await self.handle_disconnect(
+                    channel_websocket, on_new_message, tts_engine, call_parameters
+                )
                 break
 
         async def receive_asr_events() -> None:
@@ -449,9 +463,9 @@
                 call_state.is_user_speaking = False  # type: ignore[attr-defined]
                 output_channel = self.create_output_channel(voice_websocket, tts_engine)
                 message = UserMessage(
-                    e.text,
-                    output_channel,
-                    call_parameters
+                    text=e.text,
+                    output_channel=output_channel,
+                    sender_id=self.get_sender_id(call_parameters),
                     input_channel=self.name(),
                     metadata=asdict(call_parameters),
                 )
@@ -462,10 +476,27 @@
             elif isinstance(e, UserSilence):
                 output_channel = self.create_output_channel(voice_websocket, tts_engine)
                 message = UserMessage(
-                    "/silence_timeout",
-                    output_channel,
-                    call_parameters
+                    text="/silence_timeout",
+                    output_channel=output_channel,
+                    sender_id=self.get_sender_id(call_parameters),
                     input_channel=self.name(),
                     metadata=asdict(call_parameters),
                 )
                 await on_new_message(message)
+
+    async def handle_disconnect(
+        self,
+        channel_websocket: Websocket,
+        on_new_message: Callable[[UserMessage], Awaitable[Any]],
+        tts_engine: TTSEngine,
+        call_parameters: CallParameters,
+    ) -> None:
+        """Handle disconnection from the channel."""
+        output_channel = self.create_output_channel(channel_websocket, tts_engine)
+        message = UserMessage(
+            text="/session_end",
+            output_channel=output_channel,
+            sender_id=self.get_sender_id(call_parameters),
+            input_channel=self.name(),
+        )
+        await on_new_message(message)
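handle_disconnect makes teardown symmetric with setup: a remote hangup (the EndConversationAction branch above) now injects a /session_end user message, mirroring the /session_start sent when the call begins. A sketch of the injected message with a stubbed handler; the UserMessage import path is assumed, the kwargs match the diff:

# Stubbed illustration of the message produced on disconnect.
import asyncio
from rasa.core.channels.channel import UserMessage  # assumed import path

async def on_new_message(message: UserMessage) -> None:
    print(message.text, message.sender_id, message.input_channel)

message = UserMessage(
    text="/session_end",
    output_channel=None,  # stub; normally the channel's voice output channel
    sender_id="stream-or-call-id",  # from get_sender_id(call_parameters)
    input_channel="twilio_media_streams",
)
asyncio.run(on_new_message(message))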
rasa/core/policies/intentless_policy.py
CHANGED

@@ -721,9 +721,7 @@ class IntentlessPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
             final_response_examples.append(resp)
 
         llm_response = await self.generate_answer(
-            final_response_examples,
-            conversation_samples,
-            history,
+            final_response_examples, conversation_samples, history
         )
         if not llm_response:
             structlogger.debug("intentless_policy.prediction.skip_llm_fail")
rasa/core/processor.py
CHANGED
@@ -34,6 +34,9 @@ from rasa.dialogue_understanding.commands import (
     CannotHandleCommand,
     Command,
     NoopCommand,
+    RestartCommand,
+    SessionEndCommand,
+    SessionStartCommand,
     SetSlotCommand,
 )
 from rasa.dialogue_understanding.commands.utils import (
@@ -880,19 +883,61 @@
             tracker.has_coexistence_routing_slot
             and tracker.get_slot(ROUTE_TO_CALM_SLOT) is None
         ):
-            #
-            #
-            #
-            #
+            # If we are currently not routing to either CALM or DM1:
+            # - Sticky route to CALM if there are any commands
+            # from the trigger intent parsing
+            # - Sticky route to DM1 if there are no commands present
+            route_to_calm_slot_value = self._determine_route_to_calm_slot_value(
+                nlu_adapted_commands
+            )
             commands += [
                 SetSlotCommand(
-                    ROUTE_TO_CALM_SLOT,
+                    ROUTE_TO_CALM_SLOT, route_to_calm_slot_value
                 ).as_dict()
             ]
 
         parse_data[COMMANDS] = commands
         return parse_data
 
+    def _determine_route_to_calm_slot_value(
+        self, nlu_adapted_commands: List[Dict[str, Any]]
+    ) -> Optional[bool]:
+        """Determines what value should be assigned to `ROUTE_TO_CALM_SLOT`.
+
+        Returns:
+            - True: If any command other than:
+                - SessionStartCommand
+                - SessionEndCommand
+                - RestartCommand
+              is present.
+            - None: If only ignored system commands are present.
+            - False If no commands at all.
+        """
+        system_commands_to_ignore = [
+            SessionStartCommand.command(),
+            SessionEndCommand.command(),
+            RestartCommand.command(),
+        ]
+
+        # Exclude the system commands, as it doesn't originate from the user's
+        # input intent and shouldn't influence the decision for setting
+        # ROUTE_TO_CALM_SLOT.
+        intent_triggered_commands = [
+            command
+            for command in nlu_adapted_commands
+            if command.get("command") not in system_commands_to_ignore
+        ]
+
+        if len(intent_triggered_commands) > 0:
+            # There are commands other than system commands present - route to CALM
+            return True
+        elif len(nlu_adapted_commands) > 0:
+            # Only system command is present — defer routing decision
+            return None
+        else:
+            # No commands at all — route to DM1
+            return False
+
     def _update_full_retrieval_intent(self, parse_data: Dict[Text, Any]) -> None:
         """Update the parse data with the full retrieval intent.
 
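The three-way return value maps onto coexistence routing: True pins the conversation to CALM, False pins it to DM1, and None leaves the slot unset so a later message can decide. A worked example of the decision, re-implemented over plain dicts; the serialized names in SYSTEM_COMMANDS are assumptions, the real values come from each command class's command() method:

# Hypothetical standalone re-implementation of the decision above.
from typing import Any, Dict, List, Optional

SYSTEM_COMMANDS = {"session_start", "session_end", "restart"}  # assumed names

def route_to_calm(commands: List[Dict[str, Any]]) -> Optional[bool]:
    user_commands = [c for c in commands if c.get("command") not in SYSTEM_COMMANDS]
    if user_commands:
        return True   # user-triggered command: sticky route to CALM
    if commands:
        return None   # only system commands: defer the decision
    return False      # no commands at all: sticky route to DM1

assert route_to_calm([{"command": "start flow"}]) is True
assert route_to_calm([{"command": "session_start"}]) is None
assert route_to_calm([]) is False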
rasa/core/utils.py
CHANGED
@@ -389,16 +389,25 @@ def should_force_slot_filling(
     and the name of the slot if applicable.
     """
     from rasa.dialogue_understanding.processor.command_processor import (
+        find_updated_flows,
         get_current_collect_step,
     )
 
     if tracker is None:
-        structlogger.
-            "slot.force_slot_filling.
+        structlogger.debug(
+            "slot.force_slot_filling.no_found_tracker",
             event_info="Tracker is None. Cannot force slot filling.",
         )
         return False, None
 
+    updated_flows = find_updated_flows(tracker, flows)
+    if updated_flows:
+        structlogger.debug(
+            "slot.force_slot_filling.running_flows_were_updated",
+            updated_flow_ids=updated_flows,
+        )
+        return False, None
+
     stack = tracker.stack
     step = get_current_collect_step(stack, flows)
     if step is None or not step.force_slot_filling:
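Both this guard and the command generator change below rely on find_updated_flows, which compares flow fingerprints recorded on the tracker against the currently loaded flows (command_processor.py stores them via calculate_flow_fingerprints, visible in the last hunk). A minimal sketch of the fingerprint-comparison idea; the hashing and data shapes are illustrative, not rasa's implementation:

# Hypothetical sketch of fingerprint-based flow-change detection.
import hashlib
from typing import Dict, List

def fingerprint(flow_definition: str) -> str:
    return hashlib.sha256(flow_definition.encode()).hexdigest()

def find_changed(stored: Dict[str, str], current: Dict[str, str]) -> List[str]:
    # A flow counts as updated when its current fingerprint no longer
    # matches the one recorded when the conversation started.
    return [
        flow_id
        for flow_id, old_fp in stored.items()
        if current.get(flow_id) != old_fp
    ]

assert find_changed({"transfer_money": "abc"}, {"transfer_money": "abc"}) == []
assert find_changed({"transfer_money": "abc"}, {"transfer_money": "xyz"}) == ["transfer_money"]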
rasa/dialogue_understanding/coexistence/llm_based_router.py
CHANGED

@@ -171,6 +171,7 @@ class LLMBasedRouter(LLMHealthCheckMixin, GraphComponent):
         **kwargs: Any,
     ) -> "LLMBasedRouter":
         """Loads trained component (see parent class for full docstring)."""
+
         # Perform health check on the resolved LLM client config
         llm_config = resolve_model_client_config(config.get(LLM_CONFIG_KEY, {}))
         cls.perform_llm_health_check(
rasa/dialogue_understanding/commands/__init__.py
CHANGED

@@ -16,6 +16,9 @@ from rasa.dialogue_understanding.commands.error_command import ErrorCommand
 from rasa.dialogue_understanding.commands.free_form_answer_command import (
     FreeFormAnswerCommand,
 )
+from rasa.dialogue_understanding.commands.handle_code_change_command import (
+    HandleCodeChangeCommand,
+)
 from rasa.dialogue_understanding.commands.human_handoff_command import (
     HumanHandoffCommand,
 )
@@ -49,6 +52,7 @@ __all__ = [
     "SetSlotCommand",
     "StartFlowCommand",
     "HumanHandoffCommand",
+    "HandleCodeChangeCommand",
     "CorrectSlotsCommand",
     "CorrectedSlot",
     "ErrorCommand",
rasa/dialogue_understanding/commands/cancel_flow_command.py
CHANGED

@@ -95,7 +95,9 @@ class CancelFlowCommand(Command):
         original_stack = original_tracker.stack
 
         applied_events: List[Event] = []
-        user_frame = top_user_flow_frame(
+        user_frame = top_user_flow_frame(
+            original_stack, ignore_call_and_link_frames=False
+        )
         current_flow = user_frame.flow(all_flows) if user_frame else None
 
         if not current_flow:
rasa/dialogue_understanding/commands/set_slot_command.py
CHANGED

@@ -13,6 +13,7 @@ from rasa.dialogue_understanding.commands.command_syntax_manager import (
 )
 from rasa.dialogue_understanding.commands.utils import (
     clean_extracted_value,
+    find_default_flows_collecting_slot,
     get_nullable_slot_value,
 )
 from rasa.dialogue_understanding.patterns.collect_information import (
@@ -136,6 +137,11 @@ class SetSlotCommand(Command):
         ):
             # Get the other predicted flows from the most recent message on the tracker.
             predicted_flows = get_flows_predicted_to_start_from_tracker(tracker)
+            if not predicted_flows:
+                # If no predicted flows, check for default flows collecting the slot.
+                predicted_flows = find_default_flows_collecting_slot(
+                    self.name, all_flows
+                )
             use_slot_fill = any(
                 step.collect == self.name and not step.ask_before_filling
                 for flow in all_flows.underlying_flows
rasa/dialogue_understanding/commands/utils.py
CHANGED

@@ -7,18 +7,18 @@ from rasa.dialogue_understanding.patterns.validate_slot import (
 )
 from rasa.shared.constants import ACTION_ASK_PREFIX, UTTER_ASK_PREFIX
 from rasa.shared.core.events import Event, SlotSet
+from rasa.shared.core.flows import FlowsList
 from rasa.shared.core.slots import Slot
 from rasa.shared.core.trackers import DialogueStateTracker
 
 if TYPE_CHECKING:
     from rasa.dialogue_understanding.commands import StartFlowCommand
-    from rasa.shared.core.flows import FlowsList
 
 structlogger = structlog.get_logger()
 
 
 def start_flow_by_name(
-    flow_name: str, flows:
+    flow_name: str, flows: FlowsList
 ) -> Optional["StartFlowCommand"]:
     from rasa.dialogue_understanding.commands import StartFlowCommand
 
@@ -126,3 +126,27 @@ def create_validate_frames_from_slot_set_events(
     validate_frames.append(frame)
 
     return tracker, validate_frames
+
+
+def find_default_flows_collecting_slot(
+    slot_name: str, all_flows: FlowsList
+) -> List[str]:
+    """Find default flows that have collect steps matching the specified slot name.
+
+    Args:
+        slot_name: The name of the slot to search for.
+        all_flows: All flows in the assistant.
+
+    Returns:
+        List of flow IDs for default flows that collect the specified slot
+        without asking before filling.
+    """
+    return [
+        flow.id
+        for flow in all_flows.underlying_flows
+        if flow.is_rasa_default_flow
+        and any(
+            step.collect == slot_name and not step.ask_before_filling
+            for step in flow.get_collect_steps()
+        )
+    ]
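find_default_flows_collecting_slot only touches a handful of attributes, so its behavior is easy to demonstrate with stand-in objects; real Flow/FlowsList instances are parsed from flow YAML, and the stub classes and flow IDs here are purely illustrative:

# Duck-typed stand-ins mimicking the attributes the helper reads.
from dataclasses import dataclass, field
from typing import List

@dataclass
class StubStep:
    collect: str
    ask_before_filling: bool = False

@dataclass
class StubFlow:
    id: str
    is_rasa_default_flow: bool
    steps: List[StubStep] = field(default_factory=list)

    def get_collect_steps(self) -> List[StubStep]:
        return self.steps

@dataclass
class StubFlowsList:
    underlying_flows: List[StubFlow]

flows = StubFlowsList([
    StubFlow("pattern_collect_information", True, [StubStep("email")]),
    StubFlow("custom_flow", False, [StubStep("email")]),
])
# Passing these stubs to find_default_flows_collecting_slot("email", flows)
# would yield ["pattern_collect_information"]: only the default flow with a
# matching collect step that does not ask before filling qualifies.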
rasa/dialogue_understanding/generator/command_generator.py
CHANGED

@@ -8,6 +8,7 @@ from rasa.dialogue_understanding.commands import (
     Command,
     CorrectSlotsCommand,
     ErrorCommand,
+    HandleCodeChangeCommand,
     SetSlotCommand,
     StartFlowCommand,
 )
@@ -396,15 +397,24 @@ class CommandGenerator:
         The filtered commands.
         """
         from rasa.dialogue_understanding.processor.command_processor import (
+            find_updated_flows,
             get_current_collect_step,
         )
 
         if tracker is None:
-            structlogger.
+            structlogger.debug(
                 "command_generator.filter_commands_during_force_slot_filling.tracker_not_found",
             )
             return commands
 
+        updated_flows = find_updated_flows(tracker, available_flows)
+        if updated_flows:
+            structlogger.debug(
+                "command_generator.filter_commands_during_force_slot_filling.running_flows_were_updated",
+                updated_flow_ids=updated_flows,
+            )
+            return [HandleCodeChangeCommand()]
+
         stack = tracker.stack
         step = get_current_collect_step(stack, available_flows)
 
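This is one of two places where the new HandleCodeChangeCommand takes over: during force slot filling the generator discards whatever it predicted and returns only that command, and execute_commands in command_processor.py (last hunk below) performs the same substitution before any command cleanup, so a flow edited mid-conversation is handled by the code-change pattern rather than by stale commands.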
rasa/dialogue_understanding/generator/llm_based_command_generator.py
CHANGED

@@ -1,8 +1,6 @@
 from abc import ABC, abstractmethod
-from asyncio import Lock
 from functools import lru_cache
 from typing import Any, Dict, List, Optional, Set, Text, Tuple, Union
-from uuid import UUID, uuid4
 
 import structlog
 from jinja2 import Environment, Template, select_autoescape
@@ -91,9 +89,6 @@ class LLMBasedCommandGenerator(
         else:
             self.flow_retrieval = None
 
-        self.sender_id_to_session_id_mapping: Dict[str, UUID] = {}
-        self._lock = Lock()
-
     ### Abstract methods
     @staticmethod
     @abstractmethod
@@ -230,7 +225,8 @@ class LLMBasedCommandGenerator(
 
     @lru_cache
     def compile_template(self, template: str) -> Template:
-        """
+        """
+        Compile the prompt template and register custom filters.
         Compiling the template is an expensive operation,
         so we cache the result.
         """
@@ -332,9 +328,7 @@ class LLMBasedCommandGenerator(
 
     @measure_llm_latency
     async def invoke_llm(
-        self,
-        prompt: Union[List[dict], List[str], str],
-        metadata: Optional[Dict[str, Any]] = None,
+        self, prompt: Union[List[dict], List[str], str]
     ) -> Optional[LLMResponse]:
         """Use LLM to generate a response.
 
@@ -347,7 +341,6 @@ class LLMBasedCommandGenerator(
             - a list of messages. Each message is a string and will be formatted
               as a user message.
             - a single message as a string which will be formatted as user message.
-            metadata: Optional metadata to be passed to the LLM call.
 
         Returns:
             An LLMResponse object.
@@ -359,7 +352,7 @@ class LLMBasedCommandGenerator(
             self.config.get(LLM_CONFIG_KEY), self.get_default_llm_config()
         )
         try:
-            return await llm.acompletion(prompt
+            return await llm.acompletion(prompt)
         except Exception as e:
             # unfortunately, langchain does not wrap LLM exceptions which means
             # we have to catch all exceptions here
@@ -662,7 +655,3 @@ class LLMBasedCommandGenerator(
     def get_default_llm_config() -> Dict[str, Any]:
         """Get the default LLM config for the command generator."""
         return DEFAULT_LLM_CONFIG
-
-    async def _get_or_create_session_id(self, sender_id: str) -> UUID:
-        async with self._lock:
-            return self.sender_id_to_session_id_mapping.setdefault(sender_id, uuid4())
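Taken together, these removals strip the per-call Langfuse plumbing: the sender-to-session UUID map guarded by an asyncio Lock, the metadata parameter on invoke_llm, and the LANGFUSE_* constants (also dropped from rasa/shared/constants.py and the provider clients per the file list). For context, a reconstruction of the deleted session-id caching pattern, assembled from the removed lines; it is no longer present in the package:

# Reconstruction of the removed helper, shown as a standalone class.
from asyncio import Lock
from typing import Dict
from uuid import UUID, uuid4

class SessionIdCache:
    def __init__(self) -> None:
        self.sender_id_to_session_id_mapping: Dict[str, UUID] = {}
        self._lock = Lock()

    async def get_or_create(self, sender_id: str) -> UUID:
        # setdefault under a lock: one stable UUID per sender for the
        # lifetime of the instance.
        async with self._lock:
            return self.sender_id_to_session_id_mapping.setdefault(
                sender_id, uuid4()
            )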
rasa/dialogue_understanding/generator/llm_command_generator.py
CHANGED

@@ -55,9 +55,7 @@ class LLMCommandGenerator(SingleStepLLMCommandGenerator):
     )
 
     async def invoke_llm(
-        self,
-        prompt: Union[List[dict], List[str], str],
-        metadata: Optional[Dict[str, Any]] = None,
+        self, prompt: Union[List[dict], List[str], str]
     ) -> Optional[LLMResponse]:
         try:
             return await super().invoke_llm(prompt)
rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py
CHANGED

@@ -42,9 +42,6 @@ from rasa.engine.storage.resource import Resource
 from rasa.engine.storage.storage import ModelStorage
 from rasa.shared.constants import (
     EMBEDDINGS_CONFIG_KEY,
-    LANGFUSE_CUSTOM_METADATA_DICT,
-    LANGFUSE_METADATA_SESSION_ID,
-    LANGFUSE_TAGS,
     RASA_PATTERN_CANNOT_HANDLE_NOT_SUPPORTED,
     ROUTE_TO_CALM_SLOT,
 )
@@ -110,7 +107,7 @@ structlogger = structlog.get_logger()
 )
 @deprecated(
     reason=(
-        "The MultiStepLLMCommandGenerator is
+        "The MultiStepLLMCommandGenerator is deprecated and will be removed in "
         "Rasa `4.0.0`."
     )
 )
@@ -495,20 +492,7 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=prompt,
         )
 
-
-            session_id = str(await self._get_or_create_session_id(tracker.sender_id))
-        else:
-            session_id = "unknown"
-        metadata = {
-            LANGFUSE_METADATA_SESSION_ID: session_id,
-            LANGFUSE_CUSTOM_METADATA_DICT: {
-                "component": self.__class__.__name__,
-                "function": "_predict_commands_for_active_flow",
-            },
-            LANGFUSE_TAGS: [self.__class__.__name__],
-        }
-
-        response = await self.invoke_llm(prompt, metadata)
+        response = await self.invoke_llm(prompt)
         llm_response = LLMResponse.ensure_llm_response(response)
         actions = None
         if llm_response and llm_response.choices:
@@ -562,20 +546,8 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             ".prompt_rendered",
             prompt=prompt,
         )
-        if tracker:
-            session_id = str(await self._get_or_create_session_id(tracker.sender_id))
-        else:
-            session_id = "unknown"
-        metadata = {
-            LANGFUSE_METADATA_SESSION_ID: session_id,
-            LANGFUSE_CUSTOM_METADATA_DICT: {
-                "component": self.__class__.__name__,
-                "function": "_predict_commands_for_handling_flows",
-            },
-            LANGFUSE_TAGS: [self.__class__.__name__],
-        }
 
-        response = await self.invoke_llm(prompt
+        response = await self.invoke_llm(prompt)
         llm_response = LLMResponse.ensure_llm_response(response)
         actions = None
         if llm_response and llm_response.choices:
@@ -664,20 +636,8 @@ class MultiStepLLMCommandGenerator(LLMBasedCommandGenerator):
             flow=newly_started_flow.id,
             prompt=prompt,
         )
-        if tracker:
-            session_id = str(await self._get_or_create_session_id(tracker.sender_id))
-        else:
-            session_id = "unknown"
-        metadata = {
-            LANGFUSE_METADATA_SESSION_ID: session_id,
-            LANGFUSE_CUSTOM_METADATA_DICT: {
-                "component": self.__class__.__name__,
-                "function": "_predict_commands_for_newly_started_flow",
-            },
-            LANGFUSE_TAGS: [self.__class__.__name__],
-        }
 
-        response = await self.invoke_llm(prompt
+        response = await self.invoke_llm(prompt)
         llm_response = LLMResponse.ensure_llm_response(response)
         actions = None
         if llm_response and llm_response.choices:
rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py
CHANGED

@@ -47,9 +47,6 @@ from rasa.shared.constants import (
     AWS_BEDROCK_PROVIDER,
     AZURE_OPENAI_PROVIDER,
     EMBEDDINGS_CONFIG_KEY,
-    LANGFUSE_CUSTOM_METADATA_DICT,
-    LANGFUSE_METADATA_SESSION_ID,
-    LANGFUSE_TAGS,
     MAX_COMPLETION_TOKENS_CONFIG_KEY,
     PROMPT_TEMPLATE_CONFIG_KEY,
     ROUTE_TO_CALM_SLOT,
@@ -369,17 +366,7 @@ class CompactLLMCommandGenerator(LLMBasedCommandGenerator):
             prompt=flow_prompt,
         )
 
-
-            session_id = str(await self._get_or_create_session_id(tracker.sender_id))
-        else:
-            session_id = "unknown"
-        metadata = {
-            LANGFUSE_METADATA_SESSION_ID: session_id,
-            LANGFUSE_CUSTOM_METADATA_DICT: {"component": self.__class__.__name__},
-            LANGFUSE_TAGS: [self.__class__.__name__],
-        }
-
-        response = await self.invoke_llm(flow_prompt, metadata)
+        response = await self.invoke_llm(flow_prompt)
         llm_response = LLMResponse.ensure_llm_response(response)
         # The check for 'None' maintains compatibility with older versions
         # of LLMCommandGenerator. In previous implementations, 'invoke_llm'
rasa/dialogue_understanding/processor/command_processor.py
CHANGED

@@ -214,18 +214,18 @@ def execute_commands(
     commands: List[Command] = get_commands_from_tracker(tracker)
     original_tracker = tracker.copy()
 
-    commands = clean_up_commands(
-        commands, tracker, all_flows, execution_context, story_graph, domain
-    )
-
     updated_flows = find_updated_flows(tracker, all_flows)
     if updated_flows:
-        #
+        # if there are updated flows, we need to handle the code change
         structlogger.debug(
             "command_processor.execute_commands.running_flows_were_updated",
             updated_flow_ids=updated_flows,
         )
         commands = [HandleCodeChangeCommand()]
+    else:
+        commands = clean_up_commands(
+            commands, tracker, all_flows, execution_context, story_graph, domain
+        )
 
     # store current flow hashes if they changed
     new_hashes = calculate_flow_fingerprints(all_flows)
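The reordering is the substantive part of this hunk: clean_up_commands previously ran before the updated-flows check, so commands could be normalized against flow definitions that had just changed; it now runs only in the unchanged case, while a detected change replaces the whole list with HandleCodeChangeCommand. A runnable sketch of the new control flow over hypothetical stand-ins:

# Stand-ins illustrating the reordered logic in execute_commands.
from typing import List

class HandleCodeChangeCommand:
    def __repr__(self) -> str:
        return "HandleCodeChangeCommand()"

def process(commands: List[str], updated_flows: List[str]) -> list:
    if updated_flows:
        # Flow definitions changed mid-conversation: discard parsed
        # commands and let the code-change pattern take over.
        return [HandleCodeChangeCommand()]
    # Flows unchanged: only now clean up the commands (previously this
    # ran unconditionally, before the updated-flows check).
    return [c for c in commands if c]  # stand-in for clean_up_commands

print(process(["set slot email"], updated_flows=["transfer_money"]))
print(process(["set slot email"], updated_flows=[]))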