rasa-pro 3.12.6.dev1__py3-none-any.whl → 3.12.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of rasa-pro might be problematic.
- rasa/cli/run.py +10 -6
- rasa/cli/utils.py +7 -0
- rasa/core/actions/action.py +0 -6
- rasa/core/channels/channel.py +93 -0
- rasa/core/channels/inspector/dist/assets/{arc-c7691751.js → arc-351bec79.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{blockDiagram-38ab4fdb-ab99dff7.js → blockDiagram-38ab4fdb-2567f3e5.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{c4Diagram-3d4e48cf-08c35a6b.js → c4Diagram-3d4e48cf-c94acad0.js} +1 -1
- rasa/core/channels/inspector/dist/assets/channel-96a753ef.js +1 -0
- rasa/core/channels/inspector/dist/assets/{classDiagram-70f12bd4-9e9c71c9.js → classDiagram-70f12bd4-a2c4f658.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{classDiagram-v2-f2320105-15e7e2bf.js → classDiagram-v2-f2320105-4036ee82.js} +1 -1
- rasa/core/channels/inspector/dist/assets/clone-5bbb0c7d.js +1 -0
- rasa/core/channels/inspector/dist/assets/{createText-2e5e7dd3-9c105cb1.js → createText-2e5e7dd3-01f8708a.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{edges-e0da2a9e-77e89e48.js → edges-e0da2a9e-17b4c582.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{erDiagram-9861fffd-7a011646.js → erDiagram-9861fffd-5b382730.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{flowDb-956e92f1-b6f105ac.js → flowDb-956e92f1-c9dd4758.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{flowDiagram-66a62f08-ce4f18c2.js → flowDiagram-66a62f08-014c7159.js} +1 -1
- rasa/core/channels/inspector/dist/assets/flowDiagram-v2-96b9c2cf-72082386.js +1 -0
- rasa/core/channels/inspector/dist/assets/{flowchart-elk-definition-4a651766-cb5f6da4.js → flowchart-elk-definition-4a651766-bc13fd64.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{ganttDiagram-c361ad54-e4d19e28.js → ganttDiagram-c361ad54-a3bc832f.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{gitGraphDiagram-72cf32ee-727b1c33.js → gitGraphDiagram-72cf32ee-4f0983dd.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{graph-6e2ab9a7.js → graph-0069f93e.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{index-3862675e-84ec700f.js → index-3862675e-7ddaa093.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{index-098a1a24.js → index-d77a19b4.js} +129 -116
- rasa/core/channels/inspector/dist/assets/{infoDiagram-f8f76790-78dda442.js → infoDiagram-f8f76790-c3e28742.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{journeyDiagram-49397b02-f1cc6dd1.js → journeyDiagram-49397b02-6d36c64c.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{layout-d98dcd0c.js → layout-3d27f9c0.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{line-838e3d82.js → line-e8cb25c5.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{linear-eae72406.js → linear-4321e9fa.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{mindmap-definition-fc14e90a-c96fd84b.js → mindmap-definition-fc14e90a-47e328b2.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{pieDiagram-8a3498a8-c936d4e2.js → pieDiagram-8a3498a8-647edbaf.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{quadrantDiagram-120e2f19-b338eb8f.js → quadrantDiagram-120e2f19-0703ad7d.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{requirementDiagram-deff3bca-c6b6c0d5.js → requirementDiagram-deff3bca-59312f87.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{sankeyDiagram-04a897e0-b9372e19.js → sankeyDiagram-04a897e0-8e170e1c.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{sequenceDiagram-704730f1-479e0a3f.js → sequenceDiagram-704730f1-b2b42696.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{stateDiagram-587899a1-fd26eebc.js → stateDiagram-587899a1-239f7e55.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{stateDiagram-v2-d93cdb3a-3233e0ae.js → stateDiagram-v2-d93cdb3a-9cb9c726.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{styles-6aaf32cf-1fdd392b.js → styles-6aaf32cf-d15a0f74.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{styles-9a916d00-6d7bfa1b.js → styles-9a916d00-d7c52634.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{styles-c10674c1-f86aab11.js → styles-c10674c1-cf79ea88.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{svgDrawCommon-08f97a94-e3e49d7a.js → svgDrawCommon-08f97a94-52b3b9f9.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{timeline-definition-85554ec2-6fe08b4d.js → timeline-definition-85554ec2-c8e3cd8c.js} +1 -1
- rasa/core/channels/inspector/dist/assets/{xychartDiagram-e933f94c-c2e06fd6.js → xychartDiagram-e933f94c-300afa53.js} +1 -1
- rasa/core/channels/inspector/dist/index.html +1 -1
- rasa/core/channels/inspector/src/components/Chat.tsx +23 -2
- rasa/core/channels/inspector/src/components/DiagramFlow.tsx +2 -5
- rasa/core/channels/inspector/src/helpers/conversation.ts +16 -0
- rasa/core/channels/inspector/src/types.ts +1 -1
- rasa/core/channels/voice_ready/audiocodes.py +53 -21
- rasa/core/channels/voice_ready/jambonz.py +25 -5
- rasa/core/channels/voice_ready/jambonz_protocol.py +4 -0
- rasa/core/channels/voice_ready/twilio_voice.py +48 -1
- rasa/core/channels/voice_stream/tts/azure.py +11 -2
- rasa/core/channels/voice_stream/twilio_media_streams.py +101 -26
- rasa/core/policies/flows/flow_executor.py +3 -38
- rasa/core/processor.py +19 -5
- rasa/core/utils.py +53 -0
- rasa/dialogue_understanding/commands/cancel_flow_command.py +4 -59
- rasa/dialogue_understanding/commands/start_flow_command.py +0 -41
- rasa/dialogue_understanding/generator/command_generator.py +67 -0
- rasa/dialogue_understanding/generator/llm_based_command_generator.py +2 -12
- rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +0 -61
- rasa/dialogue_understanding/processor/command_processor.py +7 -65
- rasa/dialogue_understanding/stack/utils.py +0 -38
- rasa/e2e_test/utils/validation.py +3 -3
- rasa/shared/core/constants.py +0 -8
- rasa/shared/core/flows/flow.py +0 -17
- rasa/shared/core/flows/flows_yaml_schema.json +3 -38
- rasa/shared/core/flows/steps/collect.py +5 -18
- rasa/shared/core/flows/utils.py +1 -16
- rasa/shared/core/slot_mappings.py +11 -5
- rasa/shared/nlu/constants.py +0 -1
- rasa/shared/utils/common.py +11 -1
- rasa/validator.py +1 -123
- rasa/version.py +1 -2
- {rasa_pro-3.12.6.dev1.dist-info → rasa_pro-3.12.7.dist-info}/METADATA +3 -1
- {rasa_pro-3.12.6.dev1.dist-info → rasa_pro-3.12.7.dist-info}/RECORD +79 -81
- rasa/core/actions/action_handle_digressions.py +0 -164
- rasa/core/channels/inspector/dist/assets/channel-11268142.js +0 -1
- rasa/core/channels/inspector/dist/assets/clone-ff7f2ce7.js +0 -1
- rasa/core/channels/inspector/dist/assets/flowDiagram-v2-96b9c2cf-cba7ae20.js +0 -1
- rasa/dialogue_understanding/commands/handle_digressions_command.py +0 -144
- rasa/dialogue_understanding/patterns/handle_digressions.py +0 -81
- {rasa_pro-3.12.6.dev1.dist-info → rasa_pro-3.12.7.dist-info}/NOTICE +0 -0
- {rasa_pro-3.12.6.dev1.dist-info → rasa_pro-3.12.7.dist-info}/WHEEL +0 -0
- {rasa_pro-3.12.6.dev1.dist-info → rasa_pro-3.12.7.dist-info}/entry_points.txt +0 -0
rasa/core/channels/inspector/src/components/DiagramFlow.tsx

@@ -2,6 +2,7 @@ import { Box, Button, Flex, Heading, Text } from "@chakra-ui/react";
 import mermaid from "mermaid";
 import { useOurTheme } from "../theme";
 import { formatFlow } from "../helpers/formatters";
+import { restartConversation } from "../helpers/conversation";
 import { useEffect, useRef, useState } from "react";
 import { Flow, Slot, Stack } from "../types";
 import { NoActiveFlow } from "./NoActiveFlow";
@@ -51,11 +52,7 @@ export const DiagramFlow = ({ stackFrame, stepTrail, flows, slots }: Props) => {
   }, [text, flow, slots, stackFrame]);
 
   const handleRestartConversation = () => {
-
-    const url = new URL(window.location.href);
-    url.searchParams.delete("sender");
-    window.history.pushState(null, "", url.toString());
-    location.reload();
+    restartConversation();
   };
 
   const scrollSx = {
rasa/core/channels/inspector/src/helpers/conversation.ts (new file)

@@ -0,0 +1,16 @@
+export const restartConversation = () => {
+  // unset the sender id from the query parameters
+  const url = new URL(window.location.href);
+  url.searchParams.delete("sender");
+  window.history.pushState(null, "", url.toString());
+  location.reload();
+};
+
+// Make the function available on the window object
+declare global {
+  interface Window {
+    restartConversation: typeof restartConversation;
+  }
+}
+
+window.restartConversation = restartConversation;
rasa/core/channels/inspector/src/types.ts

@@ -5,7 +5,7 @@ export interface Slot {
 }
 
 export interface Event {
-  event: "user" | "bot" | "flow_completed" | "flow_started" | "stack" | "restart";
+  event: "user" | "bot" | "flow_completed" | "flow_started" | "stack" | "restart" | "session_ended";
   text?: string;
   timestamp: string;
   update?: string;
rasa/core/channels/voice_ready/audiocodes.py

@@ -96,10 +96,13 @@ class Conversation:
             event_params = {"value": event["value"]}
             text += json.dumps(event_params)
         else:
-
-
-
-
+            # handle other events described by Audiocodes
+            # https://techdocs.audiocodes.com/voice-ai-connect/#VAIG_Combined/inactivity-detection.htm?TocPath=Bot%2520integration%257CReceiving%2520notifications%257C_____3
+            text = f"{INTENT_MESSAGE_PREFIX}vaig_event_{event['name']}"
+            event_params = {**event.get("parameters", {})}
+            if "value" in event:
+                event_params["value"] = event["value"]
+            text += json.dumps(event_params)
 
         return text
 
@@ -115,11 +118,21 @@ class Conversation:
     async def handle_activities(
         self,
         message: Dict[Text, Any],
+        input_channel_name: str,
        output_channel: OutputChannel,
        on_new_message: Callable[[UserMessage], Awaitable[Any]],
     ) -> None:
        """Handle activities sent by Audiocodes."""
        structlogger.debug("audiocodes.handle.activities")
+        if input_channel_name == "":
+            structlogger.warning(
+                "audiocodes.handle.activities.empty_input_channel_name",
+                event_info=(
+                    "Audiocodes input channel name is empty "
+                    f"for conversation {self.conversation_id}"
+                ),
+            )
+
        for activity in message["activities"]:
            text = None
            if activity[ACTIVITY_ID_KEY] in self.activity_ids:
@@ -143,6 +156,7 @@ class Conversation:
                metadata = self.get_metadata(activity)
                user_msg = UserMessage(
                    text=text,
+                    input_channel=input_channel_name,
                    output_channel=output_channel,
                    sender_id=self.conversation_id,
                    metadata=metadata,
@@ -394,7 +408,12 @@ class AudiocodesInput(InputChannel):
            # start a background task to handle activities
            self._create_task(
                conversation_id,
-                conversation.handle_activities(
+                conversation.handle_activities(
+                    request.json,
+                    input_channel_name=self.name(),
+                    output_channel=ac_output,
+                    on_new_message=on_new_message,
+                ),
            )
            return response.json(response_json)
 
@@ -407,23 +426,9 @@ class AudiocodesInput(InputChannel):
            Example of payload:
            {"conversation": <conversation_id>, "reason": Optional[Text]}.
            """
-            self.
-
-            await on_new_message(
-                UserMessage(
-                    text=f"{INTENT_MESSAGE_PREFIX}session_end",
-                    output_channel=None,
-                    sender_id=conversation_id,
-                    metadata=reason,
-                )
+            return await self._handle_disconnect(
+                request, conversation_id, on_new_message
            )
-            del self.conversations[conversation_id]
-            structlogger.debug(
-                "audiocodes.disconnect",
-                conversation=conversation_id,
-                request=request.json,
-            )
-            return response.json({})
 
        @ac_webhook.route("/conversation/<conversation_id>/keepalive", methods=["POST"])
        async def keepalive(request: Request, conversation_id: Text) -> HTTPResponse:
@@ -438,6 +443,32 @@ class AudiocodesInput(InputChannel):
 
        return ac_webhook
 
+    async def _handle_disconnect(
+        self,
+        request: Request,
+        conversation_id: Text,
+        on_new_message: Callable[[UserMessage], Awaitable[Any]],
+    ) -> HTTPResponse:
+        """Triggered when the call is disconnected."""
+        self._get_conversation(request.token, conversation_id)
+        reason = {"reason": request.json.get("reason")}
+        await on_new_message(
+            UserMessage(
+                text=f"{INTENT_MESSAGE_PREFIX}session_end",
+                input_channel=self.name(),
+                output_channel=None,
+                sender_id=conversation_id,
+                metadata=reason,
+            )
+        )
+        del self.conversations[conversation_id]
+        structlogger.debug(
+            "audiocodes.disconnect",
+            conversation=conversation_id,
+            request=request.json,
+        )
+        return response.json({})
+
 
 class AudiocodesOutput(OutputChannel):
     @classmethod
@@ -445,6 +476,7 @@ class AudiocodesOutput(OutputChannel):
        return CHANNEL_NAME
 
     def __init__(self) -> None:
+        super().__init__()
        self.messages: List[Dict] = []
 
     async def add_message(self, message: Dict) -> None:
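A minimal sketch of what the new Audiocodes event handling above produces for a non-standard event; the payload below is made up and INTENT_MESSAGE_PREFIX is assumed to be Rasa's "/" intent prefix:

import json

INTENT_MESSAGE_PREFIX = "/"  # assumed value of Rasa's intent message prefix

# illustrative Audiocodes event payload (name, parameters and value are made up)
event = {"name": "noUserInput", "parameters": {"duration": 10}, "value": "timeout"}

text = f"{INTENT_MESSAGE_PREFIX}vaig_event_{event['name']}"
event_params = {**event.get("parameters", {})}
if "value" in event:
    event_params["value"] = event["value"]
text += json.dumps(event_params)

print(text)  # /vaig_event_noUserInput{"duration": 10, "value": "timeout"}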
rasa/core/channels/voice_ready/jambonz.py

@@ -5,8 +5,14 @@ from sanic import Blueprint, Websocket, response # type: ignore[attr-defined]
 from sanic.request import Request
 from sanic.response import HTTPResponse
 
-from rasa.core.channels.channel import
+from rasa.core.channels.channel import (
+    InputChannel,
+    OutputChannel,
+    UserMessage,
+    requires_basic_auth,
+)
 from rasa.core.channels.voice_ready.jambonz_protocol import (
+    CHANNEL_NAME,
     send_ws_hangup_message,
     send_ws_text_message,
     websocket_message_handler,
@@ -18,8 +24,6 @@ from rasa.utils.io import remove_emojis
 
 structlogger = structlog.get_logger()
 
-CHANNEL_NAME = "jambonz"
-
 DEFAULT_HANGUP_DELAY_SECONDS = 1
 
 
@@ -32,12 +36,27 @@ class JambonzVoiceReadyInput(InputChannel):
 
     @classmethod
     def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel:
-
+        if not credentials:
+            return cls()
+
+        username = credentials.get("username")
+        password = credentials.get("password")
+        if (username is None) != (password is None):
+            raise RasaException(
+                "In Jambonz channel, either both username and password "
+                "or neither should be provided. "
+            )
 
-
+        return cls(username, password)
+
+    def __init__(
+        self, username: Optional[Text] = None, password: Optional[Text] = None
+    ) -> None:
        """Initializes the JambonzVoiceReadyInput channel."""
        mark_as_beta_feature("Jambonz Channel")
        validate_voice_license_scope()
+        self.username = username
+        self.password = password
 
     def blueprint(
         self, on_new_message: Callable[[UserMessage], Awaitable[Any]]
@@ -50,6 +69,7 @@ class JambonzVoiceReadyInput(InputChannel):
            return response.json({"status": "ok"})
 
        @jambonz_webhook.websocket("/websocket", subprotocols=["ws.jambonz.org"])  # type: ignore
+        @requires_basic_auth(self.username, self.password)
        async def websocket(request: Request, ws: Websocket) -> None:
            """Triggered on new websocket connection."""
            async for message in ws:
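The credential handling added to the Jambonz channel (and mirrored in the Twilio channels below) enforces a "both or neither" rule for basic auth. A standalone sketch of that check, with made-up values:

from typing import Optional


def validate_basic_auth_pair(username: Optional[str], password: Optional[str]) -> None:
    # exactly one of the two being set is an invalid configuration
    if (username is None) != (password is None):
        raise ValueError(
            "Either both username and password or neither should be provided."
        )


validate_basic_auth_pair(None, None)       # ok: basic auth disabled
validate_basic_auth_pair("bot", "s3cret")  # ok: basic auth enabled
# validate_basic_auth_pair("bot", None)    # would raise: incomplete pair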
rasa/core/channels/voice_ready/jambonz_protocol.py

@@ -10,6 +10,7 @@ from rasa.core.channels.channel import UserMessage
 from rasa.core.channels.voice_ready.utils import CallParameters
 
 structlogger = structlog.get_logger()
+CHANNEL_NAME = "jambonz"
 
 
 @dataclass
@@ -206,6 +207,7 @@ async def handle_new_session(
        output_channel=output_channel,
        sender_id=message.call_sid,
        metadata=asdict(message.call_params),
+        input_channel=CHANNEL_NAME,
     )
     await send_config_ack(message.message_id, ws)
     await on_new_message(user_msg)
@@ -238,6 +240,7 @@ async def handle_gather_completed(
     output_channel = JambonzWebsocketOutput(ws, transcript_result.call_sid)
     user_msg = UserMessage(
        text=most_likely_transcript.text,
+        input_channel=CHANNEL_NAME,
        output_channel=output_channel,
        sender_id=transcript_result.call_sid,
        metadata={},
@@ -288,6 +291,7 @@ async def handle_call_status(
     output_channel = JambonzWebsocketOutput(ws, call_status.call_sid)
     user_msg = UserMessage(
        text="/session_end",
+        input_channel=CHANNEL_NAME,
        output_channel=output_channel,
        sender_id=call_status.call_sid,
        metadata={},
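The jambonz_protocol changes tag every UserMessage with the channel name. A hedged usage sketch (assumes the rasa package is importable; the sender id is made up):

from rasa.core.channels.channel import UserMessage

CHANNEL_NAME = "jambonz"

user_msg = UserMessage(
    text="/session_end",
    input_channel=CHANNEL_NAME,
    output_channel=None,
    sender_id="call-1234",
    metadata={},
)
print(user_msg.input_channel)  # jambonz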
rasa/core/channels/voice_ready/twilio_voice.py

@@ -13,14 +13,19 @@ from rasa.core.channels.channel import (
     CollectingOutputChannel,
     InputChannel,
     UserMessage,
+    create_auth_requested_response_provider,
+    requires_basic_auth,
 )
 from rasa.core.channels.voice_ready.utils import CallParameters
 from rasa.shared.core.events import BotUttered
-from rasa.shared.exceptions import InvalidConfigException
+from rasa.shared.exceptions import InvalidConfigException, RasaException
 
 logger = structlog.get_logger(__name__)
 
 
+TWILIO_VOICE_PATH = "webhooks/twilio_voice/webhook"
+
+
 def map_call_params(form: RequestParameters) -> CallParameters:
     """Map the Audiocodes parameters to the CallParameters dataclass."""
     return CallParameters(
@@ -120,6 +125,14 @@ class TwilioVoiceInput(InputChannel):
        """Load custom configurations."""
        credentials = credentials or {}
 
+        username = credentials.get("username")
+        password = credentials.get("password")
+        if (username is None) != (password is None):
+            raise RasaException(
+                "In TwilioVoice channel, either both username and password "
+                "or neither should be provided. "
+            )
+
        return cls(
            credentials.get(
                "reprompt_fallback_phrase",
@@ -129,6 +142,8 @@ class TwilioVoiceInput(InputChannel):
            credentials.get("speech_timeout", "5"),
            credentials.get("speech_model", "default"),
            credentials.get("enhanced", "false"),
+            username=username,
+            password=password,
        )
 
     def __init__(
@@ -138,6 +153,8 @@ class TwilioVoiceInput(InputChannel):
        speech_timeout: Text = "5",
        speech_model: Text = "default",
        enhanced: Text = "false",
+        username: Optional[Text] = None,
+        password: Optional[Text] = None,
     ) -> None:
        """Creates a connection to Twilio voice.
 
@@ -153,6 +170,8 @@ class TwilioVoiceInput(InputChannel):
        self.speech_timeout = speech_timeout
        self.speech_model = speech_model
        self.enhanced = enhanced
+        self.username = username
+        self.password = password
 
        self._validate_configuration()
 
@@ -161,6 +180,9 @@ class TwilioVoiceInput(InputChannel):
        if self.assistant_voice not in self.SUPPORTED_VOICES:
            self._raise_invalid_voice_exception()
 
+        if (self.username is None) != (self.password is None):
+            self._raise_invalid_credentials_exception()
+
        try:
            int(self.speech_timeout)
        except ValueError:
@@ -246,6 +268,13 @@ class TwilioVoiceInput(InputChannel):
            return response.json({"status": "ok"})
 
        @twilio_voice_webhook.route("/webhook", methods=["POST"])
+        @requires_basic_auth(
+            username=self.username,
+            password=self.password,
+            auth_request_provider=create_auth_requested_response_provider(
+                TWILIO_VOICE_PATH
+            ),
+        )
        async def receive(request: Request) -> HTTPResponse:
            sender_id = request.form.get("From")
            text = request.form.get("SpeechResult")
@@ -310,6 +339,11 @@ class TwilioVoiceInput(InputChannel):
            twilio_response = self._build_twilio_voice_response(
                [{"text": last_response_text}]
            )
+
+            logger.debug(
+                "twilio_voice.webhook.twilio_response",
+                twilio_response=str(twilio_response),
+            )
            return response.text(str(twilio_response), content_type="text/xml")
 
        return twilio_voice_webhook
@@ -329,6 +363,13 @@ class TwilioVoiceInput(InputChannel):
            enhanced=self.enhanced,
        )
 
+        if not messages:
+            # In case bot has a greet message disabled
+            # or if the bot is not configured to send an initial message
+            # we need to send a voice response with speech settings
+            voice_response.append(gather)
+            return voice_response
+
        # Add pauses between messages.
        # Add a listener to the last message to listen for user response.
        for i, message in enumerate(messages):
@@ -347,6 +388,12 @@ class TwilioVoiceInput(InputChannel):
 
        return voice_response
 
+    def _raise_invalid_credentials_exception(self) -> None:
+        raise InvalidConfigException(
+            "In TwilioVoice channel, either both username and password "
+            "or neither should be provided. "
+        )
+
 
 class TwilioVoiceCollectingOutputChannel(CollectingOutputChannel):
     """Output channel that collects send messages in a list.
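The empty-messages fallback added to _build_twilio_voice_response keeps the call listening even when the bot produced no utterances (for example, a disabled greeting). A hedged sketch of the resulting TwiML, assuming the twilio package is installed; the Gather settings below are illustrative, not the channel's actual configuration:

from twilio.twiml.voice_response import Gather, VoiceResponse

voice_response = VoiceResponse()
# illustrative speech settings; the real channel derives them from its config
gather = Gather(input="speech", action="/webhooks/twilio_voice/webhook", timeout=5)

messages = []  # bot produced no responses
if not messages:
    voice_response.append(gather)

print(str(voice_response))  # <Response><Gather ... /></Response> plus the XML header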
rasa/core/channels/voice_stream/tts/azure.py

@@ -54,13 +54,22 @@ class AzureTTS(TTSEngine[AzureTTSConfig]):
                    async for data in response.content.iter_chunked(1024):
                        yield self.engine_bytes_to_rasa_audio_bytes(data)
                    return
+                elif response.status == 401:
+                    structlogger.error(
+                        "azure.synthesize.rest.authentication_failed",
+                        status_code=response.status,
+                    )
+                    raise TTSError(
+                        f"Authentication failed. Please check your API key: {response.status}"  # noqa: E501
+                    )
                else:
+                    response_text = await response.text()
                    structlogger.error(
                        "azure.synthesize.rest.failed",
                        status_code=response.status,
-                        msg=
+                        msg=response_text,
                    )
-                    raise TTSError(f"TTS failed: {
+                    raise TTSError(f"TTS failed: {response_text}")
        except ClientConnectorError as e:
            raise TTSError(e)
        except TimeoutError as e:
rasa/core/channels/voice_stream/twilio_media_streams.py

@@ -1,7 +1,9 @@
+from __future__ import annotations
+
 import base64
 import json
 import uuid
-from typing import Any, Awaitable, Callable, Dict, Optional, Text, Tuple
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Optional, Text, Tuple
 
 import structlog
 from sanic import (  # type: ignore[attr-defined]
@@ -12,7 +14,11 @@ from sanic import (  # type: ignore[attr-defined]
     response,
 )
 
-from rasa.core.channels import UserMessage
+from rasa.core.channels import InputChannel, UserMessage
+from rasa.core.channels.channel import (
+    create_auth_requested_response_provider,
+    requires_basic_auth,
+)
 from rasa.core.channels.voice_ready.utils import CallParameters
 from rasa.core.channels.voice_stream.audio_bytes import RasaAudioBytes
 from rasa.core.channels.voice_stream.call_state import call_state
@@ -25,10 +31,24 @@ from rasa.core.channels.voice_stream.voice_channel import (
     VoiceInputChannel,
     VoiceOutputChannel,
 )
+from rasa.shared.exceptions import RasaException
+
+if TYPE_CHECKING:
+    from twilio.twiml.voice_response import VoiceResponse
 
 logger = structlog.get_logger(__name__)
 
 
+TWILIO_MEDIA_STREAMS_WEBHOOK_PATH = "webhooks/twilio_media_streams/webhook"
+TWILIO_MEDIA_STREAMS_WEBSOCKET_PATH = "webhooks/twilio_media_streams/websocket"
+
+
+CALL_SID_REQUEST_KEY = "CallSid"
+FROM_NUMBER_REQUEST_KEY = "From"
+TO_NUMBER_REQUEST_KEY = "To"
+DIRECTION_REQUEST_KEY = "Direction"
+
+
 def map_call_params(data: Dict[Text, Any]) -> CallParameters:
     """Map the twilio stream parameters to the CallParameters dataclass."""
     stream_sid = data["streamSid"]
@@ -77,6 +97,40 @@ class TwilioMediaStreamsOutputChannel(VoiceOutputChannel):
 
 
 class TwilioMediaStreamsInputChannel(VoiceInputChannel):
+    def __init__(
+        self,
+        server_url: str,
+        asr_config: Dict,
+        tts_config: Dict,
+        monitor_silence: bool = False,
+        username: Optional[Text] = None,
+        password: Optional[Text] = None,
+    ):
+        super().__init__(server_url, asr_config, tts_config, monitor_silence)
+        self.username = username
+        self.password = password
+
+    @classmethod
+    def from_credentials(cls, credentials: Optional[Dict[str, Any]]) -> InputChannel:
+        credentials = credentials or {}
+
+        username = credentials.get("username")
+        password = credentials.get("password")
+        if (username is None) != (password is None):
+            raise RasaException(
+                "In TwilioMediaStreams channel, either both username and password "
+                "or neither should be provided. "
+            )
+
+        return cls(
+            credentials["server_url"],
+            credentials["asr"],
+            credentials["tts"],
+            credentials.get("monitor_silence", False),
+            username=username,
+            password=password,
+        )
+
     @classmethod
     def name(cls) -> str:
        return "twilio_media_streams"
@@ -130,16 +184,6 @@ class TwilioMediaStreamsInputChannel(VoiceInputChannel):
            self.tts_cache,
        )
 
-    def websocket_stream_url(self) -> str:
-        """Returns the websocket stream URL."""
-        # depending on the config value, the url might contain http as a
-        # protocol or not - we'll make sure both work
-        if self.server_url.startswith("http"):
-            base_url = self.server_url.replace("http", "ws")
-        else:
-            base_url = f"wss://{self.server_url}"
-        return f"{base_url}/webhooks/twilio_media_streams/websocket"
-
     def blueprint(
         self, on_new_message: Callable[[UserMessage], Awaitable[Any]]
     ) -> Blueprint:
@@ -151,22 +195,20 @@ class TwilioMediaStreamsInputChannel(VoiceInputChannel):
            return response.json({"status": "ok"})
 
        @blueprint.route("/webhook", methods=["POST"])
+        @requires_basic_auth(
+            username=self.username,
+            password=self.password,
+            auth_request_provider=create_auth_requested_response_provider(
+                realm=TWILIO_MEDIA_STREAMS_WEBHOOK_PATH
+            ),
+        )
        async def receive(request: Request) -> HTTPResponse:
-
-
-            voice_response = VoiceResponse()
-            start = Connect()
-            stream = start.stream(url=self.websocket_stream_url())
-            # pass information about the call to the webhook - so we can
-            # store it in the input channel
-            stream.parameter(name="call_id", value=request.form.get("CallSid", None))
-            stream.parameter(name="user_phone", value=request.form.get("From", None))
-            stream.parameter(name="bot_phone", value=request.form.get("To", None))
-            stream.parameter(
-                name="direction", value=request.form.get("Direction", None)
-            )
+            voice_response = self._build_twilio_response(request)
 
-
+            logger.debug(
+                "twilio_media_streams.webhook.twilio_response",
+                twilio_response=str(voice_response),
+            )
 
            return response.text(str(voice_response), content_type="text/xml")
 
@@ -175,3 +217,36 @@ class TwilioMediaStreamsInputChannel(VoiceInputChannel):
            await self.run_audio_streaming(on_new_message, ws)
 
        return blueprint
+
+    def _websocket_stream_url(self) -> str:
+        """Returns the websocket stream URL."""
+        # depending on the config value, the url might contain http as a
+        # protocol or not - we'll make sure both work
+        if self.server_url.startswith("http"):
+            base_url = self.server_url.replace("http", "ws")
+        else:
+            base_url = f"wss://{self.server_url}"
+        return f"{base_url}/{TWILIO_MEDIA_STREAMS_WEBSOCKET_PATH}"
+
+    def _build_twilio_response(self, request: Request) -> VoiceResponse:
+        from twilio.twiml.voice_response import Connect, VoiceResponse
+
+        voice_response = VoiceResponse()
+        start = Connect()
+        stream = start.stream(url=self._websocket_stream_url())
+        # pass information about the call to the webhook - so we can
+        # store it in the input channel
+        stream.parameter(
+            name="call_id", value=request.form.get(CALL_SID_REQUEST_KEY, None)
+        )
+        stream.parameter(
+            name="user_phone", value=request.form.get(FROM_NUMBER_REQUEST_KEY, None)
+        )
+        stream.parameter(
+            name="bot_phone", value=request.form.get(TO_NUMBER_REQUEST_KEY, None)
+        )
+        stream.parameter(
+            name="direction", value=request.form.get(DIRECTION_REQUEST_KEY, None)
+        )
+        voice_response.append(start)
+        return voice_response
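The relocated _websocket_stream_url helper rewrites the configured server URL into the websocket endpoint used in the TwiML stream. A standalone sketch of that rewrite, with made-up server URLs:

def websocket_stream_url(server_url: str) -> str:
    # mirrors the helper above: accept URLs with or without an http(s) scheme
    if server_url.startswith("http"):
        base_url = server_url.replace("http", "ws")
    else:
        base_url = f"wss://{server_url}"
    return f"{base_url}/webhooks/twilio_media_streams/websocket"


print(websocket_stream_url("https://bot.example.com"))
# wss://bot.example.com/webhooks/twilio_media_streams/websocket
print(websocket_stream_url("bot.example.com"))
# wss://bot.example.com/webhooks/twilio_media_streams/websocket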
rasa/core/policies/flows/flow_executor.py

@@ -23,7 +23,6 @@ from rasa.core.policies.flows.flow_step_result import (
 )
 from rasa.dialogue_understanding.commands import CancelFlowCommand
 from rasa.dialogue_understanding.patterns.cancel import CancelPatternFlowStackFrame
-from rasa.dialogue_understanding.patterns.clarify import ClarifyPatternFlowStackFrame
 from rasa.dialogue_understanding.patterns.collect_information import (
     CollectInformationPatternFlowStackFrame,
 )
@@ -51,7 +50,6 @@ from rasa.dialogue_understanding.stack.frames.flow_stack_frame import (
 )
 from rasa.dialogue_understanding.stack.utils import (
     top_user_flow_frame,
-    user_flows_on_the_stack,
 )
 from rasa.shared.constants import RASA_PATTERN_HUMAN_HANDOFF
 from rasa.shared.core.constants import (
@@ -280,33 +278,6 @@ def trigger_pattern_continue_interrupted(
     return events
 
 
-def trigger_pattern_clarification(
-    current_frame: DialogueStackFrame, stack: DialogueStack, flows: FlowsList
-) -> None:
-    """Trigger the pattern to clarify which topic to continue if needed."""
-    if not isinstance(current_frame, UserFlowStackFrame):
-        return None
-
-    if current_frame.frame_type in [
-        FlowStackFrameType.CALL,
-        FlowStackFrameType.INTERRUPT,
-    ]:
-        # we want to return to the flow that called
-        # the current flow or the flow that was interrupted
-        # by the current flow
-        return None
-
-    pending_flows = [
-        flows.flow_by_id(frame.flow_id)
-        for frame in stack.frames
-        if isinstance(frame, UserFlowStackFrame)
-        and frame.flow_id != current_frame.flow_id
-    ]
-
-    flow_names = [flow.readable_name() for flow in pending_flows if flow is not None]
-    stack.push(ClarifyPatternFlowStackFrame(names=flow_names))
-
-
 def trigger_pattern_completed(
     current_frame: DialogueStackFrame, stack: DialogueStack, flows: FlowsList
 ) -> None:
@@ -675,15 +646,9 @@ def _run_end_step(
     structlogger.debug("flow.step.run.flow_end")
     current_frame = stack.pop()
     trigger_pattern_completed(current_frame, stack, flows)
-    resumed_events =
-
-
-        # we need to trigger the pattern clarify
-        trigger_pattern_clarification(current_frame, stack, flows)
-    else:
-        resumed_events = trigger_pattern_continue_interrupted(
-            current_frame, stack, flows, tracker
-        )
+    resumed_events = trigger_pattern_continue_interrupted(
+        current_frame, stack, flows, tracker
+    )
     reset_events: List[Event] = reset_scoped_slots(current_frame, flow, tracker)
     return ContinueFlowWithNextStep(
        events=initial_events + reset_events + resumed_events, has_flow_ended=True