rasa-pro 3.10.16__py3-none-any.whl → 3.11.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.

This version of rasa-pro has been flagged as potentially problematic.

Files changed (240)
  1. rasa/__main__.py +31 -15
  2. rasa/api.py +12 -2
  3. rasa/cli/arguments/default_arguments.py +24 -4
  4. rasa/cli/arguments/run.py +15 -0
  5. rasa/cli/arguments/shell.py +5 -1
  6. rasa/cli/arguments/train.py +17 -9
  7. rasa/cli/evaluate.py +7 -7
  8. rasa/cli/inspect.py +19 -7
  9. rasa/cli/interactive.py +1 -0
  10. rasa/cli/llm_fine_tuning.py +11 -14
  11. rasa/cli/project_templates/calm/config.yml +5 -7
  12. rasa/cli/project_templates/calm/endpoints.yml +15 -2
  13. rasa/cli/project_templates/tutorial/config.yml +8 -5
  14. rasa/cli/project_templates/tutorial/data/flows.yml +1 -1
  15. rasa/cli/project_templates/tutorial/data/patterns.yml +5 -0
  16. rasa/cli/project_templates/tutorial/domain.yml +14 -0
  17. rasa/cli/project_templates/tutorial/endpoints.yml +5 -0
  18. rasa/cli/run.py +7 -0
  19. rasa/cli/scaffold.py +4 -2
  20. rasa/cli/studio/upload.py +0 -15
  21. rasa/cli/train.py +14 -53
  22. rasa/cli/utils.py +14 -11
  23. rasa/cli/x.py +7 -7
  24. rasa/constants.py +3 -1
  25. rasa/core/actions/action.py +77 -33
  26. rasa/core/actions/action_hangup.py +29 -0
  27. rasa/core/actions/action_repeat_bot_messages.py +89 -0
  28. rasa/core/actions/e2e_stub_custom_action_executor.py +5 -1
  29. rasa/core/actions/http_custom_action_executor.py +4 -0
  30. rasa/core/agent.py +2 -2
  31. rasa/core/brokers/kafka.py +3 -1
  32. rasa/core/brokers/pika.py +3 -1
  33. rasa/core/channels/__init__.py +10 -6
  34. rasa/core/channels/channel.py +41 -4
  35. rasa/core/channels/development_inspector.py +150 -46
  36. rasa/core/channels/inspector/README.md +1 -1
  37. rasa/core/channels/inspector/dist/assets/{arc-b6e548fe.js → arc-bc141fb2.js} +1 -1
  38. rasa/core/channels/inspector/dist/assets/{c4Diagram-d0fbc5ce-fa03ac9e.js → c4Diagram-d0fbc5ce-be2db283.js} +1 -1
  39. rasa/core/channels/inspector/dist/assets/{classDiagram-936ed81e-ee67392a.js → classDiagram-936ed81e-55366915.js} +1 -1
  40. rasa/core/channels/inspector/dist/assets/{classDiagram-v2-c3cb15f1-9b283fae.js → classDiagram-v2-c3cb15f1-bb529518.js} +1 -1
  41. rasa/core/channels/inspector/dist/assets/{createText-62fc7601-8b6fcc2a.js → createText-62fc7601-b0ec81d6.js} +1 -1
  42. rasa/core/channels/inspector/dist/assets/{edges-f2ad444c-22e77f4f.js → edges-f2ad444c-6166330c.js} +1 -1
  43. rasa/core/channels/inspector/dist/assets/{erDiagram-9d236eb7-60ffc87f.js → erDiagram-9d236eb7-5ccc6a8e.js} +1 -1
  44. rasa/core/channels/inspector/dist/assets/{flowDb-1972c806-9dd802e4.js → flowDb-1972c806-fca3bfe4.js} +1 -1
  45. rasa/core/channels/inspector/dist/assets/{flowDiagram-7ea5b25a-5fa1912f.js → flowDiagram-7ea5b25a-4739080f.js} +1 -1
  46. rasa/core/channels/inspector/dist/assets/flowDiagram-v2-855bc5b3-736177bf.js +1 -0
  47. rasa/core/channels/inspector/dist/assets/{flowchart-elk-definition-abe16c3d-622a1fd2.js → flowchart-elk-definition-abe16c3d-7c1b0e0f.js} +1 -1
  48. rasa/core/channels/inspector/dist/assets/{ganttDiagram-9b5ea136-e285a63a.js → ganttDiagram-9b5ea136-772fd050.js} +1 -1
  49. rasa/core/channels/inspector/dist/assets/{gitGraphDiagram-99d0ae7c-f237bdca.js → gitGraphDiagram-99d0ae7c-8eae1dc9.js} +1 -1
  50. rasa/core/channels/inspector/dist/assets/{index-2c4b9a3b-4b03d70e.js → index-2c4b9a3b-f55afcdf.js} +1 -1
  51. rasa/core/channels/inspector/dist/assets/index-e7cef9de.js +1317 -0
  52. rasa/core/channels/inspector/dist/assets/{infoDiagram-736b4530-72a0fa5f.js → infoDiagram-736b4530-124d4a14.js} +1 -1
  53. rasa/core/channels/inspector/dist/assets/{journeyDiagram-df861f2b-82218c41.js → journeyDiagram-df861f2b-7c4fae44.js} +1 -1
  54. rasa/core/channels/inspector/dist/assets/{layout-78cff630.js → layout-b9885fb6.js} +1 -1
  55. rasa/core/channels/inspector/dist/assets/{line-5038b469.js → line-7c59abb6.js} +1 -1
  56. rasa/core/channels/inspector/dist/assets/{linear-c4fc4098.js → linear-4776f780.js} +1 -1
  57. rasa/core/channels/inspector/dist/assets/{mindmap-definition-beec6740-c33c8ea6.js → mindmap-definition-beec6740-2332c46c.js} +1 -1
  58. rasa/core/channels/inspector/dist/assets/{pieDiagram-dbbf0591-a8d03059.js → pieDiagram-dbbf0591-8fb39303.js} +1 -1
  59. rasa/core/channels/inspector/dist/assets/{quadrantDiagram-4d7f4fd6-6a0e56b2.js → quadrantDiagram-4d7f4fd6-3c7180a2.js} +1 -1
  60. rasa/core/channels/inspector/dist/assets/{requirementDiagram-6fc4c22a-2dc7c7bd.js → requirementDiagram-6fc4c22a-e910bcb8.js} +1 -1
  61. rasa/core/channels/inspector/dist/assets/{sankeyDiagram-8f13d901-2360fe39.js → sankeyDiagram-8f13d901-ead16c89.js} +1 -1
  62. rasa/core/channels/inspector/dist/assets/{sequenceDiagram-b655622a-41b9f9ad.js → sequenceDiagram-b655622a-29a02a19.js} +1 -1
  63. rasa/core/channels/inspector/dist/assets/{stateDiagram-59f0c015-0aad326f.js → stateDiagram-59f0c015-042b3137.js} +1 -1
  64. rasa/core/channels/inspector/dist/assets/{stateDiagram-v2-2b26beab-9847d984.js → stateDiagram-v2-2b26beab-2178c0f3.js} +1 -1
  65. rasa/core/channels/inspector/dist/assets/{styles-080da4f6-564d890e.js → styles-080da4f6-23ffa4fc.js} +1 -1
  66. rasa/core/channels/inspector/dist/assets/{styles-3dcbcfbf-38957613.js → styles-3dcbcfbf-94f59763.js} +1 -1
  67. rasa/core/channels/inspector/dist/assets/{styles-9c745c82-f0fc6921.js → styles-9c745c82-78a6bebc.js} +1 -1
  68. rasa/core/channels/inspector/dist/assets/{svgDrawCommon-4835440b-ef3c5a77.js → svgDrawCommon-4835440b-eae2a6f6.js} +1 -1
  69. rasa/core/channels/inspector/dist/assets/{timeline-definition-5b62e21b-bf3e91c1.js → timeline-definition-5b62e21b-5c968d92.js} +1 -1
  70. rasa/core/channels/inspector/dist/assets/{xychartDiagram-2b33534f-4d4026c0.js → xychartDiagram-2b33534f-fd3db0d5.js} +1 -1
  71. rasa/core/channels/inspector/dist/index.html +18 -17
  72. rasa/core/channels/inspector/index.html +17 -16
  73. rasa/core/channels/inspector/package.json +5 -1
  74. rasa/core/channels/inspector/src/App.tsx +118 -68
  75. rasa/core/channels/inspector/src/components/Chat.tsx +95 -0
  76. rasa/core/channels/inspector/src/components/DiagramFlow.tsx +11 -10
  77. rasa/core/channels/inspector/src/components/DialogueStack.tsx +10 -25
  78. rasa/core/channels/inspector/src/components/LoadingSpinner.tsx +6 -3
  79. rasa/core/channels/inspector/src/helpers/audiostream.ts +165 -0
  80. rasa/core/channels/inspector/src/helpers/formatters.test.ts +10 -0
  81. rasa/core/channels/inspector/src/helpers/formatters.ts +107 -41
  82. rasa/core/channels/inspector/src/helpers/utils.ts +92 -7
  83. rasa/core/channels/inspector/src/types.ts +21 -1
  84. rasa/core/channels/inspector/yarn.lock +94 -1
  85. rasa/core/channels/rest.py +51 -46
  86. rasa/core/channels/socketio.py +28 -1
  87. rasa/core/channels/telegram.py +1 -1
  88. rasa/core/channels/twilio.py +1 -1
  89. rasa/core/channels/{audiocodes.py → voice_ready/audiocodes.py} +122 -69
  90. rasa/core/channels/{voice_aware → voice_ready}/jambonz.py +26 -8
  91. rasa/core/channels/{voice_aware → voice_ready}/jambonz_protocol.py +57 -5
  92. rasa/core/channels/{twilio_voice.py → voice_ready/twilio_voice.py} +64 -28
  93. rasa/core/channels/voice_ready/utils.py +37 -0
  94. rasa/core/channels/voice_stream/asr/__init__.py +0 -0
  95. rasa/core/channels/voice_stream/asr/asr_engine.py +89 -0
  96. rasa/core/channels/voice_stream/asr/asr_event.py +18 -0
  97. rasa/core/channels/voice_stream/asr/azure.py +129 -0
  98. rasa/core/channels/voice_stream/asr/deepgram.py +90 -0
  99. rasa/core/channels/voice_stream/audio_bytes.py +8 -0
  100. rasa/core/channels/voice_stream/browser_audio.py +107 -0
  101. rasa/core/channels/voice_stream/call_state.py +23 -0
  102. rasa/core/channels/voice_stream/tts/__init__.py +0 -0
  103. rasa/core/channels/voice_stream/tts/azure.py +106 -0
  104. rasa/core/channels/voice_stream/tts/cartesia.py +118 -0
  105. rasa/core/channels/voice_stream/tts/tts_cache.py +27 -0
  106. rasa/core/channels/voice_stream/tts/tts_engine.py +58 -0
  107. rasa/core/channels/voice_stream/twilio_media_streams.py +173 -0
  108. rasa/core/channels/voice_stream/util.py +57 -0
  109. rasa/core/channels/voice_stream/voice_channel.py +427 -0
  110. rasa/core/information_retrieval/qdrant.py +1 -0
  111. rasa/core/nlg/contextual_response_rephraser.py +45 -17
  112. rasa/{nlu → core}/persistor.py +203 -68
  113. rasa/core/policies/enterprise_search_policy.py +119 -63
  114. rasa/core/policies/flows/flow_executor.py +15 -22
  115. rasa/core/policies/intentless_policy.py +83 -28
  116. rasa/core/processor.py +25 -0
  117. rasa/core/run.py +12 -2
  118. rasa/core/secrets_manager/constants.py +4 -0
  119. rasa/core/secrets_manager/factory.py +8 -0
  120. rasa/core/secrets_manager/vault.py +11 -1
  121. rasa/core/training/interactive.py +33 -34
  122. rasa/core/utils.py +47 -21
  123. rasa/dialogue_understanding/coexistence/llm_based_router.py +41 -14
  124. rasa/dialogue_understanding/commands/__init__.py +6 -0
  125. rasa/dialogue_understanding/commands/repeat_bot_messages_command.py +60 -0
  126. rasa/dialogue_understanding/commands/session_end_command.py +61 -0
  127. rasa/dialogue_understanding/commands/user_silence_command.py +59 -0
  128. rasa/dialogue_understanding/commands/utils.py +5 -0
  129. rasa/dialogue_understanding/generator/constants.py +2 -0
  130. rasa/dialogue_understanding/generator/flow_retrieval.py +47 -9
  131. rasa/dialogue_understanding/generator/llm_based_command_generator.py +38 -15
  132. rasa/dialogue_understanding/generator/llm_command_generator.py +1 -1
  133. rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generator.py +35 -13
  134. rasa/dialogue_understanding/generator/single_step/command_prompt_template.jinja2 +3 -0
  135. rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +60 -13
  136. rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +53 -0
  137. rasa/dialogue_understanding/patterns/repeat.py +37 -0
  138. rasa/dialogue_understanding/patterns/user_silence.py +37 -0
  139. rasa/dialogue_understanding/processor/command_processor.py +21 -1
  140. rasa/e2e_test/aggregate_test_stats_calculator.py +1 -11
  141. rasa/e2e_test/assertions.py +136 -61
  142. rasa/e2e_test/assertions_schema.yml +23 -0
  143. rasa/e2e_test/e2e_test_case.py +85 -6
  144. rasa/e2e_test/e2e_test_runner.py +2 -3
  145. rasa/e2e_test/utils/e2e_yaml_utils.py +1 -1
  146. rasa/engine/graph.py +3 -10
  147. rasa/engine/loader.py +12 -0
  148. rasa/engine/recipes/config_files/default_config.yml +0 -3
  149. rasa/engine/recipes/default_recipe.py +0 -1
  150. rasa/engine/recipes/graph_recipe.py +0 -1
  151. rasa/engine/runner/dask.py +2 -2
  152. rasa/engine/storage/local_model_storage.py +12 -42
  153. rasa/engine/storage/storage.py +1 -5
  154. rasa/engine/validation.py +527 -74
  155. rasa/model_manager/__init__.py +0 -0
  156. rasa/model_manager/config.py +40 -0
  157. rasa/model_manager/model_api.py +559 -0
  158. rasa/model_manager/runner_service.py +286 -0
  159. rasa/model_manager/socket_bridge.py +146 -0
  160. rasa/model_manager/studio_jwt_auth.py +86 -0
  161. rasa/model_manager/trainer_service.py +325 -0
  162. rasa/model_manager/utils.py +87 -0
  163. rasa/model_manager/warm_rasa_process.py +187 -0
  164. rasa/model_service.py +112 -0
  165. rasa/model_training.py +42 -23
  166. rasa/nlu/tokenizers/whitespace_tokenizer.py +3 -14
  167. rasa/server.py +4 -2
  168. rasa/shared/constants.py +60 -8
  169. rasa/shared/core/constants.py +13 -0
  170. rasa/shared/core/domain.py +107 -50
  171. rasa/shared/core/events.py +29 -0
  172. rasa/shared/core/flows/flow.py +5 -0
  173. rasa/shared/core/flows/flows_list.py +19 -6
  174. rasa/shared/core/flows/flows_yaml_schema.json +10 -0
  175. rasa/shared/core/flows/utils.py +39 -0
  176. rasa/shared/core/flows/validation.py +121 -0
  177. rasa/shared/core/flows/yaml_flows_io.py +15 -27
  178. rasa/shared/core/slots.py +5 -0
  179. rasa/shared/importers/importer.py +59 -41
  180. rasa/shared/importers/multi_project.py +23 -11
  181. rasa/shared/importers/rasa.py +12 -3
  182. rasa/shared/importers/remote_importer.py +196 -0
  183. rasa/shared/importers/utils.py +3 -1
  184. rasa/shared/nlu/training_data/formats/rasa_yaml.py +18 -3
  185. rasa/shared/nlu/training_data/training_data.py +18 -19
  186. rasa/shared/providers/_configs/litellm_router_client_config.py +220 -0
  187. rasa/shared/providers/_configs/model_group_config.py +167 -0
  188. rasa/shared/providers/_configs/openai_client_config.py +1 -1
  189. rasa/shared/providers/_configs/rasa_llm_client_config.py +73 -0
  190. rasa/shared/providers/_configs/self_hosted_llm_client_config.py +1 -0
  191. rasa/shared/providers/_configs/utils.py +16 -0
  192. rasa/shared/providers/_utils.py +79 -0
  193. rasa/shared/providers/embedding/_base_litellm_embedding_client.py +13 -29
  194. rasa/shared/providers/embedding/azure_openai_embedding_client.py +54 -21
  195. rasa/shared/providers/embedding/default_litellm_embedding_client.py +24 -0
  196. rasa/shared/providers/embedding/litellm_router_embedding_client.py +135 -0
  197. rasa/shared/providers/llm/_base_litellm_client.py +34 -22
  198. rasa/shared/providers/llm/azure_openai_llm_client.py +50 -29
  199. rasa/shared/providers/llm/default_litellm_llm_client.py +24 -0
  200. rasa/shared/providers/llm/litellm_router_llm_client.py +182 -0
  201. rasa/shared/providers/llm/rasa_llm_client.py +112 -0
  202. rasa/shared/providers/llm/self_hosted_llm_client.py +5 -29
  203. rasa/shared/providers/mappings.py +19 -0
  204. rasa/shared/providers/router/__init__.py +0 -0
  205. rasa/shared/providers/router/_base_litellm_router_client.py +183 -0
  206. rasa/shared/providers/router/router_client.py +73 -0
  207. rasa/shared/utils/common.py +40 -24
  208. rasa/shared/utils/health_check/__init__.py +0 -0
  209. rasa/shared/utils/health_check/embeddings_health_check_mixin.py +31 -0
  210. rasa/shared/utils/health_check/health_check.py +258 -0
  211. rasa/shared/utils/health_check/llm_health_check_mixin.py +31 -0
  212. rasa/shared/utils/io.py +27 -6
  213. rasa/shared/utils/llm.py +354 -44
  214. rasa/shared/utils/schemas/events.py +2 -0
  215. rasa/shared/utils/schemas/model_config.yml +0 -10
  216. rasa/shared/utils/yaml.py +181 -38
  217. rasa/studio/data_handler.py +3 -1
  218. rasa/studio/upload.py +160 -74
  219. rasa/telemetry.py +94 -17
  220. rasa/tracing/config.py +3 -1
  221. rasa/tracing/instrumentation/attribute_extractors.py +95 -18
  222. rasa/tracing/instrumentation/instrumentation.py +121 -0
  223. rasa/utils/common.py +5 -0
  224. rasa/utils/endpoints.py +27 -1
  225. rasa/utils/io.py +8 -16
  226. rasa/utils/log_utils.py +9 -2
  227. rasa/utils/sanic_error_handler.py +32 -0
  228. rasa/validator.py +110 -16
  229. rasa/version.py +1 -1
  230. {rasa_pro-3.10.16.dist-info → rasa_pro-3.11.0.dist-info}/METADATA +16 -14
  231. {rasa_pro-3.10.16.dist-info → rasa_pro-3.11.0.dist-info}/RECORD +236 -185
  232. rasa/core/channels/inspector/dist/assets/flowDiagram-v2-855bc5b3-1844e5a5.js +0 -1
  233. rasa/core/channels/inspector/dist/assets/index-a5d3e69d.js +0 -1040
  234. rasa/core/channels/voice_aware/utils.py +0 -20
  235. rasa/llm_fine_tuning/notebooks/unsloth_finetuning.ipynb +0 -407
  236. /rasa/core/channels/{voice_aware → voice_ready}/__init__.py +0 -0
  237. /rasa/core/channels/{voice_native → voice_stream}/__init__.py +0 -0
  238. {rasa_pro-3.10.16.dist-info → rasa_pro-3.11.0.dist-info}/NOTICE +0 -0
  239. {rasa_pro-3.10.16.dist-info → rasa_pro-3.11.0.dist-info}/WHEEL +0 -0
  240. {rasa_pro-3.10.16.dist-info → rasa_pro-3.11.0.dist-info}/entry_points.txt +0 -0
rasa/core/channels/{twilio_voice.py → voice_ready/twilio_voice.py}
@@ -1,9 +1,11 @@
  from sanic import Blueprint, response
- from sanic.request import Request
+ from sanic.request import Request, RequestParameters
  from sanic.response import HTTPResponse
  from twilio.twiml.voice_response import VoiceResponse, Gather
  from typing import Text, Callable, Awaitable, List, Any, Dict, Optional
+ from dataclasses import asdict

+ import structlog
  import rasa.utils.io
  import rasa.shared.utils.io
  from rasa.shared.core.events import BotUttered
@@ -13,6 +15,19 @@ from rasa.core.channels.channel import (
      CollectingOutputChannel,
      UserMessage,
  )
+ from rasa.core.channels.voice_ready.utils import CallParameters
+
+ logger = structlog.get_logger(__name__)
+
+
+ def map_call_params(form: RequestParameters) -> CallParameters:
+     """Map the Twilio call parameters to the CallParameters dataclass."""
+     return CallParameters(
+         call_id=form.get("CallSid"),
+         user_phone=form.get("Caller"),
+         bot_phone=form.get("Called"),
+         direction=form.get("Direction"),
+     )


  class TwilioVoiceInput(InputChannel):
@@ -105,7 +120,6 @@ class TwilioVoiceInput(InputChannel):
          credentials = credentials or {}

          return cls(
-             credentials.get("initial_prompt", "hello"),
              credentials.get(
                  "reprompt_fallback_phrase",
                  "I'm sorry I didn't get that could you rephrase.",
@@ -118,7 +132,6 @@ class TwilioVoiceInput(InputChannel):

      def __init__(
          self,
-         initial_prompt: Optional[Text],
          reprompt_fallback_phrase: Optional[Text],
          assistant_voice: Optional[Text],
          speech_timeout: Text = "5",
@@ -128,14 +141,12 @@ class TwilioVoiceInput(InputChannel):
          """Creates a connection to Twilio voice.

          Args:
-             initial_prompt: text to use to prompt a conversation when call is answered.
              reprompt_fallback_phrase: phrase to use if no user response.
              assistant_voice: name of the assistant voice to use.
              speech_timeout: how long to pause when user finished speaking.
              speech_model: type of transcription model to use from Twilio.
              enhanced: toggle to use Twilio's premium speech transcription model.
          """
-         self.initial_prompt = initial_prompt
          self.reprompt_fallback_phrase = reprompt_fallback_phrase
          self.assistant_voice = assistant_voice
          self.speech_timeout = speech_timeout
@@ -239,22 +250,43 @@ class TwilioVoiceInput(InputChannel):
          text = request.form.get("SpeechResult")
          input_channel = self.name()
          call_status = request.form.get("CallStatus")
+         metadata = {}

          collector = TwilioVoiceCollectingOutputChannel()

+         logger.debug(
+             "twilio_voice.webhook",
+             sender_id=sender_id,
+             text=text,
+             call_status=call_status,
+         )
          # Provide an initial greeting to answer the user's call.
          if (text is None) and (call_status == "ringing"):
-             text = self.initial_prompt
+             text = "/session_start"
+             metadata = asdict(map_call_params(request.form))
+
+         # when call is disconnected
+         if call_status == "completed":
+             text = "/session_end"
+             metadata = {"reason": "user disconnected"}

          # determine the response.
          if text is not None:
+             logger.info("twilio_voice.webhook.text_not_none", sender_id=sender_id)
              await on_new_message(
-                 UserMessage(text, collector, sender_id, input_channel=input_channel)
+                 UserMessage(
+                     text,
+                     collector,
+                     sender_id,
+                     input_channel=input_channel,
+                     metadata=metadata,
+                 )
              )

              twilio_response = self._build_twilio_voice_response(collector.messages)
          # If the user doesn't respond resend the last message.
          else:
+             logger.info("twilio_voice.webhook.text_none", sender_id=sender_id)
              # Get last user utterance from tracker.
              tracker = await request.app.ctx.agent.tracker_store.retrieve(sender_id)
              last_response = None
@@ -285,6 +317,7 @@ class TwilioVoiceInput(InputChannel):
          self, messages: List[Dict[Text, Any]]
      ) -> VoiceResponse:
          """Builds the Twilio Voice Response object."""
+         logger.debug("twilio_voice.build_twilio_voice_response", messages=messages)
          voice_response = VoiceResponse()
          gather = Gather(
              input="speech",
@@ -299,6 +332,11 @@ class TwilioVoiceInput(InputChannel):
          # Add a listener to the last message to listen for user response.
          for i, message in enumerate(messages):
              msg_text = message["text"]
+             # Check if the message is a hangup message.
+             if message.get("custom", {}).get("hangup"):
+                 voice_response.hangup()
+                 break
+
              if i + 1 == len(messages):
                  gather.say(msg_text, voice=self.assistant_voice)
                  voice_response.append(gather)
@@ -320,38 +358,23 @@ class TwilioVoiceCollectingOutputChannel(CollectingOutputChannel):
          """Name of the output channel."""
          return "twilio_voice"

-     @staticmethod
-     def _emoji_warning(text: Text) -> None:
-         """Raises a warning if text contains an emoji."""
-         emoji_regex = rasa.utils.io.get_emoji_regex()
-         if emoji_regex.findall(text):
-             rasa.shared.utils.io.raise_warning(
-                 "Text contains an emoji in a voice response. "
-                 "Review responses to provide a voice-friendly alternative."
-             )
-
      async def send_text_message(
          self, recipient_id: Text, text: Text, **kwargs: Any
      ) -> None:
          """Sends the text message after removing emojis."""
-         self._emoji_warning(text)
+         text = rasa.utils.io.remove_emojis(text)
          for message_part in text.strip().split("\n\n"):
              await self._persist_message(self._message(recipient_id, text=message_part))

      async def send_text_with_buttons(
          self,
-         recipient_id: Text,
-         text: Text,
-         buttons: List[Dict[Text, Any]],
+         recipient_id: str,
+         text: str,
+         buttons: List[Dict[str, Any]],
          **kwargs: Any,
      ) -> None:
-         """Convert buttons into a voice representation."""
-         self._emoji_warning(text)
-         await self._persist_message(self._message(recipient_id, text=text))
-
-         for b in buttons:
-             self._emoji_warning(b["title"])
-             await self._persist_message(self._message(recipient_id, text=b["title"]))
+         """Uses the concise button output format for voice channels."""
+         await self.send_text_with_buttons_concise(recipient_id, text, buttons, **kwargs)

      async def send_image_url(
          self, recipient_id: Text, image: Text, **kwargs: Any
@@ -365,3 +388,16 @@ class TwilioVoiceCollectingOutputChannel(CollectingOutputChannel):
              "with a visual elements such as images and emojis "
              "that are used in your voice channel."
          )
+
+     async def hangup(self, recipient_id: Text, **kwargs: Any) -> None:
+         """
+         Indicate that the conversation should be ended.
+
+         Parent class is a collecting output channel, so we don't actually hang up
+         but we add a custom message to the list of messages to be sent.
+         This message will be picked up by _build_twilio_voice_response
+         which will hang up the call.
+         """
+         await self._persist_message(
+             self._message(recipient_id, custom={"hangup": True})
+         )
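
Taken together, the hunks above replace the old initial_prompt greeting with a /session_start trigger message and route hangups through a custom message collected by the output channel. Below is a minimal sketch of that flow, assuming rasa-pro 3.11 is installed (the import path follows the rename to voice_ready shown in the file list); the demo itself is illustrative and not part of the diff.

    import asyncio

    from rasa.core.channels.voice_ready.twilio_voice import (
        TwilioVoiceCollectingOutputChannel,
    )


    async def demo() -> None:
        collector = TwilioVoiceCollectingOutputChannel()
        await collector.send_text_message("caller-1", "Goodbye!")
        await collector.hangup("caller-1")
        # The last collected message now carries custom={"hangup": True};
        # _build_twilio_voice_response() turns it into VoiceResponse.hangup().
        print(collector.messages[-1]["custom"])


    asyncio.run(demo())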
rasa/core/channels/voice_ready/utils.py (new file)
@@ -0,0 +1,37 @@
+ import structlog
+ from dataclasses import dataclass
+ from typing import Optional
+
+
+ structlogger = structlog.get_logger()
+
+
+ def validate_voice_license_scope() -> None:
+     from rasa.utils.licensing import (
+         PRODUCT_AREA,
+         VOICE_SCOPE,
+         validate_license_from_env,
+     )
+
+     """Validate that the correct license scope is present."""
+     structlogger.info(
+         f"Validating current Rasa Pro license scope which must include "
+         f"the '{VOICE_SCOPE}' scope to use the voice channel."
+     )
+
+     voice_product_scope = PRODUCT_AREA + " " + VOICE_SCOPE
+     validate_license_from_env(product_area=voice_product_scope)
+
+
+ @dataclass
+ class CallParameters:
+     """Standardized call parameters for voice channels."""
+
+     call_id: str
+     user_phone: str
+     bot_phone: str
+     user_name: Optional[str] = None
+     user_host: Optional[str] = None
+     bot_host: Optional[str] = None
+     direction: Optional[str] = None
+     stream_id: Optional[str] = None
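
The CallParameters dataclass standardizes call metadata across voice channels: each channel maps its provider-specific webhook fields into it and attaches the result to the UserMessage as metadata, as the twilio_voice hunks above do with asdict(map_call_params(...)). A small illustrative sketch with made-up values:

    from dataclasses import asdict

    from rasa.core.channels.voice_ready.utils import CallParameters

    params = CallParameters(
        call_id="CA123",          # e.g. Twilio's CallSid
        user_phone="+15550100",   # Caller
        bot_phone="+15550111",    # Called
        direction="inbound",
    )
    metadata = asdict(params)  # passed on as UserMessage(..., metadata=metadata)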
rasa/core/channels/voice_stream/asr/__init__.py (file without changes)
rasa/core/channels/voice_stream/asr/asr_engine.py (new file)
@@ -0,0 +1,89 @@
+ from dataclasses import dataclass
+ from typing import (
+     Dict,
+     AsyncIterator,
+     Any,
+     Generic,
+     Optional,
+     Tuple,
+     Type,
+     TypeVar,
+ )
+
+ from websockets.legacy.client import WebSocketClientProtocol
+
+ from rasa.core.channels.voice_stream.asr.asr_event import ASREvent
+ from rasa.core.channels.voice_stream.audio_bytes import RasaAudioBytes
+ from rasa.core.channels.voice_stream.util import MergeableConfig
+ from rasa.shared.exceptions import ConnectionException
+ from rasa.shared.utils.common import validate_environment
+
+ T = TypeVar("T", bound="ASREngineConfig")
+ E = TypeVar("E", bound="ASREngine")
+
+
+ @dataclass
+ class ASREngineConfig(MergeableConfig):
+     pass
+
+
+ class ASREngine(Generic[T]):
+     required_env_vars: Tuple[str, ...] = ()
+     required_packages: Tuple[str, ...] = ()
+
+     def __init__(self, config: Optional[T] = None):
+         self.config = self.get_default_config().merge(config)
+         self.asr_socket: Optional[WebSocketClientProtocol] = None
+         validate_environment(
+             self.required_env_vars,
+             self.required_packages,
+             f"ASR Engine {self.__class__.__name__}",
+         )
+
+     async def connect(self) -> None:
+         self.asr_socket = await self.open_websocket_connection()
+
+     async def open_websocket_connection(self) -> WebSocketClientProtocol:
+         """Connect to the ASR system."""
+         raise NotImplementedError
+
+     @classmethod
+     def from_config_dict(cls: Type[E], config: Dict) -> E:
+         raise NotImplementedError
+
+     async def close_connection(self) -> None:
+         if self.asr_socket:
+             await self.asr_socket.close()
+
+     async def signal_audio_done(self) -> None:
+         """Signal to the ASR Api that you are done sending data."""
+         raise NotImplementedError
+
+     async def send_audio_chunks(self, chunk: RasaAudioBytes) -> None:
+         """Send audio chunks to the ASR system via the websocket."""
+         if self.asr_socket is None:
+             raise ConnectionException("Websocket not connected.")
+         engine_bytes = self.rasa_audio_bytes_to_engine_bytes(chunk)
+         await self.asr_socket.send(engine_bytes)
+
+     def rasa_audio_bytes_to_engine_bytes(self, chunk: RasaAudioBytes) -> bytes:
+         """Convert RasaAudioBytes to bytes usable by this engine."""
+         raise NotImplementedError
+
+     async def stream_asr_events(self) -> AsyncIterator[ASREvent]:
+         """Stream the events returned by the ASR system as it is fed audio bytes."""
+         if self.asr_socket is None:
+             raise ConnectionException("Websocket not connected.")
+         async for message in self.asr_socket:
+             asr_event = self.engine_event_to_asr_event(message)
+             if asr_event:
+                 yield asr_event
+
+     def engine_event_to_asr_event(self, e: Any) -> Optional[ASREvent]:
+         """Translate an engine event to a common ASREvent."""
+         raise NotImplementedError
+
+     @staticmethod
+     def get_default_config() -> T:
+         """Get the default config for this component."""
+         raise NotImplementedError
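
ASREngine defines the contract every streaming ASR backend follows: connect, push RasaAudioBytes chunks, signal the end of audio, then consume ASREvents. The sketch below shows that call pattern, assuming rasa-pro 3.11 and the DeepgramASR subclass defined further down (with DEEPGRAM_API_KEY set); the audio_chunks iterable is hypothetical.

    import asyncio

    from rasa.core.channels.voice_stream.asr.asr_event import NewTranscript
    from rasa.core.channels.voice_stream.asr.deepgram import DeepgramASR


    async def transcribe(audio_chunks):  # audio_chunks: iterable of RasaAudioBytes
        asr = DeepgramASR()  # validates DEEPGRAM_API_KEY via the base class
        await asr.connect()
        for chunk in audio_chunks:
            await asr.send_audio_chunks(chunk)
        await asr.signal_audio_done()
        async for event in asr.stream_asr_events():
            if isinstance(event, NewTranscript):
                print(event.text)
        await asr.close_connection()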
rasa/core/channels/voice_stream/asr/asr_event.py (new file)
@@ -0,0 +1,18 @@
+ from dataclasses import dataclass
+
+
+ @dataclass
+ class ASREvent:
+     @classmethod
+     def name(cls) -> str:
+         return cls.__name__
+
+
+ @dataclass
+ class NewTranscript(ASREvent):
+     text: str
+
+
+ @dataclass
+ class UserIsSpeaking(ASREvent):
+     pass
rasa/core/channels/voice_stream/asr/azure.py (new file)
@@ -0,0 +1,129 @@
+ import os
+ from dataclasses import dataclass
+ from typing import Any, Dict, Optional, AsyncIterator
+ import asyncio
+
+ from rasa.core.channels.voice_stream.asr.asr_engine import ASREngine, ASREngineConfig
+ from rasa.core.channels.voice_stream.asr.asr_event import (
+     ASREvent,
+     NewTranscript,
+     UserIsSpeaking,
+ )
+ from rasa.core.channels.voice_stream.audio_bytes import HERTZ, RasaAudioBytes
+ from rasa.shared.constants import AZURE_SPEECH_API_KEY_ENV_VAR
+ from rasa.shared.exceptions import ConnectionException
+
+
+ @dataclass
+ class AzureASRConfig(ASREngineConfig):
+     language: Optional[str] = None
+     speech_region: Optional[str] = None
+
+
+ class AzureASR(ASREngine[AzureASRConfig]):
+     required_env_vars = (AZURE_SPEECH_API_KEY_ENV_VAR,)
+     required_packages = ("azure.cognitiveservices.speech",)
+
+     def __init__(self, config: Optional[AzureASRConfig] = None):
+         super().__init__(config)
+
+         import azure.cognitiveservices.speech as speechsdk
+
+         self.speech_recognizer: Optional[speechsdk.SpeechRecognizer] = None
+         self.stream: Optional[speechsdk.audio.PushAudioInputStream] = None
+         self.is_recognizing = False
+         self.queue: asyncio.Queue[speechsdk.SpeechRecognitionEventArgs] = (
+             asyncio.Queue()
+         )
+
+     @staticmethod
+     def validate_environment() -> None:
+         """Make sure all needed requirements for this component are met."""
+
+     def signal_user_is_speaking(self, event: Any) -> None:
+         """Replace the azure event with a generic is speaking event."""
+         self.fill_queue(UserIsSpeaking())
+
+     def fill_queue(self, event: Any) -> None:
+         """Either puts the event or a dedicated ASR Event into the queue."""
+         self.queue.put_nowait(event)
+
+     async def connect(self) -> None:
+         import azure.cognitiveservices.speech as speechsdk
+
+         speech_config = speechsdk.SpeechConfig(
+             subscription=os.environ[AZURE_SPEECH_API_KEY_ENV_VAR],
+             region=self.config.speech_region,
+         )
+         audio_format = speechsdk.audio.AudioStreamFormat(
+             samples_per_second=HERTZ,
+             bits_per_sample=8,
+             channels=1,
+             wave_stream_format=speechsdk.AudioStreamWaveFormat.MULAW,
+         )
+         self.stream = speechsdk.audio.PushAudioInputStream(stream_format=audio_format)
+         audio_config = speechsdk.audio.AudioConfig(stream=self.stream)
+         self.speech_recognizer = speechsdk.SpeechRecognizer(
+             speech_config=speech_config,
+             language=self.config.language,
+             audio_config=audio_config,
+         )
+         self.speech_recognizer.recognized.connect(self.fill_queue)
+         self.speech_recognizer.recognizing.connect(self.signal_user_is_speaking)
+         self.speech_recognizer.start_continuous_recognition_async()
+         self.is_recognizing = True
+
+     async def close_connection(self) -> None:
+         if self.speech_recognizer is None:
+             raise ConnectionException("Websocket not connected.")
+         self.speech_recognizer.stop_continuous_recognition_async()
+
+     async def signal_audio_done(self) -> None:
+         """Signal to the ASR Api that you are done sending data."""
+         self.is_recognizing = False
+
+     def rasa_audio_bytes_to_engine_bytes(self, chunk: RasaAudioBytes) -> bytes:
+         """Convert RasaAudioBytes to bytes usable by this engine."""
+         return chunk
+
+     async def send_audio_chunks(self, chunk: RasaAudioBytes) -> None:
+         """Send audio chunks to the ASR system via the websocket."""
+         if self.speech_recognizer is None or self.stream is None:
+             raise ConnectionException("ASR not connected.")
+         engine_bytes = self.rasa_audio_bytes_to_engine_bytes(chunk)
+         self.stream.write(engine_bytes)
+
+     async def stream_asr_events(self) -> AsyncIterator[ASREvent]:
+         """Stream the events returned by the ASR system as it is fed audio bytes."""
+         if self.speech_recognizer is None:
+             raise ConnectionException("Websocket not connected.")
+         while self.is_recognizing or not self.queue.empty():
+             try:
+                 message = await asyncio.wait_for(self.queue.get(), timeout=2)
+                 asr_event = self.engine_event_to_asr_event(message)
+                 if asr_event:
+                     yield asr_event
+             except asyncio.TimeoutError:
+                 pass
+
+     def engine_event_to_asr_event(self, e: Any) -> Optional[ASREvent]:
+         """Translate an engine event to a common ASREvent."""
+         import azure.cognitiveservices.speech as speechsdk
+
+         if isinstance(e, speechsdk.SpeechRecognitionEventArgs) and isinstance(
+             e.result, speechsdk.SpeechRecognitionResult
+         ):
+             return NewTranscript(e.result.text)
+         if isinstance(e, ASREvent):
+             # transformation happened before
+             return e
+
+         return None
+
+     @staticmethod
+     def get_default_config() -> AzureASRConfig:
+         return AzureASRConfig("en-US", "germanywestcentral")
+
+     @classmethod
+     def from_config_dict(cls, config: Dict) -> "AzureASR":
+         return AzureASR(AzureASRConfig.from_dict(config))
rasa/core/channels/voice_stream/asr/deepgram.py (new file)
@@ -0,0 +1,90 @@
+ from dataclasses import dataclass
+ from typing import Any, Dict, Optional
+ import json
+ import os
+
+ import websockets
+ from websockets.legacy.client import WebSocketClientProtocol
+
+ from rasa.core.channels.voice_stream.asr.asr_engine import ASREngine, ASREngineConfig
+ from rasa.core.channels.voice_stream.asr.asr_event import (
+     ASREvent,
+     NewTranscript,
+     UserIsSpeaking,
+ )
+ from rasa.core.channels.voice_stream.audio_bytes import HERTZ, RasaAudioBytes
+ from rasa.shared.constants import DEEPGRAM_API_KEY_ENV_VAR
+
+
+ @dataclass
+ class DeepgramASRConfig(ASREngineConfig):
+     endpoint: Optional[str] = None
+     # number of milliseconds of silence to determine end of speech
+     endpointing: Optional[int] = None
+     language: Optional[str] = None
+     model: Optional[str] = None
+     smart_format: Optional[bool] = None
+
+
+ class DeepgramASR(ASREngine[DeepgramASRConfig]):
+     required_env_vars = (DEEPGRAM_API_KEY_ENV_VAR,)
+
+     def __init__(self, config: Optional[DeepgramASRConfig] = None):
+         super().__init__(config)
+         self.accumulated_transcript = ""
+
+     async def open_websocket_connection(self) -> WebSocketClientProtocol:
+         """Connect to the ASR system."""
+         deepgram_api_key = os.environ[DEEPGRAM_API_KEY_ENV_VAR]
+         extra_headers = {"Authorization": f"Token {deepgram_api_key}"}
+         api_url = self._get_api_url()
+         query_params = self._get_query_params()
+         return await websockets.connect(  # type: ignore
+             api_url + query_params,
+             extra_headers=extra_headers,
+         )
+
+     def _get_api_url(self) -> str:
+         return f"wss://{self.config.endpoint}/v1/listen?"
+
+     def _get_query_params(self) -> str:
+         return (
+             f"encoding=mulaw&sample_rate={HERTZ}&endpointing={self.config.endpointing}"
+             f"&vad_events=true&language={self.config.language}&interim_results=true"
+             f"&model={self.config.model}&smart_format={str(self.config.smart_format).lower()}"
+         )
+
+     async def signal_audio_done(self) -> None:
+         """Signal to the ASR Api that you are done sending data."""
+         if self.asr_socket is None:
+             raise AttributeError("Websocket not connected.")
+         await self.asr_socket.send(json.dumps({"type": "CloseStream"}))
+
+     def rasa_audio_bytes_to_engine_bytes(self, chunk: RasaAudioBytes) -> bytes:
+         """Convert RasaAudioBytes to bytes usable by this engine."""
+         return chunk
+
+     def engine_event_to_asr_event(self, e: Any) -> Optional[ASREvent]:
+         """Translate an engine event to a common ASREvent."""
+         data = json.loads(e)
+         if "is_final" in data:
+             transcript = data["channel"]["alternatives"][0]["transcript"]
+             if data["is_final"]:
+                 if data.get("speech_final"):
+                     full_transcript = self.accumulated_transcript + transcript
+                     self.accumulated_transcript = ""
+                     if full_transcript:
+                         return NewTranscript(full_transcript)
+                 else:
+                     self.accumulated_transcript += transcript
+             elif transcript:
+                 return UserIsSpeaking()
+         return None
+
+     @staticmethod
+     def get_default_config() -> DeepgramASRConfig:
+         return DeepgramASRConfig("api.deepgram.com", 400, "en", "nova-2-general", True)
+
+     @classmethod
+     def from_config_dict(cls, config: Dict) -> "DeepgramASR":
+         return DeepgramASR(DeepgramASRConfig.from_dict(config))
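
The DeepgramASRConfig fields map directly onto the query parameters built in _get_query_params, and anything left unset is filled from get_default_config() through the base class's merge. A hedged construction example (assumes DEEPGRAM_API_KEY is set in the environment):

    from rasa.core.channels.voice_stream.asr.deepgram import DeepgramASR

    asr = DeepgramASR.from_config_dict(
        {"language": "en", "model": "nova-2-general", "endpointing": 400}
    )
    # fields left unset (endpoint, smart_format) fall back to the defaults above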
rasa/core/channels/voice_stream/audio_bytes.py (new file)
@@ -0,0 +1,8 @@
+ from typing import NewType
+
+ # a common intermediate audio byte format that acts as a common data format,
+ # to prevent quadratic complexity between formats of channels, asr engines,
+ # and tts engines
+ # currently corresponds to raw wave, 8khz, 8bit, mono channel, mulaw encoding
+ RasaAudioBytes = NewType("RasaAudioBytes", bytes)
+ HERTZ = 8000
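
RasaAudioBytes pins the channel-internal audio format to 8 kHz, 8-bit, mono, mulaw, so each channel, ASR engine, and TTS engine only needs a single conversion to and from it. An illustrative helper pair using the stdlib audioop module, which the browser_audio channel below also uses; the 16-bit width here is an example choice, not the channel's own:

    import audioop

    from rasa.core.channels.voice_stream.audio_bytes import RasaAudioBytes


    def linear16_to_rasa_audio(pcm16: bytes) -> RasaAudioBytes:
        # 16-bit linear PCM -> 8-bit mulaw
        return RasaAudioBytes(audioop.lin2ulaw(pcm16, 2))


    def rasa_audio_to_linear16(audio: RasaAudioBytes) -> bytes:
        # 8-bit mulaw -> 16-bit linear PCM
        return audioop.ulaw2lin(audio, 2)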
rasa/core/channels/voice_stream/browser_audio.py (new file)
@@ -0,0 +1,107 @@
+ import audioop
+ import base64
+ import json
+
+ import structlog
+ import uuid
+ from typing import Any, Awaitable, Callable, Optional, Tuple
+
+ from sanic import Blueprint, HTTPResponse, Request, response
+ from sanic import Websocket  # type: ignore
+
+
+ from rasa.core.channels import UserMessage
+ from rasa.core.channels.voice_ready.utils import CallParameters
+ from rasa.core.channels.voice_stream.call_state import call_state
+ from rasa.core.channels.voice_stream.tts.tts_engine import TTSEngine
+ from rasa.core.channels.voice_stream.audio_bytes import RasaAudioBytes
+ from rasa.core.channels.voice_stream.voice_channel import (
+     ContinueConversationAction,
+     EndConversationAction,
+     NewAudioAction,
+     VoiceChannelAction,
+     VoiceInputChannel,
+     VoiceOutputChannel,
+ )
+
+ logger = structlog.get_logger()
+
+
+ class BrowserAudioOutputChannel(VoiceOutputChannel):
+     @classmethod
+     def name(cls) -> str:
+         return "browser_audio"
+
+     def rasa_audio_bytes_to_channel_bytes(
+         self, rasa_audio_bytes: RasaAudioBytes
+     ) -> bytes:
+         return audioop.ulaw2lin(rasa_audio_bytes, 4)
+
+     def channel_bytes_to_message(self, recipient_id: str, channel_bytes: bytes) -> str:
+         return json.dumps({"audio": base64.b64encode(channel_bytes).decode("utf-8")})
+
+     def create_marker_message(self, recipient_id: str) -> Tuple[str, str]:
+         message_id = uuid.uuid4().hex
+         return json.dumps({"marker": message_id}), message_id
+
+
+ class BrowserAudioInputChannel(VoiceInputChannel):
+     @classmethod
+     def name(cls) -> str:
+         return "browser_audio"
+
+     def channel_bytes_to_rasa_audio_bytes(self, input_bytes: bytes) -> RasaAudioBytes:
+         return RasaAudioBytes(audioop.lin2ulaw(input_bytes, 4))
+
+     async def collect_call_parameters(
+         self, channel_websocket: Websocket
+     ) -> Optional[CallParameters]:
+         call_id = f"inspect-{uuid.uuid4()}"
+         return CallParameters(call_id, "local", "local", stream_id=call_id)
+
+     def map_input_message(
+         self,
+         message: Any,
+     ) -> VoiceChannelAction:
+         data = json.loads(message)
+         if "audio" in data:
+             channel_bytes = base64.b64decode(data["audio"])
+             audio_bytes = self.channel_bytes_to_rasa_audio_bytes(channel_bytes)
+             return NewAudioAction(audio_bytes)
+         elif "marker" in data:
+             if data["marker"] == call_state.latest_bot_audio_id:
+                 # Just finished streaming last audio bytes
+                 call_state.is_bot_speaking = False  # type: ignore[attr-defined]
+                 if call_state.should_hangup:
+                     logger.debug(
+                         "browser_audio.hangup", marker=call_state.latest_bot_audio_id
+                     )
+                     return EndConversationAction()
+             else:
+                 call_state.is_bot_speaking = True  # type: ignore[attr-defined]
+         return ContinueConversationAction()
+
+     def create_output_channel(
+         self, voice_websocket: Websocket, tts_engine: TTSEngine
+     ) -> VoiceOutputChannel:
+         return BrowserAudioOutputChannel(
+             voice_websocket,
+             tts_engine,
+             self.tts_cache,
+         )
+
+     def blueprint(
+         self, on_new_message: Callable[[UserMessage], Awaitable[Any]]
+     ) -> Blueprint:
+         """Defines a Sanic blueprint."""
+         blueprint = Blueprint("browser_audio", __name__)
+
+         @blueprint.route("/", methods=["GET"])
+         async def health(_: Request) -> HTTPResponse:
+             return response.json({"status": "ok"})
+
+         @blueprint.websocket("/websocket")  # type: ignore
+         async def handle_message(request: Request, ws: Websocket) -> None:
+             await self.run_audio_streaming(on_new_message, ws)
+
+         return blueprint
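
The browser_audio channel exchanges JSON frames over its /websocket route: base64-encoded audio under an "audio" key in both directions, plus "marker" frames that the client appears to echo back once playback of the corresponding bot audio has finished, which is how is_bot_speaking and pending hangups are tracked above. A hedged sketch of the client-side frame shapes implied by that code (not an official client API):

    import base64
    import json


    def encode_mic_frame(pcm_bytes: bytes) -> str:
        """Client -> server: one chunk of microphone audio (32-bit linear PCM)."""
        return json.dumps({"audio": base64.b64encode(pcm_bytes).decode("utf-8")})


    def echo_marker(marker_id: str) -> str:
        """Client -> server: playback of the bot audio tagged marker_id has finished."""
        return json.dumps({"marker": marker_id})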