rasa-pro 3.11.0rc2__py3-none-any.whl → 3.11.0rc3__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of rasa-pro might be problematic.

Files changed (30)
  1. rasa/core/channels/development_inspector.py +4 -1
  2. rasa/core/channels/voice_ready/audiocodes.py +3 -4
  3. rasa/core/channels/voice_stream/asr/asr_event.py +1 -1
  4. rasa/core/channels/voice_stream/asr/azure.py +5 -7
  5. rasa/core/channels/voice_stream/asr/deepgram.py +13 -11
  6. rasa/core/channels/voice_stream/voice_channel.py +48 -18
  7. rasa/core/nlg/contextual_response_rephraser.py +2 -2
  8. rasa/core/policies/enterprise_search_policy.py +5 -5
  9. rasa/core/policies/intentless_policy.py +9 -5
  10. rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py +2 -1
  11. rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +9 -0
  12. rasa/engine/validation.py +31 -19
  13. rasa/shared/constants.py +5 -5
  14. rasa/shared/core/constants.py +1 -1
  15. rasa/shared/core/domain.py +0 -26
  16. rasa/shared/providers/_configs/litellm_router_client_config.py +29 -9
  17. rasa/shared/providers/embedding/litellm_router_embedding_client.py +1 -1
  18. rasa/shared/providers/llm/_base_litellm_client.py +26 -0
  19. rasa/shared/providers/llm/litellm_router_llm_client.py +56 -1
  20. rasa/shared/providers/llm/self_hosted_llm_client.py +4 -28
  21. rasa/shared/providers/router/_base_litellm_router_client.py +35 -1
  22. rasa/shared/utils/common.py +1 -1
  23. rasa/telemetry.py +3 -1
  24. rasa/tracing/config.py +1 -1
  25. rasa/version.py +1 -1
  26. {rasa_pro-3.11.0rc2.dist-info → rasa_pro-3.11.0rc3.dist-info}/METADATA +1 -1
  27. {rasa_pro-3.11.0rc2.dist-info → rasa_pro-3.11.0rc3.dist-info}/RECORD +30 -30
  28. {rasa_pro-3.11.0rc2.dist-info → rasa_pro-3.11.0rc3.dist-info}/NOTICE +0 -0
  29. {rasa_pro-3.11.0rc2.dist-info → rasa_pro-3.11.0rc3.dist-info}/WHEEL +0 -0
  30. {rasa_pro-3.11.0rc2.dist-info → rasa_pro-3.11.0rc3.dist-info}/entry_points.txt +0 -0
rasa/core/channels/development_inspector.py CHANGED
@@ -187,5 +187,8 @@ class TrackerStream:
         if not self._connected_clients:
             return
         await asyncio.wait(
-            [self._send(websocket, message) for websocket in self._connected_clients]
+            [
+                asyncio.create_task(self._send(websocket, message))
+                for websocket in self._connected_clients
+            ]
         )
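Note: the `asyncio.create_task` wrapping above matters because `asyncio.wait()` stopped accepting bare coroutines in Python 3.11. A minimal, self-contained sketch of the pattern (the `send`/`broadcast` names are illustrative stand-ins, not the actual `TrackerStream` methods):

    import asyncio

    async def send(client: str, message: str) -> None:
        # stand-in for TrackerStream._send, which writes to a websocket
        print(f"{client} <- {message}")

    async def broadcast(clients: list, message: str) -> None:
        if not clients:
            return
        # asyncio.wait() only accepts tasks/futures on Python 3.11+,
        # so each coroutine is wrapped in a task first
        await asyncio.wait(
            [asyncio.create_task(send(client, message)) for client in clients]
        )

    asyncio.run(broadcast(["inspector-1", "inspector-2"], "tracker update"))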
rasa/core/channels/voice_ready/audiocodes.py CHANGED
@@ -74,7 +74,7 @@ class Conversation:
     @staticmethod
     def get_metadata(activity: Dict[Text, Any]) -> Optional[Dict[Text, Any]]:
         """Get metadata from the activity."""
-        return activity.get("parameters")
+        return asdict(map_call_params(activity["parameters"]))

     @staticmethod
     def _handle_event(event: Dict[Text, Any]) -> Text:
@@ -88,17 +88,16 @@ class Conversation:

         if event["name"] == EVENT_START:
             text = f"{INTENT_MESSAGE_PREFIX}{USER_INTENT_SESSION_START}"
-            event_params = asdict(map_call_params(event["parameters"]))
         elif event["name"] == EVENT_DTMF:
             text = f"{INTENT_MESSAGE_PREFIX}vaig_event_DTMF"
             event_params = {"value": event["value"]}
+            text += json.dumps(event_params)
         else:
             structlogger.warning(
                 "audiocodes.handle.event.unknown_event", event_payload=event
             )
             return ""

-        text += json.dumps(event_params)
         return text

     def is_active_conversation(self, now: datetime, delta: timedelta) -> bool:
@@ -384,7 +383,7 @@ class AudiocodesInput(InputChannel):
         {"conversation": <conversation_id>, "reason": Optional[Text]}.
         """
         self._get_conversation(request.token, conversation_id)
-        reason = json.dumps({"reason": request.json.get("reason")})
+        reason = {"reason": request.json.get("reason")}
         await on_new_message(
             UserMessage(
                 text=f"{INTENT_MESSAGE_PREFIX}session_end",
rasa/core/channels/voice_stream/asr/asr_event.py CHANGED
@@ -14,5 +14,5 @@ class NewTranscript(ASREvent):


 @dataclass
-class UserStartedSpeaking(ASREvent):
+class UserIsSpeaking(ASREvent):
     pass
rasa/core/channels/voice_stream/asr/azure.py CHANGED
@@ -7,7 +7,7 @@ from rasa.core.channels.voice_stream.asr.asr_engine import ASREngine, ASREngineC
 from rasa.core.channels.voice_stream.asr.asr_event import (
     ASREvent,
     NewTranscript,
-    UserStartedSpeaking,
+    UserIsSpeaking,
 )
 from rasa.core.channels.voice_stream.audio_bytes import HERTZ, RasaAudioBytes
 from rasa.shared.exceptions import ConnectionException
@@ -31,9 +31,9 @@ class AzureASR(ASREngine[AzureASRConfig]):
             asyncio.Queue()
         )

-    def signal_user_started_speaking(self, event: Any) -> None:
-        """Replace the unspecific azure event with a specific start event."""
-        self.fill_queue(UserStartedSpeaking())
+    def signal_user_is_speaking(self, event: Any) -> None:
+        """Replace the azure event with a generic is speaking event."""
+        self.fill_queue(UserIsSpeaking())

     def fill_queue(self, event: Any) -> None:
         """Either puts the event or a dedicated ASR Event into the queue."""
@@ -60,9 +60,7 @@ class AzureASR(ASREngine[AzureASRConfig]):
             audio_config=audio_config,
         )
         self.speech_recognizer.recognized.connect(self.fill_queue)
-        self.speech_recognizer.speech_start_detected.connect(
-            self.signal_user_started_speaking
-        )
+        self.speech_recognizer.recognizing.connect(self.signal_user_is_speaking)
         self.speech_recognizer.start_continuous_recognition_async()
         self.is_recognizing = True

rasa/core/channels/voice_stream/asr/deepgram.py CHANGED
@@ -10,7 +10,7 @@ from rasa.core.channels.voice_stream.asr.asr_engine import ASREngine, ASREngineC
 from rasa.core.channels.voice_stream.asr.asr_event import (
     ASREvent,
     NewTranscript,
-    UserStartedSpeaking,
+    UserIsSpeaking,
 )
 from rasa.core.channels.voice_stream.audio_bytes import HERTZ, RasaAudioBytes

@@ -49,7 +49,7 @@ class DeepgramASR(ASREngine[DeepgramASRConfig]):
     def _get_query_params(self) -> str:
         return (
             f"encoding=mulaw&sample_rate={HERTZ}&endpointing={self.config.endpointing}"
-            f"&vad_events=true&language={self.config.language}"
+            f"&vad_events=true&language={self.config.language}&interim_results=true"
             f"&model={self.config.model}&smart_format={str(self.config.smart_format).lower()}"
         )

@@ -66,16 +66,18 @@ class DeepgramASR(ASREngine[DeepgramASRConfig]):
     def engine_event_to_asr_event(self, e: Any) -> Optional[ASREvent]:
         """Translate an engine event to a common ASREvent."""
         data = json.loads(e)
-        if data.get("is_final"):
+        if "is_final" in data:
             transcript = data["channel"]["alternatives"][0]["transcript"]
-            if data.get("speech_final"):
-                full_transcript = self.accumulated_transcript + transcript
-                self.accumulated_transcript = ""
-                return NewTranscript(full_transcript)
-            else:
-                self.accumulated_transcript += transcript
-        elif data.get("type") == "SpeechStarted":
-            return UserStartedSpeaking()
+            if data["is_final"]:
+                if data.get("speech_final"):
+                    full_transcript = self.accumulated_transcript + transcript
+                    self.accumulated_transcript = ""
+                    if full_transcript:
+                        return NewTranscript(full_transcript)
+                else:
+                    self.accumulated_transcript += transcript
+            elif transcript:
+                return UserIsSpeaking()
         return None

     @staticmethod
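Note: with `interim_results=true`, Deepgram streams partial transcripts (`is_final: false`) alongside final ones, and the new mapping uses them to emit `UserIsSpeaking` instead of relying on `SpeechStarted` events. A stateless sketch of that mapping, with hand-written payloads in the shape of Deepgram streaming results (the real engine also accumulates partial final transcripts):

    import json

    def classify(raw: str) -> str:
        data = json.loads(raw)
        if "is_final" not in data:
            return "ignored"
        transcript = data["channel"]["alternatives"][0]["transcript"]
        if data["is_final"] and data.get("speech_final") and transcript:
            return f"NewTranscript({transcript!r})"
        if not data["is_final"] and transcript:
            return "UserIsSpeaking()"
        return "ignored"

    interim = {"is_final": False,
               "channel": {"alternatives": [{"transcript": "book a"}]}}
    final = {"is_final": True, "speech_final": True,
             "channel": {"alternatives": [{"transcript": "book a flight"}]}}
    print(classify(json.dumps(interim)))  # UserIsSpeaking()
    print(classify(json.dumps(final)))    # NewTranscript('book a flight')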
rasa/core/channels/voice_stream/voice_channel.py CHANGED
@@ -5,10 +5,7 @@ from dataclasses import asdict, dataclass
 from typing import Any, AsyncIterator, Awaitable, Callable, Dict, List, Optional, Tuple

 from rasa.core.channels.voice_stream.util import generate_silence
-from rasa.shared.core.constants import (
-    SILENCE_TIMEOUT_DEFAULT_VALUE,
-    SLOT_SILENCE_TIMEOUT,
-)
+from rasa.shared.core.constants import SLOT_SILENCE_TIMEOUT
 from rasa.shared.utils.common import (
     class_from_module_path,
     mark_as_beta_feature,
@@ -24,7 +21,7 @@ from rasa.core.channels.voice_stream.asr.asr_engine import ASREngine
 from rasa.core.channels.voice_stream.asr.asr_event import (
     ASREvent,
     NewTranscript,
-    UserStartedSpeaking,
+    UserIsSpeaking,
 )
 from sanic import Websocket  # type: ignore

@@ -233,11 +230,18 @@ class VoiceOutputChannel(OutputChannel):


 class VoiceInputChannel(InputChannel):
-    def __init__(self, server_url: str, asr_config: Dict, tts_config: Dict):
+    def __init__(
+        self,
+        server_url: str,
+        asr_config: Dict,
+        tts_config: Dict,
+        monitor_silence: bool = False,
+    ):
         validate_voice_license_scope()
         self.server_url = server_url
         self.asr_config = asr_config
         self.tts_config = tts_config
+        self.monitor_silence = monitor_silence
         self.tts_cache = TTSCache(tts_config.get("cache_size", 1000))

     async def handle_silence_timeout(
@@ -247,10 +251,14 @@ class VoiceInputChannel(InputChannel):
         tts_engine: TTSEngine,
         call_parameters: CallParameters,
     ) -> None:
-        timeout = call_state.silence_timeout or SILENCE_TIMEOUT_DEFAULT_VALUE
-        logger.info("voice_channel.silence_timeout_watch_started", timeout=timeout)
+        timeout = call_state.silence_timeout
+        if not timeout:
+            return
+        if not self.monitor_silence:
+            return
+        logger.debug("voice_channel.silence_timeout_watch_started", timeout=timeout)
         await asyncio.sleep(timeout)
-        logger.info("voice_channel.silence_timeout_tripped")
+        logger.debug("voice_channel.silence_timeout_tripped")
         output_channel = self.create_output_channel(voice_websocket, tts_engine)
         message = UserMessage(
             "/silence_timeout",
@@ -261,10 +269,23 @@ class VoiceInputChannel(InputChannel):
         )
         await on_new_message(message)

+    @staticmethod
+    def _cancel_silence_timeout_watcher() -> None:
+        """Cancels the silent timeout task if it exists."""
+        if call_state.silence_timeout_watcher:
+            logger.debug("voice_channel.cancelling_current_timeout_watcher_task")
+            call_state.silence_timeout_watcher.cancel()
+            call_state.silence_timeout_watcher = None  # type: ignore[attr-defined]
+
     @classmethod
     def from_credentials(cls, credentials: Optional[Dict[str, Any]]) -> InputChannel:
         credentials = credentials or {}
-        return cls(credentials["server_url"], credentials["asr"], credentials["tts"])
+        return cls(
+            credentials["server_url"],
+            credentials["asr"],
+            credentials["tts"],
+            credentials.get("monitor_silence", False),
+        )

     def channel_bytes_to_rasa_audio_bytes(self, input_bytes: bytes) -> RasaAudioBytes:
         raise NotImplementedError
@@ -323,11 +344,14 @@ class VoiceInputChannel(InputChannel):
             is_bot_speaking_after = call_state.is_bot_speaking

             if not is_bot_speaking_before and is_bot_speaking_after:
-                logger.info("voice_channel.bot_started_speaking")
+                logger.debug("voice_channel.bot_started_speaking")
+                # relevant when the bot speaks multiple messages in one turn
+                self._cancel_silence_timeout_watcher()

             # we just stopped speaking, starting a watcher for silence timeout
             if is_bot_speaking_before and not is_bot_speaking_after:
-                logger.info("voice_channel.bot_stopped_speaking")
+                logger.debug("voice_channel.bot_stopped_speaking")
+                self._cancel_silence_timeout_watcher()
                 call_state.silence_timeout_watcher = (  # type: ignore[attr-defined]
                     asyncio.create_task(
                         self.handle_silence_timeout(
@@ -354,12 +378,20 @@ class VoiceInputChannel(InputChannel):
             call_parameters,
         )

+        audio_forwarding_task = asyncio.create_task(consume_audio_bytes())
+        asr_event_task = asyncio.create_task(consume_asr_events())
         await asyncio.wait(
-            [consume_audio_bytes(), consume_asr_events()],
+            [audio_forwarding_task, asr_event_task],
             return_when=asyncio.FIRST_COMPLETED,
         )
+        if not audio_forwarding_task.done():
+            audio_forwarding_task.cancel()
+        if not asr_event_task.done():
+            asr_event_task.cancel()
         await tts_engine.close_connection()
         await asr_engine.close_connection()
+        await channel_websocket.close()
+        self._cancel_silence_timeout_watcher()

     def create_output_channel(
         self, voice_websocket: Websocket, tts_engine: TTSEngine
@@ -377,7 +409,7 @@ class VoiceInputChannel(InputChannel):
     ) -> None:
         """Handle a new event from the ASR system."""
         if isinstance(e, NewTranscript) and e.text:
-            logger.info(
+            logger.debug(
                 "VoiceInputChannel.handle_asr_event.new_transcript", transcript=e.text
             )
             call_state.is_user_speaking = False  # type: ignore[attr-defined]
@@ -390,8 +422,6 @@ class VoiceInputChannel(InputChannel):
                 metadata=asdict(call_parameters),
             )
             await on_new_message(message)
-        elif isinstance(e, UserStartedSpeaking):
-            if call_state.silence_timeout_watcher:
-                call_state.silence_timeout_watcher.cancel()
-                call_state.silence_timeout_watcher = None  # type: ignore[attr-defined]
+        elif isinstance(e, UserIsSpeaking):
+            self._cancel_silence_timeout_watcher()
             call_state.is_user_speaking = True  # type: ignore[attr-defined]
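Note: silence monitoring is now opt-in per voice channel and the timeout comes from the `silence_timeout` slot rather than a hard-coded default. A sketch of the credentials dict a concrete `VoiceInputChannel` subclass would receive (key names follow `from_credentials` above; the concrete values are placeholders):

    credentials = {
        "server_url": "https://voice.example.com",
        "asr": {"name": "deepgram"},
        "tts": {"name": "azure", "cache_size": 1000},
        # new in 3.11.0rc3: silence monitoring stays off unless enabled here
        "monitor_silence": True,
    }
    monitor_silence = credentials.get("monitor_silence", False)  # mirrors from_credentials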
rasa/core/nlg/contextual_response_rephraser.py CHANGED
@@ -13,7 +13,7 @@ from rasa.shared.constants import (
     PROVIDER_CONFIG_KEY,
     OPENAI_PROVIDER,
     TIMEOUT_CONFIG_KEY,
-    MODEL_GROUP_CONFIG_KEY,
+    MODEL_GROUP_ID_CONFIG_KEY,
 )
 from rasa.shared.core.domain import KEY_RESPONSES_TEXT, Domain
 from rasa.shared.core.events import BotUttered, UserUttered
@@ -253,7 +253,7 @@ class ContextualResponseRephraser(
             llm_type=self.llm_property(PROVIDER_CONFIG_KEY),
             llm_model=self.llm_property(MODEL_CONFIG_KEY)
             or self.llm_property(MODEL_NAME_CONFIG_KEY),
-            llm_model_group_id=self.llm_property(MODEL_GROUP_CONFIG_KEY),
+            llm_model_group_id=self.llm_property(MODEL_GROUP_ID_CONFIG_KEY),
         )
         if not (updated_text := await self._generate_llm_response(prompt)):
             # If the LLM fails to generate a response, we
rasa/core/policies/enterprise_search_policy.py CHANGED
@@ -51,7 +51,7 @@ from rasa.shared.constants import (
     OPENAI_PROVIDER,
     TIMEOUT_CONFIG_KEY,
     MODEL_NAME_CONFIG_KEY,
-    MODEL_GROUP_CONFIG_KEY,
+    MODEL_GROUP_ID_CONFIG_KEY,
 )
 from rasa.shared.core.constants import (
     ACTION_CANCEL_FLOW,
@@ -337,12 +337,12 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
             embeddings_model=self.embeddings_config.get(MODEL_CONFIG_KEY)
             or self.embeddings_config.get(MODEL_NAME_CONFIG_KEY),
             embeddings_model_group_id=self.embeddings_config.get(
-                MODEL_GROUP_CONFIG_KEY
+                MODEL_GROUP_ID_CONFIG_KEY
             ),
             llm_type=self.llm_config.get(PROVIDER_CONFIG_KEY),
             llm_model=self.llm_config.get(MODEL_CONFIG_KEY)
             or self.llm_config.get(MODEL_NAME_CONFIG_KEY),
-            llm_model_group_id=self.llm_config.get(MODEL_GROUP_CONFIG_KEY),
+            llm_model_group_id=self.llm_config.get(MODEL_GROUP_ID_CONFIG_KEY),
             citation_enabled=self.citation_enabled,
         )
         self.persist()
@@ -538,12 +538,12 @@ class EnterpriseSearchPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Po
             embeddings_model=self.embeddings_config.get(MODEL_CONFIG_KEY)
             or self.embeddings_config.get(MODEL_NAME_CONFIG_KEY),
             embeddings_model_group_id=self.embeddings_config.get(
-                MODEL_GROUP_CONFIG_KEY
+                MODEL_GROUP_ID_CONFIG_KEY
             ),
             llm_type=self.llm_config.get(PROVIDER_CONFIG_KEY),
             llm_model=self.llm_config.get(MODEL_CONFIG_KEY)
             or self.llm_config.get(MODEL_NAME_CONFIG_KEY),
-            llm_model_group_id=self.llm_config.get(MODEL_GROUP_CONFIG_KEY),
+            llm_model_group_id=self.llm_config.get(MODEL_GROUP_ID_CONFIG_KEY),
             citation_enabled=self.citation_enabled,
         )
         return self._create_prediction(
rasa/core/policies/intentless_policy.py CHANGED
@@ -39,7 +39,7 @@ from rasa.shared.constants import (
     PROVIDER_CONFIG_KEY,
     OPENAI_PROVIDER,
     TIMEOUT_CONFIG_KEY,
-    MODEL_GROUP_CONFIG_KEY,
+    MODEL_GROUP_ID_CONFIG_KEY,
 )
 from rasa.shared.core.constants import ACTION_LISTEN_NAME
 from rasa.shared.core.constants import ACTION_TRIGGER_CHITCHAT
@@ -558,11 +558,13 @@ class IntentlessPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
             embeddings_type=self.embeddings_property(PROVIDER_CONFIG_KEY),
             embeddings_model=self.embeddings_property(MODEL_CONFIG_KEY)
             or self.embeddings_property(MODEL_NAME_CONFIG_KEY),
-            embeddings_model_group_id=self.embeddings_property(MODEL_GROUP_CONFIG_KEY),
+            embeddings_model_group_id=self.embeddings_property(
+                MODEL_GROUP_ID_CONFIG_KEY
+            ),
             llm_type=self.llm_property(PROVIDER_CONFIG_KEY),
             llm_model=self.llm_property(MODEL_CONFIG_KEY)
             or self.llm_property(MODEL_NAME_CONFIG_KEY),
-            llm_model_group_id=self.llm_property(MODEL_GROUP_CONFIG_KEY),
+            llm_model_group_id=self.llm_property(MODEL_GROUP_ID_CONFIG_KEY),
         )

         self.persist()
@@ -642,11 +644,13 @@ class IntentlessPolicy(LLMHealthCheckMixin, EmbeddingsHealthCheckMixin, Policy):
             embeddings_type=self.embeddings_property(PROVIDER_CONFIG_KEY),
             embeddings_model=self.embeddings_property(MODEL_CONFIG_KEY)
             or self.embeddings_property(MODEL_NAME_CONFIG_KEY),
-            embeddings_model_group_id=self.embeddings_property(MODEL_GROUP_CONFIG_KEY),
+            embeddings_model_group_id=self.embeddings_property(
+                MODEL_GROUP_ID_CONFIG_KEY
+            ),
             llm_type=self.llm_property(PROVIDER_CONFIG_KEY),
             llm_model=self.llm_property(MODEL_CONFIG_KEY)
             or self.llm_property(MODEL_NAME_CONFIG_KEY),
-            llm_model_group_id=self.llm_property(MODEL_GROUP_CONFIG_KEY),
+            llm_model_group_id=self.llm_property(MODEL_GROUP_ID_CONFIG_KEY),
             score=score,
         )

rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py CHANGED
@@ -113,6 +113,7 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
         )

         self.trace_prompt_tokens = self.config.get("trace_prompt_tokens", False)
+        self.repeat_command_enabled = self.is_repeat_command_enabled()

     ### Implementations of LLMBasedCommandGenerator parent
     @staticmethod
@@ -458,7 +459,7 @@ class SingleStepLLMCommandGenerator(LLMBasedCommandGenerator):
             "current_slot": current_slot,
             "current_slot_description": current_slot_description,
             "user_message": latest_user_message,
-            "is_repeat_command_enabled": self.is_repeat_command_enabled(),
+            "is_repeat_command_enabled": self.repeat_command_enabled,
         }

         return self.compile_template(self.prompt_template).render(**inputs)
rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml CHANGED
@@ -111,6 +111,15 @@ slots:
     type: bool
     mappings:
       - type: from_llm
+  silence_timeout:
+    type: float
+    initial_value: 6.0
+    max_value: 1000000
+  consecutive_silence_timeouts:
+    type: float
+    initial_value: 0.0
+    max_value: 1000000
+

 flows:
   pattern_cancel_flow:
rasa/engine/validation.py CHANGED
@@ -67,11 +67,12 @@ from rasa.shared.constants import (
     ROUTER_CONFIG_KEY,
     MODELS_CONFIG_KEY,
     MODEL_GROUP_CONFIG_KEY,
-    ROUTER_STRATEGY_CONFIG_KEY,
-    VALID_ROUTER_STRATEGIES,
-    ROUTER_STRATEGIES_REQUIRING_REDIS_CACHE,
-    ROUTER_STRATEGIES_NOT_REQUIRING_CACHE,
+    ROUTING_STRATEGY_CONFIG_KEY,
+    VALID_ROUTING_STRATEGIES,
+    ROUTING_STRATEGIES_REQUIRING_REDIS_CACHE,
+    ROUTING_STRATEGIES_NOT_REQUIRING_CACHE,
     REDIS_HOST_CONFIG_KEY,
+    USE_CHAT_COMPLETIONS_ENDPOINT_CONFIG_KEY,
 )
 from rasa.shared.core.constants import ACTION_RESET_ROUTING, ACTION_TRIGGER_CHITCHAT
 from rasa.shared.core.domain import Domain
@@ -1053,31 +1054,42 @@ def _validate_model_group_router_setting(
         if ROUTER_CONFIG_KEY not in model_group:
             continue

+        for model_config in model_group.get(MODELS_CONFIG_KEY, []):
+            if USE_CHAT_COMPLETIONS_ENDPOINT_CONFIG_KEY in model_config:
+                print_error_and_exit(
+                    f"You defined the '{USE_CHAT_COMPLETIONS_ENDPOINT_CONFIG_KEY}' in "
+                    f"the model group '{model_group[MODEL_GROUP_ID_CONFIG_KEY]}'. This "
+                    f"key is not allowed in the model configuration as the router is "
+                    f"defined. Please remove this key from your model configuration "
+                    f"and update it in the '{ROUTER_CONFIG_KEY} configuration, as it "
+                    f"is a router level setting."
+                )
+
         router_config = model_group[ROUTER_CONFIG_KEY]
-        if ROUTER_STRATEGY_CONFIG_KEY in router_config:
-            router_strategy = router_config.get(ROUTER_STRATEGY_CONFIG_KEY)
-            if router_strategy and router_strategy not in VALID_ROUTER_STRATEGIES:
+        if ROUTING_STRATEGY_CONFIG_KEY in router_config:
+            routing_strategy = router_config.get(ROUTING_STRATEGY_CONFIG_KEY)
+            if routing_strategy and routing_strategy not in VALID_ROUTING_STRATEGIES:
                 print_error_and_exit(
-                    f"The router strategy you defined for the model group "
-                    f"'{model_group[MODEL_GROUP_ID_CONFIG_KEY]}' is not valid. "
-                    f"Valid router strategies are categorized as follows:\n"
+                    f"The routing strategy '{routing_strategy}' you defined for the "
+                    f"model group '{model_group[MODEL_GROUP_ID_CONFIG_KEY]}' is not "
+                    f"valid. Valid routing strategies are categorized as follows:\n"
                     f"- Strategies requiring Redis caching: "
-                    f"{', '.join(ROUTER_STRATEGIES_REQUIRING_REDIS_CACHE)}\n"
+                    f"{', '.join(ROUTING_STRATEGIES_REQUIRING_REDIS_CACHE)}\n"
                     f"- Strategies not requiring caching: "
-                    f"{', '.join(ROUTER_STRATEGIES_NOT_REQUIRING_CACHE)}"
+                    f"{', '.join(ROUTING_STRATEGIES_NOT_REQUIRING_CACHE)}"
                 )
             if (
-                router_strategy in ROUTER_STRATEGIES_REQUIRING_REDIS_CACHE
+                routing_strategy in ROUTING_STRATEGIES_REQUIRING_REDIS_CACHE
                 and REDIS_HOST_CONFIG_KEY not in router_config
             ):
                 structlogger.warning(
-                    "validation.router_strategy.redis_host_not_defined",
+                    "validation.routing_strategy.redis_host_not_defined",
                     event_info=(
-                        f"The router strategy '{router_strategy}' requires a Redis host"
-                        f" to be defined. Without a Redis host, the system defaults to "
-                        f"'in-memory' caching. Please add the '{REDIS_HOST_CONFIG_KEY}'"
-                        f" to the router configuration for the model group "
-                        f"'{model_group[MODEL_GROUP_ID_CONFIG_KEY]}'."
+                        f"The routing strategy '{routing_strategy}' requires a Redis "
+                        f"host to be defined. Without a Redis host, the system "
+                        f"defaults to 'in-memory' caching. Please add the "
+                        f"'{REDIS_HOST_CONFIG_KEY}' to the router configuration for "
+                        f"the model group '{model_group[MODEL_GROUP_ID_CONFIG_KEY]}'."
                     ),
                 )

rasa/shared/constants.py CHANGED
@@ -184,19 +184,19 @@ N_REPHRASES_CONFIG_KEY = "n"
 USE_CHAT_COMPLETIONS_ENDPOINT_CONFIG_KEY = "use_chat_completions_endpoint"

 ROUTER_CONFIG_KEY = "router"
-ROUTER_STRATEGY_CONFIG_KEY = "router_strategy"
+ROUTING_STRATEGY_CONFIG_KEY = "routing_strategy"
 REDIS_HOST_CONFIG_KEY = "redis_host"
-ROUTER_STRATEGIES_REQUIRING_REDIS_CACHE = [
+ROUTING_STRATEGIES_REQUIRING_REDIS_CACHE = [
     "cost-based-routing",
     "usage-based-routing",
 ]
-ROUTER_STRATEGIES_NOT_REQUIRING_CACHE = [
+ROUTING_STRATEGIES_NOT_REQUIRING_CACHE = [
     "latency-based-routing",
     "least-busy",
     "simple-shuffle",
 ]
-VALID_ROUTER_STRATEGIES = (
-    ROUTER_STRATEGIES_REQUIRING_REDIS_CACHE + ROUTER_STRATEGIES_NOT_REQUIRING_CACHE
+VALID_ROUTING_STRATEGIES = (
+    ROUTING_STRATEGIES_REQUIRING_REDIS_CACHE + ROUTING_STRATEGIES_NOT_REQUIRING_CACHE
 )

 MODELS_CONFIG_KEY = "models"
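Note: `router_strategy` is renamed to `routing_strategy`, and the valid values are grouped by whether they need a Redis cache. A sketch of a model group dict in the shape `_validate_model_group_router_setting` iterates over (key names come from the constants above; the `id`, provider, and model values are illustrative only):

    model_group = {
        "id": "gpt-pool",
        "models": [
            {"provider": "openai", "model": "gpt-4o"},
            {"provider": "openai", "model": "gpt-4o-mini"},
        ],
        "router": {
            # renamed in this release: "router_strategy" -> "routing_strategy"
            "routing_strategy": "cost-based-routing",
            # cost-/usage-based routing expects a Redis host; without it the
            # router falls back to in-memory caching and a warning is logged
            "redis_host": "localhost",
        },
    }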
rasa/shared/core/constants.py CHANGED
@@ -110,8 +110,8 @@ FLOW_SLOT_NAMES = [FLOW_HASHES_SLOT]

 # slots for audio timeout
 SLOT_SILENCE_TIMEOUT = "silence_timeout"
-SILENCE_TIMEOUT_DEFAULT_VALUE = 6.0
 SLOT_CONSECUTIVE_SILENCE_TIMEOUTS = "consecutive_silence_timeouts"
+SILENCE_TIMEOUT_DEFAULT_VALUE = 6.0
 SILENCE_SLOTS = [SLOT_SILENCE_TIMEOUT, SLOT_CONSECUTIVE_SILENCE_TIMEOUTS]
 # slots for knowledge base
 SLOT_LISTED_ITEMS = "knowledge_base_listed_objects"
rasa/shared/core/domain.py CHANGED
@@ -3,7 +3,6 @@ from __future__ import annotations
 import collections
 import copy
 import json
-import math
 import os
 from dataclasses import dataclass
 from functools import cached_property
@@ -58,7 +57,6 @@ from rasa.shared.core.events import SlotSet, UserUttered
 from rasa.shared.core.slots import (
     AnySlot,
     CategoricalSlot,
-    FloatSlot,
     ListSlot,
     Slot,
     TextSlot,
@@ -1084,7 +1082,6 @@ class Domain:
         self._add_knowledge_base_slots()
         self._add_categorical_slot_default_value()
         self._add_session_metadata_slot()
-        self._add_audio_slots()

     def _add_categorical_slot_default_value(self) -> None:
         """Add a default value to all categorical slots.
@@ -1139,29 +1136,6 @@ class Domain:
             )
         )

-    def _add_audio_slots(self) -> None:
-        """Add slots relevant for audio channels."""
-        self.slots.append(
-            FloatSlot(
-                rasa.shared.core.constants.SLOT_SILENCE_TIMEOUT,
-                mappings=[],
-                influence_conversation=False,
-                is_builtin=True,
-                initial_value=rasa.shared.core.constants.SILENCE_TIMEOUT_DEFAULT_VALUE,
-                max_value=math.inf,
-            )
-        )
-        self.slots.append(
-            FloatSlot(
-                rasa.shared.core.constants.SLOT_CONSECUTIVE_SILENCE_TIMEOUTS,
-                mappings=[],
-                influence_conversation=False,
-                is_builtin=True,
-                initial_value=0.0,
-                max_value=math.inf,
-            )
-        )
-
     def _add_knowledge_base_slots(self) -> None:
         """Add slots for the knowledge base action to slots.

rasa/shared/providers/_configs/litellm_router_client_config.py CHANGED
@@ -14,6 +14,7 @@ from rasa.shared.constants import (
     API_TYPE_CONFIG_KEY,
     MODEL_CONFIG_KEY,
     MODEL_LIST_KEY,
+    USE_CHAT_COMPLETIONS_ENDPOINT_CONFIG_KEY,
 )
 from rasa.shared.providers._configs.model_group_config import (
     ModelGroupConfig,
@@ -29,6 +30,7 @@ _LITELLM_UNSUPPORTED_KEYS = [
     PROVIDER_CONFIG_KEY,
     DEPLOYMENT_CONFIG_KEY,
     API_TYPE_CONFIG_KEY,
+    USE_CHAT_COMPLETIONS_ENDPOINT_CONFIG_KEY,
 ]


@@ -84,6 +86,7 @@ class LiteLLMRouterClientConfig:

     _model_group_config: ModelGroupConfig
     router: Dict[str, Any]
+    _use_chat_completions_endpoint: bool = True
     extra_parameters: dict = field(default_factory=dict)

     @property
@@ -98,6 +101,14 @@ class LiteLLMRouterClientConfig:
     def litellm_model_list(self) -> List[Dict[str, Any]]:
         return self._convert_models_to_litellm_model_list()

+    @property
+    def litellm_router_settings(self) -> Dict[str, Any]:
+        return self._convert_router_to_litellm_router_settings()
+
+    @property
+    def use_chat_completions_endpoint(self) -> bool:
+        return self._use_chat_completions_endpoint
+
     def __post_init__(self) -> None:
         if not self.router:
             message = "Router cannot be empty."
@@ -121,7 +132,6 @@ class LiteLLMRouterClientConfig:
         Returns:
             LiteLLMRouterClientConfig
         """
-
         model_group_config = ModelGroupConfig.from_dict(config)

         # Copy config to avoid mutating the original
@@ -130,13 +140,18 @@ class LiteLLMRouterClientConfig:
         config_copy.pop(MODEL_GROUP_ID_CONFIG_KEY, None)
         config_copy.pop(MODELS_CONFIG_KEY, None)
         # Get the router settings
-        router_settings = config_copy.pop(ROUTER_CONFIG_KEY, None)
+        router_settings = config_copy.pop(ROUTER_CONFIG_KEY, {})
+        # Get the use_chat_completions_endpoint setting
+        use_chat_completions_endpoint = router_settings.get(
+            USE_CHAT_COMPLETIONS_ENDPOINT_CONFIG_KEY, True
+        )
         # The rest is considered as extra parameters
         extra_parameters = config_copy

         this = LiteLLMRouterClientConfig(
             _model_group_config=model_group_config,
             router=router_settings,
+            _use_chat_completions_endpoint=use_chat_completions_endpoint,
             extra_parameters=extra_parameters,
         )
         return this
@@ -150,14 +165,17 @@ class LiteLLMRouterClientConfig:
         return d

     def to_litellm_dict(self) -> dict:
-        litellm_model_list = self._convert_models_to_litellm_model_list()
-        d = {
+        return {
             **self.extra_parameters,
             MODEL_GROUP_ID_CONFIG_KEY: self.model_group_id,
-            MODEL_LIST_KEY: litellm_model_list,
-            ROUTER_CONFIG_KEY: self.router,
+            MODEL_LIST_KEY: self._convert_models_to_litellm_model_list(),
+            ROUTER_CONFIG_KEY: self._convert_router_to_litellm_router_settings(),
         }
-        return d
+
+    def _convert_router_to_litellm_router_settings(self) -> Dict[str, Any]:
+        _router_settings_copy = copy.deepcopy(self.router)
+        _router_settings_copy.pop(USE_CHAT_COMPLETIONS_ENDPOINT_CONFIG_KEY, None)
+        return _router_settings_copy

     def _convert_models_to_litellm_model_list(self) -> List[Dict[str, Any]]:
         litellm_model_list = []
@@ -172,7 +190,7 @@ class LiteLLMRouterClientConfig:
             prefix = get_prefix_from_provider(provider)

             # Determine whether to use model or deployment key based on the provider.
-            litellm_model_name_without_prefix = (
+            litellm_model_name = (
                 litellm_model_config[DEPLOYMENT_CONFIG_KEY]
                 if provider in DEPLOYMENT_CENTRIC_PROVIDERS
                 else litellm_model_config[MODEL_CONFIG_KEY]
@@ -180,7 +198,9 @@ class LiteLLMRouterClientConfig:

             # Set 'model' to a provider prefixed model name e.g. openai/gpt-4
             litellm_model_config[MODEL_CONFIG_KEY] = (
-                f"{prefix}/{litellm_model_name_without_prefix}"
+                litellm_model_name
+                if f"{prefix}/" in litellm_model_name
+                else f"{prefix}/{litellm_model_name}"
             )

             # Remove parameters that are None and not supported by LiteLLM.
rasa/shared/providers/embedding/litellm_router_embedding_client.py CHANGED
@@ -72,7 +72,7 @@ class LiteLLMRouterEmbeddingClient(
         return cls(
             model_group_id=client_config.model_group_id,
             model_configurations=client_config.litellm_model_list,
-            router_settings=client_config.router,
+            router_settings=client_config.litellm_router_settings,
             **client_config.extra_parameters,
         )

rasa/shared/providers/llm/_base_litellm_client.py CHANGED
@@ -221,6 +221,32 @@ class _BaseLiteLLMClient:
         )
         return formatted_response

+    def _format_text_completion_response(self, response: Any) -> LLMResponse:
+        """Parses the LiteLLM text completion response to Rasa format."""
+        formatted_response = LLMResponse(
+            id=response.id,
+            created=response.created,
+            choices=[choice.text for choice in response.choices],
+            model=response.model,
+        )
+        if (usage := response.usage) is not None:
+            prompt_tokens = (
+                num_tokens
+                if isinstance(num_tokens := usage.prompt_tokens, (int, float))
+                else 0
+            )
+            completion_tokens = (
+                num_tokens
+                if isinstance(num_tokens := usage.completion_tokens, (int, float))
+                else 0
+            )
+            formatted_response.usage = LLMUsage(prompt_tokens, completion_tokens)
+        structlogger.debug(
+            "base_litellm_client.formatted_response",
+            formatted_response=formatted_response.to_dict(),
+        )
+        return formatted_response
+
     @staticmethod
     def _ensure_certificates() -> None:
         """Configures SSL certificates for LiteLLM. This method is invoked during
rasa/shared/providers/llm/litellm_router_llm_client.py CHANGED
@@ -68,15 +68,61 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
         return cls(
             model_group_id=client_config.model_group_id,
             model_configurations=client_config.litellm_model_list,
-            router_settings=client_config.router,
+            router_settings=client_config.litellm_router_settings,
+            use_chat_completions_endpoint=client_config.use_chat_completions_endpoint,
             **client_config.extra_parameters,
         )

+    @suppress_logs(log_level=logging.WARNING)
+    def _text_completion(self, prompt: Union[List[str], str]) -> LLMResponse:
+        """
+        Synchronously generate completions for given prompt.
+
+        Args:
+            prompt: Prompt to generate the completion for.
+        Returns:
+            List of message completions.
+        Raises:
+            ProviderClientAPIException: If the API request fails.
+        """
+        try:
+            response = self.router_client.text_completion(
+                prompt=prompt, **self._completion_fn_args
+            )
+            return self._format_text_completion_response(response)
+        except Exception as e:
+            raise ProviderClientAPIException(e)
+
+    @suppress_logs(log_level=logging.WARNING)
+    async def _atext_completion(self, prompt: Union[List[str], str]) -> LLMResponse:
+        """
+        Asynchronously generate completions for given prompt.
+
+        Args:
+            prompt: Prompt to generate the completion for.
+        Returns:
+            List of message completions.
+        Raises:
+            ProviderClientAPIException: If the API request fails.
+        """
+        try:
+            response = await self.router_client.atext_completion(
+                prompt=prompt, **self._completion_fn_args
+            )
+            return self._format_text_completion_response(response)
+        except Exception as e:
+            raise ProviderClientAPIException(e)
+
     @suppress_logs(log_level=logging.WARNING)
     def completion(self, messages: Union[List[str], str]) -> LLMResponse:
         """
         Synchronously generate completions for given list of messages.

+        Method overrides the base class method to call the appropriate
+        completion method based on the configuration. If the chat completions
+        endpoint is enabled, the completion method is called. Otherwise, the
+        text_completion method is called.
+
         Args:
             messages: List of messages or a single message to generate the
                 completion for.
@@ -85,6 +131,8 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
         Raises:
             ProviderClientAPIException: If the API request fails.
         """
+        if not self._use_chat_completions_endpoint:
+            return self._text_completion(messages)
         try:
             formatted_messages = self._format_messages(messages)
             response = self.router_client.completion(
@@ -99,6 +147,11 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
         """
         Asynchronously generate completions for given list of messages.

+        Method overrides the base class method to call the appropriate
+        completion method based on the configuration. If the chat completions
+        endpoint is enabled, the completion method is called. Otherwise, the
+        text_completion method is called.
+
         Args:
             messages: List of messages or a single message to generate the
                 completion for.
@@ -107,6 +160,8 @@ class LiteLLMRouterLLMClient(_BaseLiteLLMRouterClient, _BaseLiteLLMClient):
         Raises:
             ProviderClientAPIException: If the API request fails.
         """
+        if not self._use_chat_completions_endpoint:
+            return await self._atext_completion(messages)
         try:
             formatted_messages = self._format_messages(messages)
             response = await self.router_client.acompletion(
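Note: the router client can now bypass the chat completions API entirely. A sketch of a router-enabled model group config as `LiteLLMRouterClientConfig.from_dict` would consume it (the provider string, model name, and `api_base` value below are placeholders, not values confirmed by this diff):

    config = {
        "id": "vllm-pool",
        "models": [
            {
                "provider": "self-hosted",
                "model": "meta-llama/Llama-3.1-8B-Instruct",
                "api_base": "http://localhost:8000/v1",
            },
        ],
        "router": {
            "routing_strategy": "simple-shuffle",
            # when False, completion()/acompletion() delegate to
            # _text_completion()/_atext_completion() on the LiteLLM router;
            # the key itself is stripped before the settings reach litellm.Router
            "use_chat_completions_endpoint": False,
        },
    }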
rasa/shared/providers/llm/self_hosted_llm_client.py CHANGED
@@ -10,13 +10,14 @@ import structlog
 from rasa.shared.constants import (
     SELF_HOSTED_VLLM_PREFIX,
     SELF_HOSTED_VLLM_API_KEY_ENV_VAR,
+    API_KEY,
 )
 from rasa.shared.providers._configs.self_hosted_llm_client_config import (
     SelfHostedLLMClientConfig,
 )
 from rasa.shared.exceptions import ProviderClientAPIException
 from rasa.shared.providers.llm._base_litellm_client import _BaseLiteLLMClient
-from rasa.shared.providers.llm.llm_response import LLMResponse, LLMUsage
+from rasa.shared.providers.llm.llm_response import LLMResponse
 from rasa.shared.utils.io import suppress_logs

 structlogger = structlog.get_logger()
@@ -61,7 +62,8 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
         self._api_version = api_version
         self._use_chat_completions_endpoint = use_chat_completions_endpoint
         self._extra_parameters = kwargs or {}
-        self._apply_dummy_api_key_if_missing()
+        if self._extra_parameters.get(API_KEY) is None:
+            self._apply_dummy_api_key_if_missing()

     @classmethod
     def from_config(cls, config: Dict[str, Any]) -> "SelfHostedLLMClient":
@@ -259,32 +261,6 @@ class SelfHostedLLMClient(_BaseLiteLLMClient):
             return super().completion(messages)
         return self._text_completion(messages)

-    def _format_text_completion_response(self, response: Any) -> LLMResponse:
-        """Parses the LiteLLM text completion response to Rasa format."""
-        formatted_response = LLMResponse(
-            id=response.id,
-            created=response.created,
-            choices=[choice.text for choice in response.choices],
-            model=response.model,
-        )
-        if (usage := response.usage) is not None:
-            prompt_tokens = (
-                num_tokens
-                if isinstance(num_tokens := usage.prompt_tokens, (int, float))
-                else 0
-            )
-            completion_tokens = (
-                num_tokens
-                if isinstance(num_tokens := usage.completion_tokens, (int, float))
-                else 0
-            )
-            formatted_response.usage = LLMUsage(prompt_tokens, completion_tokens)
-        structlogger.debug(
-            "base_litellm_client.formatted_response",
-            formatted_response=formatted_response.to_dict(),
-        )
-        return formatted_response
-
     @staticmethod
     def _apply_dummy_api_key_if_missing() -> None:
         if not os.getenv(SELF_HOSTED_VLLM_API_KEY_ENV_VAR):
rasa/shared/providers/router/_base_litellm_router_client.py CHANGED
@@ -1,4 +1,5 @@
 from typing import Any, Dict, List
+import os
 import structlog

 from litellm import Router
@@ -7,6 +8,12 @@ from rasa.shared.constants import (
     MODEL_LIST_KEY,
     MODEL_GROUP_ID_CONFIG_KEY,
     ROUTER_CONFIG_KEY,
+    SELF_HOSTED_VLLM_PREFIX,
+    SELF_HOSTED_VLLM_API_KEY_ENV_VAR,
+    LITELLM_PARAMS_KEY,
+    API_KEY,
+    MODEL_CONFIG_KEY,
+    USE_CHAT_COMPLETIONS_ENDPOINT_CONFIG_KEY,
 )
 from rasa.shared.exceptions import ProviderClientValidationError
 from rasa.shared.providers._configs.litellm_router_client_config import (
@@ -42,12 +49,15 @@ class _BaseLiteLLMRouterClient:
         model_group_id: str,
         model_configurations: List[Dict[str, Any]],
         router_settings: Dict[str, Any],
+        use_chat_completions_endpoint: bool = True,
         **kwargs: Any,
     ):
         self._model_group_id = model_group_id
         self._model_configurations = model_configurations
         self._router_settings = router_settings
+        self._use_chat_completions_endpoint = use_chat_completions_endpoint
         self._extra_parameters = kwargs or {}
+        self.additional_client_setup()
         try:
             resolved_model_configurations = (
                 self._resolve_env_vars_in_model_configurations()
@@ -67,6 +77,21 @@ class _BaseLiteLLMRouterClient:
             )
             raise ProviderClientValidationError(f"{event_info} Original error: {e}")

+    def additional_client_setup(self) -> None:
+        """Additional setup for the LiteLLM Router client."""
+        # If the model configuration is self-hosted VLLM, set a dummy API key if not
+        # provided. A bug in the LiteLLM library requires an API key to be set even if
+        # it is not required.
+        for model_configuration in self.model_configurations:
+            if (
+                f"{SELF_HOSTED_VLLM_PREFIX}/"
+                in model_configuration[LITELLM_PARAMS_KEY][MODEL_CONFIG_KEY]
+                and API_KEY not in model_configuration[LITELLM_PARAMS_KEY]
+                and not os.getenv(SELF_HOSTED_VLLM_API_KEY_ENV_VAR)
+            ):
+                os.environ[SELF_HOSTED_VLLM_API_KEY_ENV_VAR] = "dummy api key"
+                return
+
     @classmethod
     def from_config(cls, config: Dict[str, Any]) -> "_BaseLiteLLMRouterClient":
         """Instantiates a LiteLLM Router Embedding client from a configuration dict.
@@ -95,7 +120,8 @@ class _BaseLiteLLMRouterClient:
         return cls(
             model_group_id=client_config.model_group_id,
             model_configurations=client_config.litellm_model_list,
-            router_settings=client_config.router,
+            router_settings=client_config.litellm_router_settings,
+            use_chat_completions_endpoint=client_config.use_chat_completions_endpoint,
             **client_config.extra_parameters,
         )

@@ -119,6 +145,11 @@ class _BaseLiteLLMRouterClient:
         """Returns the instantiated LiteLLM Router client."""
         return self._router_client

+    @property
+    def use_chat_completions_endpoint(self) -> bool:
+        """Returns whether to use the chat completions endpoint."""
+        return self._use_chat_completions_endpoint
+
     @property
     def _litellm_extra_parameters(self) -> Dict[str, Any]:
         """
@@ -136,6 +167,9 @@ class _BaseLiteLLMRouterClient:
             MODEL_GROUP_ID_CONFIG_KEY: self.model_group_id,
             MODEL_LIST_KEY: self.model_configurations,
             ROUTER_CONFIG_KEY: self.router_settings,
+            USE_CHAT_COMPLETIONS_ENDPOINT_CONFIG_KEY: (
+                self.use_chat_completions_endpoint
+            ),
             **self._litellm_extra_parameters,
         }

rasa/shared/utils/common.py CHANGED
@@ -193,7 +193,7 @@ def mark_as_experimental_feature(feature_name: Text) -> None:
 def mark_as_beta_feature(feature_name: Text) -> None:
     """Warns users that they are using a beta feature."""
     logger.warning(
-        f"🔬 Beta Feature: {feature_name} is in beta. It may have unexpected"
+        f"🔬 Beta Feature: {feature_name} is in beta. It may have unexpected "
         "behaviour and might be changed in the future."
    )

rasa/telemetry.py CHANGED
@@ -35,9 +35,9 @@ from rasa.constants import (
 from rasa.shared.constants import (
     PROMPT_CONFIG_KEY,
     PROMPT_TEMPLATE_CONFIG_KEY,
-    MODEL_GROUP_CONFIG_KEY,
     LLM_API_HEALTH_CHECK_ENV_VAR,
     LLM_API_HEALTH_CHECK_DEFAULT_VALUE,
+    MODEL_GROUP_CONFIG_KEY,
 )
 from rasa.engine.storage.local_model_storage import LocalModelStorage
 from rasa.shared.constants import DOCS_URL_TELEMETRY, UTTER_ASK_PREFIX
@@ -1133,6 +1133,7 @@ def _get_llm_command_generator_config(config: Dict[str, Any]) -> Optional[Dict]:
     def extract_llm_command_generator_llm_client_settings(component: Dict) -> Dict:
         """Extracts settings related to LLM command generator."""
         llm_config = component.get(LLM_CONFIG_KEY, {})
+        # Config at this stage is not yet resolved, so read from `model_group`
         llm_model_group_id = llm_config.get(MODEL_GROUP_CONFIG_KEY)
         llm_model_name = llm_config.get(MODEL_CONFIG_KEY) or llm_config.get(
             MODEL_NAME_CONFIG_KEY
@@ -1174,6 +1175,7 @@ def _get_llm_command_generator_config(config: Dict[str, Any]) -> Optional[Dict]:
         if flow_retrieval_enabled
         else None
     )
+    # Config at this stage is not yet resolved, so read from `model_group`
     flow_retrieval_embedding_model_group_id = embeddings_config.get(
         MODEL_GROUP_CONFIG_KEY
     )
rasa/tracing/config.py CHANGED
@@ -131,7 +131,7 @@ def get_tracer_provider(endpoints_file: Text) -> Optional[TracerProvider]:

     if not cfg:
         logger.info(
-            f"No endpoint for tracing type available in {endpoints_file},"
+            f"No endpoint for tracing type available in {endpoints_file}, "
             f"tracing will not be configured."
         )
         return None
rasa/version.py CHANGED
@@ -1,3 +1,3 @@
 # this file will automatically be changed,
 # do not add anything but the version number here!
-__version__ = "3.11.0rc2"
+__version__ = "3.11.0rc3"
{rasa_pro-3.11.0rc2.dist-info → rasa_pro-3.11.0rc3.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: rasa-pro
-Version: 3.11.0rc2
+Version: 3.11.0rc3
 Summary: State-of-the-art open-core Conversational AI framework for Enterprises that natively leverages generative AI for effortless assistant development.
 Home-page: https://rasa.com
 Keywords: nlp,machine-learning,machine-learning-library,bot,bots,botkit,rasa conversational-agents,conversational-ai,chatbot,chatbot-framework,bot-framework
@@ -123,7 +123,7 @@ rasa/core/channels/botframework.py,sha256=xyc_n7DJ3uglqvkr0IrQ3xxPWgvaqSOLHWx9BU
123
123
  rasa/core/channels/callback.py,sha256=4LpjtJgQMAAXHwZrcVlVEUdpDTRqTe6n7XtwCusa75U,2750
124
124
  rasa/core/channels/channel.py,sha256=0cicx4SZsm0icCSO-F-e-Qk5W08ef11ozZRSrLfFPto,15107
125
125
  rasa/core/channels/console.py,sha256=fYhkSY8a_pn09ssjTczsKTALinABogpFJzzWTnL7MP8,8076
126
- rasa/core/channels/development_inspector.py,sha256=Yfg0fkSt-KTUwWd34P1O6KghKRfHJ3GITMRaDha-NJs,6775
126
+ rasa/core/channels/development_inspector.py,sha256=WWARmnKBAe6fc63P_qbzlHli9RjMhXG8OFqEqlxuqec,6842
127
127
  rasa/core/channels/facebook.py,sha256=ub8DCnTPe3_EyYtdYE49mo2Y-UNpURj6Qx9590oadeM,15816
128
128
  rasa/core/channels/hangouts.py,sha256=GjTmiVvE_OJ7Ig1-j2Aax95Bp1RFL-TUW80rnNcxxY0,11562
129
129
  rasa/core/channels/inspector/.eslintrc.cjs,sha256=MXLV2wxhPZqg3wvFlyi1fM363_7XxtWsB87RqWN4gzY,580
@@ -259,7 +259,7 @@ rasa/core/channels/telegram.py,sha256=5BrNECFM3qe9XjNpDb8Q9fbqCT5aKr5L6IH21W8sum
259
259
  rasa/core/channels/twilio.py,sha256=GsdjfplZdBj0fRB60bSggPF1DXFZ_x18V_dlcDy5VFs,5943
260
260
  rasa/core/channels/vier_cvg.py,sha256=PfvSluQqgJbP0JzZPFUvum3z7H55JPPeobcD-z5zCkw,13544
261
261
  rasa/core/channels/voice_ready/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
262
- rasa/core/channels/voice_ready/audiocodes.py,sha256=fhy2cYMsb0U26yNbt65MsMZbuwiwZQz38esCXscjAUQ,18801
262
+ rasa/core/channels/voice_ready/audiocodes.py,sha256=R-Fu_yrAYMA4Yvfhqd32u72_lk9w6Fm5NAiQCmpUMJY,18742
263
263
  rasa/core/channels/voice_ready/jambonz.py,sha256=Xks2sHoX6DANQHJdYciMkBxqzOE7qPqwgXWoiA1Y0DE,4154
264
264
  rasa/core/channels/voice_ready/jambonz_protocol.py,sha256=cVbp1wpAzl3c-CR_QEcGWrLROEhJKzRB68AXtf7DRQE,12998
265
265
  rasa/core/channels/voice_ready/twilio_voice.py,sha256=FcNHuJoNm4175YAgtURLUDFz92nXsrnZOZcnpK7PLR0,14519
@@ -267,9 +267,9 @@ rasa/core/channels/voice_ready/utils.py,sha256=e46Qo4v2KOvpHizS1WGT4olAsYKmWl-fi
267
267
  rasa/core/channels/voice_stream/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
268
268
  rasa/core/channels/voice_stream/asr/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
269
269
  rasa/core/channels/voice_stream/asr/asr_engine.py,sha256=D2sAW4orUzkh2fWGMA8gsTT6ilzNM33l7FPLuWacNFk,2694
270
- rasa/core/channels/voice_stream/asr/asr_event.py,sha256=G36QVDnGoRjkoSUVd4MWVsOCzscm-xUqU24GZY9q7co,251
271
- rasa/core/channels/voice_stream/asr/azure.py,sha256=3gkB0HqgEEsJN-C-QEiYby_sJbpv9I1Sj4UDQ3bvans,4876
272
- rasa/core/channels/voice_stream/asr/deepgram.py,sha256=9-Disu-ZpI5B3-Cu8a9gkuXchmhIhpIoM2T8z3Pgjo8,3309
270
+ rasa/core/channels/voice_stream/asr/asr_event.py,sha256=QDn8OdeQ-7uBedT6Eqvs8wyR5T4UNHQ32TSMPaRHXwQ,246
271
+ rasa/core/channels/voice_stream/asr/azure.py,sha256=empUOd9J-mzPLZOJolrlwRrp0VaZcZWO27b5ZFLzLZE,4818
272
+ rasa/core/channels/voice_stream/asr/deepgram.py,sha256=T_0_rrwy2oqwa8SW1X31KVFjeEykv5zjGYBjEATQBYc,3402
273
273
  rasa/core/channels/voice_stream/audio_bytes.py,sha256=3V0QQplPD-kVfebaaeVcKgV7pwIJyjnTenujVD3y3sY,340
274
274
  rasa/core/channels/voice_stream/browser_audio.py,sha256=4gIaz0rnKZfpuAuP17Vlqhbp0zHZFaA7t1oJJGC1Axo,3764
275
275
  rasa/core/channels/voice_stream/call_state.py,sha256=8mveygG0YTSXEzh2j6opgFnCYKuLKCj66ZIdZA6RCLU,764
@@ -280,7 +280,7 @@ rasa/core/channels/voice_stream/tts/tts_cache.py,sha256=dKYEMkIVuT2n4pJ-JMI2n1di
280
280
  rasa/core/channels/voice_stream/tts/tts_engine.py,sha256=hlVaA3SkRTUWF2DDUFNeMyQcHL5i3ubISrSuXUOzs6Y,1493
281
281
  rasa/core/channels/voice_stream/twilio_media_streams.py,sha256=UIvJYPw6AGUmOLX7aqZIE1mzESo6grcL8f_veIpoq1s,6339
282
282
  rasa/core/channels/voice_stream/util.py,sha256=Bwap0JCoFVOyMmWuD3uRnPUf7lnHRaQEfJF1-UoqxrA,1987
283
- rasa/core/channels/voice_stream/voice_channel.py,sha256=jFVi7HHEyAqtIjAUQOBBZs3SAkugbiwkeD6aeLfh4q0,15356
283
+ rasa/core/channels/voice_stream/voice_channel.py,sha256=LUNHAjEvRdvA4xpnfoAZkjYKLCO0iJwSCd8JDU_c7wA,16435
284
284
  rasa/core/channels/webexteams.py,sha256=yEdONmgIhZBV4TEOM5xwXEmuqP_RvFmQbHCw1JB2Y-0,4843
285
285
  rasa/core/concurrent_lock_store.py,sha256=ycd-aeJJWXIokMRimCdQFHdwuMfl512hZSUHE8oSd2c,7722
286
286
  rasa/core/constants.py,sha256=TqXFvenIWNf_7D0U462q0ltRD4MKzUGXiTWjHvxpFPs,3974
@@ -307,7 +307,7 @@ rasa/core/lock_store.py,sha256=fgdufUYXHEiTcD7NCCqgDAQRRtt7jrKafENHqFKOyi0,12504
307
307
  rasa/core/migrate.py,sha256=XNeYdiRytBmBNubOQ8KZOT_wR1o9aOpHHfBU9PCB2eg,14626
308
308
  rasa/core/nlg/__init__.py,sha256=0eQOZ0fB35b18oVhRFczcH30jJHgO8WXFhnbXGOxJek,240
309
309
  rasa/core/nlg/callback.py,sha256=rFkDe7CSAETASRefpERUT6-DHWPs0UXhx8x4tZ1QE0M,5238
310
- rasa/core/nlg/contextual_response_rephraser.py,sha256=mioOt9Yzy5SyPXugGWcaPfn-tz7Oz2RLOrPgC1c04XE,11049
310
+ rasa/core/nlg/contextual_response_rephraser.py,sha256=RqYig6NFnaXcW5vkAUSb54XWoBkeVWm2WYDCsafthBY,11055
311
311
  rasa/core/nlg/generator.py,sha256=YZ_rh--MeyzA6oXRqr_Ng-jcmPgbCmWMJJrquPmo__8,8436
312
312
  rasa/core/nlg/interpolator.py,sha256=Dc-J2Vf6vPPUbwIgZQm3AJDGvMaFTsh9Citd4CYuA9U,5189
313
313
  rasa/core/nlg/response.py,sha256=aHpy9BgjO7ub6v-sVPiQqutUA_7-UD1l3DJGVeQyp4k,5888
@@ -315,7 +315,7 @@ rasa/core/nlg/summarize.py,sha256=JO6VCfM_RnU0QX8Us42YkNOxC0ESKV1xcVH_sCW27ZU,21
315
315
  rasa/core/persistor.py,sha256=T5H03wHiJhCS8ybgUI1FpF0CfyLp7nU__vXIzNbxpS0,18368
316
316
  rasa/core/policies/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
317
317
  rasa/core/policies/ensemble.py,sha256=AjNOEy2Iubbe-LdKaoFUXG8ch6yPrg3bTvcTcAPmeOs,12959
318
- rasa/core/policies/enterprise_search_policy.py,sha256=fjxgJq_m2XWEUpzjwH56BZ1X5FfbOKNejMGd7yWXtcc,34111
318
+ rasa/core/policies/enterprise_search_policy.py,sha256=nG1vgZO5woxvXCZWayYXQzZkmxPemfsL0c62QkZcgcI,34126
319
319
  rasa/core/policies/enterprise_search_prompt_template.jinja2,sha256=dCS_seyBGxMQoMsOjjvPp0dd31OSzZCJSZeev1FJK5Q,1187
320
320
  rasa/core/policies/enterprise_search_prompt_with_citation_template.jinja2,sha256=vRQBs3q13UmvRRgqA8-DmRtM7tqZP2ngwMVJ4gy7lE0,3302
321
321
  rasa/core/policies/flow_policy.py,sha256=wGb1l_59cGM9ZaexSIK5uXFi618739oNfLOxx2FC0_Y,7490
@@ -323,7 +323,7 @@ rasa/core/policies/flows/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZ
323
323
  rasa/core/policies/flows/flow_exceptions.py,sha256=_FQuN-cerQDM1pivce9bz4zylh5UYkljvYS1gjDukHI,1527
324
324
  rasa/core/policies/flows/flow_executor.py,sha256=CdfSdl3v4JvOmnRY58VgzVH46aG6khDnwZAyGvothH0,25707
325
325
  rasa/core/policies/flows/flow_step_result.py,sha256=agjPrD6lahGSe2ViO5peBeoMdI9ngVGRSgtytgxmJmg,1360
326
- rasa/core/policies/intentless_policy.py,sha256=fqFME9Oy0fJEdYDEhrIOYpi0Nkun78NPdxeAExL6gJI,37861
326
+ rasa/core/policies/intentless_policy.py,sha256=-4RfqmY6iVUUr-p2JnzauW3cx1Tiu-EXCMOkOjMc5qY,37936
327
327
  rasa/core/policies/intentless_prompt_template.jinja2,sha256=KhIL3cruMmkxhrs5oVbqgSvK6ZiN_6TQ_jXrgtEB-ZY,677
328
328
  rasa/core/policies/memoization.py,sha256=XoRxUdYUGRfO47tAEyc5k5pUgt38a4fipO336EU5Vdc,19466
329
329
  rasa/core/policies/policy.py,sha256=HeVtIaV0dA1QcAG3vjdn-4g7-oUEJPL4u01ETJt78YA,27464
@@ -392,7 +392,7 @@ rasa/dialogue_understanding/generator/multi_step/multi_step_llm_command_generato
392
392
  rasa/dialogue_understanding/generator/nlu_command_adapter.py,sha256=_mltSp6JzB6tYmhxjTVxyjOzorO7-Poj2nLpHQrDsQs,9202
393
393
  rasa/dialogue_understanding/generator/single_step/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
394
394
  rasa/dialogue_understanding/generator/single_step/command_prompt_template.jinja2,sha256=nMayu-heJYH1QmcL1cFmXb8SeiJzfdDR_9Oy5IRUXsM,3937
395
- rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py,sha256=cMFOdJM1CBCdtHNTgv1gChXkAEbMGl0rrCptZDgIyLg,18102
395
+ rasa/dialogue_understanding/generator/single_step/single_step_llm_command_generator.py,sha256=R3jtKevzzq0un9WRHYCOrWWViGkAEun-XMhW6qg8ExU,18168
396
396
  rasa/dialogue_understanding/patterns/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
397
397
  rasa/dialogue_understanding/patterns/cancel.py,sha256=IQ4GVHNnNCqwKRLlAqBtLsgolcbPPnHsHdb3aOAFhEs,3868
398
398
  rasa/dialogue_understanding/patterns/cannot_handle.py,sha256=pg0zJHl-hDBnl6y9IyxZzW57yuMdfD8xI8eiK6EVrG8,1406
@@ -403,7 +403,7 @@ rasa/dialogue_understanding/patterns/collect_information.py,sha256=nfzAtvjIyP67C
403
403
  rasa/dialogue_understanding/patterns/completed.py,sha256=NqVaS_5-62UetGRXjR1eOGG3o6EPaIAQxbbkkNVEa9s,1278
404
404
  rasa/dialogue_understanding/patterns/continue_interrupted.py,sha256=4UCFxCReL1wR8ALU4B0LzpC9izdtKrp2nJ6chS_jVng,1355
405
405
  rasa/dialogue_understanding/patterns/correction.py,sha256=ZfSGzvgLvmbebEuisYP0Ke0lQEZziuDvq1oB4wMSFr4,11001
406
- rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml,sha256=6OGUtShlujrWbRvB8GS5gzRz9Sm7BgYFhGHAxGJIqwc,9662
406
+ rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml,sha256=FZ-GFPtJriAIFvVX3wV9BBeEI9AS-VoSLIaa9jNoWl4,9838
407
407
  rasa/dialogue_understanding/patterns/human_handoff.py,sha256=ocGrnLLRW-_aiXjmSUG9nReUGQbBUxFFt4FTBWSXARM,1132
408
408
  rasa/dialogue_understanding/patterns/internal_error.py,sha256=L8kEC6-TOZecugWtA2dm8lNG9gUevffyT5RotWeJIzM,1608
409
409
  rasa/dialogue_understanding/patterns/repeat.py,sha256=K7Ok3DGsB2mivA2AwaKfmDyagupSSySOo4oARx2eXm8,1152
@@ -468,7 +468,7 @@ rasa/engine/training/components.py,sha256=ZOSTbPEHth545q41B9geXKdEtIYZ3PaZdwSXrA
468
468
  rasa/engine/training/fingerprinting.py,sha256=lY4wHte37470MR6sBaERt0WT9NF06NUGTX9bRAh-W_4,2006
469
469
  rasa/engine/training/graph_trainer.py,sha256=fCnFZAv7UNxFjaLRY0MxPd18d3mO9It4Uk1Joq7Q3Mc,10636
470
470
  rasa/engine/training/hooks.py,sha256=u7HQXDJJT4pBzQUaIIfuM3YEreGjRdp0IEv6XUrRFtk,5469
471
- rasa/engine/validation.py,sha256=f9K2rI62mNuXMcwMpklc-2IUogs1hjCWN5S9mj5bRx8,46677
471
+ rasa/engine/validation.py,sha256=LK2WHGo1ywawQy9H9skl91EcyEK64tnpzPKvowpitbE,47448
472
472
  rasa/env.py,sha256=zLzQMkATVIZj6s4C7RsLLOLT8g6-Q96m5iBaHW_mEA8,480
473
473
  rasa/exceptions.py,sha256=acZiGDb5zC1ZGv1oBPHImBeRKxyHOA_mW6N8e9nOEaU,2116
474
474
  rasa/graph_components/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -579,12 +579,12 @@ rasa/nlu/utils/spacy_utils.py,sha256=pBvsCVKVuZ3b2Pjn-XuOVZ6lzZu9Voc2R4N1VczwtCM
  rasa/plugin.py,sha256=H_OZcHy_U3eAK-JHr43TSxcPqS0JEGcZkFvmumeeJEs,2670
  rasa/server.py,sha256=eLRWFPoJrdc9_eNu0pUj9p52O8MR28zIm4ZP9_MWiH0,57899
  rasa/shared/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rasa/shared/constants.py,sha256=hGKUV_s6mGzGSmJzRIZ00VCz27-IHoe0Vdq0c-a5qLM,10365
+ rasa/shared/constants.py,sha256=nWWQXcSLbSfmLeSHQT0Uf2Q8xF7bfgJ4mnUzV5njB6s,10372
  rasa/shared/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/shared/core/command_payload_reader.py,sha256=Vhiop9LWFawaEruRifBBrVmoEJ-fj1Tli1wBvsYu2_I,3563
- rasa/shared/core/constants.py,sha256=3nzRgBr8FprxIzJBroV-hEisWtSbD0iI0A9Y9_YSQHs,5704
+ rasa/shared/core/constants.py,sha256=WNFzABG-eiVREBL6aDZAmcNDiSmuSbvWuxXIMoX2Iv8,5704
  rasa/shared/core/conversation.py,sha256=tw1fD2XB3gOdQjDI8hHo5TAAmE2JYNogQGWe3rE929w,1385
- rasa/shared/core/domain.py,sha256=We7tzhKhpsMPPPHS4ZkeRjGR6SVklcWa0mGcBfsaw-A,81911
+ rasa/shared/core/domain.py,sha256=mttMcqbAabnOBPY2hvdg8K9pg57M7qlw7okPOu-4Kt8,81056
  rasa/shared/core/events.py,sha256=6yuOrZs8hZaR0FV1nC58l1u6qE4fegwrvL5nH1w7xY4,83719
  rasa/shared/core/flows/__init__.py,sha256=HszhIvEARpmyxABFc1MKYvj8oy04WiZW1xmCdToakbs,181
  rasa/shared/core/flows/flow.py,sha256=XzF9RUxLNyiGndnpvECV4pMczzc6g7UtgwokyXAoaTY,21496
@@ -665,7 +665,7 @@ rasa/shared/providers/_configs/azure_openai_client_config.py,sha256=gBqanDwr-eij
  rasa/shared/providers/_configs/client_config.py,sha256=Tvnv9CCpzoHB053lvuqth_JwjTAKtQosahW6BEWdQNU,1584
  rasa/shared/providers/_configs/default_litellm_client_config.py,sha256=ywD0EYWCKqNpx0fPyOavIwwS2BTd3q9QtNeAyIvOd8E,4318
  rasa/shared/providers/_configs/huggingface_local_embedding_client_config.py,sha256=laahM8xQJ-r0nAL8X2SnBM5kZAOiPNo-w0hv7BudAdk,8166
- rasa/shared/providers/_configs/litellm_router_client_config.py,sha256=s3WiASMC53pt2I2XNTTYc8AvvfX6sgQtxYgl9evKaqE,6283
+ rasa/shared/providers/_configs/litellm_router_client_config.py,sha256=pMuiU_3DcqkubF_hYKN_mWU6f0jZdQLmGzZyVVRcXX4,7231
  rasa/shared/providers/_configs/model_group_config.py,sha256=rpqOJYE_0UlbAlY9W9rArug71vsGBeBYvLFIQ2UFLeQ,5585
  rasa/shared/providers/_configs/openai_client_config.py,sha256=Rbagyk3bpKUYrCKCSSCltDBw99SRQgLVmPyIlsK4nnE,5805
  rasa/shared/providers/_configs/rasa_llm_client_config.py,sha256=C-So861b4T_0Tgx8dbl5BO_YGe18QlMH3baBuSIQR0A,2211
@@ -680,25 +680,25 @@ rasa/shared/providers/embedding/default_litellm_embedding_client.py,sha256=B-BSo
  rasa/shared/providers/embedding/embedding_client.py,sha256=rmFBKSKSihqmzpuZ-I0zVm1BBqTjL6V-K65gefoI35o,2839
  rasa/shared/providers/embedding/embedding_response.py,sha256=H55mSAL3LfVvDlBklaCCQ4AnNwCsQSQ1f2D0oPrx3FY,1204
  rasa/shared/providers/embedding/huggingface_local_embedding_client.py,sha256=Zo3gyj49h4LxXV7bx39TXpIPKlernG-5xzqXczTCbig,6913
- rasa/shared/providers/embedding/litellm_router_embedding_client.py,sha256=GzDYlRETeLXoPlDO8Kp61xF6V0TDitAV5FvI7sRak4Y,4499
+ rasa/shared/providers/embedding/litellm_router_embedding_client.py,sha256=H6Dog9jgpogkxDKwEHQ2Xok_vIjpOXxZjTbMPGbvTjQ,4516
  rasa/shared/providers/embedding/openai_embedding_client.py,sha256=XNRGE7apo2v3kWRrtgxE-Gq4rvNko3IiXtvgC4krDYE,5429
  rasa/shared/providers/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rasa/shared/providers/llm/_base_litellm_client.py,sha256=lf3bk47NgQrKYMgCr2ZsWgatuPK5bv2-DrkjfowvDp0,8960
+ rasa/shared/providers/llm/_base_litellm_client.py,sha256=O5PpLKBgvAroWRyJE5YbzuVPb0jXMHjae917F8HgqIU,10004
  rasa/shared/providers/llm/azure_openai_llm_client.py,sha256=A6sg2bvulNczuzu1J0V7CoAkXUx4EUxtiqI4R98_IKE,12976
  rasa/shared/providers/llm/default_litellm_llm_client.py,sha256=yvqd4ARoGSi9iqfE2uFvVEYRU6rICePBnEEKTduCc9w,2777
- rasa/shared/providers/llm/litellm_router_llm_client.py,sha256=HdnKLyZivhjphUjX11UCjBOKUnEVrh9HN1rO9D0QzoU,4517
+ rasa/shared/providers/llm/litellm_router_llm_client.py,sha256=llko2DfOpiLMpHxnW26I1Hb1wTn7VmZ_yu43GRXhqwQ,6815
  rasa/shared/providers/llm/llm_client.py,sha256=6-gMsEJqquhUPGXzNiq_ybM_McLWxAJ_QhbmWcLnb_Q,2358
  rasa/shared/providers/llm/llm_response.py,sha256=Ltmc8yk9cAqtK8QgwfZZywudM5ZQsT4y_AKAQ3q05hA,1490
  rasa/shared/providers/llm/openai_llm_client.py,sha256=uDdcugBcO3sfxbduc00eqaZdrJP0VFX5dkBd2Dem47M,4844
  rasa/shared/providers/llm/rasa_llm_client.py,sha256=SpgWn3uHHEezIcyvMfi468zRLw_W8VF6sIs-VIhElPc,3357
- rasa/shared/providers/llm/self_hosted_llm_client.py,sha256=VpFSXYmNDq06yAd8Iu-gi9i_JyO5VSk6OQXfZ46-sfE,10151
+ rasa/shared/providers/llm/self_hosted_llm_client.py,sha256=98FaF0-lYnytC46ulhrCAQjUKy9TI0U2QILml__UCzc,9170
  rasa/shared/providers/mappings.py,sha256=5S9FKtL5GkwK-L93HR_2RMvXnrZo3hyhb0oebAIbjSg,3675
  rasa/shared/providers/router/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rasa/shared/providers/router/_base_litellm_router_client.py,sha256=Je-KTdN58uSMGXg26k-lKgIaQ7J_8gwh9V2YFnKglHA,5362
+ rasa/shared/providers/router/_base_litellm_router_client.py,sha256=IPhoGMyGJ2FzGbBIXL45CmA6mc0azPeZzgAxDwioeLc,6898
  rasa/shared/providers/router/router_client.py,sha256=wS2uq_TfQAcYSQznhiMN-kxdUn3EdnmkdAeXuUyz1pE,2140
  rasa/shared/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/shared/utils/cli.py,sha256=bJpkf0VzzmtpmBnDnIl7SgvrntnBuaJQMHBXHm2WxcA,2916
- rasa/shared/utils/common.py,sha256=rsQ10XhdLY5w_U_PGl7kIIL43cSxGr2ezP00fIvfQ1I,9760
+ rasa/shared/utils/common.py,sha256=i6eDUzGQDPaKH17czblXh6-Q7gmhJTZXdCP9qbCJXo4,9761
  rasa/shared/utils/constants.py,sha256=ZNQu0RHM_7Q4A2hn6pD8XlKPEwzivNpfKiiQihwH8-U,141
  rasa/shared/utils/health_check/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/shared/utils/health_check/embeddings_health_check_mixin.py,sha256=oJyjIFpNi-L8i8YfXxekyee9Y66rGBzJXjP1Iqh3-sk,1034
@@ -723,9 +723,9 @@ rasa/studio/download.py,sha256=9uE4KKaHnID_3-Tt_E5_D00XTwhLlj4oxORY8CZRrZc,17188
  rasa/studio/results_logger.py,sha256=0gCkEaZ1CeFmxRHArK5my_DsLYjApZrxfiRMT5asE6A,4963
  rasa/studio/train.py,sha256=gfPtirITzBDo9gV4hqDNSwPYtVp_22cq8OWI6YIBgyk,4243
  rasa/studio/upload.py,sha256=NoMktbnYE6j-eqz_uZVdeUc-N-0-s6R6pR0nVFuuuSU,15578
- rasa/telemetry.py,sha256=6BsHEXrpmEB9VvJwpCOYSUx2hdDA5a9DuX9QTQW1_dM,64440
+ rasa/telemetry.py,sha256=huwxc0bqbE87DoPhzc8KqxdQ6H8Hcyh3ZZolmrRYciQ,64598
  rasa/tracing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- rasa/tracing/config.py,sha256=dNpIORLGG_HgDygrQ4x67yxprOD3XXyyjNE8wVBQLv0,12859
+ rasa/tracing/config.py,sha256=kA-xEY2oAc07gw1RzGeMuNnDKd_ZrVXT_B63pxGW-uI,12860
  rasa/tracing/constants.py,sha256=N_MJLStE3IkmPKQCQv42epd3jdBMJ4Ith1dVO65N5ho,2425
  rasa/tracing/instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/tracing/instrumentation/attribute_extractors.py,sha256=Nf2jgONkOaSWVGA-1iGJW52_RmZfi34pMuVqs7pTTM4,25974
@@ -769,9 +769,9 @@ rasa/utils/train_utils.py,sha256=f1NWpp5y6al0dzoQyyio4hc4Nf73DRoRSHDzEK6-C4E,212
  rasa/utils/url_tools.py,sha256=JQcHL2aLqLHu82k7_d9imUoETCm2bmlHaDpOJ-dKqBc,1218
  rasa/utils/yaml.py,sha256=KjbZq5C94ZP7Jdsw8bYYF7HASI6K4-C_kdHfrnPLpSI,2000
  rasa/validator.py,sha256=y7GR9lF-4CQXfs8bBZgQrqJ3jjTCz7o3AjVrHKarcCk,65621
- rasa/version.py,sha256=uvHkHAgcV5HbtaAP8ElbwoOCJ81HSK7UORBcV6MGYrQ,120
- rasa_pro-3.11.0rc2.dist-info/METADATA,sha256=hraidF8eaf9MX5tlnjkvUWkExR9uaGXtq0tSDy2_m08,10797
- rasa_pro-3.11.0rc2.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
- rasa_pro-3.11.0rc2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- rasa_pro-3.11.0rc2.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
- rasa_pro-3.11.0rc2.dist-info/RECORD,,
+ rasa/version.py,sha256=6rpTyuz8MNDHIabQzTt0fxvtKcyA2z64YnQFmnLA68k,120
+ rasa_pro-3.11.0rc3.dist-info/METADATA,sha256=pi4fTLnHLTpmPk-5USVxjMDt61vwSQfsG5IaZWmq9T8,10797
+ rasa_pro-3.11.0rc3.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
+ rasa_pro-3.11.0rc3.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ rasa_pro-3.11.0rc3.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
+ rasa_pro-3.11.0rc3.dist-info/RECORD,,
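
Note on reading the RECORD diff above: every entry is a CSV row in the standard wheel RECORD format, "path,sha256=<urlsafe base64 digest with padding stripped>,size in bytes", so a changed digest or size is what flags a file as modified between 3.11.0rc2 and 3.11.0rc3 (the RECORD file itself is listed with empty hash and size fields). The following is a minimal Python sketch of how such an entry can be recomputed and checked against an installed copy; the helper names record_hash and verify_record, and the assumption that RECORD sits inside rasa_pro-3.11.0rc3.dist-info/ one level below the site-packages root, are illustrative and not part of Rasa Pro itself.

import base64
import csv
import hashlib
from pathlib import Path

def record_hash(path: Path) -> str:
    # Hash a file the way RECORD does: sha256, urlsafe base64, "=" padding removed.
    digest = hashlib.sha256(path.read_bytes()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

def verify_record(record_path: Path) -> list:
    # Compare each "path,hash,size" row against the file on disk; collect mismatching paths.
    root = record_path.parent.parent  # assumes RECORD lives in <name>-<version>.dist-info/
    mismatches = []
    with record_path.open(newline="") as handle:
        for row in csv.reader(handle):
            if len(row) != 3:
                continue
            name, expected, _size = row
            if not expected:  # the RECORD row ("...dist-info/RECORD,,") carries no hash
                continue
            if record_hash(root / name) != expected:
                mismatches.append(name)
    return mismatches

For an intact 3.11.0rc3 install, verify_record(<site-packages>/rasa_pro-3.11.0rc3.dist-info/RECORD) should return an empty list; any path it returns is a file whose on-disk content no longer matches the digest recorded in the wheel.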