rasa-pro 3.14.0rc4__py3-none-any.whl → 3.14.2__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry, and is provided for informational purposes only.

Potentially problematic release: this version of rasa-pro has been flagged as possibly problematic.

Files changed (79)
  1. rasa/agents/agent_manager.py +7 -5
  2. rasa/agents/protocol/a2a/a2a_agent.py +13 -11
  3. rasa/agents/protocol/mcp/mcp_base_agent.py +49 -11
  4. rasa/agents/validation.py +4 -2
  5. rasa/builder/copilot/copilot_templated_message_provider.py +1 -1
  6. rasa/builder/validation_service.py +4 -0
  7. rasa/cli/arguments/data.py +9 -0
  8. rasa/cli/data.py +72 -6
  9. rasa/cli/interactive.py +3 -0
  10. rasa/cli/llm_fine_tuning.py +1 -0
  11. rasa/cli/project_templates/defaults.py +1 -0
  12. rasa/cli/validation/bot_config.py +2 -0
  13. rasa/constants.py +2 -1
  14. rasa/core/actions/action_clean_stack.py +32 -0
  15. rasa/core/actions/action_exceptions.py +1 -1
  16. rasa/core/actions/constants.py +4 -0
  17. rasa/core/actions/custom_action_executor.py +70 -12
  18. rasa/core/actions/grpc_custom_action_executor.py +41 -2
  19. rasa/core/actions/http_custom_action_executor.py +49 -25
  20. rasa/core/agent.py +4 -1
  21. rasa/core/available_agents.py +1 -1
  22. rasa/core/channels/voice_stream/browser_audio.py +3 -3
  23. rasa/core/channels/voice_stream/voice_channel.py +27 -17
  24. rasa/core/config/credentials.py +3 -3
  25. rasa/core/exceptions.py +1 -1
  26. rasa/core/featurizers/tracker_featurizers.py +3 -2
  27. rasa/core/persistor.py +7 -7
  28. rasa/core/policies/flows/agent_executor.py +84 -4
  29. rasa/core/policies/flows/flow_exceptions.py +5 -2
  30. rasa/core/policies/flows/flow_executor.py +52 -31
  31. rasa/core/policies/flows/mcp_tool_executor.py +7 -1
  32. rasa/core/policies/rule_policy.py +1 -1
  33. rasa/core/run.py +21 -5
  34. rasa/dialogue_understanding/commands/cancel_flow_command.py +1 -1
  35. rasa/dialogue_understanding/generator/llm_based_command_generator.py +6 -3
  36. rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +15 -7
  37. rasa/dialogue_understanding/generator/single_step/search_ready_llm_command_generator.py +15 -8
  38. rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +1 -1
  39. rasa/dialogue_understanding/processor/command_processor.py +13 -7
  40. rasa/e2e_test/e2e_config.py +4 -3
  41. rasa/engine/recipes/default_components.py +16 -6
  42. rasa/graph_components/validators/default_recipe_validator.py +10 -4
  43. rasa/model_manager/runner_service.py +1 -1
  44. rasa/nlu/classifiers/diet_classifier.py +2 -0
  45. rasa/privacy/privacy_config.py +1 -1
  46. rasa/shared/agents/auth/auth_strategy/oauth2_auth_strategy.py +4 -7
  47. rasa/shared/core/flows/flow.py +8 -2
  48. rasa/shared/core/slots.py +55 -24
  49. rasa/shared/core/training_data/story_reader/story_reader.py +1 -1
  50. rasa/shared/exceptions.py +23 -2
  51. rasa/shared/providers/_configs/azure_openai_client_config.py +4 -5
  52. rasa/shared/providers/_configs/default_litellm_client_config.py +4 -4
  53. rasa/shared/providers/_configs/litellm_router_client_config.py +3 -2
  54. rasa/shared/providers/_configs/openai_client_config.py +5 -7
  55. rasa/shared/providers/_configs/rasa_llm_client_config.py +4 -4
  56. rasa/shared/providers/_configs/self_hosted_llm_client_config.py +4 -4
  57. rasa/shared/providers/llm/_base_litellm_client.py +42 -14
  58. rasa/shared/providers/llm/litellm_router_llm_client.py +40 -17
  59. rasa/shared/providers/llm/self_hosted_llm_client.py +34 -32
  60. rasa/shared/utils/common.py +9 -1
  61. rasa/shared/utils/configs.py +5 -8
  62. rasa/shared/utils/llm.py +21 -4
  63. rasa/shared/utils/mcp/server_connection.py +7 -4
  64. rasa/studio/download.py +3 -0
  65. rasa/studio/prompts.py +1 -0
  66. rasa/studio/upload.py +4 -0
  67. rasa/utils/common.py +9 -0
  68. rasa/utils/endpoints.py +6 -0
  69. rasa/utils/installation_utils.py +111 -0
  70. rasa/utils/log_utils.py +20 -1
  71. rasa/utils/tensorflow/callback.py +2 -0
  72. rasa/utils/tensorflow/models.py +3 -0
  73. rasa/utils/train_utils.py +2 -0
  74. rasa/version.py +1 -1
  75. {rasa_pro-3.14.0rc4.dist-info → rasa_pro-3.14.2.dist-info}/METADATA +3 -3
  76. {rasa_pro-3.14.0rc4.dist-info → rasa_pro-3.14.2.dist-info}/RECORD +79 -78
  77. {rasa_pro-3.14.0rc4.dist-info → rasa_pro-3.14.2.dist-info}/NOTICE +0 -0
  78. {rasa_pro-3.14.0rc4.dist-info → rasa_pro-3.14.2.dist-info}/WHEEL +0 -0
  79. {rasa_pro-3.14.0rc4.dist-info → rasa_pro-3.14.2.dist-info}/entry_points.txt +0 -0

rasa/core/policies/flows/agent_executor.py CHANGED
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import asyncio
-from typing import Any, Dict, List, Optional, cast
+from typing import Any, Dict, List, Optional, Tuple, cast
 
 import structlog
 
@@ -24,6 +24,7 @@ from rasa.core.policies.flows.flow_step_result import (
     PauseFlowReturnPrediction,
 )
 from rasa.core.utils import get_slot_names_from_exit_conditions
+from rasa.dialogue_understanding.patterns.cancel import CancelPatternFlowStackFrame
 from rasa.dialogue_understanding.patterns.internal_error import (
     InternalErrorPatternFlowStackFrame,
 )
@@ -31,6 +32,7 @@ from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack
 from rasa.dialogue_understanding.stack.frames.flow_stack_frame import (
     AgentStackFrame,
     AgentState,
+    BaseFlowStackFrame,
 )
 from rasa.shared.agents.utils import get_protocol_type
 from rasa.shared.core.constants import (
@@ -46,9 +48,11 @@ from rasa.shared.core.events import (
     AgentResumed,
     AgentStarted,
     Event,
+    FlowCancelled,
     SlotSet,
     deserialise_events,
 )
+from rasa.shared.core.flows.flows_list import FlowsList
 from rasa.shared.core.flows.steps import (
     CallFlowStep,
 )
@@ -85,6 +89,7 @@ async def run_agent(
     step: CallFlowStep,
     tracker: DialogueStateTracker,
     slots: List[Slot],
+    flows: FlowsList,
 ) -> FlowStepResult:
     """Run an agent call step."""
     structlogger.debug(
@@ -177,9 +182,13 @@
     elif output.status == AgentStatus.COMPLETED:
         return _handle_agent_completed(output, final_events, stack, step)
     elif output.status == AgentStatus.FATAL_ERROR:
-        return _handle_agent_fatal_error(output, final_events, stack, step)
+        return _handle_agent_fatal_error(
+            output, final_events, stack, step, flows, tracker
+        )
     else:
-        return _handle_agent_unknown_status(output, final_events, stack, step)
+        return _handle_agent_unknown_status(
+            output, final_events, stack, step, flows, tracker
+        )
 
 
 async def _call_agent_with_retry(
@@ -299,6 +308,8 @@ def _handle_agent_unknown_status(
     final_events: List[Event],
     stack: DialogueStack,
     step: CallFlowStep,
+    flows: FlowsList,
+    tracker: DialogueStateTracker,
 ) -> FlowStepResult:
     """Handle unknown agent status.
 
@@ -307,6 +318,8 @@ def _handle_agent_unknown_status(
         final_events: List of events to be added to the final result
         stack: The dialogue stack
         step: The flow step that called the agent
+        flows: All flows
+        tracker: The dialogue state tracker
 
     Returns:
         FlowStepResult indicating to continue with internal error pattern
@@ -320,8 +333,21 @@ def _handle_agent_unknown_status(
         flow_id=step.flow_id,
         status=output.status,
     )
+    # remove the agent stack frame
     remove_agent_stack_frame(stack, step.call)
     final_events.append(AgentCancelled(agent_id=step.call, flow_id=step.flow_id))
+
+    # cancel the current active flow:
+    # push the cancel pattern stack frame and add the flow cancelled event
+    cancel_pattern_stack_frame, flow_cancelled_event = _cancel_flow(
+        stack, flows, tracker, step
+    )
+    if cancel_pattern_stack_frame:
+        stack.push(cancel_pattern_stack_frame)
+    if flow_cancelled_event:
+        final_events.append(flow_cancelled_event)
+
+    # trigger the internal error pattern
     stack.push(InternalErrorPatternFlowStackFrame())
     return ContinueFlowWithNextStep(events=final_events)
 
@@ -418,6 +444,8 @@ def _handle_agent_fatal_error(
     final_events: List[Event],
     stack: DialogueStack,
     step: CallFlowStep,
+    flows: FlowsList,
+    tracker: DialogueStateTracker,
 ) -> FlowStepResult:
     """Handle fatal error from agent execution.
 
@@ -426,13 +454,15 @@ def _handle_agent_fatal_error(
         final_events: List of events to be added to the final result
         stack: The dialogue stack
         step: The flow step that called the agent
+        flows: All flows
+        tracker: The dialogue state tracker
 
     Returns:
         FlowStepResult indicating to continue with internal error pattern
     """
     output.metadata = output.metadata or {}
     _update_agent_events(final_events, output.metadata)
-    # the agent failed, trigger pattern_internal_error
+    # the agent failed, cancel the current flow and trigger pattern_internal_error
     structlogger.error(
         "flow.step.run_agent.fatal_error",
         agent_name=step.call,
@@ -440,16 +470,66 @@ def _handle_agent_fatal_error(
         flow_id=step.flow_id,
         error_message=output.error_message,
     )
+    # remove the agent stack frame
     remove_agent_stack_frame(stack, step.call)
     final_events.append(
         AgentCancelled(
             agent_id=step.call, flow_id=step.flow_id, reason=output.error_message
         )
     )
+
+    # cancel the current active flow:
+    # push the cancel pattern stack frame and add the flow cancelled event
+    cancel_pattern_stack_frame, flow_cancelled_event = _cancel_flow(
+        stack, flows, tracker, step
+    )
+    if cancel_pattern_stack_frame:
+        stack.push(cancel_pattern_stack_frame)
+    if flow_cancelled_event:
+        final_events.append(flow_cancelled_event)
+
+    # push the internal error pattern stack frame
     stack.push(InternalErrorPatternFlowStackFrame())
     return ContinueFlowWithNextStep(events=final_events)
 
 
+def _cancel_flow(
+    stack: DialogueStack,
+    flows: FlowsList,
+    tracker: DialogueStateTracker,
+    step: CallFlowStep,
+) -> Tuple[Optional[CancelPatternFlowStackFrame], Optional[FlowCancelled]]:
+    """Cancel the current active flow.
+
+    Creates a cancel pattern stack frame and a flow cancelled event.
+    """
+    from rasa.dialogue_understanding.commands import CancelFlowCommand
+
+    cancel_pattern_stack_frame = None
+    flow_cancelled_event = None
+
+    top_frame = stack.top()
+
+    if isinstance(top_frame, BaseFlowStackFrame):
+        flow = flows.flow_by_id(step.flow_id)
+        flow_name = (
+            flow.readable_name(language=tracker.current_language)
+            if flow
+            else step.flow_id
+        )
+
+        canceled_frames = CancelFlowCommand.select_canceled_frames(stack)
+
+        cancel_pattern_stack_frame = CancelPatternFlowStackFrame(
+            canceled_name=flow_name,
+            canceled_frames=canceled_frames,
+        )
+
+        flow_cancelled_event = FlowCancelled(step.flow_id, step.id)
+
+    return cancel_pattern_stack_frame, flow_cancelled_event
+
+
 ################################################################################
 # Create predictions
 ################################################################################
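
Note on the change above: when a called agent finishes with a fatal or unrecognized status, the executor now removes the agent stack frame, cancels the calling flow (pushing the cancel pattern frame and recording a FlowCancelled event when a flow frame is on top of the stack), and only then triggers the internal error pattern. A toy sketch of that ordering, using plain strings and a list in place of the real frame classes and DialogueStack:

```python
# Toy illustration of the new fatal-error handling order; the "frames"
# are plain strings and the stack is a list, not the rasa-pro classes.
from typing import List, Tuple

def handle_fatal_agent_error(stack: List[str]) -> Tuple[List[str], List[str]]:
    events: List[str] = []
    # 1. drop the frame of the failed agent
    if stack and stack[-1] == "AgentStackFrame":
        stack.pop()
    events.append("AgentCancelled")
    # 2. cancel the flow that called the agent (only if a flow frame is on top)
    if stack and stack[-1] == "UserFlowStackFrame":
        stack.append("CancelPatternFlowStackFrame")
        events.append("FlowCancelled")
    # 3. surface the failure via the internal error pattern
    stack.append("InternalErrorPatternFlowStackFrame")
    return stack, events

print(handle_fatal_agent_error(["UserFlowStackFrame", "AgentStackFrame"]))
# (['UserFlowStackFrame', 'CancelPatternFlowStackFrame', 'InternalErrorPatternFlowStackFrame'],
#  ['AgentCancelled', 'FlowCancelled'])
```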

rasa/core/policies/flows/flow_exceptions.py CHANGED
@@ -5,14 +5,17 @@ from rasa.shared.exceptions import RasaException
 class FlowException(RasaException):
     """Exception that is raised when there is a problem with a flow."""
 
-    pass
+    def __init__(self, message: str = "Flow error occurred") -> None:
+        """Initialize FlowException with a message."""
+        super().__init__(message)
 
 
 class FlowCircuitBreakerTrippedException(FlowException):
     """Exception that is raised when the flow circuit breaker tripped.
 
     The circuit breaker gets tripped when a flow seems to be stuck in
-    executing steps and does not make any progress."""
+    executing steps and does not make any progress.
+    """
 
     def __init__(
         self, dialogue_stack: DialogueStack, number_of_steps_taken: int

rasa/core/policies/flows/flow_executor.py CHANGED
@@ -357,6 +357,10 @@ def reset_scoped_slots(
     flow_persistable_slots = current_flow.persisted_slots
 
     for step in current_flow.steps_with_calls_resolved:
+        # take persisted slots from called flows into consideration
+        # before resetting slots
+        if isinstance(step, CallFlowStep) and step.called_flow_reference:
+            flow_persistable_slots.extend(step.called_flow_reference.persisted_slots)
         if isinstance(step, CollectInformationFlowStep):
             # reset all slots scoped to the flow
             slot_name = step.collect
@@ -368,7 +372,22 @@ def reset_scoped_slots(
     # slots set by the set slots step should be reset after the flow ends
     # unless they are also used in a collect step where `reset_after_flow_ends`
     # is set to `False` or set in the `persisted_slots` list.
-    resettable_set_slots = [
+    resettable_set_slots = _get_resettable_set_slots(
+        current_flow, not_resettable_slot_names, flow_persistable_slots
+    )
+    for name in resettable_set_slots:
+        _reset_slot(name, tracker)
+
+    return events
+
+
+def _get_resettable_set_slots(
+    current_flow: Flow,
+    not_resettable_slot_names: set[Text],
+    flow_persistable_slots: List[Text],
+) -> List[Text]:
+    """Get list of slot names from SetSlotsFlowStep that should be reset."""
+    return [
         slot["key"]
         for step in current_flow.steps_with_calls_resolved
         if isinstance(step, SetSlotsFlowStep)
@@ -377,11 +396,6 @@ def reset_scoped_slots(
         and slot["key"] not in flow_persistable_slots
     ]
 
-    for name in resettable_set_slots:
-        _reset_slot(name, tracker)
-
-    return events
-
 
 async def advance_flows(
     tracker: DialogueStateTracker,
@@ -650,7 +664,7 @@ async def run_step(
         return _run_link_step(initial_events, stack, step)
 
     elif isinstance(step, CallFlowStep):
-        return await _run_call_step(initial_events, stack, step, tracker, slots)
+        return await _run_call_step(initial_events, stack, step, tracker, slots, flows)
 
     elif isinstance(step, SetSlotsFlowStep):
         return _run_set_slot_step(initial_events, step)
@@ -723,12 +737,13 @@ async def _run_call_step(
     step: CallFlowStep,
     tracker: DialogueStateTracker,
     slots: List[Slot],
+    flows: FlowsList,
 ) -> FlowStepResult:
     structlogger.debug("flow.step.run.call")
     if step.is_calling_mcp_tool():
         return await call_mcp_tool(initial_events, stack, step, tracker)
     elif step.is_calling_agent():
-        return await run_agent(initial_events, stack, step, tracker, slots)
+        return await run_agent(initial_events, stack, step, tracker, slots, flows)
     else:
         stack.push(
             UserFlowStackFrame(
@@ -852,25 +867,7 @@
             input_channel_name
         )
     else:
-        input_channel_name = tracker.get_latest_input_channel()
-        credentials_config = Configuration.get_instance().credentials
-
-        if credentials_config:
-            channel_config = (
-                credentials_config.channels.get(input_channel_name)
-                if input_channel_name
-                else None
-            )
-
-            silence_timeout = (
-                channel_config.get(
-                    SILENCE_TIMEOUT_CHANNEL_KEY, GLOBAL_SILENCE_TIMEOUT_DEFAULT_VALUE
-                )
-                if channel_config
-                else GLOBAL_SILENCE_TIMEOUT_DEFAULT_VALUE
-            )
-        else:
-            silence_timeout = GLOBAL_SILENCE_TIMEOUT_DEFAULT_VALUE
+        silence_timeout = _get_default_silence_timeout(tracker)
 
     structlogger.debug(
         "flow.step.run.use_channel_silence_timeout",
@@ -890,13 +887,37 @@
     events: List[Event], tracker: DialogueStateTracker
 ) -> None:
     current_silence_timeout = tracker.get_slot(SILENCE_TIMEOUT_SLOT)
-    endpoints = Configuration.get_instance().endpoints
-    global_silence_timeout = endpoints.interaction_handling.global_silence_timeout
+    default_silence_timeout = _get_default_silence_timeout(tracker)
 
-    if current_silence_timeout != global_silence_timeout:
+    if current_silence_timeout != default_silence_timeout:
         events.append(
             SlotSet(
                 SILENCE_TIMEOUT_SLOT,
-                global_silence_timeout,
+                default_silence_timeout,
             )
         )
+
+
+def _get_default_silence_timeout(tracker: DialogueStateTracker) -> float:
+    """Get the default silence timeout for the tracker."""
+    input_channel_name = tracker.get_latest_input_channel()
+    credentials_config = Configuration.get_instance().credentials
+
+    if credentials_config:
+        channel_config = (
+            credentials_config.channels.get(input_channel_name)
+            if input_channel_name
+            else None
+        )
+
+        silence_timeout = (
+            channel_config.get(
+                SILENCE_TIMEOUT_CHANNEL_KEY, GLOBAL_SILENCE_TIMEOUT_DEFAULT_VALUE
+            )
+            if channel_config
+            else GLOBAL_SILENCE_TIMEOUT_DEFAULT_VALUE
+        )
+    else:
+        silence_timeout = GLOBAL_SILENCE_TIMEOUT_DEFAULT_VALUE
+
+    return silence_timeout
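
The refactor above centralizes the fallback logic in `_get_default_silence_timeout`: a channel-specific `silence_timeout` from the credentials configuration wins, otherwise the global default applies. A standalone sketch of the same lookup, assuming a plain dict in place of the credentials object and an assumed default value:

```python
# Standalone sketch of the silence-timeout fallback; the config shape and
# the default value below are assumptions, not the rasa-pro objects.
from typing import Mapping, Optional

GLOBAL_SILENCE_TIMEOUT_DEFAULT = 7.0  # assumed default, in seconds

def resolve_silence_timeout(
    channels: Mapping[str, Mapping[str, float]],
    input_channel_name: Optional[str],
) -> float:
    """Return the channel's silence_timeout, or the global default."""
    channel_config = channels.get(input_channel_name) if input_channel_name else None
    if not channel_config:
        return GLOBAL_SILENCE_TIMEOUT_DEFAULT
    return channel_config.get("silence_timeout", GLOBAL_SILENCE_TIMEOUT_DEFAULT)

# A voice channel overriding the default vs. a channel without an override
print(resolve_silence_timeout({"browser_audio": {"silence_timeout": 4.0}}, "browser_audio"))  # 4.0
print(resolve_silence_timeout({}, "rest"))  # 7.0
```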

rasa/core/policies/flows/mcp_tool_executor.py CHANGED
@@ -1,4 +1,5 @@
 import json
+from datetime import timedelta
 from typing import Any, Dict, List, Optional
 
 import structlog
@@ -24,6 +25,7 @@ structlogger = structlog.get_logger()
 
 CONFIG_VALUE = "value"
 CONFIG_SLOT = "slot"
+TOOL_CALL_DEFATULT_TIMEOUT = 10  # seconds
 
 
 async def call_mcp_tool(
@@ -102,7 +104,11 @@ async def _execute_mcp_tool_call(
 
     # Call the tool with parameters
     mcp_server = await mcp_server_connection.ensure_active_session()
-    result = await mcp_server.call_tool(step.call, arguments)
+    result = await mcp_server.call_tool(
+        step.call,
+        arguments,
+        read_timeout_seconds=timedelta(seconds=TOOL_CALL_DEFATULT_TIMEOUT),
+    )
 
     # Handle tool execution result
     if result is None or result.isError:
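
The tool call above is now bounded by a 10-second read timeout passed as a `timedelta`. The same effect can be reasoned about with plain asyncio; the sketch below is a generic illustration of bounding an awaited call, not the MCP SDK API used in the diff:

```python
# Generic illustration of putting a hard timeout on an async tool call.
# The real change passes read_timeout_seconds=timedelta(seconds=10) to the
# MCP session instead of wrapping the call in asyncio.wait_for.
import asyncio

TOOL_CALL_TIMEOUT_SECONDS = 10

async def slow_tool() -> str:
    await asyncio.sleep(30)  # stand-in for an MCP server that hangs
    return "done"

async def main() -> None:
    try:
        print(await asyncio.wait_for(slow_tool(), timeout=TOOL_CALL_TIMEOUT_SECONDS))
    except asyncio.TimeoutError:
        print("tool call timed out; treated as a tool error")

asyncio.run(main())
```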

rasa/core/policies/rule_policy.py CHANGED
@@ -84,7 +84,7 @@ class InvalidRule(RasaException):
     """Exception that can be raised when rules are not valid."""
 
     def __init__(self, message: Text) -> None:
-        super().__init__()
+        super().__init__(message)
         self.message = message
 
     def __str__(self) -> Text:

rasa/core/run.py CHANGED
@@ -5,6 +5,7 @@ import platform
 import uuid
 import warnings
 from asyncio import AbstractEventLoop
+from copy import deepcopy
 from functools import partial
 from typing import (
     Any,
@@ -112,7 +113,11 @@ def _create_single_channel(
     if channel in BUILTIN_CHANNELS:
         channel_class = BUILTIN_CHANNELS[channel]
 
-        return channel_class.from_credentials(credentials)
+        channel_credentials = deepcopy(credentials)
+        channel_credentials.pop(
+            "silence_timeout", None
+        ) if channel_credentials else None
+        return channel_class.from_credentials(channel_credentials)
     elif channel in channels_with_optional_deps:
         # Channel is known but not available due to missing dependency
         dependency = channels_with_optional_deps[channel]
@@ -328,10 +333,21 @@ def serve_application(
 
     logger.info(f"Starting Rasa server on {protocol}://{interface}:{port}")
 
-    app.register_listener(
-        partial(load_agent_on_start, model_path, endpoints, remote_storage, sub_agents),
-        "before_server_start",
-    )
+    async def load_agent_and_check_failure(app: Sanic, loop: AbstractEventLoop) -> None:
+        """Load agent and exit if it fails in non-debug mode."""
+        try:
+            await load_agent_on_start(
+                model_path, endpoints, remote_storage, sub_agents, app, loop
+            )
+        except Exception as e:
+            is_debug = logger.isEnabledFor(logging.DEBUG)
+            if is_debug:
+                raise e  # show traceback in debug
+            # non-debug: log and exit without starting server
+            logger.error(f"Failed to load agent: {e}")
+            os._exit(1)  # Any other exit method would show a traceback.
+
+    app.register_listener(load_agent_and_check_failure, "before_server_start")
 
     app.register_listener(
         licensing.validate_limited_server_license, "after_server_start"
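
The server startup now fails fast: if the agent cannot be loaded, the listener logs the error and, outside debug mode, terminates with `os._exit(1)`, which ends the process immediately without raising `SystemExit`, so no traceback is printed from inside the listener. A minimal sketch of the same pattern with a hypothetical loader:

```python
# Minimal fail-fast Sanic startup listener; load_agent() is a hypothetical
# stand-in for the real agent-loading coroutine.
import logging
import os
from sanic import Sanic

logger = logging.getLogger(__name__)
app = Sanic("fail_fast_example")

async def load_agent() -> None:  # hypothetical loader
    raise RuntimeError("model could not be loaded")

@app.before_server_start
async def load_agent_or_exit(app: Sanic, loop) -> None:
    try:
        await load_agent()
    except Exception as exc:
        if logger.isEnabledFor(logging.DEBUG):
            raise  # keep the full traceback when debugging
        logger.error("Failed to load agent: %s", exc)
        os._exit(1)  # exit immediately instead of starting a broken server
```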

rasa/dialogue_understanding/commands/cancel_flow_command.py CHANGED
@@ -56,7 +56,7 @@ class CancelFlowCommand(Command):
         Returns:
             The frames that were canceled.
         """
-        canceled_frames = []
+        canceled_frames: List[str] = []
         # we need to go through the original stack dump in reverse order
         # to find the frames that were canceled. we cancel everything from
         # the top of the stack until we hit the user flow that was canceled.

rasa/dialogue_understanding/generator/llm_based_command_generator.py CHANGED
@@ -62,7 +62,9 @@ structlogger = structlog.get_logger()
 class LLMBasedCommandGenerator(
     LLMHealthCheckMixin, GraphComponent, CommandGenerator, ABC
 ):
-    """An abstract class defining interface and common functionality
+    """This class provides common functionality for all LLM-based command generators.
+
+    An abstract class defining interface and common functionality
     of an LLM-based command generators.
     """
 
@@ -174,8 +176,9 @@ class LLMBasedCommandGenerator(
     def train(
         self, training_data: TrainingData, flows: FlowsList, domain: Domain
     ) -> Resource:
-        """Train the llm based command generator. Stores all flows into a vector
-        store.
+        """Trains the LLM-based command generator and prepares flow retrieval data.
+
+        Stores all flows into a vector store.
         """
         self.perform_llm_health_check(
             self.config.get(LLM_CONFIG_KEY),

rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py CHANGED
@@ -168,6 +168,20 @@ class CompactLLMCommandGenerator(SingleStepBasedLLMCommandGenerator):
         if prompt_template is not None:
             return prompt_template
 
+        # Try to load the template from the given path or fallback to the default for
+        # the component.
+        custom_prompt_template_path = config.get(PROMPT_TEMPLATE_CONFIG_KEY)
+        if custom_prompt_template_path is not None:
+            custom_prompt_template = get_prompt_template(
+                custom_prompt_template_path,
+                None,  # Default will be based on the model
+                log_source_component=log_source_component,
+                log_source_method=log_context,
+            )
+            if custom_prompt_template is not None:
+                return custom_prompt_template
+
+        # Fallback to the default prompt template based on the model.
         default_command_prompt_template = get_default_prompt_template_based_on_model(
             llm_config=config.get(LLM_CONFIG_KEY, {}) or {},
             model_prompt_mapping=cls.get_model_prompt_mapper(),
@@ -177,10 +191,4 @@ class CompactLLMCommandGenerator(SingleStepBasedLLMCommandGenerator):
             log_source_method=log_context,
         )
 
-        # Return the prompt template either from the config or the default prompt.
-        return get_prompt_template(
-            config.get(PROMPT_TEMPLATE_CONFIG_KEY),
-            default_command_prompt_template,
-            log_source_component=log_source_component,
-            log_source_method=log_context,
-        )
+        return default_command_prompt_template
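
Both single-step generators now resolve their prompt template with an explicit precedence: a template stored with the trained model, then a template loaded from the user-configured `prompt_template` path, then the built-in default chosen for the model. A simplified sketch of that precedence with hypothetical stand-in helpers:

```python
# Simplified sketch of the template precedence; the helper names and
# signatures here are illustrative stand-ins, not the rasa-pro API.
from typing import Callable, Optional

def resolve_prompt_template(
    stored_template: Optional[str],
    configured_path: Optional[str],
    read_template_file: Callable[[str], Optional[str]],
    model_default: str,
) -> str:
    # 1. a template persisted with the trained model wins
    if stored_template is not None:
        return stored_template
    # 2. otherwise try the path configured under `prompt_template`
    if configured_path is not None:
        custom = read_template_file(configured_path)
        if custom is not None:
            return custom
    # 3. fall back to the default template selected for the model
    return model_default

print(resolve_prompt_template(None, None, lambda path: None, "default template"))
```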

rasa/dialogue_understanding/generator/single_step/search_ready_llm_command_generator.py CHANGED
@@ -165,7 +165,20 @@ class SearchReadyLLMCommandGenerator(SingleStepBasedLLMCommandGenerator):
         if prompt_template is not None:
             return prompt_template
 
-        # Get the default prompt template based on the model name.
+        # Try to load the template from the given path or fallback to the default for
+        # the component.
+        custom_prompt_template_path = config.get(PROMPT_TEMPLATE_CONFIG_KEY)
+        if custom_prompt_template_path is not None:
+            custom_prompt_template = get_prompt_template(
+                custom_prompt_template_path,
+                None,  # Default will be based on the model
+                log_source_component=log_source_component,
+                log_source_method=log_context,
+            )
+            if custom_prompt_template is not None:
+                return custom_prompt_template
+
+        # Fallback to the default prompt template based on the model.
         default_command_prompt_template = get_default_prompt_template_based_on_model(
             llm_config=config.get(LLM_CONFIG_KEY, {}) or {},
             model_prompt_mapping=cls.get_model_prompt_mapper(),
@@ -175,10 +188,4 @@ class SearchReadyLLMCommandGenerator(SingleStepBasedLLMCommandGenerator):
             log_source_method=log_context,
         )
 
-        # Return the prompt template either from the config or the default prompt.
-        return get_prompt_template(
-            config.get(PROMPT_TEMPLATE_CONFIG_KEY),
-            default_command_prompt_template,
-            log_source_component=log_source_component,
-            log_source_method=log_context,
-        )
+        return default_command_prompt_template

rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml CHANGED
@@ -233,7 +233,7 @@ flows:
       - collect: interrupted_flow_to_continue
        description: "Fill this slot with the name of the flow the user wants to continue. If the user does not want to continue any of the interrupted flows, fill this slot with 'none'."
        next:
-         - if: slots.interrupted_flow_to_continue != "none"
+         - if: slots.interrupted_flow_to_continue is not "none"
           then:
             - action: action_continue_interrupted_flow
               next: END

rasa/dialogue_understanding/processor/command_processor.py CHANGED
@@ -499,10 +499,22 @@ def clean_up_commands(
         else:
             clean_commands.append(command)
 
+    # ensure that there is only one command of a certain command type
+    clean_commands = ensure_max_number_of_command_type(
+        clean_commands, CannotHandleCommand, 1
+    )
+    clean_commands = ensure_max_number_of_command_type(
+        clean_commands, RepeatBotMessagesCommand, 1
+    )
+    clean_commands = ensure_max_number_of_command_type(
+        clean_commands, ChitChatAnswerCommand, 1
+    )
+
     # Replace CannotHandleCommands with ContinueAgentCommand when an agent is active
     # to keep the agent running, but preserve chitchat
     clean_commands = _replace_cannot_handle_with_continue_agent(clean_commands, tracker)
 
+    # filter out cannot handle commands if there are other commands present
     # when coexistence is enabled, by default there will be a SetSlotCommand
     # for the ROUTE_TO_CALM_SLOT slot.
     if tracker.has_coexistence_routing_slot and len(clean_commands) > 2:
@@ -510,12 +522,6 @@ def clean_up_commands(
         clean_commands = filter_cannot_handle_command(clean_commands)
     elif not tracker.has_coexistence_routing_slot and len(clean_commands) > 1:
         clean_commands = filter_cannot_handle_command(clean_commands)
-    clean_commands = ensure_max_number_of_command_type(
-        clean_commands, RepeatBotMessagesCommand, 1
-    )
-    clean_commands = ensure_max_number_of_command_type(
-        clean_commands, ContinueAgentCommand, 1
-    )
     structlogger.debug(
         "command_processor.clean_up_commands.final_commands",
         command=clean_commands,
@@ -580,7 +586,7 @@ def clean_up_start_flow_command(
     # drop a start flow command if the starting flow is equal
     # to the currently active flow
     structlogger.debug(
-        "command_processor.clean_up_commands." "skip_command_flow_already_active",
+        "command_processor.clean_up_commands.skip_command_flow_already_active",
        command=command,
     )
     return clean_commands
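
The clean-up above now keeps at most one CannotHandle, RepeatBotMessages and ChitChatAnswer command before the coexistence filtering runs. A generic, order-preserving "keep at most N of a type" filter looks roughly like this (illustrative, not the rasa-pro helper):

```python
# Generic "keep at most n items of a given type" filter, preserving order.
from typing import Any, List, Type

def keep_at_most_n_of_type(items: List[Any], item_type: Type, n: int) -> List[Any]:
    kept = 0
    result: List[Any] = []
    for item in items:
        if isinstance(item, item_type):
            if kept >= n:
                continue  # drop extra occurrences beyond the allowed count
            kept += 1
        result.append(item)
    return result

# Only the first int survives; everything else keeps its position.
print(keep_at_most_n_of_type([1, "a", 2, "b", 3], int, 1))  # [1, 'a', 'b']
```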

rasa/e2e_test/e2e_config.py CHANGED
@@ -72,9 +72,10 @@ class LLMJudgeConfig(BaseModel):
 
         llm_config = resolve_model_client_config(llm_config)
         llm_config, llm_extra_parameters = cls.extract_attributes(llm_config)
-        llm_config = combine_custom_and_default_config(
-            llm_config, cls.get_default_llm_config()
-        )
+        if not llm_config:
+            llm_config = combine_custom_and_default_config(
+                llm_config, cls.get_default_llm_config()
+            )
         embeddings_config = resolve_model_client_config(embeddings)
         embeddings_config, embeddings_extra_parameters = cls.extract_attributes(
             embeddings_config

rasa/engine/recipes/default_components.py CHANGED
@@ -27,22 +27,32 @@ from rasa.shared.utils.common import conditional_import
 
 # components dependent on tensorflow
 TEDPolicy, TED_POLICY_AVAILABLE = conditional_import(
-    "rasa.core.policies.ted_policy", "TEDPolicy"
+    "rasa.core.policies.ted_policy", "TEDPolicy", check_installation_setup=True
 )
 UnexpecTEDIntentPolicy, UNEXPECTED_INTENT_POLICY_AVAILABLE = conditional_import(
-    "rasa.core.policies.unexpected_intent_policy", "UnexpecTEDIntentPolicy"
+    "rasa.core.policies.unexpected_intent_policy",
+    "UnexpecTEDIntentPolicy",
+    check_installation_setup=True,
 )
 DIETClassifier, DIET_CLASSIFIER_AVAILABLE = conditional_import(
-    "rasa.nlu.classifiers.diet_classifier", "DIETClassifier"
+    "rasa.nlu.classifiers.diet_classifier",
+    "DIETClassifier",
+    check_installation_setup=True,
 )
 ConveRTFeaturizer, CONVERT_FEATURIZER_AVAILABLE = conditional_import(
-    "rasa.nlu.featurizers.dense_featurizer.convert_featurizer", "ConveRTFeaturizer"
+    "rasa.nlu.featurizers.dense_featurizer.convert_featurizer",
+    "ConveRTFeaturizer",
+    check_installation_setup=True,
 )
 LanguageModelFeaturizer, LANGUAGE_MODEL_FEATURIZER_AVAILABLE = conditional_import(
-    "rasa.nlu.featurizers.dense_featurizer.lm_featurizer", "LanguageModelFeaturizer"
+    "rasa.nlu.featurizers.dense_featurizer.lm_featurizer",
+    "LanguageModelFeaturizer",
+    check_installation_setup=True,
 )
 ResponseSelector, RESPONSE_SELECTOR_AVAILABLE = conditional_import(
-    "rasa.nlu.selectors.response_selector", "ResponseSelector"
+    "rasa.nlu.selectors.response_selector",
+    "ResponseSelector",
+    check_installation_setup=True,
 )
 
 # components dependent on skops

rasa/graph_components/validators/default_recipe_validator.py CHANGED
@@ -40,16 +40,22 @@ from rasa.shared.utils.common import conditional_import
 
 # Conditional imports for TensorFlow-dependent components
 TEDPolicy, TED_POLICY_AVAILABLE = conditional_import(
-    "rasa.core.policies.ted_policy", "TEDPolicy"
+    "rasa.core.policies.ted_policy", "TEDPolicy", check_installation_setup=True
 )
 UnexpecTEDIntentPolicy, UNEXPECTED_INTENT_POLICY_AVAILABLE = conditional_import(
-    "rasa.core.policies.unexpected_intent_policy", "UnexpecTEDIntentPolicy"
+    "rasa.core.policies.unexpected_intent_policy",
+    "UnexpecTEDIntentPolicy",
+    check_installation_setup=True,
 )
 DIETClassifier, DIET_CLASSIFIER_AVAILABLE = conditional_import(
-    "rasa.nlu.classifiers.diet_classifier", "DIETClassifier"
+    "rasa.nlu.classifiers.diet_classifier",
+    "DIETClassifier",
+    check_installation_setup=True,
 )
 ResponseSelector, RESPONSE_SELECTOR_AVAILABLE = conditional_import(
-    "rasa.nlu.selectors.response_selector", "ResponseSelector"
+    "rasa.nlu.selectors.response_selector",
+    "ResponseSelector",
+    check_installation_setup=True,
 )
 
 # Conditional imports for nlu components requiring other dependencies than tensorflow
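
Both modules above now pass an installation check into `conditional_import`, so a missing or broken TensorFlow setup flags the component as unavailable instead of raising at import time. A minimal sketch of that pattern; the `installation_check` argument is a hypothetical stand-in for the `check_installation_setup` behavior, not the rasa-pro helper itself:

```python
# Minimal conditional-import helper returning (attribute, available_flag);
# installation_check is a hypothetical stand-in for check_installation_setup.
import importlib
from typing import Any, Callable, Optional, Tuple

def conditional_import(
    module_name: str,
    attribute: str,
    installation_check: Optional[Callable[[], bool]] = None,
) -> Tuple[Optional[Any], bool]:
    if installation_check is not None and not installation_check():
        return None, False
    try:
        module = importlib.import_module(module_name)
        return getattr(module, attribute), True
    except (ImportError, AttributeError):
        return None, False

ndarray, numpy_available = conditional_import("numpy", "ndarray")
_, missing_available = conditional_import("definitely_not_installed", "Thing")
print(numpy_available, missing_available)
```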