rasa-pro 3.14.1__py3-none-any.whl → 3.15.0a3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of rasa-pro might be problematic.

Files changed (69)
  1. rasa/builder/config.py +4 -0
  2. rasa/builder/constants.py +5 -0
  3. rasa/builder/copilot/copilot.py +28 -9
  4. rasa/builder/copilot/models.py +251 -32
  5. rasa/builder/document_retrieval/inkeep_document_retrieval.py +2 -0
  6. rasa/builder/download.py +111 -1
  7. rasa/builder/evaluator/__init__.py +0 -0
  8. rasa/builder/evaluator/constants.py +15 -0
  9. rasa/builder/evaluator/copilot_executor.py +89 -0
  10. rasa/builder/evaluator/dataset/models.py +173 -0
  11. rasa/builder/evaluator/exceptions.py +4 -0
  12. rasa/builder/evaluator/response_classification/__init__.py +0 -0
  13. rasa/builder/evaluator/response_classification/constants.py +66 -0
  14. rasa/builder/evaluator/response_classification/evaluator.py +346 -0
  15. rasa/builder/evaluator/response_classification/langfuse_runner.py +463 -0
  16. rasa/builder/evaluator/response_classification/models.py +61 -0
  17. rasa/builder/evaluator/scripts/__init__.py +0 -0
  18. rasa/builder/evaluator/scripts/run_response_classification_evaluator.py +152 -0
  19. rasa/builder/jobs.py +208 -1
  20. rasa/builder/logging_utils.py +25 -24
  21. rasa/builder/main.py +6 -1
  22. rasa/builder/models.py +23 -0
  23. rasa/builder/project_generator.py +29 -10
  24. rasa/builder/service.py +205 -46
  25. rasa/builder/telemetry/__init__.py +0 -0
  26. rasa/builder/telemetry/copilot_langfuse_telemetry.py +384 -0
  27. rasa/builder/{copilot/telemetry.py → telemetry/copilot_segment_telemetry.py} +21 -3
  28. rasa/builder/training_service.py +13 -1
  29. rasa/builder/validation_service.py +2 -1
  30. rasa/constants.py +1 -0
  31. rasa/core/actions/action_clean_stack.py +32 -0
  32. rasa/core/actions/constants.py +4 -0
  33. rasa/core/actions/custom_action_executor.py +70 -12
  34. rasa/core/actions/grpc_custom_action_executor.py +41 -2
  35. rasa/core/actions/http_custom_action_executor.py +49 -25
  36. rasa/core/channels/voice_stream/voice_channel.py +14 -2
  37. rasa/core/policies/flows/flow_executor.py +20 -6
  38. rasa/core/run.py +15 -4
  39. rasa/dialogue_understanding/generator/llm_based_command_generator.py +6 -3
  40. rasa/dialogue_understanding/generator/single_step/compact_llm_command_generator.py +15 -7
  41. rasa/dialogue_understanding/generator/single_step/search_ready_llm_command_generator.py +15 -8
  42. rasa/dialogue_understanding/processor/command_processor.py +49 -7
  43. rasa/e2e_test/e2e_config.py +4 -3
  44. rasa/engine/recipes/default_components.py +16 -6
  45. rasa/graph_components/validators/default_recipe_validator.py +10 -4
  46. rasa/nlu/classifiers/diet_classifier.py +2 -0
  47. rasa/shared/core/slots.py +55 -24
  48. rasa/shared/providers/_configs/azure_openai_client_config.py +4 -5
  49. rasa/shared/providers/_configs/default_litellm_client_config.py +4 -4
  50. rasa/shared/providers/_configs/litellm_router_client_config.py +3 -2
  51. rasa/shared/providers/_configs/openai_client_config.py +5 -7
  52. rasa/shared/providers/_configs/rasa_llm_client_config.py +4 -4
  53. rasa/shared/providers/_configs/self_hosted_llm_client_config.py +4 -4
  54. rasa/shared/providers/llm/_base_litellm_client.py +42 -14
  55. rasa/shared/providers/llm/litellm_router_llm_client.py +38 -15
  56. rasa/shared/providers/llm/self_hosted_llm_client.py +34 -32
  57. rasa/shared/utils/common.py +9 -1
  58. rasa/shared/utils/configs.py +5 -8
  59. rasa/utils/common.py +9 -0
  60. rasa/utils/endpoints.py +8 -0
  61. rasa/utils/installation_utils.py +111 -0
  62. rasa/utils/tensorflow/callback.py +2 -0
  63. rasa/utils/train_utils.py +2 -0
  64. rasa/version.py +1 -1
  65. {rasa_pro-3.14.1.dist-info → rasa_pro-3.15.0a3.dist-info}/METADATA +15 -13
  66. {rasa_pro-3.14.1.dist-info → rasa_pro-3.15.0a3.dist-info}/RECORD +69 -53
  67. {rasa_pro-3.14.1.dist-info → rasa_pro-3.15.0a3.dist-info}/NOTICE +0 -0
  68. {rasa_pro-3.14.1.dist-info → rasa_pro-3.15.0a3.dist-info}/WHEEL +0 -0
  69. {rasa_pro-3.14.1.dist-info → rasa_pro-3.15.0a3.dist-info}/entry_points.txt +0 -0
rasa/builder/telemetry/copilot_langfuse_telemetry.py ADDED
@@ -0,0 +1,384 @@
+ from functools import wraps
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     AsyncGenerator,
+     Callable,
+     Dict,
+     List,
+     Optional,
+ )
+
+ if TYPE_CHECKING:
+     from rasa.builder.copilot.copilot import Copilot
+     from rasa.builder.copilot.models import CopilotContext
+     from rasa.builder.document_retrieval.inkeep_document_retrieval import (
+         InKeepDocumentRetrieval,
+     )
+
+ import langfuse
+ import structlog
+
+ from rasa.builder.copilot.copilot_response_handler import CopilotResponseHandler
+ from rasa.builder.copilot.models import (
+     CopilotRequest,
+     EventContent,
+     UsageStatistics,
+     UserChatMessage,
+ )
+ from rasa.builder.document_retrieval.models import Document
+ from rasa.builder.models import BotFiles
+ from rasa.builder.shared.tracker_context import TrackerContext
+
+ structlogger = structlog.get_logger()
+
+
+ class CopilotLangfuseTelemetry:
+     @staticmethod
+     def trace_copilot_tracker_context(
+         tracker_context: Optional[TrackerContext],
+         max_conversation_turns: int,
+         session_id: str,
+     ) -> None:
+         """Trace the copilot tracker context.
+
+         Args:
+             tracker_context: The tracker context.
+             max_conversation_turns: The maximum number of conversation turns to be
+                 fetched from the tracker.
+             session_id: The session ID used to fetch the right tracker.
+         """
+         langfuse_client = langfuse.get_client()
+         # Use `update_current_span` to update the current span of the trace.
+         langfuse_client.update_current_span(
+             output={
+                 "tracker_context": (
+                     tracker_context.model_dump() if tracker_context else None
+                 ),
+             },
+             metadata={
+                 "max_conversation_turns": max_conversation_turns,
+                 "session_id": session_id,
+             },
+         )
+
+     @staticmethod
+     def trace_copilot_relevant_assistant_files(
+         relevant_assistant_files: BotFiles,
+     ) -> None:
+         """Trace the copilot relevant assistant files.
+
+         Args:
+             relevant_assistant_files: The relevant assistant files.
+         """
+         langfuse_client = langfuse.get_client()
+         # Use `update_current_span` to update the current span of the trace.
+         langfuse_client.update_current_span(
+             output={
+                 "relevant_assistant_files": relevant_assistant_files,
+             },
+         )
+
+     @staticmethod
+     def setup_copilot_endpoint_call_trace_attributes(
+         hello_rasa_project_id: str,
+         chat_id: str,
+         user_id: str,
+         request: CopilotRequest,
+         handler: CopilotResponseHandler,
+         relevant_documents: list[Document],
+         copilot_context: "CopilotContext",
+     ) -> None:
+         """Set up the current langfuse trace with project and user context.
+
+         Args:
+             hello_rasa_project_id: The Hello Rasa project ID.
+             chat_id: The chat/conversation ID.
+             user_id: The user ID.
+             request: The parsed CopilotRequest object.
+             handler: The response handler containing generated responses.
+             relevant_documents: The relevant documents used to generate the response.
+         """
+         langfuse_client = langfuse.get_client()
+         user_message = CopilotLangfuseTelemetry._extract_last_user_message_content(
+             request
+         )
+         tracker_event_attachments = (
+             CopilotLangfuseTelemetry._extract_tracker_event_attachments(request)
+         )
+         response_category = CopilotLangfuseTelemetry._extract_response_category(handler)
+         reference_section_entries = CopilotLangfuseTelemetry._extract_references(
+             handler, relevant_documents
+         )
+
+         # Create a session ID as a composite ID from project id, user id and chat id
+         session_id = CopilotLangfuseTelemetry._create_session_id(
+             hello_rasa_project_id, user_id, chat_id
+         )
+         # Use `update_current_trace` to update the top level trace.
+         langfuse_client.update_current_trace(
+             user_id=user_id,
+             session_id=session_id,
+             input={
+                 "message": user_message,
+                 "tracker_event_attachments": tracker_event_attachments,
+             },
+             output={
+                 "answer": CopilotLangfuseTelemetry._full_text(handler),
+                 "response_category": response_category,
+                 "references": reference_section_entries,
+             },
+             metadata={
+                 "ids": {
+                     "user_id": user_id,
+                     "project_id": hello_rasa_project_id,
+                     "chat_history_id": chat_id,
+                 },
+                 "copilot_additional_context": {
+                     "relevant_documents": [
+                         doc.model_dump() for doc in relevant_documents
+                     ],
+                     "relevant_assistant_files": copilot_context.assistant_files,
+                     "assistant_tracker_context": (
+                         copilot_context.tracker_context.model_dump()
+                         if copilot_context.tracker_context
+                         else None
+                     ),
+                     "assistant_logs": copilot_context.assistant_logs,
+                     "copilot_chat_history": [
+                         message.model_dump()
+                         for message in copilot_context.copilot_chat_history
+                     ],
+                 },
+             },
+             tags=[response_category] if response_category else [],
+         )
+
+     @staticmethod
+     def trace_copilot_streaming_generation(
+         func: Callable[..., AsyncGenerator[str, None]],
+     ) -> Callable[..., AsyncGenerator[str, None]]:
+         """Custom decorator for tracing async streaming of the Copilot's LLM generation.
+
+         This decorator handles Langfuse tracing for async streaming of the Copilot's LLM
+         generation by manually managing the generation span and updating it with usage
+         statistics after the stream completes.
+         """
+
+         @wraps(func)
+         async def wrapper(
+             self: "Copilot", messages: List[Dict[str, Any]]
+         ) -> AsyncGenerator[str, None]:
+             langfuse_client = langfuse.get_client()
+
+             with langfuse_client.start_as_current_generation(
+                 name=f"{self.__class__.__name__}.{func.__name__}",
+                 input={"messages": messages},
+             ) as generation:
+                 output = []
+                 # Call the original streaming function and start capturing the output
+                 async for chunk in func(self, messages):
+                     output.append(chunk)
+                     yield chunk
+
+                 # Update the span's model parameters and output after streaming is
+                 # complete
+                 generation.update(
+                     model_parameters=self.llm_config, output="".join(output)
+                 )
+
+                 # Update the span's usage statistics after streaming is complete
+                 if self.usage_statistics:
+                     CopilotLangfuseTelemetry._update_generation_span_with_usage_statistics(
+                         generation, self.usage_statistics
+                     )
+
+         return wrapper
+
+     @staticmethod
+     def trace_document_retrieval_generation(
+         func: Callable[..., Any],
+     ) -> Callable[..., Any]:
+         """Custom decorator for tracing document retrieval generation with Langfuse.
+
+         This decorator handles Langfuse tracing for document retrieval API calls
+         by manually managing the generation span and updating it with usage statistics.
+         """
+
+         @wraps(func)
+         async def wrapper(
+             self: "InKeepDocumentRetrieval",
+             query: str,
+             temperature: float,
+             timeout: float,
+         ) -> Any:
+             langfuse_client = langfuse.get_client()
+
+             with langfuse_client.start_as_current_generation(
+                 name=f"{self.__class__.__name__}.{func.__name__}",
+                 input={
+                     "query": query,
+                     "temperature": temperature,
+                     "timeout": timeout,
+                 },
+             ) as generation:
+                 # Call the original function
+                 response = await func(self, query, temperature, timeout)
+
+                 # Update the span with response content
+                 generation.update(
+                     output=response,
+                     model_parameters={
+                         "temperature": str(temperature),
+                         "timeout": str(timeout),
+                     },
+                 )
+
+                 # Update usage statistics if available
+                 usage_statistics = UsageStatistics.from_chat_completion_response(
+                     response
+                 )
+                 if usage_statistics:
+                     CopilotLangfuseTelemetry._update_generation_span_with_usage_statistics(
+                         generation, usage_statistics
+                     )
+
+             return response
+
+         return wrapper
+
+     @staticmethod
+     def _extract_last_user_message_content(request: CopilotRequest) -> Optional[str]:
+         """Extract the last user message from the CopilotRequest object.
+
+         Args:
+             request: The CopilotRequest object.
+         """
+         if not isinstance(request.last_message, UserChatMessage):
+             return None
+         return request.last_message.get_flattened_text_content()
+
+     @staticmethod
+     def _extract_tracker_event_attachments(
+         request: CopilotRequest,
+     ) -> list[Dict[str, Any]]:
+         """Extract tracker event attachments from the last user message.
+
+         Args:
+             request: The CopilotRequest object.
+
+         Returns:
+             The event content block sent with the last user message in the
+             dictionary format.
+         """
+         last_message = request.last_message
+         if not isinstance(last_message, UserChatMessage):
+             return []
+         return [
+             attachment.model_dump()
+             for attachment in last_message.get_content_blocks_by_type(EventContent)
+         ]
+
+     @staticmethod
+     def _extract_response_category(handler: CopilotResponseHandler) -> Optional[str]:
+         """Extract the response category from the response handler.
+
+         Args:
+             handler: The response handler containing generated response.
+
+         Returns:
+             The response category of the first generated response, or None if no
+             responses.
+         """
+         if not handler.generated_responses:
+             return None
+         # The handler contains multiple chunks of one response. We use the first chunk's
+         # response category.
+         return handler.generated_responses[0].response_category.value
+
+     @staticmethod
+     def _full_text(handler: CopilotResponseHandler) -> str:
+         """Extract full text from the response handler.
+
+         Args:
+             handler: The response handler containing generated responses.
+
+         Returns:
+             The concatenated content of all generated responses.
+         """
+         return "".join(
+             response.content
+             for response in handler.generated_responses
+             if getattr(response, "content", None)
+         )
+
+     @staticmethod
+     def _extract_references(
+         handler: CopilotResponseHandler,
+         relevant_documents: list[Document],
+     ) -> List[Dict[str, Any]]:
+         """Extract reference entries from the response handler.
+
+         Args:
+             handler: The response handler containing generated responses.
+             relevant_documents: The relevant documents used to generate the response.
+
+         Returns:
+             A list of reference entries in dictionary format.
+         """
+         if not relevant_documents:
+             return []
+
+         reference_entries: list[Dict[str, Any]] = []
+         reference_section = handler.extract_references(relevant_documents)
+         for reference_entry in reference_section.references:
+             reference_entries.append(
+                 reference_entry.model_dump(
+                     exclude={"response_category", "response_completeness"}
+                 )
+             )
+
+         return reference_entries
+
+     @staticmethod
+     def _update_generation_span_with_usage_statistics(
+         generation_span: langfuse.LangfuseGeneration,
+         usage_statistics: UsageStatistics,
+     ) -> None:
+         """Update the generation span with the usage statistics.
+
+         Args:
+             generation_span: The generation span.
+             usage_statistics: The usage statistics of the generation.
+         """
+         generation_span.update(
+             usage_details={
+                 "input_non_cached_usage": (
+                     usage_statistics.non_cached_prompt_tokens or 0
+                 ),
+                 "input_cached_usage": usage_statistics.cached_prompt_tokens or 0,
+                 "output_usage": usage_statistics.completion_tokens or 0,
+                 "total": usage_statistics.total_tokens or 0,
+             },
+             cost_details={
+                 "input_non_cached_cost": usage_statistics.non_cached_cost or 0,
+                 "input_cached_cost": usage_statistics.cached_cost or 0,
+                 "output_cost": usage_statistics.output_cost or 0,
+                 "total": usage_statistics.total_cost or 0,
+             },
+             model=usage_statistics.model,
+         )
+
+     @staticmethod
+     def _create_session_id(
+         hello_rasa_project_id: str,
+         user_id: str,
+         chat_id: str,
+     ) -> str:
+         """Create a session ID as a composite from project id, user id and chat id."""
+         pattern = "PID-{project_id}-UID-{user_id}-CID-{chat_id}"
+         return pattern.format(
+             project_id=hello_rasa_project_id,
+             user_id=user_id,
+             chat_id=chat_id,
+         )
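
The two decorators above wrap async methods on Copilot and InKeepDocumentRetrieval and rely on attributes of the decorated object (llm_config and usage_statistics in the streaming case). A minimal usage sketch of the streaming decorator; MyCopilot and its attribute values are hypothetical stand-ins, not the actual rasa-pro Copilot class:

from typing import Any, AsyncGenerator, Dict, List

from rasa.builder.telemetry.copilot_langfuse_telemetry import CopilotLangfuseTelemetry


class MyCopilot:
    """Hypothetical stand-in for the real Copilot class."""

    def __init__(self) -> None:
        # The decorator reads these two attributes once the stream completes.
        self.llm_config: Dict[str, Any] = {"model": "gpt-4o"}  # assumed shape
        self.usage_statistics = None  # would be set by the LLM client

    @CopilotLangfuseTelemetry.trace_copilot_streaming_generation
    async def generate(
        self, messages: List[Dict[str, Any]]
    ) -> AsyncGenerator[str, None]:
        # Every yielded chunk is captured by the decorator; the joined chunks
        # become the Langfuse generation's output when the stream ends.
        for chunk in ("Hello", ", ", "world"):
            yield chunk

Because the wrapper re-yields each chunk, callers consume the decorated method exactly as before; the generation span is only finalized once the stream is exhausted.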
rasa/builder/{copilot/telemetry.py → telemetry/copilot_segment_telemetry.py} RENAMED
@@ -1,14 +1,25 @@
  import datetime as dt
  import os
  import uuid
- from typing import Any, Iterable, Optional, Sequence
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Iterable,
+     Optional,
+     Sequence,
+ )
+
+ if TYPE_CHECKING:
+     pass
 
  import structlog
 
  from rasa import telemetry
  from rasa.builder.copilot.constants import COPILOT_SEGMENT_WRITE_KEY_ENV_VAR
  from rasa.builder.copilot.copilot_response_handler import CopilotResponseHandler
- from rasa.builder.copilot.models import EventContent
+ from rasa.builder.copilot.models import (
+     EventContent,
+ )
  from rasa.builder.document_retrieval.models import Document
  from rasa.telemetry import (
      SEGMENT_TRACK_ENDPOINT,
@@ -56,7 +67,7 @@ def _track(event: str, user_id: str, properties: dict) -> None:
          structlogger.warning("builder.telemetry.track_failed", error=str(e))
 
 
- class CopilotTelemetry:
+ class CopilotSegmentTelemetry:
      def __init__(
          self,
          *,
@@ -96,6 +107,7 @@ class CopilotTelemetry:
          latency_ms: int,
          model: str,
          input_tokens: Optional[int] = None,
+         cached_prompt_tokens: Optional[int] = None,
          output_tokens: Optional[int] = None,
          total_tokens: Optional[int] = None,
          system_message: Optional[dict[str, Any]] = None,
@@ -112,6 +124,7 @@ class CopilotTelemetry:
              latency_ms: End-to-end Copilot latency to produce this response.
              model: The model used to generate the response.
              input_tokens: Number of input tokens used (optional).
+             cached_prompt_tokens: Number of cached prompt tokens.
              output_tokens: Number of output tokens generated (optional).
              total_tokens: Total number of tokens used (input + output) (optional).
              system_message: The system message used (optional).
@@ -135,6 +148,7 @@ class CopilotTelemetry:
              "latency_ms": latency_ms,
              "model": model,
              "input_tokens": input_tokens,
+             "cached_prompt_tokens": cached_prompt_tokens,
              "output_tokens": output_tokens,
              "total_tokens": total_tokens,
              "chat_history": chat_history,
@@ -193,6 +207,7 @@ class CopilotTelemetry:
          latency_ms: int,
          model: str,
          prompt_tokens: int,
+         cached_prompt_tokens: int,
          completion_tokens: int,
          total_tokens: int,
          system_message: dict[str, Any],
@@ -208,11 +223,13 @@ class CopilotTelemetry:
              latency_ms: End-to-end Copilot latency to produce this response.
              model: The model used to generate the response.
              prompt_tokens: Number of input tokens used.
+             cached_prompt_tokens: Number of cached prompt tokens.
              completion_tokens: Number of output tokens generated.
              total_tokens: Total number of tokens used (input + output).
              system_message: The system message used.
              chat_history: The chat history messages used.
              last_user_message: The last user message used.
+             tracker_event_attachments: List of tracker event attachments.
          """
          structlogger.debug("builder.telemetry.log_copilot_from_handler")
          text = self._full_text(handler)
@@ -223,6 +240,7 @@ class CopilotTelemetry:
              latency_ms=latency_ms,
              model=model,
              input_tokens=prompt_tokens,
+             cached_prompt_tokens=cached_prompt_tokens,
              output_tokens=completion_tokens,
              total_tokens=total_tokens,
              system_message=system_message,
rasa/builder/training_service.py CHANGED
@@ -99,10 +99,16 @@ async def try_load_existing_agent(project_folder: str) -> Optional[Agent]:
      available_endpoints = Configuration.initialise_endpoints(
          endpoints_path=Path(project_folder) / DEFAULT_ENDPOINTS_PATH
      ).endpoints
+     # Get available sub agents for agent loading
+     _sub_agents = Configuration.initialise_sub_agents(
+         sub_agents_path=None
+     ).available_agents
 
      # Load the agent
      agent = await load_agent(
-         model_path=latest_model_path, endpoints=available_endpoints
+         model_path=latest_model_path,
+         endpoints=available_endpoints,
+         sub_agents=_sub_agents,
      )
 
      if agent and agent.is_ready():
@@ -133,6 +139,9 @@ async def _train_model(
      try:
          structlogger.info("training.started")
 
+         # init sub agents using the default path
+         Configuration.initialise_sub_agents(sub_agents_path=None)
+
          training_result = await train(
              domain="",
              config=str(config_file),
@@ -160,6 +169,8 @@ async def _load_agent(model_path: str, endpoints_file: Path) -> Agent:
      available_endpoints = Configuration.initialise_endpoints(
          endpoints_path=endpoints_file
      ).endpoints
+     _sub_agents = Configuration.get_instance().available_agents
+
      if available_endpoints is None:
          raise AgentLoadError("No endpoints available for agent loading")
 
@@ -173,6 +184,7 @@ async def _load_agent(model_path: str, endpoints_file: Path) -> Agent:
          model_path=model_path,
          remote_storage=None,
          endpoints=available_endpoints,
+         sub_agents=_sub_agents,
      )
 
      if agent_instance is None:
rasa/builder/validation_service.py CHANGED
@@ -24,7 +24,7 @@ def _mock_sys_exit() -> Generator[Dict[str, bool], Any, None]:
          was_sys_exit_called["value"] = True
 
      original_exit = sys.exit
-     sys.exit = sys_exit_mock  # type: ignore[assignment]
+     sys.exit = sys_exit_mock  # type: ignore
 
      try:
          yield was_sys_exit_called
@@ -50,6 +50,7 @@ async def validate_project(importer: TrainingDataImporter) -> Optional[str]:
      from rasa.core.config.configuration import Configuration
 
      Configuration.initialise_empty()
+     Configuration.initialise_sub_agents(sub_agents_path=None)
 
      validate_files(
          fail_on_warnings=config.VALIDATION_FAIL_ON_WARNINGS,
rasa/constants.py CHANGED
@@ -33,6 +33,7 @@ ENV_MCP_LOGGING_ENABLED = "MCP_LOGGING_ENABLED"
  ENV_LOG_LEVEL_MATPLOTLIB = "LOG_LEVEL_MATPLOTLIB"
  ENV_LOG_LEVEL_RABBITMQ = "LOG_LEVEL_RABBITMQ"
  ENV_LOG_LEVEL_KAFKA = "LOG_LEVEL_KAFKA"
+ ENV_LOG_LEVEL_PYMONGO = "LOG_LEVEL_PYMONGO"
 
  DEFAULT_SANIC_WORKERS = 1
  ENV_SANIC_WORKERS = "SANIC_WORKERS"
rasa/core/actions/action_clean_stack.py CHANGED
@@ -4,9 +4,11 @@ from typing import Any, Dict, List, Optional
 
  import structlog
 
+ import rasa.dialogue_understanding.stack.utils
  from rasa.core.actions.action import Action
  from rasa.core.channels import OutputChannel
  from rasa.core.nlg import NaturalLanguageGenerator
+ from rasa.dialogue_understanding.patterns.code_change import FLOW_PATTERN_CODE_CHANGE_ID
  from rasa.dialogue_understanding.stack.dialogue_stack import DialogueStack
  from rasa.dialogue_understanding.stack.frames import (
      BaseFlowStackFrame,
@@ -41,6 +43,15 @@ class ActionCleanStack(Action):
          """Clean the stack."""
          structlogger.debug("action_clean_stack.run")
          new_frames = []
+         top_flow_frame = rasa.dialogue_understanding.stack.utils.top_flow_frame(
+             tracker.stack, ignore_call_frames=False
+         )
+         top_user_flow_frame = (
+             rasa.dialogue_understanding.stack.utils.top_user_flow_frame(
+                 tracker.stack, ignore_call_and_link_frames=False
+             )
+         )
+
          # Set all frames to their end step, filter out any non-BaseFlowStackFrames
          for frame in tracker.stack.frames:
              if isinstance(frame, BaseFlowStackFrame):
@@ -56,4 +67,25 @@ class ActionCleanStack(Action):
                  new_frames.append(frame)
          new_stack = DialogueStack.from_dict([frame.as_dict() for frame in new_frames])
 
+         # Check if the action is being called from within a user flow
+         if (
+             top_flow_frame
+             and top_flow_frame.flow_id != FLOW_PATTERN_CODE_CHANGE_ID
+             and top_user_flow_frame
+             and top_user_flow_frame.flow_id == top_flow_frame.flow_id
+         ):
+             # The action is being called from within a user flow on the stack.
+             # If there are other frames on the stack, we need to make sure
+             # the last executed frame is the end step of the current user flow so
+             # that we can trigger pattern_completed for this user flow.
+             new_stack.pop()
+             structlogger.debug(
+                 "action_clean_stack.pushing_user_frame_at_the_bottom_of_stack",
+                 flow_id=top_user_flow_frame.flow_id,
+             )
+             new_stack.push(
+                 top_user_flow_frame,
+                 index=0,
+             )
+
          return tracker.create_stack_updated_events(new_stack)
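
The new branch does not just truncate the stack: when the action runs from inside a user flow (and not from pattern_code_change), the user flow frame is popped from the top and re-inserted at index 0, so its end step executes last and can still trigger pattern_completed. A plain-list sketch of that reordering, with hypothetical frame names standing in for DialogueStack frames:

# Plain-list stand-in for DialogueStack (index 0 = bottom, last item = top).
stack = ["other_frame", "user_flow_frame"]  # hypothetical frames

user_flow_frame = stack.pop()      # the user flow frame sits on top; remove it
stack.insert(0, user_flow_frame)   # re-insert it at the bottom (index=0)

assert stack == ["user_flow_frame", "other_frame"]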
rasa/core/actions/constants.py CHANGED
@@ -3,3 +3,7 @@ SELECTIVE_DOMAIN = "enable_selective_domain"
 
  SSL_CLIENT_CERT_FIELD = "ssl_client_cert"
  SSL_CLIENT_KEY_FIELD = "ssl_client_key"
+
+ # Special marker key used by EndpointConfig to indicate 449 status
+ # without raising an exception
+ MISSING_DOMAIN_MARKER = "missing_domain"
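
Per the comment, the marker lets EndpointConfig signal an HTTP 449 ("retry with") response from the action server without raising. A hypothetical sketch of how a caller might branch on the marker; the response shape here is assumed, not taken from the rasa-pro source:

from rasa.core.actions.constants import MISSING_DOMAIN_MARKER

# Assumed shape: an executor could return a dict flagged with the marker
# instead of raising when the action server answers with HTTP 449.
response = {MISSING_DOMAIN_MARKER: True}

if response.get(MISSING_DOMAIN_MARKER):
    # Retry the custom action call, this time sending the full domain.
    ...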