rasa-pro 3.14.0rc1__py3-none-any.whl → 3.14.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of rasa-pro might be problematic.

@@ -6,6 +6,9 @@ COPILOT_PROMPTS_FILE = "copilot_system_prompt.jinja2"
 COPILOT_LAST_USER_MESSAGE_CONTEXT_PROMPT_FILE = (
     "latest_user_message_context_prompt.jinja2"
 )
+COPILOT_TRAINING_ERROR_HANDLER_PROMPT_FILE = (
+    "copilot_training_error_handler_prompt.jinja2"
+)
 
 # A dot-path for importlib to the rasa internal messages templates
 COPILOT_MESSAGE_TEMPLATES_DIR = "builder.copilot.templated_messages"
@@ -22,7 +25,7 @@ ROLE_COPILOT: Literal["copilot"] = "copilot"
 
 # Rasa internal role - Used to indicate that the message is from the Rasa internal
 # system components.
-ROLE_COPILOT_INTERNAL: Literal["copilot_internal"] = "copilot_internal"
+ROLE_COPILOT_INTERNAL: Literal["internal_copilot_request"] = "internal_copilot_request"
 
 # Copilot Telemetry
 COPILOT_SEGMENT_WRITE_KEY_ENV_VAR = "COPILOT_SEGMENT_WRITE_KEY"
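
Note that the rename changes the runtime string value as well as the `Literal` type, so any code or persisted chat history that still carries the old `copilot_internal` role string will no longer compare equal to `ROLE_COPILOT_INTERNAL`. A minimal sketch of a load-time normalization one might add when migrating stored messages (the helper and the legacy-role map are hypothetical, not part of rasa-pro):

from typing import Dict

# Hypothetical mapping from the pre-3.14.0rc2 role literal to the new one.
_LEGACY_ROLE_MAP: Dict[str, str] = {"copilot_internal": "internal_copilot_request"}

def normalize_role(role: str) -> str:
    """Return the current role literal for a possibly legacy role string."""
    return _LEGACY_ROLE_MAP.get(role, role)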
@@ -1,9 +1,8 @@
 import asyncio
-import copy
 import importlib
 import json
 from contextlib import asynccontextmanager
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Union
 
 import openai
 import structlog
@@ -16,9 +15,9 @@ from rasa.builder.copilot.constants import (
     COPILOT_LAST_USER_MESSAGE_CONTEXT_PROMPT_FILE,
     COPILOT_PROMPTS_DIR,
     COPILOT_PROMPTS_FILE,
+    COPILOT_TRAINING_ERROR_HANDLER_PROMPT_FILE,
     ROLE_COPILOT,
     ROLE_COPILOT_INTERNAL,
-    ROLE_SYSTEM,
    ROLE_USER,
 )
 from rasa.builder.copilot.exceptions import CopilotStreamError
@@ -26,9 +25,12 @@ from rasa.builder.copilot.models import (
     CopilotChatMessage,
     CopilotContext,
     CopilotGenerationContext,
+    CopilotSystemMessage,
+    FileContent,
+    InternalCopilotRequestChatMessage,
     ResponseCategory,
-    TextContent,
     UsageStatistics,
+    UserChatMessage,
 )
 from rasa.builder.document_retrieval.inkeep_document_retrieval import (
     InKeepDocumentRetrieval,
@@ -60,6 +62,12 @@ class Copilot:
                 COPILOT_LAST_USER_MESSAGE_CONTEXT_PROMPT_FILE,
             )
         )
+        self._training_error_handler_prompt_template = Template(
+            importlib.resources.read_text(
+                f"{PACKAGE_NAME}.{COPILOT_PROMPTS_DIR}",
+                COPILOT_TRAINING_ERROR_HANDLER_PROMPT_FILE,
+            )
+        )
 
         # The final stream chunk includes usage statistics.
         self.usage_statistics = UsageStatistics()
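
The new training-error-handler template is loaded with the same pattern as the existing prompts: read a text resource bundled in the package and wrap it in a Jinja2 `Template`. A self-contained sketch of that pattern with placeholder package and file names (also note that `importlib.resources.read_text` is deprecated in newer Python versions in favor of `importlib.resources.files`):

import importlib.resources

from jinja2 import Template

# Placeholder names for illustration; the real code uses
# f"{PACKAGE_NAME}.{COPILOT_PROMPTS_DIR}" and the .jinja2 constants above.
prompt_source = importlib.resources.read_text(
    "my_package.prompts", "training_error_handler.jinja2"
)
template = Template(prompt_source)

# Render with the variables the template expects.
print(template.render(logs="...", modified_files={}, documentation_results=None))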
@@ -205,86 +213,87 @@ class Copilot:
         Returns:
             A list of messages in OpenAI format.
         """
-        # Split chat history into past messages and latest message
-        past_messages = [
-            message
-            for message in context.copilot_chat_history[:-1]
-            if message.response_category != ResponseCategory.GUARDRAILS_POLICY_VIOLATION
-        ]
-        latest_message = context.copilot_chat_history[-1]
-
-        # Create the system message
-        system_message = await self._create_system_message()
-        # Create the chat history messages (excludes the last message)
-        chat_history = self._create_chat_history_messages(past_messages)
-        # Create the last message and add the context to it
-        latest_message_with_context = self._create_last_user_message_with_context(
-            latest_message, context, relevant_documents
+        if not context.copilot_chat_history:
+            return []
+
+        past_messages = self._create_chat_history_messages(
+            context.copilot_chat_history[:-1]
         )
-        return [system_message, *chat_history, latest_message_with_context]
 
-    async def _create_system_message(self) -> Dict[str, Any]:
-        """Render the correct Jinja template based on desired output_type."""
-        rendered_prompt = self._system_message_prompt_template.render()
-        return {"role": ROLE_SYSTEM, "content": rendered_prompt}
+        latest_message = self._process_latest_message(
+            context.copilot_chat_history[-1], context, relevant_documents
+        )
+        system_message = self._create_system_message()
+
+        return [system_message, *past_messages, latest_message]
 
     def _create_chat_history_messages(
-        self,
-        past_messages: List["CopilotChatMessage"],
+        self, chat_history: List[Union[UserChatMessage, CopilotChatMessage]]
     ) -> List[Dict[str, Any]]:
-        """Create the chat history messages for the copilot.
+        """Filter and convert past messages to OpenAI format.
 
-        Filter out messages with response_category of GUARDRAILS_POLICY_VIOLATION.
-        This will filter out all the user messages that were flagged by guardrails, but
-        also the copilot messages that were produced by guardrails.
+        Excludes guardrails policy violations and non-user/copilot messages.
 
         Args:
-            past_messages: List of past messages (excluding the latest message).
+            chat_history: List of chat messages to filter and convert.
 
         Returns:
-            List of messages in OpenAI format.
+            List of messages in OpenAI format
         """
-        return [
-            message.to_openai_format()
-            for message in past_messages
-            if message.response_category != ResponseCategory.GUARDRAILS_POLICY_VIOLATION
-        ]
+        filtered_messages = []
+
+        for message in chat_history:
+            if (
+                message.response_category
+                != ResponseCategory.GUARDRAILS_POLICY_VIOLATION
+                and message.role in [ROLE_USER, ROLE_COPILOT]
+            ):
+                filtered_messages.append(message)
 
-    def _create_last_user_message_with_context(
+        return [message.build_openai_message() for message in filtered_messages]
+
+    def _process_latest_message(
         self,
-        latest_message: "CopilotChatMessage",
+        latest_message: Any,
         context: CopilotContext,
         relevant_documents: List[Document],
     ) -> Dict[str, Any]:
-        """Create the last user message with context.
-
-        The last user message is the last message in the copilot chat history.
-        We add the context prompt with the current conversation, state, assistant logs,
-        assistant files, and relevant documents as a text content block to the beginning
-        of the message.
+        """Process the latest message and convert it to OpenAI format.
 
         Args:
-            context: The context of the copilot.
-            relevant_documents: The relevant documents to use in the context.
+            latest_message: The most recent message from the chat history.
+            context: The copilot context containing conversation state.
+            relevant_documents: List of relevant documents for context.
 
         Returns:
-            The last user message with context in the OpenAI format.
+            Message in OpenAI format.
+
+        Raises:
+            ValueError: If the message type is not supported.
         """
-        last_user_message = copy.deepcopy(latest_message)
-        context_prompt = self._render_last_user_message_context_prompt(
-            context, relevant_documents
-        )
-        last_user_message.content.insert(
-            0, TextContent(type="text", text=context_prompt)
-        )
-        return {
-            "role": ROLE_USER,
-            "content": [
-                {"type": "text", "text": content.text}
-                for content in last_user_message.content
-                if isinstance(content, TextContent)
-            ],
-        }
+        if isinstance(latest_message, UserChatMessage):
+            rendered_prompt = self._render_last_user_message_context_prompt(
+                context, relevant_documents
+            )
+            return latest_message.build_openai_message(prompt=rendered_prompt)
+
+        elif isinstance(latest_message, InternalCopilotRequestChatMessage):
+            rendered_prompt = self._render_training_error_handler_prompt(
+                latest_message, relevant_documents
+            )
+            return latest_message.build_openai_message(prompt=rendered_prompt)
+
+        else:
+            raise ValueError(f"Unexpected message type: {type(latest_message)}")
+
+    def _create_system_message(self) -> Dict[str, Any]:
+        """Create the system message for the conversation.
+
+        Returns:
+            System message in OpenAI format with rendered prompt template.
+        """
+        rendered_prompt = self._system_message_prompt_template.render()
+        return CopilotSystemMessage().build_openai_message(prompt=rendered_prompt)
 
     def _render_last_user_message_context_prompt(
         self,
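
The refactor above replaces a single user-message path with type-based dispatch: each message model now serializes itself via `build_openai_message`, and the copilot only decides which context prompt to render for it. A reduced sketch of the shape of that dispatch, using toy stand-in classes (names and payload shapes are assumptions, not the package's actual models):

from typing import Any, Dict


class ToyUserMessage:
    """Stand-in for UserChatMessage."""

    def __init__(self, text: str) -> None:
        self.text = text

    def build_openai_message(self, prompt: str) -> Dict[str, Any]:
        # The rendered context prompt travels alongside the user's own text.
        return {"role": "user", "content": f"{prompt}\n\n{self.text}"}


class ToyInternalRequest:
    """Stand-in for InternalCopilotRequestChatMessage."""

    def build_openai_message(self, prompt: str) -> Dict[str, Any]:
        # Internal requests carry only the rendered error-handler prompt.
        return {"role": "user", "content": prompt}


def process_latest(message: Any) -> Dict[str, Any]:
    # Mirrors the isinstance dispatch in _process_latest_message.
    if isinstance(message, ToyUserMessage):
        return message.build_openai_message(prompt="<context prompt>")
    if isinstance(message, ToyInternalRequest):
        return message.build_openai_message(prompt="<training error prompt>")
    raise ValueError(f"Unexpected message type: {type(message)}")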
@@ -307,32 +316,76 @@ class Copilot:
         )
         return rendered_prompt
 
+    def _render_training_error_handler_prompt(
+        self,
+        internal_request_message: InternalCopilotRequestChatMessage,
+        relevant_documents: List[Document],
+    ) -> str:
+        """Render the training error handler prompt with documentation and context.
+
+        Args:
+            internal_request_message: Internal request message.
+            relevant_documents: List of relevant documents for context.
+
+        Returns:
+            Rendered prompt string for training error analysis.
+        """
+        modified_files_dicts: Dict[str, str] = {
+            file.file_path: file.file_content
+            for file in internal_request_message.get_content_blocks_by_type(FileContent)
+        }
+        rendered_prompt = self._training_error_handler_prompt_template.render(
+            logs=internal_request_message.get_flattened_log_content(),
+            modified_files=modified_files_dicts,
+            documentation_results=self._format_documents(relevant_documents),
+        )
+
+        return rendered_prompt
+
     @staticmethod
     def _create_documentation_search_query(context: CopilotContext) -> str:
-        """Format chat messages between user and copilot for documentation search."""
+        """Format chat messages between user and copilot for documentation search.
 
-        result = ""
+        Filters out guardrails policy violations and only includes messages with
+        USER or COPILOT roles, then takes the last N relevant messages.
+        """
         role_to_prefix = {
             ROLE_USER: "User",
             ROLE_COPILOT: "Assistant",
-            ROLE_COPILOT_INTERNAL: "Copilot Internal Request",
+            ROLE_COPILOT_INTERNAL: "User",
         }
+        allowed_message_types = (
+            UserChatMessage,
+            InternalCopilotRequestChatMessage,
+            CopilotChatMessage,
+        )
 
-        # Only use the last N messages for documentation search
-        messages_to_include = context.copilot_chat_history[
-            -COPILOT_DOCUMENTATION_SEARCH_QUERY_HISTORY_MESSAGES:
-        ]
+        query_chat_history: List[str] = []
+
+        for message in reversed(context.copilot_chat_history):
+            if (
+                message.response_category
+                == ResponseCategory.GUARDRAILS_POLICY_VIOLATION
+                or not isinstance(message, allowed_message_types)
+            ):
+                continue
+
+            if (
+                len(query_chat_history)
+                >= COPILOT_DOCUMENTATION_SEARCH_QUERY_HISTORY_MESSAGES
+            ):
+                break
 
-        for message in messages_to_include:
             prefix = role_to_prefix[message.role]
-            text = message.get_text_content().strip()
-            if text:
-                result += f"{prefix}: {text}\n"
-            log_content = message.get_log_content().strip()
-            if log_content:
-                result += f"{prefix}: {log_content}\n"
+            text = (
+                Copilot._format_internal_message_for_query_chat_history(message)
+                if isinstance(message, InternalCopilotRequestChatMessage)
+                else Copilot._format_normal_message_for_query_chat_history(message)
+            )
+            query_chat_history.insert(0, f"{prefix}: {text}")
 
-        return result
+        return "\n".join(query_chat_history)
 
     @staticmethod
     def _format_documents(results: List[Document]) -> Optional[str]:
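
The rewritten search-query builder also changes the windowing semantics: instead of slicing the last N messages and then filtering, it walks the history in reverse and spends the N-message budget only on messages that survive filtering. The core pattern, reduced to a standalone generic helper (hypothetical, for illustration only):

from typing import Callable, List, TypeVar

T = TypeVar("T")

def last_n_matching(items: List[T], n: int, keep: Callable[[T], bool]) -> List[T]:
    """Collect the last n items that satisfy keep(), preserving original order."""
    selected: List[T] = []
    for item in reversed(items):
        if not keep(item):
            continue
        if len(selected) >= n:
            break
        # Insert at the front so the result stays in chronological order.
        selected.insert(0, item)
    return selected

# The last 2 even numbers, in original order:
assert last_n_matching([1, 2, 3, 4, 5, 6], 2, lambda x: x % 2 == 0) == [4, 6]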
@@ -340,7 +393,7 @@ class Copilot:
         if not results:
             return None
 
-        formatted_results = {
+        formatted_results: Dict[str, Any] = {
             "sources": [
                 {
                     # Start the reference from 1, not 0.
@@ -448,3 +501,26 @@ class Copilot:
             return json.dumps({}, ensure_ascii=False, indent=2)
         current_state = tracker_context.current_state.model_dump()
         return json.dumps(current_state, ensure_ascii=False, indent=2)
+
+    @staticmethod
+    def _format_normal_message_for_query_chat_history(
+        message: Union[UserChatMessage, CopilotChatMessage],
+    ) -> str:
+        """Format normal message for query chat history."""
+        return f"{message.get_flattened_text_content()}"
+
+    @staticmethod
+    def _format_internal_message_for_query_chat_history(
+        message: InternalCopilotRequestChatMessage,
+    ) -> str:
+        """Format internal copilot request message for query chat history."""
+        text_content = message.get_flattened_text_content()
+        log_content = message.get_flattened_log_content()
+        if text_content and log_content:
+            return f"{text_content}\nLogs: {log_content}"
+        elif text_content:
+            return text_content
+        elif log_content:
+            return f"Logs: {log_content}"
+        else:
+            return ""
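
For a concrete sense of what these helpers feed into the documentation search query, here is the branching of `_format_internal_message_for_query_chat_history` exercised with literal strings standing in for the message accessors (a sketch, not package code):

def format_internal(text_content: str, log_content: str) -> str:
    # Mirrors the branching in _format_internal_message_for_query_chat_history.
    if text_content and log_content:
        return f"{text_content}\nLogs: {log_content}"
    if text_content:
        return text_content
    if log_content:
        return f"Logs: {log_content}"
    return ""

assert format_internal("Training failed", "flow 'greet' not found") == (
    "Training failed\nLogs: flow 'greet' not found"
)
assert format_internal("", "flow 'greet' not found") == "Logs: flow 'greet' not found"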