rasa-pro 3.14.0rc1__py3-none-any.whl → 3.14.0rc2__py3-none-any.whl

This diff represents the changes between publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects the differences between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of rasa-pro might be problematic. Click here for more details.

@@ -1,16 +1,23 @@
1
1
  from abc import ABC, abstractmethod
2
2
  from enum import Enum
3
- from typing import Any, Dict, List, Literal, Optional, Union
3
+ from typing import Any, Dict, List, Literal, Optional, Type, TypeVar, Union
4
4
 
5
5
  import structlog
6
6
  from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
7
- from pydantic import BaseModel, Field, field_serializer, model_validator
7
+ from pydantic import (
8
+ BaseModel,
9
+ Field,
10
+ field_serializer,
11
+ field_validator,
12
+ model_validator,
13
+ )
8
14
  from typing_extensions import Annotated
9
15
 
10
16
  from rasa.builder.copilot.constants import (
11
17
  ROLE_ASSISTANT,
12
18
  ROLE_COPILOT,
13
19
  ROLE_COPILOT_INTERNAL,
20
+ ROLE_SYSTEM,
14
21
  ROLE_USER,
15
22
  )
16
23
  from rasa.builder.document_retrieval.models import Document
@@ -49,7 +56,8 @@ class ResponseCategory(Enum):
49
56
  # When Copilot analyzes error logs and provides suggestions
50
57
  TRAINING_ERROR_LOG_ANALYSIS = "training_error_log_analysis"
51
58
  E2E_TESTING_ERROR_LOG_ANALYSIS = "e2e_testing_error_log_analysis"
52
-
59
+ TRAINING_ERROR_LOG = "training_error_log"
60
+ E2E_TESTING_ERROR_LOG = "e2e_testing_error_log"
53
61
  # Conversation history signature
54
62
  SIGNATURE = "signature"
55
63
 
@@ -90,7 +98,7 @@ class LogContent(BaseContent):
90
98
  type: Literal["log"]
91
99
  content: str = Field(..., description="Logs, error messages, stack traces, etc.")
92
100
  context: Optional[str] = Field(
93
- None,
101
+ default=None,
94
102
  description=(
95
103
  "Additional, optional context description for the logs "
96
104
  "(e.g., 'training session', 'e2e testing run', 'deployment process')"
@@ -144,125 +152,233 @@ ContentBlock = Annotated[
144
152
  ),
145
153
  ]
146
154
 
155
+ TContentBlock = TypeVar("TContentBlock", bound=BaseContent)
147
156
 
148
- class CopilotChatMessage(BaseModel):
149
- """Model for a single chat messages between the user and the copilot."""
150
157
 
151
- role: str = Field(
152
- ...,
153
- pattern=f"^({ROLE_USER}|{ROLE_COPILOT}|{ROLE_COPILOT_INTERNAL})$",
154
- description="The role of the message sender.",
158
+ class BaseCopilotChatMessage(BaseModel, ABC):
159
+ role: str
160
+ response_category: Optional[ResponseCategory] = Field(default=None)
161
+
162
+ @abstractmethod
163
+ def build_openai_message(self, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-def]
164
+ pass
165
+
166
+ @field_serializer("response_category", when_used="always")
167
+ def _serialize_response_category(
168
+ self, v: Optional[ResponseCategory]
169
+ ) -> Optional[str]:
170
+ """Serializing CopilotChatMessage, response_category should be a string."""
171
+ return None if v is None else v.value
172
+
173
+
174
+ class CopilotSystemMessage(BaseCopilotChatMessage):
175
+ role: Literal["system"] = Field(
176
+ default=ROLE_SYSTEM,
177
+ pattern=f"^{ROLE_SYSTEM}",
178
+ description="The system message that sets the system instructions for the LLM.",
155
179
  )
156
- content: List[ContentBlock]
157
- response_category: Optional[ResponseCategory] = Field(
158
- None,
159
- description=(
160
- "The category/source of this message. For user role messages, only `None` "
161
- "or `GUARDRAILS_POLICY_VIOLATION` are allowed. For copilot role messages, "
162
- "any category is permitted."
163
- ),
180
+
181
+ def build_openai_message(self, prompt: str, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-def]
182
+ """Render the system message template and return OpenAI format."""
183
+ return {"role": ROLE_SYSTEM, "content": prompt}
184
+
185
+
186
+ class UserChatMessage(BaseCopilotChatMessage):
187
+ role: Literal["user"] = Field(
188
+ default=ROLE_USER,
189
+ pattern=f"^{ROLE_USER}",
190
+ description="The user who sent the message.",
164
191
  )
192
+ content: List[ContentBlock]
193
+
194
+ @classmethod
195
+ @field_validator("content")
196
+ def must_have_at_least_one_text(cls, v: List[ContentBlock]) -> List[ContentBlock]:
197
+ if not any(isinstance(content_block, TextContent) for content_block in v):
198
+ message = "User role messages must have at least one `TextContent` block."
199
+ structlogger.error(
200
+ "user_chat_message.missing_text_content",
201
+ event_info=message,
202
+ content=v,
203
+ )
204
+ raise ValueError(
205
+ "UserChatMessage must contain at least one TextContent block."
206
+ )
207
+ return v
165
208
 
166
209
  @model_validator(mode="after")
167
- def validate_response_category_for_role(self) -> "CopilotChatMessage":
168
- """Validate value of response_category for the role of the message.
210
+ def validate_response_category(self) -> "UserChatMessage":
211
+ """Validate value of response_category for user message.
169
212
 
170
213
  For 'user' role messages, only None or GUARDRAILS_POLICY_VIOLATION are allowed.
171
- For 'copilot' role messages, any category is permitted.
172
- For 'rasa_internal' role messages, any category is permitted.
173
214
  """
215
+ allowed_response_categories = [ResponseCategory.GUARDRAILS_POLICY_VIOLATION]
174
216
  if (
175
- self.role == ROLE_USER
176
- and self.response_category is not None
177
- and self.response_category != ResponseCategory.GUARDRAILS_POLICY_VIOLATION
217
+ self.response_category is not None
218
+ and self.response_category not in allowed_response_categories
178
219
  ):
179
220
  message = (
180
221
  f"User role messages can only have response_category of `None` or "
181
- f"`{ResponseCategory.GUARDRAILS_POLICY_VIOLATION}`, "
182
- f"got `{self.response_category}`."
222
+ f"{', '.join(category.value for category in allowed_response_categories)}." # noqa: E501
223
+ f"Got `{self.response_category}`."
183
224
  )
184
225
  structlogger.error(
185
- "copilot_chat_message.validate_response_category_for_role"
226
+ "user_chat_message.validate_response_category"
186
227
  ".invalid_response_category",
187
228
  event_info=message,
188
229
  response_category=self.response_category,
230
+ allowed_response_categories=allowed_response_categories,
189
231
  role=self.role,
190
232
  )
191
233
  raise ValueError(message)
192
234
 
193
235
  return self
194
236
 
195
- @field_serializer("response_category", when_used="always")
196
- def _serialize_response_category(
197
- self, v: Optional[ResponseCategory]
198
- ) -> Optional[str]:
199
- """Serializing CopilotChatMessage, response_category should be a string."""
200
- return None if v is None else v.value
201
-
202
- def get_text_content(self) -> str:
203
- """Concatenate all 'text' content blocks into a single string."""
237
+ def get_flattened_text_content(self) -> str:
238
+ """Get the text content from the message."""
204
239
  return "\n".join(
205
240
  content_block.text
206
241
  for content_block in self.content
207
242
  if isinstance(content_block, TextContent)
208
243
  )
209
244
 
210
- def get_log_content(self) -> str:
211
- """Concatenate all 'log' content blocks into a single string."""
245
+ def build_openai_message( # type: ignore[no-untyped-def]
246
+ self, prompt: Optional[str] = None, *args, **kwargs
247
+ ) -> Dict[str, Any]:
248
+ # If a prompt is provided, add it to the message content as additional
249
+ # instructions
250
+ if prompt:
251
+ return {
252
+ "role": ROLE_USER,
253
+ "content": [
254
+ {"type": "text", "text": prompt},
255
+ {"type": "text", "text": self.get_flattened_text_content()},
256
+ ],
257
+ }
258
+ # Return simple text content (useful for showing the history)
259
+ else:
260
+ return {"role": ROLE_USER, "content": self.get_flattened_text_content()}
261
+
262
+
263
+ class CopilotChatMessage(BaseCopilotChatMessage):
264
+ role: Literal["copilot"]
265
+ content: List[ContentBlock]
266
+
267
+ def get_flattened_text_content(self) -> str:
268
+ """Get the text content from the message."""
212
269
  return "\n".join(
213
- content_block.content
270
+ content_block.text
214
271
  for content_block in self.content
215
- if isinstance(content_block, LogContent)
272
+ if isinstance(content_block, TextContent)
216
273
  )
217
274
 
218
- def to_openai_format(self) -> Dict[str, Any]:
219
- """Convert to OpenAI message format for API calls."""
220
- role_to_openai_format = {
221
- ROLE_USER: self._user_message_to_openai_format,
222
- ROLE_COPILOT: self._copilot_message_to_openai_format,
223
- ROLE_COPILOT_INTERNAL: self._copilot_message_to_openai_format,
224
- }
225
- return role_to_openai_format[self.role]()
226
-
227
- def _user_message_to_openai_format(self) -> Dict[str, Any]:
228
- role = self._map_role_to_openai()
229
- content = self.get_text_content()
230
- return {"role": role, "content": content}
231
-
232
- def _copilot_message_to_openai_format(self) -> Dict[str, Any]:
233
- role = self._map_role_to_openai()
275
+ def build_openai_message(self, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-def]
234
276
  # For now the Copilot responds only with the text content and all the content
235
277
  # is formatted as a markdown.
236
- # TODO: Once we start predicting the files, and expecting other content blocks
237
- # we should update this.
238
- content = self.get_text_content()
239
- return {"role": role, "content": content}
240
-
241
- def _map_role_to_openai(self) -> str:
242
- """Map internal roles to OpenAI-compatible roles."""
243
- role_mapping = {
244
- ROLE_USER: ROLE_USER,
245
- ROLE_COPILOT: ROLE_ASSISTANT,
246
- ROLE_COPILOT_INTERNAL: ROLE_USER,
247
- }
248
- if self.role not in role_mapping.keys():
278
+ return {"role": ROLE_ASSISTANT, "content": self.get_flattened_text_content()}
279
+
280
+
281
+ class InternalCopilotRequestChatMessage(BaseCopilotChatMessage):
282
+ role: Literal["internal_copilot_request"]
283
+ content: List[ContentBlock]
284
+
285
+ @model_validator(mode="after")
286
+ def validate_response_category(self) -> "InternalCopilotRequestChatMessage":
287
+ """Validate value of response_category for internal copilot request message.
288
+
289
+ For 'internal_copilot_request' role messages, only `TRAINING_ERROR_LOG_ANALYSIS`
290
+ and `E2E_TESTING_ERROR_LOG_ANALYSIS` response categories are allowed.
291
+ """
292
+ allowed_response_categories = [
293
+ ResponseCategory.TRAINING_ERROR_LOG_ANALYSIS,
294
+ ResponseCategory.E2E_TESTING_ERROR_LOG_ANALYSIS,
295
+ ]
296
+ if self.response_category not in allowed_response_categories:
297
+ message = (
298
+ f"Copilot Internal Roles request messages can only have of "
299
+ f"{', '.join(category.value for category in allowed_response_categories)}. " # noqa: E501
300
+ f"Got `{self.response_category}`."
301
+ )
249
302
  structlogger.error(
250
- "copilot_chat_message.to_openai_format.invalid_role",
251
- event_info=(
252
- f"Invalid role: `{self.role}`. "
253
- f"Only {', '.join(role_mapping.keys())} roles are supported."
254
- ),
303
+ "internal_copilot_request_chat_message.validate_response_category"
304
+ ".invalid_response_category",
305
+ event_info=message,
306
+ response_category=self.response_category,
307
+ allowed_response_categories=allowed_response_categories,
255
308
  role=self.role,
256
309
  )
257
- raise ValueError(f"Invalid role: {self.role}")
310
+ raise ValueError(message)
311
+
312
+ return self
313
+
314
+ def get_flattened_text_content(self) -> str:
315
+ """Get the text content from the message."""
316
+ return "\n".join(
317
+ content_block.text
318
+ for content_block in self.content
319
+ if isinstance(content_block, TextContent)
320
+ )
321
+
322
+ def get_flattened_log_content(self) -> str:
323
+ """Get the text content from the message."""
324
+ return "\n".join(
325
+ content_block.content
326
+ for content_block in self.content
327
+ if isinstance(content_block, LogContent)
328
+ )
258
329
 
259
- return role_mapping[self.role]
330
+ def get_content_blocks_by_type(
331
+ self, content_type: Type[TContentBlock]
332
+ ) -> List[TContentBlock]:
333
+ """Get the content blocks from the message by type."""
334
+ return [
335
+ content_block
336
+ for content_block in self.content
337
+ if isinstance(content_block, content_type)
338
+ ]
339
+
340
+ def build_openai_message(self, prompt: str, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-def]
341
+ """Build OpenAI message with pre-rendered prompt.
342
+
343
+ The prompt should be rendered externally using the content from this message
344
+ (logs, files, any additional context outside of this message, etc.) before
345
+ being passed to this method.
346
+ """
347
+ return {"role": ROLE_USER, "content": prompt}
348
+
349
+
350
+ # Union type for all possible chat message types
351
+ ChatMessage = Union[
352
+ CopilotSystemMessage,
353
+ UserChatMessage,
354
+ CopilotChatMessage,
355
+ InternalCopilotRequestChatMessage,
356
+ ]
357
+
358
+
359
+ class CopilotContext(BaseModel):
360
+ """Model containing the context used by the copilot to generate a response."""
361
+
362
+ assistant_logs: str = Field(default="")
363
+ assistant_files: Dict[str, str] = Field(
364
+ default_factory=dict,
365
+ description=(
366
+ "The assistant files. Key is the file path, value is the file content."
367
+ ),
368
+ )
369
+ copilot_chat_history: List[ChatMessage] = Field(default_factory=list)
370
+ tracker_context: Optional[TrackerContext] = Field(default=None)
371
+
372
+ class Config:
373
+ """Config for LLMBuilderContext."""
374
+
375
+ arbitrary_types_allowed = True
260
376
 
261
377
 
262
378
  class CopilotRequest(BaseModel):
263
379
  """Request model for the copilot endpoint."""
264
380
 
265
- copilot_chat_history: List[CopilotChatMessage] = Field(
381
+ copilot_chat_history: List[ChatMessage] = Field(
266
382
  ...,
267
383
  description=(
268
384
  "The chat history between the user and the copilot. "
@@ -285,8 +401,43 @@ class CopilotRequest(BaseModel):
285
401
  description='Signature scheme version (e.g. "v1").',
286
402
  )
287
403
 
404
+ @field_validator("copilot_chat_history", mode="before")
405
+ @classmethod
406
+ def parse_chat_history(cls, v: List[Dict[str, Any]]) -> List[ChatMessage]:
407
+ """Manually parse chat history messages based on role field."""
408
+ parsed_messages: List[ChatMessage] = []
409
+ available_roles = [ROLE_USER, ROLE_COPILOT, ROLE_COPILOT_INTERNAL]
410
+ for message_data in v:
411
+ role = message_data.get("role")
412
+
413
+ if role == ROLE_USER:
414
+ parsed_messages.append(UserChatMessage(**message_data))
415
+
416
+ elif role == ROLE_COPILOT:
417
+ parsed_messages.append(CopilotChatMessage(**message_data))
418
+
419
+ elif role == ROLE_COPILOT_INTERNAL:
420
+ parsed_messages.append(
421
+ InternalCopilotRequestChatMessage(**message_data)
422
+ )
423
+
424
+ else:
425
+ message = (
426
+ f"Unknown role '{role}' in chat message. "
427
+ f"Available roles are: {', '.join(available_roles)}."
428
+ )
429
+ structlogger.error(
430
+ "copilot_request.parse_chat_history.unknown_role",
431
+ event_info=message,
432
+ role=role,
433
+ available_roles=available_roles,
434
+ )
435
+ raise ValueError(message)
436
+
437
+ return parsed_messages
438
+
288
439
  @property
289
- def last_message(self) -> Optional[CopilotChatMessage]:
440
+ def last_message(self) -> Optional[ChatMessage]:
290
441
  """Get the last message from the copilot chat history."""
291
442
  if not self.copilot_chat_history:
292
443
  return None
@@ -315,6 +466,12 @@ class CopilotOutput(BaseModel, ABC):
315
466
  """Convert to SSE event format."""
316
467
  pass
317
468
 
469
+ @property
470
+ @abstractmethod
471
+ def sse_data(self) -> Dict[str, Any]:
472
+ """Extract the SSE data payload."""
473
+ pass
474
+
318
475
 
319
476
  class GeneratedContent(CopilotOutput):
320
477
  """Represents generated content from the LLM to be streamed."""
@@ -327,13 +484,18 @@ class GeneratedContent(CopilotOutput):
327
484
  """Convert to SSE event format."""
328
485
  return ServerSentEvent(
329
486
  event="copilot_response",
330
- data={
331
- "content": self.content,
332
- "response_category": self.response_category.value,
333
- "completeness": self.response_completeness.value,
334
- },
487
+ data=self.sse_data,
335
488
  )
336
489
 
490
+ @property
491
+ def sse_data(self) -> Dict[str, Any]:
492
+ """Extract the SSE data payload."""
493
+ return {
494
+ "content": self.content,
495
+ "response_category": self.response_category.value,
496
+ "completeness": self.response_completeness.value,
497
+ }
498
+
337
499
 
338
500
  class ReferenceEntry(CopilotOutput):
339
501
  """Represents a reference entry with title and url."""
@@ -361,15 +523,20 @@ class ReferenceEntry(CopilotOutput):
361
523
  """Convert to SSE event format."""
362
524
  return ServerSentEvent(
363
525
  event="copilot_response",
364
- data={
365
- "index": self.index,
366
- "title": self.title,
367
- "url": self.url,
368
- "response_category": self.response_category.value,
369
- "completeness": self.response_completeness.value,
370
- },
526
+ data=self.sse_data,
371
527
  )
372
528
 
529
+ @property
530
+ def sse_data(self) -> Dict[str, Any]:
531
+ """Extract the SSE data payload."""
532
+ return {
533
+ "index": self.index,
534
+ "title": self.title,
535
+ "url": self.url,
536
+ "response_category": self.response_category.value,
537
+ "completeness": self.response_completeness.value,
538
+ }
539
+
373
540
 
374
541
  class ReferenceSection(CopilotOutput):
375
542
  """Represents a reference section with documentation links."""
@@ -395,16 +562,21 @@ class ReferenceSection(CopilotOutput):
395
562
  """Convert to SSE event format."""
396
563
  return ServerSentEvent(
397
564
  event="copilot_response",
398
- data={
399
- "references": [
400
- reference.model_dump(include={"index", "title", "url"})
401
- for reference in self.references
402
- ],
403
- "response_category": self.response_category.value,
404
- "completeness": self.response_completeness.value,
405
- },
565
+ data=self.sse_data,
406
566
  )
407
567
 
568
+ @property
569
+ def sse_data(self) -> Dict[str, Any]:
570
+ """Extract the SSE data payload."""
571
+ return {
572
+ "references": [
573
+ reference.model_dump(include={"index", "title", "url"})
574
+ for reference in self.references
575
+ ],
576
+ "response_category": self.response_category.value,
577
+ "completeness": self.response_completeness.value,
578
+ }
579
+
408
580
  def sort_references(self) -> None:
409
581
  """Sort references by index value."""
410
582
  sorted_references = sorted(
@@ -414,18 +586,42 @@ class ReferenceSection(CopilotOutput):
414
586
  self.references = sorted_references
415
587
 
416
588
 
417
- class CopilotContext(BaseModel):
418
- """Model containing the context used by the copilot to generate a response."""
589
+ class TrainingErrorLog(CopilotOutput):
590
+ """Represents an error log."""
591
+
592
+ logs: List[LogContent]
593
+ response_category: ResponseCategory = Field(
594
+ default=ResponseCategory.TRAINING_ERROR_LOG,
595
+ frozen=True,
596
+ )
597
+ response_completeness: ResponseCompleteness = ResponseCompleteness.COMPLETE
419
598
 
420
- assistant_logs: str = Field("")
421
- assistant_files: Dict[str, str] = Field({})
422
- copilot_chat_history: List["CopilotChatMessage"] = Field([])
423
- tracker_context: Optional[TrackerContext] = Field(None)
599
+ @model_validator(mode="after")
600
+ def validate_response_category(self) -> "TrainingErrorLog":
601
+ """Validate that response_category has the correct default value."""
602
+ if self.response_category != ResponseCategory.TRAINING_ERROR_LOG:
603
+ raise ValueError(
604
+ f"TrainingErrorLog response_category must be "
605
+ f"{ResponseCategory.TRAINING_ERROR_LOG}, "
606
+ f"got `{self.response_category}`."
607
+ )
608
+ return self
424
609
 
425
- class Config:
426
- """Config for LLMBuilderContext."""
610
+ def to_sse_event(self) -> ServerSentEvent:
611
+ """Convert to SSE event format."""
612
+ return ServerSentEvent(
613
+ event="copilot_response",
614
+ data=self.sse_data,
615
+ )
427
616
 
428
- arbitrary_types_allowed = True
617
+ @property
618
+ def sse_data(self) -> Dict[str, Any]:
619
+ """Extract the SSE data payload."""
620
+ return {
621
+ "logs": [log.model_dump() for log in self.logs],
622
+ "response_category": self.response_category.value,
623
+ "completeness": self.response_completeness.value,
624
+ }
429
625
 
430
626
 
431
627
  class UsageStatistics(BaseModel):
@@ -0,0 +1,53 @@
1
+ # Task
2
+ Your task is to help the user fix a training error that occurred when they clicked the
3
+ "Apply Changes" button. The user made changes to their assistant project files and tried
4
+ to apply them, but the training process failed. You must always explain the error within
5
+ the context of the user's **assistant project files** (flows, domain, actions, config).
6
+
7
+ ---
8
+
9
+ # Response guidelines
10
+
11
+ 1. **Summary** - Start with one short sentence that explains what happened in plain language.
12
+ 2. **Explanation** - Describe why this error occurs in Rasa Assistant, focusing only on
13
+ assistant project files. Be precise but simple. Do not speculate or reference Rasa
14
+ internals unrelated to project files.
15
+ 3. **Fix Steps** - Provide clear, numbered instructions.
16
+ - Point to specific files (`flows.yml`, `domain.yml`, `actions.py`, etc.).
17
+ - Include corrected YAML or Python snippets if needed.
18
+ - Validate syntax and indentation.
19
+ - Reference documentation for configuration options, syntax rules, or best practices.
20
+ 4. **Validation** - End with one line suggesting how to re-test
21
+ 5. **Multiple Errors** - If multiple error logs are present, you **must** address and
22
+ propose fixes for **all of them**, not just one.
23
+
24
+ # Context
25
+
26
+ ## Relevant Documentation
27
+
28
+ {% if documentation_results %}
29
+ The following documentation sources are available for reference. Use the source index
30
+ numbers (1, 2, 3, etc.) for inline citations when explaining Rasa concepts or providing
31
+ authoritative fixes:
32
+ ```
33
+ {{ documentation_results }}
34
+ ```
35
+ {% else %}
36
+ No relevant documentation source found.
37
+ {% endif %}
38
+
39
+ ## Modified assistant project files
40
+
41
+ {% if modified_files %}
42
+ {{ modified_files }}
43
+ {% else %}
44
+ No modified assistant project files.
45
+ {% endif %}
46
+
47
+ ## Available assistant logs
48
+
49
+ {% if logs %}
50
+ {{ logs }}
51
+ {% else %}
52
+ No assistant logs available.
53
+ {% endif %}
@@ -315,7 +315,7 @@ class GuardrailsPolicyChecker:
315
315
  continue
316
316
  if message.role != ROLE_USER:
317
317
  continue
318
- formatted_message = message.to_openai_format()
318
+ formatted_message = message.build_openai_message()
319
319
  text = (formatted_message.get("content") or "").strip()
320
320
  if not text:
321
321
  continue