rasa-pro 3.13.1a17__py3-none-any.whl → 3.13.1a19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

@@ -0,0 +1,4 @@
+ ROLE_SYSTEM = "system"
+ ROLE_USER = "user"
+ ROLE_ASSISTANT = "assistant"
+ ROLE_RASA_COPILOT = "copilot"
@@ -35,16 +35,16 @@
      },
      {
          "type": "object",
-         "required": ["type", "file", "content"],
+         "required": ["type", "file_path", "file_content"],
          "properties": {
              "type": {
                  "type": "string",
                  "enum": ["file"]
              },
-             "file": {
+             "file_path": {
                  "type": "string"
              },
-             "content": {
+             "file_content": {
                  "type": "string"
              }
          }
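To make the rename concrete, here is a minimal sketch of what the changed `"required"` list implies for payloads. The schema fragment is taken from this hunk; the `jsonschema` validator is an assumption for the demo (any JSON Schema validator would do):

```python
import jsonschema  # assumed helper for the demo; any JSON Schema validator works

# The "file" branch of the response schema, with the renamed fields.
file_block_schema = {
    "type": "object",
    "required": ["type", "file_path", "file_content"],
    "properties": {
        "type": {"type": "string", "enum": ["file"]},
        "file_path": {"type": "string"},
        "file_content": {"type": "string"},
    },
}

# A block using the new field names validates.
jsonschema.validate(
    {"type": "file", "file_path": "domain.yml", "file_content": "responses: {}"},
    file_block_schema,
)

# The old field names no longer satisfy the "required" list.
try:
    jsonschema.validate(
        {"type": "file", "file": "domain.yml", "content": "responses: {}"},
        file_block_schema,
    )
except jsonschema.ValidationError as e:
    print(f"old payload rejected: {e.message}")
```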
@@ -0,0 +1,233 @@
+ import asyncio
+ import importlib
+ import json
+ from contextlib import asynccontextmanager
+ from typing import Any, Dict, List
+
+ import importlib_resources
+ import openai
+ import structlog
+ from jinja2 import Template
+ from pydantic import ValidationError
+ from typing_extensions import AsyncGenerator
+
+ from rasa.builder import config
+ from rasa.builder.constants import ROLE_SYSTEM, ROLE_USER
+ from rasa.builder.exceptions import DocumentRetrievalError, LLMGenerationError
+ from rasa.builder.inkeep_document_retrieval import InKeepDocumentRetrieval
+ from rasa.builder.llm_context import tracker_as_llm_context
+ from rasa.builder.models import CopilotChatMessage, CopilotContext, Document
+ from rasa.shared.constants import PACKAGE_NAME
+ from rasa.shared.utils.io import read_json_file
+
+ COPILOT_LLM_STRUCTURED_OUTPUT_SCHEMA_PATH = str(
+     importlib_resources.files(PACKAGE_NAME).joinpath(
+         "builder/copilot-llm-structured-output-response-schema.json"
+     )
+ )
+
+ structlogger = structlog.get_logger()
+
+
+ class Copilot:
+     def __init__(self):
+         self._client = None
+         self._copilot_response_schema = read_json_file(
+             COPILOT_LLM_STRUCTURED_OUTPUT_SCHEMA_PATH
+         )
+         self._inkeep_document_retrieval = InKeepDocumentRetrieval()
+         self._system_message_prompt_template = Template(
+             importlib.resources.read_text(
+                 "rasa.builder",
+                 "copilot_system_prompt.jinja2",
+             )
+         )
+
+     @asynccontextmanager
+     async def _get_client(self) -> AsyncGenerator[openai.AsyncOpenAI, None]:
+         """Get or lazily create the OpenAI client with proper resource management."""
+         if self._client is None:
+             self._client = openai.AsyncOpenAI(timeout=config.OPENAI_TIMEOUT)
+
+         try:
+             yield self._client
+         except Exception as e:
+             structlogger.error("copilot.llm_client_error", error=str(e))
+             raise
+
+     async def generate_response(self, context: CopilotContext) -> CopilotChatMessage:
+         """Generate copilot response using OpenAI."""
+         try:
+             async with self._get_client() as client:
+                 response_format = {
+                     "type": "json_schema",
+                     "json_schema": {
+                         "name": "copilot_response",
+                         "schema": self._copilot_response_schema,
+                     },
+                 }  # type: ignore[arg-type]
+                 messages = await self._create_llm_request_messages(context)
+
+                 response = await client.chat.completions.create(
+                     model=config.OPENAI_MODEL,
+                     messages=messages,
+                     response_format=response_format,
+                 )
+
+                 content = response.choices[0].message.content
+                 if not content:
+                     structlogger.error(
+                         "copilot.generate_response.empty_llm_response",
+                         event_info="Copilot: Empty response from LLM",
+                         messages=messages,
+                         response=response,
+                     )
+                     raise LLMGenerationError("Copilot: Empty response from LLM")
+
+                 try:
+                     # Parse the JSON response and create a CopilotChatMessage
+                     llm_response = json.loads(content)
+                     structlogger.debug(
+                         "copilot.generate_response.llm_response",
+                         event_info="Copilot: LLM response",
+                         llm_response=llm_response,
+                     )
+                     return CopilotChatMessage.from_llm_response(llm_response)
+                 except (json.JSONDecodeError, ValidationError) as e:
+                     structlogger.error(
+                         "copilot.generate_response.llm_response_invalid_json_format",
+                         content=content,
+                         event_info=f"Copilot: Invalid JSON from LLM: {e}",
+                         error=str(e),
+                     )
+                     raise LLMGenerationError(f"Copilot: Invalid JSON from LLM: {e}")
+
+         except openai.OpenAIError as e:
+             structlogger.error(
+                 "copilot.generate_response.openai_api_error",
+                 event_info="Copilot: OpenAI API error",
+                 messages=messages,
+                 error=str(e),
+             )
+             raise LLMGenerationError(f"Copilot: OpenAI API error: {e}")
+         except asyncio.TimeoutError:
+             structlogger.error(
+                 "copilot.generate_response.llm_request_timeout",
+                 event_info="Copilot: LLM request timed out.",
+                 messages=messages,
+             )
+             raise LLMGenerationError("Copilot: LLM request timed out.")
+
+     async def _create_llm_request_messages(
+         self, context: CopilotContext
+     ) -> List[Dict[str, Any]]:
+         """Constructs the full list of messages to send to the LLM.
+
+         This includes:
+         - System instructions that utilize logs, relevant documentation snippets,
+           and the current tracker state of the Rasa assistant to generate a response.
+         - Chat history between the user and the copilot.
+
+         Args:
+             context: Context object containing chat history, current tracker
+                 state, logs, and file references.
+
+         Returns:
+             A full list of messages to pass to the LLM (system + chronological
+             message history).
+         """
+         system_message = await self._create_system_message(context)
+         chat_history_messages = self._create_chat_history_messages(context)
+         return [system_message, *chat_history_messages]
+
+     async def _create_system_message(
+         self,
+         context: CopilotContext,
+     ) -> Dict[str, Any]:
+         """Create the system message with the instructions and context for the copilot.
+
+         Args:
+             context: The context of the copilot.
+
+         Returns:
+             The system message for the copilot.
+         """
+         # Retrieve relevant documents from the Rasa documentation and format them
+         documentation_results = await self._search_rasa_documentation(context)
+         formatted_docs = self._format_documents(documentation_results)
+
+         # Format the current conversation between the user and the Rasa assistant
+         current_conversation_with_assistant = tracker_as_llm_context(context.tracker)
+
+         # Render the system message prompt template
+         rendered_prompt = self._system_message_prompt_template.render(
+             current_conversation=current_conversation_with_assistant,
+             assistant_logs=context.assistant_logs,
+             assistant_files=context.assistant_files,
+             documentation_results=formatted_docs,
+         )
+         return {"role": ROLE_SYSTEM, "content": rendered_prompt}
+
+     def _create_chat_history_messages(
+         self,
+         context: CopilotContext,
+     ) -> List[Dict[str, Any]]:
+         """Create the chat history messages for the copilot."""
+         return [message.to_openai_format() for message in context.copilot_chat_history]
+
+     async def _search_rasa_documentation(
+         self,
+         context: CopilotContext,
+     ) -> List[Document]:
+         """Search the Rasa documentation for relevant information.
+
+         Args:
+             context: The context of the copilot.
+
+         Returns:
+             A list of Document objects. An empty list is returned if the search fails.
+         """
+         query = self._create_documentation_search_query(context)
+         try:
+             return await self._inkeep_document_retrieval.retrieve_documents(query)
+         except DocumentRetrievalError as e:
+             structlogger.error(
+                 "copilot.search_rasa_documentation.error",
+                 event_info=(
+                     f"Copilot: Searching Rasa documentation for query '{query}' "
+                     f"failed with the following error: {e}. Returning empty list."
+                 ),
+                 query=query,
+                 error=str(e),
+             )
+             return []
+
+     @staticmethod
+     def _create_documentation_search_query(context: CopilotContext) -> str:
+         """Format chat messages between user and copilot for documentation search."""
+         result = ""
+         user_prefix = "User"
+         assistant_prefix = "Assistant"
+         for message in context.copilot_chat_history:
+             text = message.get_text_content().strip()
+             if not text:
+                 continue
+             if message.role == ROLE_USER:
+                 result += f"{user_prefix}: {text}\n"
+             else:
+                 result += f"{assistant_prefix}: {text}\n"
+         return result
+
+     @staticmethod
+     def _format_documents(results: List[Document]) -> str:
+         """Format documentation search results."""
+         if not results:
+             return "<sources>No relevant documentation found.</sources>"
+
+         formatted_results = ""
+         for result in results:
+             formatted_result = f"<result url='{result.url}'>"
+             formatted_result += f"<content>{result.content}</content>"
+             formatted_results += formatted_result + "</result>"
+
+         return f"<sources>{formatted_results}</sources>"
@@ -22,16 +22,16 @@ You have access to:
  ```
  {% endif %}

- {% if bot_logs %}
+ {% if assistant_logs %}
  **Bot Logs:**
  ```
- {{ bot_logs }}
+ {{ assistant_logs }}
  ```
  {% endif %}

- {% if chat_bot_files %}
+ {% if assistant_files %}
  **Bot Configuration Files:**
- {% for file_name, file_content in chat_bot_files.items() %}
+ {% for file_name, file_content in assistant_files.items() %}
  **{{ file_name }}:**
  ```
  {{ file_content }}
@@ -182,8 +182,8 @@ Use for generic code examples and snippets:
  ```json
  {
      "type": "file",
-     "file": "domain.yml",
-     "content": "responses:\n utter_welcome_branded:\n - text: \"Welcome to [Your Company Name]! I'm here to help.\"\n - text: \"Hi! I'm [Bot Name], your personal assistant.\""
+     "file_path": "domain.yml",
+     "file_content": "responses:\n utter_welcome_branded:\n - text: \"Welcome to [Your Company Name]! I'm here to help.\"\n - text: \"Hi! I'm [Bot Name], your personal assistant.\""
  }
  ```

@@ -212,18 +212,18 @@ Use to reference Rasa documentation:
      },
      {
          "type": "file",
-         "file": "domain.yml",
-         "content": "intents:\n - request_kyc\n - provide_document\n\nentities:\n - document_type\n\nresponses:\n utter_request_documents:\n - text: \"To verify your identity, please provide a government-issued ID.\""
+         "file_path": "domain.yml",
+         "file_content": "intents:\n - request_kyc\n - provide_document\n\nentities:\n - document_type\n\nresponses:\n utter_request_documents:\n - text: \"To verify your identity, please provide a government-issued ID.\""
      },
      {
          "type": "file",
-         "file": "flows.yml",
-         "content": "flows:\n kyc_verification:\n description: Handle KYC document verification\n start_conditions:\n - intent: request_kyc\n steps:\n - action: utter_request_documents\n - intent: provide_document\n - action: action_process_kyc_document"
+         "file_path": "flows.yml",
+         "file_content": "flows:\n kyc_verification:\n description: Handle KYC document verification\n start_conditions:\n - intent: request_kyc\n steps:\n - action: utter_request_documents\n - intent: provide_document\n - action: action_process_kyc_document"
      },
      {
          "type": "file",
-         "file": "data/nlu.yml",
-         "content": "- intent: request_kyc\n examples: |\n - I need to verify my identity\n - How do I complete KYC\n - What documents do you need"
+         "file_path": "data/nlu.yml",
+         "file_content": "- intent: request_kyc\n examples: |\n - I need to verify my identity\n - How do I complete KYC\n - What documents do you need"
      },
      {
          "type": "text",
@@ -52,15 +52,28 @@ class InKeepDocumentRetrieval:
              List of Document objects containing retrieved content

          Raises:
-             LLMGenerationError: If the API call fails or returns invalid response
+             DocumentRetrievalError: When document retrieval fails due to:
+                 - Empty response from InKeep AI API
+                 - OpenAI API errors (authentication, rate limiting, etc.)
+                 - Request timeout
+                 - Unexpected errors during API communication
          """
-         response = await self._call_inkeep_rag_api(
-             query=query,
-             temperature=temperature,
-             timeout=timeout,
-         )
-         documents = self._parse_documents_from_response(response)
-         return documents
+         try:
+             response = await self._call_inkeep_rag_api(
+                 query=query,
+                 temperature=temperature,
+                 timeout=timeout,
+             )
+             documents = self._parse_documents_from_response(response)
+             return documents
+         except DocumentRetrievalError as e:
+             structlogger.error(
+                 "inkeep_document_retrieval.retrieve_documents.error",
+                 event_info="InKeep Document Retrieval: Error",
+                 query=query,
+                 error=str(e),
+             )
+             raise e

      async def _call_inkeep_rag_api(
          self, query: str, temperature: float, timeout: float
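A caller-side sketch of the tightened contract (the query string is illustrative; calling `retrieve_documents` with only a query assumes the `temperature`/`timeout` parameters have defaults, as the earlier single-argument call sites in this diff suggest):

```python
import asyncio

from rasa.builder.exceptions import DocumentRetrievalError
from rasa.builder.inkeep_document_retrieval import InKeepDocumentRetrieval


async def main() -> None:
    retrieval = InKeepDocumentRetrieval()
    try:
        # All failure modes now surface as DocumentRetrievalError, already logged.
        documents = await retrieval.retrieve_documents("how do I define a flow?")
    except DocumentRetrievalError:
        documents = []  # degrade gracefully, as the copilot caller does
    for document in documents:
        print(document.url)


asyncio.run(main())
```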
@@ -108,7 +121,7 @@ class InKeepDocumentRetrieval:

          except openai.OpenAIError as e:
              structlogger.error(
-                 "inkeep_document_retrieval.api_error",
+                 "inkeep_document_retrieval.call_inkeep_rag_api.api_error",
                  event_info="InKeep Document Retrieval: API error",
                  request_params=request_params,
                  error=e,
@@ -116,7 +129,7 @@ class InKeepDocumentRetrieval:
              raise DocumentRetrievalError(f"InKeep Document Retrieval: API error: {e}")
          except asyncio.TimeoutError as e:
              structlogger.error(
-                 "inkeep_document_retrieval.timeout_error",
+                 "inkeep_document_retrieval.call_inkeep_rag_api.timeout_error",
                  event_info="InKeep Document Retrieval: Timeout error",
                  request_params=request_params,
                  error=e,
@@ -124,7 +137,7 @@ class InKeepDocumentRetrieval:
              raise DocumentRetrievalError(f"InKeep AI request timed out: {e}")
          except Exception as e:
              structlogger.error(
-                 "inkeep_document_retrieval.error",
+                 "inkeep_document_retrieval.call_inkeep_rag_api.error",
                  event_info="InKeep Document Retrieval: Error",
                  request_params=request_params,
                  error=e,
@@ -161,7 +174,8 @@ class InKeepDocumentRetrieval:
              response: ChatCompletion response from InKeep AI's RAG model.

          Returns:
-             List of Document objects
+             List of Document objects. An empty list is returned if the response
+             is empty or if the response is invalid.
          """
          try:
              content = response.choices[0].message.content
@@ -192,7 +206,8 @@ class InKeepDocumentRetrieval:

          except json.JSONDecodeError as e:
              structlogger.warning(
-                 "inkeep_document_retrieval.parse_response_failed",
+                 "inkeep_document_retrieval.parse_documents_from_response"
+                 ".parse_response_failed",
                  event_info=(
                      "InKeep Document Retrieval: Parse response failed. "
                      "Returning empty list.",
@@ -202,7 +217,7 @@ class InKeepDocumentRetrieval:
              return []
          except Exception as e:
              structlogger.error(
-                 "inkeep_document_retrieval.parse_response_error",
+                 "inkeep_document_retrieval.parse_documents_from_response.error",
                  event_info=(
                      "InKeep Document Retrieval: Parse response error. "
                      "Returning empty list.",
@@ -11,13 +11,10 @@ import importlib_resources
  import openai
  import structlog
  from jinja2 import Template
- from pydantic import ValidationError

  from rasa.builder import config
+ from rasa.builder.copilot import Copilot
  from rasa.builder.exceptions import LLMGenerationError
- from rasa.builder.inkeep_document_retrieval import InKeepDocumentRetrieval
- from rasa.builder.llm_context import tracker_as_llm_context
- from rasa.builder.models import Document, LLMBuilderContext, LLMHelperResponse
  from rasa.constants import PACKAGE_NAME
  from rasa.shared.constants import DOMAIN_SCHEMA_FILE, RESPONSES_SCHEMA_FILE
  from rasa.shared.core.flows.yaml_flows_io import FLOWS_SCHEMA_FILE
@@ -34,7 +31,23 @@ class LLMService:
          self._client: Optional[openai.AsyncOpenAI] = None
          self._domain_schema: Optional[Dict[str, Any]] = None
          self._flows_schema: Optional[Dict[str, Any]] = None
-         self._helper_schema: Optional[Dict[str, Any]] = None
+         self._copilot = None
+
+     @property
+     def copilot(self):
+         """Get or lazily create the copilot instance."""
+         if self._copilot is None:
+             self._copilot = Copilot()
+
+         try:
+             return self._copilot
+         except Exception as e:
+             structlogger.error(
+                 "llm_service.copilot.error",
+                 event_info="LLM Service: Error getting copilot instance.",
+                 error=str(e),
+             )
+             raise

      @asynccontextmanager
      async def _get_client(self) -> AsyncGenerator[openai.AsyncOpenAI, None]:
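Usage-wise, the property memoizes a single `Copilot` per service instance; a minimal sketch (the assertion is illustrative, using the module-level `llm_service` singleton defined at the bottom of this file):

```python
from rasa.builder.llm_service import llm_service

# First access constructs the Copilot; subsequent accesses return the cached one.
first = llm_service.copilot
second = llm_service.copilot
assert first is second
```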
@@ -56,9 +69,6 @@ class LLMService:
          if self._flows_schema is None:
              self._flows_schema = _prepare_flows_schema()

-         if self._helper_schema is None:
-             self._helper_schema = _load_helper_schema()
-
      async def generate_rasa_project(
          self, messages: List[Dict[str, Any]]
      ) -> Dict[str, Any]:
@@ -67,24 +77,25 @@ class LLMService:

          try:
              async with self._get_client() as client:
+                 response_format = {
+                     "type": "json_schema",
+                     "json_schema": {
+                         "name": "rasa_project",
+                         "schema": {
+                             "type": "object",
+                             "properties": {
+                                 "domain": self._domain_schema,
+                                 "flows": self._flows_schema,
+                             },
+                             "required": ["domain", "flows"],
+                         },
+                     },
+                 }
                  response = await client.chat.completions.create(
                      model=config.OPENAI_MODEL,
                      messages=messages,
                      temperature=config.OPENAI_TEMPERATURE,
-                     response_format={
-                         "type": "json_schema",
-                         "json_schema": {
-                             "name": "rasa_project",
-                             "schema": {
-                                 "type": "object",
-                                 "properties": {
-                                     "domain": self._domain_schema,
-                                     "flows": self._flows_schema,
-                                 },
-                                 "required": ["domain", "flows"],
-                             },
-                         },
-                     },
+                     response_format=response_format,
                  )

                  content = response.choices[0].message.content
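The extraction of `response_format` into a local does not change behavior; calling the method should still look like this (messages are illustrative, and the return shape is inferred from the `json_schema` above, which requires `domain` and `flows`):

```python
import asyncio

from rasa.builder.llm_service import llm_service


async def main() -> None:
    messages = [
        {"role": "system", "content": "You generate Rasa projects as JSON."},
        {"role": "user", "content": "A simple greeting bot."},
    ]
    project = await llm_service.generate_rasa_project(messages)
    # The json_schema response_format above requires both top-level keys.
    print(sorted(project.keys()))  # expected: ['domain', 'flows']


asyncio.run(main())
```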
@@ -101,119 +112,6 @@ class LLMService:
          except asyncio.TimeoutError:
              raise LLMGenerationError("LLM request timed out")

-     async def create_helper_messages(
-         self, llm_builder_context: LLMBuilderContext
-     ) -> List[Dict[str, Any]]:
-         """Create helper messages for LLM builder."""
-         # Format chat history for documentation search
-         chat_dump = self._format_chat_dump(llm_builder_context.chat_history)
-
-         # Search documentation
-         documentation_results = await self.search_documentation(chat_dump)
-         formatted_docs = self._format_documentation_results(documentation_results)
-
-         current_conversation = tracker_as_llm_context(llm_builder_context.tracker)
-
-         # Prepare LLM messages
-         system_messages = get_helper_messages(
-             current_conversation,
-             llm_builder_context.bot_logs,
-             llm_builder_context.chat_bot_files,
-             formatted_docs,
-         )
-
-         # Add user messages
-         messages = system_messages.copy()
-         for msg in llm_builder_context.chat_history:
-             messages.append(
-                 {
-                     "role": "user" if msg.type == "user" else "assistant",
-                     "content": json.dumps(msg.content)
-                     if isinstance(msg.content, list)
-                     else msg.content,
-                 }
-             )
-         return messages
-
-     async def generate_helper_response(
-         self, messages: List[Dict[str, Any]]
-     ) -> LLMHelperResponse:
-         """Generate helper response using OpenAI."""
-         self._prepare_schemas()
-
-         try:
-             async with self._get_client() as client:
-                 response = await client.chat.completions.create(
-                     model=config.OPENAI_MODEL,
-                     messages=messages,
-                     response_format={
-                         "type": "json_schema",
-                         "json_schema": {
-                             "name": "llm_helper",
-                             "schema": self._helper_schema,
-                         },
-                     },
-                 )
-
-                 content = response.choices[0].message.content
-                 if not content:
-                     raise LLMGenerationError("Empty response from LLM helper")
-
-                 try:
-                     return LLMHelperResponse.model_validate_json(json.loads(content))
-                 except json.JSONDecodeError as e:
-                     raise LLMGenerationError(f"Invalid JSON from LLM helper: {e}")
-                 except ValidationError as e:
-                     raise LLMGenerationError(f"Invalid JSON from LLM helper: {e}")
-
-         except openai.OpenAIError as e:
-             raise LLMGenerationError(f"OpenAI API error in helper: {e}")
-         except asyncio.TimeoutError:
-             raise LLMGenerationError("LLM helper request timed out")
-
-     async def search_documentation(
-         self, query: str, max_results: Optional[int] = None
-     ) -> List[Document]:
-         """Search documentation using OpenAI vector store."""
-         inkeep_document_retrieval = InKeepDocumentRetrieval()
-         documents = await inkeep_document_retrieval.retrieve_documents(query)
-         return documents
-
-     @staticmethod
-     def _format_chat_dump(messages: List[Dict[str, Any]]) -> str:
-         """Format chat messages for documentation search."""
-         result = ""
-         for message in messages:
-             if message.type == "user":
-                 content = (
-                     message.content
-                     if isinstance(message.content, str)
-                     else str(message.content)
-                 )
-                 result += f"User: {content}\n"
-             else:
-                 if isinstance(message.content, list):
-                     for part in message.content:
-                         if part.get("type") == "text":
-                             result += f"Assistant: {part.get('text')}\n"
-                 else:
-                     result += f"Assistant: {message.content}\n"
-         return result
-
-     @staticmethod
-     def _format_documentation_results(results: List[Document]) -> str:
-         """Format documentation search results."""
-         if not results:
-             return "<sources>No relevant documentation found.</sources>"
-
-         formatted_results = ""
-         for result in results:
-             formatted_result = f"<result url='{result.url}'>"
-             formatted_result += f"<content>{result.content}</content>"
-             formatted_results += formatted_result + "</result>"
-
-         return f"<sources>{formatted_results}</sources>"
-

  # Schema preparation functions (stateless)
  def _prepare_domain_schema() -> Dict[str, Any]:
@@ -254,15 +152,6 @@ def _prepare_flows_schema() -> Dict[str, Any]:
      return flows_schema


- def _load_helper_schema() -> Dict[str, Any]:
-     """Load helper schema."""
-     return read_json_file(
-         importlib_resources.files(PACKAGE_NAME).joinpath(
-             "builder/llm-helper-schema.json"
-         )
-     )
-
-
  # Template functions (stateless with caching)
  _skill_template: Optional[Template] = None
  _helper_template: Optional[Template] = None
@@ -288,30 +177,5 @@ def get_skill_generation_messages(
      return [{"role": "system", "content": system_prompt}]


- def get_helper_messages(
-     current_conversation: str,
-     bot_logs: str,
-     chat_bot_files: Dict[str, str],
-     documentation_results: str,
- ) -> List[Dict[str, Any]]:
-     """Get messages for helper response."""
-     global _helper_template
-
-     if _helper_template is None:
-         template_content = importlib.resources.read_text(
-             "rasa.builder",
-             "llm_helper_prompt.jinja2",
-         )
-         _helper_template = Template(template_content)
-
-     system_prompt = _helper_template.render(
-         current_conversation=current_conversation,
-         bot_logs=bot_logs,
-         chat_bot_files=chat_bot_files,
-         documentation_results=documentation_results,
-     )
-     return [{"role": "system", "content": system_prompt}]
-
-
  # Global service instance
  llm_service = LLMService()
rasa/builder/models.py CHANGED
@@ -1,12 +1,16 @@
  """Pydantic models for request/response validation."""

+ import json
  from typing import Any, Dict, List, Literal, Optional, Union

- from pydantic import BaseModel, Field, validator
+ import structlog
+ from pydantic import BaseModel, Field, model_validator, validator

  from rasa.cli.scaffold import ProjectTemplateName
  from rasa.shared.core.trackers import DialogueStateTracker

+ structlogger = structlog.get_logger()
+

  class PromptRequest(BaseModel):
      """Request model for prompt-to-bot endpoint."""
@@ -14,9 +18,6 @@ class PromptRequest(BaseModel):
      prompt: str = Field(
          ..., min_length=1, max_length=10000, description="The skill description prompt"
      )
-     client_id: Optional[str] = Field(
-         None, max_length=255, description="Optional client identifier"
-     )

      @validator("prompt")
      def validate_prompt(cls, v: str) -> str:
@@ -34,9 +35,6 @@ class TemplateRequest(BaseModel):
              f"The template name to use ({ProjectTemplateName.supported_values()})"
          ),
      )
-     client_id: Optional[str] = Field(
-         None, max_length=255, description="Optional client identifier"
-     )

      @validator("template_name")
      def validate_template_name(cls, v: Any) -> Any:
@@ -47,26 +45,143 @@ class TemplateRequest(BaseModel):
          return v


- class ChatMessage(BaseModel):
-     """Model for chat messages."""
+ class BaseContent(BaseModel):
+     type: str
+
+
+ class TextContent(BaseContent):
+     type: Literal["text"]
+     text: str
+
+
+ class LinkContent(BaseContent):
+     type: Literal["link"]
+     text: str  # This holds the URL
+
+
+ class CodeContent(BaseContent):
+     type: Literal["code"]
+     text: str
+

-     type: str = Field(..., pattern="^(user|assistant)$")
-     content: Union[str, List[Dict[str, Any]]] = Field(...)
+ class FileContent(BaseContent):
+     type: Literal["file"]
+     file_path: str
+     file_content: str


- class LLMBuilderRequest(BaseModel):
-     """Request model for LLM builder endpoint."""
+ class EventContent(BaseModel):
+     type: Literal["event"]
+     event: str = Field(..., description="The event's type_name")

-     messages: List[ChatMessage] = Field(..., min_items=1, max_items=50)
+     event_data: Dict[str, Any] = Field(
+         default_factory=dict,
+         description="Contains event-specific data fields."
+     )
+
+     @model_validator(mode="before")
+     @classmethod
+     def _collect_event_data(cls, data: Dict[str, Any]) -> Dict[str, Any]:
+         generic = {"type", "event"}
+         data["event_data"] = {
+             key: data.pop(key) for key in list(data.keys()) if key not in generic
+         }
+         return data
+
+     class Config:
+         extra = "forbid"
+
+
+ class CopilotChatMessage(BaseModel):
+     """Model for a single chat message between the user and the copilot."""
+
+     role: str = Field(..., pattern="^(user|copilot)$")
+     content: List[
+         Union[TextContent, LinkContent, CodeContent, FileContent, EventContent]
+     ] = Field(
+         ...,
+         description=(
+             "The content of the message. "
+             "The content is expected to be a list of content blocks. "
+             "The content blocks are expected to be one of the following types: "
+             "text, link, code, or file."
+         ),
+     )
+
+     @classmethod
+     def from_llm_response(cls, llm_response: Dict[str, Any]) -> "CopilotChatMessage":
+         """Create a CopilotChatMessage from an LLM response (role: copilot)."""
+         parsed_content = []
+         block_type_to_content_class = {
+             "text": TextContent,
+             "link": LinkContent,
+             "code": CodeContent,
+             "file": FileContent,
+             "event": EventContent,
+         }
+         for block in llm_response.get("content_blocks", []):
+             block_type = block.get("type")
+             if block_type in block_type_to_content_class.keys():
+                 parsed_content.append(block_type_to_content_class[block_type](**block))
+             else:
+                 structlogger.error(
+                     "copilot_chat_message.from_llm_response.unknown_content_block_type",
+                     event_info=f"Unknown content block type: `{block_type}`",
+                     block_type=block_type,
+                     block=block,
+                     llm_response=llm_response,
+                 )
+                 continue
+
+         return cls(role="copilot", content=parsed_content)
+
+     def get_text_content(self) -> str:
+         """Concatenate all 'text' content blocks into a single string."""
+         return "\n".join(
+             content_block.text
+             for content_block in self.content
+             if isinstance(content_block, TextContent)
+         )
+
+     def to_openai_format(self) -> Dict[str, Any]:
+         """Convert to OpenAI message format for API calls."""
+         role = "user" if self.role == "user" else "assistant"
+
+         if self.role == "user":
+             content = self.get_text_content()
+         else:
+             content = [block.model_dump() for block in self.content]
+             content = json.dumps(content)
+
+         return {"role": role, "content": content}
+
+
+ class CopilotRequest(BaseModel):
+     """Request model for the copilot endpoint."""
+
+     copilot_chat_history: List[CopilotChatMessage] = Field(
+         ...,
+         description=(
+             "The chat history between the user and the copilot. "
+             "Used to generate a new response based on the previous conversation."
+         ),
+     )
+     session_id: str = Field(
+         ...,
+         description=(
+             "The session ID of the chat session with the assistant. "
+             "Used to fetch the conversation from the tracker."
+         ),
+     )


- class LLMBuilderContext(BaseModel):
-     """Context model for LLM builder endpoint."""
+ class CopilotContext(BaseModel):
+     """Model containing the context used by the copilot to generate a response."""

      tracker: Optional[DialogueStateTracker] = Field(None)
-     bot_logs: str = Field("")
-     chat_bot_files: Dict[str, str] = Field({})
-     chat_history: List[ChatMessage] = Field([])
+     assistant_logs: str = Field("")
+     assistant_files: Dict[str, str] = Field({})
+     copilot_chat_history: List[CopilotChatMessage] = Field([])

      class Config:
          """Config for LLMBuilderContext."""
@@ -123,8 +238,8 @@ class LinkBlock(ContentBlock):
      text: str = Field(..., pattern=r"^https?://")


- class LLMHelperResponse(BaseModel):
-     """Response model for LLM helper."""
+ class CoPilotHelperResponse(BaseModel):
+     """Response model for CoPilot helper."""

      content_blocks: List[Union[TextBlock, CodeBlock, FileBlock, LinkBlock]] = Field(...)

rasa/builder/service.py CHANGED
@@ -1,6 +1,7 @@
  """Main service for the prompt-to-bot functionality."""

  import os
+ import sys
  from typing import Any, Optional

  import structlog
@@ -18,9 +19,9 @@ from rasa.builder.llm_service import llm_service
  from rasa.builder.logging_utils import get_recent_logs
  from rasa.builder.models import (
      ApiErrorResponse,
-     LLMBuilderContext,
-     LLMBuilderRequest,
-     LLMHelperResponse,
+     CopilotChatMessage,
+     CopilotContext,
+     CopilotRequest,
      PromptRequest,
      ServerSentEvent,
      TemplateRequest,
@@ -51,6 +52,10 @@ def setup_project_generator(project_folder: Optional[str] = None) -> ProjectGene
      # for relative paths (./docs) in a projects config to work
      os.chdir(project_folder)

+     # Ensure the project folder is in sys.path
+     if project_folder not in sys.path:
+         sys.path.insert(0, project_folder)
+
      structlogger.info(
          "bot_builder_service.service_initialized", project_folder=project_folder
      )
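This prepends the generated project to the import path so that modules shipped with the project become importable, which the restored `find_spec` check in `direct_custom_actions_executor.py` (further down in this diff) relies on. A minimal sketch of the effect, with an illustrative path and an assumed `actions` package inside it:

```python
import sys
from importlib.util import find_spec

project_folder = "/tmp/generated-bot"  # illustrative path

if project_folder not in sys.path:
    sys.path.insert(0, project_folder)

# An "actions" package placed inside the project folder is now resolvable.
print(find_spec("actions") is not None)
```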
@@ -134,8 +139,7 @@ async def health(request: Request) -> HTTPResponse:
  @openapi.tag("bot-generation")
  @openapi.body(
      {"application/json": model_to_schema(PromptRequest)},
-     description="Prompt request with natural language description and client ID "
-     "for tracking",
+     description="Prompt request with natural language description.",
      required=True,
  )
  @openapi.response(
@@ -246,7 +250,6 @@ async def handle_prompt_to_bot(request: Request) -> None:

      structlogger.info(
          "bot_builder_service.prompt_to_bot.success",
-         client_id=prompt_data.client_id,
          files_generated=list(bot_files.keys()),
      )

@@ -300,7 +303,7 @@ async def handle_prompt_to_bot(request: Request) -> None:
  @openapi.tag("bot-generation")
  @openapi.body(
      {"application/json": model_to_schema(TemplateRequest)},
-     description="Template request with template name and client ID for " "tracking",
+     description="Template request with template name.",
      required=True,
  )
  @openapi.response(
@@ -415,7 +418,6 @@ async def handle_template_to_bot(request: Request) -> None:

      structlogger.info(
          "bot_builder_service.template_to_bot.success",
-         client_id=template_data.client_id,
          files_generated=list(bot_files.keys()),
      )

@@ -639,22 +641,27 @@ async def get_bot_data(request: Request) -> HTTPResponse:
      )


- @bp.route("/llm-builder", methods=["POST"])
- @openapi.summary("LLM assistant for bot building")
+ @bp.route("/copilot", methods=["POST"])
+ @openapi.summary("AI copilot for bot building")
  @openapi.description(
-     "Provides LLM-powered assistance for bot building tasks, including "
-     "debugging, suggestions, and explanations"
+     "Provides LLM-powered copilot assistance for conversational bot development, "
+     "including debugging, suggestions, and explanations."
  )
- @openapi.tag("llm-assistant")
+ @openapi.tag("copilot")
  @openapi.body(
-     {"application/json": model_to_schema(LLMBuilderRequest)},
-     description="LLM builder request containing chat messages and context",
+     {"application/json": model_to_schema(CopilotRequest)},
+     description=(
+         "Copilot request containing: "
+         "1. conversation history between user and copilot, "
+         "2. session ID for tracking conversation context with the bot being built, and "
+         "3. additional context."
+     ),
      required=True,
  )
  @openapi.response(
      200,
-     {"application/json": model_to_schema(LLMHelperResponse)},
-     description="LLM response with assistance and suggestions",
+     {"application/json": model_to_schema(CopilotChatMessage)},
+     description="Copilot response with assistance and suggestions.",
  )
  @openapi.response(
      400,
@@ -664,46 +671,45 @@ async def get_bot_data(request: Request) -> HTTPResponse:
  @openapi.response(
      502,
      {"application/json": model_to_schema(ApiErrorResponse)},
-     description="LLM generation failed",
+     description="LLM generation failed.",
  )
  @openapi.response(
      500,
      {"application/json": model_to_schema(ApiErrorResponse)},
-     description="Internal server error",
+     description="Internal server error.",
  )
- async def llm_builder(request: Request) -> HTTPResponse:
-     """Handle LLM builder requests."""
+ async def copilot(request: Request) -> HTTPResponse:
+     """Handle copilot requests."""
      project_generator = get_project_generator(request)
      input_channel = get_input_channel(request)

      try:
          # Validate request
-         builder_request = LLMBuilderRequest(**request.json)
+         copilot_request = CopilotRequest(**request.json)

          # Get current conversation context
          current_tracker = await current_tracker_from_input_channel(
              request.app, input_channel
          )
-         bot_logs = get_recent_logs()
-         chat_bot_files = project_generator.get_bot_files()
+         assistant_logs = get_recent_logs()
+         assistant_files = project_generator.get_bot_files()

-         # create LLM builder context
-         llm_builder_context = LLMBuilderContext(
+         # Create LLM builder context
+         context = CopilotContext(
              tracker=current_tracker,
-             bot_logs=bot_logs,
-             chat_bot_files=chat_bot_files,
-             chat_history=builder_request.messages,
+             assistant_logs=assistant_logs,
+             assistant_files=assistant_files,
+             copilot_chat_history=copilot_request.copilot_chat_history,
          )

          # Generate response
-         messages = await llm_service.create_helper_messages(llm_builder_context)
-         llm_response = await llm_service.generate_helper_response(messages)
+         copilot_response = await llm_service.copilot.generate_response(context)

-         return response.json(llm_response)
+         return response.json(copilot_response.model_dump())

      except LLMGenerationError as e:
          structlogger.error(
-             "bot_builder_service.llm_builder.generation_error", error=str(e)
+             "bot_builder_service.copilot_helper.generation_error", error=str(e)
          )
          return response.json(
              ApiErrorResponse(
@@ -714,7 +720,7 @@ async def llm_builder(request: Request) -> HTTPResponse:

      except Exception as e:
          structlogger.error(
-             "bot_builder_service.llm_builder.unexpected_error", error=str(e)
+             "bot_builder_service.copilot_helper.unexpected_error", error=str(e)
          )
          return response.json(
              ApiErrorResponse(
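An illustrative request body for the renamed endpoint, shaped by the `CopilotRequest` model above (the session ID and message text are made up):

```python
import json

# Illustrative body for POST /copilot, shaped by the CopilotRequest model.
payload = {
    "session_id": "session-123",  # made-up session identifier
    "copilot_chat_history": [
        {
            "role": "user",
            "content": [{"type": "text", "text": "My custom action never runs."}],
        }
    ],
}
print(json.dumps(payload, indent=2))
```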
@@ -1,5 +1,5 @@
- import os
  from functools import lru_cache
+ from importlib.util import find_spec
  from typing import (
      Any,
      ClassVar,
@@ -16,6 +16,7 @@ from rasa.core.actions.custom_action_executor import (
  )
  from rasa.shared.core.domain import Domain
  from rasa.shared.core.trackers import DialogueStateTracker, EventVerbosity
+ from rasa.shared.exceptions import RasaException
  from rasa.utils.endpoints import EndpointConfig

  structlogger = structlog.get_logger(__name__)
@@ -62,20 +63,12 @@ class DirectCustomActionExecutor(CustomActionExecutor):
              return

          module_name = self.action_endpoint.actions_module
-         # TODO: remove this / change back
-         structlogger.info(
-             "action.direct_custom_action_executor.register_actions_from_a_module",
-             module_name=module_name,
-             cwd=os.getcwd(),
-             ls_dir=os.listdir(os.getcwd()),
-             ls_dir_module=os.listdir(os.getcwd() + "/" + module_name),
-         )
-         # if not find_spec(module_name):
-         #     raise RasaException(
-         #         f"You've provided the custom actions module '{module_name}' "
-         #         f"to run directly by the rasa server, however this module does "
-         #         f"not exist. Please check for typos in your `endpoints.yml` file."
-         #     )
+         if not find_spec(module_name):
+             raise RasaException(
+                 f"You've provided the custom actions module '{module_name}' "
+                 f"to run directly by the rasa server, however this module does "
+                 f"not exist. Please check for typos in your `endpoints.yml` file."
+             )

          self.action_executor.register_package(module_name)
          DirectCustomActionExecutor._actions_module_registered = True
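The restored guard fails fast when the configured module cannot be resolved. A standalone sketch of the failure mode (the module name is an illustrative typo; `actions_module` is the `endpoints.yml` setting named in the error message):

```python
from importlib.util import find_spec

from rasa.shared.exceptions import RasaException

module_name = "my_missing_actions"  # illustrative misspelled module name

# Mirrors the restored guard: a module that cannot be found on sys.path
# now fails fast instead of surfacing later during action registration.
if not find_spec(module_name):
    raise RasaException(
        f"You've provided the custom actions module '{module_name}' "
        f"to run directly by the rasa server, however this module does "
        f"not exist. Please check for typos in your `endpoints.yml` file."
    )
```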
rasa/version.py CHANGED
@@ -1,3 +1,3 @@
  # this file will automatically be changed,
  # do not add anything but the version number here!
- __version__ = "3.13.1a17"
+ __version__ = "3.13.1a19"
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: rasa-pro
- Version: 3.13.1a17
+ Version: 3.13.1a19
  Summary: State-of-the-art open-core Conversational AI framework for Enterprises that natively leverages generative AI for effortless assistant development.
  Keywords: nlp,machine-learning,machine-learning-library,bot,bots,botkit,rasa conversational-agents,conversational-ai,chatbot,chatbot-framework,bot-framework
  Author: Rasa Technologies GmbH
@@ -4,20 +4,22 @@ rasa/api.py,sha256=RY3SqtlOcdq4YZGgr6DOm-nUBpiA8l8uguUZOctL_7o,6320
  rasa/builder/README.md,sha256=7WYioSzBHFY25h1QCFellv7bIOW9VLH7Gf7dwQEc1k0,3715
  rasa/builder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  rasa/builder/config.py,sha256=NxTvyqWGzJnPANv2s78SBv98fiVhebF2ktzWgr6W0CA,2461
+ rasa/builder/constants.py,sha256=M64YEO-8HMAY4Q9EBsYXKD7DaF28uzeGzFWI1ylAgGQ,101
+ rasa/builder/copilot-llm-structured-output-response-schema.json,sha256=jpHJoW4TZ9f0kQgt5SuyLigHmMcAHY0oWSsh5Hf8fnE,1629
+ rasa/builder/copilot.py,sha256=MhNrQO9YFSy_Q1BMQ8CE2Bwavmu75DO-kE2cIAAJsVA,9236
+ rasa/builder/copilot_system_prompt.jinja2,sha256=2FhWizah072ZPMZDG5W55rosPeqPepZsjsfcHxxoN-s,9639
  rasa/builder/create_openai_vector_store.py,sha256=jAk1QzM4HiC0wjkn1031xBzLFGwVV4JUJMc50QZFIdw,6642
  rasa/builder/exceptions.py,sha256=lsMX_892AoDi4yY86mIX0x-hGvCDZN22kOTTGM5Tvro,1220
  rasa/builder/inkeep-rag-response-schema.json,sha256=ePVbGo6u6sm_CPS8EeECX3UayIvcfUe6yTy2gtexKlk,1498
- rasa/builder/inkeep_document_retrieval.py,sha256=FafqkRgwn-OktF2oBEUzGw28KhmKPU90SSL5Tb8jLZA,7467
- rasa/builder/llm-helper-schema.json,sha256=z5IJc_-2mZ9oQ-z-9WzTivOoqYsLXCAm8MIOTWy5rGs,1609
+ rasa/builder/inkeep_document_retrieval.py,sha256=4NOZl1gfXqKcdXWdCei9BSRLuIwSj_550X-v-0OQ6x8,8239
  rasa/builder/llm_context.py,sha256=zy7htrXgS_QWJWeEj4TfseQgTI65whFJR_4GKm_iOvE,2826
- rasa/builder/llm_helper_prompt.jinja2,sha256=AhfEzXYIMTmWgd2TgVmPVeCfojHA29IiuO6JhTOXXKY,9585
- rasa/builder/llm_service.py,sha256=vAVekor9ZBRZw6-TGDGXoutd9UaNnQV2GltbLbM0-Qc,11459
+ rasa/builder/llm_service.py,sha256=NEB8OmM2V6xhqMnOChQXKa_nmu6LwE_9QR31liqsWFY,6114
  rasa/builder/logging_utils.py,sha256=iPJoN2HhNlS14SKyZv0s0iIljrmP6A8s8C5btoDVOXM,1383
  rasa/builder/main.py,sha256=KzlVDESi5FO3ZAXxr2RISwn2aXACaY4jjZ5Fb-lcVM4,4242
- rasa/builder/models.py,sha256=EuP6FVD0VIkG8Jzf8ftYW2A1xirDRTlN4Ae-OmdLdAA,6208
+ rasa/builder/models.py,sha256=GyiCPlUQ-luZ12FwzSvz64o0ecX6Y3ZbUk1ah6R5Ja8,9755
  rasa/builder/project_generator.py,sha256=7z2wyiwXFYQdw9RT2Yht5NrrtWwntgTBdCuaJGrEheM,10937
  rasa/builder/scrape_rasa_docs.py,sha256=yWezvPgfLkokm9t71ngyckNKCiY4bRvc9azEgSYN0Sg,2480
- rasa/builder/service.py,sha256=Z8yvRCnRYQbdHtyiNXLU8LhLlOl7FvccXbpWbOJ0RVU,24575
+ rasa/builder/service.py,sha256=z7-ROQ6x4FGCm6L1jNJ_d1V7k1w41khBfQFGZg6cOj0,24701
  rasa/builder/skill_to_bot_prompt.jinja2,sha256=h2Fgoh9k3XinN0blEEqMuOWuvwXxJifP3GJs-GczgBU,5530
  rasa/builder/training_service.py,sha256=9OIRZ6CRXHc_m9VHkeHHK7woN5Q1n7BpmsxjYjuByoI,4046
  rasa/builder/validation_service.py,sha256=rKMgbG8Jyv8WMnTIXOMd7VuGWAYicrL9wDJ22BJXZHE,2765
@@ -310,7 +312,7 @@ rasa/core/actions/action_trigger_flow.py,sha256=IydYAGafTtoY6XSgCX124xJQhzudUg8J
  rasa/core/actions/action_trigger_search.py,sha256=QfYqnaGRCqRYJ4msYsLAbnVYW5ija_tqhCcKIN8aEfw,1064
  rasa/core/actions/constants.py,sha256=gfgdWmj-OJ5xTcTAS1OcXQ3dgcTiHO98NC-SGyKlTjs,161
  rasa/core/actions/custom_action_executor.py,sha256=qafASBdM3-hByDqbkNxgXfx5yMSsJh_nB3B7x9ye0TY,6176
- rasa/core/actions/direct_custom_actions_executor.py,sha256=JXlyRA40Hedkz6URyemUEIV0fFCehoOoAzvatu--2FU,4025
+ rasa/core/actions/direct_custom_actions_executor.py,sha256=IzxRnPF92zs3WX-p9DoFq51Vf0QwfE6prB_AlyEEllc,3746
  rasa/core/actions/e2e_stub_custom_action_executor.py,sha256=D-kECC1QjVLv4owNxstW2xJPPsXTGfGepvquMeWB_ec,2282
  rasa/core/actions/forms.py,sha256=MPGxp3vg-EgFcU5UQYqWM2tycSFIuoF6vWvNSSWPhSA,26967
  rasa/core/actions/grpc_custom_action_executor.py,sha256=EDxdSIDA4H4Mu-QZk-pPGV2N41ZsbY8W9laV6l1WlDQ,9103
@@ -1068,9 +1070,9 @@ rasa/utils/train_utils.py,sha256=ClJx-6x3-h3Vt6mskacgkcCUJTMXjFPe3zAcy_DfmaU,212
  rasa/utils/url_tools.py,sha256=dZ1HGkVdWTJB7zYEdwoDIrEuyX9HE5WsxKKFVsXBLE0,1218
  rasa/utils/yaml.py,sha256=KjbZq5C94ZP7Jdsw8bYYF7HASI6K4-C_kdHfrnPLpSI,2000
  rasa/validator.py,sha256=IRhLfcgCpps0wSpokOvUGNaY8t8GsmeSmPOUVRKeOeE,83087
- rasa/version.py,sha256=AVviI7uhVQCuE-dl34eiA4si4yQj06yJK9V2U7CyeeM,120
- rasa_pro-3.13.1a17.dist-info/METADATA,sha256=3NmO_L2ac2WjOweNuV5fYTtIKciTj_ezAyC8nIwCmLI,10610
- rasa_pro-3.13.1a17.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
- rasa_pro-3.13.1a17.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
- rasa_pro-3.13.1a17.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
- rasa_pro-3.13.1a17.dist-info/RECORD,,
+ rasa/version.py,sha256=5BSECPf7G03URRFUd2ZM2wihUq-XTlLshREyRXZyPGI,120
+ rasa_pro-3.13.1a19.dist-info/METADATA,sha256=8YKwP1nDPfThPyWzWaqxZ_2YMjmC6vOACJb_Flm_nDI,10610
+ rasa_pro-3.13.1a19.dist-info/NOTICE,sha256=7HlBoMHJY9CL2GlYSfTQ-PZsVmLmVkYmMiPlTjhuCqA,218
+ rasa_pro-3.13.1a19.dist-info/WHEEL,sha256=fGIA9gx4Qxk2KDKeNJCbOEwSrmLtjWCwzBz351GyrPQ,88
+ rasa_pro-3.13.1a19.dist-info/entry_points.txt,sha256=ckJ2SfEyTPgBqj_I6vm_tqY9dZF_LAPJZA335Xp0Q9U,43
+ rasa_pro-3.13.1a19.dist-info/RECORD,,