rasa_pro-3.13.0a1.dev6-py3-none-any.whl → rasa_pro-3.13.0a1.dev7-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- rasa/builder/README.md +120 -0
- rasa/builder/config.py +69 -0
- rasa/builder/create_openai_vector_store.py +204 -45
- rasa/builder/exceptions.py +49 -0
- rasa/builder/llm_service.py +327 -0
- rasa/builder/logging_utils.py +51 -0
- rasa/builder/main.py +61 -0
- rasa/builder/models.py +174 -0
- rasa/builder/project_generator.py +264 -0
- rasa/builder/service.py +447 -0
- rasa/builder/skill_to_bot_prompt.jinja2 +6 -1
- rasa/builder/training_service.py +123 -0
- rasa/builder/validation_service.py +79 -0
- rasa/cli/project_templates/finance/config.yml +17 -0
- rasa/cli/project_templates/finance/credentials.yml +33 -0
- rasa/cli/project_templates/finance/data/flows/transfer_money.yml +5 -0
- rasa/cli/project_templates/finance/data/patterns/pattern_session_start.yml +7 -0
- rasa/cli/project_templates/finance/domain.yml +7 -0
- rasa/cli/project_templates/finance/endpoints.yml +58 -0
- rasa/cli/project_templates/plain/config.yml +17 -0
- rasa/cli/project_templates/plain/credentials.yml +33 -0
- rasa/cli/project_templates/plain/data/patterns/pattern_session_start.yml +7 -0
- rasa/cli/project_templates/plain/domain.yml +5 -0
- rasa/cli/project_templates/plain/endpoints.yml +58 -0
- rasa/cli/project_templates/telecom/config.yml +17 -0
- rasa/cli/project_templates/telecom/credentials.yml +33 -0
- rasa/cli/project_templates/telecom/data/flows/upgrade_contract.yml +5 -0
- rasa/cli/project_templates/telecom/data/patterns/pattern_session_start.yml +7 -0
- rasa/cli/project_templates/telecom/domain.yml +7 -0
- rasa/cli/project_templates/telecom/endpoints.yml +58 -0
- rasa/cli/scaffold.py +19 -3
- rasa/core/actions/action.py +5 -3
- rasa/model_manager/model_api.py +1 -1
- rasa/model_manager/runner_service.py +1 -1
- rasa/model_manager/trainer_service.py +1 -1
- rasa/model_manager/utils.py +1 -29
- rasa/shared/core/domain.py +62 -15
- rasa/shared/core/flows/yaml_flows_io.py +16 -8
- rasa/telemetry.py +2 -1
- rasa/utils/io.py +27 -9
- rasa/version.py +1 -1
- {rasa_pro-3.13.0a1.dev6.dist-info → rasa_pro-3.13.0a1.dev7.dist-info}/METADATA +1 -1
- {rasa_pro-3.13.0a1.dev6.dist-info → rasa_pro-3.13.0a1.dev7.dist-info}/RECORD +46 -19
- rasa/builder/prompt_to_bot.py +0 -650
- {rasa_pro-3.13.0a1.dev6.dist-info → rasa_pro-3.13.0a1.dev7.dist-info}/NOTICE +0 -0
- {rasa_pro-3.13.0a1.dev6.dist-info → rasa_pro-3.13.0a1.dev7.dist-info}/WHEEL +0 -0
- {rasa_pro-3.13.0a1.dev6.dist-info → rasa_pro-3.13.0a1.dev7.dist-info}/entry_points.txt +0 -0
rasa/builder/llm_service.py
ADDED

@@ -0,0 +1,327 @@
+"""Service for handling LLM interactions."""
+
+import asyncio
+import importlib
+import json
+from contextlib import asynccontextmanager
+from copy import deepcopy
+from typing import Any, Dict, List, Optional
+
+import importlib_resources
+import openai
+import structlog
+from jinja2 import Template
+
+from rasa.builder import config
+from rasa.builder.exceptions import LLMGenerationError
+from rasa.builder.llm_context import tracker_as_llm_context
+from rasa.builder.models import LLMBuilderContext
+from rasa.constants import PACKAGE_NAME
+from rasa.shared.constants import DOMAIN_SCHEMA_FILE, RESPONSES_SCHEMA_FILE
+from rasa.shared.core.flows.yaml_flows_io import FLOWS_SCHEMA_FILE
+from rasa.shared.utils.io import read_json_file
+from rasa.shared.utils.yaml import read_schema_file
+
+structlogger = structlog.get_logger()
+
+
+class LLMService:
+    """Handles OpenAI LLM interactions with caching for efficiency."""
+
+    def __init__(self):
+        self._client: Optional[openai.AsyncOpenAI] = None
+        self._domain_schema: Optional[Dict[str, Any]] = None
+        self._flows_schema: Optional[Dict[str, Any]] = None
+        self._helper_schema: Optional[Dict[str, Any]] = None
+
+    @asynccontextmanager
+    async def _get_client(self):
+        """Get or create OpenAI client with proper resource management."""
+        if self._client is None:
+            self._client = openai.AsyncOpenAI(timeout=config.OPENAI_TIMEOUT)
+
+        try:
+            yield self._client
+        except Exception as e:
+            structlogger.error("llm.client_error", error=str(e))
+            raise
+
+    def _prepare_schemas(self):
+        """Prepare and cache schemas for LLM generation."""
+        if self._domain_schema is None:
+            self._domain_schema = _prepare_domain_schema()
+
+        if self._flows_schema is None:
+            self._flows_schema = _prepare_flows_schema()
+
+        if self._helper_schema is None:
+            self._helper_schema = _load_helper_schema()
+
+    async def generate_rasa_project(
+        self, messages: List[Dict[str, Any]]
+    ) -> Dict[str, Any]:
+        """Generate Rasa project data using OpenAI."""
+        self._prepare_schemas()
+
+        try:
+            async with self._get_client() as client:
+                response = await client.chat.completions.create(
+                    model=config.OPENAI_MODEL,
+                    messages=messages,
+                    temperature=config.OPENAI_TEMPERATURE,
+                    response_format={
+                        "type": "json_schema",
+                        "json_schema": {
+                            "name": "rasa_project",
+                            "schema": {
+                                "type": "object",
+                                "properties": {
+                                    "domain": self._domain_schema,
+                                    "flows": self._flows_schema,
+                                },
+                                "required": ["domain", "flows"],
+                            },
+                        },
+                    },
+                )
+
+                content = response.choices[0].message.content
+                if not content:
+                    raise LLMGenerationError("Empty response from LLM")
+
+                try:
+                    return json.loads(content)
+                except json.JSONDecodeError as e:
+                    raise LLMGenerationError(f"Invalid JSON from LLM: {e}")
+
+        except openai.OpenAIError as e:
+            raise LLMGenerationError(f"OpenAI API error: {e}")
+        except asyncio.TimeoutError:
+            raise LLMGenerationError("LLM request timed out")
+
+    async def create_helper_messages(
+        self, llm_builder_context: LLMBuilderContext
+    ) -> List[Dict[str, Any]]:
+        """Create helper messages for LLM builder."""
+        # Format chat history for documentation search
+        chat_dump = self._format_chat_dump(llm_builder_context.chat_history)
+
+        # Search documentation
+        documentation_results = await self.search_documentation(chat_dump)
+        formatted_docs = self._format_documentation_results(documentation_results)
+
+        current_conversation = tracker_as_llm_context(llm_builder_context.tracker)
+
+        # Prepare LLM messages
+        system_messages = get_helper_messages(
+            current_conversation,
+            llm_builder_context.bot_logs,
+            llm_builder_context.chat_bot_files,
+            formatted_docs,
+        )
+
+        # Add user messages
+        messages = system_messages.copy()
+        for msg in llm_builder_context.chat_history:
+            messages.append(
+                {
+                    "role": "user" if msg.type == "user" else "assistant",
+                    "content": json.dumps(msg.content)
+                    if isinstance(msg.content, list)
+                    else msg.content,
+                }
+            )
+        return messages
+
+    async def generate_helper_response(
+        self, messages: List[Dict[str, Any]]
+    ) -> Dict[str, Any]:
+        """Generate helper response using OpenAI."""
+        self._prepare_schemas()
+
+        try:
+            async with self._get_client() as client:
+                response = await client.chat.completions.create(
+                    model=config.OPENAI_MODEL,
+                    messages=messages,
+                    response_format={
+                        "type": "json_schema",
+                        "json_schema": {
+                            "name": "llm_helper",
+                            "schema": self._helper_schema,
+                        },
+                    },
+                )
+
+                content = response.choices[0].message.content
+                if not content:
+                    raise LLMGenerationError("Empty response from LLM helper")
+
+                try:
+                    return json.loads(content)
+                except json.JSONDecodeError as e:
+                    raise LLMGenerationError(f"Invalid JSON from LLM helper: {e}")
+
+        except openai.OpenAIError as e:
+            raise LLMGenerationError(f"OpenAI API error in helper: {e}")
+        except asyncio.TimeoutError:
+            raise LLMGenerationError("LLM helper request timed out")
+
+    async def search_documentation(
+        self, query: str, max_results: Optional[int] = None
+    ) -> List[Dict[str, Any]]:
+        """Search documentation using OpenAI vector store."""
+        if max_results is None:
+            max_results = config.OPENAI_MAX_VECTOR_RESULTS
+
+        try:
+            async with self._get_client() as client:
+                results = await client.vector_stores.search(
+                    vector_store_id=config.OPENAI_VECTOR_STORE_ID,
+                    query=query,
+                    max_num_results=max_results,
+                    rewrite_query=True,
+                )
+
+                return results.data
+
+        except openai.OpenAIError as e:
+            structlogger.warning(
+                "llm.documentation_search_failed", error=str(e), query=query
+            )
+            return []
+
+    @staticmethod
+    def _format_chat_dump(messages) -> str:
+        """Format chat messages for documentation search."""
+        result = ""
+        for message in messages:
+            if message.type == "user":
+                content = (
+                    message.content
+                    if isinstance(message.content, str)
+                    else str(message.content)
+                )
+                result += f"User: {content}\n"
+            else:
+                if isinstance(message.content, list):
+                    for part in message.content:
+                        if part.get("type") == "text":
+                            result += f"Assistant: {part.get('text')}\n"
+                else:
+                    result += f"Assistant: {message.content}\n"
+        return result
+
+    @staticmethod
+    def _format_documentation_results(results) -> str:
+        """Format documentation search results."""
+        if not results:
+            return "<sources>No relevant documentation found.</sources>"
+
+        formatted_results = ""
+        for result in results:
+            formatted_result = f"<result url='{result.attributes.get('url', '')}'>"
+            for part in result.content:
+                formatted_result += f"<content>{part.text}</content>"
+            formatted_results += formatted_result + "</result>"
+
+        return f"<sources>{formatted_results}</sources>"
+
+
+# Schema preparation functions (stateless)
+def _prepare_domain_schema() -> Dict[str, Any]:
+    """Prepare domain schema by removing unnecessary parts."""
+    domain_schema = deepcopy(read_schema_file(DOMAIN_SCHEMA_FILE, PACKAGE_NAME, False))
+
+    # Remove parts not needed for CALM bots
+    unnecessary_keys = ["intents", "entities", "forms", "config", "session_config"]
+
+    for key in unnecessary_keys:
+        domain_schema["mapping"].pop(key, None)
+
+    # Remove problematic slot mappings
+    slot_mapping = domain_schema["mapping"]["slots"]["mapping"]["regex;([A-Za-z]+)"][
+        "mapping"
+    ]
+    slot_mapping.pop("mappings", None)
+    slot_mapping.pop("validation", None)
+
+    # Add responses schema
+    domain_schema["mapping"]["responses"] = read_schema_file(
+        RESPONSES_SCHEMA_FILE, PACKAGE_NAME, False
+    )["schema;responses"]
+
+    return domain_schema
+
+
+def _prepare_flows_schema() -> Dict[str, Any]:
+    """Prepare flows schema by removing nlu_trigger."""
+    schema_file = str(
+        importlib_resources.files(PACKAGE_NAME).joinpath(FLOWS_SCHEMA_FILE)
+    )
+    flows_schema = deepcopy(read_json_file(schema_file))
+    flows_schema["$defs"]["flow"]["properties"].pop("nlu_trigger", None)
+    return flows_schema
+
+
+def _load_helper_schema() -> Dict[str, Any]:
+    """Load helper schema."""
+    return read_json_file(
+        importlib_resources.files(PACKAGE_NAME).joinpath(
+            "builder/llm-helper-schema.json"
+        )
+    )
+
+
+# Template functions (stateless with caching)
+_skill_template: Optional[Template] = None
+_helper_template: Optional[Template] = None
+
+
+def get_skill_generation_messages(
+    skill_description: str, project_data: Dict[str, str]
+) -> List[Dict[str, Any]]:
+    """Get messages for skill generation."""
+    global _skill_template
+
+    if _skill_template is None:
+        template_content = importlib.resources.read_text(
+            "rasa.builder",
+            "skill_to_bot_prompt.jinja2",
+        )
+        _skill_template = Template(template_content)
+
+    system_prompt = _skill_template.render(
+        skill_description=skill_description,
+        project_data=project_data,
+    )
+    return [{"role": "system", "content": system_prompt}]
+
+
+def get_helper_messages(
+    current_conversation: str,
+    bot_logs: str,
+    chat_bot_files: Dict[str, str],
+    documentation_results: str,
+) -> List[Dict[str, Any]]:
+    """Get messages for helper response."""
+    global _helper_template
+
+    if _helper_template is None:
+        template_content = importlib.resources.read_text(
+            "rasa.builder",
+            "llm_helper_prompt.jinja2",
+        )
+        _helper_template = Template(template_content)
+
+    system_prompt = _helper_template.render(
+        current_conversation=current_conversation,
+        bot_logs=bot_logs,
+        chat_bot_files=chat_bot_files,
+        documentation_results=documentation_results,
+    )
+    return [{"role": "system", "content": system_prompt}]
+
+
+# Global service instance
+llm_service = LLMService()
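For orientation, a minimal usage sketch of the module above — not part of the package. It assumes `OPENAI_API_KEY` is set in the environment and that `rasa.builder.config` supplies the `OPENAI_*` settings the module references; the `project_data` seed files here are hypothetical.

```python
import asyncio

from rasa.builder.llm_service import get_skill_generation_messages, llm_service


async def build_project() -> None:
    # Render skill_to_bot_prompt.jinja2 into a single system message.
    messages = get_skill_generation_messages(
        skill_description="Let users check their account balance.",
        project_data={"domain.yml": "", "data/flows.yml": ""},  # hypothetical seeds
    )
    # generate_rasa_project constrains the completion to a JSON schema with
    # "domain" and "flows" keys, so the result is a dict with those two entries.
    project = await llm_service.generate_rasa_project(messages)
    print(project["domain"], project["flows"])


asyncio.run(build_project())
```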
rasa/builder/logging_utils.py
ADDED

@@ -0,0 +1,51 @@
+"""Logging utilities for the prompt-to-bot service."""
+
+import collections
+import logging
+import threading
+from typing import Any, Deque, Dict
+
+from rasa.builder import config
+
+# Thread-safe deque for collecting recent logs
+_recent_logs: Deque[str] = collections.deque(maxlen=config.MAX_LOG_ENTRIES)
+_logs_lock = threading.RLock()
+
+
+def collecting_logs_processor(
+    logger: Any, log_level: str, event_dict: Dict[str, Any]
+) -> Dict[str, Any]:
+    """Structlog processor that collects recent log entries.
+
+    This processor is thread-safe and maintains a rolling buffer of recent logs.
+    """
+    if log_level != logging.getLevelName(logging.DEBUG).lower():
+        event_message = event_dict.get("event_info") or event_dict.get("event", "")
+        log_entry = f"[{log_level}] {event_message}"
+
+        with _logs_lock:
+            _recent_logs.append(log_entry)
+
+    return event_dict
+
+
+def get_recent_logs() -> str:
+    """Get recent log entries as a formatted string.
+
+    Returns:
+        Formatted string of recent log entries, one per line.
+    """
+    with _logs_lock:
+        return "\n".join(list(_recent_logs))
+
+
+def clear_recent_logs() -> None:
+    """Clear the recent logs buffer."""
+    with _logs_lock:
+        _recent_logs.clear()
+
+
+def get_log_count() -> int:
+    """Get the current number of log entries."""
+    with _logs_lock:
+        return len(_recent_logs)
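The processor follows structlog's `(logger, method_name, event_dict)` processor protocol, so it can also be exercised outside the service's own wiring. A minimal sketch with a plain structlog configuration (the event names are made up):

```python
import structlog

from rasa.builder.logging_utils import collecting_logs_processor, get_recent_logs

structlog.configure(
    processors=[
        collecting_logs_processor,  # buffers non-debug entries as "[level] message"
        structlog.processors.KeyValueRenderer(),
    ]
)

log = structlog.get_logger()
log.info("service_started", event_info="Builder service is up")
log.debug("noisy_internal_event")  # debug entries are excluded from the buffer

print(get_recent_logs())  # -> [info] Builder service is up
```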
rasa/builder/main.py
ADDED
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+"""Main entry point for the prompt-to-bot service."""
+
+import logging
+import sys
+from typing import Optional
+
+import rasa.core.utils
+from rasa.builder.logging_utils import collecting_logs_processor
+from rasa.builder.service import PromptToBotService
+from rasa.utils.common import configure_logging_and_warnings
+from rasa.utils.log_utils import configure_structlog
+from rasa.utils.sanic_error_handler import register_custom_sanic_error_handler
+
+
+def setup_logging():
+    """Setup logging configuration."""
+    log_level = logging.DEBUG
+
+    configure_logging_and_warnings(
+        log_level=log_level,
+        logging_config_file=None,
+        warn_only_once=True,
+        filter_repeated_logs=True,
+    )
+
+    configure_structlog(
+        log_level,
+        include_time=True,
+        additional_processors=[collecting_logs_processor],
+    )
+
+
+def main(project_folder: Optional[str] = None):
+    """Main entry point."""
+    try:
+        # Setup logging
+        setup_logging()
+
+        # Create and configure service
+
+        service = PromptToBotService(project_folder)
+        register_custom_sanic_error_handler(service.app)
+
+        # Log available routes
+        rasa.core.utils.list_routes(service.app)
+
+        # Run the service
+        service.run()
+
+    except KeyboardInterrupt:
+        print("\nService stopped by user")
+        sys.exit(0)
+    except Exception as e:
+        print(f"Failed to start service: {e}")
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    project_folder = sys.argv[1] if len(sys.argv) > 1 else None
+    main(project_folder)
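For reference, a hypothetical way to start the service, assuming the package is importable; the optional first CLI argument becomes the project folder handed to `PromptToBotService`:

```python
# python -m rasa.builder.main ./my-bot-project
#
# or, programmatically:
from rasa.builder.main import main

main(project_folder="./my-bot-project")  # placeholder path
```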
rasa/builder/models.py
ADDED
@@ -0,0 +1,174 @@
+"""Pydantic models for request/response validation."""
+
+from typing import Any, Dict, List, Literal, Optional, Union
+
+from pydantic import BaseModel, Field, validator
+
+from rasa.cli.scaffold import ProjectTemplateName
+from rasa.shared.core.trackers import DialogueStateTracker
+
+
+class PromptRequest(BaseModel):
+    """Request model for prompt-to-bot endpoint."""
+
+    prompt: str = Field(
+        ..., min_length=1, max_length=10000, description="The skill description prompt"
+    )
+    client_id: Optional[str] = Field(
+        None, max_length=255, description="Optional client identifier"
+    )
+
+    @validator("prompt")
+    def validate_prompt(cls, v):
+        if not v.strip():
+            raise ValueError("Prompt cannot be empty or whitespace only")
+        return v.strip()
+
+
+class TemplateRequest(BaseModel):
+    """Request model for template-to-bot endpoint."""
+
+    template_name: ProjectTemplateName = Field(
+        ...,
+        description=(
+            f"The template name to use ({ProjectTemplateName.supported_values()})"
+        ),
+    )
+    client_id: Optional[str] = Field(
+        None, max_length=255, description="Optional client identifier"
+    )
+
+    @validator("template_name")
+    def validate_template_name(cls, v):
+        if v not in ProjectTemplateName:
+            raise ValueError(
+                f"Template name must be one of {ProjectTemplateName.supported_values()}"
+            )
+        return v
+
+
+class ChatMessage(BaseModel):
+    """Model for chat messages."""
+
+    type: str = Field(..., pattern="^(user|assistant)$")
+    content: Union[str, List[Dict[str, Any]]] = Field(...)
+
+
+class LLMBuilderRequest(BaseModel):
+    """Request model for LLM builder endpoint."""
+
+    messages: List[ChatMessage] = Field(..., min_items=1, max_items=50)
+
+
+class LLMBuilderContext(BaseModel):
+    """Context model for LLM builder endpoint."""
+
+    tracker: Optional[DialogueStateTracker] = Field(None)
+    bot_logs: str = Field("")
+    chat_bot_files: Dict[str, str] = Field({})
+    chat_history: List[ChatMessage] = Field([])
+
+    class Config:
+        """Config for LLMBuilderContext."""
+
+        arbitrary_types_allowed = True
+
+
+class BotDataUpdateRequest(BaseModel):
+    """Request model for bot data updates."""
+
+    domain_yml: Optional[str] = Field(None, alias="domain.yml")
+    flows_yml: Optional[str] = Field(None, alias="flows.yml")
+    config_yml: Optional[str] = Field(None, alias="config.yml")
+
+    class Config:
+        """Config for BotDataUpdateRequest."""
+
+        allow_population_by_field_name = True
+
+
+class ContentBlock(BaseModel):
+    """Base model for content blocks."""
+
+    type: str = Field(...)
+
+
+class TextBlock(ContentBlock):
+    """Text content block."""
+
+    type: Literal["text"] = "text"
+    text: str = Field(...)
+
+
+class CodeBlock(ContentBlock):
+    """Code content block."""
+
+    type: Literal["code"] = "code"
+    text: str = Field(...)
+    language: Optional[str] = Field(None)
+
+
+class FileBlock(ContentBlock):
+    """File content block."""
+
+    type: Literal["file"] = "file"
+    file: str = Field(...)
+    content: str = Field(...)
+
+
+class LinkBlock(ContentBlock):
+    """Link content block."""
+
+    type: Literal["link"] = "link"
+    text: str = Field(..., pattern=r"^https?://")
+
+
+class LLMHelperResponse(BaseModel):
+    """Response model for LLM helper."""
+
+    content_blocks: List[Union[TextBlock, CodeBlock, FileBlock, LinkBlock]] = Field(...)
+
+
+class ApiResponse(BaseModel):
+    """Standard API response model."""
+
+    status: str = Field(...)
+    message: Optional[str] = Field(None)
+    data: Optional[Dict[str, Any]] = Field(None)
+
+
+class ApiErrorResponse(BaseModel):
+    """API error response model."""
+
+    status: Literal["error"] = "error"
+    error: str = Field(...)
+    details: Optional[Dict[str, Any]] = Field(None)
+
+
+class ServerSentEvent(BaseModel):
+    """Server-sent event model."""
+
+    event: str = Field(...)
+    data: Dict[str, Any] = Field(...)
+
+    def format(self) -> str:
+        """Format as SSE string."""
+        import json
+
+        return f"event: {self.event}\ndata: {json.dumps(self.data)}\n\n"
+
+
+class ValidationResult(BaseModel):
+    """Result of validation operation."""
+
+    is_valid: bool = Field(...)
+    errors: Optional[List[str]] = Field(None)
+    warnings: Optional[List[str]] = Field(None)
+
+
+class TrainingResult(BaseModel):
+    """Result of training operation."""
+
+    success: bool = Field(...)
+    model_path: Optional[str] = Field(None)
+    error: Optional[str] = Field(None)
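A small sketch of the request validation these models add, assuming the pydantic v1-style semantics used above (`validator`, `min_items`):

```python
from pydantic import ValidationError

from rasa.builder.models import ChatMessage, LLMBuilderRequest, PromptRequest

# The prompt validator strips surrounding whitespace...
req = PromptRequest(prompt="  build me a banking bot  ")
assert req.prompt == "build me a banking bot"

# ...and rejects whitespace-only prompts outright.
try:
    PromptRequest(prompt="   ")
except ValidationError as err:
    print(err)  # "Prompt cannot be empty or whitespace only"

# Chat messages are restricted to the user/assistant roles.
request = LLMBuilderRequest(
    messages=[ChatMessage(type="user", content="Add a greeting flow")]
)
```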
|