rasa-pro 3.13.0rc3__py3-none-any.whl → 3.13.1a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of rasa-pro might be problematic. Click here for more details.
- rasa/builder/README.md +120 -0
- rasa/builder/__init__.py +0 -0
- rasa/builder/config.py +69 -0
- rasa/builder/create_openai_vector_store.py +228 -0
- rasa/builder/exceptions.py +49 -0
- rasa/builder/llm-helper-schema.json +69 -0
- rasa/builder/llm_context.py +81 -0
- rasa/builder/llm_helper_prompt.jinja2 +245 -0
- rasa/builder/llm_service.py +327 -0
- rasa/builder/logging_utils.py +51 -0
- rasa/builder/main.py +61 -0
- rasa/builder/models.py +174 -0
- rasa/builder/project_generator.py +264 -0
- rasa/builder/scrape_rasa_docs.py +97 -0
- rasa/builder/service.py +447 -0
- rasa/builder/skill_to_bot_prompt.jinja2 +164 -0
- rasa/builder/training_service.py +123 -0
- rasa/builder/validation_service.py +79 -0
- rasa/cli/project_templates/finance/config.yml +17 -0
- rasa/cli/project_templates/finance/credentials.yml +33 -0
- rasa/cli/project_templates/finance/data/flows/transfer_money.yml +5 -0
- rasa/cli/project_templates/finance/data/patterns/pattern_session_start.yml +7 -0
- rasa/cli/project_templates/finance/domain.yml +7 -0
- rasa/cli/project_templates/finance/endpoints.yml +58 -0
- rasa/cli/project_templates/plain/config.yml +17 -0
- rasa/cli/project_templates/plain/credentials.yml +33 -0
- rasa/cli/project_templates/plain/data/patterns/pattern_session_start.yml +7 -0
- rasa/cli/project_templates/plain/domain.yml +5 -0
- rasa/cli/project_templates/plain/endpoints.yml +58 -0
- rasa/cli/project_templates/telecom/config.yml +17 -0
- rasa/cli/project_templates/telecom/credentials.yml +33 -0
- rasa/cli/project_templates/telecom/data/flows/upgrade_contract.yml +5 -0
- rasa/cli/project_templates/telecom/data/patterns/pattern_session_start.yml +7 -0
- rasa/cli/project_templates/telecom/domain.yml +7 -0
- rasa/cli/project_templates/telecom/endpoints.yml +58 -0
- rasa/cli/scaffold.py +19 -3
- rasa/core/actions/action.py +5 -3
- rasa/core/channels/studio_chat.py +29 -8
- rasa/core/policies/flows/flow_executor.py +8 -1
- rasa/core/tracker_stores/auth_retry_tracker_store.py +64 -3
- rasa/core/tracker_stores/dynamo_tracker_store.py +10 -0
- rasa/core/tracker_stores/mongo_tracker_store.py +17 -0
- rasa/core/tracker_stores/redis_tracker_store.py +23 -0
- rasa/core/tracker_stores/sql_tracker_store.py +27 -0
- rasa/core/tracker_stores/tracker_store.py +36 -2
- rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +1 -1
- rasa/model_manager/model_api.py +2 -2
- rasa/model_manager/runner_service.py +1 -1
- rasa/model_manager/trainer_service.py +12 -9
- rasa/model_manager/utils.py +1 -29
- rasa/privacy/privacy_manager.py +19 -16
- rasa/shared/core/domain.py +62 -15
- rasa/shared/core/flows/flow_step.py +7 -1
- rasa/shared/core/flows/yaml_flows_io.py +16 -8
- rasa/shared/core/slots.py +4 -0
- rasa/shared/importers/importer.py +6 -0
- rasa/shared/importers/static.py +63 -0
- rasa/telemetry.py +2 -1
- rasa/utils/io.py +27 -9
- rasa/utils/log_utils.py +5 -1
- rasa/validator.py +7 -3
- rasa/version.py +1 -1
- {rasa_pro-3.13.0rc3.dist-info → rasa_pro-3.13.1a2.dist-info}/METADATA +3 -3
- {rasa_pro-3.13.0rc3.dist-info → rasa_pro-3.13.1a2.dist-info}/RECORD +67 -31
- {rasa_pro-3.13.0rc3.dist-info → rasa_pro-3.13.1a2.dist-info}/NOTICE +0 -0
- {rasa_pro-3.13.0rc3.dist-info → rasa_pro-3.13.1a2.dist-info}/WHEEL +0 -0
- {rasa_pro-3.13.0rc3.dist-info → rasa_pro-3.13.1a2.dist-info}/entry_points.txt +0 -0
rasa/builder/service.py
ADDED
|
@@ -0,0 +1,447 @@
|
|
|
1
|
+
"""Main service for the prompt-to-bot functionality."""
|
|
2
|
+
|
|
3
|
+
from typing import Optional
|
|
4
|
+
|
|
5
|
+
import structlog
|
|
6
|
+
from sanic import Sanic, response
|
|
7
|
+
from sanic.request import Request
|
|
8
|
+
|
|
9
|
+
from rasa.builder import config
|
|
10
|
+
from rasa.builder.exceptions import (
|
|
11
|
+
LLMGenerationError,
|
|
12
|
+
ProjectGenerationError,
|
|
13
|
+
TrainingError,
|
|
14
|
+
ValidationError,
|
|
15
|
+
)
|
|
16
|
+
from rasa.builder.llm_service import llm_service
|
|
17
|
+
from rasa.builder.logging_utils import get_recent_logs
|
|
18
|
+
from rasa.builder.models import (
|
|
19
|
+
ApiErrorResponse,
|
|
20
|
+
ApiResponse,
|
|
21
|
+
LLMBuilderContext,
|
|
22
|
+
LLMBuilderRequest,
|
|
23
|
+
PromptRequest,
|
|
24
|
+
ServerSentEvent,
|
|
25
|
+
TemplateRequest,
|
|
26
|
+
)
|
|
27
|
+
from rasa.builder.project_generator import ProjectGenerator
|
|
28
|
+
from rasa.builder.training_service import train_and_load_agent
|
|
29
|
+
from rasa.builder.validation_service import validate_project
|
|
30
|
+
from rasa.cli.scaffold import ProjectTemplateName
|
|
31
|
+
from rasa.core.channels.studio_chat import StudioChatInput
|
|
32
|
+
from rasa.server import configure_cors
|
|
33
|
+
from rasa.shared.core.trackers import DialogueStateTracker
|
|
34
|
+
|
|
35
|
+
structlogger = structlog.get_logger()
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class PromptToBotService:
    """Main service for prompt-to-bot functionality.

    Wraps a Sanic application exposing HTTP endpoints to generate a bot from a
    prompt or template, read/update the project files, query the LLM builder,
    and serve the Studio chat input channel webhook.
    """

    def __init__(self, project_folder: Optional[str] = None):
        """Initialize the service with a project folder for file persistence.

        Args:
            project_folder: Path to the folder where project files will be stored.
                If None, defaults to a temporary directory.
        """
        if project_folder is None:
            import tempfile

            project_folder = tempfile.mkdtemp(prefix="rasa_builder_")

        structlogger.info(
            "prompt_to_bot.service_initialized", project_folder=project_folder
        )

        self.project_generator = ProjectGenerator(project_folder)

        self.app = Sanic("PromptToBotService")
        self.app.config.REQUEST_TIMEOUT = 60  # 1 minute timeout
        self.app.ctx.agent = None
        self.input_channel = self.setup_input_channel()
        self.setup_routes()
        self.setup_middleware()

        configure_cors(self.app, cors_origins=config.CORS_ORIGINS)

    def setup_input_channel(self) -> StudioChatInput:
        """Setup the input channel for chat interactions."""
        studio_chat_credentials = config.get_default_credentials().get(
            StudioChatInput.name()
        )
        return StudioChatInput.from_credentials(credentials=studio_chat_credentials)

    def setup_routes(self) -> None:
        """Setup all API routes."""
        # Core endpoints
        self.app.add_route(
            self.handle_prompt_to_bot, "/api/prompt-to-bot", methods=["POST"]
        )
        self.app.add_route(
            self.handle_template_to_bot, "/api/template-to-bot", methods=["POST"]
        )
        self.app.add_route(self.get_bot_data, "/api/bot-data", methods=["GET"])
        self.app.add_route(self.update_bot_data, "/api/bot-data", methods=["PUT"])
        self.app.add_route(self.llm_builder, "/api/llm-builder", methods=["POST"])

        # Health check
        self.app.add_route(self.health, "/", methods=["GET"])

        # Register input channel webhooks
        from rasa.core import channels

        channels.channel.register([self.input_channel], self.app, route="/webhooks/")

    def setup_middleware(self) -> None:
        """Setup middleware for request/response processing."""

        @self.app.middleware("request")
        async def log_request(request):
            structlogger.info(
                "request.received",
                method=request.method,
                path=request.path,
                remote_addr=request.remote_addr or "unknown",
            )

        @self.app.middleware("response")
        async def log_response(request, response):
            structlogger.info(
                "request.completed",
                method=request.method,
                path=request.path,
                status=response.status,
            )

    async def health(self, request: Request):
        """Health check endpoint."""
        return response.json({"status": "ok", "service": "prompt-to-bot"})

    async def _train_and_activate_agent(self) -> None:
        """Train a model from the current project files and route chat to it.

        Stores the freshly trained agent on the app context and points the
        input channel at it so new conversations use the new model.
        """
        importer = self.project_generator._create_importer()
        self.app.ctx.agent = await train_and_load_agent(importer)
        self.input_channel.agent = self.app.ctx.agent

    @staticmethod
    def _bot_data_success_response(bot_files):
        """Build the standard success JSON response carrying the bot files."""
        return response.json(
            ApiResponse(
                status="success",
                message="Bot generated successfully",
                data={"bot_data": bot_files},
            ).model_dump()
        )

    @staticmethod
    def _generation_error_response(log_prefix: str, e: Exception):
        """Map a generation-pipeline exception to a logged JSON error response.

        Shared by the prompt-to-bot and template-to-bot handlers so the
        exception-to-status mapping stays consistent between the two endpoints.

        Args:
            log_prefix: Log event namespace, e.g. "prompt_to_bot".
            e: The exception raised by the generation/training pipeline.

        Returns:
            A Sanic JSON error response with the appropriate HTTP status.
        """
        if isinstance(e, ValidationError):
            structlogger.error(f"{log_prefix}.validation_error", error=str(e))
            return response.json(
                ApiErrorResponse(
                    error="Validation failed", details={"validation_error": str(e)}
                ).model_dump(),
                status=400,
            )

        if isinstance(e, ProjectGenerationError):
            structlogger.error(f"{log_prefix}.generation_error", error=str(e))
            return response.json(
                ApiErrorResponse(
                    error="Project generation failed",
                    details={"attempts": e.attempts, "error": str(e)},
                ).model_dump(),
                status=500,
            )

        if isinstance(e, TrainingError):
            structlogger.error(f"{log_prefix}.training_error", error=str(e))
            return response.json(
                ApiErrorResponse(
                    error="Model training failed", details={"training_error": str(e)}
                ).model_dump(),
                status=500,
            )

        if isinstance(e, LLMGenerationError):
            # 502: the upstream LLM provider failed, not this service.
            structlogger.error(f"{log_prefix}.llm_error", error=str(e))
            return response.json(
                ApiErrorResponse(
                    error="LLM generation failed", details={"llm_error": str(e)}
                ).model_dump(),
                status=502,
            )

        structlogger.error(f"{log_prefix}.unexpected_error", error=str(e))
        return response.json(
            ApiErrorResponse(error="Unexpected error occurred").model_dump(),
            status=500,
        )

    async def handle_prompt_to_bot(self, request: Request):
        """Handle prompt-to-bot generation requests."""
        try:
            # Validate request
            prompt_data = PromptRequest(**request.json)

            # Generate project with retries
            bot_files = await self.project_generator.generate_project_with_retries(
                prompt_data.prompt,
                template=ProjectTemplateName.PLAIN,
            )

            # Train, load, and activate the new agent
            await self._train_and_activate_agent()

            structlogger.info(
                "prompt_to_bot.success",
                client_id=prompt_data.client_id,
                files_generated=list(bot_files.keys()),
            )

            return self._bot_data_success_response(bot_files)

        except Exception as e:
            return self._generation_error_response("prompt_to_bot", e)

    async def handle_template_to_bot(self, request: Request):
        """Handle template-to-bot generation requests."""
        try:
            # Validate request
            template_data = TemplateRequest(**request.json)

            # Copy the template project into the project folder
            self.project_generator.init_from_template(
                template_data.template_name,
            )
            bot_files = self.project_generator.get_bot_files()

            # Train, load, and activate the new agent
            await self._train_and_activate_agent()

            structlogger.info(
                "template_to_bot.success",
                client_id=template_data.client_id,
                files_generated=list(bot_files.keys()),
            )

            return self._bot_data_success_response(bot_files)

        except Exception as e:
            return self._generation_error_response("template_to_bot", e)

    async def get_bot_data(self, request: Request):
        """Get current bot data."""
        bot_files = self.project_generator.get_bot_files()
        return response.json(
            ApiResponse(status="success", data={"bot_data": bot_files}).model_dump()
        )

    async def update_bot_data(self, request: Request):
        """Update bot data with server-sent events for progress tracking.

        Streams progress events (received -> validating -> training -> done)
        over a text/event-stream response; validation or training failures end
        the stream early with a corresponding error event.
        """
        sse_response = await request.respond(content_type="text/event-stream")

        try:
            # 1. Received
            await self._send_sse_event(
                sse_response,
                ServerSentEvent(event="received", data={"status": "received"}),
            )

            # Update bot files
            bot_data = request.json
            self.project_generator.update_bot_files(bot_data)

            # 2. Validating
            await self._send_sse_event(
                sse_response,
                ServerSentEvent(event="validating", data={"status": "validating"}),
            )

            try:
                importer = self.project_generator._create_importer()
                validation_error = await validate_project(importer)

                if validation_error:
                    raise ValidationError(validation_error)

                await self._send_sse_event(
                    sse_response,
                    ServerSentEvent(
                        event="validation_success",
                        data={"status": "validation_success"},
                    ),
                )

            except ValidationError as e:
                await self._send_sse_event(
                    sse_response,
                    ServerSentEvent(
                        event="validation_error",
                        data={"status": "validation_error", "error": str(e)},
                    ),
                )
                # Stream is closed exactly once by the finally block below.
                return

            # 3. Training
            await self._send_sse_event(
                sse_response,
                ServerSentEvent(event="training", data={"status": "training"}),
            )

            try:
                self.app.ctx.agent = await train_and_load_agent(importer)
                self.input_channel.agent = self.app.ctx.agent

                await self._send_sse_event(
                    sse_response,
                    ServerSentEvent(
                        event="train_success", data={"status": "train_success"}
                    ),
                )

            except TrainingError as e:
                await self._send_sse_event(
                    sse_response,
                    ServerSentEvent(
                        event="train_error",
                        data={"status": "train_error", "error": str(e)},
                    ),
                )
                # Stream is closed exactly once by the finally block below.
                return

            # 4. Done
            await self._send_sse_event(
                sse_response,
                ServerSentEvent(
                    event="done",
                    data={
                        "status": "done",
                        "bot_data": self.project_generator.get_bot_files(),
                    },
                ),
            )

        except Exception as e:
            structlogger.error("bot_data_update.unexpected_error", error=str(e))
            await self._send_sse_event(
                sse_response,
                ServerSentEvent(
                    event="error", data={"status": "error", "error": str(e)}
                ),
            )
        finally:
            await sse_response.eof()

    async def llm_builder(self, request: Request):
        """Handle LLM builder requests."""
        try:
            # Validate request
            builder_request = LLMBuilderRequest(**request.json)

            # Get current conversation context
            current_tracker = await self.current_tracker_from_input_channel()
            bot_logs = get_recent_logs()
            chat_bot_files = self.project_generator.get_bot_files()

            # create LLM builder context
            llm_builder_context = LLMBuilderContext(
                tracker=current_tracker,
                bot_logs=bot_logs,
                chat_bot_files=chat_bot_files,
                chat_history=builder_request.messages,
            )

            # Generate response
            messages = await llm_service.create_helper_messages(llm_builder_context)
            llm_response = await llm_service.generate_helper_response(messages)

            return response.json(llm_response)

        except LLMGenerationError as e:
            structlogger.error("llm_builder.generation_error", error=str(e))
            return response.json(
                ApiErrorResponse(
                    error="LLM helper generation failed", details={"llm_error": str(e)}
                ).model_dump(),
                status=502,
            )

        except Exception as e:
            structlogger.error("llm_builder.unexpected_error", error=str(e))
            return response.json(
                ApiErrorResponse(error="Unexpected error in LLM builder").model_dump(),
                status=500,
            )

    async def current_tracker_from_input_channel(
        self,
    ) -> Optional[DialogueStateTracker]:
        """Return the tracker of the most recent chat session, if any."""
        if self.app.ctx.agent and self.input_channel.latest_tracker_session_id:
            return await self.app.ctx.agent.tracker_store.retrieve(
                self.input_channel.latest_tracker_session_id
            )
        else:
            return None

    @staticmethod
    async def _send_sse_event(sse_response, event: ServerSentEvent):
        """Send a server-sent event."""
        await sse_response.send(event.format())

    def run(self) -> None:
        """Run the service (blocking)."""
        structlogger.info(
            "service.starting", host=config.SERVER_HOST, port=config.SERVER_PORT
        )

        self.app.run(
            host=config.SERVER_HOST,
            port=config.SERVER_PORT,
            legacy=True,
            motd=False,
        )
@@ -0,0 +1,164 @@
|
|
|
1
|
+
# Rasa CALM Flow and Domain YAML Generator
|
|
2
|
+
|
|
3
|
+
You are an expert in creating Rasa CALM flows and domain JSON configurations. Your task is to generate these JSON files based on a user's description of a conversational skill. The user may not be familiar with Rasa, so it's crucial to interpret their requirements accurately and create a well-structured, functional Rasa configuration.
|
|
4
|
+
|
|
5
|
+
## Input
|
|
6
|
+
You will receive a description of a conversational skill. This description will be inserted where you see [USER_SKILL_DESCRIPTION] in this prompt.
|
|
7
|
+
|
|
8
|
+
## Output
|
|
9
|
+
Generate JSON configurations that include:
|
|
10
|
+
1. A Rasa CALM flow JSON
|
|
11
|
+
2. A corresponding Rasa domain JSON
|
|
12
|
+
|
|
13
|
+
Ensure that both JSON documents are complete, well-structured, and compatible with each other.
|
|
14
|
+
|
|
15
|
+
## Example
|
|
16
|
+
Here's an example of a skill description and the corresponding JSON outputs and custom action code:
|
|
17
|
+
|
|
18
|
+
Skill Description: "Create a skill for transferring money. It should ask for the recipient and amount, check the user has enough balance, and then confirm with the user before finalizing the transfer."
|
|
19
|
+
|
|
20
|
+
CALM BOT JSON:
|
|
21
|
+
```json
|
|
22
|
+
{
|
|
23
|
+
"flows": {
|
|
24
|
+
"transfer_money": {
|
|
25
|
+
"description": "This flow lets users send money to friends and family.",
|
|
26
|
+
"steps": [
|
|
27
|
+
{
|
|
28
|
+
"collect": "recipient"
|
|
29
|
+
},
|
|
30
|
+
{
|
|
31
|
+
"collect": "amount",
|
|
32
|
+
"description": "the number of US dollars to send"
|
|
33
|
+
},
|
|
34
|
+
{
|
|
35
|
+
"action": "action_check_sufficient_funds",
|
|
36
|
+
"next": [
|
|
37
|
+
{
|
|
38
|
+
"if": "not slots.has_sufficient_funds",
|
|
39
|
+
"then": [
|
|
40
|
+
{
|
|
41
|
+
"action": "utter_insufficient_funds",
|
|
42
|
+
"next": "END"
|
|
43
|
+
}
|
|
44
|
+
]
|
|
45
|
+
},
|
|
46
|
+
{
|
|
47
|
+
"else": "final_confirmation"
|
|
48
|
+
}
|
|
49
|
+
]
|
|
50
|
+
},
|
|
51
|
+
{
|
|
52
|
+
"collect": "final_confirmation",
|
|
53
|
+
"id": "final_confirmation",
|
|
54
|
+
"next": [
|
|
55
|
+
{
|
|
56
|
+
"if": "not slots.final_confirmation",
|
|
57
|
+
"then": [
|
|
58
|
+
{
|
|
59
|
+
"action": "utter_transfer_cancelled",
|
|
60
|
+
"next": "END"
|
|
61
|
+
}
|
|
62
|
+
]
|
|
63
|
+
},
|
|
64
|
+
{
|
|
65
|
+
"else": "transfer_successful"
|
|
66
|
+
}
|
|
67
|
+
]
|
|
68
|
+
},
|
|
69
|
+
{
|
|
70
|
+
"action": "utter_transfer_complete",
|
|
71
|
+
"id": "transfer_successful"
|
|
72
|
+
}
|
|
73
|
+
]
|
|
74
|
+
}
|
|
75
|
+
},
|
|
76
|
+
"domain": {
|
|
77
|
+
"actions": [
|
|
78
|
+
"action_check_sufficient_funds"
|
|
79
|
+
],
|
|
80
|
+
"slots": {
|
|
81
|
+
"recipient": {
|
|
82
|
+
"type": "Text",
|
|
83
|
+
"mappings": [
|
|
84
|
+
{
|
|
85
|
+
"type": "from_llm"
|
|
86
|
+
}
|
|
87
|
+
]
|
|
88
|
+
},
|
|
89
|
+
"has_sufficient_funds": {
|
|
90
|
+
"type": "bool",
|
|
91
|
+
"mappings": [
|
|
92
|
+
{
|
|
93
|
+
"type": "custom"
|
|
94
|
+
}
|
|
95
|
+
]
|
|
96
|
+
}
|
|
97
|
+
},
|
|
98
|
+
"responses": {
|
|
99
|
+
"utter_ask_recipient": [
|
|
100
|
+
{
|
|
101
|
+
"text": "Who would you like to send money to?"
|
|
102
|
+
}
|
|
103
|
+
],
|
|
104
|
+
"utter_ask_final_confirmation": [
|
|
105
|
+
{
|
|
106
|
+
"text": "Please confirm: you want to transfer {amount} to {recipient}?"
|
|
107
|
+
}
|
|
108
|
+
],
|
|
109
|
+
"utter_transfer_cancelled": [
|
|
110
|
+
{
|
|
111
|
+
"text": "Your transfer has been cancelled."
|
|
112
|
+
}
|
|
113
|
+
],
|
|
114
|
+
"utter_insufficient_funds": [
|
|
115
|
+
{
|
|
116
|
+
"text": "You do not have enough funds to make this transaction."
|
|
117
|
+
}
|
|
118
|
+
]
|
|
119
|
+
}
|
|
120
|
+
}
|
|
121
|
+
}
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
## Guidelines for CALM Flow JSON:
|
|
125
|
+
- Start a `flow` with the name of the flow as the key in `flows`
|
|
126
|
+
- Include a `description` for the skill
|
|
127
|
+
- Use `steps` to outline the conversation flow
|
|
128
|
+
- Implement `collect` steps for gathering information
|
|
129
|
+
- `collect` steps should reference an existing slot
|
|
130
|
+
- For a collect step referencing a slot `A`, there should be a corresponding `utter_ask_A`
|
|
131
|
+
utterance that is used in the collect step to ask for the information to be stored in `A`
|
|
132
|
+
- Use `action` steps for custom actions
|
|
133
|
+
- Implement conditional logic with `if`, `then`, and `else` where appropriate
|
|
134
|
+
- Use `next` to define the flow between steps. If the flow should end after a step, add next: END.
|
|
135
|
+
- The content after `then` or `else` can be: the id of another step defined in the flow, a list of steps, or an END
|
|
136
|
+
- End the flow with an appropriate action or message
|
|
137
|
+
|
|
138
|
+
## Guidelines for Domain JSON:
|
|
139
|
+
- Include all necessary `actions`
|
|
140
|
+
- Define all required `slots` with appropriate types. Type should be one of 'float', 'bool', 'text', or 'categorical'
|
|
141
|
+
- Slots filled by a 'collect' step should have mapping 'from_llm' and slots set by custom actions should have a 'custom' mapping
|
|
142
|
+
- Provide `responses` for all bot turns
|
|
143
|
+
|
|
144
|
+
## Guidelines for Custom Actions
|
|
145
|
+
- Ensure all actions mentioned in the flow are properly defined in the domain's list of actions.
|
|
146
|
+
|
|
147
|
+
## Important Notes:
|
|
148
|
+
- Ensure that the flow logic is coherent and follows a natural conversation pattern
|
|
149
|
+
- All custom actions in the flow should be listed in the domain's `actions` section
|
|
150
|
+
- All slots collected in the flow should be defined in the domain's `slots` section
|
|
151
|
+
- Provide appropriate `utter_` responses for each user interaction
|
|
152
|
+
- Use realistic and appropriate names for actions, slots, and responses
|
|
153
|
+
- Consider error handling and alternative conversation paths
|
|
154
|
+
- Aim for a balance between simplicity and functionality
|
|
155
|
+
|
|
156
|
+
Now, please generate Rasa CALM flow and domain JSON based on the initial bot data.
|
|
157
|
+
Modify the initial bot data where it makes sense based on the users description
|
|
158
|
+
of the skill (e.g. the initial bot greeting utter_greet).
|
|
159
|
+
|
|
160
|
+
INITIAL BOT DATA:
|
|
161
|
+
{{project_data|tojson}}
|
|
162
|
+
|
|
163
|
+
USER_SKILL_DESCRIPTION:
|
|
164
|
+
{{skill_description}}
|