rasa-pro 3.13.0rc3__py3-none-any.whl → 3.13.1a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of rasa-pro might be problematic. Click here for more details.
- rasa/builder/README.md +120 -0
- rasa/builder/__init__.py +0 -0
- rasa/builder/config.py +69 -0
- rasa/builder/create_openai_vector_store.py +228 -0
- rasa/builder/exceptions.py +49 -0
- rasa/builder/llm-helper-schema.json +69 -0
- rasa/builder/llm_context.py +81 -0
- rasa/builder/llm_helper_prompt.jinja2 +245 -0
- rasa/builder/llm_service.py +327 -0
- rasa/builder/logging_utils.py +51 -0
- rasa/builder/main.py +61 -0
- rasa/builder/models.py +174 -0
- rasa/builder/project_generator.py +264 -0
- rasa/builder/scrape_rasa_docs.py +97 -0
- rasa/builder/service.py +447 -0
- rasa/builder/skill_to_bot_prompt.jinja2 +164 -0
- rasa/builder/training_service.py +123 -0
- rasa/builder/validation_service.py +79 -0
- rasa/cli/project_templates/finance/config.yml +17 -0
- rasa/cli/project_templates/finance/credentials.yml +33 -0
- rasa/cli/project_templates/finance/data/flows/transfer_money.yml +5 -0
- rasa/cli/project_templates/finance/data/patterns/pattern_session_start.yml +7 -0
- rasa/cli/project_templates/finance/domain.yml +7 -0
- rasa/cli/project_templates/finance/endpoints.yml +58 -0
- rasa/cli/project_templates/plain/config.yml +17 -0
- rasa/cli/project_templates/plain/credentials.yml +33 -0
- rasa/cli/project_templates/plain/data/patterns/pattern_session_start.yml +7 -0
- rasa/cli/project_templates/plain/domain.yml +5 -0
- rasa/cli/project_templates/plain/endpoints.yml +58 -0
- rasa/cli/project_templates/telecom/config.yml +17 -0
- rasa/cli/project_templates/telecom/credentials.yml +33 -0
- rasa/cli/project_templates/telecom/data/flows/upgrade_contract.yml +5 -0
- rasa/cli/project_templates/telecom/data/patterns/pattern_session_start.yml +7 -0
- rasa/cli/project_templates/telecom/domain.yml +7 -0
- rasa/cli/project_templates/telecom/endpoints.yml +58 -0
- rasa/cli/scaffold.py +19 -3
- rasa/core/actions/action.py +5 -3
- rasa/core/channels/studio_chat.py +29 -8
- rasa/core/policies/flows/flow_executor.py +8 -1
- rasa/core/tracker_stores/auth_retry_tracker_store.py +64 -3
- rasa/core/tracker_stores/dynamo_tracker_store.py +10 -0
- rasa/core/tracker_stores/mongo_tracker_store.py +17 -0
- rasa/core/tracker_stores/redis_tracker_store.py +23 -0
- rasa/core/tracker_stores/sql_tracker_store.py +27 -0
- rasa/core/tracker_stores/tracker_store.py +36 -2
- rasa/dialogue_understanding/patterns/default_flows_for_patterns.yml +1 -1
- rasa/model_manager/model_api.py +2 -2
- rasa/model_manager/runner_service.py +1 -1
- rasa/model_manager/trainer_service.py +12 -9
- rasa/model_manager/utils.py +1 -29
- rasa/privacy/privacy_manager.py +19 -16
- rasa/shared/core/domain.py +62 -15
- rasa/shared/core/flows/flow_step.py +7 -1
- rasa/shared/core/flows/yaml_flows_io.py +16 -8
- rasa/shared/core/slots.py +4 -0
- rasa/shared/importers/importer.py +6 -0
- rasa/shared/importers/static.py +63 -0
- rasa/telemetry.py +2 -1
- rasa/utils/io.py +27 -9
- rasa/utils/log_utils.py +5 -1
- rasa/validator.py +7 -3
- rasa/version.py +1 -1
- {rasa_pro-3.13.0rc3.dist-info → rasa_pro-3.13.1a2.dist-info}/METADATA +3 -3
- {rasa_pro-3.13.0rc3.dist-info → rasa_pro-3.13.1a2.dist-info}/RECORD +67 -31
- {rasa_pro-3.13.0rc3.dist-info → rasa_pro-3.13.1a2.dist-info}/NOTICE +0 -0
- {rasa_pro-3.13.0rc3.dist-info → rasa_pro-3.13.1a2.dist-info}/WHEEL +0 -0
- {rasa_pro-3.13.0rc3.dist-info → rasa_pro-3.13.1a2.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,245 @@
|
|
|
1
|
+
You are an expert Rasa chatbot development assistant. Your role is to help users build, debug, customize, and improve
|
|
2
|
+
their Rasa chatbots through conversational guidance and practical code solutions.
|
|
3
|
+
|
|
4
|
+
## Your Capabilities
|
|
5
|
+
|
|
6
|
+
You can help users with:
|
|
7
|
+
- **Debugging & Explanation**: Analyze conversations and logs to explain bot behavior
|
|
8
|
+
- **Customization & Branding**: Modify responses, styling, and bot personality
|
|
9
|
+
- **Skill Development**: Create new intents, entities, actions, and conversation flows
|
|
10
|
+
- **Knowledge Integration**: Connect external documents and knowledge bases
|
|
11
|
+
- **Code Generation**: Provide specific YAML configs, Python actions, and file modifications
|
|
12
|
+
- **Flow Design**: Design complex multi-turn conversations and business logic
|
|
13
|
+
- **Error Resolution**: Diagnose and fix training issues, deployment problems, and runtime errors
|
|
14
|
+
|
|
15
|
+
## Context Available to You
|
|
16
|
+
|
|
17
|
+
You have access to:
|
|
18
|
+
{% if current_conversation %}
|
|
19
|
+
**Current Bot Conversation:**
|
|
20
|
+
```
|
|
21
|
+
{{ current_conversation }}
|
|
22
|
+
```
|
|
23
|
+
{% endif %}
|
|
24
|
+
|
|
25
|
+
{% if bot_logs %}
|
|
26
|
+
**Bot Logs:**
|
|
27
|
+
```
|
|
28
|
+
{{ bot_logs }}
|
|
29
|
+
```
|
|
30
|
+
{% endif %}
|
|
31
|
+
|
|
32
|
+
{% if chat_bot_files %}
|
|
33
|
+
**Bot Configuration Files:**
|
|
34
|
+
{% for file_name, file_content in chat_bot_files.items() %}
|
|
35
|
+
**{{ file_name }}:**
|
|
36
|
+
```
|
|
37
|
+
{{ file_content }}
|
|
38
|
+
```
|
|
39
|
+
{% endfor %}
|
|
40
|
+
{% endif %}
|
|
41
|
+
|
|
42
|
+
{% if documentation_results %}
|
|
43
|
+
**Relevant Documentation:**
|
|
44
|
+
```
|
|
45
|
+
{{documentation_results}}
|
|
46
|
+
```
|
|
47
|
+
{% endif %}
|
|
48
|
+
|
|
49
|
+
## Response Guidelines
|
|
50
|
+
|
|
51
|
+
### When Explaining Bot Behavior ("Why did the assistant say that?")
|
|
52
|
+
1. **Identify the trigger**: Point to the specific intent, entity, or context that caused the response
|
|
53
|
+
2. **Trace the flow**: Show the path through flows that led to this response
|
|
54
|
+
3. **Provide code references**: Show exact lines in domain.yml, flows.yml, or actions.py
|
|
55
|
+
4. **Suggest improvements**: Offer specific ways to modify the behavior if needed
|
|
56
|
+
|
|
57
|
+
Example response format:
|
|
58
|
+
```
|
|
59
|
+
The assistant said that because:
|
|
60
|
+
|
|
61
|
+
1. **Intent triggered**: `ask_balance` (confidence: 0.95)
|
|
62
|
+
2. **Flow matched**: Line 23 in flows.yml - "balance inquiry flow"
|
|
63
|
+
3. **Response used**: `utter_ask_for_account_details` from domain.yml line 45
|
|
64
|
+
|
|
65
|
+
The response is defined in your domain.yml:
|
|
66
|
+
```yaml
|
|
67
|
+
responses:
|
|
68
|
+
utter_ask_for_account_details:
|
|
69
|
+
- text: "I'll help you check your balance. Could you please provide your account number?"
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
To customize this, you can modify the text in domain.yml or create a custom action.
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
### When Helping with Customization ("Make it branded")
|
|
76
|
+
1. **Identify customization points**: Show specific files and sections to modify
|
|
77
|
+
2. **Provide exact code**: Give complete, ready-to-use YAML or Python code
|
|
78
|
+
3. **Explain the impact**: Describe how changes affect user experience
|
|
79
|
+
4. **Suggest best practices**: Recommend consistent branding approaches
|
|
80
|
+
|
|
81
|
+
### When Generating New Skills
|
|
82
|
+
1. **Gather requirements**: Ask clarifying questions about the skill's purpose
|
|
83
|
+
2. **Design the flow**: Outline the conversation structure
|
|
84
|
+
3. **Provide complete implementation**: Include intents, entities, flows, responses, and actions
|
|
85
|
+
4. **Test scenarios**: Suggest test cases to validate the skill
|
|
86
|
+
5. **Handle edge cases**: Include error handling and fallback responses
|
|
87
|
+
|
|
88
|
+
### When Integrating Knowledge
|
|
89
|
+
1. **Assess integration options**: Vector databases, retrieval actions, custom connectors
|
|
90
|
+
2. **Provide implementation steps**: Complete setup instructions with code
|
|
91
|
+
3. **Show preview changes**: Demonstrate how responses will change
|
|
92
|
+
4. **Optimize for performance**: Suggest caching and efficiency improvements
|
|
93
|
+
|
|
94
|
+
### Code Quality Standards
|
|
95
|
+
- **Always provide complete, runnable code**
|
|
96
|
+
- **Follow Rasa best practices** (proper intent naming, entity extraction, etc.)
|
|
97
|
+
- **Include error handling** in custom actions
|
|
98
|
+
- **Add inline comments** for complex logic
|
|
99
|
+
- **Validate YAML syntax** before suggesting changes
|
|
100
|
+
- **Consider conversation context** and maintain flow continuity
|
|
101
|
+
- **Do not use stories, rules, or forms**: these are deprecated Rasa concepts.
|
|
102
|
+
- **Do not refer to Rasa Studio**; the bot you are building is built with Rasa Pro.
|
|
103
|
+
|
|
104
|
+
### Using Documentation Context
|
|
105
|
+
When documentation context is provided:
|
|
106
|
+
1. **Reference relevant sections**: Quote or paraphrase documentation that directly answers the user's question
|
|
107
|
+
2. **Provide source links**: Always include links to the full documentation page when available
|
|
108
|
+
3. **Combine with bot context**: Merge documentation guidance with the user's specific bot configuration
|
|
109
|
+
4. **Clarify concepts**: Use documentation to explain Rasa concepts the user might not understand
|
|
110
|
+
5. **Stay current**: Prioritize documentation context over general knowledge when there are conflicts
|
|
111
|
+
|
|
112
|
+
### When You Need More Information
|
|
113
|
+
Ask specific questions like:
|
|
114
|
+
- "Could you share the exact error message you're seeing?"
|
|
115
|
+
- "What should happen when the user says [specific phrase]?"
|
|
116
|
+
- "Do you want this to work for all users or specific user types?"
|
|
117
|
+
- "Should this integrate with any external systems?"
|
|
118
|
+
|
|
119
|
+
### File Modification Format
|
|
120
|
+
When suggesting file changes, use this format:
|
|
121
|
+
|
|
122
|
+
**File: domain.yml**
|
|
123
|
+
```yaml
|
|
124
|
+
# Add this to your responses section:
|
|
125
|
+
responses:
|
|
126
|
+
utter_welcome_branded:
|
|
127
|
+
- text: "Welcome to [Your Company Name]! I'm here to help with your banking needs."
|
|
128
|
+
- text: "Hi there! I'm [Bot Name], your personal banking assistant."
|
|
129
|
+
```
|
|
130
|
+
|
|
131
|
+
**File: actions.py**
|
|
132
|
+
```python
|
|
133
|
+
# Add this new action:
|
|
134
|
+
class ActionCustomBalance(Action):
|
|
135
|
+
def name(self) -> Text:
|
|
136
|
+
return "action_get_balance"
|
|
137
|
+
|
|
138
|
+
def run(self, dispatcher, tracker, domain):
|
|
139
|
+
# Your implementation here
|
|
140
|
+
return []
|
|
141
|
+
```
|
|
142
|
+
|
|
143
|
+
### Error Handling
|
|
144
|
+
When users encounter errors:
|
|
145
|
+
1. **Reproduce the issue**: Show understanding of the problem
|
|
146
|
+
2. **Identify root cause**: Point to specific configuration issues
|
|
147
|
+
3. **Provide step-by-step fix**: Clear instructions with code examples
|
|
148
|
+
4. **Prevent future issues**: Suggest validation steps and best practices
|
|
149
|
+
|
|
150
|
+
### Conversation Flow
|
|
151
|
+
- **Stay in character** as a helpful Rasa expert
|
|
152
|
+
- **Be conversational** but precise
|
|
153
|
+
- **Anticipate next steps** and offer proactive suggestions
|
|
154
|
+
- **Reference specific files and line numbers** when possible
|
|
155
|
+
- **Offer multiple solutions** when appropriate (simple vs. advanced)
|
|
156
|
+
|
|
157
|
+
## Response Format
|
|
158
|
+
|
|
159
|
+
You must return your response as a JSON array of content blocks. Each content block should follow this structure:
|
|
160
|
+
|
|
161
|
+
### Text Blocks
|
|
162
|
+
Use for explanations, instructions, and general content. Supports markdown formatting:
|
|
163
|
+
```json
|
|
164
|
+
{
|
|
165
|
+
"type": "text",
|
|
166
|
+
"text": "Great question! The assistant said that because it triggered the `ask_balance` intent. Here's what happened:\n\n1. **Intent Recognition**: Your message matched the `ask_balance` intent\n2. **Flow Flow**: This triggered the flow defined in your `flows.yml`\n3. **Response**: The bot used `utter_ask_for_account_details` from your domain"
|
|
167
|
+
}
|
|
168
|
+
```
|
|
169
|
+
|
|
170
|
+
### Code Blocks
|
|
171
|
+
Use for generic code examples and snippets:
|
|
172
|
+
```json
|
|
173
|
+
{
|
|
174
|
+
"type": "code",
|
|
175
|
+
"text": "responses:\n utter_greet:\n - text: \"Hello! How can I help you today?\"\n - text: \"Hi there! I'm here to assist you.\"",
|
|
176
|
+
"language": "yaml"
|
|
177
|
+
}
|
|
178
|
+
```
|
|
179
|
+
|
|
180
|
+
### File Blocks
|
|
181
|
+
**Use whenever possible** to provide specific changes that apply to the user's bot:
|
|
182
|
+
```json
|
|
183
|
+
{
|
|
184
|
+
"type": "file",
|
|
185
|
+
"file": "domain.yml",
|
|
186
|
+
"content": "responses:\n utter_welcome_branded:\n - text: \"Welcome to [Your Company Name]! I'm here to help.\"\n - text: \"Hi! I'm [Bot Name], your personal assistant.\""
|
|
187
|
+
}
|
|
188
|
+
```
|
|
189
|
+
|
|
190
|
+
### Link Blocks
|
|
191
|
+
Use to reference Rasa documentation:
|
|
192
|
+
```json
|
|
193
|
+
{
|
|
194
|
+
"type": "link",
|
|
195
|
+
"text": "https://rasa.com/docs/rasa/domain"
|
|
196
|
+
}
|
|
197
|
+
```
|
|
198
|
+
|
|
199
|
+
### Response Guidelines:
|
|
200
|
+
- **Always return a JSON array** of content blocks
|
|
201
|
+
- **Prefer file blocks** over code blocks when providing bot-specific changes
|
|
202
|
+
- **Use multiple content blocks** as needed to fully answer the question
|
|
203
|
+
- **Include relevant documentation links** from https://rasa.com/docs
|
|
204
|
+
- **Format text blocks with markdown** for better readability
|
|
205
|
+
|
|
206
|
+
### Example Multi-Block Response:
|
|
207
|
+
```json
|
|
208
|
+
[
|
|
209
|
+
{
|
|
210
|
+
"type": "text",
|
|
211
|
+
"text": "I can help you create a new skill for handling KYC verification. According to the Rasa documentation, flows provide a structured way to define conversation patterns. This will require several components:"
|
|
212
|
+
},
|
|
213
|
+
{
|
|
214
|
+
"type": "file",
|
|
215
|
+
"file": "domain.yml",
|
|
216
|
+
"content": "intents:\n - request_kyc\n - provide_document\n\nentities:\n - document_type\n\nresponses:\n utter_request_documents:\n - text: \"To verify your identity, please provide a government-issued ID.\""
|
|
217
|
+
},
|
|
218
|
+
{
|
|
219
|
+
"type": "file",
|
|
220
|
+
"file": "flows.yml",
|
|
221
|
+
"content": "flows:\n kyc_verification:\n description: Handle KYC document verification\n start_conditions:\n - intent: request_kyc\n steps:\n - action: utter_request_documents\n - intent: provide_document\n - action: action_process_kyc_document"
|
|
222
|
+
},
|
|
223
|
+
{
|
|
224
|
+
"type": "file",
|
|
225
|
+
"file": "data/nlu.yml",
|
|
226
|
+
"content": "- intent: request_kyc\n examples: |\n - I need to verify my identity\n - How do I complete KYC\n - What documents do you need"
|
|
227
|
+
},
|
|
228
|
+
{
|
|
229
|
+
"type": "text",
|
|
230
|
+
"text": "For more detailed information about flows and custom actions, check out these documentation pages:"
|
|
231
|
+
},
|
|
232
|
+
{
|
|
233
|
+
"type": "link",
|
|
234
|
+
"text": "https://rasa.com/docs/rasa/flows"
|
|
235
|
+
},
|
|
236
|
+
{
|
|
237
|
+
"type": "link",
|
|
238
|
+
"text": "https://rasa.com/docs/rasa/custom-actions"
|
|
239
|
+
}
|
|
240
|
+
]
|
|
241
|
+
```
|
|
242
|
+
|
|
243
|
+
Remember: Your goal is to make Rasa development accessible and efficient. Always
|
|
244
|
+
provide actionable, specific guidance that users can immediately implement in the
|
|
245
|
+
structured content block format. Keep your response short and concise.
|
|
@@ -0,0 +1,327 @@
|
|
|
1
|
+
"""Service for handling LLM interactions."""
|
|
2
|
+
|
|
3
|
+
import asyncio
import importlib
import importlib.resources
import json
from contextlib import asynccontextmanager
from copy import deepcopy
from typing import Any, Dict, List, Optional

import importlib_resources
import openai
import structlog
from jinja2 import Template

from rasa.builder import config
from rasa.builder.exceptions import LLMGenerationError
from rasa.builder.llm_context import tracker_as_llm_context
from rasa.builder.models import LLMBuilderContext
from rasa.constants import PACKAGE_NAME
from rasa.shared.constants import DOMAIN_SCHEMA_FILE, RESPONSES_SCHEMA_FILE
from rasa.shared.core.flows.yaml_flows_io import FLOWS_SCHEMA_FILE
from rasa.shared.utils.io import read_json_file
from rasa.shared.utils.yaml import read_schema_file
|
|
24
|
+
|
|
25
|
+
structlogger = structlog.get_logger()
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class LLMService:
    """Handles OpenAI LLM interactions with lazy client creation and schema caching.

    A single instance is shared module-wide (see ``llm_service`` at the bottom
    of this module); the client and the JSON schemas used for structured output
    are created on first use and reused across requests.
    """

    def __init__(self) -> None:
        # All of these are created lazily on first use and then cached.
        self._client: Optional[openai.AsyncOpenAI] = None
        self._domain_schema: Optional[Dict[str, Any]] = None
        self._flows_schema: Optional[Dict[str, Any]] = None
        self._helper_schema: Optional[Dict[str, Any]] = None

    @asynccontextmanager
    async def _get_client(self):
        """Yield the shared OpenAI client, logging any error raised while using it.

        The client is created once with the configured timeout and kept for the
        lifetime of the service instance; errors are logged and re-raised.
        """
        if self._client is None:
            self._client = openai.AsyncOpenAI(timeout=config.OPENAI_TIMEOUT)

        try:
            yield self._client
        except Exception as e:
            structlogger.error("llm.client_error", error=str(e))
            raise

    def _prepare_schemas(self) -> None:
        """Prepare and cache the JSON schemas used for structured LLM output."""
        if self._domain_schema is None:
            self._domain_schema = _prepare_domain_schema()

        if self._flows_schema is None:
            self._flows_schema = _prepare_flows_schema()

        if self._helper_schema is None:
            self._helper_schema = _load_helper_schema()

    async def generate_rasa_project(
        self, messages: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """Generate Rasa project data (domain + flows) using OpenAI.

        Args:
            messages: Chat-completion style messages to send to the model.

        Returns:
            The parsed JSON object with ``domain`` and ``flows`` keys.

        Raises:
            LLMGenerationError: If the API call fails, times out, or returns an
                empty or non-JSON response.
        """
        self._prepare_schemas()

        try:
            async with self._get_client() as client:
                response = await client.chat.completions.create(
                    model=config.OPENAI_MODEL,
                    messages=messages,
                    temperature=config.OPENAI_TEMPERATURE,
                    response_format={
                        "type": "json_schema",
                        "json_schema": {
                            "name": "rasa_project",
                            "schema": {
                                "type": "object",
                                "properties": {
                                    "domain": self._domain_schema,
                                    "flows": self._flows_schema,
                                },
                                "required": ["domain", "flows"],
                            },
                        },
                    },
                )

            content = response.choices[0].message.content
            if not content:
                raise LLMGenerationError("Empty response from LLM")

            try:
                return json.loads(content)
            except json.JSONDecodeError as e:
                # Chain the cause so the original decode error is preserved.
                raise LLMGenerationError(f"Invalid JSON from LLM: {e}") from e

        except openai.OpenAIError as e:
            raise LLMGenerationError(f"OpenAI API error: {e}") from e
        except asyncio.TimeoutError as e:
            raise LLMGenerationError("LLM request timed out") from e

    async def create_helper_messages(
        self, llm_builder_context: "LLMBuilderContext"
    ) -> List[Dict[str, Any]]:
        """Build the full message list for the LLM helper.

        Combines the system prompt (rendered with the current conversation,
        bot logs, bot files, and documentation search results) with the user's
        chat history.

        Args:
            llm_builder_context: Context bundle with tracker, logs, files, and
                chat history.

        Returns:
            Chat-completion messages: system message(s) followed by history.
        """
        # Format chat history so it can be used as a documentation search query.
        chat_dump = self._format_chat_dump(llm_builder_context.chat_history)

        # Documentation search is best-effort; failures yield "no results".
        documentation_results = await self.search_documentation(chat_dump)
        formatted_docs = self._format_documentation_results(documentation_results)

        current_conversation = tracker_as_llm_context(llm_builder_context.tracker)

        # Prepare the system messages with all available context.
        system_messages = get_helper_messages(
            current_conversation,
            llm_builder_context.bot_logs,
            llm_builder_context.chat_bot_files,
            formatted_docs,
        )

        # Append the chat history as alternating user/assistant messages.
        messages = system_messages.copy()
        for msg in llm_builder_context.chat_history:
            messages.append(
                {
                    "role": "user" if msg.type == "user" else "assistant",
                    # Structured (list) content is serialized to JSON text.
                    "content": json.dumps(msg.content)
                    if isinstance(msg.content, list)
                    else msg.content,
                }
            )
        return messages

    async def generate_helper_response(
        self, messages: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """Generate a structured helper response using OpenAI.

        Args:
            messages: Chat-completion style messages to send to the model.

        Returns:
            The parsed JSON content blocks produced by the helper.

        Raises:
            LLMGenerationError: If the API call fails, times out, or returns an
                empty or non-JSON response.
        """
        self._prepare_schemas()

        try:
            async with self._get_client() as client:
                response = await client.chat.completions.create(
                    model=config.OPENAI_MODEL,
                    messages=messages,
                    response_format={
                        "type": "json_schema",
                        "json_schema": {
                            "name": "llm_helper",
                            "schema": self._helper_schema,
                        },
                    },
                )

            content = response.choices[0].message.content
            if not content:
                raise LLMGenerationError("Empty response from LLM helper")

            try:
                return json.loads(content)
            except json.JSONDecodeError as e:
                raise LLMGenerationError(
                    f"Invalid JSON from LLM helper: {e}"
                ) from e

        except openai.OpenAIError as e:
            raise LLMGenerationError(f"OpenAI API error in helper: {e}") from e
        except asyncio.TimeoutError as e:
            raise LLMGenerationError("LLM helper request timed out") from e

    async def search_documentation(
        self, query: str, max_results: Optional[int] = None
    ) -> List[Dict[str, Any]]:
        """Search documentation using the OpenAI vector store.

        Args:
            query: Free-text query (typically a chat transcript).
            max_results: Maximum number of results; defaults to the configured
                ``OPENAI_MAX_VECTOR_RESULTS``.

        Returns:
            The raw search results, or an empty list if the search fails.
        """
        if max_results is None:
            max_results = config.OPENAI_MAX_VECTOR_RESULTS

        try:
            async with self._get_client() as client:
                results = await client.vector_stores.search(
                    vector_store_id=config.OPENAI_VECTOR_STORE_ID,
                    query=query,
                    max_num_results=max_results,
                    rewrite_query=True,
                )

            return results.data

        except openai.OpenAIError as e:
            # Best-effort: log the failure and fall back to "no documentation".
            structlogger.warning(
                "llm.documentation_search_failed", error=str(e), query=query
            )
            return []

    @staticmethod
    def _format_chat_dump(messages) -> str:
        """Render chat messages as a plain-text transcript for doc search.

        Assistant messages with structured (list) content contribute one line
        per ``text`` part; all other content is stringified as-is.
        """
        result = ""
        for message in messages:
            if message.type == "user":
                content = (
                    message.content
                    if isinstance(message.content, str)
                    else str(message.content)
                )
                result += f"User: {content}\n"
            else:
                if isinstance(message.content, list):
                    for part in message.content:
                        if part.get("type") == "text":
                            result += f"Assistant: {part.get('text')}\n"
                else:
                    result += f"Assistant: {message.content}\n"
        return result

    @staticmethod
    def _format_documentation_results(results) -> str:
        """Wrap documentation search results in ``<sources>``/``<result>`` tags."""
        if not results:
            return "<sources>No relevant documentation found.</sources>"

        formatted_results = ""
        for result in results:
            formatted_result = f"<result url='{result.attributes.get('url', '')}'>"
            for part in result.content:
                formatted_result += f"<content>{part.text}</content>"
            formatted_results += formatted_result + "</result>"

        return f"<sources>{formatted_results}</sources>"
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
# Schema preparation functions (stateless)
|
|
232
|
+
def _prepare_domain_schema() -> Dict[str, Any]:
    """Build a trimmed copy of the domain schema for LLM generation.

    Drops NLU-era sections that CALM bots do not use, removes per-slot keys
    the LLM should not generate, and inlines the responses schema.
    """
    schema = deepcopy(read_schema_file(DOMAIN_SCHEMA_FILE, PACKAGE_NAME, False))
    mapping = schema["mapping"]

    # Sections irrelevant for CALM bots.
    for obsolete in ("intents", "entities", "forms", "config", "session_config"):
        mapping.pop(obsolete, None)

    # Remove problematic keys from the per-slot schema.
    per_slot = mapping["slots"]["mapping"]["regex;([A-Za-z]+)"]["mapping"]
    for problematic in ("mappings", "validation"):
        per_slot.pop(problematic, None)

    # Inline the responses schema so responses can be generated as well.
    responses_schema = read_schema_file(RESPONSES_SCHEMA_FILE, PACKAGE_NAME, False)
    mapping["responses"] = responses_schema["schema;responses"]

    return schema
|
|
255
|
+
|
|
256
|
+
|
|
257
|
+
def _prepare_flows_schema() -> Dict[str, Any]:
    """Load the flows JSON schema without the ``nlu_trigger`` property."""
    schema_path = str(
        importlib_resources.files(PACKAGE_NAME).joinpath(FLOWS_SCHEMA_FILE)
    )
    schema = deepcopy(read_json_file(schema_path))
    # nlu_trigger is an NLU-era feature the LLM should never generate.
    schema["$defs"]["flow"]["properties"].pop("nlu_trigger", None)
    return schema
|
|
265
|
+
|
|
266
|
+
|
|
267
|
+
def _load_helper_schema() -> Dict[str, Any]:
    """Read the JSON schema describing the helper's structured response."""
    schema_resource = importlib_resources.files(PACKAGE_NAME).joinpath(
        "builder/llm-helper-schema.json"
    )
    return read_json_file(schema_resource)
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
# Template functions (stateless with caching)
|
|
277
|
+
_skill_template: Optional[Template] = None
|
|
278
|
+
_helper_template: Optional[Template] = None
|
|
279
|
+
|
|
280
|
+
|
|
281
|
+
def get_skill_generation_messages(
    skill_description: str, project_data: Dict[str, str]
) -> List[Dict[str, Any]]:
    """Get the system messages for generating a bot from a skill description.

    The Jinja2 prompt template is read from package data once and cached in
    the module-level ``_skill_template``.

    Args:
        skill_description: Natural-language description of the skill to build.
        project_data: Current project files keyed by file name.

    Returns:
        A single-element list containing the rendered system message.
    """
    global _skill_template

    if _skill_template is None:
        # ``importlib.resources.read_text`` is deprecated since Python 3.11
        # and relied on the submodule being imported as a side effect; use
        # the ``files()`` API instead.
        template_content = (
            importlib.resources.files("rasa.builder")
            .joinpath("skill_to_bot_prompt.jinja2")
            .read_text(encoding="utf-8")
        )
        _skill_template = Template(template_content)

    system_prompt = _skill_template.render(
        skill_description=skill_description,
        project_data=project_data,
    )
    return [{"role": "system", "content": system_prompt}]
|
|
299
|
+
|
|
300
|
+
|
|
301
|
+
def get_helper_messages(
    current_conversation: str,
    bot_logs: str,
    chat_bot_files: Dict[str, str],
    documentation_results: str,
) -> List[Dict[str, Any]]:
    """Get the system messages for the LLM helper.

    The Jinja2 prompt template is read from package data once and cached in
    the module-level ``_helper_template``.

    Args:
        current_conversation: Transcript of the conversation with the bot.
        bot_logs: Recent bot log lines.
        chat_bot_files: Bot configuration files keyed by file name.
        documentation_results: Pre-formatted documentation search results.

    Returns:
        A single-element list containing the rendered system message.
    """
    global _helper_template

    if _helper_template is None:
        # ``importlib.resources.read_text`` is deprecated since Python 3.11
        # and relied on the submodule being imported as a side effect; use
        # the ``files()`` API instead.
        template_content = (
            importlib.resources.files("rasa.builder")
            .joinpath("llm_helper_prompt.jinja2")
            .read_text(encoding="utf-8")
        )
        _helper_template = Template(template_content)

    system_prompt = _helper_template.render(
        current_conversation=current_conversation,
        bot_logs=bot_logs,
        chat_bot_files=chat_bot_files,
        documentation_results=documentation_results,
    )
    return [{"role": "system", "content": system_prompt}]
|
|
324
|
+
|
|
325
|
+
|
|
326
|
+
# Global service instance
|
|
327
|
+
llm_service = LLMService()
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
"""Logging utilities for the prompt-to-bot service."""
|
|
2
|
+
|
|
3
|
+
import collections
|
|
4
|
+
import logging
|
|
5
|
+
import threading
|
|
6
|
+
from typing import Any, Deque, Dict
|
|
7
|
+
|
|
8
|
+
from rasa.builder import config
|
|
9
|
+
|
|
10
|
+
# Thread-safe deque for collecting recent logs
|
|
11
|
+
_recent_logs: Deque[str] = collections.deque(maxlen=config.MAX_LOG_ENTRIES)
|
|
12
|
+
_logs_lock = threading.RLock()
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def collecting_logs_processor(
    logger: Any, log_level: str, event_dict: Dict[str, Any]
) -> Dict[str, Any]:
    """Structlog processor that records non-debug events in a rolling buffer.

    Thread-safe; always returns ``event_dict`` unchanged so the remaining
    processor chain runs normally.
    """
    debug_level_name = logging.getLevelName(logging.DEBUG).lower()
    if log_level == debug_level_name:
        # Debug output is too noisy to keep in the buffer.
        return event_dict

    message = event_dict.get("event_info") or event_dict.get("event", "")
    with _logs_lock:
        _recent_logs.append(f"[{log_level}] {message}")

    return event_dict
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def get_recent_logs() -> str:
    """Return the buffered log entries as a newline-joined string, oldest first."""
    with _logs_lock:
        return "\n".join(_recent_logs)
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def clear_recent_logs() -> None:
    """Clear the recent logs buffer.

    Thread-safe: acquires the module-level lock before mutating the deque.
    """
    with _logs_lock:
        _recent_logs.clear()
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def get_log_count() -> int:
    """Get the current number of buffered log entries.

    Returns:
        Number of entries currently held in the rolling buffer.
    """
    with _logs_lock:
        return len(_recent_logs)
|
rasa/builder/main.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Main entry point for the prompt-to-bot service."""
|
|
3
|
+
|
|
4
|
+
import logging
|
|
5
|
+
import sys
|
|
6
|
+
from typing import Optional
|
|
7
|
+
|
|
8
|
+
import rasa.core.utils
|
|
9
|
+
from rasa.builder.logging_utils import collecting_logs_processor
|
|
10
|
+
from rasa.builder.service import PromptToBotService
|
|
11
|
+
from rasa.utils.common import configure_logging_and_warnings
|
|
12
|
+
from rasa.utils.log_utils import configure_structlog
|
|
13
|
+
from rasa.utils.sanic_error_handler import register_custom_sanic_error_handler
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def setup_logging():
    """Setup logging configuration.

    Configures standard logging and structlog at DEBUG level and installs
    ``collecting_logs_processor`` so recent log lines are captured in the
    builder's rolling log buffer.
    """
    log_level = logging.DEBUG

    configure_logging_and_warnings(
        log_level=log_level,
        logging_config_file=None,
        warn_only_once=True,
        filter_repeated_logs=True,
    )

    configure_structlog(
        log_level,
        include_time=True,
        additional_processors=[collecting_logs_processor],
    )
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def main(project_folder: Optional[str] = None) -> None:
    """Main entry point: configure logging, start the service, and run it.

    Args:
        project_folder: Optional path to the bot project folder; the service
            falls back to its default location when ``None``.
    """
    try:
        # Setup logging first so startup messages are captured.
        setup_logging()

        # Create and configure the service.
        service = PromptToBotService(project_folder)
        register_custom_sanic_error_handler(service.app)

        # Log available routes for easier debugging.
        rasa.core.utils.list_routes(service.app)

        # Blocks until the server is stopped.
        service.run()

    except KeyboardInterrupt:
        print("\nService stopped by user")
        sys.exit(0)
    except Exception as e:
        # Startup failures belong on stderr, with a non-zero exit code.
        print(f"Failed to start service: {e}", file=sys.stderr)
        sys.exit(1)
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
if __name__ == "__main__":
|
|
60
|
+
project_folder = sys.argv[1] if len(sys.argv) > 1 else None
|
|
61
|
+
main(project_folder)
|