@riotprompt/riotprompt 1.0.1-dev.0 → 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/MCP-IMPLEMENTATION.md +192 -0
- package/MCP-SUMMARY.md +198 -0
- package/MCP.md +185 -0
- package/README.md +16 -1
- package/dist/mcp/index.d.ts +4 -0
- package/dist/mcp/prompts/create_and_execute.md +17 -0
- package/dist/mcp/prompts/index.d.ts +9 -0
- package/dist/mcp/prompts/process_and_export.md +17 -0
- package/dist/mcp/resources/config.d.ts +4 -0
- package/dist/mcp/resources/index.d.ts +9 -0
- package/dist/mcp/resources/version.d.ts +4 -0
- package/dist/mcp/server.d.ts +19 -0
- package/dist/mcp/tools/create.d.ts +3 -0
- package/dist/mcp/tools/execute.d.ts +3 -0
- package/dist/mcp/tools/get-version.d.ts +3 -0
- package/dist/mcp/tools/index.d.ts +9 -0
- package/dist/mcp/tools/process.d.ts +3 -0
- package/dist/mcp/types.d.ts +157 -0
- package/dist/mcp-server.js +31149 -0
- package/dist/mcp-server.js.map +7 -0
- package/dist/security/path-guard.js +26 -3
- package/dist/security/path-guard.js.map +1 -1
- package/package.json +19 -9
- package/scripts/build-mcp.js +67 -0
package/MCP-IMPLEMENTATION.md
ADDED

@@ -0,0 +1,192 @@

# RiotPrompt MCP Implementation

This document describes the MCP (Model Context Protocol) implementation for RiotPrompt.

## Overview

The MCP server exposes RiotPrompt's CLI functionality through the Model Context Protocol, allowing AI assistants to programmatically create, process, and execute prompts.

## Architecture

The implementation follows the pattern established in kodrdriv's MCP server:

```
src/mcp/
├── index.ts              # Module entry point
├── server.ts             # Main MCP server implementation
├── types.ts              # Type definitions
├── tools/                # Tool implementations
│   ├── index.ts          # Tool registry and executor
│   ├── get-version.ts    # Version information tool
│   ├── create.ts         # Prompt creation tool
│   ├── process.ts        # Prompt processing tool
│   └── execute.ts        # Prompt execution tool
├── prompts/              # Workflow templates
│   ├── index.ts          # Prompt registry and loader
│   ├── create_and_execute.md
│   └── process_and_export.md
└── resources/            # Read-only resources
    ├── index.ts          # Resource registry
    ├── config.ts         # Configuration resource
    └── version.ts        # Version resource
```

## Tools

### 1. riotprompt_get_version
- **Purpose**: Get version information
- **Parameters**: None
- **Returns**: Version, name, description

### 2. riotprompt_create
- **Purpose**: Create new prompt structure or import from file
- **Parameters**:
  - `promptName` (required): Name of the prompt
  - `path` (optional): Base path for creation
  - `persona` (optional): Initial persona text
  - `instructions` (optional): Initial instructions text
  - `createContext` (optional): Create context directory
  - `importFile` (optional): Import from JSON/XML
- **Returns**: Path to created prompt, list of files

### 3. riotprompt_process
- **Purpose**: Process and format prompts
- **Parameters**:
  - `promptPath` (required): Path to prompt
  - `model` (optional): Target model
  - `format` (optional): Output format (text/json/xml)
  - `outputFile` (optional): Save to file
- **Returns**: Formatted output or file path

### 4. riotprompt_execute
- **Purpose**: Execute prompt using LLM provider
- **Parameters**:
  - `promptPath` (required): Path to prompt
  - `model` (optional): Model to use
  - `apiKey` (optional): API key override
  - `temperature` (optional): Temperature setting
  - `maxTokens` (optional): Max tokens
- **Returns**: LLM response, usage stats

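For illustration, a standard MCP `tools/call` request targeting `riotprompt_create` would look roughly like the following; the argument values here are hypothetical, not taken from the package:

```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "tools/call",
  "params": {
    "name": "riotprompt_create",
    "arguments": {
      "promptName": "summarizer",
      "path": "./prompts",
      "persona": "You are a concise technical summarizer.",
      "createContext": true
    }
  }
}
```
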
## Resources

### riotprompt://config
Read-only access to riotprompt.yaml configuration

### riotprompt://version
Version information for the package

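As a hypothetical example, an MCP client reads one of these resources with a standard `resources/read` request:

```json
{
  "jsonrpc": "2.0",
  "id": 2,
  "method": "resources/read",
  "params": {
    "uri": "riotprompt://version"
  }
}
```
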
## Prompts

### create_and_execute
Workflow template for creating and executing a new prompt

### process_and_export
Workflow template for processing and exporting existing prompts

## Build Process

The MCP server is built separately from the main library:

1. Main library build: `vite build`
2. CLI build: `vite build -c vite.config.cli.ts`
3. MCP server build: `node scripts/build-mcp.js`
4. Copy prompt templates: `copyfiles -u 1 "src/mcp/prompts/*.md" dist`

The build script (`scripts/build-mcp.js`) uses esbuild to:
- Bundle the MCP server code
- Mark external dependencies (MCP SDK, RiotPrompt dependencies)
- Add shebang for executable
- Make the file executable

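A minimal sketch of that kind of esbuild invocation is shown below. The entry point and option values are illustrative assumptions, not a copy of the actual `scripts/build-mcp.js`:

```typescript
import { build } from 'esbuild';
import { chmod } from 'fs/promises';

// Bundle the MCP server entry point into a single file.
await build({
    entryPoints: ['src/mcp/index.ts'], // assumed entry point
    outfile: 'dist/mcp-server.js',
    bundle: true,
    platform: 'node',
    format: 'esm',
    // Keep the MCP SDK and other runtime dependencies external
    // instead of inlining them into the bundle.
    packages: 'external',
    // Prepend the shebang so the bundle can run as a CLI.
    banner: { js: '#!/usr/bin/env node' },
});

// Mark the bundled server as executable (755).
await chmod('dist/mcp-server.js', 0o755);
```
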
## Dependencies

### Runtime Dependencies
- `@modelcontextprotocol/sdk`: MCP protocol implementation
- All RiotPrompt dependencies (marked as external in build)

### Dev Dependencies
- `esbuild`: For bundling the MCP server
- `copyfiles`: For copying prompt templates
- `@modelcontextprotocol/inspector`: For testing

## Testing

```bash
# Build MCP server
npm run mcp:build

# Test with MCP inspector
npm run mcp:inspect

# Watch mode for development
npm run mcp:dev
```

## Usage

### In Claude Desktop

Add to `~/Library/Application Support/Claude/claude_desktop_config.json`:

```json
{
  "mcpServers": {
    "riotprompt": {
      "command": "npx",
      "args": ["-y", "@riotprompt/riotprompt", "riotprompt-mcp"]
    }
  }
}
```

### In Other MCP Clients

Use the `riotprompt-mcp` command or the full path to `dist/mcp-server.js`.

## Environment Variables

The MCP server respects the same environment variables as the CLI:
- `OPENAI_API_KEY`: For OpenAI models
- `ANTHROPIC_API_KEY`: For Anthropic/Claude models
- `GEMINI_API_KEY`: For Google Gemini models
- `RIOTPROMPT_MCP_SERVER`: Set automatically when running as MCP server

## Error Handling

The server follows MCP best practices:
- Returns structured error responses
- Includes context and recovery suggestions
- Logs are captured and included in responses
- Undefined values are cleaned from JSON responses

## Future Enhancements

Potential additions:
- More workflow prompts
- Streaming support for long-running operations
- Progress notifications for execution
- Batch operations
- Template management tools
- Validation tools

## Comparison with kodrdriv

Similarities:
- Same MCP SDK and architecture pattern
- Similar tool/resource/prompt structure
- Same build process approach
- Same error handling patterns

Differences:
- Simpler tool set (4 tools vs 20+)
- No tree operations (single package focus)
- No git integration
- Focus on prompt operations vs workflow automation

## Notes

- The MCP server runs in a separate process from the CLI
- All file operations use `fs/promises` to comply with eslint rules
- The server is stateless - each tool call is independent
- Prompt templates are loaded from markdown files at runtime

package/MCP-SUMMARY.md
ADDED

@@ -0,0 +1,198 @@

# RiotPrompt MCP Implementation - Summary

## What Was Created

Successfully implemented a complete MCP (Model Context Protocol) server for RiotPrompt, following the patterns established in kodrdriv.

### Files Created

#### Core MCP Files
- `src/mcp/index.ts` - Module entry point
- `src/mcp/server.ts` - Main MCP server implementation (370 lines)
- `src/mcp/types.ts` - Type definitions for MCP integration (170 lines)

#### Tools (4 tools)
- `src/mcp/tools/index.ts` - Tool registry and executor
- `src/mcp/tools/get-version.ts` - Get version information
- `src/mcp/tools/create.ts` - Create new prompt structures
- `src/mcp/tools/process.ts` - Process and format prompts
- `src/mcp/tools/execute.ts` - Execute prompts using LLM providers

#### Prompts (2 workflow templates)
- `src/mcp/prompts/index.ts` - Prompt registry and loader
- `src/mcp/prompts/create_and_execute.md` - Workflow for creating and executing prompts
- `src/mcp/prompts/process_and_export.md` - Workflow for processing and exporting prompts

#### Resources (2 resources)
- `src/mcp/resources/index.ts` - Resource registry
- `src/mcp/resources/config.ts` - Read riotprompt.yaml configuration
- `src/mcp/resources/version.ts` - Get version information

#### Build & Documentation
- `scripts/build-mcp.js` - Build script for MCP server
- `MCP.md` - User-facing documentation
- `MCP-IMPLEMENTATION.md` - Technical implementation details
- `MCP-SUMMARY.md` - This file

### Package.json Updates

Added:
- `bin.riotprompt-mcp` entry point
- MCP build scripts (`mcp:build`, `mcp:inspect`, `mcp:dev`)
- Updated main build script to include MCP server
- Dependencies: `@modelcontextprotocol/sdk`
- Dev dependencies: `@modelcontextprotocol/inspector`, `copyfiles`, `esbuild`, `rollup-plugin-preserve-shebang`

### ESLint Configuration

Updated `eslint.config.mjs` to ignore the `scripts/**` directory to allow console.log in build scripts.

## Tools Provided

### 1. riotprompt_get_version
Get version information for riotprompt.

### 2. riotprompt_create
Create a new prompt directory structure or import from JSON/XML file.
- Scaffolds persona.md, instructions.md, and context/ directory
- Supports importing existing prompts

### 3. riotprompt_process
Process a prompt and format it for a specific model or export to JSON/XML.
- Supports directory-based prompts and file-based prompts
- Can format for specific models (GPT-4, Claude, Gemini, etc.)
- Can export to JSON or XML

### 4. riotprompt_execute
Execute a prompt using an LLM provider (OpenAI, Anthropic, Gemini).
- Supports all major LLM providers
- Returns response and token usage
- Respects API keys from environment or parameters

## Resources Provided

### riotprompt://config
Read-only access to the riotprompt.yaml configuration file.

### riotprompt://version
Version information for the riotprompt package.

## Prompts Provided

### create_and_execute
Workflow template for creating a new prompt and executing it.

### process_and_export
Workflow template for processing an existing prompt and exporting it.

## Build Process

The build process now includes:
1. Lint check
2. TypeScript type checking
3. Main library build (vite)
4. CLI build (vite)
5. MCP server build (esbuild)
6. Copy prompt templates to dist/
7. Make MCP server executable

Build command: `npm run build`

## Testing

The MCP server can be tested using:
```bash
npm run mcp:inspect
```

This launches the MCP Inspector for interactive testing.

## Usage

### Claude Desktop Configuration

Add to `~/Library/Application Support/Claude/claude_desktop_config.json`:

```json
{
  "mcpServers": {
    "riotprompt": {
      "command": "npx",
      "args": ["-y", "@riotprompt/riotprompt", "riotprompt-mcp"]
    }
  }
}
```

### Example Interactions

Once configured, users can ask their AI assistant:

- "Create a new prompt called 'summarizer' for summarizing articles"
- "Process the prompt at ./my-prompt and format it for Claude"
- "Execute the prompt at ./my-prompt using GPT-4"
- "Show me the riotprompt configuration"

## Technical Details

### Architecture
- Uses `@modelcontextprotocol/sdk` for MCP protocol
- Stdio transport for communication
- Stateless design - each tool call is independent
- Async/await throughout for proper error handling

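For orientation, a stripped-down sketch of this setup using the SDK's high-level `McpServer` API is shown below; it is illustrative only, not the actual `src/mcp/server.ts`:

```typescript
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';

// Create the server with basic identity information.
const server = new McpServer({ name: 'riotprompt', version: '1.0.4' });

// Register one tool; the real server registers riotprompt_create,
// riotprompt_process, and riotprompt_execute in the same way.
server.tool('riotprompt_get_version', 'Get version information for riotprompt', async () => ({
    content: [
        { type: 'text' as const, text: JSON.stringify({ name: 'riotprompt', version: '1.0.4' }) },
    ],
}));

// Communicate with the MCP client over stdio.
await server.connect(new StdioServerTransport());
```
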
### Code Quality
- All files use `fs/promises` instead of sync fs operations
- Proper TypeScript typing throughout
- ESLint compliant
- Follows established patterns from kodrdriv

### Error Handling
- Structured error responses
- Context and recovery suggestions included
- Logs captured and included in responses
- Undefined values cleaned from JSON

## Comparison with kodrdriv MCP

### Similarities
- Same MCP SDK and architecture
- Similar tool/resource/prompt structure
- Same build process approach
- Same error handling patterns

### Differences
- Simpler tool set (4 tools vs 20+)
- No tree operations (single package focus)
- No git integration
- Focus on prompt operations vs workflow automation

## Next Steps

The MCP server is complete and ready for use. Potential future enhancements:

1. Add more workflow prompts
2. Add streaming support for long-running operations
3. Add progress notifications for execution
4. Add batch operations
5. Add template management tools
6. Add validation tools

## Files Modified

- `package.json` - Added MCP dependencies and scripts
- `eslint.config.mjs` - Added scripts/ to ignore list
- `README.md` - Added MCP section

## Build Verification

✅ Build successful
✅ MCP server created at `dist/mcp-server.js`
✅ MCP server is executable (755 permissions)
✅ Prompt templates copied to `dist/mcp/prompts/`
✅ All linting checks pass
✅ TypeScript compilation successful

## Status

**COMPLETE** - The MCP server is fully implemented, built, and ready for use.

package/MCP.md
ADDED

@@ -0,0 +1,185 @@

# RiotPrompt MCP Server

Model Context Protocol (MCP) server for RiotPrompt, providing AI assistants with tools to create, process, and execute prompts.

## Overview

The RiotPrompt MCP server exposes RiotPrompt's CLI functionality through the Model Context Protocol, allowing AI assistants to:

- Create new prompt structures
- Process prompts for different models
- Execute prompts using LLM providers
- Access configuration and version information

## Installation

```bash
npm install -g @riotprompt/riotprompt
```

## Configuration

Add to your MCP settings (e.g., Claude Desktop config):

```json
{
  "mcpServers": {
    "riotprompt": {
      "command": "npx",
      "args": ["-y", "@riotprompt/riotprompt", "riotprompt-mcp"]
    }
  }
}
```

Or if installed globally:

```json
{
  "mcpServers": {
    "riotprompt": {
      "command": "riotprompt-mcp"
    }
  }
}
```

## Tools

### riotprompt_get_version

Get version information for riotprompt.

**Parameters:** None

**Returns:**
- `version`: Package version
- `name`: Package name
- `description`: Package description

### riotprompt_create

Create a new prompt directory structure or import from file.

**Parameters:**
- `promptName` (required): Name of the prompt to create
- `path` (optional): Base path to create the prompt in
- `persona` (optional): Initial text for persona.md
- `instructions` (optional): Initial text for instructions.md
- `createContext` (optional): Create context directory (default: true)
- `importFile` (optional): Import from JSON or XML file

**Returns:**
- `path`: Full path to created prompt
- `files`: List of created files

### riotprompt_process

Process a prompt and format it for a specific model or export to JSON/XML.

**Parameters:**
- `promptPath` (required): Path to prompt directory or file
- `model` (optional): Model to format for (e.g., gpt-4, claude-3-opus)
- `format` (optional): Output format (text, json, xml)
- `outputFile` (optional): Path to save output

**Returns:**
- `output`: Formatted prompt (if no outputFile specified)
- `outputFile`: Path to saved file (if outputFile specified)
- `format`: Output format used

### riotprompt_execute

Execute a prompt using an LLM provider.

**Parameters:**
- `promptPath` (required): Path to prompt directory or file
- `model` (optional): Model to use (e.g., gpt-4, claude-3-opus, gemini-1.5-pro)
- `apiKey` (optional): API key (overrides environment variables)
- `temperature` (optional): Temperature (0-1)
- `maxTokens` (optional): Maximum tokens to generate

**Returns:**
- `content`: Response from the LLM
- `usage`: Token usage information
- `model`: Model used

**Environment Variables:**
- `OPENAI_API_KEY`: For OpenAI models
- `ANTHROPIC_API_KEY`: For Anthropic/Claude models
- `GEMINI_API_KEY`: For Google Gemini models

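If you would rather not rely on variables inherited from your shell, many MCP clients (including Claude Desktop) accept an `env` block in the server entry; the key value below is a placeholder:

```json
{
  "mcpServers": {
    "riotprompt": {
      "command": "riotprompt-mcp",
      "env": {
        "OPENAI_API_KEY": "sk-..."
      }
    }
  }
}
```
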
## Resources

### riotprompt://config

Read the current riotprompt configuration from `riotprompt.yaml`.

### riotprompt://version

Get version information for riotprompt.

## Prompts

### create_and_execute

Workflow template for creating a new prompt and executing it.

**Arguments:**
- `promptName` (required): Name of the prompt to create
- `model` (optional): Model to use for execution
- `path` (optional): Path where the prompt should be created

### process_and_export

Workflow template for processing an existing prompt and exporting it.

**Arguments:**
- `promptPath` (required): Path to the prompt
- `model` (optional): Model to format for
- `format` (optional): Output format (text, json, xml)

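For example, an MCP client requests one of these templates with a standard `prompts/get` call; the argument values below are illustrative:

```json
{
  "jsonrpc": "2.0",
  "id": 3,
  "method": "prompts/get",
  "params": {
    "name": "create_and_execute",
    "arguments": {
      "promptName": "summarizer",
      "model": "gpt-4"
    }
  }
}
```
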
## Example Usage

Once configured, you can ask your AI assistant:

- "Create a new prompt called 'summarizer' with a persona for summarizing text"
- "Process the prompt at ./my-prompt and format it for Claude"
- "Execute the prompt at ./my-prompt using GPT-4"
- "Show me the riotprompt configuration"

## Development

### Building

```bash
npm run mcp:build
```

### Testing

```bash
npm run mcp:inspect
```

### Local Development

```bash
# Watch mode
npm run mcp:dev

# Test with MCP inspector
npm run mcp:inspect
```

## Architecture

The MCP server is built using:

- `@modelcontextprotocol/sdk` for MCP protocol implementation
- RiotPrompt's core functionality for prompt operations
- Stdio transport for communication with MCP clients

## License

Apache-2.0

package/README.md
CHANGED

@@ -19,7 +19,22 @@ A powerful, flexible prompt building library and CLI tool for AI applications wi
 ## Installation
 
 ```bash
-npm install riotprompt
+npm install @riotprompt/riotprompt
+```
+
+## MCP Server
+
+RiotPrompt includes a Model Context Protocol (MCP) server that allows AI assistants to create, process, and execute prompts. See [MCP.md](MCP.md) for configuration and usage details.
+
+```json
+{
+  "mcpServers": {
+    "riotprompt": {
+      "command": "npx",
+      "args": ["-y", "@riotprompt/riotprompt", "riotprompt-mcp"]
+    }
+  }
+}
 ```
 
 ## CLI Usage

package/dist/mcp/prompts/create_and_execute.md
ADDED

@@ -0,0 +1,17 @@

# Create and Execute a Prompt

I need to create a new prompt and execute it.

## Steps

1. Use `riotprompt_create` to create a new prompt structure at ${path}
2. Review the created structure
3. Use `riotprompt_execute` to run the prompt with the specified model

## Details

- Prompt name: ${promptName}
- Model: ${model}
- Path: ${path}

Please help me create and test this prompt.

package/dist/mcp/prompts/index.d.ts
ADDED

@@ -0,0 +1,9 @@

import { McpPrompt, McpPromptMessage } from '../types.js';
/**
 * Get all available prompts
 */
export declare function getPrompts(): McpPrompt[];
/**
 * Get a prompt by name
 */
export declare function getPrompt(name: string, args: Record<string, string>): Promise<McpPromptMessage[]>;

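The workflow templates use `${...}` placeholders; a minimal sketch of how `getPrompt` could fill them from the supplied arguments is shown below (the helper name is hypothetical, not part of the published API):

```typescript
// Hypothetical helper: substitute ${name} placeholders in a markdown template
// with the arguments passed to prompts/get, leaving unknown keys empty.
function fillTemplate(template: string, args: Record<string, string>): string {
    return template.replace(/\$\{(\w+)\}/g, (_match, key) => args[key] ?? '');
}

// fillTemplate('Prompt name: ${promptName}', { promptName: 'summarizer' })
// => 'Prompt name: summarizer'
```
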
package/dist/mcp/prompts/process_and_export.md
ADDED

@@ -0,0 +1,17 @@

# Process and Export a Prompt

I need to process an existing prompt and export it to a different format.

## Steps

1. Use `riotprompt_process` to load and process the prompt at ${promptPath}
2. Format it for ${model} (if specified)
3. Export to ${format} format

## Details

- Prompt path: ${promptPath}
- Model: ${model}
- Output format: ${format}

Please help me process and export this prompt.

package/dist/mcp/server.d.ts
ADDED

@@ -0,0 +1,19 @@

#!/usr/bin/env node
/**
 * RiotPrompt MCP Server
 *
 * Exposes riotprompt commands, resources, and prompts via MCP.
 *
 * This server provides:
 * - Tools: Prompt creation, processing, and execution commands
 * - Resources: Configuration and version information
 * - Prompts: Workflow templates for common prompt operations
 *
 * Uses McpServer high-level API for better progress notification support
 */
/**
 * Recursively remove undefined values from an object to prevent JSON serialization issues
 * Preserves null values as they are valid in JSON
 * @internal - Exported for testing purposes
 */
export declare function removeUndefinedValues(obj: any): any;

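A possible implementation of this helper, sketched here to match the documented behavior rather than copied from the shipped bundle:

```typescript
// Recursively strip undefined values so the result serializes cleanly to JSON.
// null is preserved because it is a valid JSON value.
export function removeUndefinedValues(obj: any): any {
    if (Array.isArray(obj)) {
        return obj.map(removeUndefinedValues);
    }
    if (obj !== null && typeof obj === 'object') {
        return Object.fromEntries(
            Object.entries(obj)
                .filter(([, value]) => value !== undefined)
                .map(([key, value]) => [key, removeUndefinedValues(value)]),
        );
    }
    return obj;
}
```
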