@safenest/mcp 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +166 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +410 -0
- package/package.json +50 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 SafeNest
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
1
|
+
<p align="center">
|
|
2
|
+
<img src="https://raw.githubusercontent.com/SafeNestSDK/mcp/main/assets/logo.png" alt="SafeNest" width="200" />
|
|
3
|
+
</p>
|
|
4
|
+
|
|
5
|
+
<h1 align="center">SafeNest MCP Server</h1>
|
|
6
|
+
|
|
7
|
+
<p align="center">
|
|
8
|
+
<strong>MCP server for SafeNest - AI-powered child safety tools for Claude</strong>
|
|
9
|
+
</p>
|
|
10
|
+
|
|
11
|
+
<p align="center">
|
|
12
|
+
<a href="https://www.npmjs.com/package/@safenest/mcp"><img src="https://img.shields.io/npm/v/@safenest/mcp.svg" alt="npm version"></a>
|
|
13
|
+
<a href="https://github.com/SafeNestSDK/mcp/blob/main/LICENSE"><img src="https://img.shields.io/github/license/SafeNestSDK/mcp.svg" alt="license"></a>
|
|
14
|
+
</p>
|
|
15
|
+
|
|
16
|
+
<p align="center">
|
|
17
|
+
<a href="https://api.safenest.dev/docs">API Docs</a> •
|
|
18
|
+
<a href="https://safenest.app">Dashboard</a> •
|
|
19
|
+
<a href="https://discord.gg/7kbTeRYRXD">Discord</a>
|
|
20
|
+
</p>
|
|
21
|
+
|
|
22
|
+
---
|
|
23
|
+
|
|
24
|
+
## What is this?
|
|
25
|
+
|
|
26
|
+
SafeNest MCP Server brings AI-powered child safety tools directly into Claude, Cursor, and other MCP-compatible AI assistants. Ask Claude to check messages for bullying, detect grooming patterns, or generate safety action plans.
|
|
27
|
+
|
|
28
|
+
## Available Tools
|
|
29
|
+
|
|
30
|
+
| Tool | Description |
|
|
31
|
+
|------|-------------|
|
|
32
|
+
| `detect_bullying` | Analyze text for bullying, harassment, or harmful language |
|
|
33
|
+
| `detect_grooming` | Detect grooming patterns and predatory behavior in conversations |
|
|
34
|
+
| `detect_unsafe` | Identify unsafe content (self-harm, violence, explicit material) |
|
|
35
|
+
| `analyze` | Quick comprehensive safety check (bullying + unsafe) |
|
|
36
|
+
| `analyze_emotions` | Analyze emotional content and mental state indicators |
|
|
37
|
+
| `get_action_plan` | Generate age-appropriate guidance for safety situations |
|
|
38
|
+
| `generate_report` | Create incident reports from conversations |
|
|
39
|
+
|
|
40
|
+
---
|
|
41
|
+
|
|
42
|
+
## Installation
|
|
43
|
+
|
|
44
|
+
### Claude Desktop
|
|
45
|
+
|
|
46
|
+
Add to your Claude Desktop config (`~/Library/Application Support/Claude/claude_desktop_config.json` on Mac):
|
|
47
|
+
|
|
48
|
+
```json
|
|
49
|
+
{
|
|
50
|
+
"mcpServers": {
|
|
51
|
+
"safenest": {
|
|
52
|
+
"command": "npx",
|
|
53
|
+
"args": ["-y", "@safenest/mcp"],
|
|
54
|
+
"env": {
|
|
55
|
+
"SAFENEST_API_KEY": "your-api-key"
|
|
56
|
+
}
|
|
57
|
+
}
|
|
58
|
+
}
|
|
59
|
+
}
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
### Cursor
|
|
63
|
+
|
|
64
|
+
Add to your Cursor MCP settings:
|
|
65
|
+
|
|
66
|
+
```json
|
|
67
|
+
{
|
|
68
|
+
"mcpServers": {
|
|
69
|
+
"safenest": {
|
|
70
|
+
"command": "npx",
|
|
71
|
+
"args": ["-y", "@safenest/mcp"],
|
|
72
|
+
"env": {
|
|
73
|
+
"SAFENEST_API_KEY": "your-api-key"
|
|
74
|
+
}
|
|
75
|
+
}
|
|
76
|
+
}
|
|
77
|
+
}
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
### Global Install
|
|
81
|
+
|
|
82
|
+
```bash
|
|
83
|
+
npm install -g @safenest/mcp
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
Then run:
|
|
87
|
+
```bash
|
|
88
|
+
SAFENEST_API_KEY=your-api-key safenest-mcp
|
|
89
|
+
```
|
|
90
|
+
|
|
91
|
+
---
|
|
92
|
+
|
|
93
|
+
## Usage Examples
|
|
94
|
+
|
|
95
|
+
Once configured, you can ask Claude:
|
|
96
|
+
|
|
97
|
+
### Bullying Detection
|
|
98
|
+
> "Check if this message is bullying: 'Nobody likes you, just go away'"
|
|
99
|
+
|
|
100
|
+
**Response:**
|
|
101
|
+
```
|
|
102
|
+
## ⚠️ Bullying Detected
|
|
103
|
+
|
|
104
|
+
**Severity:** 🟠 Medium
|
|
105
|
+
**Confidence:** 92%
|
|
106
|
+
**Risk Score:** 75%
|
|
107
|
+
|
|
108
|
+
**Types:** exclusion, verbal_abuse
|
|
109
|
+
|
|
110
|
+
### Rationale
|
|
111
|
+
The message contains direct exclusionary language...
|
|
112
|
+
|
|
113
|
+
### Recommended Action
|
|
114
|
+
`flag_for_moderator`
|
|
115
|
+
```
|
|
116
|
+
|
|
117
|
+
### Grooming Detection
|
|
118
|
+
> "Analyze this conversation for grooming patterns..."
|
|
119
|
+
|
|
120
|
+
### Quick Safety Check
|
|
121
|
+
> "Is this message safe? 'I don't want to be here anymore'"
|
|
122
|
+
|
|
123
|
+
### Emotion Analysis
|
|
124
|
+
> "Analyze the emotions in: 'I'm so stressed about school and nobody understands'"
|
|
125
|
+
|
|
126
|
+
### Action Plan
|
|
127
|
+
> "Give me an action plan for a 12-year-old being cyberbullied"
|
|
128
|
+
|
|
129
|
+
### Incident Report
|
|
130
|
+
> "Generate an incident report from these messages..."
|
|
131
|
+
|
|
132
|
+
---
|
|
133
|
+
|
|
134
|
+
## Get an API Key
|
|
135
|
+
|
|
136
|
+
1. Go to [safenest.app](https://safenest.app)
|
|
137
|
+
2. Create an account
|
|
138
|
+
3. Generate an API key
|
|
139
|
+
4. Add it to your MCP config
|
|
140
|
+
|
|
141
|
+
---
|
|
142
|
+
|
|
143
|
+
## Requirements
|
|
144
|
+
|
|
145
|
+
- Node.js 18+
|
|
146
|
+
- SafeNest API key
|
|
147
|
+
|
|
148
|
+
---
|
|
149
|
+
|
|
150
|
+
## Support
|
|
151
|
+
|
|
152
|
+
- **API Docs**: [api.safenest.dev/docs](https://api.safenest.dev/docs)
|
|
153
|
+
- **Discord**: [discord.gg/7kbTeRYRXD](https://discord.gg/7kbTeRYRXD)
|
|
154
|
+
- **Email**: support@safenest.dev
|
|
155
|
+
|
|
156
|
+
---
|
|
157
|
+
|
|
158
|
+
## License
|
|
159
|
+
|
|
160
|
+
MIT License - see [LICENSE](LICENSE) for details.
|
|
161
|
+
|
|
162
|
+
---
|
|
163
|
+
|
|
164
|
+
<p align="center">
|
|
165
|
+
<sub>Built with care for child safety by the <a href="https://safenest.dev">SafeNest</a> team</sub>
|
|
166
|
+
</p>
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":""}
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,410 @@
|
|
|
1
|
+
#!/usr/bin/env node
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
// Compiled CommonJS entry point for the SafeNest MCP server.
// MCP plumbing comes from the official SDK; the SafeNest SDK client
// performs the actual safety analyses over the SafeNest API.
const index_js_1 = require("@modelcontextprotocol/sdk/server/index.js");
const stdio_js_1 = require("@modelcontextprotocol/sdk/server/stdio.js");
const types_js_1 = require("@modelcontextprotocol/sdk/types.js");
const sdk_1 = require("@safenest/sdk");
// Initialize SafeNest client
// Credentials come from the environment only; fail fast (exit code 1)
// before any MCP handshake if the key is missing. The error goes to
// stderr so it never corrupts the stdio protocol stream on stdout.
const apiKey = process.env.SAFENEST_API_KEY;
if (!apiKey) {
    console.error('Error: SAFENEST_API_KEY environment variable is required');
    process.exit(1);
}
// Shared API client used by every tool handler below.
const client = new sdk_1.SafeNestClient(apiKey);
|
15
|
+
// Lookup tables mapping API result levels to the indicator emoji used in
// the Markdown responses built by the tool handlers. Call sites supply
// their own fallback for unknown keys.

// Severity of a detected issue, coldest to hottest.
const severityEmoji = {
    low: '🟡',
    medium: '🟠',
    high: '🔴',
    critical: '⛔',
};

// Risk levels reuse the severity colors; both 'safe' and 'none'
// render as a green check.
const riskEmoji = {
    safe: '✅',
    none: '✅',
    ...severityEmoji,
};

// Direction of an emotional trend reported by analyze_emotions.
const trendEmoji = {
    improving: '📈',
    stable: '➡️',
    worsening: '📉',
};
|
|
35
|
+
// Tool definitions
// Static catalog advertised via tools/list. Each entry pairs a tool name
// with a JSON-Schema description of its arguments; the names here must
// match the `case` labels in the tools/call handler.
const tools = [
    // Single-message bullying/harassment check.
    {
        name: 'detect_bullying',
        description: 'Analyze text content to detect bullying, harassment, or harmful language. Returns severity, type of bullying, confidence score, and recommended actions.',
        inputSchema: {
            type: 'object',
            properties: {
                content: {
                    type: 'string',
                    description: 'The text content to analyze for bullying',
                },
                // All context fields are optional hints for the analysis.
                context: {
                    type: 'object',
                    description: 'Optional context for better analysis',
                    properties: {
                        language: { type: 'string' },
                        ageGroup: { type: 'string' },
                        relationship: { type: 'string' },
                        platform: { type: 'string' },
                    },
                },
            },
            required: ['content'],
        },
    },
    // Whole-conversation grooming-pattern analysis; messages carry a
    // sender role so adult/child turns can be distinguished.
    {
        name: 'detect_grooming',
        description: 'Analyze a conversation for grooming patterns and predatory behavior. Identifies manipulation tactics, boundary violations, and isolation attempts.',
        inputSchema: {
            type: 'object',
            properties: {
                messages: {
                    type: 'array',
                    description: 'Array of messages in the conversation',
                    items: {
                        type: 'object',
                        properties: {
                            role: {
                                type: 'string',
                                enum: ['adult', 'child', 'unknown'],
                                description: 'Role of the message sender',
                            },
                            content: {
                                type: 'string',
                                description: 'Message content',
                            },
                        },
                        required: ['role', 'content'],
                    },
                },
                childAge: {
                    type: 'number',
                    description: 'Age of the child in the conversation',
                },
            },
            required: ['messages'],
        },
    },
    // Single-message unsafe-content categorization.
    {
        name: 'detect_unsafe',
        description: 'Detect unsafe content including self-harm, violence, drugs, explicit material, or other harmful content categories.',
        inputSchema: {
            type: 'object',
            properties: {
                content: {
                    type: 'string',
                    description: 'The text content to analyze for unsafe content',
                },
                context: {
                    type: 'object',
                    description: 'Optional context for better analysis',
                    properties: {
                        language: { type: 'string' },
                        ageGroup: { type: 'string' },
                        platform: { type: 'string' },
                    },
                },
            },
            required: ['content'],
        },
    },
    // Combined bullying + unsafe screening in one call.
    {
        name: 'analyze',
        description: 'Quick comprehensive safety analysis that checks for both bullying and unsafe content. Best for general content screening.',
        inputSchema: {
            type: 'object',
            properties: {
                content: {
                    type: 'string',
                    description: 'The text content to analyze',
                },
                include: {
                    type: 'array',
                    items: { type: 'string', enum: ['bullying', 'unsafe'] },
                    description: 'Which checks to run (default: both)',
                },
            },
            required: ['content'],
        },
    },
    // Emotion / mental-state indicators for a single text.
    {
        name: 'analyze_emotions',
        description: 'Analyze emotional content and mental state indicators. Identifies dominant emotions, trends, and provides follow-up recommendations.',
        inputSchema: {
            type: 'object',
            properties: {
                content: {
                    type: 'string',
                    description: 'The text content to analyze for emotions',
                },
            },
            required: ['content'],
        },
    },
    // Audience-tailored guidance steps for a described situation.
    {
        name: 'get_action_plan',
        description: 'Generate age-appropriate guidance and action steps for handling a safety situation. Tailored for children, parents, or educators.',
        inputSchema: {
            type: 'object',
            properties: {
                situation: {
                    type: 'string',
                    description: 'Description of the situation needing guidance',
                },
                childAge: {
                    type: 'number',
                    description: 'Age of the child involved',
                },
                audience: {
                    type: 'string',
                    enum: ['child', 'parent', 'educator', 'platform'],
                    description: 'Who the guidance is for (default: parent)',
                },
                severity: {
                    type: 'string',
                    enum: ['low', 'medium', 'high', 'critical'],
                    description: 'Severity of the situation',
                },
            },
            required: ['situation'],
        },
    },
    // Incident report built from a transcript; messages carry a free-form
    // sender label rather than the adult/child role used by detect_grooming.
    {
        name: 'generate_report',
        description: 'Generate a comprehensive incident report from a conversation. Includes summary, risk level, and recommended next steps.',
        inputSchema: {
            type: 'object',
            properties: {
                messages: {
                    type: 'array',
                    description: 'Array of messages in the incident',
                    items: {
                        type: 'object',
                        properties: {
                            sender: { type: 'string', description: 'Name/ID of sender' },
                            content: { type: 'string', description: 'Message content' },
                        },
                        required: ['sender', 'content'],
                    },
                },
                childAge: {
                    type: 'number',
                    description: 'Age of the child involved',
                },
                incidentType: {
                    type: 'string',
                    description: 'Type of incident (e.g., bullying, grooming)',
                },
            },
            required: ['messages'],
        },
    },
];
|
|
209
|
+
// Wire up the MCP server: identity metadata plus an (empty) tools
// capability object signalling that this server exposes tools.
const serverInfo = {
    name: 'safenest-mcp',
    version: '1.0.0',
};
const serverOptions = {
    capabilities: {
        tools: {},
    },
};
const server = new index_js_1.Server(serverInfo, serverOptions);

// tools/list: hand back the static tool catalog.
server.setRequestHandler(types_js_1.ListToolsRequestSchema, async () => ({ tools }));
|
|
222
|
+
// Call tool handler
// tools/call dispatcher: routes each named tool to the matching SafeNest
// SDK call and formats the result as a Markdown text block. Arguments are
// forwarded as-is (no validation here); malformed input surfaces through
// the catch at the bottom as an isError response rather than a crash.
server.setRequestHandler(types_js_1.CallToolRequestSchema, async (request) => {
    // `arguments` defaults to {} only when the field is entirely absent.
    const { name, arguments: args = {} } = request.params;
    try {
        switch (name) {
            // Single-message bullying check.
            case 'detect_bullying': {
                const result = await client.detectBullying({
                    content: args.content,
                    context: args.context,
                });
                // '⚪' is the fallback for severities outside the emoji map.
                const emoji = severityEmoji[result.severity] || '⚪';
                const response = `## ${result.is_bullying ? '⚠️ Bullying Detected' : '✅ No Bullying Detected'}

**Severity:** ${emoji} ${result.severity.charAt(0).toUpperCase() + result.severity.slice(1)}
**Confidence:** ${(result.confidence * 100).toFixed(0)}%
**Risk Score:** ${(result.risk_score * 100).toFixed(0)}%

${result.is_bullying ? `**Types:** ${result.bullying_type.join(', ')}` : ''}

### Rationale
${result.rationale}

### Recommended Action
\`${result.recommended_action}\``;
                return { content: [{ type: 'text', text: response }] };
            }
            // Conversation-level grooming analysis.
            case 'detect_grooming': {
                // Copy only the fields the API expects from each message.
                const messages = args.messages.map((m) => ({
                    role: m.role,
                    content: m.content,
                }));
                const result = await client.detectGrooming({
                    messages,
                    childAge: args.childAge,
                });
                const emoji = riskEmoji[result.grooming_risk] || '⚪';
                const response = `## ${result.grooming_risk === 'none' ? '✅ No Grooming Detected' : '⚠️ Grooming Risk Detected'}

**Risk Level:** ${emoji} ${result.grooming_risk.charAt(0).toUpperCase() + result.grooming_risk.slice(1)}
**Confidence:** ${(result.confidence * 100).toFixed(0)}%
**Risk Score:** ${(result.risk_score * 100).toFixed(0)}%

${result.flags.length > 0 ? `**Warning Flags:**\n${result.flags.map(f => `- 🚩 ${f}`).join('\n')}` : ''}

### Rationale
${result.rationale}

### Recommended Action
\`${result.recommended_action}\``;
                return { content: [{ type: 'text', text: response }] };
            }
            // Single-message unsafe-content categorization.
            case 'detect_unsafe': {
                const result = await client.detectUnsafe({
                    content: args.content,
                    context: args.context,
                });
                const emoji = severityEmoji[result.severity] || '⚪';
                const response = `## ${result.unsafe ? '⚠️ Unsafe Content Detected' : '✅ Content is Safe'}

**Severity:** ${emoji} ${result.severity.charAt(0).toUpperCase() + result.severity.slice(1)}
**Confidence:** ${(result.confidence * 100).toFixed(0)}%
**Risk Score:** ${(result.risk_score * 100).toFixed(0)}%

${result.unsafe ? `**Categories:**\n${result.categories.map(c => `- ⚠️ ${c}`).join('\n')}` : ''}

### Rationale
${result.rationale}

### Recommended Action
\`${result.recommended_action}\``;
                return { content: [{ type: 'text', text: response }] };
            }
            // Combined screening; per-check sections render only when the
            // API returned that sub-result.
            case 'analyze': {
                const result = await client.analyze({
                    content: args.content,
                    include: args.include,
                });
                const emoji = riskEmoji[result.risk_level] || '⚪';
                const response = `## Safety Analysis Results

**Overall Risk:** ${emoji} ${result.risk_level.charAt(0).toUpperCase() + result.risk_level.slice(1)}
**Risk Score:** ${(result.risk_score * 100).toFixed(0)}%

### Summary
${result.summary}

### Recommended Action
\`${result.recommended_action}\`

---
${result.bullying ? `
**Bullying Check:** ${result.bullying.is_bullying ? '⚠️ Detected' : '✅ Clear'}
` : ''}${result.unsafe ? `
**Unsafe Content:** ${result.unsafe.unsafe ? '⚠️ Detected' : '✅ Clear'}
` : ''}`;
                return { content: [{ type: 'text', text: response }] };
            }
            // Emotion breakdown with per-emotion scores.
            case 'analyze_emotions': {
                const result = await client.analyzeEmotions({
                    content: args.content,
                });
                const emoji = trendEmoji[result.trend] || '➡️';
                // Format emotion scores
                // Sorted descending by score so the strongest emotion leads.
                const emotionScoresList = Object.entries(result.emotion_scores)
                    .sort((a, b) => b[1] - a[1])
                    .map(([emotion, score]) => `- ${emotion}: ${(score * 100).toFixed(0)}%`)
                    .join('\n');
                const response = `## Emotion Analysis

**Dominant Emotions:** ${result.dominant_emotions.join(', ')}
**Trend:** ${emoji} ${result.trend.charAt(0).toUpperCase() + result.trend.slice(1)}

### Emotion Scores
${emotionScoresList}

### Summary
${result.summary}

### Recommended Follow-up
${result.recommended_followup}`;
                return { content: [{ type: 'text', text: response }] };
            }
            // Numbered, audience-tailored guidance steps.
            case 'get_action_plan': {
                const result = await client.getActionPlan({
                    situation: args.situation,
                    childAge: args.childAge,
                    audience: args.audience,
                    severity: args.severity,
                });
                const response = `## Action Plan

**Audience:** ${result.audience}
**Tone:** ${result.tone}
${result.reading_level ? `**Reading Level:** ${result.reading_level}` : ''}

### Steps
${result.steps.map((step, i) => `${i + 1}. ${step}`).join('\n')}`;
                return { content: [{ type: 'text', text: response }] };
            }
            // Incident report built from a transcript.
            case 'generate_report': {
                const messages = args.messages.map((m) => ({
                    sender: m.sender,
                    content: m.content,
                }));
                const result = await client.generateReport({
                    messages,
                    childAge: args.childAge,
                    // The incident wrapper is sent only when a type was given.
                    incident: args.incidentType ? { type: args.incidentType } : undefined,
                });
                const emoji = riskEmoji[result.risk_level] || '⚪';
                const response = `## 📋 Incident Report

**Risk Level:** ${emoji} ${result.risk_level.charAt(0).toUpperCase() + result.risk_level.slice(1)}

### Summary
${result.summary}

### Categories
${result.categories.map(c => `- ${c}`).join('\n')}

### Recommended Next Steps
${result.recommended_next_steps.map((step, i) => `${i + 1}. ${step}`).join('\n')}`;
                return { content: [{ type: 'text', text: response }] };
            }
            // Tool name not in the catalog: soft failure, not an exception.
            default:
                return {
                    content: [{ type: 'text', text: `Unknown tool: ${name}` }],
                    isError: true,
                };
        }
    }
    catch (error) {
        // Any SDK/network failure is reported back to the client as an
        // isError tool result instead of crashing the server process.
        const message = error instanceof Error ? error.message : 'Unknown error';
        return {
            content: [{ type: 'text', text: `Error: ${message}` }],
            isError: true,
        };
    }
});
|
|
401
|
+
/**
 * Boot the server on a stdio transport and keep it attached.
 * Startup/fatal messages go to stderr so stdout stays reserved for the
 * MCP protocol stream.
 */
const main = async () => {
    const transport = new stdio_js_1.StdioServerTransport();
    await server.connect(transport);
    console.error('SafeNest MCP server running on stdio');
};

main().catch((err) => {
    // Unrecoverable startup failure: report and exit non-zero.
    console.error('Fatal error:', err);
    process.exit(1);
});
|
package/package.json
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@safenest/mcp",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "MCP server for SafeNest - AI-powered child safety analysis tools for Claude and other AI assistants",
|
|
5
|
+
"main": "dist/index.js",
|
|
6
|
+
"types": "dist/index.d.ts",
|
|
7
|
+
"bin": {
|
|
8
|
+
"safenest-mcp": "./dist/index.js"
|
|
9
|
+
},
|
|
10
|
+
"files": [
|
|
11
|
+
"dist"
|
|
12
|
+
],
|
|
13
|
+
"scripts": {
|
|
14
|
+
"build": "tsc",
|
|
15
|
+
"dev": "tsc --watch",
|
|
16
|
+
"start": "node dist/index.js",
|
|
17
|
+
"prepublishOnly": "npm run build"
|
|
18
|
+
},
|
|
19
|
+
"keywords": [
|
|
20
|
+
"safenest",
|
|
21
|
+
"mcp",
|
|
22
|
+
"model-context-protocol",
|
|
23
|
+
"claude",
|
|
24
|
+
"ai",
|
|
25
|
+
"child-safety",
|
|
26
|
+
"content-moderation",
|
|
27
|
+
"bullying-detection"
|
|
28
|
+
],
|
|
29
|
+
"author": "SafeNest <sales@safenest.dev>",
|
|
30
|
+
"license": "MIT",
|
|
31
|
+
"repository": {
|
|
32
|
+
"type": "git",
|
|
33
|
+
"url": "https://github.com/SafeNestSDK/mcp.git"
|
|
34
|
+
},
|
|
35
|
+
"homepage": "https://safenest.dev",
|
|
36
|
+
"bugs": {
|
|
37
|
+
"url": "https://github.com/SafeNestSDK/mcp/issues"
|
|
38
|
+
},
|
|
39
|
+
"dependencies": {
|
|
40
|
+
"@modelcontextprotocol/sdk": "^1.0.0",
|
|
41
|
+
"@safenest/sdk": "^1.0.0"
|
|
42
|
+
},
|
|
43
|
+
"devDependencies": {
|
|
44
|
+
"@types/node": "^20.0.0",
|
|
45
|
+
"typescript": "^5.3.0"
|
|
46
|
+
},
|
|
47
|
+
"engines": {
|
|
48
|
+
"node": ">=18.0.0"
|
|
49
|
+
}
|
|
50
|
+
}
|