@vettly/mcp 0.1.7 → 0.1.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +10 -10
- package/dist/bin/vettly-mcp.cjs +212 -0
- package/dist/bin/vettly-mcp.js +1 -1
- package/dist/{chunk-VZJIPBOX.js → chunk-L5ZHIGXM.js} +212 -0
- package/dist/index.cjs +223 -0
- package/dist/index.js +11 -1
- package/package.json +4 -3
- package/smithery.yaml +21 -0
- package/dist/bin/vettly-mcp.d.cts +0 -1
- package/dist/bin/vettly-mcp.d.ts +0 -1
- package/dist/index.d.cts +0 -9
- package/dist/index.d.ts +0 -9
package/README.md
CHANGED
@@ -1,17 +1,17 @@
 # @vettly/mcp
 
-MCP server for
+MCP server for UGC moderation. Enables AI assistants to make policy-governed, auditable content decisions.
 
-##
+## UGC Moderation Essentials
 
-
+Apps with user-generated content need four things to stay compliant and keep users safe. The MCP server lets AI assistants participate in all four:
 
-| Requirement |
-|
-| **Content filtering** |
-| **User reporting** |
-| **User blocking** |
-| **Audit trail** |
+| Requirement | MCP Integration |
+|-------------|-----------------|
+| **Content filtering** | `moderate_content` tool |
+| **User reporting** | `get_recent_decisions` tool (review flagged content) |
+| **User blocking** | Pair with server-side SDK (`POST /v1/blocks`) |
+| **Audit trail** | Every tool response includes `decisionId` |
 
 ```
 Prompt: "Check if this user comment is safe for our app: 'Great product!'
@@ -53,7 +53,7 @@ Add to your `claude_desktop_config.json`:
       "command": "npx",
       "args": ["-y", "@vettly/mcp"],
       "env": {
-        "VETTLY_API_KEY": "
+        "VETTLY_API_KEY": "vettly_live_xxx"
       }
     }
   }
package/dist/bin/vettly-mcp.cjs
CHANGED
@@ -24,6 +24,13 @@ function registerModerateTool(server, client) {
       policyId: ModerateInputSchema.shape.policyId,
       contentType: ModerateInputSchema.shape.contentType
     },
+    {
+      title: "Moderate Content",
+      readOnlyHint: true,
+      destructiveHint: false,
+      idempotentHint: true,
+      openWorldHint: true
+    },
     async (args) => {
       const input = ModerateInputSchema.parse(args);
       try {
@@ -80,6 +87,13 @@ function registerValidateTool(server, client) {
     {
       yamlContent: ValidateInputSchema.shape.yamlContent
     },
+    {
+      title: "Validate Policy",
+      readOnlyHint: true,
+      destructiveHint: false,
+      idempotentHint: true,
+      openWorldHint: true
+    },
     async (args) => {
       const input = ValidateInputSchema.parse(args);
       try {
@@ -113,6 +127,13 @@ function registerListPoliciesTool(server, client) {
     "list_policies",
     "List all moderation policies available in your Vettly account.",
     {},
+    {
+      title: "List Policies",
+      readOnlyHint: true,
+      destructiveHint: false,
+      idempotentHint: true,
+      openWorldHint: true
+    },
     async () => {
       try {
         const result = await client.listPolicies();
@@ -151,6 +172,13 @@ function registerUsageStatsTool(server, client) {
     {
       days: UsageStatsInputSchema.shape.days
     },
+    {
+      title: "Get Usage Stats",
+      readOnlyHint: true,
+      destructiveHint: false,
+      idempotentHint: true,
+      openWorldHint: true
+    },
     async (args) => {
       const input = UsageStatsInputSchema.parse(args);
       try {
@@ -196,6 +224,13 @@ function registerDecisionsTool(server, client) {
       policyId: DecisionsInputSchema.shape.policyId,
       contentType: DecisionsInputSchema.shape.contentType
     },
+    {
+      title: "Get Recent Decisions",
+      readOnlyHint: true,
+      destructiveHint: false,
+      idempotentHint: true,
+      openWorldHint: true
+    },
     async (args) => {
       const input = DecisionsInputSchema.parse(args);
       try {
@@ -316,6 +351,182 @@ function registerResources(server, client) {
   registerPolicyResources(server, client);
 }
 
+// src/prompts/moderate-content-guide.ts
+function registerModerateContentGuide(server) {
+  server.prompt(
+    "moderate-content-guide",
+    "Step-by-step guide to moderating content with Vettly. Explains the workflow: list policies, moderate content, and interpret results.",
+    async () => ({
+      messages: [
+        {
+          role: "user",
+          content: {
+            type: "text",
+            text: `You are helping a user moderate content with Vettly. Follow this workflow:
+
+## Step 1: List available policies
+Use the \`list_policies\` tool to see what moderation policies are configured. Each policy has an ID and defines rules for what content is allowed or flagged.
+
+## Step 2: Moderate content
+Use the \`moderate_content\` tool with:
+- \`content\`: The text, image URL, or video URL to check
+- \`policyId\`: The policy ID from step 1
+- \`contentType\`: "text", "image", or "video"
+
+## Step 3: Interpret results
+The response includes:
+- \`safe\`: Boolean \u2014 whether the content passed moderation
+- \`flagged\`: Boolean \u2014 whether the content was flagged for review
+- \`action\`: The recommended action ("allow", "flag", or "block")
+- \`categories\`: Breakdown of category scores (e.g. hate speech, violence, sexual content)
+- \`decisionId\`: Unique ID for this decision, useful for auditing
+
+## Tips
+- Always list policies first so you know which policy ID to use.
+- For images and video, pass the URL as the \`content\` parameter.
+- Use \`get_recent_decisions\` to review past moderation outcomes.
+- Use \`get_usage_stats\` to check your account's usage and remaining quota.`
+          }
+        }
+      ]
+    })
+  );
+}
+
+// src/prompts/write-policy.ts
+var import_zod5 = require("zod");
+function registerWritePolicy(server) {
+  server.prompt(
+    "write-policy",
+    "Guide for writing a Vettly moderation policy in YAML. Optionally tailored to a specific platform type.",
+    { platform: import_zod5.z.string().optional().describe('The type of platform (e.g. "kids app", "marketplace", "social media", "dating app")') },
+    async (args) => {
+      const platformContext = args.platform ? `
+
+The user is building a **${args.platform}**. Tailor your policy suggestions to this platform type \u2014 consider what content categories matter most and what thresholds are appropriate.` : "";
+      return {
+        messages: [
+          {
+            role: "user",
+            content: {
+              type: "text",
+              text: `You are helping a user write a Vettly moderation policy in YAML.${platformContext}
+
+## Policy YAML structure
+
+A Vettly policy is a YAML document that defines moderation rules in plain English. Here's the structure:
+
+\`\`\`yaml
+name: My Policy
+description: Brief description of what this policy covers
+
+rules:
+  - category: hate_speech
+    threshold: 0.7
+    action: block
+    description: Block hateful content targeting protected groups
+
+  - category: violence
+    threshold: 0.8
+    action: flag
+    description: Flag violent content for human review
+
+  - category: sexual_content
+    threshold: 0.5
+    action: block
+    description: Block explicit sexual content
+
+  - category: self_harm
+    threshold: 0.6
+    action: block
+    description: Block content promoting self-harm
+\`\`\`
+
+## Available categories
+Common categories: \`hate_speech\`, \`violence\`, \`sexual_content\`, \`self_harm\`, \`harassment\`, \`dangerous_content\`, \`spam\`
+
+## Actions
+- \`allow\` \u2014 let the content through
+- \`flag\` \u2014 mark for human review
+- \`block\` \u2014 reject the content
+
+## Thresholds
+- 0.0\u20130.3: Very sensitive (catches more, higher false positive rate)
+- 0.4\u20130.6: Moderate sensitivity
+- 0.7\u20131.0: High confidence only (fewer false positives)
+
+## Workflow
+1. Draft the YAML policy based on the user's requirements
+2. Use the \`validate_policy\` tool to check for errors
+3. Iterate until validation passes
+4. The user can then save the policy via the Vettly dashboard`
+            }
+          }
+        ]
+      };
+    }
+  );
+}
+
+// src/prompts/review-flagged-content.ts
+var import_zod6 = require("zod");
+function registerReviewFlaggedContent(server) {
+  server.prompt(
+    "review-flagged-content",
+    "Template for reviewing flagged moderation decisions. Optionally filtered to a specific policy.",
+    { policyId: import_zod6.z.string().optional().describe("Filter flagged decisions to a specific policy ID") },
+    async (args) => {
+      const policyFilter = args.policyId ? `
+
+Filter decisions to policy ID: \`${args.policyId}\`` : "";
+      return {
+        messages: [
+          {
+            role: "user",
+            content: {
+              type: "text",
+              text: `You are helping a user review flagged moderation decisions in Vettly.${policyFilter}
+
+## Review workflow
+
+### Step 1: Fetch flagged decisions
+Use the \`get_recent_decisions\` tool with \`flagged: true\` to get recent flagged content.${args.policyId ? ` Also set \`policyId: "${args.policyId}"\` to filter by that policy.` : " Optionally filter by `policyId` or `contentType`."}
+
+### Step 2: Analyze each decision
+For each flagged decision, examine:
+- **Content**: What was submitted
+- **Categories**: Which categories triggered and their scores
+- **Action**: What action was taken (flag vs block)
+- **Policy**: Which policy was applied
+
+### Step 3: Provide recommendations
+For each flagged item, suggest one of:
+- **Approve**: The content is acceptable, the flag was a false positive
+- **Confirm block**: The content correctly violates the policy
+- **Adjust policy**: The threshold may need tuning (too sensitive or not enough)
+
+### Step 4: Check for patterns
+Use \`get_usage_stats\` to look at overall moderation trends. If many items are being flagged, the policy thresholds may need adjustment.
+
+## Tips
+- High category scores (>0.9) are almost always correct flags
+- Scores in the 0.5-0.7 range are where most false positives occur
+- If a category consistently flags incorrectly, raise its threshold in the policy`
+            }
+          }
+        ]
+      };
+    }
+  );
+}
+
+// src/prompts/index.ts
+function registerPrompts(server) {
+  registerModerateContentGuide(server);
+  registerWritePolicy(server);
+  registerReviewFlaggedContent(server);
+}
+
 // src/server.ts
 function createVettlyMcpServer(config) {
   const client = new import_sdk.ModerationClient({
@@ -328,6 +539,7 @@ function createVettlyMcpServer(config) {
   });
   registerTools(server, client);
   registerResources(server, client);
+  registerPrompts(server);
   return server;
 }
 
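The recurring seven-line insertions above add MCP tool annotations (a human-readable `title` plus `readOnlyHint`, `destructiveHint`, `idempotentHint`, and `openWorldHint`) between each tool's input schema and its handler. Below is a minimal sketch of that registration pattern at the source level, assuming the `McpServer.tool(name, description, schema, annotations, handler)` overload of the official SDK; the description string, schema fields, and `client` shape are illustrative placeholders, not the package's exact code.

```typescript
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { z } from "zod";

// Hypothetical sketch: register a read-only moderation tool with the same
// annotation flags the 0.1.9 bundles add. Schema fields and the client
// interface are assumptions for illustration only.
export function registerModerateTool(
  server: McpServer,
  client: { moderate(input: unknown): Promise<unknown> }
) {
  server.tool(
    "moderate_content",
    "Moderate a piece of user-generated content against a policy.", // placeholder text
    {
      content: z.string(),
      policyId: z.string().optional(),
      contentType: z.enum(["text", "image", "video"]).optional()
    },
    {
      title: "Moderate Content",
      readOnlyHint: true,      // the tool does not mutate server-side state
      destructiveHint: false,  // no irreversible side effects
      idempotentHint: true,    // repeating the call yields the same decision
      openWorldHint: true      // it calls out to an external API
    },
    async (args) => {
      const result = await client.moderate(args);
      return {
        content: [{ type: "text" as const, text: JSON.stringify(result, null, 2) }]
      };
    }
  );
}
```

Annotations like these let MCP clients label the tool and decide, for example, whether a confirmation step is needed before invoking it.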
package/dist/bin/vettly-mcp.js
CHANGED
@@ -18,6 +18,13 @@ function registerModerateTool(server, client) {
       policyId: ModerateInputSchema.shape.policyId,
       contentType: ModerateInputSchema.shape.contentType
     },
+    {
+      title: "Moderate Content",
+      readOnlyHint: true,
+      destructiveHint: false,
+      idempotentHint: true,
+      openWorldHint: true
+    },
     async (args) => {
       const input = ModerateInputSchema.parse(args);
       try {
@@ -74,6 +81,13 @@ function registerValidateTool(server, client) {
     {
       yamlContent: ValidateInputSchema.shape.yamlContent
     },
+    {
+      title: "Validate Policy",
+      readOnlyHint: true,
+      destructiveHint: false,
+      idempotentHint: true,
+      openWorldHint: true
+    },
     async (args) => {
       const input = ValidateInputSchema.parse(args);
       try {
@@ -107,6 +121,13 @@ function registerListPoliciesTool(server, client) {
     "list_policies",
     "List all moderation policies available in your Vettly account.",
     {},
+    {
+      title: "List Policies",
+      readOnlyHint: true,
+      destructiveHint: false,
+      idempotentHint: true,
+      openWorldHint: true
+    },
     async () => {
       try {
         const result = await client.listPolicies();
@@ -145,6 +166,13 @@ function registerUsageStatsTool(server, client) {
     {
       days: UsageStatsInputSchema.shape.days
     },
+    {
+      title: "Get Usage Stats",
+      readOnlyHint: true,
+      destructiveHint: false,
+      idempotentHint: true,
+      openWorldHint: true
+    },
     async (args) => {
       const input = UsageStatsInputSchema.parse(args);
       try {
@@ -190,6 +218,13 @@ function registerDecisionsTool(server, client) {
       policyId: DecisionsInputSchema.shape.policyId,
       contentType: DecisionsInputSchema.shape.contentType
     },
+    {
+      title: "Get Recent Decisions",
+      readOnlyHint: true,
+      destructiveHint: false,
+      idempotentHint: true,
+      openWorldHint: true
+    },
     async (args) => {
       const input = DecisionsInputSchema.parse(args);
       try {
@@ -310,6 +345,182 @@ function registerResources(server, client) {
   registerPolicyResources(server, client);
 }
 
+// src/prompts/moderate-content-guide.ts
+function registerModerateContentGuide(server) {
+  server.prompt(
+    "moderate-content-guide",
+    "Step-by-step guide to moderating content with Vettly. Explains the workflow: list policies, moderate content, and interpret results.",
+    async () => ({
+      messages: [
+        {
+          role: "user",
+          content: {
+            type: "text",
+            text: `You are helping a user moderate content with Vettly. Follow this workflow:
+
+## Step 1: List available policies
+Use the \`list_policies\` tool to see what moderation policies are configured. Each policy has an ID and defines rules for what content is allowed or flagged.
+
+## Step 2: Moderate content
+Use the \`moderate_content\` tool with:
+- \`content\`: The text, image URL, or video URL to check
+- \`policyId\`: The policy ID from step 1
+- \`contentType\`: "text", "image", or "video"
+
+## Step 3: Interpret results
+The response includes:
+- \`safe\`: Boolean \u2014 whether the content passed moderation
+- \`flagged\`: Boolean \u2014 whether the content was flagged for review
+- \`action\`: The recommended action ("allow", "flag", or "block")
+- \`categories\`: Breakdown of category scores (e.g. hate speech, violence, sexual content)
+- \`decisionId\`: Unique ID for this decision, useful for auditing
+
+## Tips
+- Always list policies first so you know which policy ID to use.
+- For images and video, pass the URL as the \`content\` parameter.
+- Use \`get_recent_decisions\` to review past moderation outcomes.
+- Use \`get_usage_stats\` to check your account's usage and remaining quota.`
+          }
+        }
+      ]
+    })
+  );
+}
+
+// src/prompts/write-policy.ts
+import { z as z5 } from "zod";
+function registerWritePolicy(server) {
+  server.prompt(
+    "write-policy",
+    "Guide for writing a Vettly moderation policy in YAML. Optionally tailored to a specific platform type.",
+    { platform: z5.string().optional().describe('The type of platform (e.g. "kids app", "marketplace", "social media", "dating app")') },
+    async (args) => {
+      const platformContext = args.platform ? `
+
+The user is building a **${args.platform}**. Tailor your policy suggestions to this platform type \u2014 consider what content categories matter most and what thresholds are appropriate.` : "";
+      return {
+        messages: [
+          {
+            role: "user",
+            content: {
+              type: "text",
+              text: `You are helping a user write a Vettly moderation policy in YAML.${platformContext}
+
+## Policy YAML structure
+
+A Vettly policy is a YAML document that defines moderation rules in plain English. Here's the structure:
+
+\`\`\`yaml
+name: My Policy
+description: Brief description of what this policy covers
+
+rules:
+  - category: hate_speech
+    threshold: 0.7
+    action: block
+    description: Block hateful content targeting protected groups
+
+  - category: violence
+    threshold: 0.8
+    action: flag
+    description: Flag violent content for human review
+
+  - category: sexual_content
+    threshold: 0.5
+    action: block
+    description: Block explicit sexual content
+
+  - category: self_harm
+    threshold: 0.6
+    action: block
+    description: Block content promoting self-harm
+\`\`\`
+
+## Available categories
+Common categories: \`hate_speech\`, \`violence\`, \`sexual_content\`, \`self_harm\`, \`harassment\`, \`dangerous_content\`, \`spam\`
+
+## Actions
+- \`allow\` \u2014 let the content through
+- \`flag\` \u2014 mark for human review
+- \`block\` \u2014 reject the content
+
+## Thresholds
+- 0.0\u20130.3: Very sensitive (catches more, higher false positive rate)
+- 0.4\u20130.6: Moderate sensitivity
+- 0.7\u20131.0: High confidence only (fewer false positives)
+
+## Workflow
+1. Draft the YAML policy based on the user's requirements
+2. Use the \`validate_policy\` tool to check for errors
+3. Iterate until validation passes
+4. The user can then save the policy via the Vettly dashboard`
+            }
+          }
+        ]
+      };
+    }
+  );
+}
+
+// src/prompts/review-flagged-content.ts
+import { z as z6 } from "zod";
+function registerReviewFlaggedContent(server) {
+  server.prompt(
+    "review-flagged-content",
+    "Template for reviewing flagged moderation decisions. Optionally filtered to a specific policy.",
+    { policyId: z6.string().optional().describe("Filter flagged decisions to a specific policy ID") },
+    async (args) => {
+      const policyFilter = args.policyId ? `
+
+Filter decisions to policy ID: \`${args.policyId}\`` : "";
+      return {
+        messages: [
+          {
+            role: "user",
+            content: {
+              type: "text",
+              text: `You are helping a user review flagged moderation decisions in Vettly.${policyFilter}
+
+## Review workflow
+
+### Step 1: Fetch flagged decisions
+Use the \`get_recent_decisions\` tool with \`flagged: true\` to get recent flagged content.${args.policyId ? ` Also set \`policyId: "${args.policyId}"\` to filter by that policy.` : " Optionally filter by `policyId` or `contentType`."}
+
+### Step 2: Analyze each decision
+For each flagged decision, examine:
+- **Content**: What was submitted
+- **Categories**: Which categories triggered and their scores
+- **Action**: What action was taken (flag vs block)
+- **Policy**: Which policy was applied
+
+### Step 3: Provide recommendations
+For each flagged item, suggest one of:
+- **Approve**: The content is acceptable, the flag was a false positive
+- **Confirm block**: The content correctly violates the policy
+- **Adjust policy**: The threshold may need tuning (too sensitive or not enough)
+
+### Step 4: Check for patterns
+Use \`get_usage_stats\` to look at overall moderation trends. If many items are being flagged, the policy thresholds may need adjustment.
+
+## Tips
+- High category scores (>0.9) are almost always correct flags
+- Scores in the 0.5-0.7 range are where most false positives occur
+- If a category consistently flags incorrectly, raise its threshold in the policy`
+            }
+          }
+        ]
+      };
+    }
+  );
+}
+
+// src/prompts/index.ts
+function registerPrompts(server) {
+  registerModerateContentGuide(server);
+  registerWritePolicy(server);
+  registerReviewFlaggedContent(server);
+}
+
 // src/server.ts
 function createVettlyMcpServer(config) {
   const client = new ModerationClient({
@@ -322,6 +533,7 @@ function createVettlyMcpServer(config) {
   });
   registerTools(server, client);
   registerResources(server, client);
+  registerPrompts(server);
   return server;
 }
 
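Both bin bundles register the same three prompts through `server.prompt(name, description, argsShape, callback)`, where the argument shape is a plain object of Zod fields. A minimal sketch of that pattern is shown below, assuming the official SDK's `McpServer.prompt` overload; the prompt name and text are placeholders, not additional prompts shipped by the package.

```typescript
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { z } from "zod";

// Hypothetical sketch of an MCP prompt with one optional argument, mirroring
// the write-policy and review-flagged-content registrations in the diff.
export function registerExamplePrompt(server: McpServer) {
  server.prompt(
    "example-guide",
    "Illustrative prompt showing the argument-shape plus callback pattern.",
    { topic: z.string().optional().describe("Optional topic to tailor the guide to") },
    async (args) => ({
      messages: [
        {
          role: "user" as const,
          content: {
            type: "text" as const,
            // The callback builds the prompt text from the validated arguments.
            text: `Help the user with ${args.topic ?? "their task"}.`
          }
        }
      ]
    })
  );
}
```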
package/dist/index.cjs
CHANGED
@@ -20,9 +20,11 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
 // src/index.ts
 var index_exports = {};
 __export(index_exports, {
+  configSchema: () => configSchema,
   createVettlyMcpServer: () => createVettlyMcpServer
 });
 module.exports = __toCommonJS(index_exports);
+var import_zod7 = require("zod");
 
 // src/server.ts
 var import_mcp2 = require("@modelcontextprotocol/sdk/server/mcp.js");
@@ -44,6 +46,13 @@ function registerModerateTool(server, client) {
       policyId: ModerateInputSchema.shape.policyId,
       contentType: ModerateInputSchema.shape.contentType
     },
+    {
+      title: "Moderate Content",
+      readOnlyHint: true,
+      destructiveHint: false,
+      idempotentHint: true,
+      openWorldHint: true
+    },
     async (args) => {
       const input = ModerateInputSchema.parse(args);
       try {
@@ -100,6 +109,13 @@ function registerValidateTool(server, client) {
     {
       yamlContent: ValidateInputSchema.shape.yamlContent
     },
+    {
+      title: "Validate Policy",
+      readOnlyHint: true,
+      destructiveHint: false,
+      idempotentHint: true,
+      openWorldHint: true
+    },
     async (args) => {
       const input = ValidateInputSchema.parse(args);
       try {
@@ -133,6 +149,13 @@ function registerListPoliciesTool(server, client) {
     "list_policies",
     "List all moderation policies available in your Vettly account.",
     {},
+    {
+      title: "List Policies",
+      readOnlyHint: true,
+      destructiveHint: false,
+      idempotentHint: true,
+      openWorldHint: true
+    },
     async () => {
       try {
         const result = await client.listPolicies();
@@ -171,6 +194,13 @@ function registerUsageStatsTool(server, client) {
     {
       days: UsageStatsInputSchema.shape.days
     },
+    {
+      title: "Get Usage Stats",
+      readOnlyHint: true,
+      destructiveHint: false,
+      idempotentHint: true,
+      openWorldHint: true
+    },
     async (args) => {
       const input = UsageStatsInputSchema.parse(args);
       try {
@@ -216,6 +246,13 @@ function registerDecisionsTool(server, client) {
       policyId: DecisionsInputSchema.shape.policyId,
       contentType: DecisionsInputSchema.shape.contentType
     },
+    {
+      title: "Get Recent Decisions",
+      readOnlyHint: true,
+      destructiveHint: false,
+      idempotentHint: true,
+      openWorldHint: true
+    },
     async (args) => {
       const input = DecisionsInputSchema.parse(args);
       try {
@@ -336,6 +373,182 @@ function registerResources(server, client) {
   registerPolicyResources(server, client);
 }
 
+// src/prompts/moderate-content-guide.ts
+function registerModerateContentGuide(server) {
+  server.prompt(
+    "moderate-content-guide",
+    "Step-by-step guide to moderating content with Vettly. Explains the workflow: list policies, moderate content, and interpret results.",
+    async () => ({
+      messages: [
+        {
+          role: "user",
+          content: {
+            type: "text",
+            text: `You are helping a user moderate content with Vettly. Follow this workflow:
+
+## Step 1: List available policies
+Use the \`list_policies\` tool to see what moderation policies are configured. Each policy has an ID and defines rules for what content is allowed or flagged.
+
+## Step 2: Moderate content
+Use the \`moderate_content\` tool with:
+- \`content\`: The text, image URL, or video URL to check
+- \`policyId\`: The policy ID from step 1
+- \`contentType\`: "text", "image", or "video"
+
+## Step 3: Interpret results
+The response includes:
+- \`safe\`: Boolean \u2014 whether the content passed moderation
+- \`flagged\`: Boolean \u2014 whether the content was flagged for review
+- \`action\`: The recommended action ("allow", "flag", or "block")
+- \`categories\`: Breakdown of category scores (e.g. hate speech, violence, sexual content)
+- \`decisionId\`: Unique ID for this decision, useful for auditing
+
+## Tips
+- Always list policies first so you know which policy ID to use.
+- For images and video, pass the URL as the \`content\` parameter.
+- Use \`get_recent_decisions\` to review past moderation outcomes.
+- Use \`get_usage_stats\` to check your account's usage and remaining quota.`
+          }
+        }
+      ]
+    })
+  );
+}
+
+// src/prompts/write-policy.ts
+var import_zod5 = require("zod");
+function registerWritePolicy(server) {
+  server.prompt(
+    "write-policy",
+    "Guide for writing a Vettly moderation policy in YAML. Optionally tailored to a specific platform type.",
+    { platform: import_zod5.z.string().optional().describe('The type of platform (e.g. "kids app", "marketplace", "social media", "dating app")') },
+    async (args) => {
+      const platformContext = args.platform ? `
+
+The user is building a **${args.platform}**. Tailor your policy suggestions to this platform type \u2014 consider what content categories matter most and what thresholds are appropriate.` : "";
+      return {
+        messages: [
+          {
+            role: "user",
+            content: {
+              type: "text",
+              text: `You are helping a user write a Vettly moderation policy in YAML.${platformContext}
+
+## Policy YAML structure
+
+A Vettly policy is a YAML document that defines moderation rules in plain English. Here's the structure:
+
+\`\`\`yaml
+name: My Policy
+description: Brief description of what this policy covers
+
+rules:
+  - category: hate_speech
+    threshold: 0.7
+    action: block
+    description: Block hateful content targeting protected groups
+
+  - category: violence
+    threshold: 0.8
+    action: flag
+    description: Flag violent content for human review
+
+  - category: sexual_content
+    threshold: 0.5
+    action: block
+    description: Block explicit sexual content
+
+  - category: self_harm
+    threshold: 0.6
+    action: block
+    description: Block content promoting self-harm
+\`\`\`
+
+## Available categories
+Common categories: \`hate_speech\`, \`violence\`, \`sexual_content\`, \`self_harm\`, \`harassment\`, \`dangerous_content\`, \`spam\`
+
+## Actions
+- \`allow\` \u2014 let the content through
+- \`flag\` \u2014 mark for human review
+- \`block\` \u2014 reject the content
+
+## Thresholds
+- 0.0\u20130.3: Very sensitive (catches more, higher false positive rate)
+- 0.4\u20130.6: Moderate sensitivity
+- 0.7\u20131.0: High confidence only (fewer false positives)
+
+## Workflow
+1. Draft the YAML policy based on the user's requirements
+2. Use the \`validate_policy\` tool to check for errors
+3. Iterate until validation passes
+4. The user can then save the policy via the Vettly dashboard`
+            }
+          }
+        ]
+      };
+    }
+  );
+}
+
+// src/prompts/review-flagged-content.ts
+var import_zod6 = require("zod");
+function registerReviewFlaggedContent(server) {
+  server.prompt(
+    "review-flagged-content",
+    "Template for reviewing flagged moderation decisions. Optionally filtered to a specific policy.",
+    { policyId: import_zod6.z.string().optional().describe("Filter flagged decisions to a specific policy ID") },
+    async (args) => {
+      const policyFilter = args.policyId ? `
+
+Filter decisions to policy ID: \`${args.policyId}\`` : "";
+      return {
+        messages: [
+          {
+            role: "user",
+            content: {
+              type: "text",
+              text: `You are helping a user review flagged moderation decisions in Vettly.${policyFilter}
+
+## Review workflow
+
+### Step 1: Fetch flagged decisions
+Use the \`get_recent_decisions\` tool with \`flagged: true\` to get recent flagged content.${args.policyId ? ` Also set \`policyId: "${args.policyId}"\` to filter by that policy.` : " Optionally filter by `policyId` or `contentType`."}
+
+### Step 2: Analyze each decision
+For each flagged decision, examine:
+- **Content**: What was submitted
+- **Categories**: Which categories triggered and their scores
+- **Action**: What action was taken (flag vs block)
+- **Policy**: Which policy was applied
+
+### Step 3: Provide recommendations
+For each flagged item, suggest one of:
+- **Approve**: The content is acceptable, the flag was a false positive
+- **Confirm block**: The content correctly violates the policy
+- **Adjust policy**: The threshold may need tuning (too sensitive or not enough)
+
+### Step 4: Check for patterns
+Use \`get_usage_stats\` to look at overall moderation trends. If many items are being flagged, the policy thresholds may need adjustment.
+
+## Tips
+- High category scores (>0.9) are almost always correct flags
+- Scores in the 0.5-0.7 range are where most false positives occur
+- If a category consistently flags incorrectly, raise its threshold in the policy`
+            }
+          }
+        ]
+      };
+    }
+  );
+}
+
+// src/prompts/index.ts
+function registerPrompts(server) {
+  registerModerateContentGuide(server);
+  registerWritePolicy(server);
+  registerReviewFlaggedContent(server);
+}
+
 // src/server.ts
 function createVettlyMcpServer(config) {
   const client = new import_sdk.ModerationClient({
@@ -348,9 +561,19 @@ function createVettlyMcpServer(config) {
   });
   registerTools(server, client);
   registerResources(server, client);
+  registerPrompts(server);
   return server;
 }
+
+// src/index.ts
+var configSchema = import_zod7.z.object({
+  vettlyApiKey: import_zod7.z.string().describe(
+    "Your Vettly API key (starts with vettly_live_ or vettly_test_). Get one free at https://vettly.dev"
+  ),
+  apiUrl: import_zod7.z.string().url().default("https://api.vettly.dev").describe("Custom API base URL. Only change this for self-hosted Vettly instances.")
+});
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
+  configSchema,
   createVettlyMcpServer
 });
package/dist/index.js
CHANGED
@@ -1,6 +1,16 @@
 import {
   createVettlyMcpServer
-} from "./chunk-
+} from "./chunk-L5ZHIGXM.js";
+
+// src/index.ts
+import { z } from "zod";
+var configSchema = z.object({
+  vettlyApiKey: z.string().describe(
+    "Your Vettly API key (starts with vettly_live_ or vettly_test_). Get one free at https://vettly.dev"
+  ),
+  apiUrl: z.string().url().default("https://api.vettly.dev").describe("Custom API base URL. Only change this for self-hosted Vettly instances.")
+});
 export {
+  configSchema,
   createVettlyMcpServer
 };
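With `configSchema` now exported alongside `createVettlyMcpServer`, a host can validate its settings before constructing the server. The following is a hedged sketch of such wiring over stdio; the `apiKey`/`apiUrl` constructor fields are taken from the 0.1.7 type declarations shown at the end of this diff and are an assumption, not documented 0.1.9 API.

```typescript
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { configSchema, createVettlyMcpServer } from "@vettly/mcp";

// Hypothetical wiring sketch: validate host-supplied config with the new
// configSchema, then start the MCP server on stdio. The apiKey/apiUrl field
// names passed to createVettlyMcpServer are assumed from the deleted .d.ts.
async function main() {
  const config = configSchema.parse({
    vettlyApiKey: process.env.VETTLY_API_KEY,
    apiUrl: process.env.VETTLY_API_URL ?? undefined // falls back to the schema default
  });
  const server = createVettlyMcpServer({
    apiKey: config.vettlyApiKey,
    apiUrl: config.apiUrl
  });
  await server.connect(new StdioServerTransport());
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});
```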
package/package.json
CHANGED
@@ -1,8 +1,8 @@
 {
   "name": "@vettly/mcp",
-  "version": "0.1.
+  "version": "0.1.9",
   "mcpName": "io.github.code-with-brian/vettly",
-  "description": "MCP server for content moderation with Claude and Cursor.
+  "description": "MCP server for content moderation with Claude and Cursor. Content moderation policy testing.",
   "type": "module",
   "main": "./dist/index.js",
   "types": "./dist/index.d.ts",
@@ -18,7 +18,8 @@
   },
   "files": [
     "dist",
-    "README.md"
+    "README.md",
+    "smithery.yaml"
   ],
   "scripts": {
     "build": "tsup src/index.ts src/bin/vettly-mcp.ts --format esm,cjs --dts --clean",
package/smithery.yaml
ADDED
@@ -0,0 +1,21 @@
+# Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml
+
+startCommand:
+  type: stdio
+
+  configSchema:
+    type: object
+    required:
+      - vettlyApiKey
+    properties:
+      vettlyApiKey:
+        type: string
+        description: Your Vettly API key (starts with vettly_live_ or vettly_test_). Get one free at https://vettly.dev
+      apiUrl:
+        type: string
+        default: https://api.vettly.dev
+        description: Custom API base URL. Only change this for self-hosted Vettly instances.
+
+  commandFunction:
+    |-
+    (config) => ({ command: 'npx', args: ['-y', '@vettly/mcp'], env: { VETTLY_API_KEY: config.vettlyApiKey, ...(config.apiUrl && config.apiUrl !== 'https://api.vettly.dev' ? { VETTLY_API_URL: config.apiUrl } : {}) } })
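The `commandFunction` above is plain JavaScript, so its behaviour can be checked directly: when `apiUrl` equals the default, no `VETTLY_API_URL` entry is added to `env`. A small sketch reproducing it with a placeholder API key:

```typescript
// Reproduction of the commandFunction from smithery.yaml, evaluated against a
// sample config. The key value below is a placeholder.
const commandFunction = (config: { vettlyApiKey: string; apiUrl?: string }) => ({
  command: "npx",
  args: ["-y", "@vettly/mcp"],
  env: {
    VETTLY_API_KEY: config.vettlyApiKey,
    // Only forward a custom base URL; the default is omitted.
    ...(config.apiUrl && config.apiUrl !== "https://api.vettly.dev"
      ? { VETTLY_API_URL: config.apiUrl }
      : {})
  }
});

console.log(commandFunction({ vettlyApiKey: "vettly_live_xxx", apiUrl: "https://api.vettly.dev" }));
// => { command: 'npx', args: [ '-y', '@vettly/mcp' ], env: { VETTLY_API_KEY: 'vettly_live_xxx' } }
```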
package/dist/bin/vettly-mcp.d.cts
DELETED

@@ -1 +0,0 @@
-#!/usr/bin/env node

package/dist/bin/vettly-mcp.d.ts
DELETED

@@ -1 +0,0 @@
-#!/usr/bin/env node

package/dist/index.d.cts
DELETED

@@ -1,9 +0,0 @@
-import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
-
-interface VettlyMcpConfig {
-  apiKey: string;
-  apiUrl?: string;
-}
-declare function createVettlyMcpServer(config: VettlyMcpConfig): McpServer;
-
-export { type VettlyMcpConfig, createVettlyMcpServer };

package/dist/index.d.ts
DELETED

@@ -1,9 +0,0 @@
-import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
-
-interface VettlyMcpConfig {
-  apiKey: string;
-  apiUrl?: string;
-}
-declare function createVettlyMcpServer(config: VettlyMcpConfig): McpServer;
-
-export { type VettlyMcpConfig, createVettlyMcpServer };