@modelcontextprotocol/server-everything 0.6.2 → 2025.3.19
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +43 -5
- package/dist/everything.js +139 -8
- package/package.json +4 -2
package/README.md
CHANGED
@@ -1,4 +1,4 @@
-# Everything MCP Server
+# Everything MCP Server
 
 This MCP server attempts to exercise all the features of the MCP protocol. It is not intended to be a useful server, but rather a test server for builders of MCP clients. It implements prompts, tools, resources, sampling, and more to showcase MCP capabilities.
 
@@ -15,7 +15,7 @@ This MCP server attempts to exercise all the features of the MCP protocol. It is
 2. `add`
    - Adds two numbers together
    - Inputs:
-     - `a` (number): First number
+     - `a` (number): First number
      - `b` (number): Second number
    - Returns: Text result of the addition
 
@@ -27,7 +27,7 @@ This MCP server attempts to exercise all the features of the MCP protocol. It is
    - Returns: Completion message with duration and steps
    - Sends progress notifications during execution
 
-4. `sampleLLM`
+4. `sampleLLM`
    - Demonstrates LLM sampling capability using MCP sampling feature
    - Inputs:
      - `prompt` (string): The prompt to send to the LLM
@@ -39,17 +39,41 @@ This MCP server attempts to exercise all the features of the MCP protocol. It is
    - No inputs required
    - Returns: Base64 encoded PNG image data
 
+6. `printEnv`
+   - Prints all environment variables
+   - Useful for debugging MCP server configuration
+   - No inputs required
+   - Returns: JSON string of all environment variables
+
+7. `annotatedMessage`
+   - Demonstrates how annotations can be used to provide metadata about content
+   - Inputs:
+     - `messageType` (enum: "error" | "success" | "debug"): Type of message to demonstrate different annotation patterns
+     - `includeImage` (boolean, default: false): Whether to include an example image
+   - Returns: Content with varying annotations:
+     - Error messages: High priority (1.0), visible to both user and assistant
+     - Success messages: Medium priority (0.7), user-focused
+     - Debug messages: Low priority (0.3), assistant-focused
+     - Optional image: Medium priority (0.5), user-focused
+   - Example annotations:
+     ```json
+     {
+       "priority": 1.0,
+       "audience": ["user", "assistant"]
+     }
+     ```
+
 ### Resources
 
 The server provides 100 test resources in two formats:
-- Even numbered resources:
+- Even numbered resources:
   - Plaintext format
   - URI pattern: `test://static/resource/{even_number}`
   - Content: Simple text description
 
 - Odd numbered resources:
   - Binary blob format
-  - URI pattern: `test://static/resource/{odd_number}`
+  - URI pattern: `test://static/resource/{odd_number}`
   - Content: Base64 encoded binary data
 
 Resource features:
@@ -72,6 +96,20 @@ Resource features:
   - `style` (string): Output style preference
   - Returns: Multi-turn conversation with images
 
+### Logging
+
+The server sends random-leveled log messages every 15 seconds, e.g.:
+
+```json
+{
+  "method": "notifications/message",
+  "params": {
+    "level": "info",
+    "data": "Info-level message"
+  }
+}
+```
+
 ## Usage with Claude Desktop
 
 Add to your `claude_desktop_config.json`:
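
For context on how the newly documented `printEnv` and `annotatedMessage` tools are exercised, here is a minimal client-side sketch using the MCP TypeScript SDK's stdio client. Only the tool names, arguments, and expected annotations come from the README above; the `Client`/`StdioClientTransport` usage, the `npx` invocation, and the client name/version are illustrative assumptions, not part of this diff.

```js
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

// Spawn the everything server over stdio (assumed invocation via npx).
const transport = new StdioClientTransport({
  command: "npx",
  args: ["-y", "@modelcontextprotocol/server-everything"],
});
const client = new Client(
  { name: "example-client", version: "1.0.0" }, // hypothetical client identity
  { capabilities: {} },
);
await client.connect(transport);

// printEnv takes no arguments and returns the server's environment as JSON text.
const env = await client.callTool({ name: "printEnv", arguments: {} });
console.log(env.content[0].text);

// annotatedMessage returns content whose annotations vary with messageType.
const annotated = await client.callTool({
  name: "annotatedMessage",
  arguments: { messageType: "error", includeImage: false },
});
// Per the README: { priority: 1.0, audience: ["user", "assistant"] }
console.log(annotated.content[0].annotations);

await client.close();
```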
package/dist/everything.js
CHANGED
@@ -1,5 +1,5 @@
 import { Server } from "@modelcontextprotocol/sdk/server/index.js";
-import { CallToolRequestSchema, CreateMessageResultSchema, GetPromptRequestSchema, ListPromptsRequestSchema, ListResourcesRequestSchema, ListResourceTemplatesRequestSchema, ListToolsRequestSchema, ReadResourceRequestSchema, SetLevelRequestSchema, SubscribeRequestSchema, ToolSchema, UnsubscribeRequestSchema, } from "@modelcontextprotocol/sdk/types.js";
+import { CallToolRequestSchema, CompleteRequestSchema, CreateMessageResultSchema, GetPromptRequestSchema, ListPromptsRequestSchema, ListResourcesRequestSchema, ListResourceTemplatesRequestSchema, ListToolsRequestSchema, ReadResourceRequestSchema, SetLevelRequestSchema, SubscribeRequestSchema, ToolSchema, UnsubscribeRequestSchema, } from "@modelcontextprotocol/sdk/types.js";
 import { z } from "zod";
 import { zodToJsonSchema } from "zod-to-json-schema";
 const ToolInputSchema = ToolSchema.shape.inputSchema;
@@ -18,6 +18,7 @@ const LongRunningOperationSchema = z.object({
         .describe("Duration of the operation in seconds"),
     steps: z.number().default(5).describe("Number of steps in the operation"),
 });
+const PrintEnvSchema = z.object({});
 const SampleLLMSchema = z.object({
     prompt: z.string().describe("The prompt to send to the LLM"),
     maxTokens: z
@@ -25,14 +26,28 @@ const SampleLLMSchema = z.object({
         .default(100)
         .describe("Maximum number of tokens to generate"),
 });
+// Example completion values
+const EXAMPLE_COMPLETIONS = {
+    style: ["casual", "formal", "technical", "friendly"],
+    temperature: ["0", "0.5", "0.7", "1.0"],
+    resourceId: ["1", "2", "3", "4", "5"],
+};
 const GetTinyImageSchema = z.object({});
+const AnnotatedMessageSchema = z.object({
+    messageType: z.enum(["error", "success", "debug"])
+        .describe("Type of message to demonstrate different annotation patterns"),
+    includeImage: z.boolean().default(false)
+        .describe("Whether to include an example image")
+});
 var ToolName;
 (function (ToolName) {
     ToolName["ECHO"] = "echo";
     ToolName["ADD"] = "add";
     ToolName["LONG_RUNNING_OPERATION"] = "longRunningOperation";
+    ToolName["PRINT_ENV"] = "printEnv";
     ToolName["SAMPLE_LLM"] = "sampleLLM";
     ToolName["GET_TINY_IMAGE"] = "getTinyImage";
+    ToolName["ANNOTATED_MESSAGE"] = "annotatedMessage";
 })(ToolName || (ToolName = {}));
 var PromptName;
 (function (PromptName) {
@@ -52,9 +67,9 @@ export const createServer = () => {
         },
     });
     let subscriptions = new Set();
-    let
+    let subsUpdateInterval;
     // Set up update interval for subscribed resources
-
+    subsUpdateInterval = setInterval(() => {
         for (const uri of subscriptions) {
             server.notification({
                 method: "notifications/resources/updated",
@@ -62,6 +77,32 @@ export const createServer = () => {
             });
         }
     }, 5000);
+    let logLevel = "debug";
+    let logsUpdateInterval;
+    const messages = [
+        { level: "debug", data: "Debug-level message" },
+        { level: "info", data: "Info-level message" },
+        { level: "notice", data: "Notice-level message" },
+        { level: "warning", data: "Warning-level message" },
+        { level: "error", data: "Error-level message" },
+        { level: "critical", data: "Critical-level message" },
+        { level: "alert", data: "Alert level-message" },
+        { level: "emergency", data: "Emergency-level message" }
+    ];
+    const isMessageIgnored = (level) => {
+        const currentLevel = messages.findIndex((msg) => logLevel === msg.level);
+        const messageLevel = messages.findIndex((msg) => level === msg.level);
+        return messageLevel < currentLevel;
+    };
+    // Set up update interval for random log messages
+    logsUpdateInterval = setInterval(() => {
+        let message = {
+            method: "notifications/message",
+            params: messages[Math.floor(Math.random() * messages.length)],
+        };
+        if (!isMessageIgnored(message.params.level))
+            server.notification(message);
+    }, 15000);
     // Helper method to request sampling from client
     const requestSampling = async (context, uri, maxTokens = 100) => {
         const request = {
@@ -243,6 +284,11 @@ export const createServer = () => {
                 description: "Adds two numbers",
                 inputSchema: zodToJsonSchema(AddSchema),
             },
+            {
+                name: ToolName.PRINT_ENV,
+                description: "Prints all environment variables, helpful for debugging MCP server configuration",
+                inputSchema: zodToJsonSchema(PrintEnvSchema),
+            },
             {
                 name: ToolName.LONG_RUNNING_OPERATION,
                 description: "Demonstrates a long running operation with progress updates",
@@ -258,6 +304,11 @@ export const createServer = () => {
                 description: "Returns the MCP_TINY_IMAGE",
                 inputSchema: zodToJsonSchema(GetTinyImageSchema),
             },
+            {
+                name: ToolName.ANNOTATED_MESSAGE,
+                description: "Demonstrates how annotations can be used to provide metadata about content",
+                inputSchema: zodToJsonSchema(AnnotatedMessageSchema),
+            },
         ];
         return { tools };
     });
@@ -308,12 +359,22 @@ export const createServer = () => {
                 ],
             };
         }
+        if (name === ToolName.PRINT_ENV) {
+            return {
+                content: [
+                    {
+                        type: "text",
+                        text: JSON.stringify(process.env, null, 2),
+                    },
+                ],
+            };
+        }
         if (name === ToolName.SAMPLE_LLM) {
             const validatedArgs = SampleLLMSchema.parse(args);
             const { prompt, maxTokens } = validatedArgs;
             const result = await requestSampling(prompt, ToolName.SAMPLE_LLM, maxTokens);
             return {
-                content: [{ type: "text", text: `LLM sampling result: ${result}` }],
+                content: [{ type: "text", text: `LLM sampling result: ${result.content.text}` }],
             };
         }
         if (name === ToolName.GET_TINY_IMAGE) {
@@ -336,25 +397,95 @@ export const createServer = () => {
                 ],
             };
         }
+        if (name === ToolName.ANNOTATED_MESSAGE) {
+            const { messageType, includeImage } = AnnotatedMessageSchema.parse(args);
+            const content = [];
+            // Main message with different priorities/audiences based on type
+            if (messageType === "error") {
+                content.push({
+                    type: "text",
+                    text: "Error: Operation failed",
+                    annotations: {
+                        priority: 1.0, // Errors are highest priority
+                        audience: ["user", "assistant"] // Both need to know about errors
+                    }
+                });
+            }
+            else if (messageType === "success") {
+                content.push({
+                    type: "text",
+                    text: "Operation completed successfully",
+                    annotations: {
+                        priority: 0.7, // Success messages are important but not critical
+                        audience: ["user"] // Success mainly for user consumption
+                    }
+                });
+            }
+            else if (messageType === "debug") {
+                content.push({
+                    type: "text",
+                    text: "Debug: Cache hit ratio 0.95, latency 150ms",
+                    annotations: {
+                        priority: 0.3, // Debug info is low priority
+                        audience: ["assistant"] // Technical details for assistant
+                    }
+                });
+            }
+            // Optional image with its own annotations
+            if (includeImage) {
+                content.push({
+                    type: "image",
+                    data: MCP_TINY_IMAGE,
+                    mimeType: "image/png",
+                    annotations: {
+                        priority: 0.5,
+                        audience: ["user"] // Images primarily for user visualization
+                    }
+                });
+            }
+            return { content };
+        }
         throw new Error(`Unknown tool: ${name}`);
     });
+    server.setRequestHandler(CompleteRequestSchema, async (request) => {
+        const { ref, argument } = request.params;
+        if (ref.type === "ref/resource") {
+            const resourceId = ref.uri.split("/").pop();
+            if (!resourceId)
+                return { completion: { values: [] } };
+            // Filter resource IDs that start with the input value
+            const values = EXAMPLE_COMPLETIONS.resourceId.filter(id => id.startsWith(argument.value));
+            return { completion: { values, hasMore: false, total: values.length } };
+        }
+        if (ref.type === "ref/prompt") {
+            // Handle completion for prompt arguments
+            const completions = EXAMPLE_COMPLETIONS[argument.name];
+            if (!completions)
+                return { completion: { values: [] } };
+            const values = completions.filter(value => value.startsWith(argument.value));
+            return { completion: { values, hasMore: false, total: values.length } };
+        }
+        throw new Error(`Unknown reference type`);
+    });
     server.setRequestHandler(SetLevelRequestSchema, async (request) => {
         const { level } = request.params;
+        logLevel = level;
         // Demonstrate different log levels
         await server.notification({
             method: "notifications/message",
             params: {
                 level: "debug",
                 logger: "test-server",
-                data: `Logging level set to: ${
+                data: `Logging level set to: ${logLevel}`,
             },
         });
         return {};
     });
     const cleanup = async () => {
-        if (
-        clearInterval(
-
+        if (subsUpdateInterval)
+            clearInterval(subsUpdateInterval);
+        if (logsUpdateInterval)
+            clearInterval(logsUpdateInterval);
     };
     return { server, cleanup };
 };
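
To see how the new `completion/complete` handler and the logging-level plumbing behave end to end, here is a hedged client-side sketch that continues the hypothetical client from the earlier example. The `complete`, `setLoggingLevel`, and `setNotificationHandler` calls, `LoggingMessageNotificationSchema`, and the `complex_prompt` prompt name are assumptions about the SDK client and server, not part of this diff; the completion values, URI template, and level filtering follow the handler code above.

```js
import { LoggingMessageNotificationSchema } from "@modelcontextprotocol/sdk/types.js";

// Assumes `client` is already connected as in the earlier sketch.

// Resource completion: the handler only checks that the URI's last path segment
// is non-empty, then prefix-filters EXAMPLE_COMPLETIONS.resourceId.
const res = await client.complete({
  ref: { type: "ref/resource", uri: "test://static/resource/{id}" }, // template URI assumed
  argument: { name: "resourceId", value: "3" },
});
// Expected: { completion: { values: ["3"], hasMore: false, total: 1 } }

// Prompt-argument completion: values come from EXAMPLE_COMPLETIONS[argument.name].
const style = await client.complete({
  ref: { type: "ref/prompt", name: "complex_prompt" }, // prompt name assumed
  argument: { name: "style", value: "f" },
});
// Expected values: ["formal", "friendly"]

// Logging: raising the level makes isMessageIgnored() drop the periodic
// debug/info/notice messages; warning and above still arrive every 15 seconds.
await client.setLoggingLevel("warning");
client.setNotificationHandler(LoggingMessageNotificationSchema, (n) => {
  console.log(n.params.level, n.params.data);
});
```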
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@modelcontextprotocol/server-everything",
-  "version": "0.6.2",
+  "version": "2025.3.19",
   "description": "MCP server that exercises all the features of the MCP protocol",
   "license": "MIT",
   "author": "Anthropic, PBC (https://anthropic.com)",
@@ -16,7 +16,9 @@
   "scripts": {
     "build": "tsc && shx chmod +x dist/*.js",
     "prepare": "npm run build",
-    "watch": "tsc --watch"
+    "watch": "tsc --watch",
+    "start": "node dist/index.js",
+    "start:sse": "node dist/sse.js"
   },
   "dependencies": {
     "@modelcontextprotocol/sdk": "1.0.1",