@modelcontextprotocol/server-everything 0.6.2 → 2025.1.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +11 -5
- package/dist/everything.js +45 -2
- package/package.json +1 -1
package/README.md
CHANGED
@@ -1,4 +1,4 @@
-# Everything MCP Server
+# Everything MCP Server
 
 This MCP server attempts to exercise all the features of the MCP protocol. It is not intended to be a useful server, but rather a test server for builders of MCP clients. It implements prompts, tools, resources, sampling, and more to showcase MCP capabilities.
 
@@ -15,7 +15,7 @@ This MCP server attempts to exercise all the features of the MCP protocol. It is
 2. `add`
    - Adds two numbers together
    - Inputs:
-     - `a` (number): First number
+     - `a` (number): First number
      - `b` (number): Second number
    - Returns: Text result of the addition
 
@@ -27,7 +27,7 @@ This MCP server attempts to exercise all the features of the MCP protocol. It is
    - Returns: Completion message with duration and steps
    - Sends progress notifications during execution
 
-4. `sampleLLM`
+4. `sampleLLM`
    - Demonstrates LLM sampling capability using MCP sampling feature
    - Inputs:
      - `prompt` (string): The prompt to send to the LLM
@@ -39,17 +39,23 @@ This MCP server attempts to exercise all the features of the MCP protocol. It is
    - No inputs required
    - Returns: Base64 encoded PNG image data
 
+6. `printEnv`
+   - Prints all environment variables
+   - Useful for debugging MCP server configuration
+   - No inputs required
+   - Returns: JSON string of all environment variables
+
 ### Resources
 
 The server provides 100 test resources in two formats:
-- Even numbered resources:
+- Even numbered resources:
   - Plaintext format
   - URI pattern: `test://static/resource/{even_number}`
   - Content: Simple text description
 
 - Odd numbered resources:
   - Binary blob format
-  - URI pattern: `test://static/resource/{odd_number}`
+  - URI pattern: `test://static/resource/{odd_number}`
   - Content: Base64 encoded binary data
 
 Resource features:
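For orientation, the sketch below shows roughly how an MCP client could exercise the new `printEnv` tool and the static resources documented above. It assumes the TypeScript SDK client API (`Client`, `StdioClientTransport`, `callTool`, `readResource`); the launch command and client name are illustrative.

```js
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

const client = new Client({ name: "example-client", version: "1.0.0" }, { capabilities: {} });
await client.connect(new StdioClientTransport({
  command: "npx",
  args: ["-y", "@modelcontextprotocol/server-everything"],
}));

// printEnv takes no arguments and returns the server's environment as a JSON string
const envResult = await client.callTool({ name: "printEnv", arguments: {} });
console.log(envResult.content[0].text);

// Static test resources follow the test://static/resource/{number} pattern
const resource = await client.readResource({ uri: "test://static/resource/1" });
console.log(resource.contents[0]);
```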
package/dist/everything.js
CHANGED
@@ -1,5 +1,5 @@
 import { Server } from "@modelcontextprotocol/sdk/server/index.js";
-import { CallToolRequestSchema, CreateMessageResultSchema, GetPromptRequestSchema, ListPromptsRequestSchema, ListResourcesRequestSchema, ListResourceTemplatesRequestSchema, ListToolsRequestSchema, ReadResourceRequestSchema, SetLevelRequestSchema, SubscribeRequestSchema, ToolSchema, UnsubscribeRequestSchema, } from "@modelcontextprotocol/sdk/types.js";
+import { CallToolRequestSchema, CompleteRequestSchema, CreateMessageResultSchema, GetPromptRequestSchema, ListPromptsRequestSchema, ListResourcesRequestSchema, ListResourceTemplatesRequestSchema, ListToolsRequestSchema, ReadResourceRequestSchema, SetLevelRequestSchema, SubscribeRequestSchema, ToolSchema, UnsubscribeRequestSchema, } from "@modelcontextprotocol/sdk/types.js";
 import { z } from "zod";
 import { zodToJsonSchema } from "zod-to-json-schema";
 const ToolInputSchema = ToolSchema.shape.inputSchema;
@@ -18,6 +18,7 @@ const LongRunningOperationSchema = z.object({
         .describe("Duration of the operation in seconds"),
     steps: z.number().default(5).describe("Number of steps in the operation"),
 });
+const PrintEnvSchema = z.object({});
 const SampleLLMSchema = z.object({
     prompt: z.string().describe("The prompt to send to the LLM"),
     maxTokens: z
@@ -25,12 +26,19 @@ const SampleLLMSchema = z.object({
         .default(100)
         .describe("Maximum number of tokens to generate"),
 });
+// Example completion values
+const EXAMPLE_COMPLETIONS = {
+    style: ["casual", "formal", "technical", "friendly"],
+    temperature: ["0", "0.5", "0.7", "1.0"],
+    resourceId: ["1", "2", "3", "4", "5"],
+};
 const GetTinyImageSchema = z.object({});
 var ToolName;
 (function (ToolName) {
     ToolName["ECHO"] = "echo";
     ToolName["ADD"] = "add";
     ToolName["LONG_RUNNING_OPERATION"] = "longRunningOperation";
+    ToolName["PRINT_ENV"] = "printEnv";
     ToolName["SAMPLE_LLM"] = "sampleLLM";
     ToolName["GET_TINY_IMAGE"] = "getTinyImage";
 })(ToolName || (ToolName = {}));
@@ -243,6 +251,11 @@ export const createServer = () => {
                 description: "Adds two numbers",
                 inputSchema: zodToJsonSchema(AddSchema),
             },
+            {
+                name: ToolName.PRINT_ENV,
+                description: "Prints all environment variables, helpful for debugging MCP server configuration",
+                inputSchema: zodToJsonSchema(PrintEnvSchema),
+            },
             {
                 name: ToolName.LONG_RUNNING_OPERATION,
                 description: "Demonstrates a long running operation with progress updates",
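With the entry above registered, the new tool should also surface in `tools/list`. A quick check, reusing the connected client from the earlier sketch (assumes the SDK's `listTools()` helper; the exact JSON Schema that `zodToJsonSchema` emits for an empty object may vary):

```js
const { tools } = await client.listTools();
const printEnvTool = tools.find((tool) => tool.name === "printEnv");
console.log(printEnvTool.description);
// "Prints all environment variables, helpful for debugging MCP server configuration"
console.log(printEnvTool.inputSchema);
// JSON Schema derived from PrintEnvSchema: an object schema with no properties
```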
@@ -308,12 +321,22 @@ export const createServer = () => {
                 ],
             };
         }
+        if (name === ToolName.PRINT_ENV) {
+            return {
+                content: [
+                    {
+                        type: "text",
+                        text: JSON.stringify(process.env, null, 2),
+                    },
+                ],
+            };
+        }
         if (name === ToolName.SAMPLE_LLM) {
             const validatedArgs = SampleLLMSchema.parse(args);
             const { prompt, maxTokens } = validatedArgs;
             const result = await requestSampling(prompt, ToolName.SAMPLE_LLM, maxTokens);
             return {
-                content: [{ type: "text", text: `LLM sampling result: ${result}` }],
+                content: [{ type: "text", text: `LLM sampling result: ${result.content.text}` }],
             };
         }
         if (name === ToolName.GET_TINY_IMAGE) {
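The `${result}` → `${result.content.text}` change is a bug fix: `requestSampling` resolves to an MCP sampling result object, so interpolating it directly (as 0.6.2 did) printed "[object Object]" instead of the generated text. A small sketch with made-up field values:

```js
// Shape of a sampling result per the MCP spec (values are illustrative)
const result = {
  role: "assistant",
  content: { type: "text", text: "Paris is the capital of France." },
  model: "example-model",
  stopReason: "endTurn",
};
console.log(`LLM sampling result: ${result}`);              // "LLM sampling result: [object Object]"
console.log(`LLM sampling result: ${result.content.text}`); // includes the actual generated text
```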
@@ -338,6 +361,26 @@ export const createServer = () => {
         }
         throw new Error(`Unknown tool: ${name}`);
     });
+    server.setRequestHandler(CompleteRequestSchema, async (request) => {
+        const { ref, argument } = request.params;
+        if (ref.type === "ref/resource") {
+            const resourceId = ref.uri.split("/").pop();
+            if (!resourceId)
+                return { completion: { values: [] } };
+            // Filter resource IDs that start with the input value
+            const values = EXAMPLE_COMPLETIONS.resourceId.filter(id => id.startsWith(argument.value));
+            return { completion: { values, hasMore: false, total: values.length } };
+        }
+        if (ref.type === "ref/prompt") {
+            // Handle completion for prompt arguments
+            const completions = EXAMPLE_COMPLETIONS[argument.name];
+            if (!completions)
+                return { completion: { values: [] } };
+            const values = completions.filter(value => value.startsWith(argument.value));
+            return { completion: { values, hasMore: false, total: values.length } };
+        }
+        throw new Error(`Unknown reference type`);
+    });
     server.setRequestHandler(SetLevelRequestSchema, async (request) => {
         const { level } = request.params;
         // Demonstrate different log levels
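The new `CompleteRequestSchema` handler serves `completion/complete` requests, filtering `EXAMPLE_COMPLETIONS` by the prefix the client has typed so far. A hedged round-trip sketch, reusing the client from the earlier example (assumes the SDK client exposes a `complete()` helper; the prompt name is an assumption, not taken from this diff):

```js
// Ask for completions of the "style" prompt argument, given the prefix "f"
const completion = await client.complete({
  ref: { type: "ref/prompt", name: "complex_prompt" }, // prompt name assumed for illustration
  argument: { name: "style", value: "f" },
});
// With EXAMPLE_COMPLETIONS.style = ["casual", "formal", "technical", "friendly"], the
// handler above returns { completion: { values: ["formal", "friendly"], hasMore: false, total: 2 } }
```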
package/package.json
CHANGED