@modelcontextprotocol/server-everything 0.1.0

package/README.md ADDED
@@ -0,0 +1,3 @@
+ # Everything server
+
+ This MCP server attempts to exercise all the features of the MCP protocol. It is not intended to be a useful server, but rather a test server for builders of MCP clients.
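Since the package is aimed at client builders, a client might launch and exercise this server over stdio roughly as follows. This sketch is not part of the published package; it assumes the client-side classes shipped with the same SDK (Client, StdioClientTransport), and the client name and use of npx are illustrative choices.

```js
// Editorial sketch (not part of the package): a minimal stdio client for this server.
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

const transport = new StdioClientTransport({
  command: "npx",
  args: ["-y", "@modelcontextprotocol/server-everything"],
});
const client = new Client(
  { name: "everything-test-client", version: "0.1.0" },   // illustrative client info
  { capabilities: { sampling: {} } },                      // sampling is used by later sketches
);
await client.connect(transport);
// The client can now exercise the handlers registered in dist/everything.js below.
```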
package/dist/everything.js ADDED
@@ -0,0 +1,361 @@
+ import { Server } from "@modelcontextprotocol/sdk/server/index.js";
+ import { CallToolRequestSchema, CreateMessageResultSchema, GetPromptRequestSchema, ListPromptsRequestSchema, ListResourcesRequestSchema, ListResourceTemplatesRequestSchema, ListToolsRequestSchema, ReadResourceRequestSchema, SetLevelRequestSchema, SubscribeRequestSchema, ToolSchema, UnsubscribeRequestSchema, } from "@modelcontextprotocol/sdk/types.js";
+ import { z } from "zod";
+ import { zodToJsonSchema } from "zod-to-json-schema";
+ const ToolInputSchema = ToolSchema.shape.inputSchema;
+ /* Input schemas for tools implemented in this server */
+ const EchoSchema = z.object({
+     message: z.string().describe("Message to echo"),
+ });
+ const AddSchema = z.object({
+     a: z.number().describe("First number"),
+     b: z.number().describe("Second number"),
+ });
+ const LongRunningOperationSchema = z.object({
+     duration: z
+         .number()
+         .default(10)
+         .describe("Duration of the operation in seconds"),
+     steps: z.number().default(5).describe("Number of steps in the operation"),
+ });
+ const SampleLLMSchema = z.object({
+     prompt: z.string().describe("The prompt to send to the LLM"),
+     maxTokens: z
+         .number()
+         .default(100)
+         .describe("Maximum number of tokens to generate"),
+ });
+ const GetTinyImageSchema = z.object({});
+ var ToolName;
+ (function (ToolName) {
+     ToolName["ECHO"] = "echo";
+     ToolName["ADD"] = "add";
+     ToolName["LONG_RUNNING_OPERATION"] = "longRunningOperation";
+     ToolName["SAMPLE_LLM"] = "sampleLLM";
+     ToolName["GET_TINY_IMAGE"] = "getTinyImage";
+ })(ToolName || (ToolName = {}));
+ var PromptName;
+ (function (PromptName) {
+     PromptName["SIMPLE"] = "simple_prompt";
+     PromptName["COMPLEX"] = "complex_prompt";
+ })(PromptName || (PromptName = {}));
+ export const createServer = () => {
+     const server = new Server({
+         name: "example-servers/everything",
+         version: "1.0.0",
+     }, {
+         capabilities: {
+             prompts: {},
+             resources: { subscribe: true },
+             tools: {},
+             logging: {},
+         },
+     });
+     let subscriptions = new Set();
+     let updateInterval;
+     // Set up update interval for subscribed resources
+     updateInterval = setInterval(() => {
+         for (const uri of subscriptions) {
+             server.notification({
+                 method: "notifications/resources/updated",
+                 params: { uri },
+             });
+         }
+     }, 5000);
+     // Helper method to request sampling from client
+     const requestSampling = async (context, uri, maxTokens = 100) => {
+         const request = {
+             method: "sampling/createMessage",
+             params: {
+                 messages: [
+                     {
+                         role: "user",
+                         content: {
+                             type: "text",
+                             text: `Resource ${uri} context: ${context}`,
+                         },
+                     },
+                 ],
+                 systemPrompt: "You are a helpful test server.",
+                 maxTokens,
+                 temperature: 0.7,
+                 includeContext: "thisServer",
+             },
+         };
+         return await server.request(request, CreateMessageResultSchema);
+     };
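The requestSampling helper above (and the sampleLLM tool later in this file) only succeeds against a client that declares the sampling capability and answers sampling/createMessage. Reusing the client from the first sketch, a test client might stub that roughly like this; the handler body and model name are illustrative assumptions, not part of this package.

```js
// Editorial sketch: stub out the server's sampling/createMessage requests on the client.
import { CreateMessageRequestSchema } from "@modelcontextprotocol/sdk/types.js";

client.setRequestHandler(CreateMessageRequestSchema, async (request) => {
  const lastMessage = request.params.messages.at(-1);
  return {
    model: "stub-model",   // illustrative value; a real client would call an actual model
    role: "assistant",
    content: {
      type: "text",
      text: `Canned reply to: ${JSON.stringify(lastMessage?.content)}`,
    },
  };
});
```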
+     const ALL_RESOURCES = Array.from({ length: 100 }, (_, i) => {
+         const uri = `test://static/resource/${i + 1}`;
+         if (i % 2 === 0) {
+             return {
+                 uri,
+                 name: `Resource ${i + 1}`,
+                 mimeType: "text/plain",
+                 text: `Resource ${i + 1}: This is a plaintext resource`,
+             };
+         }
+         else {
+             const buffer = Buffer.from(`Resource ${i + 1}: This is a base64 blob`);
+             return {
+                 uri,
+                 name: `Resource ${i + 1}`,
+                 mimeType: "application/octet-stream",
+                 blob: buffer.toString("base64"),
+             };
+         }
+     });
+     const PAGE_SIZE = 10;
+     server.setRequestHandler(ListResourcesRequestSchema, async (request) => {
+         const cursor = request.params?.cursor;
+         let startIndex = 0;
+         if (cursor) {
+             const decodedCursor = parseInt(atob(cursor), 10);
+             if (!isNaN(decodedCursor)) {
+                 startIndex = decodedCursor;
+             }
+         }
+         const endIndex = Math.min(startIndex + PAGE_SIZE, ALL_RESOURCES.length);
+         const resources = ALL_RESOURCES.slice(startIndex, endIndex);
+         let nextCursor;
+         if (endIndex < ALL_RESOURCES.length) {
+             nextCursor = btoa(endIndex.toString());
+         }
+         return {
+             resources,
+             nextCursor,
+         };
+     });
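The cursor here is nothing more than a base64-encoded start index, ten resources per page. Continuing with the client from the first sketch, paging through all 100 test resources might look roughly like this (ListResourcesResultSchema comes from the SDK's types module; the loop itself is illustrative):

```js
// Editorial sketch: walk the base64-index cursor until the server stops returning one.
import { ListResourcesResultSchema } from "@modelcontextprotocol/sdk/types.js";

const all = [];
let cursor;
do {
  const page = await client.request(
    { method: "resources/list", params: cursor ? { cursor } : {} },
    ListResourcesResultSchema,
  );
  all.push(...page.resources);   // at most PAGE_SIZE (10) entries per call
  cursor = page.nextCursor;      // btoa("10"), btoa("20"), ... and finally undefined
} while (cursor !== undefined);
// all.length should now be 100.
```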
+     server.setRequestHandler(ListResourceTemplatesRequestSchema, async () => {
+         return {
+             resourceTemplates: [
+                 {
+                     uriTemplate: "test://static/resource/{id}",
+                     name: "Static Resource",
+                     description: "A static resource with a numeric ID",
+                 },
+             ],
+         };
+     });
+     server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
+         const uri = request.params.uri;
+         if (uri.startsWith("test://static/resource/")) {
+             const index = parseInt(uri.split("/").pop() ?? "", 10) - 1;
+             if (index >= 0 && index < ALL_RESOURCES.length) {
+                 const resource = ALL_RESOURCES[index];
+                 return {
+                     contents: [resource],
+                 };
+             }
+         }
+         throw new Error(`Unknown resource: ${uri}`);
+     });
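Because even-indexed entries of ALL_RESOURCES are plaintext and odd-indexed entries are blobs, odd-numbered URIs (resource/1, resource/3, ...) return text contents and even-numbered URIs return base64 blobs. A quick check against the handler above, again using the client from the first sketch (the URIs are arbitrary choices):

```js
// Editorial sketch: read one text resource and one blob resource.
import { ReadResourceResultSchema } from "@modelcontextprotocol/sdk/types.js";

const text = await client.request(
  { method: "resources/read", params: { uri: "test://static/resource/1" } },
  ReadResourceResultSchema,
);
// text.contents[0] has mimeType "text/plain" and a `text` field.

const blob = await client.request(
  { method: "resources/read", params: { uri: "test://static/resource/2" } },
  ReadResourceResultSchema,
);
// blob.contents[0] has mimeType "application/octet-stream" and a base64 `blob` field.
```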
+     server.setRequestHandler(SubscribeRequestSchema, async (request) => {
+         const { uri } = request.params;
+         subscriptions.add(uri);
+         // Request sampling from client when someone subscribes
+         await requestSampling("A new subscription was started", uri);
+         return {};
+     });
+     server.setRequestHandler(UnsubscribeRequestSchema, async (request) => {
+         subscriptions.delete(request.params.uri);
+         return {};
+     });
+     server.setRequestHandler(ListPromptsRequestSchema, async () => {
+         return {
+             prompts: [
+                 {
+                     name: PromptName.SIMPLE,
+                     description: "A prompt without arguments",
+                 },
+                 {
+                     name: PromptName.COMPLEX,
+                     description: "A prompt with arguments",
+                     arguments: [
+                         {
+                             name: "temperature",
+                             description: "Temperature setting",
+                             required: true,
+                         },
+                         {
+                             name: "style",
+                             description: "Output style",
+                             required: false,
+                         },
+                     ],
+                 },
+             ],
+         };
+     });
+     server.setRequestHandler(GetPromptRequestSchema, async (request) => {
+         const { name, arguments: args } = request.params;
+         if (name === PromptName.SIMPLE) {
+             return {
+                 messages: [
+                     {
+                         role: "user",
+                         content: {
+                             type: "text",
+                             text: "This is a simple prompt without arguments.",
+                         },
+                     },
+                 ],
+             };
+         }
+         if (name === PromptName.COMPLEX) {
+             return {
+                 messages: [
+                     {
+                         role: "user",
+                         content: {
+                             type: "text",
+                             text: `This is a complex prompt with arguments: temperature=${args?.temperature}, style=${args?.style}`,
+                         },
+                     },
+                     {
+                         role: "assistant",
+                         content: {
+                             type: "text",
+                             text: "I understand. You've provided a complex prompt with temperature and style arguments. How would you like me to proceed?",
+                         },
+                     },
+                     {
+                         role: "user",
+                         content: {
+                             type: "image",
+                             data: MCP_TINY_IMAGE,
+                             mimeType: "image/png",
+                         },
+                     },
+                 ],
+             };
+         }
+         throw new Error(`Unknown prompt: ${name}`);
+     });
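Prompt arguments arrive as strings; the complex prompt interpolates them into its first message and ends with the embedded PNG. A rough prompts/get call with the client from the first sketch (argument values are arbitrary):

```js
// Editorial sketch: fetch the complex prompt with both arguments supplied.
import { GetPromptResultSchema } from "@modelcontextprotocol/sdk/types.js";

const prompt = await client.request(
  {
    method: "prompts/get",
    params: {
      name: "complex_prompt",
      arguments: { temperature: "0.7", style: "terse" },   // illustrative values
    },
  },
  GetPromptResultSchema,
);
// prompt.messages[0] interpolates the arguments; prompt.messages[2] carries the tiny PNG.
```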
+     server.setRequestHandler(ListToolsRequestSchema, async () => {
+         const tools = [
+             {
+                 name: ToolName.ECHO,
+                 description: "Echoes back the input",
+                 inputSchema: zodToJsonSchema(EchoSchema),
+             },
+             {
+                 name: ToolName.ADD,
+                 description: "Adds two numbers",
+                 inputSchema: zodToJsonSchema(AddSchema),
+             },
+             {
+                 name: ToolName.LONG_RUNNING_OPERATION,
+                 description: "Demonstrates a long running operation with progress updates",
+                 inputSchema: zodToJsonSchema(LongRunningOperationSchema),
+             },
+             {
+                 name: ToolName.SAMPLE_LLM,
+                 description: "Samples from an LLM using MCP's sampling feature",
+                 inputSchema: zodToJsonSchema(SampleLLMSchema),
+             },
+             {
+                 name: ToolName.GET_TINY_IMAGE,
+                 description: "Returns the MCP_TINY_IMAGE",
+                 inputSchema: zodToJsonSchema(GetTinyImageSchema),
+             },
+         ];
+         return { tools };
+     });
+     server.setRequestHandler(CallToolRequestSchema, async (request) => {
+         const { name, arguments: args } = request.params;
+         if (name === ToolName.ECHO) {
+             const validatedArgs = EchoSchema.parse(args);
+             return {
+                 content: [{ type: "text", text: `Echo: ${validatedArgs.message}` }],
+             };
+         }
+         if (name === ToolName.ADD) {
+             const validatedArgs = AddSchema.parse(args);
+             const sum = validatedArgs.a + validatedArgs.b;
+             return {
+                 content: [
+                     {
+                         type: "text",
+                         text: `The sum of ${validatedArgs.a} and ${validatedArgs.b} is ${sum}.`,
+                     },
+                 ],
+             };
+         }
+         if (name === ToolName.LONG_RUNNING_OPERATION) {
+             const validatedArgs = LongRunningOperationSchema.parse(args);
+             const { duration, steps } = validatedArgs;
+             const stepDuration = duration / steps;
+             const progressToken = request.params._meta?.progressToken;
+             for (let i = 1; i < steps + 1; i++) {
+                 await new Promise((resolve) => setTimeout(resolve, stepDuration * 1000));
+                 if (progressToken !== undefined) {
+                     await server.notification({
+                         method: "notifications/progress",
+                         params: {
+                             progress: i,
+                             total: steps,
+                             progressToken,
+                         },
+                     });
+                 }
+             }
+             return {
+                 content: [
+                     {
+                         type: "text",
+                         text: `Long running operation completed. Duration: ${duration} seconds, Steps: ${steps}.`,
+                     },
+                 ],
+             };
+         }
+         if (name === ToolName.SAMPLE_LLM) {
+             const validatedArgs = SampleLLMSchema.parse(args);
+             const { prompt, maxTokens } = validatedArgs;
+             const result = await requestSampling(prompt, ToolName.SAMPLE_LLM, maxTokens);
+             return {
+                 content: [{ type: "text", text: `LLM sampling result: ${result}` }],
+             };
+         }
+         if (name === ToolName.GET_TINY_IMAGE) {
+             GetTinyImageSchema.parse(args);
+             return {
+                 content: [
+                     {
+                         type: "text",
+                         text: "This is a tiny image:",
+                     },
+                     {
+                         type: "image",
+                         data: MCP_TINY_IMAGE,
+                         mimeType: "image/png",
+                     },
+                     {
+                         type: "text",
+                         text: "The image above is the MCP tiny image.",
+                     },
+                 ],
+             };
+         }
+         throw new Error(`Unknown tool: ${name}`);
+     });
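On the client side, tool arguments go under params.arguments, and the longRunningOperation tool only emits notifications/progress when the request carries a _meta.progressToken. A rough sketch with the client from the first sketch (token and argument values are arbitrary):

```js
// Editorial sketch: call the add tool, then drive longRunningOperation with a progress token.
import { CallToolResultSchema, ProgressNotificationSchema } from "@modelcontextprotocol/sdk/types.js";

const sum = await client.request(
  { method: "tools/call", params: { name: "add", arguments: { a: 2, b: 3 } } },
  CallToolResultSchema,
);
// sum.content[0].text === "The sum of 2 and 3 is 5."

client.setNotificationHandler(ProgressNotificationSchema, (notification) => {
  console.log("progress", notification.params.progress, "/", notification.params.total);
});
await client.request(
  {
    method: "tools/call",
    params: {
      name: "longRunningOperation",
      arguments: { duration: 2, steps: 4 },
      _meta: { progressToken: "demo-1" },   // illustrative token
    },
  },
  CallToolResultSchema,
);
```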
+     server.setRequestHandler(SetLevelRequestSchema, async (request) => {
+         const { level } = request.params;
+         // Demonstrate different log levels
+         await server.notification({
+             method: "notifications/message",
+             params: {
+                 level: "debug",
+                 logger: "test-server",
+                 data: `Logging level set to: ${level}`,
+             },
+         });
+         return {};
+     });
+     const cleanup = async () => {
+         if (updateInterval) {
+             clearInterval(updateInterval);
+         }
+     };
+     return { server, cleanup };
+ };
+ const MCP_TINY_IMAGE = "iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAKsGlDQ1BJQ0MgUHJvZmlsZQAASImVlwdUU+kSgOfe9JDQEiIgJfQmSCeAlBBaAAXpYCMkAUKJMRBU7MriClZURLCs6KqIgo0idizYFsWC3QVZBNR1sWDDlXeBQ9jdd9575805c+a7c+efmf+e/z9nLgCdKZDJMlF1gCxpjjwyyI8dn5DIJvUABRiY0kBdIMyWcSMiwgCTUft3+dgGyJC9YzuU69/f/1fREImzhQBIBMbJomxhFsbHMe0TyuQ5ALg9mN9kbo5siK9gzJRjDWL8ZIhTR7hviJOHGY8fjomO5GGsDUCmCQTyVACaKeZn5wpTsTw0f4ztpSKJFGPsGbyzsmaLMMbqgiUWI8N4KD8n+S95Uv+WM1mZUyBIVfLIXoaF7C/JlmUK5v+fn+N/S1amYrSGOaa0NHlwJGaxvpAHGbNDlSxNnhI+yhLRcPwwpymCY0ZZmM1LHGWRwD9UuTZzStgop0gC+co8OfzoURZnB0SNsnx2pLJWipzHHWWBfKyuIiNG6U8T85X589Ki40Y5VxI7ZZSzM6JCx2J4Sr9cEansXywN8hurG6jce1b2X/Yr4SvX5qRFByv3LhjrXyzljuXMjlf2JhL7B4zFxCjjZTl+ylqyzAhlvDgzSOnPzo1Srs3BDuTY2gjlN0wXhESMMoRBELAhBjIhB+QggECQgBTEOeJ5Q2cUeLNl8+WS1LQcNhe7ZWI2Xyq0m8B2tHd0Bhi6syNH4j1r+C4irGtjvhWVAF4nBgcHT475Qm4BHEkCoNaO+SxnAKh3A1w5JVTIc0d8Q9cJCEAFNWCCDhiACViCLTiCK3iCLwRACIRDNCTATBBCGmRhnc+FhbAMCqAI1sNmKIOdsBv2wyE4CvVwCs7DZbgOt+AePIZ26IJX0AcfYQBBEBJCRxiIDmKImCE2iCPCQbyRACQMiUQSkCQkFZEiCmQhsgIpQoqRMmQXUokcQU4g55GrSCvyEOlAepF3yFcUh9JQJqqPmqMTUQ7KRUPRaHQGmorOQfPQfHQtWopWoAfROvQ8eh29h7ajr9B+HOBUcCycEc4Wx8HxcOG4RFwKTo5bjCvEleAqcNW4Rlwz7g6uHfca9wVPxDPwbLwt3hMfjI/BC/Fz8Ivxq/Fl+P34OvxF/B18B74P/51AJ+gRbAgeBD4hnpBKmEsoIJQQ9hJqCZcI9whdhI9EIpFFtCC6EYOJCcR04gLiauJ2Yg3xHLGV2EnsJ5FIOiQbkhcpnCQg5ZAKSFtJB0lnSbdJXaTPZBWyIdmRHEhOJEvJy8kl5APkM+Tb5G7yAEWdYkbxoIRTRJT5lHWUPZRGyk1KF2WAqkG1oHpRo6np1GXUUmo19RL1CfW9ioqKsYq7ylQVicpSlVKVwypXVDpUvtA0adY0Hm06TUFbS9tHO0d7SHtPp9PN6b70RHoOfS29kn6B/oz+WZWhaqfKVxWpLlEtV61Tva36Ro2iZqbGVZuplqdWonZM7abaa3WKurk6T12gvli9XP2E+n31fg2GhoNGuEaWxmqNAxpXNXo0SZrmmgGaIs18zd2aFzQ7GTiGCYPHEDJWMPYwLjG6mESmBZPPTGcWMQ8xW5h9WppazlqxWvO0yrVOa7WzcCxzFp+VyVrHOspqY30dpz+OO048btW46nG3x33SHq/tqy3WLtSu0b6n/VWHrROgk6GzQade56kuXtdad6ruXN0dupd0X49njvccLxxfOP7o+Ed6qJ61XqTeAr3dejf0+vUN9IP0Zfpb9S/ovzZgGfgapBtsMjhj0GvIMPQ2lBhuMjxr+JKtxeayM9ml7IvsPiM9o2AjhdEuoxajAWML4xjj5cY1xk9NqCYckxSTTSZNJn2mhqaTTReaVpk+MqOYcczSzLaYNZt9MrcwjzNfaV5v3mOhbcG3yLOosnhiSbf0sZxjWWF514poxbHKsNpudcsatXaxTrMut75pg9q42khsttu0TiBMcJ8gnVAx4b4tzZZrm2tbZdthx7ILs1tuV2/3ZqLpxMSJGyY2T/xu72Kfab/H/rGDpkOIw3KHRod3jtaOQsdyx7tOdKdApyVODU5vnW2cxc47nB+4MFwmu6x0aXL509XNVe5a7drrZuqW5LbN7T6HyYngrOZccSe4+7kvcT/l/sXD1SPH46jHH562nhmeBzx7JllMEk/aM6nTy9hL4LXLq92b7Z3k/ZN3u4+Rj8Cnwue5r4mvyHevbzfXipvOPch942fvJ/er9fvE8+At4p3zx/kH+Rf6twRoBsQElAU8CzQOTA2sCuwLcglaEHQumBAcGrwh+D5fny/kV/L7QtxCFoVcDKWFRoWWhT4Psw6ThzVORieHTN44+ckUsynSKfXhEM4P3xj+NMIiYk7EyanEqRFTy6e+iHSIXBjZHMWImhV1IOpjtF/0uujHMZYxipimWLXY6bGVsZ/i/OOK49rjJ8Yvir+eoJsgSWhIJCXGJu5N7J8WMG3ztK7pLtMLprfNsJgxb8bVmbozM2eenqU2SzDrWBIhKS7pQNI3QbigQtCfzE/eltwn5Am3CF+JfEWbRL1iL3GxuDvFK6U4pSfVK3Vjam+aT1pJ2msJT1ImeZsenL4z/VNGeMa+jMHMuMyaLHJWUtYJqaY0Q3pxtsHsebNbZTayAln7HI85m+f0yUPle7OR7BnZDTlMbDi6obBU/KDoyPXOLc/9PDd27rF5GvOk827Mt56/an53XmDezwvwC4QLmhYaLVy2sGMRd9Guxcji5MVNS0yW5C/pWhq0dP8y6rKMZb8st19evPzDirgVjfn6+UvzO38I+qGqQLVAXnB/pefKnT/if5T82LLKadXWVd8LRYXXiuyLSoq+rRauvrbGYU3pmsG1KWtb1rmu27GeuF66vm2Dz4b9xRrFecWdGydvrNvE3lS46cPmWZuvljiX7NxC3aLY0l4aVtqw1XTr+q3fytLK7pX7ldds09u2atun7aLtt3f47qjeqb+zaOfXnyQ/PdgVtKuuwryiZDdxd+7uF3ti9zT/zPm5cq/u3qK9f+6T7mvfH7n/YqVbZeUBvQPrqtAqRVXvwekHbx3yP9RQbVu9q4ZVU3QYDisOvzySdKTtaOjRpmOcY9XHzY5vq2XUFtYhdfPr+urT6tsbEhpaT4ScaGr0bKw9aXdy3ymjU+WntU6vO0M9k39m8Gze2f5zsnOvz6ee72ya1fT4QvyFuxenXmy5FHrpyuXAyxeauc1nr3hdOXXV4+qJa5xr9dddr9fdcLlR+4vLL7Utri11N91uNtzyv9XYOqn1zG2f2+fv+N+5fJd/9/q9Kfda22LaHtyffr/9gehBz8PMh28f5T4aeLz0CeFJ4VP1pyXP9J5V/Gr1a027a/vpDv+OG8+jnj/uFHa++i37t29d+S/oL0q6Dbsrexx7TvUG9t56Oe1l1yvZq4HXBb9r/L7tjeWb43/4/nGjL76v66387eC71e913u/74PyhqT+i/9nHrI8Dnwo/63ze/4Xzpflr3
NfugbnfSN9K/7T6s/F76Pcng1mDgzKBXDA8CuAwRVNSAN7tA6AnADCwGYI6bWSmHhZk5D9gmOA/8cjcPSyuANWYGRqNeOcADmNqvhRAzRdgaCyK9gXUyUmpo/Pv8Kw+JAbYv8K0HECi2x6tebQU/iEjc/xf+v6nBWXWv9l/AV0EC6JTIblRAAAAeGVYSWZNTQAqAAAACAAFARIAAwAAAAEAAQAAARoABQAAAAEAAABKARsABQAAAAEAAABSASgAAwAAAAEAAgAAh2kABAAAAAEAAABaAAAAAAAAAJAAAAABAAAAkAAAAAEAAqACAAQAAAABAAAAFKADAAQAAAABAAAAFAAAAAAXNii1AAAACXBIWXMAABYlAAAWJQFJUiTwAAAB82lUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNi4wLjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx0aWZmOllSZXNvbHV0aW9uPjE0NDwvdGlmZjpZUmVzb2x1dGlvbj4KICAgICAgICAgPHRpZmY6T3JpZW50YXRpb24+MTwvdGlmZjpPcmllbnRhdGlvbj4KICAgICAgICAgPHRpZmY6WFJlc29sdXRpb24+MTQ0PC90aWZmOlhSZXNvbHV0aW9uPgogICAgICAgICA8dGlmZjpSZXNvbHV0aW9uVW5pdD4yPC90aWZmOlJlc29sdXRpb25Vbml0PgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KReh49gAAAjRJREFUOBGFlD2vMUEUx2clvoNCcW8hCqFAo1dKhEQpvsF9KrWEBh/ALbQ0KkInBI3SWyGPCCJEQliXgsTLefaca/bBWjvJzs6cOf/fnDkzOQJIjWm06/XKBEGgD8c6nU5VIWgBtQDPZPWtJE8O63a7LBgMMo/Hw0ql0jPjcY4RvmqXy4XMjUYDUwLtdhtmsxnYbDbI5/O0djqdFFKmsEiGZ9jP9gem0yn0ej2Yz+fg9XpfycimAD7DttstQTDKfr8Po9GIIg6Hw1Cr1RTgB+A72GAwgMPhQLBMJgNSXsFqtUI2myUo18pA6QJogefsPrLBX4QdCVatViklw+EQRFGEj88P2O12pEUGATmsXq+TaLPZ0AXgMRF2vMEqlQoJTSYTpNNpApvNZliv1/+BHDaZTAi2Wq1A3Ig0xmMej7+RcZjdbodUKkWAaDQK+GHjHPnImB88JrZIJAKFQgH2+z2BOczhcMiwRCIBgUAA+NN5BP6mj2DYff35gk6nA61WCzBn2JxO5wPM7/fLz4vD0E+OECfn8xl/0Gw2KbLxeAyLxQIsFgt8p75pDSO7h/HbpUWpewCike9WLpfB7XaDy+WCYrFI/slk8i0MnRRAUt46hPMI4vE4+Hw+ec7t9/44VgWigEeby+UgFArJWjUYOqhWG6x50rpcSfR6PVUfNOgEVRlTX0HhrZBKz4MZjUYWi8VoA+lc9H/VaRZYjBKrtXR8tlwumcFgeMWRbZpA9ORQWfVm8A/FsrLaxebd5wAAAABJRU5ErkJggg==";
package/dist/index.js ADDED
@@ -0,0 +1,18 @@
+ #!/usr/bin/env node
+ import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
+ import { createServer } from "./everything.js";
+ async function main() {
+     const transport = new StdioServerTransport();
+     const { server, cleanup } = createServer();
+     await server.connect(transport);
+     // Cleanup on exit
+     process.on("SIGINT", async () => {
+         await cleanup();
+         await server.close();
+         process.exit(0);
+     });
+ }
+ main().catch((error) => {
+     console.error("Server error:", error);
+     process.exit(1);
+ });
package/dist/sse.js ADDED
@@ -0,0 +1,24 @@
+ import { SSEServerTransport } from "@modelcontextprotocol/sdk/server/sse.js";
+ import express from "express";
+ import { createServer } from "./everything.js";
+ const app = express();
+ const { server, cleanup } = createServer();
+ let transport;
+ app.get("/sse", async (req, res) => {
+     console.log("Received connection");
+     transport = new SSEServerTransport("/message", res);
+     await server.connect(transport);
+     server.onclose = async () => {
+         await cleanup();
+         await server.close();
+         process.exit(0);
+     };
+ });
+ app.post("/message", async (req, res) => {
+     console.log("Received message");
+     await transport.handlePostMessage(req, res);
+ });
+ const PORT = process.env.PORT || 3001;
+ app.listen(PORT, () => {
+     console.log(`Server is running on port ${PORT}`);
+ });
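dist/sse.js exposes the same server over HTTP: GET /sse opens the event stream and POST /message carries client-to-server messages. Assuming the SDK also ships a matching SSE client transport (SSEClientTransport from @modelcontextprotocol/sdk/client/sse.js, constructed from the /sse URL), connecting might look roughly like this; this is a sketch, not part of the package:

```js
// Editorial sketch: connect to the Express app above (assumes it is listening on port 3001).
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js";

const sseClient = new Client(
  { name: "everything-sse-client", version: "0.1.0" },
  { capabilities: {} },
);
await sseClient.connect(new SSEClientTransport(new URL("http://localhost:3001/sse")));
```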
package/package.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "name": "@modelcontextprotocol/server-everything",
+   "version": "0.1.0",
+   "description": "MCP server that exercises all the features of the MCP protocol",
+   "license": "MIT",
+   "author": "Anthropic, PBC (https://anthropic.com)",
+   "homepage": "https://modelcontextprotocol.io",
+   "bugs": "https://github.com/modelcontextprotocol/servers/issues",
+   "type": "module",
+   "bin": {
+     "mcp-server-everything": "dist/index.js"
+   },
+   "files": [
+     "dist"
+   ],
+   "scripts": {
+     "build": "tsc && shx chmod +x dist/*.js",
+     "prepare": "npm run build",
+     "watch": "tsc --watch"
+   },
+   "dependencies": {
+     "@modelcontextprotocol/sdk": "0.5.0",
+     "express": "^4.21.1",
+     "zod": "^3.23.8",
+     "zod-to-json-schema": "^3.23.5"
+   },
+   "devDependencies": {
+     "@types/express": "^5.0.0",
+     "shx": "^0.3.4",
+     "typescript": "^5.6.2"
+   }
+ }