@smithery/sdk 0.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +674 -0
- package/README.md +126 -0
- package/dist/examples/shell.d.ts +1 -0
- package/dist/examples/shell.js +78 -0
- package/dist/examples/simple.d.ts +1 -0
- package/dist/examples/simple.js +60 -0
- package/dist/index.d.ts +147 -0
- package/dist/index.js +75 -0
- package/dist/integrations/llm/anthropic.d.ts +11 -0
- package/dist/integrations/llm/anthropic.js +49 -0
- package/dist/integrations/llm/openai.d.ts +12 -0
- package/dist/integrations/llm/openai.js +46 -0
- package/dist/types.d.ts +193 -0
- package/dist/types.js +41 -0
- package/package.json +39 -0
package/README.md
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
1
|
+
# Smithery Typescript Framework [](https://badge.fury.io/js/@smithery%2Fsdk)
|
|
2
|
+
|
|
3
|
+
Smithery is a Typescript framework that easily connects language models (LLMs) to [Model Context Protocols](https://modelcontextprotocol.io/) (MCPs), allowing you to build agents that use resources and tools without being overwhelmed by JSON schemas.
|
|
4
|
+
|
|
5
|
+
⚠️ _This repository is work in progress and in alpha. Not recommended for production use yet._ ⚠️
|
|
6
|
+
|
|
7
|
+
**Key Features**
|
|
8
|
+
|
|
9
|
+
- Connect to multiple MCPs with a single client
|
|
10
|
+
- Tool schema is handled automatically for the LLM
|
|
11
|
+
- Supports LLM reasoning through multiple tool calls
|
|
12
|
+
|
|
13
|
+
# Quickstart
|
|
14
|
+
|
|
15
|
+
## Installation
|
|
16
|
+
|
|
17
|
+
```bash
|
|
18
|
+
npm install @smithery/sdk
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
## Usage
|
|
22
|
+
|
|
23
|
+
In this example, we'll connect the OpenAI client to Exa search capabilities.
|
|
24
|
+
|
|
25
|
+
```bash
|
|
26
|
+
npm install @smithery/mcp-exa
|
|
27
|
+
```
|
|
28
|
+
|
|
29
|
+
The following code sets up OpenAI and connects to an Exa MCP server. In this case, we're running the server locally within the same process, so it's just a simple passthrough.
|
|
30
|
+
|
|
31
|
+
```typescript
|
|
32
|
+
import { Connection } from "@smithery/sdk"
|
|
33
|
+
import { OpenAIHandler } from "@smithery/sdk/openai"
|
|
34
|
+
import * as exa from "@smithery/mcp-exa"
|
|
35
|
+
import { OpenAI } from "openai"
|
|
36
|
+
|
|
37
|
+
const openai = new OpenAI()
|
|
38
|
+
const connection = await Connection.connect({
|
|
39
|
+
exa: {
|
|
40
|
+
server: exa.createServer({
|
|
41
|
+
apiKey: process.env.EXA_API_KEY,
|
|
42
|
+
}),
|
|
43
|
+
},
|
|
44
|
+
})
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
Now you can make your LLM aware of the available tools from Exa.
|
|
48
|
+
|
|
49
|
+
```typescript
|
|
50
|
+
// Create a handler
|
|
51
|
+
const handler = new OpenAIHandler(connection)
|
|
52
|
+
const response = await openai.chat.completions.create({
|
|
53
|
+
model: "gpt-4o-mini",
|
|
54
|
+
messages: [{ role: "user", content: "In 2024, did OpenAI release GPT-5?" }],
|
|
55
|
+
// Pass the tools to OpenAI call
|
|
56
|
+
tools: await handler.listTools(),
|
|
57
|
+
})
|
|
58
|
+
// Obtain the tool outputs as new messages
|
|
59
|
+
const toolMessages = await handler.call(response)
|
|
60
|
+
```
|
|
61
|
+
|
|
62
|
+
Using this, you can easily enable your LLM to call tools and obtain the results.
|
|
63
|
+
|
|
64
|
+
However, it's often the case where your LLM needs to call a tool, see its response, and continue processing output of the tool in order to give you a final response.
|
|
65
|
+
|
|
66
|
+
In this case, you have to call your LLM in a loop, updating your messages until no more tool messages are returned.
|
|
67
|
+
|
|
68
|
+
Example:
|
|
69
|
+
|
|
70
|
+
```typescript
|
|
71
|
+
let messages: ChatCompletionMessageParam[] = [
|
|
72
|
+
{
|
|
73
|
+
role: "user",
|
|
74
|
+
content:
|
|
75
|
+
"Deduce Obama's age in number of days. It's November 28, 2024 today. Search to ensure correctness.",
|
|
76
|
+
},
|
|
77
|
+
]
|
|
78
|
+
const handler = new OpenAIHandler(connection)
|
|
79
|
+
|
|
80
|
+
let isDone = false

while (!isDone) {
|
|
81
|
+
const response = await openai.chat.completions.create({
|
|
82
|
+
model: "gpt-4o-mini",
|
|
83
|
+
messages,
|
|
84
|
+
tools: await handler.listTools(),
|
|
85
|
+
})
|
|
86
|
+
// Handle tool calls
|
|
87
|
+
const toolMessages = await handler.call(response)
|
|
88
|
+
|
|
89
|
+
// Append new messages
|
|
90
|
+
messages.push(response.choices[0].message)
|
|
91
|
+
messages.push(...toolMessages)
|
|
92
|
+
isDone = toolMessages.length === 0
|
|
93
|
+
}
|
|
94
|
+
```
|
|
95
|
+
|
|
96
|
+
See a full example in the [examples](./src/examples) directory.
|
|
97
|
+
|
|
98
|
+
# Troubleshooting
|
|
99
|
+
|
|
100
|
+
```
|
|
101
|
+
Error: ReferenceError: EventSource is not defined
|
|
102
|
+
```
|
|
103
|
+
|
|
104
|
+
This error means you're trying to use the EventSource API (which is typically available in the browser) from Node. You'll have to install the following to use it:
|
|
105
|
+
|
|
106
|
+
```bash
|
|
107
|
+
npm install eventsource
|
|
108
|
+
npm install -D @types/eventsource
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
Patch the global EventSource object:
|
|
112
|
+
|
|
113
|
+
```typescript
|
|
114
|
+
import EventSource from "eventsource"
|
|
115
|
+
global.EventSource = EventSource as any
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
# Contributing
|
|
119
|
+
Developing locally:
|
|
120
|
+
```sh
|
|
121
|
+
npm link -ws --include-workspace-root
|
|
122
|
+
```
|
|
123
|
+
Version bumping:
|
|
124
|
+
```sh
|
|
125
|
+
npm version patch -ws --include-workspace-root
|
|
126
|
+
```
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
import * as shellMcp from "@unroute/mcp-shell";
|
|
2
|
+
import dotenv from "dotenv";
|
|
3
|
+
import EventSource from "eventsource";
|
|
4
|
+
import { OpenAI } from "openai";
|
|
5
|
+
import { Connection } from "../index.js";
|
|
6
|
+
import { OpenAIHandler } from "../integrations/llm/openai.js";
|
|
7
|
+
import url from "node:url";
|
|
8
|
+
import readline from "node:readline";
|
|
9
|
+
// Utility for human approval
|
|
10
|
+
async function getHumanApproval(command, args) {
|
|
11
|
+
const rl = readline.createInterface({
|
|
12
|
+
input: process.stdin,
|
|
13
|
+
output: process.stdout,
|
|
14
|
+
});
|
|
15
|
+
return new Promise((resolve) => {
|
|
16
|
+
rl.question(`Command: ${command} ${args.join(" ")}\nApprove? [y/N]: `, (answer) => {
|
|
17
|
+
rl.close();
|
|
18
|
+
resolve(answer.toLowerCase() === "y");
|
|
19
|
+
});
|
|
20
|
+
});
|
|
21
|
+
}
|
|
22
|
+
// Patch event source
|
|
23
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
24
|
+
global.EventSource = EventSource;
|
|
25
|
+
async function main() {
|
|
26
|
+
dotenv.config();
|
|
27
|
+
// Initialize the OpenAI client
|
|
28
|
+
const openai = new OpenAI();
|
|
29
|
+
// Connect to MCPs
|
|
30
|
+
const connection = await Connection.connect({
|
|
31
|
+
shell: shellMcp.createServer({
|
|
32
|
+
allowedCommands: ["ls", "pwd", "date", "echo"],
|
|
33
|
+
approvalHandler: getHumanApproval,
|
|
34
|
+
}),
|
|
35
|
+
});
|
|
36
|
+
// Example conversation with tool usage
|
|
37
|
+
let isDone = false;
|
|
38
|
+
const messages = [
|
|
39
|
+
{
|
|
40
|
+
role: "user",
|
|
41
|
+
content: "What's the date?",
|
|
42
|
+
},
|
|
43
|
+
];
|
|
44
|
+
const handler = new OpenAIHandler(connection);
|
|
45
|
+
while (!isDone) {
|
|
46
|
+
const response = await openai.chat.completions.create({
|
|
47
|
+
model: "gpt-4o",
|
|
48
|
+
messages,
|
|
49
|
+
tools: await handler.listTools(),
|
|
50
|
+
});
|
|
51
|
+
// Handle tool calls - will prompt for approval during execution
|
|
52
|
+
const toolMessages = await handler.call(response);
|
|
53
|
+
messages.push(response.choices[0].message);
|
|
54
|
+
messages.push(...toolMessages);
|
|
55
|
+
isDone = toolMessages.length === 0;
|
|
56
|
+
console.log("Processing messages:", messages.map((m) => ({
|
|
57
|
+
role: m.role,
|
|
58
|
+
content: m.content,
|
|
59
|
+
tools: "tool_calls" in m ? m.tool_calls?.length : 0,
|
|
60
|
+
})));
|
|
61
|
+
}
|
|
62
|
+
// Print the final conversation
|
|
63
|
+
console.log("\nFinal conversation:");
|
|
64
|
+
messages.forEach((msg) => {
|
|
65
|
+
console.log(`\n${msg.role.toUpperCase()}:`);
|
|
66
|
+
console.log(msg.content);
|
|
67
|
+
if (msg.role === "assistant" && msg.tool_calls) {
|
|
68
|
+
console.log("Tool calls:", JSON.stringify(msg.tool_calls, null, 2));
|
|
69
|
+
}
|
|
70
|
+
});
|
|
71
|
+
}
|
|
72
|
+
// Run the example
|
|
73
|
+
if (import.meta.url === url.pathToFileURL(process.argv[1]).href) {
|
|
74
|
+
main().catch((err) => {
|
|
75
|
+
console.error("Error:", err);
|
|
76
|
+
process.exit(1);
|
|
77
|
+
});
|
|
78
|
+
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
import * as e2b from "@unroute/mcp-e2b";
|
|
2
|
+
import * as exa from "@unroute/mcp-exa";
|
|
3
|
+
import dotenv from "dotenv";
|
|
4
|
+
import EventSource from "eventsource";
|
|
5
|
+
import { OpenAI } from "openai";
|
|
6
|
+
import { Connection } from "../index.js";
|
|
7
|
+
import { OpenAIHandler } from "../integrations/llm/openai.js";
|
|
8
|
+
// Patch event source
|
|
9
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
10
|
+
global.EventSource = EventSource;
|
|
11
|
+
async function main() {
|
|
12
|
+
dotenv.config();
|
|
13
|
+
// Initialize the OpenAI client
|
|
14
|
+
const openai = new OpenAI();
|
|
15
|
+
// Connect to MCPs
|
|
16
|
+
const connection = await Connection.connect({
|
|
17
|
+
exa: {
|
|
18
|
+
server: exa.createServer(),
|
|
19
|
+
},
|
|
20
|
+
e2b: {
|
|
21
|
+
server: e2b.createServer(),
|
|
22
|
+
},
|
|
23
|
+
});
|
|
24
|
+
// Example conversation with tool usage
|
|
25
|
+
let isDone = false;
|
|
26
|
+
const messages = [
|
|
27
|
+
{
|
|
28
|
+
role: "user",
|
|
29
|
+
content: "Search about the latest news about syria and give me a summary",
|
|
30
|
+
},
|
|
31
|
+
];
|
|
32
|
+
const handler = new OpenAIHandler(connection);
|
|
33
|
+
while (!isDone) {
|
|
34
|
+
const response = await openai.chat.completions.create({
|
|
35
|
+
model: "gpt-4o-mini",
|
|
36
|
+
messages,
|
|
37
|
+
tools: await handler.listTools(),
|
|
38
|
+
});
|
|
39
|
+
// Handle tool calls
|
|
40
|
+
const toolMessages = await handler.call(response);
|
|
41
|
+
messages.push(response.choices[0].message);
|
|
42
|
+
messages.push(...toolMessages);
|
|
43
|
+
isDone = toolMessages.length === 0;
|
|
44
|
+
console.log("messages", messages);
|
|
45
|
+
}
|
|
46
|
+
// Print the final conversation
|
|
47
|
+
console.log("\nFinal conversation:");
|
|
48
|
+
messages.forEach((msg) => {
|
|
49
|
+
console.log(`\n${msg.role.toUpperCase()}:`);
|
|
50
|
+
console.log(msg.content);
|
|
51
|
+
if (msg.role === "assistant" && msg.tool_calls) {
|
|
52
|
+
console.log(msg.tool_calls);
|
|
53
|
+
}
|
|
54
|
+
});
|
|
55
|
+
}
|
|
56
|
+
// Run the example
|
|
57
|
+
main().catch((err) => {
|
|
58
|
+
console.error("Error:", err);
|
|
59
|
+
process.exit(1);
|
|
60
|
+
});
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
|
|
2
|
+
import type { RequestOptions } from "@modelcontextprotocol/sdk/shared/protocol.js";
|
|
3
|
+
import { type MCPConfig, type Tools } from "./types.js";
|
|
4
|
+
export { AnthropicHandler } from "./integrations/llm/anthropic.js";
|
|
5
|
+
export { OpenAIHandler } from "./integrations/llm/openai.js";
|
|
6
|
+
export type { MCPConfig, Tools } from "./types.js";
|
|
7
|
+
export declare class Connection {
|
|
8
|
+
mcps: Map<string, Client>;
|
|
9
|
+
static connect(config: MCPConfig): Promise<Connection>;
|
|
10
|
+
private toolsCache;
|
|
11
|
+
listTools(): Promise<Tools>;
|
|
12
|
+
callTools(calls: {
|
|
13
|
+
mcp: string;
|
|
14
|
+
name: string;
|
|
15
|
+
arguments: any;
|
|
16
|
+
}[], options?: RequestOptions): Promise<(import("zod").objectOutputType<import("zod").objectUtil.extendShape<{
|
|
17
|
+
_meta: import("zod").ZodOptional<import("zod").ZodObject<{}, "passthrough", import("zod").ZodTypeAny, import("zod").objectOutputType<{}, import("zod").ZodTypeAny, "passthrough">, import("zod").objectInputType<{}, import("zod").ZodTypeAny, "passthrough">>>;
|
|
18
|
+
}, {
|
|
19
|
+
content: import("zod").ZodArray<import("zod").ZodUnion<[import("zod").ZodObject<{
|
|
20
|
+
type: import("zod").ZodLiteral<"text">;
|
|
21
|
+
text: import("zod").ZodString;
|
|
22
|
+
}, "passthrough", import("zod").ZodTypeAny, import("zod").objectOutputType<{
|
|
23
|
+
type: import("zod").ZodLiteral<"text">;
|
|
24
|
+
text: import("zod").ZodString;
|
|
25
|
+
}, import("zod").ZodTypeAny, "passthrough">, import("zod").objectInputType<{
|
|
26
|
+
type: import("zod").ZodLiteral<"text">;
|
|
27
|
+
text: import("zod").ZodString;
|
|
28
|
+
}, import("zod").ZodTypeAny, "passthrough">>, import("zod").ZodObject<{
|
|
29
|
+
type: import("zod").ZodLiteral<"image">;
|
|
30
|
+
data: import("zod").ZodString;
|
|
31
|
+
mimeType: import("zod").ZodString;
|
|
32
|
+
}, "passthrough", import("zod").ZodTypeAny, import("zod").objectOutputType<{
|
|
33
|
+
type: import("zod").ZodLiteral<"image">;
|
|
34
|
+
data: import("zod").ZodString;
|
|
35
|
+
mimeType: import("zod").ZodString;
|
|
36
|
+
}, import("zod").ZodTypeAny, "passthrough">, import("zod").objectInputType<{
|
|
37
|
+
type: import("zod").ZodLiteral<"image">;
|
|
38
|
+
data: import("zod").ZodString;
|
|
39
|
+
mimeType: import("zod").ZodString;
|
|
40
|
+
}, import("zod").ZodTypeAny, "passthrough">>, import("zod").ZodObject<{
|
|
41
|
+
type: import("zod").ZodLiteral<"resource">;
|
|
42
|
+
resource: import("zod").ZodUnion<[import("zod").ZodObject<import("zod").objectUtil.extendShape<{
|
|
43
|
+
uri: import("zod").ZodString;
|
|
44
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
45
|
+
}, {
|
|
46
|
+
text: import("zod").ZodString;
|
|
47
|
+
}>, "passthrough", import("zod").ZodTypeAny, import("zod").objectOutputType<import("zod").objectUtil.extendShape<{
|
|
48
|
+
uri: import("zod").ZodString;
|
|
49
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
50
|
+
}, {
|
|
51
|
+
text: import("zod").ZodString;
|
|
52
|
+
}>, import("zod").ZodTypeAny, "passthrough">, import("zod").objectInputType<import("zod").objectUtil.extendShape<{
|
|
53
|
+
uri: import("zod").ZodString;
|
|
54
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
55
|
+
}, {
|
|
56
|
+
text: import("zod").ZodString;
|
|
57
|
+
}>, import("zod").ZodTypeAny, "passthrough">>, import("zod").ZodObject<import("zod").objectUtil.extendShape<{
|
|
58
|
+
uri: import("zod").ZodString;
|
|
59
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
60
|
+
}, {
|
|
61
|
+
blob: import("zod").ZodString;
|
|
62
|
+
}>, "passthrough", import("zod").ZodTypeAny, import("zod").objectOutputType<import("zod").objectUtil.extendShape<{
|
|
63
|
+
uri: import("zod").ZodString;
|
|
64
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
65
|
+
}, {
|
|
66
|
+
blob: import("zod").ZodString;
|
|
67
|
+
}>, import("zod").ZodTypeAny, "passthrough">, import("zod").objectInputType<import("zod").objectUtil.extendShape<{
|
|
68
|
+
uri: import("zod").ZodString;
|
|
69
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
70
|
+
}, {
|
|
71
|
+
blob: import("zod").ZodString;
|
|
72
|
+
}>, import("zod").ZodTypeAny, "passthrough">>]>;
|
|
73
|
+
}, "passthrough", import("zod").ZodTypeAny, import("zod").objectOutputType<{
|
|
74
|
+
type: import("zod").ZodLiteral<"resource">;
|
|
75
|
+
resource: import("zod").ZodUnion<[import("zod").ZodObject<import("zod").objectUtil.extendShape<{
|
|
76
|
+
uri: import("zod").ZodString;
|
|
77
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
78
|
+
}, {
|
|
79
|
+
text: import("zod").ZodString;
|
|
80
|
+
}>, "passthrough", import("zod").ZodTypeAny, import("zod").objectOutputType<import("zod").objectUtil.extendShape<{
|
|
81
|
+
uri: import("zod").ZodString;
|
|
82
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
83
|
+
}, {
|
|
84
|
+
text: import("zod").ZodString;
|
|
85
|
+
}>, import("zod").ZodTypeAny, "passthrough">, import("zod").objectInputType<import("zod").objectUtil.extendShape<{
|
|
86
|
+
uri: import("zod").ZodString;
|
|
87
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
88
|
+
}, {
|
|
89
|
+
text: import("zod").ZodString;
|
|
90
|
+
}>, import("zod").ZodTypeAny, "passthrough">>, import("zod").ZodObject<import("zod").objectUtil.extendShape<{
|
|
91
|
+
uri: import("zod").ZodString;
|
|
92
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
93
|
+
}, {
|
|
94
|
+
blob: import("zod").ZodString;
|
|
95
|
+
}>, "passthrough", import("zod").ZodTypeAny, import("zod").objectOutputType<import("zod").objectUtil.extendShape<{
|
|
96
|
+
uri: import("zod").ZodString;
|
|
97
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
98
|
+
}, {
|
|
99
|
+
blob: import("zod").ZodString;
|
|
100
|
+
}>, import("zod").ZodTypeAny, "passthrough">, import("zod").objectInputType<import("zod").objectUtil.extendShape<{
|
|
101
|
+
uri: import("zod").ZodString;
|
|
102
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
103
|
+
}, {
|
|
104
|
+
blob: import("zod").ZodString;
|
|
105
|
+
}>, import("zod").ZodTypeAny, "passthrough">>]>;
|
|
106
|
+
}, import("zod").ZodTypeAny, "passthrough">, import("zod").objectInputType<{
|
|
107
|
+
type: import("zod").ZodLiteral<"resource">;
|
|
108
|
+
resource: import("zod").ZodUnion<[import("zod").ZodObject<import("zod").objectUtil.extendShape<{
|
|
109
|
+
uri: import("zod").ZodString;
|
|
110
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
111
|
+
}, {
|
|
112
|
+
text: import("zod").ZodString;
|
|
113
|
+
}>, "passthrough", import("zod").ZodTypeAny, import("zod").objectOutputType<import("zod").objectUtil.extendShape<{
|
|
114
|
+
uri: import("zod").ZodString;
|
|
115
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
116
|
+
}, {
|
|
117
|
+
text: import("zod").ZodString;
|
|
118
|
+
}>, import("zod").ZodTypeAny, "passthrough">, import("zod").objectInputType<import("zod").objectUtil.extendShape<{
|
|
119
|
+
uri: import("zod").ZodString;
|
|
120
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
121
|
+
}, {
|
|
122
|
+
text: import("zod").ZodString;
|
|
123
|
+
}>, import("zod").ZodTypeAny, "passthrough">>, import("zod").ZodObject<import("zod").objectUtil.extendShape<{
|
|
124
|
+
uri: import("zod").ZodString;
|
|
125
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
126
|
+
}, {
|
|
127
|
+
blob: import("zod").ZodString;
|
|
128
|
+
}>, "passthrough", import("zod").ZodTypeAny, import("zod").objectOutputType<import("zod").objectUtil.extendShape<{
|
|
129
|
+
uri: import("zod").ZodString;
|
|
130
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
131
|
+
}, {
|
|
132
|
+
blob: import("zod").ZodString;
|
|
133
|
+
}>, import("zod").ZodTypeAny, "passthrough">, import("zod").objectInputType<import("zod").objectUtil.extendShape<{
|
|
134
|
+
uri: import("zod").ZodString;
|
|
135
|
+
mimeType: import("zod").ZodOptional<import("zod").ZodString>;
|
|
136
|
+
}, {
|
|
137
|
+
blob: import("zod").ZodString;
|
|
138
|
+
}>, import("zod").ZodTypeAny, "passthrough">>]>;
|
|
139
|
+
}, import("zod").ZodTypeAny, "passthrough">>]>, "many">;
|
|
140
|
+
isError: import("zod").ZodOptional<import("zod").ZodDefault<import("zod").ZodBoolean>>;
|
|
141
|
+
}>, import("zod").ZodTypeAny, "passthrough"> | import("zod").objectOutputType<import("zod").objectUtil.extendShape<{
|
|
142
|
+
_meta: import("zod").ZodOptional<import("zod").ZodObject<{}, "passthrough", import("zod").ZodTypeAny, import("zod").objectOutputType<{}, import("zod").ZodTypeAny, "passthrough">, import("zod").objectInputType<{}, import("zod").ZodTypeAny, "passthrough">>>;
|
|
143
|
+
}, {
|
|
144
|
+
toolResult: import("zod").ZodUnknown;
|
|
145
|
+
}>, import("zod").ZodTypeAny, "passthrough">)[]>;
|
|
146
|
+
close(): Promise<void>;
|
|
147
|
+
}
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
|
|
2
|
+
import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js";
|
|
3
|
+
import { InMemoryTransport } from "@modelcontextprotocol/sdk/inMemory.js";
|
|
4
|
+
import { CallToolResultSchema, } from "@modelcontextprotocol/sdk/types.js";
|
|
5
|
+
import { v4 as uuidv4 } from "uuid";
|
|
6
|
+
import { isServerConfig, isURIConfig, isWrappedServerConfig, isStdioConfig, } from "./types.js";
|
|
7
|
+
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
|
|
8
|
+
export { AnthropicHandler } from "./integrations/llm/anthropic.js";
|
|
9
|
+
export { OpenAIHandler } from "./integrations/llm/openai.js";
|
|
10
|
+
export class Connection {
|
|
11
|
+
constructor() {
|
|
12
|
+
this.mcps = new Map();
|
|
13
|
+
// TODO: Invalidate cache on tool change
|
|
14
|
+
this.toolsCache = null;
|
|
15
|
+
}
|
|
16
|
+
static async connect(config) {
|
|
17
|
+
const connection = new Connection();
|
|
18
|
+
await Promise.all(Object.entries(config).map(async ([mcpName, mcpConfig]) => {
|
|
19
|
+
const mcp = new Client({
|
|
20
|
+
name: uuidv4(),
|
|
21
|
+
version: "1.0.0",
|
|
22
|
+
}, {
|
|
23
|
+
capabilities: {},
|
|
24
|
+
});
|
|
25
|
+
if (isURIConfig(mcpConfig)) {
|
|
26
|
+
// For URI configs, connect using SSE (Server-Sent Events) transport
|
|
27
|
+
await mcp.connect(new SSEClientTransport(new URL(mcpConfig.url)));
|
|
28
|
+
}
|
|
29
|
+
else if (isStdioConfig(mcpConfig)) {
|
|
30
|
+
// For pipe configs, spawn the server locally, then connect using using Stdio transport
|
|
31
|
+
await mcp.connect(new StdioClientTransport(mcpConfig.stdio));
|
|
32
|
+
}
|
|
33
|
+
else if (isServerConfig(mcpConfig) ||
|
|
34
|
+
isWrappedServerConfig(mcpConfig)) {
|
|
35
|
+
// Handle both direct Server instances and wrapped {server: Server} configs
|
|
36
|
+
const server = isWrappedServerConfig(mcpConfig)
|
|
37
|
+
? mcpConfig.server
|
|
38
|
+
: mcpConfig;
|
|
39
|
+
// Create paired transports for in-memory communication between client and server
|
|
40
|
+
const [clientTransport, serverTransport] = InMemoryTransport.createLinkedPair();
|
|
41
|
+
await server.connect(serverTransport);
|
|
42
|
+
await mcp.connect(clientTransport);
|
|
43
|
+
}
|
|
44
|
+
connection.mcps.set(mcpName, mcp);
|
|
45
|
+
}));
|
|
46
|
+
return connection;
|
|
47
|
+
}
|
|
48
|
+
async listTools() {
|
|
49
|
+
if (this.toolsCache === null) {
|
|
50
|
+
this.toolsCache = (await Promise.all(Array.from(this.mcps.entries()).map(async ([name, mcp]) => {
|
|
51
|
+
const capabilities = mcp.getServerCapabilities();
|
|
52
|
+
if (!capabilities?.tools)
|
|
53
|
+
return [];
|
|
54
|
+
const response = await mcp.listTools();
|
|
55
|
+
return { [name]: response.tools };
|
|
56
|
+
}))).reduce((acc, curr) => Object.assign(acc, curr), {});
|
|
57
|
+
}
|
|
58
|
+
return this.toolsCache;
|
|
59
|
+
}
|
|
60
|
+
async callTools(calls, options) {
|
|
61
|
+
return await Promise.all(calls.map(async (call) => {
|
|
62
|
+
const mcp = this.mcps.get(call.mcp);
|
|
63
|
+
if (!mcp) {
|
|
64
|
+
throw new Error(`MCP tool ${call.mcp} not found`);
|
|
65
|
+
}
|
|
66
|
+
return mcp.callTool({
|
|
67
|
+
name: call.name,
|
|
68
|
+
arguments: call.arguments,
|
|
69
|
+
}, CallToolResultSchema, options);
|
|
70
|
+
}));
|
|
71
|
+
}
|
|
72
|
+
async close() {
|
|
73
|
+
await Promise.all(Array.from(this.mcps.values()).map((mcp) => mcp.close()));
|
|
74
|
+
}
|
|
75
|
+
}
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
import type { Message, MessageParam, Tool } from "@anthropic-ai/sdk/resources/index.js";
|
|
2
|
+
import type { RequestOptions } from "@modelcontextprotocol/sdk/shared/protocol.js";
|
|
3
|
+
import type { Connection } from "../../index.js";
|
|
4
|
+
import type { Tools } from "../../types.js";
|
|
5
|
+
export declare class AnthropicHandler {
|
|
6
|
+
private connection;
|
|
7
|
+
constructor(connection: Connection);
|
|
8
|
+
listTools(): Promise<Tool[]>;
|
|
9
|
+
format(tools: Tools): Tool[];
|
|
10
|
+
call(response: Message, options?: RequestOptions): Promise<MessageParam[]>;
|
|
11
|
+
}
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
export class AnthropicHandler {
|
|
2
|
+
constructor(connection) {
|
|
3
|
+
this.connection = connection;
|
|
4
|
+
}
|
|
5
|
+
async listTools() {
|
|
6
|
+
return this.format(await this.connection.listTools());
|
|
7
|
+
}
|
|
8
|
+
format(tools) {
|
|
9
|
+
return Object.entries(tools).flatMap(([mcpName, tools]) => tools.map((tool) => ({
|
|
10
|
+
name: `${mcpName}_${tool.name}`,
|
|
11
|
+
input_schema: { ...tool.inputSchema, type: "object" },
|
|
12
|
+
description: tool.description,
|
|
13
|
+
})));
|
|
14
|
+
}
|
|
15
|
+
// TODO: Support streaming
|
|
16
|
+
async call(response, options) {
|
|
17
|
+
const content = response.content;
|
|
18
|
+
if (!content || content.length === 0) {
|
|
19
|
+
return [];
|
|
20
|
+
}
|
|
21
|
+
// Find tool calls in the message content
|
|
22
|
+
const toolCalls = content.filter((part) => part.type === "tool_use");
|
|
23
|
+
if (toolCalls.length === 0) {
|
|
24
|
+
return [];
|
|
25
|
+
}
|
|
26
|
+
const results = await this.connection.callTools(toolCalls.map((toolCall) => {
|
|
27
|
+
const splitPoint = toolCall.name.indexOf("_");
|
|
28
|
+
const mcp = toolCall.name.slice(0, splitPoint);
|
|
29
|
+
const name = toolCall.name.slice(splitPoint + 1);
|
|
30
|
+
return {
|
|
31
|
+
mcp,
|
|
32
|
+
name,
|
|
33
|
+
arguments: toolCall.input,
|
|
34
|
+
};
|
|
35
|
+
}), options);
|
|
36
|
+
return [
|
|
37
|
+
{
|
|
38
|
+
role: "user",
|
|
39
|
+
content: results.map((result, index) => ({
|
|
40
|
+
tool_use_id: toolCalls[index].id,
|
|
41
|
+
type: "tool_result",
|
|
42
|
+
// TODO: Find a way to remove the any
|
|
43
|
+
content: result.content.filter((part) => part.type === "text"),
|
|
44
|
+
is_error: Boolean(result.isError),
|
|
45
|
+
})),
|
|
46
|
+
},
|
|
47
|
+
];
|
|
48
|
+
}
|
|
49
|
+
}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import type { OpenAI } from "openai";
|
|
2
|
+
import type { ChatCompletionTool, ChatCompletionToolMessageParam } from "openai/resources/index.js";
|
|
3
|
+
import type { RequestOptions } from "@modelcontextprotocol/sdk/shared/protocol.js";
|
|
4
|
+
import type { Connection } from "../../index.js";
|
|
5
|
+
import type { Tools } from "../../types.js";
|
|
6
|
+
export declare class OpenAIHandler {
|
|
7
|
+
private connection;
|
|
8
|
+
constructor(connection: Connection);
|
|
9
|
+
listTools(strict?: boolean): Promise<ChatCompletionTool[]>;
|
|
10
|
+
format(tools: Tools, strict?: boolean): ChatCompletionTool[];
|
|
11
|
+
call(response: OpenAI.Chat.Completions.ChatCompletion, options?: RequestOptions): Promise<ChatCompletionToolMessageParam[]>;
|
|
12
|
+
}
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
export class OpenAIHandler {
|
|
2
|
+
constructor(connection) {
|
|
3
|
+
this.connection = connection;
|
|
4
|
+
}
|
|
5
|
+
async listTools(strict = false) {
|
|
6
|
+
return this.format(await this.connection.listTools(), strict);
|
|
7
|
+
}
|
|
8
|
+
format(tools, strict = false) {
|
|
9
|
+
return Object.entries(tools).flatMap(([mcpName, tools]) => tools.map((tool) => ({
|
|
10
|
+
type: "function",
|
|
11
|
+
function: {
|
|
12
|
+
name: `${mcpName}_${tool.name}`,
|
|
13
|
+
description: tool.description,
|
|
14
|
+
parameters: tool.inputSchema,
|
|
15
|
+
strict,
|
|
16
|
+
},
|
|
17
|
+
})));
|
|
18
|
+
}
|
|
19
|
+
// TODO: Support streaming
|
|
20
|
+
async call(response, options) {
|
|
21
|
+
const choice = response.choices[0];
|
|
22
|
+
// TODO: Support `n`
|
|
23
|
+
if (!choice) {
|
|
24
|
+
return [];
|
|
25
|
+
}
|
|
26
|
+
const toolCalls = choice.message?.tool_calls;
|
|
27
|
+
if (!toolCalls) {
|
|
28
|
+
return [];
|
|
29
|
+
}
|
|
30
|
+
const results = await this.connection.callTools(toolCalls.map((toolCall) => {
|
|
31
|
+
const splitPoint = toolCall.function.name.indexOf("_");
|
|
32
|
+
const mcp = toolCall.function.name.slice(0, splitPoint);
|
|
33
|
+
const name = toolCall.function.name.slice(splitPoint + 1);
|
|
34
|
+
return {
|
|
35
|
+
mcp,
|
|
36
|
+
name,
|
|
37
|
+
arguments: JSON.parse(toolCall.function.arguments),
|
|
38
|
+
};
|
|
39
|
+
}), options);
|
|
40
|
+
return results.map((result, index) => ({
|
|
41
|
+
role: "tool",
|
|
42
|
+
content: result.content,
|
|
43
|
+
tool_call_id: toolCalls[index].id,
|
|
44
|
+
}));
|
|
45
|
+
}
|
|
46
|
+
}
|