mcp-use 0.1.20 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chunk-2HFIPY7C.js +429 -0
- package/dist/chunk-4DEFXVWT.js +680 -0
- package/dist/chunk-JXLQRAW2.js +532 -0
- package/dist/chunk-SHUYVCID.js +6 -0
- package/dist/chunk-YUSC6R6V.js +299 -0
- package/dist/index.cjs +5762 -0
- package/dist/index.d.ts +7 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +3767 -22
- package/dist/langfuse-YA2S23SM.js +13 -0
- package/dist/src/agents/remote.d.ts.map +1 -1
- package/dist/src/agents/utils/ai_sdk.d.ts.map +1 -1
- package/dist/src/auth/browser-provider.d.ts +52 -0
- package/dist/src/auth/browser-provider.d.ts.map +1 -0
- package/dist/src/auth/callback.d.ts +6 -0
- package/dist/src/auth/callback.d.ts.map +1 -0
- package/dist/src/auth/index.d.ts +7 -0
- package/dist/src/auth/index.d.ts.map +1 -0
- package/dist/src/auth/types.d.ts +18 -0
- package/dist/src/auth/types.d.ts.map +1 -0
- package/dist/src/browser.cjs +323 -0
- package/dist/src/browser.d.ts +8 -0
- package/dist/src/browser.d.ts.map +1 -0
- package/dist/src/browser.js +9 -0
- package/dist/src/client/base.d.ts +32 -0
- package/dist/src/client/base.d.ts.map +1 -0
- package/dist/src/client.d.ts +19 -16
- package/dist/src/client.d.ts.map +1 -1
- package/dist/src/logging.d.ts +1 -1
- package/dist/src/logging.d.ts.map +1 -1
- package/dist/src/oauth-helper.d.ts +125 -0
- package/dist/src/oauth-helper.d.ts.map +1 -0
- package/dist/src/react/index.cjs +986 -0
- package/dist/src/react/index.d.ts +9 -0
- package/dist/src/react/index.d.ts.map +1 -0
- package/dist/src/react/index.js +11 -0
- package/dist/src/react/types.d.ts +139 -0
- package/dist/src/react/types.d.ts.map +1 -0
- package/dist/src/react/useMcp.d.ts +3 -0
- package/dist/src/react/useMcp.d.ts.map +1 -0
- package/dist/src/server/index.cjs +566 -0
- package/dist/src/server/index.d.ts +3 -0
- package/dist/src/server/index.d.ts.map +1 -0
- package/dist/src/server/index.js +9 -0
- package/dist/src/server/logging.d.ts +16 -0
- package/dist/src/server/logging.d.ts.map +1 -0
- package/dist/src/server/mcp-server.d.ts +282 -0
- package/dist/src/server/mcp-server.d.ts.map +1 -0
- package/dist/src/server/types.d.ts +47 -0
- package/dist/src/server/types.d.ts.map +1 -0
- package/dist/src/utils/assert.d.ts +8 -0
- package/dist/src/utils/assert.d.ts.map +1 -0
- package/dist/tsconfig.tsbuildinfo +1 -0
- package/package.json +72 -40
- package/dist/examples/add_server_tool.d.ts +0 -8
- package/dist/examples/add_server_tool.d.ts.map +0 -1
- package/dist/examples/add_server_tool.js +0 -79
- package/dist/examples/ai_sdk_example.d.ts +0 -23
- package/dist/examples/ai_sdk_example.d.ts.map +0 -1
- package/dist/examples/ai_sdk_example.js +0 -213
- package/dist/examples/airbnb_use.d.ts +0 -10
- package/dist/examples/airbnb_use.d.ts.map +0 -1
- package/dist/examples/airbnb_use.js +0 -43
- package/dist/examples/blender_use.d.ts +0 -15
- package/dist/examples/blender_use.d.ts.map +0 -1
- package/dist/examples/blender_use.js +0 -39
- package/dist/examples/browser_use.d.ts +0 -10
- package/dist/examples/browser_use.d.ts.map +0 -1
- package/dist/examples/browser_use.js +0 -46
- package/dist/examples/chat_example.d.ts +0 -10
- package/dist/examples/chat_example.d.ts.map +0 -1
- package/dist/examples/chat_example.js +0 -86
- package/dist/examples/filesystem_use.d.ts +0 -11
- package/dist/examples/filesystem_use.d.ts.map +0 -1
- package/dist/examples/filesystem_use.js +0 -43
- package/dist/examples/http_example.d.ts +0 -18
- package/dist/examples/http_example.d.ts.map +0 -1
- package/dist/examples/http_example.js +0 -37
- package/dist/examples/mcp_everything.d.ts +0 -6
- package/dist/examples/mcp_everything.d.ts.map +0 -1
- package/dist/examples/mcp_everything.js +0 -25
- package/dist/examples/multi_server_example.d.ts +0 -10
- package/dist/examples/multi_server_example.d.ts.map +0 -1
- package/dist/examples/multi_server_example.js +0 -51
- package/dist/examples/observability.d.ts +0 -6
- package/dist/examples/observability.d.ts.map +0 -1
- package/dist/examples/observability.js +0 -50
- package/dist/examples/stream_example.d.ts +0 -12
- package/dist/examples/stream_example.d.ts.map +0 -1
- package/dist/examples/stream_example.js +0 -198
- package/dist/examples/structured_output.d.ts +0 -9
- package/dist/examples/structured_output.d.ts.map +0 -1
- package/dist/examples/structured_output.js +0 -95
- package/dist/src/adapters/base.js +0 -124
- package/dist/src/adapters/index.js +0 -2
- package/dist/src/adapters/langchain_adapter.js +0 -49
- package/dist/src/agents/base.js +0 -9
- package/dist/src/agents/index.js +0 -3
- package/dist/src/agents/mcp_agent.js +0 -1002
- package/dist/src/agents/prompts/system_prompt_builder.js +0 -40
- package/dist/src/agents/prompts/templates.js +0 -39
- package/dist/src/agents/remote.js +0 -264
- package/dist/src/agents/utils/ai_sdk.js +0 -62
- package/dist/src/agents/utils/index.js +0 -1
- package/dist/src/client.js +0 -133
- package/dist/src/config.js +0 -34
- package/dist/src/connectors/base.js +0 -143
- package/dist/src/connectors/http.js +0 -150
- package/dist/src/connectors/index.js +0 -4
- package/dist/src/connectors/stdio.js +0 -68
- package/dist/src/connectors/websocket.js +0 -157
- package/dist/src/logging.js +0 -217
- package/dist/src/managers/index.js +0 -2
- package/dist/src/managers/server_manager.js +0 -106
- package/dist/src/managers/tools/acquire_active_mcp_server.js +0 -17
- package/dist/src/managers/tools/add_server_from_config.js +0 -40
- package/dist/src/managers/tools/base.js +0 -17
- package/dist/src/managers/tools/connect_mcp_server.js +0 -46
- package/dist/src/managers/tools/index.js +0 -5
- package/dist/src/managers/tools/list_mcp_servers.js +0 -33
- package/dist/src/managers/tools/release_mcp_server_connection.js +0 -19
- package/dist/src/observability/index.js +0 -12
- package/dist/src/observability/langfuse.js +0 -211
- package/dist/src/observability/manager.js +0 -199
- package/dist/src/observability/types.js +0 -4
- package/dist/src/session.js +0 -23
- package/dist/src/task_managers/base.js +0 -127
- package/dist/src/task_managers/index.js +0 -5
- package/dist/src/task_managers/sse.js +0 -43
- package/dist/src/task_managers/stdio.js +0 -51
- package/dist/src/task_managers/streamable_http.js +0 -50
- package/dist/src/task_managers/websocket.js +0 -67
- package/dist/src/telemetry/events.js +0 -44
- package/dist/src/telemetry/index.js +0 -8
- package/dist/src/telemetry/telemetry.js +0 -324
- package/dist/src/telemetry/utils.js +0 -39
- package/dist/tests/ai_sdk_compatibility.test.js +0 -214
- package/dist/tests/stream_events.test.js +0 -307
- package/dist/tests/stream_events_simple.test.js +0 -179
- package/dist/vitest.config.js +0 -21
package/dist/examples/browser_use.js
@@ -1,46 +0,0 @@
-/**
- * Basic usage example for mcp-use.
- *
- * This example demonstrates how to use the mcp-use library with MCPClient
- * to connect any LLM to MCP tools through a unified interface.
- *
- * Special thanks to https://github.com/microsoft/playwright-mcp for the server.
- */
-import path from 'node:path';
-import { fileURLToPath } from 'node:url';
-import { ChatOpenAI } from '@langchain/openai';
-import { config } from 'dotenv';
-import { MCPAgent, MCPClient } from '../index.js';
-// Load environment variables from .env file
-config();
-const __filename = fileURLToPath(import.meta.url);
-const __dirname = path.dirname(__filename);
-async function main() {
-    const config = {
-        mcpServers: {
-            playwright: {
-                command: 'npx',
-                args: ['@playwright/mcp@latest'],
-                env: {
-                    DISPLAY: ':1',
-                },
-            },
-        },
-    };
-    // Create MCPClient from config file
-    const client = new MCPClient(config);
-    // Create LLM
-    const llm = new ChatOpenAI({ model: 'gpt-4o' });
-    // const llm = init_chat_model({ model: "llama-3.1-8b-instant", model_provider: "groq" })
-    // const llm = new ChatAnthropic({ model: "claude-3-" })
-    // const llm = new ChatGroq({ model: "llama3-8b-8192" })
-    // Create agent with the client
-    const agent = new MCPAgent({ llm, client, maxSteps: 30 });
-    // Run the query
-    const result = await agent.run(`Navigate to https://github.com/mcp-use/mcp-use, give a star to the project and write
-a summary of the project.`, 30);
-    console.error(`\nResult: ${result}`);
-}
-if (import.meta.url === `file://${process.argv[1]}`) {
-    main().catch(console.error);
-}
package/dist/examples/chat_example.d.ts
@@ -1,10 +0,0 @@
-/**
- * Simple chat example using MCPAgent with built-in conversation memory.
- *
- * This example demonstrates how to use the MCPAgent with its built-in
- * conversation history capabilities for better contextual interactions.
- *
- * Special thanks to https://github.com/microsoft/playwright-mcp for the server.
- */
-export {};
-//# sourceMappingURL=chat_example.d.ts.map
package/dist/examples/chat_example.d.ts.map
@@ -1 +0,0 @@
-{"version":3,"file":"chat_example.d.ts","sourceRoot":"","sources":["../../examples/chat_example.ts"],"names":[],"mappings":"AAAA;;;;;;;GAOG"}
package/dist/examples/chat_example.js
@@ -1,86 +0,0 @@
-/**
- * Simple chat example using MCPAgent with built-in conversation memory.
- *
- * This example demonstrates how to use the MCPAgent with its built-in
- * conversation history capabilities for better contextual interactions.
- *
- * Special thanks to https://github.com/microsoft/playwright-mcp for the server.
- */
-import readline from 'node:readline';
-import { ChatOpenAI } from '@langchain/openai';
-import { config } from 'dotenv';
-import { MCPAgent, MCPClient } from '../index.js';
-// Load environment variables from .env file
-config();
-async function runMemoryChat() {
-    // Config file path - change this to your config file
-    const config = {
-        mcpServers: {
-            airbnb: {
-                command: 'npx',
-                args: ['-y', '@openbnb/mcp-server-airbnb', '--ignore-robots-txt'],
-            },
-        },
-    };
-    console.error('Initializing chat...');
-    // Create MCP client and agent with memory enabled
-    const client = new MCPClient(config);
-    const llm = new ChatOpenAI({ model: 'gpt-4o-mini' });
-    // Create agent with memory_enabled=true
-    const agent = new MCPAgent({
-        llm,
-        client,
-        maxSteps: 15,
-        memoryEnabled: true, // Enable built-in conversation memory
-    });
-    console.error('\n===== Interactive MCP Chat =====');
-    console.error('Type \'exit\' or \'quit\' to end the conversation');
-    console.error('Type \'clear\' to clear conversation history');
-    console.error('==================================\n');
-    // Create readline interface for user input
-    const rl = readline.createInterface({
-        input: process.stdin,
-        output: process.stdout,
-    });
-    const question = (prompt) => {
-        return new Promise((resolve) => {
-            rl.question(prompt, resolve);
-        });
-    };
-    try {
-        // Main chat loop
-        while (true) {
-            // Get user input
-            const userInput = await question('\nYou: ');
-            // Check for exit command
-            if (userInput.toLowerCase() === 'exit' || userInput.toLowerCase() === 'quit') {
-                console.error('Ending conversation...');
-                break;
-            }
-            // Check for clear history command
-            if (userInput.toLowerCase() === 'clear') {
-                agent.clearConversationHistory();
-                console.error('Conversation history cleared.');
-                continue;
-            }
-            // Get response from agent
-            process.stdout.write('\nAssistant: ');
-            try {
-                // Run the agent with the user input (memory handling is automatic)
-                const response = await agent.run(userInput);
-                console.error(response);
-            }
-            catch (error) {
-                console.error(`\nError: ${error}`);
-            }
-        }
-    }
-    finally {
-        // Clean up
-        rl.close();
-        await client.closeAllSessions();
-    }
-}
-if (import.meta.url === `file://${process.argv[1]}`) {
-    runMemoryChat().catch(console.error);
-}
package/dist/examples/filesystem_use.d.ts
@@ -1,11 +0,0 @@
-/**
- * Basic usage example for mcp-use.
- *
- * This example demonstrates how to use the mcp-use library with MCPClient
- * to connect any LLM to MCP tools through a unified interface.
- *
- * Special Thanks to https://github.com/modelcontextprotocol/servers/tree/main/src/filesystem
- * for the server.
- */
-export {};
-//# sourceMappingURL=filesystem_use.d.ts.map
package/dist/examples/filesystem_use.d.ts.map
@@ -1 +0,0 @@
-{"version":3,"file":"filesystem_use.d.ts","sourceRoot":"","sources":["../../examples/filesystem_use.ts"],"names":[],"mappings":"AAAA;;;;;;;;GAQG"}
package/dist/examples/filesystem_use.js
@@ -1,43 +0,0 @@
-/**
- * Basic usage example for mcp-use.
- *
- * This example demonstrates how to use the mcp-use library with MCPClient
- * to connect any LLM to MCP tools through a unified interface.
- *
- * Special Thanks to https://github.com/modelcontextprotocol/servers/tree/main/src/filesystem
- * for the server.
- */
-import { ChatOpenAI } from '@langchain/openai';
-import { config } from 'dotenv';
-import { MCPAgent, MCPClient } from '../index.js';
-// Load environment variables from .env file
-config();
-const serverConfig = {
-    mcpServers: {
-        filesystem: {
-            command: 'npx',
-            args: [
-                '-y',
-                '@modelcontextprotocol/server-filesystem',
-                'THE_PATH_TO_YOUR_DIRECTORY',
-            ],
-        },
-    },
-};
-async function main() {
-    // Create MCPClient from config
-    const client = MCPClient.fromDict(serverConfig);
-    // Create LLM
-    const llm = new ChatOpenAI({ model: 'gpt-4o' });
-    // const llm = init_chat_model({ model: "llama-3.1-8b-instant", model_provider: "groq" })
-    // const llm = new ChatAnthropic({ model: "claude-3-" })
-    // const llm = new ChatGroq({ model: "llama3-8b-8192" })
-    // Create agent with the client
-    const agent = new MCPAgent({ llm, client, maxSteps: 30 });
-    // Run the query
-    const result = await agent.run('Hello can you give me a list of files and directories in the current directory', 30);
-    console.log(`\nResult: ${result}`);
-}
-if (import.meta.url === `file://${process.argv[1]}`) {
-    main().catch(console.error);
-}
package/dist/examples/http_example.d.ts
@@ -1,18 +0,0 @@
-/**
- * HTTP Example for mcp-use.
- *
- * This example demonstrates how to use the mcp-use library with MCPClient
- * to connect to an MCP server running on a specific HTTP port.
- *
- * Before running this example, you need to start the Playwright MCP server
- * in another terminal with:
- *
- * npx @playwright/mcp@latest --port 8931
- *
- * This will start the server on port 8931. Resulting in the config you find below.
- * Of course you can run this with any server you want at any URL.
- *
- * Special thanks to https://github.com/microsoft/playwright-mcp for the server.
- */
-export {};
-//# sourceMappingURL=http_example.d.ts.map
package/dist/examples/http_example.d.ts.map
@@ -1 +0,0 @@
-{"version":3,"file":"http_example.d.ts","sourceRoot":"","sources":["../../examples/http_example.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;GAeG"}
package/dist/examples/http_example.js
@@ -1,37 +0,0 @@
-/**
- * HTTP Example for mcp-use.
- *
- * This example demonstrates how to use the mcp-use library with MCPClient
- * to connect to an MCP server running on a specific HTTP port.
- *
- * Before running this example, you need to start the Playwright MCP server
- * in another terminal with:
- *
- * npx @playwright/mcp@latest --port 8931
- *
- * This will start the server on port 8931. Resulting in the config you find below.
- * Of course you can run this with any server you want at any URL.
- *
- * Special thanks to https://github.com/microsoft/playwright-mcp for the server.
- */
-import { ChatOpenAI } from '@langchain/openai';
-import { config } from 'dotenv';
-import { MCPAgent, MCPClient } from '../index.js';
-// Load environment variables from .env file
-config();
-async function main() {
-    const config = { mcpServers: { http: { url: 'https://gitmcp.io/docs' } } };
-    // Create MCPClient from config
-    const client = MCPClient.fromDict(config);
-    // Create LLM
-    const llm = new ChatOpenAI({ model: 'gpt-4o' });
-    // Create agent with the client
-    const agent = new MCPAgent({ llm, client, maxSteps: 30 });
-    // Run the query
-    const result = await agent.run('Which tools are available and what can they do?', 30);
-    console.log(`\nResult: ${result}`);
-    await agent.close();
-}
-if (import.meta.url === `file://${process.argv[1]}`) {
-    main().catch(console.error);
-}
package/dist/examples/mcp_everything.d.ts.map
@@ -1 +0,0 @@
-{"version":3,"file":"mcp_everything.d.ts","sourceRoot":"","sources":["../../examples/mcp_everything.ts"],"names":[],"mappings":"AAAA;;;GAGG"}
package/dist/examples/mcp_everything.js
@@ -1,25 +0,0 @@
-/**
- * This example shows how to test the different functionalities of MCPs using the MCP server from
- * anthropic.
- */
-import { ChatOpenAI } from '@langchain/openai';
-import { config } from 'dotenv';
-import { MCPAgent, MCPClient } from '../index.js';
-// Load environment variables from .env file
-config();
-const everythingServer = {
-    mcpServers: { everything: { command: 'npx', args: ['-y', '@modelcontextprotocol/server-everything'] } },
-};
-async function main() {
-    const client = new MCPClient(everythingServer);
-    const llm = new ChatOpenAI({ model: 'gpt-4o', temperature: 0 });
-    const agent = new MCPAgent({ llm, client, maxSteps: 30 });
-    const result = await agent.run(`Hello, you are a tester can you please answer the follwing questions:
-- Which resources do you have access to?
-- Which prompts do you have access to?
-- Which tools do you have access to?`, 30);
-    console.log(`\nResult: ${result}`);
-}
-if (import.meta.url === `file://${process.argv[1]}`) {
-    main().catch(console.error);
-}
package/dist/examples/multi_server_example.d.ts
@@ -1,10 +0,0 @@
-/**
- * Example demonstrating how to use MCPClient with multiple servers.
- *
- * This example shows how to:
- * 1. Configure multiple MCP servers
- * 2. Create and manage sessions for each server
- * 3. Use tools from different servers in a single agent
- */
-export {};
-//# sourceMappingURL=multi_server_example.d.ts.map
package/dist/examples/multi_server_example.d.ts.map
@@ -1 +0,0 @@
-{"version":3,"file":"multi_server_example.d.ts","sourceRoot":"","sources":["../../examples/multi_server_example.ts"],"names":[],"mappings":"AAAA;;;;;;;GAOG"}
package/dist/examples/multi_server_example.js
@@ -1,51 +0,0 @@
-/**
- * Example demonstrating how to use MCPClient with multiple servers.
- *
- * This example shows how to:
- * 1. Configure multiple MCP servers
- * 2. Create and manage sessions for each server
- * 3. Use tools from different servers in a single agent
- */
-import { ChatAnthropic } from '@langchain/anthropic';
-import { config } from 'dotenv';
-import { MCPAgent, MCPClient } from '../index.js';
-// Load environment variables from .env file
-config();
-async function runMultiServerExample() {
-    // Create a configuration with multiple servers
-    const config = {
-        mcpServers: {
-            airbnb: {
-                command: 'npx',
-                args: ['-y', '@openbnb/mcp-server-airbnb', '--ignore-robots-txt'],
-            },
-            playwright: {
-                command: 'npx',
-                args: ['@playwright/mcp@latest'],
-                env: { DISPLAY: ':1' },
-            },
-            filesystem: {
-                command: 'npx',
-                args: [
-                    '-y',
-                    '@modelcontextprotocol/server-filesystem',
-                    'YOUR_DIRECTORY_HERE',
-                ],
-            },
-        },
-    };
-    // Create MCPClient with the multi-server configuration
-    const client = MCPClient.fromDict(config);
-    // Create LLM
-    const llm = new ChatAnthropic({ model: 'claude-3-5-sonnet-20240620' });
-    // Create agent with the client
-    const agent = new MCPAgent({ llm, client, maxSteps: 30 });
-    // Example 1: Using tools from different servers in a single query
-    const result = await agent.run('Search for a nice place to stay in Barcelona on Airbnb, '
-        + 'then use Google to find nearby restaurants and attractions.'
-        + 'Write the result in the current directory in restarant.txt', 30);
-    console.log(result);
-}
-if (import.meta.url === `file://${process.argv[1]}`) {
-    runMultiServerExample().catch(console.error);
-}
package/dist/examples/observability.d.ts.map
@@ -1 +0,0 @@
-{"version":3,"file":"observability.d.ts","sourceRoot":"","sources":["../../examples/observability.ts"],"names":[],"mappings":"AAAA;;;GAGG"}
package/dist/examples/observability.js
@@ -1,50 +0,0 @@
-/**
- * This example shows how to test the different functionalities of MCPs using the MCP server from
- * anthropic.
- */
-import { ChatOpenAI } from '@langchain/openai';
-import { config } from 'dotenv';
-import { Logger, MCPAgent, MCPClient } from '../index.js';
-// Load environment variables from .env file
-config();
-// Enable debug logging to see observability messages
-Logger.setDebug(true);
-const everythingServer = {
-    mcpServers: { everything: { command: 'npx', args: ['-y', '@modelcontextprotocol/server-everything'] } },
-};
-async function main() {
-    console.log('🚀 Starting MCP Observability example with Langfuse tracing...');
-    console.log('📊 Environment variables:');
-    console.log(`  LANGFUSE_PUBLIC_KEY: ${process.env.LANGFUSE_PUBLIC_KEY ? '✅ Set' : '❌ Missing'}`);
-    console.log(`  LANGFUSE_SECRET_KEY: ${process.env.LANGFUSE_SECRET_KEY ? '✅ Set' : '❌ Missing'}`);
-    console.log(`  LANGFUSE_HOST: ${process.env.LANGFUSE_HOST || 'Not set'}`);
-    console.log(`  MCP_USE_LANGFUSE: ${process.env.MCP_USE_LANGFUSE || 'Not set'}`);
-    const client = new MCPClient(everythingServer);
-    const llm = new ChatOpenAI({ model: 'gpt-4o', temperature: 0 });
-    const agent = new MCPAgent({
-        llm,
-        client,
-        maxSteps: 30,
-    });
-    // console.log('🔧 Initializing agent...')
-    // await agent.initialize()
-    // Set additional metadata for testing (Optional)
-    agent.setMetadata({
-        agent_id: 'test-agent-123',
-        test_run: true,
-        example: 'mcp_observability',
-    });
-    agent.setTags(['test-tag-1', 'test-tag-2']);
-    console.log('💬 Running agent query...');
-    const result = await agent.run(`Hello, you are a tester can you please answer the follwing questions:
-- Which resources do you have access to?
-- Which prompts do you have access to?
-- Which tools do you have access to?`, 30);
-    console.log(`\n✅ Result: ${result}`);
-    // console.log('🧹 Closing agent...')
-    // await agent.close()
-    // console.log('🎉 Example completed! Check your Langfuse dashboard for traces.')
-}
-if (import.meta.url === `file://${process.argv[1]}`) {
-    main().catch(console.error);
-}
package/dist/examples/stream_example.d.ts
@@ -1,12 +0,0 @@
-/**
- * This example demonstrates how to use the stream method of MCPAgent to get
- * intermediate steps and observe the agent's reasoning process in real-time.
- *
- * The stream method returns an AsyncGenerator that yields AgentStep objects
- * for each intermediate step, and finally returns the complete result.
- *
- * This example also demonstrates the streamEvents method which yields
- * LangChain StreamEvent objects for more granular, token-level streaming.
- */
-export {};
-//# sourceMappingURL=stream_example.d.ts.map
package/dist/examples/stream_example.d.ts.map
@@ -1 +0,0 @@
-{"version":3,"file":"stream_example.d.ts","sourceRoot":"","sources":["../../examples/stream_example.ts"],"names":[],"mappings":"AAAA;;;;;;;;;GASG"}
package/dist/examples/stream_example.js
@@ -1,198 +0,0 @@
-/**
- * This example demonstrates how to use the stream method of MCPAgent to get
- * intermediate steps and observe the agent's reasoning process in real-time.
- *
- * The stream method returns an AsyncGenerator that yields AgentStep objects
- * for each intermediate step, and finally returns the complete result.
- *
- * This example also demonstrates the streamEvents method which yields
- * LangChain StreamEvent objects for more granular, token-level streaming.
- */
-import { ChatAnthropic } from '@langchain/anthropic';
-import { config } from 'dotenv';
-import { MCPAgent, MCPClient } from '../index.js';
-// Load environment variables from .env file
-config();
-const everythingServer = {
-    mcpServers: { everything: { command: 'npx', args: ['-y', '@modelcontextprotocol/server-everything'] } },
-};
-async function streamingExample() {
-    console.log('🚀 Starting streaming example...\n');
-    // Initialize MCP client and agent
-    const client = new MCPClient(everythingServer);
-    const llm = new ChatAnthropic({ model: 'claude-sonnet-4-20250514', temperature: 0 });
-    const agent = new MCPAgent({
-        llm,
-        client,
-        maxSteps: 10,
-        verbose: true,
-    });
-    const query = `Please help me understand what capabilities you have:
-1. List all available tools
-2. Try using a few different tools to demonstrate their functionality
-3. Show me what resources and prompts are available
-4. Create a simple example to showcase your abilities`;
-    console.log(`📝 Query: ${query}\n`);
-    console.log('🔄 Starting to stream agent steps...\n');
-    try {
-        // Use the stream method to get intermediate steps
-        const stream = agent.stream(query);
-        let stepNumber = 1;
-        // Iterate through the async generator to get intermediate steps
-        for await (const step of stream) {
-            console.log(`\n--- Step ${stepNumber} ---`);
-            console.log(`🔧 Tool: ${step.action.tool}`);
-            console.log(`📥 Input: ${JSON.stringify(step.action.toolInput, null, 2)}`);
-            console.log(`📤 Output: ${step.observation}`);
-            console.log('---\n');
-            stepNumber++;
-        }
-        // The final result is the return value when the generator is done
-        // Note: In the loop above, we don't get the final result directly
-        // To get it, we need to manually handle the generator
-    }
-    catch (error) {
-        console.error('❌ Error during streaming:', error);
-    }
-    console.log('\n🎉 Streaming complete!');
-}
-async function streamingExampleWithFinalResult() {
-    console.log('\n\n🚀 Starting streaming example with final result capture...\n');
-    // Initialize MCP client and agent
-    const client = new MCPClient(everythingServer);
-    const llm = new ChatAnthropic({ model: 'claude-sonnet-4-20250514', temperature: 0 });
-    const agent = new MCPAgent({
-        llm,
-        client,
-        maxSteps: 8,
-        verbose: false, // Less verbose for cleaner output
-    });
-    const query = `What tools do you have access to? Please test 2-3 of them to show me what they can do.`;
-    console.log(`📝 Query: ${query}\n`);
-    console.log('🔄 Processing with intermediate steps...\n');
-    try {
-        // Create the stream generator
-        const stream = agent.stream(query);
-        let stepNumber = 1;
-        let result = '';
-        // Manually iterate through the generator to capture both steps and final result
-        while (true) {
-            const { done, value } = await stream.next();
-            if (done) {
-                // Generator is complete, value contains the final result
-                result = value;
-                break;
-            }
-            else {
-                // value is an AgentStep
-                console.log(`\n🔧 Step ${stepNumber}: ${value.action.tool}`);
-                console.log(`  Input: ${JSON.stringify(value.action.toolInput)}`);
-                console.log(`  Result: ${value.observation.slice(0, 100)}${value.observation.length > 100 ? '...' : ''}`);
-                stepNumber++;
-            }
-        }
-        console.log(`\n${'='.repeat(50)}`);
-        console.log('🎯 FINAL RESULT:');
-        console.log('='.repeat(50));
-        console.log(result);
-    }
-    catch (error) {
-        console.error('❌ Error during streaming:', error);
-    }
-    finally {
-        // Clean up
-        await client.closeAllSessions();
-    }
-    console.log('\n✅ Example complete!');
-}
-async function streamEventsExample() {
-    console.log('\n\n🚀 Starting streamEvents example (token-level streaming)...\n');
-    // Initialize MCP client and agent
-    const client = new MCPClient(everythingServer);
-    const llm = new ChatAnthropic({ model: 'claude-sonnet-4-20250514', temperature: 0 });
-    const agent = new MCPAgent({
-        llm,
-        client,
-        maxSteps: 5,
-        verbose: false,
-    });
-    const query = `What's the current time and date? Also create a simple text file with today's date.`;
-    console.log(`📝 Query: ${query}\n`);
-    console.log('🔄 Streaming fine-grained events...\n');
-    try {
-        // Use streamEvents for token-level streaming
-        const eventStream = agent.streamEvents(query);
-        let eventCount = 0;
-        let currentToolCall = null;
-        for await (const event of eventStream) {
-            eventCount++;
-            // Log different types of events
-            switch (event.event) {
-                case 'on_chain_start':
-                    if (event.name === 'AgentExecutor') {
-                        console.log('🚀 Agent execution started');
-                    }
-                    break;
-                case 'on_tool_start':
-                    currentToolCall = event.name;
-                    console.log(`\n🔧 Tool started: ${event.name}`);
-                    if (event.data?.input) {
-                        console.log(`  Input: ${JSON.stringify(event.data.input)}`);
-                    }
-                    break;
-                case 'on_tool_end':
-                    if (event.name === currentToolCall) {
-                        console.log(`✅ Tool completed: ${event.name}`);
-                        if (event.data?.output) {
-                            const output = typeof event.data.output === 'string'
-                                ? event.data.output
-                                : JSON.stringify(event.data.output);
-                            console.log(`  Output: ${output.slice(0, 100)}${output.length > 100 ? '...' : ''}`);
-                        }
-                        currentToolCall = null;
-                    }
-                    break;
-                case 'on_chat_model_stream':
-                    // This shows token-by-token streaming from the LLM
-                    if (event.data?.chunk?.text) {
-                        const textContent = event.data.chunk.text;
-                        if (typeof textContent === 'string' && textContent.length > 0) {
-                            process.stdout.write(textContent);
-                        }
-                    }
-                    break;
-                case 'on_chain_end':
-                    if (event.name === 'AgentExecutor') {
-                        console.log('\n\n🏁 Agent execution completed');
-                    }
-                    break;
-                // You can handle many more event types:
-                // - on_llm_start, on_llm_end
-                // - on_parser_start, on_parser_end
-                // - on_retriever_start, on_retriever_end
-                // - etc.
-            }
-            // Limit output for demo purposes
-            if (eventCount > 200) {
-                console.log('\n... (truncated for demo)');
-                break;
-            }
-        }
-        console.log(`\n\n📊 Total events emitted: ${eventCount}`);
-    }
-    catch (error) {
-        console.error('❌ Error during event streaming:', error);
-    }
-    finally {
-        await client.closeAllSessions();
-    }
-    console.log('\n✅ StreamEvents example complete!');
-}
-// Run all examples
-async function runAllExamples() {
-    await streamingExample();
-    await streamingExampleWithFinalResult();
-    await streamEventsExample();
-}
-// Run the examples
-runAllExamples().catch(console.error);
package/dist/examples/structured_output.d.ts
@@ -1,9 +0,0 @@
-/**
- * Structured Output Example - City Research with Playwright
- *
- * This example demonstrates intelligent structured output by researching Padova, Italy.
- * The agent becomes schema-aware and will intelligently retry to gather missing
- * information until all required fields can be populated.
- */
-export {};
-//# sourceMappingURL=structured_output.d.ts.map
package/dist/examples/structured_output.d.ts.map
@@ -1 +0,0 @@
-{"version":3,"file":"structured_output.d.ts","sourceRoot":"","sources":["../../examples/structured_output.ts"],"names":[],"mappings":"AAAA;;;;;;GAMG"}