mcp-use 0.1.19 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/src/adapters/langchain_adapter.d.ts.map +1 -1
- package/dist/src/adapters/langchain_adapter.js +1 -42
- package/dist/src/browser.d.ts +49 -0
- package/dist/src/browser.d.ts.map +1 -0
- package/dist/src/browser.js +75 -0
- package/dist/src/client/base.d.ts +32 -0
- package/dist/src/client/base.d.ts.map +1 -0
- package/dist/src/client/base.js +119 -0
- package/dist/src/client.d.ts +19 -16
- package/dist/src/client.d.ts.map +1 -1
- package/dist/src/client.js +24 -107
- package/dist/src/logging.d.ts +1 -1
- package/dist/src/logging.d.ts.map +1 -1
- package/dist/src/logging.js +31 -16
- package/dist/src/managers/server_manager.js +1 -1
- package/dist/src/oauth-helper.d.ts +135 -0
- package/dist/src/oauth-helper.d.ts.map +1 -0
- package/dist/src/oauth-helper.js +427 -0
- package/package.json +6 -1
- package/dist/examples/add_server_tool.d.ts +0 -8
- package/dist/examples/add_server_tool.d.ts.map +0 -1
- package/dist/examples/add_server_tool.js +0 -79
- package/dist/examples/ai_sdk_example.d.ts +0 -23
- package/dist/examples/ai_sdk_example.d.ts.map +0 -1
- package/dist/examples/ai_sdk_example.js +0 -213
- package/dist/examples/airbnb_use.d.ts +0 -10
- package/dist/examples/airbnb_use.d.ts.map +0 -1
- package/dist/examples/airbnb_use.js +0 -43
- package/dist/examples/blender_use.d.ts +0 -15
- package/dist/examples/blender_use.d.ts.map +0 -1
- package/dist/examples/blender_use.js +0 -39
- package/dist/examples/browser_use.d.ts +0 -10
- package/dist/examples/browser_use.d.ts.map +0 -1
- package/dist/examples/browser_use.js +0 -46
- package/dist/examples/chat_example.d.ts +0 -10
- package/dist/examples/chat_example.d.ts.map +0 -1
- package/dist/examples/chat_example.js +0 -86
- package/dist/examples/filesystem_use.d.ts +0 -11
- package/dist/examples/filesystem_use.d.ts.map +0 -1
- package/dist/examples/filesystem_use.js +0 -43
- package/dist/examples/http_example.d.ts +0 -18
- package/dist/examples/http_example.d.ts.map +0 -1
- package/dist/examples/http_example.js +0 -37
- package/dist/examples/mcp_everything.d.ts +0 -6
- package/dist/examples/mcp_everything.d.ts.map +0 -1
- package/dist/examples/mcp_everything.js +0 -25
- package/dist/examples/multi_server_example.d.ts +0 -10
- package/dist/examples/multi_server_example.d.ts.map +0 -1
- package/dist/examples/multi_server_example.js +0 -51
- package/dist/examples/observability.d.ts +0 -6
- package/dist/examples/observability.d.ts.map +0 -1
- package/dist/examples/observability.js +0 -50
- package/dist/examples/stream_example.d.ts +0 -12
- package/dist/examples/stream_example.d.ts.map +0 -1
- package/dist/examples/stream_example.js +0 -198
- package/dist/examples/structured_output.d.ts +0 -9
- package/dist/examples/structured_output.d.ts.map +0 -1
- package/dist/examples/structured_output.js +0 -95
package/dist/examples/filesystem_use.js
@@ -1,43 +0,0 @@
-/**
- * Basic usage example for mcp-use.
- *
- * This example demonstrates how to use the mcp-use library with MCPClient
- * to connect any LLM to MCP tools through a unified interface.
- *
- * Special Thanks to https://github.com/modelcontextprotocol/servers/tree/main/src/filesystem
- * for the server.
- */
-import { ChatOpenAI } from '@langchain/openai';
-import { config } from 'dotenv';
-import { MCPAgent, MCPClient } from '../index.js';
-// Load environment variables from .env file
-config();
-const serverConfig = {
-    mcpServers: {
-        filesystem: {
-            command: 'npx',
-            args: [
-                '-y',
-                '@modelcontextprotocol/server-filesystem',
-                'THE_PATH_TO_YOUR_DIRECTORY',
-            ],
-        },
-    },
-};
-async function main() {
-    // Create MCPClient from config
-    const client = MCPClient.fromDict(serverConfig);
-    // Create LLM
-    const llm = new ChatOpenAI({ model: 'gpt-4o' });
-    // const llm = init_chat_model({ model: "llama-3.1-8b-instant", model_provider: "groq" })
-    // const llm = new ChatAnthropic({ model: "claude-3-" })
-    // const llm = new ChatGroq({ model: "llama3-8b-8192" })
-    // Create agent with the client
-    const agent = new MCPAgent({ llm, client, maxSteps: 30 });
-    // Run the query
-    const result = await agent.run('Hello can you give me a list of files and directories in the current directory', 30);
-    console.log(`\nResult: ${result}`);
-}
-if (import.meta.url === `file://${process.argv[1]}`) {
-    main().catch(console.error);
-}
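The removed examples all follow the same core pattern: build a server config, create an MCPClient (both `new MCPClient(config)` and `MCPClient.fromDict(config)` appear in these files), pair it with a LangChain chat model in an MCPAgent, and call `agent.run(...)`. A minimal sketch distilled from the example above, importing from the published package name rather than the in-repo '../index.js' (the directory path is a placeholder, exactly as in the original):

import { ChatOpenAI } from '@langchain/openai';
import { MCPAgent, MCPClient } from 'mcp-use';

// Spawn the filesystem MCP server over stdio; the path is a placeholder.
const client = MCPClient.fromDict({
    mcpServers: {
        filesystem: {
            command: 'npx',
            args: ['-y', '@modelcontextprotocol/server-filesystem', 'THE_PATH_TO_YOUR_DIRECTORY'],
        },
    },
});
const llm = new ChatOpenAI({ model: 'gpt-4o' });
const agent = new MCPAgent({ llm, client, maxSteps: 30 });
console.log(await agent.run('List the files and directories in the current directory', 30));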
package/dist/examples/http_example.d.ts
@@ -1,18 +0,0 @@
-/**
- * HTTP Example for mcp-use.
- *
- * This example demonstrates how to use the mcp-use library with MCPClient
- * to connect to an MCP server running on a specific HTTP port.
- *
- * Before running this example, you need to start the Playwright MCP server
- * in another terminal with:
- *
- * npx @playwright/mcp@latest --port 8931
- *
- * This will start the server on port 8931. Resulting in the config you find below.
- * Of course you can run this with any server you want at any URL.
- *
- * Special thanks to https://github.com/microsoft/playwright-mcp for the server.
- */
-export {};
-//# sourceMappingURL=http_example.d.ts.map
package/dist/examples/http_example.d.ts.map
@@ -1 +0,0 @@
-{"version":3,"file":"http_example.d.ts","sourceRoot":"","sources":["../../examples/http_example.ts"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;;GAeG"}
package/dist/examples/http_example.js
@@ -1,37 +0,0 @@
-/**
- * HTTP Example for mcp-use.
- *
- * This example demonstrates how to use the mcp-use library with MCPClient
- * to connect to an MCP server running on a specific HTTP port.
- *
- * Before running this example, you need to start the Playwright MCP server
- * in another terminal with:
- *
- * npx @playwright/mcp@latest --port 8931
- *
- * This will start the server on port 8931. Resulting in the config you find below.
- * Of course you can run this with any server you want at any URL.
- *
- * Special thanks to https://github.com/microsoft/playwright-mcp for the server.
- */
-import { ChatOpenAI } from '@langchain/openai';
-import { config } from 'dotenv';
-import { MCPAgent, MCPClient } from '../index.js';
-// Load environment variables from .env file
-config();
-async function main() {
-    const config = { mcpServers: { http: { url: 'https://gitmcp.io/docs' } } };
-    // Create MCPClient from config
-    const client = MCPClient.fromDict(config);
-    // Create LLM
-    const llm = new ChatOpenAI({ model: 'gpt-4o' });
-    // Create agent with the client
-    const agent = new MCPAgent({ llm, client, maxSteps: 30 });
-    // Run the query
-    const result = await agent.run('Which tools are available and what can they do?', 30);
-    console.log(`\nResult: ${result}`);
-    await agent.close();
-}
-if (import.meta.url === `file://${process.argv[1]}`) {
-    main().catch(console.error);
-}
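The HTTP variant differs from the stdio examples only in the server entry: the config supplies a url to connect to instead of a command to spawn. A minimal sketch for the locally started Playwright server described in the header comment. The bare port URL is an assumption: depending on the server and transport an explicit path such as /sse or /mcp may be needed, and the example itself connects to https://gitmcp.io/docs instead.

import { ChatOpenAI } from '@langchain/openai';
import { MCPAgent, MCPClient } from 'mcp-use';

// Assumes `npx @playwright/mcp@latest --port 8931` is already running in another terminal.
const client = MCPClient.fromDict({
    mcpServers: { playwright: { url: 'http://localhost:8931' } },
});
const agent = new MCPAgent({ llm: new ChatOpenAI({ model: 'gpt-4o' }), client, maxSteps: 30 });
console.log(await agent.run('Which tools are available and what can they do?', 30));
await agent.close();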
package/dist/examples/mcp_everything.d.ts.map
@@ -1 +0,0 @@
-{"version":3,"file":"mcp_everything.d.ts","sourceRoot":"","sources":["../../examples/mcp_everything.ts"],"names":[],"mappings":"AAAA;;;GAGG"}
package/dist/examples/mcp_everything.js
@@ -1,25 +0,0 @@
-/**
- * This example shows how to test the different functionalities of MCPs using the MCP server from
- * anthropic.
- */
-import { ChatOpenAI } from '@langchain/openai';
-import { config } from 'dotenv';
-import { MCPAgent, MCPClient } from '../index.js';
-// Load environment variables from .env file
-config();
-const everythingServer = {
-    mcpServers: { everything: { command: 'npx', args: ['-y', '@modelcontextprotocol/server-everything'] } },
-};
-async function main() {
-    const client = new MCPClient(everythingServer);
-    const llm = new ChatOpenAI({ model: 'gpt-4o', temperature: 0 });
-    const agent = new MCPAgent({ llm, client, maxSteps: 30 });
-    const result = await agent.run(`Hello, you are a tester can you please answer the follwing questions:
-    - Which resources do you have access to?
-    - Which prompts do you have access to?
-    - Which tools do you have access to?`, 30);
-    console.log(`\nResult: ${result}`);
-}
-if (import.meta.url === `file://${process.argv[1]}`) {
-    main().catch(console.error);
-}
package/dist/examples/multi_server_example.d.ts
@@ -1,10 +0,0 @@
-/**
- * Example demonstrating how to use MCPClient with multiple servers.
- *
- * This example shows how to:
- * 1. Configure multiple MCP servers
- * 2. Create and manage sessions for each server
- * 3. Use tools from different servers in a single agent
- */
-export {};
-//# sourceMappingURL=multi_server_example.d.ts.map
package/dist/examples/multi_server_example.d.ts.map
@@ -1 +0,0 @@
-{"version":3,"file":"multi_server_example.d.ts","sourceRoot":"","sources":["../../examples/multi_server_example.ts"],"names":[],"mappings":"AAAA;;;;;;;GAOG"}
package/dist/examples/multi_server_example.js
@@ -1,51 +0,0 @@
-/**
- * Example demonstrating how to use MCPClient with multiple servers.
- *
- * This example shows how to:
- * 1. Configure multiple MCP servers
- * 2. Create and manage sessions for each server
- * 3. Use tools from different servers in a single agent
- */
-import { ChatAnthropic } from '@langchain/anthropic';
-import { config } from 'dotenv';
-import { MCPAgent, MCPClient } from '../index.js';
-// Load environment variables from .env file
-config();
-async function runMultiServerExample() {
-    // Create a configuration with multiple servers
-    const config = {
-        mcpServers: {
-            airbnb: {
-                command: 'npx',
-                args: ['-y', '@openbnb/mcp-server-airbnb', '--ignore-robots-txt'],
-            },
-            playwright: {
-                command: 'npx',
-                args: ['@playwright/mcp@latest'],
-                env: { DISPLAY: ':1' },
-            },
-            filesystem: {
-                command: 'npx',
-                args: [
-                    '-y',
-                    '@modelcontextprotocol/server-filesystem',
-                    'YOUR_DIRECTORY_HERE',
-                ],
-            },
-        },
-    };
-    // Create MCPClient with the multi-server configuration
-    const client = MCPClient.fromDict(config);
-    // Create LLM
-    const llm = new ChatAnthropic({ model: 'claude-3-5-sonnet-20240620' });
-    // Create agent with the client
-    const agent = new MCPAgent({ llm, client, maxSteps: 30 });
-    // Example 1: Using tools from different servers in a single query
-    const result = await agent.run('Search for a nice place to stay in Barcelona on Airbnb, '
-        + 'then use Google to find nearby restaurants and attractions.'
-        + 'Write the result in the current directory in restarant.txt', 30);
-    console.log(result);
-}
-if (import.meta.url === `file://${process.argv[1]}`) {
-    runMultiServerExample().catch(console.error);
-}
package/dist/examples/observability.d.ts.map
@@ -1 +0,0 @@
-{"version":3,"file":"observability.d.ts","sourceRoot":"","sources":["../../examples/observability.ts"],"names":[],"mappings":"AAAA;;;GAGG"}
package/dist/examples/observability.js
@@ -1,50 +0,0 @@
-/**
- * This example shows how to test the different functionalities of MCPs using the MCP server from
- * anthropic.
- */
-import { ChatOpenAI } from '@langchain/openai';
-import { config } from 'dotenv';
-import { Logger, MCPAgent, MCPClient } from '../index.js';
-// Load environment variables from .env file
-config();
-// Enable debug logging to see observability messages
-Logger.setDebug(true);
-const everythingServer = {
-    mcpServers: { everything: { command: 'npx', args: ['-y', '@modelcontextprotocol/server-everything'] } },
-};
-async function main() {
-    console.log('🚀 Starting MCP Observability example with Langfuse tracing...');
-    console.log('📊 Environment variables:');
-    console.log(`  LANGFUSE_PUBLIC_KEY: ${process.env.LANGFUSE_PUBLIC_KEY ? '✅ Set' : '❌ Missing'}`);
-    console.log(`  LANGFUSE_SECRET_KEY: ${process.env.LANGFUSE_SECRET_KEY ? '✅ Set' : '❌ Missing'}`);
-    console.log(`  LANGFUSE_HOST: ${process.env.LANGFUSE_HOST || 'Not set'}`);
-    console.log(`  MCP_USE_LANGFUSE: ${process.env.MCP_USE_LANGFUSE || 'Not set'}`);
-    const client = new MCPClient(everythingServer);
-    const llm = new ChatOpenAI({ model: 'gpt-4o', temperature: 0 });
-    const agent = new MCPAgent({
-        llm,
-        client,
-        maxSteps: 30,
-    });
-    // console.log('🔧 Initializing agent...')
-    // await agent.initialize()
-    // Set additional metadata for testing (Optional)
-    agent.setMetadata({
-        agent_id: 'test-agent-123',
-        test_run: true,
-        example: 'mcp_observability',
-    });
-    agent.setTags(['test-tag-1', 'test-tag-2']);
-    console.log('💬 Running agent query...');
-    const result = await agent.run(`Hello, you are a tester can you please answer the follwing questions:
-    - Which resources do you have access to?
-    - Which prompts do you have access to?
-    - Which tools do you have access to?`, 30);
-    console.log(`\n✅ Result: ${result}`);
-    // console.log('🧹 Closing agent...')
-    // await agent.close()
-    // console.log('🎉 Example completed! Check your Langfuse dashboard for traces.')
-}
-if (import.meta.url === `file://${process.argv[1]}`) {
-    main().catch(console.error);
-}
package/dist/examples/stream_example.d.ts
@@ -1,12 +0,0 @@
-/**
- * This example demonstrates how to use the stream method of MCPAgent to get
- * intermediate steps and observe the agent's reasoning process in real-time.
- *
- * The stream method returns an AsyncGenerator that yields AgentStep objects
- * for each intermediate step, and finally returns the complete result.
- *
- * This example also demonstrates the streamEvents method which yields
- * LangChain StreamEvent objects for more granular, token-level streaming.
- */
-export {};
-//# sourceMappingURL=stream_example.d.ts.map
package/dist/examples/stream_example.d.ts.map
@@ -1 +0,0 @@
-{"version":3,"file":"stream_example.d.ts","sourceRoot":"","sources":["../../examples/stream_example.ts"],"names":[],"mappings":"AAAA;;;;;;;;;GASG"}
package/dist/examples/stream_example.js
@@ -1,198 +0,0 @@
-/**
- * This example demonstrates how to use the stream method of MCPAgent to get
- * intermediate steps and observe the agent's reasoning process in real-time.
- *
- * The stream method returns an AsyncGenerator that yields AgentStep objects
- * for each intermediate step, and finally returns the complete result.
- *
- * This example also demonstrates the streamEvents method which yields
- * LangChain StreamEvent objects for more granular, token-level streaming.
- */
-import { ChatAnthropic } from '@langchain/anthropic';
-import { config } from 'dotenv';
-import { MCPAgent, MCPClient } from '../index.js';
-// Load environment variables from .env file
-config();
-const everythingServer = {
-    mcpServers: { everything: { command: 'npx', args: ['-y', '@modelcontextprotocol/server-everything'] } },
-};
-async function streamingExample() {
-    console.log('🚀 Starting streaming example...\n');
-    // Initialize MCP client and agent
-    const client = new MCPClient(everythingServer);
-    const llm = new ChatAnthropic({ model: 'claude-sonnet-4-20250514', temperature: 0 });
-    const agent = new MCPAgent({
-        llm,
-        client,
-        maxSteps: 10,
-        verbose: true,
-    });
-    const query = `Please help me understand what capabilities you have:
-    1. List all available tools
-    2. Try using a few different tools to demonstrate their functionality
-    3. Show me what resources and prompts are available
-    4. Create a simple example to showcase your abilities`;
-    console.log(`📝 Query: ${query}\n`);
-    console.log('🔄 Starting to stream agent steps...\n');
-    try {
-        // Use the stream method to get intermediate steps
-        const stream = agent.stream(query);
-        let stepNumber = 1;
-        // Iterate through the async generator to get intermediate steps
-        for await (const step of stream) {
-            console.log(`\n--- Step ${stepNumber} ---`);
-            console.log(`🔧 Tool: ${step.action.tool}`);
-            console.log(`📥 Input: ${JSON.stringify(step.action.toolInput, null, 2)}`);
-            console.log(`📤 Output: ${step.observation}`);
-            console.log('---\n');
-            stepNumber++;
-        }
-        // The final result is the return value when the generator is done
-        // Note: In the loop above, we don't get the final result directly
-        // To get it, we need to manually handle the generator
-    }
-    catch (error) {
-        console.error('❌ Error during streaming:', error);
-    }
-    console.log('\n🎉 Streaming complete!');
-}
-async function streamingExampleWithFinalResult() {
-    console.log('\n\n🚀 Starting streaming example with final result capture...\n');
-    // Initialize MCP client and agent
-    const client = new MCPClient(everythingServer);
-    const llm = new ChatAnthropic({ model: 'claude-sonnet-4-20250514', temperature: 0 });
-    const agent = new MCPAgent({
-        llm,
-        client,
-        maxSteps: 8,
-        verbose: false, // Less verbose for cleaner output
-    });
-    const query = `What tools do you have access to? Please test 2-3 of them to show me what they can do.`;
-    console.log(`📝 Query: ${query}\n`);
-    console.log('🔄 Processing with intermediate steps...\n');
-    try {
-        // Create the stream generator
-        const stream = agent.stream(query);
-        let stepNumber = 1;
-        let result = '';
-        // Manually iterate through the generator to capture both steps and final result
-        while (true) {
-            const { done, value } = await stream.next();
-            if (done) {
-                // Generator is complete, value contains the final result
-                result = value;
-                break;
-            }
-            else {
-                // value is an AgentStep
-                console.log(`\n🔧 Step ${stepNumber}: ${value.action.tool}`);
-                console.log(`   Input: ${JSON.stringify(value.action.toolInput)}`);
-                console.log(`   Result: ${value.observation.slice(0, 100)}${value.observation.length > 100 ? '...' : ''}`);
-                stepNumber++;
-            }
-        }
-        console.log(`\n${'='.repeat(50)}`);
-        console.log('🎯 FINAL RESULT:');
-        console.log('='.repeat(50));
-        console.log(result);
-    }
-    catch (error) {
-        console.error('❌ Error during streaming:', error);
-    }
-    finally {
-        // Clean up
-        await client.closeAllSessions();
-    }
-    console.log('\n✅ Example complete!');
-}
-async function streamEventsExample() {
-    console.log('\n\n🚀 Starting streamEvents example (token-level streaming)...\n');
-    // Initialize MCP client and agent
-    const client = new MCPClient(everythingServer);
-    const llm = new ChatAnthropic({ model: 'claude-sonnet-4-20250514', temperature: 0 });
-    const agent = new MCPAgent({
-        llm,
-        client,
-        maxSteps: 5,
-        verbose: false,
-    });
-    const query = `What's the current time and date? Also create a simple text file with today's date.`;
-    console.log(`📝 Query: ${query}\n`);
-    console.log('🔄 Streaming fine-grained events...\n');
-    try {
-        // Use streamEvents for token-level streaming
-        const eventStream = agent.streamEvents(query);
-        let eventCount = 0;
-        let currentToolCall = null;
-        for await (const event of eventStream) {
-            eventCount++;
-            // Log different types of events
-            switch (event.event) {
-                case 'on_chain_start':
-                    if (event.name === 'AgentExecutor') {
-                        console.log('🏁 Agent execution started');
-                    }
-                    break;
-                case 'on_tool_start':
-                    currentToolCall = event.name;
-                    console.log(`\n🔧 Tool started: ${event.name}`);
-                    if (event.data?.input) {
-                        console.log(`   Input: ${JSON.stringify(event.data.input)}`);
-                    }
-                    break;
-                case 'on_tool_end':
-                    if (event.name === currentToolCall) {
-                        console.log(`✅ Tool completed: ${event.name}`);
-                        if (event.data?.output) {
-                            const output = typeof event.data.output === 'string'
-                                ? event.data.output
-                                : JSON.stringify(event.data.output);
-                            console.log(`   Output: ${output.slice(0, 100)}${output.length > 100 ? '...' : ''}`);
-                        }
-                        currentToolCall = null;
-                    }
-                    break;
-                case 'on_chat_model_stream':
-                    // This shows token-by-token streaming from the LLM
-                    if (event.data?.chunk?.text) {
-                        const textContent = event.data.chunk.text;
-                        if (typeof textContent === 'string' && textContent.length > 0) {
-                            process.stdout.write(textContent);
-                        }
-                    }
-                    break;
-                case 'on_chain_end':
-                    if (event.name === 'AgentExecutor') {
-                        console.log('\n\n🏁 Agent execution completed');
-                    }
-                    break;
-                // You can handle many more event types:
-                // - on_llm_start, on_llm_end
-                // - on_parser_start, on_parser_end
-                // - on_retriever_start, on_retriever_end
-                // - etc.
-            }
-            // Limit output for demo purposes
-            if (eventCount > 200) {
-                console.log('\n... (truncated for demo)');
-                break;
-            }
-        }
-        console.log(`\n\n📊 Total events emitted: ${eventCount}`);
-    }
-    catch (error) {
-        console.error('❌ Error during event streaming:', error);
-    }
-    finally {
-        await client.closeAllSessions();
-    }
-    console.log('\n✅ StreamEvents example complete!');
-}
-// Run all examples
-async function runAllExamples() {
-    await streamingExample();
-    await streamingExampleWithFinalResult();
-    await streamEventsExample();
-}
-// Run the examples
-runAllExamples().catch(console.error);
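The detour through stream.next() in the second function above is not mcp-use specific: for await...of only sees yielded values and discards an async generator's return value, so the final result has to be pulled out by driving the generator manually. A standalone sketch of just that JavaScript-level pattern:

// `for await...of` drops the return value; call next() yourself to capture it.
async function* demo(): AsyncGenerator<number, string, void> {
    yield 1;
    yield 2;
    return 'final result';
}

const gen = demo();
while (true) {
    const { done, value } = await gen.next();
    if (done) {
        console.log('return value:', value); // 'final result'
        break;
    }
    console.log('yielded:', value); // 1, then 2
}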
package/dist/examples/structured_output.d.ts
@@ -1,9 +0,0 @@
-/**
- * Structured Output Example - City Research with Playwright
- *
- * This example demonstrates intelligent structured output by researching Padova, Italy.
- * The agent becomes schema-aware and will intelligently retry to gather missing
- * information until all required fields can be populated.
- */
-export {};
-//# sourceMappingURL=structured_output.d.ts.map
package/dist/examples/structured_output.d.ts.map
@@ -1 +0,0 @@
-{"version":3,"file":"structured_output.d.ts","sourceRoot":"","sources":["../../examples/structured_output.ts"],"names":[],"mappings":"AAAA;;;;;;GAMG"}
package/dist/examples/structured_output.js
@@ -1,95 +0,0 @@
-/**
- * Structured Output Example - City Research with Playwright
- *
- * This example demonstrates intelligent structured output by researching Padova, Italy.
- * The agent becomes schema-aware and will intelligently retry to gather missing
- * information until all required fields can be populated.
- */
-import { ChatOpenAI } from '@langchain/openai';
-import { config } from 'dotenv';
-import { z } from 'zod';
-import { MCPAgent, MCPClient } from '../index.js';
-// Load environment variables from .env file
-config();
-// Define the structured output schema using Zod
-const CityInfoSchema = z.object({
-    name: z.string().describe('Official name of the city'),
-    country: z.string().describe('Country where the city is located'),
-    region: z.string().describe('Region or state within the country'),
-    population: z.number().describe('Current population count'),
-    area_km2: z.number().describe('Area in square kilometers'),
-    foundation_date: z.string().describe('When the city was founded (approximate year or period)'),
-    mayor: z.string().describe('Current mayor or city leader'),
-    famous_landmarks: z.array(z.string()).describe('List of famous landmarks, monuments, or attractions'),
-    universities: z.array(z.string()).describe('List of major universities or educational institutions'),
-    economy_sectors: z.array(z.string()).describe('Main economic sectors or industries'),
-    sister_cities: z.array(z.string()).describe('Twin cities or sister cities partnerships'),
-    historical_significance: z.string().describe('Brief description of historical importance'),
-    climate_type: z.string().nullable().describe('Type of climate (e.g., Mediterranean, Continental)'),
-    elevation_meters: z.number().nullable().describe('Elevation above sea level in meters'),
-});
-async function main() {
-    const mcpConfig = {
-        mcpServers: {
-            playwright: {
-                command: 'npx',
-                args: ['@playwright/mcp@latest'],
-                env: {
-                    DISPLAY: ':1',
-                },
-            },
-        },
-    };
-    const client = new MCPClient(mcpConfig);
-    const llm = new ChatOpenAI({ model: 'gpt-4o' });
-    const agent = new MCPAgent({ llm, client, maxSteps: 50, memoryEnabled: true });
-    try {
-        // Use structured output with intelligent retry
-        // The agent will:
-        // 1. Know exactly what information it needs to collect
-        // 2. Attempt structured output at finish points
-        // 3. Continue execution if required information is missing
-        // 4. Only finish when all required fields can be populated
-        const result = await agent.run(`
-    Research comprehensive information about the city of Padova (also known as Padua) in Italy.
-
-    Visit multiple reliable sources like Wikipedia, official city websites, tourism sites,
-    and university websites to gather detailed information including demographics, history,
-    governance, education, economy, landmarks, and international relationships.
-    `, 50, // maxSteps
-    true, // manageConnector
-    [], // externalHistory
-    CityInfoSchema);
-        // Now you have strongly-typed, validated data!
-        console.log(`Name: ${result.name}`);
-        console.log(`Country: ${result.country}`);
-        console.log(`Region: ${result.region}`);
-        console.log(`Population: ${result.population.toLocaleString()}`);
-        console.log(`Area: ${result.area_km2} km²`);
-        console.log(`Foundation: ${result.foundation_date}`);
-        console.log(`Mayor: ${result.mayor}`);
-        console.log(`Universities: ${result.universities.join(', ')}`);
-        console.log(`Economy: ${result.economy_sectors.join(', ')}`);
-        console.log(`Landmarks: ${result.famous_landmarks.join(', ')}`);
-        console.log(`Sister Cities: ${result.sister_cities.length > 0 ? result.sister_cities.join(', ') : 'None'}`);
-        console.log(`Historical Significance: ${result.historical_significance}`);
-        if (result.climate_type) {
-            console.log(`Climate: ${result.climate_type}`);
-        }
-        if (result.elevation_meters !== null) {
-            console.log(`Elevation: ${result.elevation_meters} meters`);
-        }
-    }
-    catch (error) {
-        console.error('Error:', error);
-    }
-    finally {
-        await agent.close();
-    }
-}
-// Handle unhandled promise rejections
-process.on('unhandledRejection', (reason, promise) => {
-    console.error('Unhandled Rejection at:', promise, 'reason:', reason);
-    process.exit(1);
-});
-main().catch(console.error);
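As the inline comments in the call above label them, this example passes agent.run positional arguments after the prompt: maxSteps, manageConnector, externalHistory, and finally the Zod schema that drives the schema-aware, structured behavior. A condensed sketch of that call shape, with the schema trimmed to two fields; the signature is inferred from this example rather than from separate API documentation, and the import uses the published package name instead of the in-repo '../index.js':

import { ChatOpenAI } from '@langchain/openai';
import { z } from 'zod';
import { MCPAgent, MCPClient } from 'mcp-use';

const client = new MCPClient({
    mcpServers: { playwright: { command: 'npx', args: ['@playwright/mcp@latest'] } },
});
const agent = new MCPAgent({ llm: new ChatOpenAI({ model: 'gpt-4o' }), client, maxSteps: 50 });

// Trimmed-down schema; the removed example defines a far larger one.
const CitySchema = z.object({
    name: z.string().describe('Official name of the city'),
    population: z.number().describe('Current population count'),
});

// Call shape as used above: run(prompt, maxSteps, manageConnector, externalHistory, outputSchema)
const result = await agent.run('Research the city of Padova, Italy.', 50, true, [], CitySchema);
console.log(`${result.name}: ${result.population.toLocaleString()}`);
await agent.close();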