@mcpjam/inspector 1.0.2 → 1.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/client/index.html
CHANGED

@@ -5,7 +5,7 @@
     <link rel="icon" type="image/svg+xml" href="/mcp_jam.svg" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <title>MCPJam Inspector</title>
-    <script type="module" crossorigin src="/assets/index-
+    <script type="module" crossorigin src="/assets/index-BtfU0Trt.js"></script>
     <link rel="stylesheet" crossorigin href="/assets/index-D5Niv-PI.css">
   </head>
   <body>
package/dist/server/index.js
CHANGED

@@ -543,9 +543,10 @@ resources.get("/widget-content", async (c) => {
       400
     );
   }
-  const
-
-  );
+  const base64Decoded = Buffer.from(widgetData, "base64").toString("binary");
+  const percentEncoded = base64Decoded.split("").map((c2) => "%" + ("00" + c2.charCodeAt(0).toString(16)).slice(-2)).join("");
+  const jsonString = decodeURIComponent(percentEncoded);
+  const { serverId, uri, toolInput, toolOutput, toolId } = JSON.parse(jsonString);
   const mcpClientManager = c.mcpJamClientManager;
   const connectedServers = mcpClientManager.getConnectedServers();
   let actualServerId = serverId;
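For context, the added lines are the standard UTF-8-safe base64-to-JSON round trip: decode base64 into a byte-per-character binary string, percent-encode each byte, then let decodeURIComponent reassemble multi-byte UTF-8 sequences before JSON.parse. A minimal standalone sketch of the same technique; the widgetData name comes from the diff, while the sample payload and other names are illustrative:

// Encode side: JSON -> UTF-8 bytes -> base64 (what a client would send; illustrative)
const payload = { serverId: "demo", uri: "ui://widget", toolInput: {}, toolOutput: null, toolId: "t1" };
const widgetData = Buffer.from(JSON.stringify(payload), "utf8").toString("base64");

// Decode side: the same chain the diff adds
const binary = Buffer.from(widgetData, "base64").toString("binary"); // one char per byte
const percent = binary
  .split("")
  .map((ch) => "%" + ("00" + ch.charCodeAt(0).toString(16)).slice(-2))
  .join("");
const decoded = JSON.parse(decodeURIComponent(percent)); // UTF-8 reassembled here
console.log(decoded.serverId); // "demo"

In Node, Buffer.from(widgetData, "base64").toString("utf8") would decode UTF-8 directly; the percent-encoding detour mirrors the browser atob pattern and produces the same result here.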
@@ -1317,6 +1318,8 @@ var handleAgentStepFinish = (streamingContext, text, toolCalls, toolResults, emi
   for (const call of toolCalls) {
     const currentToolCallId = ++streamingContext.toolCallId;
     streamingContext.lastEmittedToolCallId = currentToolCallId;
+    const toolName = call.name || call.toolName;
+    streamingContext.toolCallIdToName.set(currentToolCallId, toolName);
     if (streamingContext.controller && streamingContext.encoder) {
       sendSseEvent(
         streamingContext.controller,
@@ -1325,7 +1328,7 @@ var handleAgentStepFinish = (streamingContext, text, toolCalls, toolResults, emi
           type: "tool_call",
           toolCall: {
             id: currentToolCallId,
-            name:
+            name: toolName,
             parameters: call.params || call.args || {},
             timestamp: (/* @__PURE__ */ new Date()).toISOString(),
             status: "executing"
@@ -1378,7 +1381,11 @@ var handleAgentStepFinish = (streamingContext, text, toolCalls, toolResults, emi
   } catch {
   }
 };
-var createStreamingResponse = async (model, aiSdkTools, messages, streamingContext, provider, temperature, systemPrompt) => {
+var createStreamingResponse = async (model, aiSdkTools, messages, streamingContext, provider, toolsWithServerId, temperature, systemPrompt) => {
+  const extractServerId = (toolName) => {
+    const tool2 = toolsWithServerId[toolName];
+    return tool2?._serverId;
+  };
   const messageHistory = (messages || []).map((m) => {
     switch (m.role) {
       case "system":
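The new toolsWithServerId parameter is a flat map from tool name to tool definition, where each definition has been tagged with a private _serverId field by the flattening loop later in this diff, so extractServerId reduces to a property lookup. A hedged sketch of the resolution chain, with illustrative server and tool names:

// Illustrative: tool call id -> tool name -> server id
const toolsWithServerId = {
  run_query: { description: "Run a query", _serverId: "db-server" } // tagged upstream
};
const extractServerId = (toolName) => toolsWithServerId[toolName]?._serverId;

const toolCallIdToName = new Map();
toolCallIdToName.set(1, "run_query");      // recorded when the tool_call is emitted

const toolName = toolCallIdToName.get(1);  // recovered when the tool_result arrives
const serverId = toolName ? extractServerId(toolName) : undefined;
console.log(serverId); // "db-server"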
@@ -1428,6 +1435,7 @@ var createStreamingResponse = async (model, aiSdkTools, messages, streamingConte
         streamingContext.lastEmittedToolCallId = currentToolCallId;
         const name = chunk.chunk.toolName || chunk.chunk.name;
         const parameters = chunk.chunk.input ?? chunk.chunk.parameters ?? chunk.chunk.args ?? {};
+        streamingContext.toolCallIdToName.set(currentToolCallId, name);
         iterationToolCalls.push({ name, params: parameters });
         sendSseEvent(
           streamingContext.controller,
@@ -1447,7 +1455,9 @@ var createStreamingResponse = async (model, aiSdkTools, messages, streamingConte
       }
       case "tool-result": {
         const result = chunk.chunk.output ?? chunk.chunk.result ?? chunk.chunk.value;
-        const currentToolCallId = streamingContext.lastEmittedToolCallId
+        const currentToolCallId = streamingContext.lastEmittedToolCallId ?? ++streamingContext.toolCallId;
+        const toolName = streamingContext.toolCallIdToName.get(currentToolCallId);
+        const serverId = toolName ? extractServerId(toolName) : void 0;
         iterationToolResults.push({ result });
         sendSseEvent(
           streamingContext.controller,
@@ -1458,7 +1468,8 @@ var createStreamingResponse = async (model, aiSdkTools, messages, streamingConte
             id: currentToolCallId,
             toolCallId: currentToolCallId,
             result,
-            timestamp: (/* @__PURE__ */ new Date()).toISOString()
+            timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+            serverId
           }
         }
       );
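With the map recorded at tool_call time and looked up at tool-result time, each streamed tool_result event can now name the MCP server that produced it. The surrounding event envelope is not shown in the diff, so the sketch below reproduces only the inner payload fields visible above; the values are made up:

// Illustrative inner payload of a "tool_result" SSE event after this change
const toolResultPayload = {
  id: 3,                               // sequential tool call id
  toolCallId: 3,
  result: { ok: true },                // whatever the tool returned
  timestamp: new Date().toISOString(),
  serverId: "docs-server"              // new field; undefined if the id was never mapped to a name
};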
@@ -1485,6 +1496,8 @@ var createStreamingResponse = async (model, aiSdkTools, messages, streamingConte
     if (m.role === "tool") {
       const currentToolCallId = streamingContext.lastEmittedToolCallId != null ? streamingContext.lastEmittedToolCallId : ++streamingContext.toolCallId;
       const value = m.content;
+      const toolName = streamingContext.toolCallIdToName.get(currentToolCallId);
+      const serverId = toolName ? extractServerId(toolName) : void 0;
       iterationToolResults.push({ result: value });
       sendSseEvent(streamingContext.controller, streamingContext.encoder, {
         type: "tool_result",
@@ -1492,7 +1505,8 @@ var createStreamingResponse = async (model, aiSdkTools, messages, streamingConte
         id: currentToolCallId,
         toolCallId: currentToolCallId,
         result: value,
-        timestamp: (/* @__PURE__ */ new Date()).toISOString()
+        timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+        serverId
       }
     });
   }
@@ -1542,6 +1556,7 @@ var sendMessagesToBackend = async (messages, streamingContext, mcpClientManager,
   const emitToolCall = (call) => {
     const currentToolCallId = ++streamingContext.toolCallId;
     streamingContext.lastEmittedToolCallId = currentToolCallId;
+    streamingContext.toolCallIdToName.set(currentToolCallId, call.name);
     sendSseEvent(streamingContext.controller, streamingContext.encoder, {
       type: "tool_call",
       toolCall: {
@@ -1705,7 +1720,8 @@ chat.post("/", async (c) => {
     encoder,
     toolCallId: 0,
     lastEmittedToolCallId: null,
-    stepIndex: 0
+    stepIndex: 0,
+    toolCallIdToName: /* @__PURE__ */ new Map()
   };
   mcpClientManager.setElicitationCallback(async (request) => {
     const elicitationRequest = {
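The streamingContext object now carries the id-to-name map from the moment it is created. A sketch of the context shape as initialized here, limited to the fields this diff actually shows (others may exist):

// streamingContext as visible in this diff
const streamingContext = {
  controller: null,             // SSE stream controller, set when streaming starts
  encoder: null,                // TextEncoder for SSE frames
  toolCallId: 0,                // monotonically increasing counter
  lastEmittedToolCallId: null,  // id of the most recently emitted tool_call
  stepIndex: 0,
  toolCallIdToName: new Map()   // new in 1.0.3: tool call id -> tool name
};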
@@ -1753,10 +1769,23 @@ chat.post("/", async (c) => {
       requestData.selectedServers
     );
   } else {
-    const
+    const toolsets = await mcpClientManager.getToolsetsWithServerIds(
       requestData.selectedServers
     );
-    const
+    const flatToolsWithServerId = {};
+    for (const [serverId, serverTools] of Object.entries(
+      toolsets || {}
+    )) {
+      if (serverTools && typeof serverTools === "object") {
+        for (const [toolName, tool2] of Object.entries(serverTools)) {
+          flatToolsWithServerId[toolName] = {
+            ...tool2,
+            _serverId: serverId
+          };
+        }
+      }
+    }
+    const aiSdkTools = convertMastraToolsToVercelTools(flatToolsWithServerId);
     const llmModel = createLlmModel(
       model,
       apiKey || "",
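This loop turns the per-server toolsets map into a single map keyed by tool name, tagging each tool with its owning server so extractServerId can recover it later. A hedged sketch of the transformation with illustrative server and tool names:

// Input shape per this diff: toolsets keyed by server id (contents illustrative)
const toolsets = {
  "docs-server": { search_docs: { description: "Search docs" } },
  "db-server": { run_query: { description: "Run a query" } }
};

// Same flattening as the diff: copy each tool and tag its origin
const flat = {};
for (const [serverId, serverTools] of Object.entries(toolsets || {})) {
  if (serverTools && typeof serverTools === "object") {
    for (const [toolName, tool] of Object.entries(serverTools)) {
      flat[toolName] = { ...tool, _serverId: serverId };
    }
  }
}
// flat.run_query -> { description: "Run a query", _serverId: "db-server" }

One caveat of keying the flat map by tool name: if two selected servers expose a tool with the same name, the later entry overwrites the earlier one, so its results would be attributed to the last server enumerated.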
@@ -1768,6 +1797,7 @@ chat.post("/", async (c) => {
     messages,
     streamingContext,
     provider,
+    flatToolsWithServerId,
     temperature,
     systemPrompt
   );