@mcpjam/inspector 0.9.8 → 0.9.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Binary file
@@ -5,8 +5,8 @@
     <link rel="icon" type="image/svg+xml" href="/mcp_jam.svg" />
     <meta name="viewport" content="width=device-width, initial-scale=1.0" />
     <title>MCPJam Inspector</title>
-    <script type="module" crossorigin src="/assets/index-C_Mzo-Ly.js"></script>
-    <link rel="stylesheet" crossorigin href="/assets/index-CxSz4W4P.css">
+    <script type="module" crossorigin src="/assets/index-hyEDWMYC.js"></script>
+    <link rel="stylesheet" crossorigin href="/assets/index-ChUeFmGd.css">
   </head>
   <body>
     <div id="root"></div>
@@ -5,7 +5,7 @@ import { cors } from "hono/cors";
 import { logger } from "hono/logger";
 import { serveStatic } from "@hono/node-server/serve-static";
 import { readFileSync } from "fs";
-import { join as join2 } from "path";
+import { join } from "path";
 
 // routes/mcp/index.ts
 import { Hono as Hono9 } from "hono";
@@ -540,6 +540,7 @@ import { Hono as Hono6 } from "hono";
 import { Agent } from "@mastra/core/agent";
 import { createAnthropic } from "@ai-sdk/anthropic";
 import { createOpenAI } from "@ai-sdk/openai";
+import { createGoogleGenerativeAI } from "@ai-sdk/google";
 import { createOllama } from "ollama-ai-provider";
 import { TextEncoder as TextEncoder3 } from "util";
 
@@ -552,6 +553,9 @@ function getDefaultTemperatureByProvider(provider) {
       return 1;
     case "anthropic":
       return 0;
+    case "google":
+      return 0.9;
+    // Google's recommended default
     default:
       return 0;
   }
@@ -585,6 +589,8 @@ var createLlmModel = (modelDefinition, apiKey, ollamaBaseUrl) => {
       return createOpenAI({ apiKey, baseURL: "https://api.deepseek.com/v1" })(
         modelDefinition.id
       );
+    case "google":
+      return createGoogleGenerativeAI({ apiKey })(modelDefinition.id);
     case "ollama":
       const baseUrl = ollamaBaseUrl || "http://localhost:11434";
       return createOllama({
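
Note: together with the import and default-temperature hunks above, this adds Google Gemini as a selectable provider. A minimal sketch of how the new branch would be exercised, assuming the switch dispatches on modelDefinition.provider (the model id and env var name are illustrative placeholders, not taken from this diff):

    // Hypothetical call site for the new "google" branch in createLlmModel.
    // "gemini-1.5-pro" and the env var name are placeholders.
    const model = createLlmModel(
      { provider: "google", id: "gemini-1.5-pro" },
      process.env.GOOGLE_GENERATIVE_AI_API_KEY ?? ""
    );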
@@ -775,10 +781,10 @@ var streamAgentResponse = async (streamingContext, stream) => {
   dbg("Streaming finished", { hasContent, chunkCount });
   return { hasContent, chunkCount };
 };
-var fallbackToCompletion = async (agent, messages, streamingContext, provider) => {
+var fallbackToCompletion = async (agent, messages, streamingContext, provider, temperature) => {
   try {
     const result = await agent.generate(messages, {
-      temperature: getDefaultTemperatureByProvider(provider)
+      temperature: temperature == null || void 0 ? getDefaultTemperatureByProvider(provider) : temperature
     });
     if (result.text && result.text.trim()) {
       streamingContext.controller.enqueue(
@@ -806,10 +812,10 @@ var fallbackToCompletion = async (agent, messages, streamingContext, provider) =
     );
   }
 };
-var createStreamingResponse = async (agent, messages, toolsets, streamingContext, provider) => {
+var createStreamingResponse = async (agent, messages, toolsets, streamingContext, provider, temperature) => {
   const stream = await agent.stream(messages, {
     maxSteps: MAX_AGENT_STEPS,
-    temperature: getDefaultTemperatureByProvider(provider),
+    temperature: temperature == null || void 0 ? getDefaultTemperatureByProvider(provider) : temperature,
     toolsets,
     onStepFinish: ({ text, toolCalls, toolResults }) => {
       handleAgentStepFinish(streamingContext, text, toolCalls, toolResults);
@@ -818,7 +824,13 @@ var createStreamingResponse = async (agent, messages, toolsets, streamingContext
   const { hasContent } = await streamAgentResponse(streamingContext, stream);
   if (!hasContent) {
     dbg("No content from textStream; falling back to completion");
-    await fallbackToCompletion(agent, messages, streamingContext, provider);
+    await fallbackToCompletion(
+      agent,
+      messages,
+      streamingContext,
+      provider,
+      temperature
+    );
   }
   streamingContext.controller.enqueue(
     streamingContext.encoder.encode(
@@ -845,6 +857,7 @@ chat.post("/", async (c) => {
     provider,
     apiKey,
     systemPrompt,
+    temperature,
     messages,
     ollamaBaseUrl,
     action,
@@ -1017,7 +1030,8 @@ chat.post("/", async (c) => {
           formattedMessages,
           toolsByServer,
           streamingContext,
-          provider
+          provider,
+          temperature
         );
       } catch (error) {
         controller.enqueue(
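
Note: the hunks above thread an optional per-request temperature from the chat route body down through createStreamingResponse and fallbackToCompletion. The guard `temperature == null || void 0` is effectively `temperature == null` (the `|| void 0` arm is always falsy), so the provider default applies only when the client omits the field. A sketch of a request exercising it; the route path and full body shape are assumptions inferred from the destructured fields in this diff, not confirmed:

    // Hypothetical client call; the path and body shape are assumptions.
    await fetch("/api/mcp/chat", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        provider: "google",
        apiKey: "<key>",
        temperature: 0.2, // omit (or send null) to use the provider default
        messages: [{ role: "user", content: "Hello" }],
      }),
    });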
@@ -1059,8 +1073,6 @@ var chat_default = chat;
 
 // routes/mcp/tests.ts
 import { Hono as Hono7 } from "hono";
-import { writeFile, mkdir } from "fs/promises";
-import { join, dirname } from "path";
 import { createAnthropic as createAnthropic2 } from "@ai-sdk/anthropic";
 import { createOpenAI as createOpenAI2 } from "@ai-sdk/openai";
 import { createOllama as createOllama2 } from "ollama-ai-provider";
@@ -1242,61 +1254,6 @@ function normalizeServerConfigName(serverName) {
 
 // routes/mcp/tests.ts
 var tests = new Hono7();
-tests.post("/generate", async (c) => {
-  try {
-    const body = await c.req.json();
-    const test = body?.test;
-    const servers2 = body?.servers;
-    const model = body?.model;
-    if (!test?.id || !test?.prompt || !servers2 || Object.keys(servers2).length === 0) {
-      return c.json(
-        { success: false, error: "Missing test, servers, or prompt" },
-        400
-      );
-    }
-    const safeName = String(test.title || test.id).toLowerCase().replace(/[^a-z0-9]+/g, "-").replace(/(^-|-$)/g, "");
-    const filename = `@TestAgent_${safeName || test.id}.ts`;
-    const fileContents = `import { Agent } from "@mastra/core/agent";
-import { MCPClient } from "@mastra/mcp";
-import { createAnthropic } from "@ai-sdk/anthropic";
-import { createOpenAI } from "@ai-sdk/openai";
-import { createOllama } from "ollama-ai-provider";
-
-const servers = ${JSON.stringify(servers2, null, 2)} as const;
-
-function createModel() {
-  const def = ${JSON.stringify(model || null)} as any;
-  if (!def) throw new Error("Model not provided by UI when generating test agent");
-  switch (def.provider) {
-    case "anthropic": return createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY! })(def.id);
-    case "openai": return createOpenAI({ apiKey: process.env.OPENAI_API_KEY! })(def.id);
-    case "deepseek": return createOpenAI({ apiKey: process.env.DEEPSEEK_API_KEY!, baseURL: "https://api.deepseek.com/v1" })(def.id);
-    case "ollama": return createOllama({ baseURL: process.env.OLLAMA_BASE_URL || "http://localhost:11434" })(def.id, { simulateStreaming: true });
-    default: throw new Error("Unsupported provider: " + def.provider);
-  }
-}
-
-export const createTestAgent = async () => {
-  const mcp = new MCPClient({ servers });
-  const toolsets = await mcp.getToolsets();
-  return new Agent({
-    name: ${JSON.stringify(test.title || "Test Agent")},
-    instructions: ${JSON.stringify(test.prompt)},
-    model: createModel(),
-    tools: undefined,
-    defaultGenerateOptions: { toolChoice: "auto" }
-  });
-};
-`;
-    const targetPath = join(process.cwd(), "server", "agents", filename);
-    await mkdir(dirname(targetPath), { recursive: true });
-    await writeFile(targetPath, fileContents, "utf8");
-    return c.json({ success: true, file: `server/agents/${filename}` });
-  } catch (err) {
-    const msg = err instanceof Error ? err.message : "Unknown error";
-    return c.json({ success: false, error: msg }, 500);
-  }
-});
 var tests_default = tests;
 tests.post("/run-all", async (c) => {
   const encoder = new TextEncoder();
@@ -1576,6 +1533,17 @@ var MCPJamClientManager = class {
   getServerIdForName(serverName) {
     return this.serverIdMapping.get(serverName);
   }
+  // Public method to get original server name from a unique server ID
+  getOriginalNameForId(uniqueServerId) {
+    for (const [originalName, uid] of this.serverIdMapping.entries()) {
+      if (uid === uniqueServerId) return originalName;
+    }
+    return void 0;
+  }
+  // Convenience: map an array of unique IDs to their original names (fallback to ID if not found)
+  mapIdsToOriginalNames(uniqueIds) {
+    return uniqueIds.map((id) => this.getOriginalNameForId(id) || id);
+  }
   async connectToServer(serverId, serverConfig) {
     const pending = this.pendingConnections.get(serverId);
     if (pending) {
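
Note: the two new methods invert the serverIdMapping (original name → unique ID). A short usage sketch; the manager instance and IDs are hypothetical:

    // Hypothetical: recover user-facing names from unique server IDs;
    // IDs with no mapping pass through unchanged.
    const names = clientManager.mapIdsToOriginalNames([
      "my-server-1a2b",
      "unknown-id",
    ]);
    // => e.g. ["my-server", "unknown-id"]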
@@ -1819,6 +1787,10 @@ var MCPJamClientManager = class {
     for (const args of attempts) {
       try {
         const result = await tool.execute(args);
+        if (result && result.isError) {
+          const errorText = result.content && result.content[0] && result.content[0].text ? result.content[0].text : "Unknown error";
+          throw new Error(errorText);
+        }
         return { result };
       } catch (err) {
         lastError = err;
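
Note: MCP tool calls report failures in-band by setting isError: true on the result rather than throwing, so before this hunk a failed call was returned as a success. The new guard rethrows so the surrounding attempts loop can retry and record lastError. An abbreviated example of the result shape it handles:

    // Abbreviated MCP CallToolResult carrying an in-band error.
    const result = {
      isError: true,
      content: [{ type: "text", text: "division by zero" }],
    };
    // The added check converts this into: throw new Error("division by zero")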
@@ -1979,7 +1951,7 @@ if (process.env.NODE_ENV === "production") {
     if (path.startsWith("/api/")) {
       return c.notFound();
     }
-    const indexPath = join2(process.cwd(), "dist", "client", "index.html");
+    const indexPath = join(process.cwd(), "dist", "client", "index.html");
     let htmlContent = readFileSync(indexPath, "utf-8");
     const mcpConfig = getMCPConfigFromEnv();
     if (mcpConfig) {