@mcpjam/inspector 1.0.16 → 1.0.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -48,7 +48,7 @@ import { readFileSync as readFileSync2, existsSync as existsSync2 } from "fs";
  import { join as join2, dirname, resolve } from "path";
  import { fileURLToPath } from "url";

- // ../sdk/dist/chunk-6XEFXCUG.js
+ // ../sdk/dist/index.js
  import { Client } from "@modelcontextprotocol/sdk/client/index.js";
  import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js";
  import {
@@ -152,11 +152,12 @@ var MCPClientManager = class {
  this.toolsMetadataCache = /* @__PURE__ */ new Map();
  this.defaultLogJsonRpc = false;
  this.pendingElicitations = /* @__PURE__ */ new Map();
- var _a2, _b2, _c, _d;
+ var _a2, _b2, _c, _d, _e;
  this.defaultClientVersion = (_a2 = options.defaultClientVersion) != null ? _a2 : "1.0.0";
- this.defaultCapabilities = { ...(_b2 = options.defaultCapabilities) != null ? _b2 : {} };
- this.defaultTimeout = (_c = options.defaultTimeout) != null ? _c : DEFAULT_REQUEST_TIMEOUT_MSEC;
- this.defaultLogJsonRpc = (_d = options.defaultLogJsonRpc) != null ? _d : false;
+ this.defaultClientName = (_b2 = options.defaultClientName) != null ? _b2 : void 0;
+ this.defaultCapabilities = { ...(_c = options.defaultCapabilities) != null ? _c : {} };
+ this.defaultTimeout = (_d = options.defaultTimeout) != null ? _d : DEFAULT_REQUEST_TIMEOUT_MSEC;
+ this.defaultLogJsonRpc = (_e = options.defaultLogJsonRpc) != null ? _e : false;
  this.defaultRpcLogger = options.rpcLogger;
  for (const [id, config] of Object.entries(servers2)) {
  void this.connectToServer(id, config);
@@ -206,7 +207,7 @@ var MCPClientManager = class {
  var _a22;
  const client = new Client(
  {
- name: serverId,
+ name: this.defaultClientName ? `${this.defaultClientName}` : serverId,
  version: (_a22 = config.version) != null ? _a22 : this.defaultClientVersion
  },
  {
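
Taken together, the two hunks above add a defaultClientName option to MCPClientManager: when set, every MCP client identifies itself to servers under that one name instead of falling back to the per-server serverId. A minimal sketch of how a caller might pass it; the options-bag shape is inferred from this diff, not from documented typings:

    // Sketch only: option names come from the diff above; the constructor shape is inferred.
    const servers = {}; // serverId -> connection config
    const manager = new MCPClientManager(servers, {
      defaultClientName: "mcpjam-inspector", // used as the MCP client `name` for every server
      defaultClientVersion: "1.0.0",
    });
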
@@ -1461,7 +1462,14 @@ resources.post("/read", async (c) => {
  resources.post("/widget/store", async (c) => {
  try {
  const body = await c.req.json();
- const { serverId, uri, toolInput, toolOutput, toolResponseMetadata, toolId } = body;
+ const {
+ serverId,
+ uri,
+ toolInput,
+ toolOutput,
+ toolResponseMetadata,
+ toolId
+ } = body;
  if (!serverId || !uri || !toolId) {
  return c.json({ success: false, error: "Missing required fields" }, 400);
  }
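
The reworked destructuring above is formatting-only; the handler contract is unchanged: serverId, uri, and toolId are required, and the three tool* fields are optional. A hedged request sketch (the route's mount prefix is an assumption; only the /widget/store suffix appears in this diff):

    // Hypothetical client call against the handler above.
    await fetch("/resources/widget/store", {
      method: "POST",
      headers: { "content-type": "application/json" },
      body: JSON.stringify({
        serverId: "my-server",      // required
        uri: "ui://widget/chart",   // required
        toolId: "tc_123",           // required
        toolInput: {},              // optional
        toolOutput: {},             // optional
        toolResponseMetadata: {},   // optional
      }),
    });
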
@@ -1849,11 +1857,238 @@ var MCPJAM_PROVIDED_MODEL_IDS = [
  "meta-llama/llama-3.3-70b-instruct",
  "openai/gpt-oss-120b",
  "x-ai/grok-4-fast",
- "openai/gpt-5-nano"
+ "openai/gpt-5-nano",
+ "anthropic/claude-sonnet-4.5",
+ "anthropic/claude-haiku-4.5",
+ "openai/gpt-5-codex",
+ "openai/gpt-5",
+ "openai/gpt-5-mini",
+ "google/gemini-2.5-flash-preview-09-2025",
+ "moonshotai/kimi-k2-0905",
+ "google/gemini-2.5-flash",
+ "z-ai/glm-4.6"
  ];
  var isMCPJamProvidedModel = (modelId) => {
  return MCPJAM_PROVIDED_MODEL_IDS.includes(modelId);
  };
+ var Model = /* @__PURE__ */ ((Model2) => {
+ Model2["CLAUDE_OPUS_4_0"] = "claude-opus-4-0";
+ Model2["CLAUDE_SONNET_4_0"] = "claude-sonnet-4-0";
+ Model2["CLAUDE_3_7_SONNET_LATEST"] = "claude-3-7-sonnet-latest";
+ Model2["CLAUDE_3_5_SONNET_LATEST"] = "claude-3-5-sonnet-latest";
+ Model2["CLAUDE_3_5_HAIKU_LATEST"] = "claude-3-5-haiku-latest";
+ Model2["GPT_4_1"] = "gpt-4.1";
+ Model2["GPT_4_1_MINI"] = "gpt-4.1-mini";
+ Model2["GPT_4_1_NANO"] = "gpt-4.1-nano";
+ Model2["GPT_4O"] = "gpt-4o";
+ Model2["GPT_4O_MINI"] = "gpt-4o-mini";
+ Model2["GPT_4_TURBO"] = "gpt-4-turbo";
+ Model2["GPT_4"] = "gpt-4";
+ Model2["GPT_5"] = "gpt-5";
+ Model2["GPT_5_MINI"] = "gpt-5-mini";
+ Model2["GPT_5_NANO"] = "gpt-5-nano";
+ Model2["GPT_5_MAIN"] = "openai/gpt-5";
+ Model2["GPT_5_PRO"] = "gpt-5-pro";
+ Model2["GPT_5_CODEX"] = "gpt-5-codex";
+ Model2["GPT_3_5_TURBO"] = "gpt-3.5-turbo";
+ Model2["DEEPSEEK_CHAT"] = "deepseek-chat";
+ Model2["DEEPSEEK_REASONER"] = "deepseek-reasoner";
+ Model2["GEMINI_2_5_PRO"] = "gemini-2.5-pro";
+ Model2["GEMINI_2_5_FLASH"] = "gemini-2.5-flash";
+ Model2["GEMINI_2_5_FLASH_LITE"] = "gemini-2.5-flash-lite";
+ Model2["GEMINI_2_0_FLASH_EXP"] = "gemini-2.0-flash-exp";
+ Model2["GEMINI_1_5_PRO"] = "gemini-1.5-pro";
+ Model2["GEMINI_1_5_PRO_002"] = "gemini-1.5-pro-002";
+ Model2["GEMINI_1_5_FLASH"] = "gemini-1.5-flash";
+ Model2["GEMINI_1_5_FLASH_002"] = "gemini-1.5-flash-002";
+ Model2["GEMINI_1_5_FLASH_8B"] = "gemini-1.5-flash-8b";
+ Model2["GEMINI_1_5_FLASH_8B_001"] = "gemini-1.5-flash-8b-001";
+ Model2["GEMMA_3_2B"] = "gemma-3-2b";
+ Model2["GEMMA_3_9B"] = "gemma-3-9b";
+ Model2["GEMMA_3_27B"] = "gemma-3-27b";
+ Model2["GEMMA_2_2B"] = "gemma-2-2b";
+ Model2["GEMMA_2_9B"] = "gemma-2-9b";
+ Model2["GEMMA_2_27B"] = "gemma-2-27b";
+ Model2["CODE_GEMMA_2B"] = "codegemma-2b";
+ Model2["CODE_GEMMA_7B"] = "codegemma-7b";
+ Model2["MISTRAL_LARGE_LATEST"] = "mistral-large-latest";
+ Model2["MISTRAL_SMALL_LATEST"] = "mistral-small-latest";
+ Model2["CODESTRAL_LATEST"] = "codestral-latest";
+ Model2["MINISTRAL_8B_LATEST"] = "ministral-8b-latest";
+ Model2["MINISTRAL_3B_LATEST"] = "ministral-3b-latest";
+ return Model2;
+ })(Model || {});
+ var SUPPORTED_MODELS = [
+ {
+ id: "claude-opus-4-0" /* CLAUDE_OPUS_4_0 */,
+ name: "Claude Opus 4",
+ provider: "anthropic"
+ },
+ {
+ id: "claude-sonnet-4-0" /* CLAUDE_SONNET_4_0 */,
+ name: "Claude Sonnet 4",
+ provider: "anthropic"
+ },
+ {
+ id: "claude-3-7-sonnet-latest" /* CLAUDE_3_7_SONNET_LATEST */,
+ name: "Claude Sonnet 3.7",
+ provider: "anthropic"
+ },
+ {
+ id: "claude-3-5-sonnet-latest" /* CLAUDE_3_5_SONNET_LATEST */,
+ name: "Claude Sonnet 3.5",
+ provider: "anthropic"
+ },
+ {
+ id: "claude-3-5-haiku-latest" /* CLAUDE_3_5_HAIKU_LATEST */,
+ name: "Claude Haiku 3.5",
+ provider: "anthropic"
+ },
+ { id: "gpt-5" /* GPT_5 */, name: "GPT-5", provider: "openai" },
+ { id: "gpt-5-mini" /* GPT_5_MINI */, name: "GPT-5 Mini", provider: "openai" },
+ { id: "gpt-5-nano" /* GPT_5_NANO */, name: "GPT-5 Nano", provider: "openai" },
+ { id: Model.GPT_5_CHAT_LATEST, name: "GPT-5 Chat", provider: "openai" },
+ { id: "gpt-5-pro" /* GPT_5_PRO */, name: "GPT-5 Pro", provider: "openai" },
+ { id: "gpt-5-codex" /* GPT_5_CODEX */, name: "GPT-5 Codex", provider: "openai" },
+ { id: "gpt-4.1" /* GPT_4_1 */, name: "GPT-4.1", provider: "openai" },
+ { id: "gpt-4.1-mini" /* GPT_4_1_MINI */, name: "GPT-4.1 Mini", provider: "openai" },
+ { id: "gpt-4.1-nano" /* GPT_4_1_NANO */, name: "GPT-4.1 Nano", provider: "openai" },
+ { id: "gpt-4o" /* GPT_4O */, name: "GPT-4o", provider: "openai" },
+ { id: "gpt-4o-mini" /* GPT_4O_MINI */, name: "GPT-4o Mini", provider: "openai" },
+ { id: "deepseek-chat" /* DEEPSEEK_CHAT */, name: "DeepSeek Chat", provider: "deepseek" },
+ {
+ id: "deepseek-reasoner" /* DEEPSEEK_REASONER */,
+ name: "DeepSeek Reasoner",
+ provider: "deepseek"
+ },
+ // Google Gemini models (latest first)
+ {
+ id: "gemini-2.5-pro" /* GEMINI_2_5_PRO */,
+ name: "Gemini 2.5 Pro",
+ provider: "google"
+ },
+ {
+ id: "gemini-2.5-flash" /* GEMINI_2_5_FLASH */,
+ name: "Gemini 2.5 Flash",
+ provider: "google"
+ },
+ {
+ id: "gemini-2.0-flash-exp" /* GEMINI_2_0_FLASH_EXP */,
+ name: "Gemini 2.0 Flash Experimental",
+ provider: "google"
+ },
+ {
+ id: "gemini-1.5-pro-002" /* GEMINI_1_5_PRO_002 */,
+ name: "Gemini 1.5 Pro 002",
+ provider: "google"
+ },
+ {
+ id: "gemini-1.5-pro" /* GEMINI_1_5_PRO */,
+ name: "Gemini 1.5 Pro",
+ provider: "google"
+ },
+ {
+ id: "gemini-1.5-flash-002" /* GEMINI_1_5_FLASH_002 */,
+ name: "Gemini 1.5 Flash 002",
+ provider: "google"
+ },
+ {
+ id: "gemini-1.5-flash" /* GEMINI_1_5_FLASH */,
+ name: "Gemini 1.5 Flash",
+ provider: "google"
+ },
+ {
+ id: "meta-llama/llama-3.3-70b-instruct",
+ name: "Llama 3.3 70B (Free)",
+ provider: "meta"
+ },
+ {
+ id: "openai/gpt-oss-120b",
+ name: "GPT-OSS 120B (Free)",
+ provider: "openai"
+ },
+ {
+ id: "x-ai/grok-4-fast",
+ name: "Grok 4 Fast (Free)",
+ provider: "x-ai"
+ },
+ {
+ id: "openai/gpt-5-nano",
+ name: "GPT-5 Nano (Free)",
+ provider: "openai"
+ },
+ {
+ id: "anthropic/claude-sonnet-4.5",
+ name: "Claude Sonnet 4.5 (Free)",
+ provider: "anthropic"
+ },
+ {
+ id: "anthropic/claude-haiku-4.5",
+ name: "Claude Haiku 4.5 (Free)",
+ provider: "anthropic"
+ },
+ {
+ id: "openai/gpt-5-codex",
+ name: "GPT-5 Codex (Free)",
+ provider: "openai"
+ },
+ {
+ id: "openai/gpt-5",
+ name: "GPT-5 (Free)",
+ provider: "openai"
+ },
+ {
+ id: "openai/gpt-5-mini",
+ name: "GPT-5 Mini (Free)",
+ provider: "openai"
+ },
+ {
+ id: "google/gemini-2.5-flash-preview-09-2025",
+ name: "Gemini 2.5 Flash Preview (Free)",
+ provider: "google"
+ },
+ {
+ id: "moonshotai/kimi-k2-0905",
+ name: "Kimi K2 (Free)",
+ provider: "moonshotai"
+ },
+ {
+ id: "google/gemini-2.5-flash",
+ name: "Gemini 2.5 Flash (Free)",
+ provider: "google"
+ },
+ {
+ id: "z-ai/glm-4.6",
+ name: "GLM 4.6 (Free)",
+ provider: "z-ai"
+ },
+ // Mistral models
+ {
+ id: "mistral-large-latest" /* MISTRAL_LARGE_LATEST */,
+ name: "Mistral Large",
+ provider: "mistral"
+ },
+ {
+ id: "mistral-small-latest" /* MISTRAL_SMALL_LATEST */,
+ name: "Mistral Small",
+ provider: "mistral"
+ },
+ {
+ id: "codestral-latest" /* CODESTRAL_LATEST */,
+ name: "Codestral",
+ provider: "mistral"
+ },
+ {
+ id: "ministral-8b-latest" /* MINISTRAL_8B_LATEST */,
+ name: "Ministral 8B",
+ provider: "mistral"
+ },
+ {
+ id: "ministral-3b-latest" /* MINISTRAL_3B_LATEST */,
+ name: "Ministral 3B",
+ provider: "mistral"
+ }
+ ];

  // routes/mcp/chat.ts
  import { TextEncoder as TextEncoder2 } from "util";
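
This hunk expands the free MCPJam-provided list and introduces a Model enum plus a SUPPORTED_MODELS catalog. Note that the "GPT-5 Chat" entry references Model.GPT_5_CHAT_LATEST, which the enum above never defines, so that entry ships with an undefined id in 1.0.18. A small sketch of how the two exports compose (the grouping logic is illustrative, not from the package):

    // Illustrative only: split the catalog into free vs. bring-your-own-key entries.
    const freeModels = SUPPORTED_MODELS.filter((m) => isMCPJamProvidedModel(m.id));
    const byokModels = SUPPORTED_MODELS.filter((m) => !isMCPJamProvidedModel(m.id));
    console.log(freeModels.map((m) => m.name)); // "Llama 3.3 70B (Free)", "GPT-5 (Free)", ...
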
@@ -1870,6 +2105,9 @@ function getDefaultTemperatureByProvider(provider) {
  case "google":
  return 0.9;
  // Google's recommended default
+ case "mistral":
+ return 0.7;
+ // Mistral's recommended default
  default:
  return 0;
  }
@@ -1879,6 +2117,7 @@ function getDefaultTemperatureByProvider(provider) {
  import { createAnthropic } from "@ai-sdk/anthropic";
  import { createDeepSeek } from "@ai-sdk/deepseek";
  import { createGoogleGenerativeAI } from "@ai-sdk/google";
+ import { createMistral } from "@ai-sdk/mistral";
  import { createOpenAI } from "@ai-sdk/openai";
  import { createOllama } from "ollama-ai-provider-v2";
  var createLlmModel = (modelDefinition, apiKey, ollamaBaseUrl, litellmBaseUrl) => {
@@ -1901,6 +2140,8 @@ var createLlmModel = (modelDefinition, apiKey, ollamaBaseUrl, litellmBaseUrl) =>
  const normalized = /\/api\/?$/.test(raw) ? raw : `${raw.replace(/\/+$/, "")}/api`;
  return createOllama({ baseURL: normalized })(modelDefinition.id);
  }
+ case "mistral":
+ return createMistral({ apiKey })(modelDefinition.id);
  case "litellm": {
  const baseURL = litellmBaseUrl || "http://localhost:4000";
  const openai = createOpenAI({
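
The new mistral branch follows the same provider pattern as the others: createMistral({ apiKey }) builds a provider, which is immediately called with the model id. Standalone, with a model id from the catalog above (API-key handling is assumed):

    import { createMistral } from "@ai-sdk/mistral";

    // Same call shape as the new `case "mistral"` above, shown in isolation.
    const mistral = createMistral({ apiKey: process.env.MISTRAL_API_KEY });
    const model = mistral("mistral-large-latest"); // MISTRAL_LARGE_LATEST in the enum above
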
@@ -2201,6 +2442,10 @@ var handleAgentStepFinish = (streamingContext, text, toolCalls, toolResults, emi
  streamingContext.lastEmittedToolCallId = currentToolCallId;
  const toolName = call.name || call.toolName;
  streamingContext.toolCallIdToName.set(currentToolCallId, toolName);
+ if (!streamingContext.toolNameToCallIds.has(toolName)) {
+ streamingContext.toolNameToCallIds.set(toolName, []);
+ }
+ streamingContext.toolNameToCallIds.get(toolName).push(currentToolCallId);
  if (streamingContext.controller && streamingContext.encoder) {
  sendSseEvent(
  streamingContext.controller,
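
toolNameToCallIds is the new piece of streaming state threaded through the rest of this diff: a FIFO queue of pending call ids per tool name, so a later result can be matched to the right call even when the same tool is invoked several times in one step. The bookkeeping, reduced to its essence (names illustrative, not package code):

    // Register each emitted call; match each result to the oldest pending call.
    const toolNameToCallIds = new Map<string, string[]>();

    function registerCall(toolName: string, callId: string): void {
      if (!toolNameToCallIds.has(toolName)) toolNameToCallIds.set(toolName, []);
      toolNameToCallIds.get(toolName)!.push(callId);
    }

    function matchResult(toolName: string): string | undefined {
      return toolNameToCallIds.get(toolName)?.shift(); // FIFO: oldest call wins
    }
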
@@ -2279,88 +2524,181 @@ var createStreamingResponse = async (model, aiSdkTools, messages, streamingConte
  }
  });
  let steps = 0;
+ let hadError = false;
  while (steps < MAX_AGENT_STEPS) {
  let accumulatedText = "";
  const iterationToolCalls = [];
  const iterationToolResults = [];
- const streamResult = await streamText({
+ let streamResult;
+ let hadStreamError = false;
+ let streamErrorMessage = "";
+ let response = null;
+ const extractErrorMessage = (error) => {
+ if (error.error && typeof error.error === "object") {
+ const apiError = error.error;
+ if (apiError.data?.error?.message) return apiError.data.error.message;
+ if (apiError.responseBody) {
+ try {
+ const parsed = JSON.parse(apiError.responseBody);
+ if (parsed.error?.message) return parsed.error.message;
+ } catch {
+ }
+ }
+ if (apiError.message) return apiError.message;
+ }
+ if (error.error instanceof Error) return error.error.message;
+ return String(error.error || error.message || "Unknown error occurred");
+ };
+ streamResult = streamText({
  model,
  system: systemPrompt || "You are a helpful assistant with access to MCP tools.",
  temperature: temperature ?? getDefaultTemperatureByProvider(provider),
  tools: aiSdkTools,
  messages: messageHistory,
+ onError: (error) => {
+ hadStreamError = true;
+ streamErrorMessage = extractErrorMessage(error);
+ },
  onChunk: async (chunk) => {
- switch (chunk.chunk.type) {
- case "text-delta":
- case "reasoning-delta": {
- const text = chunk.chunk.text;
- if (text) {
- accumulatedText += text;
+ try {
+ switch (chunk.chunk.type) {
+ case "text-delta":
+ case "reasoning-delta": {
+ const text = chunk.chunk.text;
+ if (text) {
+ accumulatedText += text;
+ sendSseEvent(
+ streamingContext.controller,
+ streamingContext.encoder,
+ {
+ type: "text",
+ content: text
+ }
+ );
+ }
+ break;
+ }
+ case "tool-input-start": {
+ break;
+ }
+ case "tool-call": {
+ const currentToolCallId = `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+ streamingContext.lastEmittedToolCallId = currentToolCallId;
+ const name = chunk.chunk.toolName || chunk.chunk.name;
+ const parameters = chunk.chunk.input ?? chunk.chunk.parameters ?? chunk.chunk.args ?? {};
+ streamingContext.toolCallIdToName.set(currentToolCallId, name);
+ if (!streamingContext.toolNameToCallIds.has(name)) {
+ streamingContext.toolNameToCallIds.set(name, []);
+ }
+ streamingContext.toolNameToCallIds.get(name).push(currentToolCallId);
+ iterationToolCalls.push({ name, params: parameters });
  sendSseEvent(
  streamingContext.controller,
  streamingContext.encoder,
  {
- type: "text",
- content: text
+ type: "tool_call",
+ toolCall: {
+ id: currentToolCallId,
+ name,
+ parameters,
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+ status: "executing"
+ }
  }
  );
+ break;
  }
- break;
- }
- case "tool-input-start": {
- break;
- }
- case "tool-call": {
- const currentToolCallId = `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
- streamingContext.lastEmittedToolCallId = currentToolCallId;
- const name = chunk.chunk.toolName || chunk.chunk.name;
- const parameters = chunk.chunk.input ?? chunk.chunk.parameters ?? chunk.chunk.args ?? {};
- streamingContext.toolCallIdToName.set(currentToolCallId, name);
- iterationToolCalls.push({ name, params: parameters });
- sendSseEvent(
- streamingContext.controller,
- streamingContext.encoder,
- {
- type: "tool_call",
- toolCall: {
- id: currentToolCallId,
- name,
- parameters,
- timestamp: (/* @__PURE__ */ new Date()).toISOString(),
- status: "executing"
+ case "tool-result": {
+ const result = chunk.chunk.output ?? chunk.chunk.result ?? chunk.chunk.value;
+ const toolName = chunk.chunk.toolName || chunk.chunk.name || null;
+ let currentToolCallId = chunk.chunk.toolCallId || void 0;
+ if (!currentToolCallId && toolName) {
+ const queue = streamingContext.toolNameToCallIds.get(toolName);
+ if (queue && queue.length > 0) {
+ currentToolCallId = queue.shift();
  }
  }
- );
- break;
- }
- case "tool-result": {
- const result = chunk.chunk.output ?? chunk.chunk.result ?? chunk.chunk.value;
- const currentToolCallId = streamingContext.lastEmittedToolCallId ?? `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
- const toolName = streamingContext.toolCallIdToName.get(currentToolCallId);
- const serverId = toolName ? extractServerId(toolName) : void 0;
- iterationToolResults.push({ result });
- sendSseEvent(
- streamingContext.controller,
- streamingContext.encoder,
- {
- type: "tool_result",
- toolResult: {
- id: currentToolCallId,
- toolCallId: currentToolCallId,
- result,
- timestamp: (/* @__PURE__ */ new Date()).toISOString(),
- serverId
+ if (!currentToolCallId && streamingContext.lastEmittedToolCallId) {
+ currentToolCallId = streamingContext.lastEmittedToolCallId;
+ }
+ if (!currentToolCallId) {
+ currentToolCallId = `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+ }
+ if (toolName && streamingContext.toolNameToCallIds.has(toolName)) {
+ const queue = streamingContext.toolNameToCallIds.get(toolName);
+ const index = queue.indexOf(currentToolCallId);
+ if (index !== -1) {
+ queue.splice(index, 1);
  }
  }
- );
- break;
+ streamingContext.lastEmittedToolCallId = currentToolCallId;
+ if (toolName) {
+ streamingContext.toolCallIdToName.set(
+ currentToolCallId,
+ toolName
+ );
+ }
+ const toolNameForLookup = toolName || streamingContext.toolCallIdToName.get(currentToolCallId);
+ const serverId = toolNameForLookup ? extractServerId(toolNameForLookup) : void 0;
+ iterationToolResults.push({ result });
+ sendSseEvent(
+ streamingContext.controller,
+ streamingContext.encoder,
+ {
+ type: "tool_result",
+ toolResult: {
+ id: currentToolCallId,
+ toolCallId: currentToolCallId,
+ result,
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+ serverId
+ }
+ }
+ );
+ break;
+ }
+ default:
+ break;
  }
- default:
- break;
+ } catch (chunkError) {
+ hadStreamError = true;
+ streamErrorMessage = chunkError instanceof Error ? chunkError.message : "Error processing chunk";
  }
  }
  });
- await streamResult.consumeStream();
+ try {
+ await streamResult.consumeStream();
+ if (hadStreamError) {
+ throw new Error(streamErrorMessage);
+ }
+ response = await streamResult.response;
+ if (response.error) {
+ throw response.error;
+ }
+ if (response.experimental_providerMetadata?.openai?.error) {
+ throw new Error(
+ response.experimental_providerMetadata.openai.error.message || "OpenAI API error"
+ );
+ }
+ } catch (error) {
+ const errorMessage = streamErrorMessage || extractErrorMessage(error);
+ sendSseEvent(streamingContext.controller, streamingContext.encoder, {
+ type: "error",
+ error: errorMessage
+ });
+ sendSseEvent(
+ streamingContext.controller,
+ streamingContext.encoder,
+ "[DONE]"
+ );
+ hadError = true;
+ steps++;
+ break;
+ }
+ if (!streamResult || hadError) {
+ steps++;
+ break;
+ }
  handleAgentStepFinish(
  streamingContext,
  accumulatedText,
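
The headline change in this hunk is error handling: streamText failures are captured via onError and a per-chunk try/catch, surfaced as a single type: "error" SSE event plus a "[DONE]" terminator, and the agent loop exits. extractErrorMessage normalizes the nested shapes providers use for API failures; a hypothetical input exercising its responseBody branch:

    // Hypothetical AI SDK provider failure, shaped to hit the responseBody branch above.
    const sampleError = {
      error: {
        responseBody: JSON.stringify({ error: { message: "Incorrect API key provided" } }),
      },
    };
    // extractErrorMessage(sampleError) -> "Incorrect API key provided"
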
@@ -2368,16 +2706,26 @@ var createStreamingResponse = async (model, aiSdkTools, messages, streamingConte
  iterationToolResults,
  false
  );
- const resp = await streamResult.response;
- const responseMessages = resp?.messages || [];
+ const responseMessages = response?.messages || [];
  if (responseMessages.length) {
  messageHistory.push(...responseMessages);
  for (const m of responseMessages) {
  if (m.role === "tool") {
- const currentToolCallId = streamingContext.lastEmittedToolCallId != null ? streamingContext.lastEmittedToolCallId : `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
  const value = m.content;
- const toolName = streamingContext.toolCallIdToName.get(currentToolCallId);
- const serverId = toolName ? extractServerId(toolName) : void 0;
+ const toolName = m.toolName || m.name;
+ let currentToolCallId;
+ if (toolName && streamingContext.toolNameToCallIds.has(toolName)) {
+ const queue = streamingContext.toolNameToCallIds.get(toolName);
+ if (queue.length > 0) {
+ currentToolCallId = queue.shift();
+ } else {
+ currentToolCallId = streamingContext.lastEmittedToolCallId ?? `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+ }
+ } else {
+ currentToolCallId = streamingContext.lastEmittedToolCallId ?? `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+ }
+ const toolNameForLookup = toolName || streamingContext.toolCallIdToName.get(currentToolCallId);
+ const serverId = toolNameForLookup ? extractServerId(toolNameForLookup) : void 0;
  iterationToolResults.push({ result: value });
  sendSseEvent(streamingContext.controller, streamingContext.encoder, {
  type: "tool_result",
@@ -2393,18 +2741,20 @@ var createStreamingResponse = async (model, aiSdkTools, messages, streamingConte
  }
  }
  steps++;
- const finishReason = await streamResult.finishReason;
+ const finishReason = response?.finishReason || "stop";
  const shouldContinue = finishReason === "tool-calls" || accumulatedText.length === 0 && iterationToolResults.length > 0;
  if (!shouldContinue) break;
  }
- sendSseEvent(streamingContext.controller, streamingContext.encoder, {
- type: "elicitation_complete"
- });
- sendSseEvent(
- streamingContext.controller,
- streamingContext.encoder,
- "[DONE]"
- );
+ if (!hadError) {
+ sendSseEvent(streamingContext.controller, streamingContext.encoder, {
+ type: "elicitation_complete"
+ });
+ sendSseEvent(
+ streamingContext.controller,
+ streamingContext.encoder,
+ "[DONE]"
+ );
+ }
  };
  var sendMessagesToBackend = async (messages, streamingContext, mcpClientManager2, baseUrl, modelId, authHeader, selectedServers) => {
  const messageHistory = (messages || []).map((m) => {
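
With the hadError guard, the stream now ends with exactly one "[DONE]" sentinel on both the failure path (inside the catch above) and the success path. A hedged sketch of a client consuming the stream; the data: framing is an assumption, not shown in this diff:

    // Assumes standard SSE "data: <json>" lines ending with a literal [DONE].
    async function readChatStream(res: Response): Promise<void> {
      const reader = res.body!.pipeThrough(new TextDecoderStream()).getReader();
      for (;;) {
        const { value, done } = await reader.read();
        if (done) return;
        for (const line of value.split("\n")) { // ignores events split across chunks, for brevity
          if (!line.startsWith("data: ")) continue;
          const data = line.slice("data: ".length);
          if (data === "[DONE]") return;  // single terminator, success or error
          const event = JSON.parse(data); // { type: "text" | "tool_call" | "error" | ... }
          if (event.type === "error") console.error(event.error);
        }
      }
    }
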
@@ -2465,6 +2815,10 @@ var sendMessagesToBackend = async (messages, streamingContext, mcpClientManager2
  const currentToolCallId = `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
  streamingContext.lastEmittedToolCallId = currentToolCallId;
  streamingContext.toolCallIdToName.set(currentToolCallId, call.name);
+ if (!streamingContext.toolNameToCallIds.has(call.name)) {
+ streamingContext.toolNameToCallIds.set(call.name, []);
+ }
+ streamingContext.toolNameToCallIds.get(call.name).push(currentToolCallId);
  sendSseEvent(streamingContext.controller, streamingContext.encoder, {
  type: "tool_call",
  toolCall: {
@@ -2477,7 +2831,18 @@ var sendMessagesToBackend = async (messages, streamingContext, mcpClientManager2
  });
  };
  const emitToolResult = (result) => {
- const currentToolCallId = streamingContext.lastEmittedToolCallId != null ? streamingContext.lastEmittedToolCallId : `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+ const toolName = result.toolName;
+ let currentToolCallId;
+ if (toolName && streamingContext.toolNameToCallIds.has(toolName)) {
+ const queue = streamingContext.toolNameToCallIds.get(toolName);
+ if (queue.length > 0) {
+ currentToolCallId = queue.shift();
+ } else {
+ currentToolCallId = streamingContext.lastEmittedToolCallId ?? `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+ }
+ } else {
+ currentToolCallId = streamingContext.lastEmittedToolCallId ?? `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+ }
  sendSseEvent(streamingContext.controller, streamingContext.encoder, {
  type: "tool_result",
  toolResult: {
@@ -2630,7 +2995,8 @@ chat.post("/", async (c) => {
  toolCallId: 0,
  lastEmittedToolCallId: null,
  stepIndex: 0,
- toolCallIdToName: /* @__PURE__ */ new Map()
+ toolCallIdToName: /* @__PURE__ */ new Map(),
+ toolNameToCallIds: /* @__PURE__ */ new Map()
  };
  mcpClientManager2.setElicitationCallback(async (request) => {
  const elicitationRequest = {
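
For orientation, here is the streaming-context shape this release settles on, reconstructed from the initializer above and the usages earlier in the diff (field types are inferred, not taken from package typings):

    // Inferred from the initializer and usages in this diff; not from package typings.
    interface StreamingContext {
      controller: ReadableStreamDefaultController | null;
      encoder: TextEncoder | null;
      toolCallId: number;
      lastEmittedToolCallId: string | null;
      stepIndex: number;
      toolCallIdToName: Map<string, string>;    // call id -> tool name
      toolNameToCallIds: Map<string, string[]>; // tool name -> pending call ids (FIFO)
    }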