@mcpjam/inspector 1.0.15 → 1.0.17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/client/assets/index-B7n5hcqu.js +1880 -0
- package/dist/client/assets/index-GlsNu0tU.css +1 -0
- package/dist/client/index.html +2 -2
- package/dist/client/litellm_logo.png +0 -0
- package/dist/client/mistral_logo.png +0 -0
- package/dist/client/moonshot_dark.png +0 -0
- package/dist/client/moonshot_light.png +0 -0
- package/dist/client/z-ai.png +0 -0
- package/dist/server/index.js +479 -87
- package/dist/server/index.js.map +1 -1
- package/package.json +2 -1
- package/dist/client/assets/index-BiLTSE2l.js +0 -1880
- package/dist/client/assets/index-DHyLrKBZ.css +0 -1
package/dist/server/index.js
CHANGED
@@ -48,7 +48,7 @@ import { readFileSync as readFileSync2, existsSync as existsSync2 } from "fs";
 import { join as join2, dirname, resolve } from "path";
 import { fileURLToPath } from "url";
 
-// ../sdk/dist/
+// ../sdk/dist/index.js
 import { Client } from "@modelcontextprotocol/sdk/client/index.js";
 import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js";
 import {
@@ -152,11 +152,12 @@ var MCPClientManager = class {
     this.toolsMetadataCache = /* @__PURE__ */ new Map();
     this.defaultLogJsonRpc = false;
     this.pendingElicitations = /* @__PURE__ */ new Map();
-    var _a2, _b2, _c, _d;
+    var _a2, _b2, _c, _d, _e;
     this.defaultClientVersion = (_a2 = options.defaultClientVersion) != null ? _a2 : "1.0.0";
-    this.
-    this.
-    this.
+    this.defaultClientName = (_b2 = options.defaultClientName) != null ? _b2 : void 0;
+    this.defaultCapabilities = { ...(_c = options.defaultCapabilities) != null ? _c : {} };
+    this.defaultTimeout = (_d = options.defaultTimeout) != null ? _d : DEFAULT_REQUEST_TIMEOUT_MSEC;
+    this.defaultLogJsonRpc = (_e = options.defaultLogJsonRpc) != null ? _e : false;
     this.defaultRpcLogger = options.rpcLogger;
     for (const [id, config] of Object.entries(servers2)) {
       void this.connectToServer(id, config);
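
The `(_e = options.defaultLogJsonRpc) != null ? _e : false` chains are esbuild's down-leveled form of nullish coalescing; the substantive change is that the constructor now reads its defaults from `options`, including the new `defaultClientName` and a configurable `defaultLogJsonRpc`. A source-level sketch of what the compiled lines express (an assumption about the pre-bundle TypeScript, which this diff does not show):

    // Hypothetical pre-bundle form of the compiled defaults above.
    this.defaultClientVersion = options.defaultClientVersion ?? "1.0.0";
    this.defaultClientName = options.defaultClientName ?? undefined;
    this.defaultCapabilities = { ...(options.defaultCapabilities ?? {}) };
    this.defaultTimeout = options.defaultTimeout ?? DEFAULT_REQUEST_TIMEOUT_MSEC;
    this.defaultLogJsonRpc = options.defaultLogJsonRpc ?? false;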
@@ -206,7 +207,7 @@ var MCPClientManager = class {
     var _a22;
     const client = new Client(
       {
-        name: serverId,
+        name: this.defaultClientName ? `${this.defaultClientName}` : serverId,
         version: (_a22 = config.version) != null ? _a22 : this.defaultClientVersion
       },
       {
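
With `defaultClientName` set, every MCP client handshake now advertises that name instead of falling back to the per-server id. Note that the template literal `${this.defaultClientName}` is a redundant coercion of a value that is already a string. A hypothetical caller, with option names taken from the constructor hunk above (the exact constructor argument shapes are assumptions):

    // Hypothetical usage; `servers` maps server ids to connection configs.
    const manager = new MCPClientManager(servers, {
      defaultClientName: "mcpjam-inspector", // sent as the client `name` during initialize
      defaultClientVersion: "1.0.17",
    });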
@@ -1461,7 +1462,14 @@ resources.post("/read", async (c) => {
 resources.post("/widget/store", async (c) => {
   try {
     const body = await c.req.json();
-    const {
+    const {
+      serverId,
+      uri,
+      toolInput,
+      toolOutput,
+      toolResponseMetadata,
+      toolId
+    } = body;
     if (!serverId || !uri || !toolId) {
       return c.json({ success: false, error: "Missing required fields" }, 400);
     }
@@ -1470,6 +1478,7 @@ resources.post("/widget/store", async (c) => {
       uri,
       toolInput,
       toolOutput,
+      toolResponseMetadata: toolResponseMetadata ?? null,
       toolId,
       timestamp: Date.now()
     });
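
Taken together, the two hunks above add an optional `toolResponseMetadata` field to the widget store: it is destructured from the request body, validated alongside the required fields, and persisted as `null` when absent. A hypothetical request (the route prefix and port are assumptions; only the `/widget/store` path segment appears in the diff):

    // Hypothetical client call against a locally running inspector.
    await fetch("http://localhost:3001/api/mcp/resources/widget/store", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        serverId: "my-server",         // required, else 400
        uri: "ui://widget/chart.html", // required, else 400
        toolId: "tc_123",              // required, else 400
        toolInput: { query: "revenue" },
        toolOutput: { rows: [] },
        toolResponseMetadata: { chartType: "bar" }, // new optional field, stored as null if omitted
      }),
    });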
@@ -1531,7 +1540,7 @@ resources.get("/widget-content/:toolId", async (c) => {
         404
       );
     }
-    const { serverId, uri, toolInput, toolOutput } = widgetData;
+    const { serverId, uri, toolInput, toolOutput, toolResponseMetadata } = widgetData;
     const mcpClientManager2 = c.mcpClientManager;
     const availableServers = mcpClientManager2.listServers().filter((id) => Boolean(mcpClientManager2.getClient(id)));
     let actualServerId = serverId;
@@ -1588,6 +1597,7 @@ resources.get("/widget-content/:toolId", async (c) => {
     const openaiAPI = {
       toolInput: ${JSON.stringify(toolInput)},
       toolOutput: ${JSON.stringify(toolOutput)},
+      toolResponseMetadata: ${JSON.stringify(toolResponseMetadata ?? null)},
       displayMode: 'inline',
       maxHeight: 600,
       theme: 'dark',
@@ -1661,6 +1671,19 @@ resources.get("/widget-content/:toolId", async (c) => {
       async sendFollowUpMessage(args) {
         const prompt = typeof args === 'string' ? args : (args?.prompt || '');
         return this.sendFollowupTurn(prompt);
+      },
+
+      async openExternal(options) {
+        const href = typeof options === 'string' ? options : options?.href;
+        if (!href) {
+          throw new Error('href is required for openExternal');
+        }
+        window.parent.postMessage({
+          type: 'openai:openExternal',
+          href
+        }, '*');
+        // Also open in new tab as fallback
+        window.open(href, '_blank', 'noopener,noreferrer');
       }
     };
 
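
The new `openExternal` shim does two things: it notifies the host page via `postMessage` and then opens the link itself, so navigation still works when no host listener exists. A sketch of a host-side counterpart (an assumption; only the widget side appears in the diff):

    // Hypothetical host-side listener for the message the widget posts above.
    window.addEventListener("message", (event: MessageEvent) => {
      const data = event.data as { type?: string; href?: string };
      if (data?.type === "openai:openExternal" && typeof data.href === "string") {
        // e.g. validate or confirm before navigating; the widget already calls
        // window.open itself as a fallback, so the host may simply observe.
        console.log("widget requested external navigation to", data.href);
      }
    });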
@@ -1834,11 +1857,238 @@ var MCPJAM_PROVIDED_MODEL_IDS = [
   "meta-llama/llama-3.3-70b-instruct",
   "openai/gpt-oss-120b",
   "x-ai/grok-4-fast",
-  "openai/gpt-5-nano"
+  "openai/gpt-5-nano",
+  "anthropic/claude-sonnet-4.5",
+  "anthropic/claude-haiku-4.5",
+  "openai/gpt-5-codex",
+  "openai/gpt-5",
+  "openai/gpt-5-mini",
+  "google/gemini-2.5-flash-preview-09-2025",
+  "moonshotai/kimi-k2-0905",
+  "google/gemini-2.5-flash",
+  "z-ai/glm-4.6"
 ];
 var isMCPJamProvidedModel = (modelId) => {
   return MCPJAM_PROVIDED_MODEL_IDS.includes(modelId);
 };
+var Model = /* @__PURE__ */ ((Model2) => {
+  Model2["CLAUDE_OPUS_4_0"] = "claude-opus-4-0";
+  Model2["CLAUDE_SONNET_4_0"] = "claude-sonnet-4-0";
+  Model2["CLAUDE_3_7_SONNET_LATEST"] = "claude-3-7-sonnet-latest";
+  Model2["CLAUDE_3_5_SONNET_LATEST"] = "claude-3-5-sonnet-latest";
+  Model2["CLAUDE_3_5_HAIKU_LATEST"] = "claude-3-5-haiku-latest";
+  Model2["GPT_4_1"] = "gpt-4.1";
+  Model2["GPT_4_1_MINI"] = "gpt-4.1-mini";
+  Model2["GPT_4_1_NANO"] = "gpt-4.1-nano";
+  Model2["GPT_4O"] = "gpt-4o";
+  Model2["GPT_4O_MINI"] = "gpt-4o-mini";
+  Model2["GPT_4_TURBO"] = "gpt-4-turbo";
+  Model2["GPT_4"] = "gpt-4";
+  Model2["GPT_5"] = "gpt-5";
+  Model2["GPT_5_MINI"] = "gpt-5-mini";
+  Model2["GPT_5_NANO"] = "gpt-5-nano";
+  Model2["GPT_5_MAIN"] = "openai/gpt-5";
+  Model2["GPT_5_PRO"] = "gpt-5-pro";
+  Model2["GPT_5_CODEX"] = "gpt-5-codex";
+  Model2["GPT_3_5_TURBO"] = "gpt-3.5-turbo";
+  Model2["DEEPSEEK_CHAT"] = "deepseek-chat";
+  Model2["DEEPSEEK_REASONER"] = "deepseek-reasoner";
+  Model2["GEMINI_2_5_PRO"] = "gemini-2.5-pro";
+  Model2["GEMINI_2_5_FLASH"] = "gemini-2.5-flash";
+  Model2["GEMINI_2_5_FLASH_LITE"] = "gemini-2.5-flash-lite";
+  Model2["GEMINI_2_0_FLASH_EXP"] = "gemini-2.0-flash-exp";
+  Model2["GEMINI_1_5_PRO"] = "gemini-1.5-pro";
+  Model2["GEMINI_1_5_PRO_002"] = "gemini-1.5-pro-002";
+  Model2["GEMINI_1_5_FLASH"] = "gemini-1.5-flash";
+  Model2["GEMINI_1_5_FLASH_002"] = "gemini-1.5-flash-002";
+  Model2["GEMINI_1_5_FLASH_8B"] = "gemini-1.5-flash-8b";
+  Model2["GEMINI_1_5_FLASH_8B_001"] = "gemini-1.5-flash-8b-001";
+  Model2["GEMMA_3_2B"] = "gemma-3-2b";
+  Model2["GEMMA_3_9B"] = "gemma-3-9b";
+  Model2["GEMMA_3_27B"] = "gemma-3-27b";
+  Model2["GEMMA_2_2B"] = "gemma-2-2b";
+  Model2["GEMMA_2_9B"] = "gemma-2-9b";
+  Model2["GEMMA_2_27B"] = "gemma-2-27b";
+  Model2["CODE_GEMMA_2B"] = "codegemma-2b";
+  Model2["CODE_GEMMA_7B"] = "codegemma-7b";
+  Model2["MISTRAL_LARGE_LATEST"] = "mistral-large-latest";
+  Model2["MISTRAL_SMALL_LATEST"] = "mistral-small-latest";
+  Model2["CODESTRAL_LATEST"] = "codestral-latest";
+  Model2["MINISTRAL_8B_LATEST"] = "ministral-8b-latest";
+  Model2["MINISTRAL_3B_LATEST"] = "ministral-3b-latest";
+  return Model2;
+})(Model || {});
+var SUPPORTED_MODELS = [
+  {
+    id: "claude-opus-4-0" /* CLAUDE_OPUS_4_0 */,
+    name: "Claude Opus 4",
+    provider: "anthropic"
+  },
+  {
+    id: "claude-sonnet-4-0" /* CLAUDE_SONNET_4_0 */,
+    name: "Claude Sonnet 4",
+    provider: "anthropic"
+  },
+  {
+    id: "claude-3-7-sonnet-latest" /* CLAUDE_3_7_SONNET_LATEST */,
+    name: "Claude Sonnet 3.7",
+    provider: "anthropic"
+  },
+  {
+    id: "claude-3-5-sonnet-latest" /* CLAUDE_3_5_SONNET_LATEST */,
+    name: "Claude Sonnet 3.5",
+    provider: "anthropic"
+  },
+  {
+    id: "claude-3-5-haiku-latest" /* CLAUDE_3_5_HAIKU_LATEST */,
+    name: "Claude Haiku 3.5",
+    provider: "anthropic"
+  },
+  { id: "gpt-5" /* GPT_5 */, name: "GPT-5", provider: "openai" },
+  { id: "gpt-5-mini" /* GPT_5_MINI */, name: "GPT-5 Mini", provider: "openai" },
+  { id: "gpt-5-nano" /* GPT_5_NANO */, name: "GPT-5 Nano", provider: "openai" },
+  { id: Model.GPT_5_CHAT_LATEST, name: "GPT-5 Chat", provider: "openai" },
+  { id: "gpt-5-pro" /* GPT_5_PRO */, name: "GPT-5 Pro", provider: "openai" },
+  { id: "gpt-5-codex" /* GPT_5_CODEX */, name: "GPT-5 Codex", provider: "openai" },
+  { id: "gpt-4.1" /* GPT_4_1 */, name: "GPT-4.1", provider: "openai" },
+  { id: "gpt-4.1-mini" /* GPT_4_1_MINI */, name: "GPT-4.1 Mini", provider: "openai" },
+  { id: "gpt-4.1-nano" /* GPT_4_1_NANO */, name: "GPT-4.1 Nano", provider: "openai" },
+  { id: "gpt-4o" /* GPT_4O */, name: "GPT-4o", provider: "openai" },
+  { id: "gpt-4o-mini" /* GPT_4O_MINI */, name: "GPT-4o Mini", provider: "openai" },
+  { id: "deepseek-chat" /* DEEPSEEK_CHAT */, name: "DeepSeek Chat", provider: "deepseek" },
+  {
+    id: "deepseek-reasoner" /* DEEPSEEK_REASONER */,
+    name: "DeepSeek Reasoner",
+    provider: "deepseek"
+  },
+  // Google Gemini models (latest first)
+  {
+    id: "gemini-2.5-pro" /* GEMINI_2_5_PRO */,
+    name: "Gemini 2.5 Pro",
+    provider: "google"
+  },
+  {
+    id: "gemini-2.5-flash" /* GEMINI_2_5_FLASH */,
+    name: "Gemini 2.5 Flash",
+    provider: "google"
+  },
+  {
+    id: "gemini-2.0-flash-exp" /* GEMINI_2_0_FLASH_EXP */,
+    name: "Gemini 2.0 Flash Experimental",
+    provider: "google"
+  },
+  {
+    id: "gemini-1.5-pro-002" /* GEMINI_1_5_PRO_002 */,
+    name: "Gemini 1.5 Pro 002",
+    provider: "google"
+  },
+  {
+    id: "gemini-1.5-pro" /* GEMINI_1_5_PRO */,
+    name: "Gemini 1.5 Pro",
+    provider: "google"
+  },
+  {
+    id: "gemini-1.5-flash-002" /* GEMINI_1_5_FLASH_002 */,
+    name: "Gemini 1.5 Flash 002",
+    provider: "google"
+  },
+  {
+    id: "gemini-1.5-flash" /* GEMINI_1_5_FLASH */,
+    name: "Gemini 1.5 Flash",
+    provider: "google"
+  },
+  {
+    id: "meta-llama/llama-3.3-70b-instruct",
+    name: "Llama 3.3 70B (Free)",
+    provider: "meta"
+  },
+  {
+    id: "openai/gpt-oss-120b",
+    name: "GPT-OSS 120B (Free)",
+    provider: "openai"
+  },
+  {
+    id: "x-ai/grok-4-fast",
+    name: "Grok 4 Fast (Free)",
+    provider: "x-ai"
+  },
+  {
+    id: "openai/gpt-5-nano",
+    name: "GPT-5 Nano (Free)",
+    provider: "openai"
+  },
+  {
+    id: "anthropic/claude-sonnet-4.5",
+    name: "Claude Sonnet 4.5 (Free)",
+    provider: "anthropic"
+  },
+  {
+    id: "anthropic/claude-haiku-4.5",
+    name: "Claude Haiku 4.5 (Free)",
+    provider: "anthropic"
+  },
+  {
+    id: "openai/gpt-5-codex",
+    name: "GPT-5 Codex (Free)",
+    provider: "openai"
+  },
+  {
+    id: "openai/gpt-5",
+    name: "GPT-5 (Free)",
+    provider: "openai"
+  },
+  {
+    id: "openai/gpt-5-mini",
+    name: "GPT-5 Mini (Free)",
+    provider: "openai"
+  },
+  {
+    id: "google/gemini-2.5-flash-preview-09-2025",
+    name: "Gemini 2.5 Flash Preview (Free)",
+    provider: "google"
+  },
+  {
+    id: "moonshotai/kimi-k2-0905",
+    name: "Kimi K2 (Free)",
+    provider: "moonshotai"
+  },
+  {
+    id: "google/gemini-2.5-flash",
+    name: "Gemini 2.5 Flash (Free)",
+    provider: "google"
+  },
+  {
+    id: "z-ai/glm-4.6",
+    name: "GLM 4.6 (Free)",
+    provider: "z-ai"
+  },
+  // Mistral models
+  {
+    id: "mistral-large-latest" /* MISTRAL_LARGE_LATEST */,
+    name: "Mistral Large",
+    provider: "mistral"
+  },
+  {
+    id: "mistral-small-latest" /* MISTRAL_SMALL_LATEST */,
+    name: "Mistral Small",
+    provider: "mistral"
+  },
+  {
+    id: "codestral-latest" /* CODESTRAL_LATEST */,
+    name: "Codestral",
+    provider: "mistral"
+  },
+  {
+    id: "ministral-8b-latest" /* MINISTRAL_8B_LATEST */,
+    name: "Ministral 8B",
+    provider: "mistral"
+  },
+  {
+    id: "ministral-3b-latest" /* MINISTRAL_3B_LATEST */,
+    name: "Ministral 3B",
+    provider: "mistral"
+  }
+];
 
 // routes/mcp/chat.ts
 import { TextEncoder as TextEncoder2 } from "util";
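
This hunk both extends the free-tier allow-list and inlines a compiled `Model` enum plus a `SUPPORTED_MODELS` catalog. One oddity worth flagging in the published code: `SUPPORTED_MODELS` references `Model.GPT_5_CHAT_LATEST`, which the enum never defines, so that entry's `id` is `undefined` at runtime. A minimal sketch of how the catalog is typically consulted (usage assumed; only the structures themselves appear in the diff):

    // Hypothetical lookups against the structures added above.
    if (isMCPJamProvidedModel("anthropic/claude-haiku-4.5")) {
      // eligible for MCPJam's hosted (free) routing
    }
    const glm = SUPPORTED_MODELS.find((m) => m.id === "z-ai/glm-4.6");
    // -> { id: "z-ai/glm-4.6", name: "GLM 4.6 (Free)", provider: "z-ai" }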
@@ -1855,6 +2105,9 @@ function getDefaultTemperatureByProvider(provider) {
     case "google":
       return 0.9;
     // Google's recommended default
+    case "mistral":
+      return 0.7;
+    // Mistral's recommended default
     default:
       return 0;
   }
@@ -1864,9 +2117,10 @@ function getDefaultTemperatureByProvider(provider) {
 import { createAnthropic } from "@ai-sdk/anthropic";
 import { createDeepSeek } from "@ai-sdk/deepseek";
 import { createGoogleGenerativeAI } from "@ai-sdk/google";
+import { createMistral } from "@ai-sdk/mistral";
 import { createOpenAI } from "@ai-sdk/openai";
 import { createOllama } from "ollama-ai-provider-v2";
-var createLlmModel = (modelDefinition, apiKey, ollamaBaseUrl) => {
+var createLlmModel = (modelDefinition, apiKey, ollamaBaseUrl, litellmBaseUrl) => {
   if (!modelDefinition?.id || !modelDefinition?.provider) {
     throw new Error(
       `Invalid model definition: ${JSON.stringify(modelDefinition)}`
@@ -1886,6 +2140,17 @@ var createLlmModel = (modelDefinition, apiKey, ollamaBaseUrl) => {
       const normalized = /\/api\/?$/.test(raw) ? raw : `${raw.replace(/\/+$/, "")}/api`;
       return createOllama({ baseURL: normalized })(modelDefinition.id);
     }
+    case "mistral":
+      return createMistral({ apiKey })(modelDefinition.id);
+    case "litellm": {
+      const baseURL = litellmBaseUrl || "http://localhost:4000";
+      const openai = createOpenAI({
+        apiKey: apiKey || "dummy-key",
+        // LiteLLM may not require API key depending on setup
+        baseURL
+      });
+      return openai.chat(modelDefinition.id);
+    }
     default:
       throw new Error(
         `Unsupported provider: ${modelDefinition.provider} for model: ${modelDefinition.id}`
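
The new `litellm` branch reuses the OpenAI-compatible client against a LiteLLM gateway, defaulting to `http://localhost:4000` and tolerating a missing key, while `mistral` gets a first-class provider via `@ai-sdk/mistral`. A hypothetical call through the extended signature:

    // Hypothetical invocation; argument order matches the new signature above.
    const model = createLlmModel(
      { id: "gpt-4o-mini", provider: "litellm" },
      process.env.LITELLM_API_KEY ?? "", // falls back to "dummy-key" inside
      undefined,                         // ollamaBaseUrl (unused for litellm)
      "http://localhost:4000"            // litellmBaseUrl (also the default)
    );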
@@ -2177,6 +2442,10 @@ var handleAgentStepFinish = (streamingContext, text, toolCalls, toolResults, emi
     streamingContext.lastEmittedToolCallId = currentToolCallId;
     const toolName = call.name || call.toolName;
     streamingContext.toolCallIdToName.set(currentToolCallId, toolName);
+    if (!streamingContext.toolNameToCallIds.has(toolName)) {
+      streamingContext.toolNameToCallIds.set(toolName, []);
+    }
+    streamingContext.toolNameToCallIds.get(toolName).push(currentToolCallId);
     if (streamingContext.controller && streamingContext.encoder) {
       sendSseEvent(
         streamingContext.controller,
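
`toolNameToCallIds` is the core of this release's tool-call/result correlation fix: each emitted call id is queued under its tool name, and results that arrive without a `toolCallId` are matched to the oldest pending id for that name. The bookkeeping in isolation (a sketch of the pattern, not an exported API):

    // FIFO correlation of tool calls to results, keyed by tool name.
    const toolNameToCallIds = new Map<string, string[]>();

    function registerCall(toolName: string, callId: string): void {
      if (!toolNameToCallIds.has(toolName)) toolNameToCallIds.set(toolName, []);
      toolNameToCallIds.get(toolName)!.push(callId);
    }

    function matchResult(toolName: string): string | undefined {
      // The oldest unmatched call for this name claims the result.
      return toolNameToCallIds.get(toolName)?.shift();
    }

Without the queue, two concurrent calls to the same tool would both resolve against `lastEmittedToolCallId`, and one result would be attributed to the wrong call.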
@@ -2255,88 +2524,181 @@ var createStreamingResponse = async (model, aiSdkTools, messages, streamingConte
     }
   });
   let steps = 0;
+  let hadError = false;
   while (steps < MAX_AGENT_STEPS) {
     let accumulatedText = "";
     const iterationToolCalls = [];
     const iterationToolResults = [];
-
+    let streamResult;
+    let hadStreamError = false;
+    let streamErrorMessage = "";
+    let response = null;
+    const extractErrorMessage = (error) => {
+      if (error.error && typeof error.error === "object") {
+        const apiError = error.error;
+        if (apiError.data?.error?.message) return apiError.data.error.message;
+        if (apiError.responseBody) {
+          try {
+            const parsed = JSON.parse(apiError.responseBody);
+            if (parsed.error?.message) return parsed.error.message;
+          } catch {
+          }
+        }
+        if (apiError.message) return apiError.message;
+      }
+      if (error.error instanceof Error) return error.error.message;
+      return String(error.error || error.message || "Unknown error occurred");
+    };
+    streamResult = streamText({
       model,
       system: systemPrompt || "You are a helpful assistant with access to MCP tools.",
       temperature: temperature ?? getDefaultTemperatureByProvider(provider),
       tools: aiSdkTools,
       messages: messageHistory,
+      onError: (error) => {
+        hadStreamError = true;
+        streamErrorMessage = extractErrorMessage(error);
+      },
       onChunk: async (chunk) => {
-
-
-
-
-
-
+        try {
+          switch (chunk.chunk.type) {
+            case "text-delta":
+            case "reasoning-delta": {
+              const text = chunk.chunk.text;
+              if (text) {
+                accumulatedText += text;
+                sendSseEvent(
+                  streamingContext.controller,
+                  streamingContext.encoder,
+                  {
+                    type: "text",
+                    content: text
+                  }
+                );
+              }
+              break;
+            }
+            case "tool-input-start": {
+              break;
+            }
+            case "tool-call": {
+              const currentToolCallId = `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+              streamingContext.lastEmittedToolCallId = currentToolCallId;
+              const name = chunk.chunk.toolName || chunk.chunk.name;
+              const parameters = chunk.chunk.input ?? chunk.chunk.parameters ?? chunk.chunk.args ?? {};
+              streamingContext.toolCallIdToName.set(currentToolCallId, name);
+              if (!streamingContext.toolNameToCallIds.has(name)) {
+                streamingContext.toolNameToCallIds.set(name, []);
+              }
+              streamingContext.toolNameToCallIds.get(name).push(currentToolCallId);
+              iterationToolCalls.push({ name, params: parameters });
               sendSseEvent(
                 streamingContext.controller,
                 streamingContext.encoder,
                 {
-                  type: "
-
+                  type: "tool_call",
+                  toolCall: {
+                    id: currentToolCallId,
+                    name,
+                    parameters,
+                    timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+                    status: "executing"
+                  }
                 }
               );
+              break;
             }
-
-
-
-
-
-
-
-
-            const name = chunk.chunk.toolName || chunk.chunk.name;
-            const parameters = chunk.chunk.input ?? chunk.chunk.parameters ?? chunk.chunk.args ?? {};
-            streamingContext.toolCallIdToName.set(currentToolCallId, name);
-            iterationToolCalls.push({ name, params: parameters });
-            sendSseEvent(
-              streamingContext.controller,
-              streamingContext.encoder,
-              {
-                type: "tool_call",
-                toolCall: {
-                  id: currentToolCallId,
-                  name,
-                  parameters,
-                  timestamp: (/* @__PURE__ */ new Date()).toISOString(),
-                  status: "executing"
+            case "tool-result": {
+              const result = chunk.chunk.output ?? chunk.chunk.result ?? chunk.chunk.value;
+              const toolName = chunk.chunk.toolName || chunk.chunk.name || null;
+              let currentToolCallId = chunk.chunk.toolCallId || void 0;
+              if (!currentToolCallId && toolName) {
+                const queue = streamingContext.toolNameToCallIds.get(toolName);
+                if (queue && queue.length > 0) {
+                  currentToolCallId = queue.shift();
                 }
               }
-
-
-
-
-
-
-
-
-
-
-
-              streamingContext.encoder,
-              {
-                type: "tool_result",
-                toolResult: {
-                  id: currentToolCallId,
-                  toolCallId: currentToolCallId,
-                  result,
-                  timestamp: (/* @__PURE__ */ new Date()).toISOString(),
-                  serverId
+              if (!currentToolCallId && streamingContext.lastEmittedToolCallId) {
+                currentToolCallId = streamingContext.lastEmittedToolCallId;
+              }
+              if (!currentToolCallId) {
+                currentToolCallId = `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+              }
+              if (toolName && streamingContext.toolNameToCallIds.has(toolName)) {
+                const queue = streamingContext.toolNameToCallIds.get(toolName);
+                const index = queue.indexOf(currentToolCallId);
+                if (index !== -1) {
+                  queue.splice(index, 1);
                 }
               }
-
-
+              streamingContext.lastEmittedToolCallId = currentToolCallId;
+              if (toolName) {
+                streamingContext.toolCallIdToName.set(
+                  currentToolCallId,
+                  toolName
+                );
+              }
+              const toolNameForLookup = toolName || streamingContext.toolCallIdToName.get(currentToolCallId);
+              const serverId = toolNameForLookup ? extractServerId(toolNameForLookup) : void 0;
+              iterationToolResults.push({ result });
+              sendSseEvent(
+                streamingContext.controller,
+                streamingContext.encoder,
+                {
+                  type: "tool_result",
+                  toolResult: {
+                    id: currentToolCallId,
+                    toolCallId: currentToolCallId,
+                    result,
+                    timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+                    serverId
+                  }
+                }
+              );
+              break;
+            }
+            default:
+              break;
           }
-
-
+        } catch (chunkError) {
+          hadStreamError = true;
+          streamErrorMessage = chunkError instanceof Error ? chunkError.message : "Error processing chunk";
        }
       }
     });
-
+    try {
+      await streamResult.consumeStream();
+      if (hadStreamError) {
+        throw new Error(streamErrorMessage);
+      }
+      response = await streamResult.response;
+      if (response.error) {
+        throw response.error;
+      }
+      if (response.experimental_providerMetadata?.openai?.error) {
+        throw new Error(
+          response.experimental_providerMetadata.openai.error.message || "OpenAI API error"
+        );
+      }
+    } catch (error) {
+      const errorMessage = streamErrorMessage || extractErrorMessage(error);
+      sendSseEvent(streamingContext.controller, streamingContext.encoder, {
+        type: "error",
+        error: errorMessage
+      });
+      sendSseEvent(
+        streamingContext.controller,
+        streamingContext.encoder,
+        "[DONE]"
+      );
+      hadError = true;
+      steps++;
+      break;
+    }
+    if (!streamResult || hadError) {
+      steps++;
+      break;
+    }
     handleAgentStepFinish(
       streamingContext,
       accumulatedText,
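
The reworked loop separates stream-level failures, which `streamText` surfaces through its `onError` callback rather than by throwing, from per-chunk failures, and converts both into an SSE `error` event followed by `[DONE]` instead of continuing silently. The shape of the pattern against the AI SDK (a sketch under stated assumptions; the event plumbing and helper names above belong to the package, not the SDK):

    import { streamText } from "ai";

    type StreamTextOptions = Parameters<typeof streamText>[0];

    // Sketch: run one agent step, drain the stream, and turn mid-stream
    // failures into a thrown error, mirroring the hadStreamError flow above.
    async function runStep(options: StreamTextOptions) {
      let hadStreamError = false;
      let streamErrorMessage = "";
      const result = streamText({
        ...options,
        onError: ({ error }) => {
          hadStreamError = true; // streamText reports errors here, it does not throw
          streamErrorMessage = error instanceof Error ? error.message : String(error);
        },
      });
      await result.consumeStream(); // drain fully so onError/onChunk all fire
      if (hadStreamError) throw new Error(streamErrorMessage);
      return result.response;       // resolved messages feed the next iteration
    }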
@@ -2344,16 +2706,26 @@ var createStreamingResponse = async (model, aiSdkTools, messages, streamingConte
       iterationToolResults,
       false
     );
-    const
-    const responseMessages = resp?.messages || [];
+    const responseMessages = response?.messages || [];
     if (responseMessages.length) {
       messageHistory.push(...responseMessages);
       for (const m of responseMessages) {
         if (m.role === "tool") {
-          const currentToolCallId = streamingContext.lastEmittedToolCallId != null ? streamingContext.lastEmittedToolCallId : `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
           const value = m.content;
-          const toolName =
-
+          const toolName = m.toolName || m.name;
+          let currentToolCallId;
+          if (toolName && streamingContext.toolNameToCallIds.has(toolName)) {
+            const queue = streamingContext.toolNameToCallIds.get(toolName);
+            if (queue.length > 0) {
+              currentToolCallId = queue.shift();
+            } else {
+              currentToolCallId = streamingContext.lastEmittedToolCallId ?? `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+            }
+          } else {
+            currentToolCallId = streamingContext.lastEmittedToolCallId ?? `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+          }
+          const toolNameForLookup = toolName || streamingContext.toolCallIdToName.get(currentToolCallId);
+          const serverId = toolNameForLookup ? extractServerId(toolNameForLookup) : void 0;
           iterationToolResults.push({ result: value });
           sendSseEvent(streamingContext.controller, streamingContext.encoder, {
             type: "tool_result",
@@ -2369,18 +2741,20 @@ var createStreamingResponse = async (model, aiSdkTools, messages, streamingConte
       }
     }
     steps++;
-    const finishReason =
+    const finishReason = response?.finishReason || "stop";
     const shouldContinue = finishReason === "tool-calls" || accumulatedText.length === 0 && iterationToolResults.length > 0;
     if (!shouldContinue) break;
   }
-
-
-
-
-
-
-
-
+  if (!hadError) {
+    sendSseEvent(streamingContext.controller, streamingContext.encoder, {
+      type: "elicitation_complete"
+    });
+    sendSseEvent(
+      streamingContext.controller,
+      streamingContext.encoder,
+      "[DONE]"
+    );
+  }
 };
 var sendMessagesToBackend = async (messages, streamingContext, mcpClientManager2, baseUrl, modelId, authHeader, selectedServers) => {
   const messageHistory = (messages || []).map((m) => {
@@ -2441,6 +2815,10 @@ var sendMessagesToBackend = async (messages, streamingContext, mcpClientManager2
     const currentToolCallId = `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
     streamingContext.lastEmittedToolCallId = currentToolCallId;
     streamingContext.toolCallIdToName.set(currentToolCallId, call.name);
+    if (!streamingContext.toolNameToCallIds.has(call.name)) {
+      streamingContext.toolNameToCallIds.set(call.name, []);
+    }
+    streamingContext.toolNameToCallIds.get(call.name).push(currentToolCallId);
     sendSseEvent(streamingContext.controller, streamingContext.encoder, {
       type: "tool_call",
       toolCall: {
@@ -2453,7 +2831,18 @@ var sendMessagesToBackend = async (messages, streamingContext, mcpClientManager2
     });
   };
   const emitToolResult = (result) => {
-    const
+    const toolName = result.toolName;
+    let currentToolCallId;
+    if (toolName && streamingContext.toolNameToCallIds.has(toolName)) {
+      const queue = streamingContext.toolNameToCallIds.get(toolName);
+      if (queue.length > 0) {
+        currentToolCallId = queue.shift();
+      } else {
+        currentToolCallId = streamingContext.lastEmittedToolCallId ?? `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+      }
+    } else {
+      currentToolCallId = streamingContext.lastEmittedToolCallId ?? `tc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
+    }
     sendSseEvent(streamingContext.controller, streamingContext.encoder, {
       type: "tool_result",
       toolResult: {
@@ -2538,6 +2927,7 @@ chat.post("/", async (c) => {
       temperature,
       messages,
       ollamaBaseUrl: _ollama_unused,
+      litellmBaseUrl: _litellm_unused,
       action,
       requestId,
       response
@@ -2605,7 +2995,8 @@ chat.post("/", async (c) => {
     toolCallId: 0,
     lastEmittedToolCallId: null,
     stepIndex: 0,
-    toolCallIdToName: /* @__PURE__ */ new Map()
+    toolCallIdToName: /* @__PURE__ */ new Map(),
+    toolNameToCallIds: /* @__PURE__ */ new Map()
   };
   mcpClientManager2.setElicitationCallback(async (request) => {
     const elicitationRequest = {
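
Both correlation maps now live on the per-request streaming context that `chat.post("/")` builds. Its shape, as implied by this initializer and the earlier hunks (the package does not export this type, so the interface below is an assumption):

    // Hedged sketch of the context object's shape.
    interface StreamingContext {
      controller: ReadableStreamDefaultController<Uint8Array> | null;
      encoder: TextEncoder | null;
      toolCallId: number;
      lastEmittedToolCallId: string | null;
      stepIndex: number;
      toolCallIdToName: Map<string, string>;    // call id -> tool name
      toolNameToCallIds: Map<string, string[]>; // tool name -> FIFO of pending call ids
    }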
@@ -2659,7 +3050,8 @@ chat.post("/", async (c) => {
   const llmModel = createLlmModel(
     model,
     apiKey || "",
-    _ollama_unused
+    _ollama_unused,
+    _litellm_unused
   );
   await createStreamingResponse(
     llmModel,
@@ -2826,7 +3218,7 @@ import { MCPClient } from "@mastra/mcp";
 import { streamText as streamText2 } from "ai";
 
 // ../node_modules/convex/dist/esm/index.js
-var version = "1.
+var version = "1.27.3";
 
 // ../node_modules/convex/dist/esm/values/base64.js
 var lookup = [];
@@ -3457,7 +3849,7 @@ function createApi(pathParts = []) {
 }
 var anyApi = createApi();
 
-// ../node_modules/convex/dist/esm/
+// ../node_modules/convex/dist/esm/browser/long.js
 var __defProp4 = Object.defineProperty;
 var __defNormalProp3 = (obj, key, value) => key in obj ? __defProp4(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
 var __publicField3 = (obj, key, value) => __defNormalProp3(obj, typeof key !== "symbol" ? key + "" : key, value);
@@ -3534,7 +3926,7 @@ var TWO_PWR_32_DBL = TWO_PWR_16_DBL * TWO_PWR_16_DBL;
 var TWO_PWR_64_DBL = TWO_PWR_32_DBL * TWO_PWR_32_DBL;
 var MAX_UNSIGNED_VALUE = new Long(4294967295 | 0, 4294967295 | 0);
 
-// ../node_modules/
+// ../node_modules/jwt-decode/build/esm/index.js
 var InvalidTokenError = class extends Error {
 };
 InvalidTokenError.prototype.name = "InvalidTokenError";