extrait 0.2.0 → 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +3 -1
- package/dist/index.cjs +733 -39
- package/dist/index.js +733 -39
- package/dist/mcp.d.ts +2 -0
- package/dist/types.d.ts +1 -0
- package/package.json +3 -2
package/dist/index.js
CHANGED
|
@@ -1215,10 +1215,36 @@ async function executeMCPToolCalls(calls, toolset, context) {
|
|
|
1215
1215
|
throw new Error("Received a function tool call without id or name.");
|
|
1216
1216
|
}
|
|
1217
1217
|
const tool = toolset.byName.get(toolName);
|
|
1218
|
+
const parsedArguments = parseToolArguments(call.arguments);
|
|
1218
1219
|
if (!tool) {
|
|
1219
|
-
|
|
1220
|
+
const errorMessage = context.request.unknownToolError ? context.request.unknownToolError(toolName) : `Tool "${toolName}" is not registered in the current toolset.`;
|
|
1221
|
+
const metadata2 = {
|
|
1222
|
+
id: callId,
|
|
1223
|
+
type: call.type ?? "function",
|
|
1224
|
+
name: toolName,
|
|
1225
|
+
arguments: parsedArguments,
|
|
1226
|
+
error: errorMessage
|
|
1227
|
+
};
|
|
1228
|
+
const startedAt2 = new Date().toISOString();
|
|
1229
|
+
const execution = {
|
|
1230
|
+
callId,
|
|
1231
|
+
type: metadata2.type,
|
|
1232
|
+
name: toolName,
|
|
1233
|
+
clientId: "__unregistered__",
|
|
1234
|
+
remoteName: toolName,
|
|
1235
|
+
arguments: parsedArguments,
|
|
1236
|
+
error: errorMessage,
|
|
1237
|
+
round: context.round,
|
|
1238
|
+
provider: context.provider,
|
|
1239
|
+
model: context.model,
|
|
1240
|
+
handledLocally: true,
|
|
1241
|
+
startedAt: startedAt2,
|
|
1242
|
+
durationMs: 0
|
|
1243
|
+
};
|
|
1244
|
+
emitToolExecution(context.request, execution);
|
|
1245
|
+
out.push({ call: metadata2, execution });
|
|
1246
|
+
continue;
|
|
1220
1247
|
}
|
|
1221
|
-
const parsedArguments = parseToolArguments(call.arguments);
|
|
1222
1248
|
const args = isRecord(parsedArguments) ? parsedArguments : {};
|
|
1223
1249
|
const metadata = {
|
|
1224
1250
|
id: callId,
|
|
@@ -1518,8 +1544,14 @@ function createOpenAICompatibleAdapter(options) {
|
|
|
1518
1544
|
async stream(request, callbacks = {}) {
|
|
1519
1545
|
const usesResponses = shouldUseResponsesAPI(options, request);
|
|
1520
1546
|
const usesMCP = hasMCPClients(request.mcpClients);
|
|
1521
|
-
if (usesResponses
|
|
1522
|
-
|
|
1547
|
+
if (usesResponses) {
|
|
1548
|
+
if (usesMCP) {
|
|
1549
|
+
return streamWithResponsesAPIWithMCP(options, fetcher, responsesPath, request, callbacks);
|
|
1550
|
+
}
|
|
1551
|
+
return streamWithResponsesAPIPassThrough(options, fetcher, responsesPath, request, callbacks);
|
|
1552
|
+
}
|
|
1553
|
+
if (usesMCP) {
|
|
1554
|
+
return streamWithChatCompletionsWithMCP(options, fetcher, path, request, callbacks);
|
|
1523
1555
|
}
|
|
1524
1556
|
const response = await fetcher(buildURL(options.baseURL, path), {
|
|
1525
1557
|
method: "POST",
|
|
@@ -1804,6 +1836,316 @@ async function completeWithResponsesAPIWithMCP(options, fetcher, path, request)
|
|
|
1804
1836
|
toolExecutions: toolExecutions.length > 0 ? toolExecutions : undefined
|
|
1805
1837
|
};
|
|
1806
1838
|
}
|
|
1839
|
+
async function streamWithChatCompletionsWithMCP(options, fetcher, path, request, callbacks) {
|
|
1840
|
+
const maxToolRounds = normalizeMaxToolRounds(request.maxToolRounds ?? options.defaultMaxToolRounds);
|
|
1841
|
+
let messages = buildMessages(request);
|
|
1842
|
+
let aggregatedUsage;
|
|
1843
|
+
let finishReason;
|
|
1844
|
+
let lastPayload;
|
|
1845
|
+
const executedToolCalls = [];
|
|
1846
|
+
const toolExecutions = [];
|
|
1847
|
+
callbacks.onStart?.();
|
|
1848
|
+
for (let round = 1;round <= maxToolRounds + 1; round += 1) {
|
|
1849
|
+
const mcpToolset = await resolveMCPToolset(request.mcpClients);
|
|
1850
|
+
const transportTools = toProviderFunctionTools(mcpToolset);
|
|
1851
|
+
const response = await fetcher(buildURL(options.baseURL, path), {
|
|
1852
|
+
method: "POST",
|
|
1853
|
+
headers: buildHeaders(options),
|
|
1854
|
+
body: JSON.stringify(cleanUndefined({
|
|
1855
|
+
...options.defaultBody,
|
|
1856
|
+
...request.body,
|
|
1857
|
+
model: options.model,
|
|
1858
|
+
messages,
|
|
1859
|
+
temperature: request.temperature,
|
|
1860
|
+
max_tokens: request.maxTokens,
|
|
1861
|
+
tools: transportTools,
|
|
1862
|
+
tool_choice: request.toolChoice,
|
|
1863
|
+
parallel_tool_calls: request.parallelToolCalls,
|
|
1864
|
+
stream: true
|
|
1865
|
+
}))
|
|
1866
|
+
});
|
|
1867
|
+
if (!response.ok) {
|
|
1868
|
+
const message = await response.text();
|
|
1869
|
+
throw new Error(`HTTP ${response.status}: ${message}`);
|
|
1870
|
+
}
|
|
1871
|
+
let roundText = "";
|
|
1872
|
+
let roundUsage;
|
|
1873
|
+
let roundFinishReason;
|
|
1874
|
+
const streamedToolCalls = new Map;
|
|
1875
|
+
await consumeSSE(response, (data) => {
|
|
1876
|
+
if (data === "[DONE]") {
|
|
1877
|
+
return;
|
|
1878
|
+
}
|
|
1879
|
+
const json = safeJSONParse(data);
|
|
1880
|
+
if (!isRecord2(json)) {
|
|
1881
|
+
return;
|
|
1882
|
+
}
|
|
1883
|
+
lastPayload = json;
|
|
1884
|
+
const delta = pickAssistantDelta(json);
|
|
1885
|
+
const chunkUsage = pickUsage(json);
|
|
1886
|
+
const chunkFinishReason = pickFinishReason(json);
|
|
1887
|
+
collectOpenAIStreamToolCalls(json, streamedToolCalls);
|
|
1888
|
+
roundUsage = mergeUsage(roundUsage, chunkUsage);
|
|
1889
|
+
if (chunkFinishReason) {
|
|
1890
|
+
roundFinishReason = chunkFinishReason;
|
|
1891
|
+
}
|
|
1892
|
+
if (delta) {
|
|
1893
|
+
roundText += delta;
|
|
1894
|
+
callbacks.onToken?.(delta);
|
|
1895
|
+
}
|
|
1896
|
+
if (delta || chunkUsage || chunkFinishReason) {
|
|
1897
|
+
const chunk = {
|
|
1898
|
+
textDelta: delta,
|
|
1899
|
+
raw: json,
|
|
1900
|
+
usage: chunkUsage,
|
|
1901
|
+
finishReason: chunkFinishReason
|
|
1902
|
+
};
|
|
1903
|
+
callbacks.onChunk?.(chunk);
|
|
1904
|
+
}
|
|
1905
|
+
});
|
|
1906
|
+
aggregatedUsage = mergeUsage(aggregatedUsage, roundUsage);
|
|
1907
|
+
if (roundFinishReason) {
|
|
1908
|
+
finishReason = roundFinishReason;
|
|
1909
|
+
}
|
|
1910
|
+
const calledTools = buildOpenAIStreamToolCalls(streamedToolCalls);
|
|
1911
|
+
if (calledTools.length === 0) {
|
|
1912
|
+
const out2 = {
|
|
1913
|
+
text: roundText,
|
|
1914
|
+
raw: lastPayload,
|
|
1915
|
+
usage: aggregatedUsage,
|
|
1916
|
+
finishReason,
|
|
1917
|
+
toolCalls: executedToolCalls.length > 0 ? executedToolCalls : undefined,
|
|
1918
|
+
toolExecutions: toolExecutions.length > 0 ? toolExecutions : undefined
|
|
1919
|
+
};
|
|
1920
|
+
callbacks.onComplete?.(out2);
|
|
1921
|
+
return out2;
|
|
1922
|
+
}
|
|
1923
|
+
if (round > maxToolRounds) {
|
|
1924
|
+
throw new Error(`Tool call loop exceeded maxToolRounds (${maxToolRounds}).`);
|
|
1925
|
+
}
|
|
1926
|
+
const outputs = await executeMCPToolCalls(calledTools, mcpToolset, {
|
|
1927
|
+
round,
|
|
1928
|
+
request,
|
|
1929
|
+
provider: "openai-compatible",
|
|
1930
|
+
model: options.model
|
|
1931
|
+
});
|
|
1932
|
+
executedToolCalls.push(...outputs.map((entry) => entry.call));
|
|
1933
|
+
toolExecutions.push(...outputs.map((entry) => entry.execution));
|
|
1934
|
+
const assistantMessage = buildOpenAIAssistantToolMessage(roundText, calledTools);
|
|
1935
|
+
const toolMessages = outputs.map((entry) => ({
|
|
1936
|
+
role: "tool",
|
|
1937
|
+
tool_call_id: entry.call.id,
|
|
1938
|
+
content: stringifyToolOutput(entry.call.error ? { error: entry.call.error } : entry.call.output)
|
|
1939
|
+
}));
|
|
1940
|
+
messages = [...messages, assistantMessage, ...toolMessages];
|
|
1941
|
+
}
|
|
1942
|
+
const out = {
|
|
1943
|
+
text: "",
|
|
1944
|
+
raw: lastPayload,
|
|
1945
|
+
usage: aggregatedUsage,
|
|
1946
|
+
finishReason,
|
|
1947
|
+
toolCalls: executedToolCalls.length > 0 ? executedToolCalls : undefined,
|
|
1948
|
+
toolExecutions: toolExecutions.length > 0 ? toolExecutions : undefined
|
|
1949
|
+
};
|
|
1950
|
+
callbacks.onComplete?.(out);
|
|
1951
|
+
return out;
|
|
1952
|
+
}
|
|
1953
|
+
async function streamWithResponsesAPIPassThrough(options, fetcher, path, request, callbacks) {
|
|
1954
|
+
const body = isRecord2(request.body) ? request.body : undefined;
|
|
1955
|
+
const response = await fetcher(buildURL(options.baseURL, path), {
|
|
1956
|
+
method: "POST",
|
|
1957
|
+
headers: buildHeaders(options),
|
|
1958
|
+
body: JSON.stringify(cleanUndefined({
|
|
1959
|
+
...options.defaultBody,
|
|
1960
|
+
...request.body,
|
|
1961
|
+
model: options.model,
|
|
1962
|
+
input: buildResponsesInput(request),
|
|
1963
|
+
previous_response_id: pickString(body?.previous_response_id),
|
|
1964
|
+
temperature: request.temperature,
|
|
1965
|
+
max_output_tokens: request.maxTokens,
|
|
1966
|
+
stream: true
|
|
1967
|
+
}))
|
|
1968
|
+
});
|
|
1969
|
+
if (!response.ok) {
|
|
1970
|
+
const message = await response.text();
|
|
1971
|
+
throw new Error(`HTTP ${response.status}: ${message}`);
|
|
1972
|
+
}
|
|
1973
|
+
callbacks.onStart?.();
|
|
1974
|
+
let text = "";
|
|
1975
|
+
let usage;
|
|
1976
|
+
let finishReason;
|
|
1977
|
+
let lastPayload;
|
|
1978
|
+
await consumeSSE(response, (data) => {
|
|
1979
|
+
if (data === "[DONE]") {
|
|
1980
|
+
return;
|
|
1981
|
+
}
|
|
1982
|
+
const json = safeJSONParse(data);
|
|
1983
|
+
if (!isRecord2(json)) {
|
|
1984
|
+
return;
|
|
1985
|
+
}
|
|
1986
|
+
const roundPayload = pickResponsesStreamPayload(json);
|
|
1987
|
+
if (roundPayload) {
|
|
1988
|
+
lastPayload = roundPayload;
|
|
1989
|
+
}
|
|
1990
|
+
const delta = pickResponsesStreamTextDelta(json);
|
|
1991
|
+
const chunkUsage = pickResponsesStreamUsage(json);
|
|
1992
|
+
const chunkFinishReason = pickResponsesStreamFinishReason(json);
|
|
1993
|
+
usage = mergeUsage(usage, chunkUsage);
|
|
1994
|
+
if (chunkFinishReason) {
|
|
1995
|
+
finishReason = chunkFinishReason;
|
|
1996
|
+
}
|
|
1997
|
+
if (delta) {
|
|
1998
|
+
text += delta;
|
|
1999
|
+
callbacks.onToken?.(delta);
|
|
2000
|
+
}
|
|
2001
|
+
if (delta || chunkUsage || chunkFinishReason) {
|
|
2002
|
+
const chunk = {
|
|
2003
|
+
textDelta: delta,
|
|
2004
|
+
raw: json,
|
|
2005
|
+
usage: chunkUsage,
|
|
2006
|
+
finishReason: chunkFinishReason
|
|
2007
|
+
};
|
|
2008
|
+
callbacks.onChunk?.(chunk);
|
|
2009
|
+
}
|
|
2010
|
+
});
|
|
2011
|
+
const finalPayload = lastPayload ?? {};
|
|
2012
|
+
const out = {
|
|
2013
|
+
text: text.length > 0 ? text : pickResponsesText(finalPayload) || pickAssistantText(finalPayload),
|
|
2014
|
+
raw: finalPayload,
|
|
2015
|
+
usage: mergeUsage(usage, pickUsage(finalPayload)),
|
|
2016
|
+
finishReason: finishReason ?? pickResponsesFinishReason(finalPayload) ?? pickFinishReason(finalPayload)
|
|
2017
|
+
};
|
|
2018
|
+
callbacks.onComplete?.(out);
|
|
2019
|
+
return out;
|
|
2020
|
+
}
|
|
2021
|
+
async function streamWithResponsesAPIWithMCP(options, fetcher, path, request, callbacks) {
|
|
2022
|
+
const maxToolRounds = normalizeMaxToolRounds(request.maxToolRounds ?? options.defaultMaxToolRounds);
|
|
2023
|
+
let input = buildResponsesInput(request);
|
|
2024
|
+
let previousResponseId = pickString(isRecord2(request.body) ? request.body.previous_response_id : undefined);
|
|
2025
|
+
let aggregatedUsage;
|
|
2026
|
+
let finishReason;
|
|
2027
|
+
let lastPayload;
|
|
2028
|
+
const executedToolCalls = [];
|
|
2029
|
+
const toolExecutions = [];
|
|
2030
|
+
callbacks.onStart?.();
|
|
2031
|
+
for (let round = 1;round <= maxToolRounds + 1; round += 1) {
|
|
2032
|
+
const mcpToolset = await resolveMCPToolset(request.mcpClients);
|
|
2033
|
+
const transportTools = toResponsesTools(toProviderFunctionTools(mcpToolset));
|
|
2034
|
+
const response = await fetcher(buildURL(options.baseURL, path), {
|
|
2035
|
+
method: "POST",
|
|
2036
|
+
headers: buildHeaders(options),
|
|
2037
|
+
body: JSON.stringify(cleanUndefined({
|
|
2038
|
+
...options.defaultBody,
|
|
2039
|
+
...request.body,
|
|
2040
|
+
model: options.model,
|
|
2041
|
+
input,
|
|
2042
|
+
previous_response_id: previousResponseId,
|
|
2043
|
+
temperature: request.temperature,
|
|
2044
|
+
max_output_tokens: request.maxTokens,
|
|
2045
|
+
tools: transportTools,
|
|
2046
|
+
tool_choice: request.toolChoice,
|
|
2047
|
+
parallel_tool_calls: request.parallelToolCalls,
|
|
2048
|
+
stream: true
|
|
2049
|
+
}))
|
|
2050
|
+
});
|
|
2051
|
+
if (!response.ok) {
|
|
2052
|
+
const message = await response.text();
|
|
2053
|
+
throw new Error(`HTTP ${response.status}: ${message}`);
|
|
2054
|
+
}
|
|
2055
|
+
let roundText = "";
|
|
2056
|
+
let roundUsage;
|
|
2057
|
+
let roundFinishReason;
|
|
2058
|
+
let roundPayload;
|
|
2059
|
+
const streamedToolCalls = new Map;
|
|
2060
|
+
await consumeSSE(response, (data) => {
|
|
2061
|
+
if (data === "[DONE]") {
|
|
2062
|
+
return;
|
|
2063
|
+
}
|
|
2064
|
+
const json = safeJSONParse(data);
|
|
2065
|
+
if (!isRecord2(json)) {
|
|
2066
|
+
return;
|
|
2067
|
+
}
|
|
2068
|
+
const payload = pickResponsesStreamPayload(json);
|
|
2069
|
+
if (payload) {
|
|
2070
|
+
roundPayload = payload;
|
|
2071
|
+
lastPayload = payload;
|
|
2072
|
+
}
|
|
2073
|
+
const delta = pickResponsesStreamTextDelta(json);
|
|
2074
|
+
const chunkUsage = pickResponsesStreamUsage(json);
|
|
2075
|
+
const chunkFinishReason = pickResponsesStreamFinishReason(json);
|
|
2076
|
+
collectResponsesStreamToolCalls(json, streamedToolCalls);
|
|
2077
|
+
roundUsage = mergeUsage(roundUsage, chunkUsage);
|
|
2078
|
+
if (chunkFinishReason) {
|
|
2079
|
+
roundFinishReason = chunkFinishReason;
|
|
2080
|
+
}
|
|
2081
|
+
if (delta) {
|
|
2082
|
+
roundText += delta;
|
|
2083
|
+
callbacks.onToken?.(delta);
|
|
2084
|
+
}
|
|
2085
|
+
if (delta || chunkUsage || chunkFinishReason) {
|
|
2086
|
+
const chunk = {
|
|
2087
|
+
textDelta: delta,
|
|
2088
|
+
raw: json,
|
|
2089
|
+
usage: chunkUsage,
|
|
2090
|
+
finishReason: chunkFinishReason
|
|
2091
|
+
};
|
|
2092
|
+
callbacks.onChunk?.(chunk);
|
|
2093
|
+
}
|
|
2094
|
+
});
|
|
2095
|
+
aggregatedUsage = mergeUsage(aggregatedUsage, roundUsage);
|
|
2096
|
+
const payloadUsage = roundPayload ? pickUsage(roundPayload) : undefined;
|
|
2097
|
+
aggregatedUsage = mergeUsage(aggregatedUsage, payloadUsage);
|
|
2098
|
+
if (roundFinishReason) {
|
|
2099
|
+
finishReason = roundFinishReason;
|
|
2100
|
+
} else if (roundPayload) {
|
|
2101
|
+
finishReason = pickResponsesFinishReason(roundPayload) ?? finishReason;
|
|
2102
|
+
}
|
|
2103
|
+
const payloadToolCalls = roundPayload ? pickResponsesToolCalls(roundPayload) : [];
|
|
2104
|
+
const streamedCalls = buildResponsesStreamToolCalls(streamedToolCalls);
|
|
2105
|
+
const providerToolCalls = payloadToolCalls.length > 0 ? payloadToolCalls : streamedCalls;
|
|
2106
|
+
const functionCalls = providerToolCalls.filter((toolCall) => toolCall.type === "function" && typeof toolCall.id === "string" && typeof toolCall.name === "string");
|
|
2107
|
+
if (functionCalls.length === 0) {
|
|
2108
|
+
const finalText = roundText.length > 0 ? roundText : roundPayload ? pickResponsesText(roundPayload) || pickAssistantText(roundPayload) : "";
|
|
2109
|
+
const out2 = {
|
|
2110
|
+
text: finalText,
|
|
2111
|
+
raw: roundPayload ?? lastPayload,
|
|
2112
|
+
usage: aggregatedUsage,
|
|
2113
|
+
finishReason,
|
|
2114
|
+
toolCalls: executedToolCalls.length > 0 ? executedToolCalls : undefined,
|
|
2115
|
+
toolExecutions: toolExecutions.length > 0 ? toolExecutions : undefined
|
|
2116
|
+
};
|
|
2117
|
+
callbacks.onComplete?.(out2);
|
|
2118
|
+
return out2;
|
|
2119
|
+
}
|
|
2120
|
+
if (round > maxToolRounds) {
|
|
2121
|
+
throw new Error(`Tool call loop exceeded maxToolRounds (${maxToolRounds}).`);
|
|
2122
|
+
}
|
|
2123
|
+
const outputs = await executeMCPToolCalls(functionCalls, mcpToolset, {
|
|
2124
|
+
round,
|
|
2125
|
+
request,
|
|
2126
|
+
provider: "openai-compatible",
|
|
2127
|
+
model: options.model
|
|
2128
|
+
});
|
|
2129
|
+
executedToolCalls.push(...outputs.map((entry) => entry.call));
|
|
2130
|
+
toolExecutions.push(...outputs.map((entry) => entry.execution));
|
|
2131
|
+
input = outputs.map((entry) => ({
|
|
2132
|
+
type: "function_call_output",
|
|
2133
|
+
call_id: entry.call.id,
|
|
2134
|
+
output: stringifyToolOutput(entry.call.error ? { error: entry.call.error } : entry.call.output)
|
|
2135
|
+
}));
|
|
2136
|
+
previousResponseId = pickString(roundPayload?.id);
|
|
2137
|
+
}
|
|
2138
|
+
const out = {
|
|
2139
|
+
text: pickResponsesText(lastPayload ?? {}) || pickAssistantText(lastPayload ?? {}),
|
|
2140
|
+
raw: lastPayload,
|
|
2141
|
+
usage: aggregatedUsage,
|
|
2142
|
+
finishReason,
|
|
2143
|
+
toolCalls: executedToolCalls.length > 0 ? executedToolCalls : undefined,
|
|
2144
|
+
toolExecutions: toolExecutions.length > 0 ? toolExecutions : undefined
|
|
2145
|
+
};
|
|
2146
|
+
callbacks.onComplete?.(out);
|
|
2147
|
+
return out;
|
|
2148
|
+
}
|
|
1807
2149
|
function shouldUseResponsesAPI(options, request) {
|
|
1808
2150
|
if (options.path?.includes("/responses")) {
|
|
1809
2151
|
return true;
|
|
@@ -1960,6 +2302,190 @@ function pickAssistantDelta(payload) {
|
|
|
1960
2302
|
}
|
|
1961
2303
|
return "";
|
|
1962
2304
|
}
|
|
2305
|
+
function collectOpenAIStreamToolCalls(payload, state) {
|
|
2306
|
+
const choices = payload.choices;
|
|
2307
|
+
if (!Array.isArray(choices) || choices.length === 0 || !isRecord2(choices[0])) {
|
|
2308
|
+
return;
|
|
2309
|
+
}
|
|
2310
|
+
const delta = choices[0].delta;
|
|
2311
|
+
if (!isRecord2(delta) || !Array.isArray(delta.tool_calls)) {
|
|
2312
|
+
return;
|
|
2313
|
+
}
|
|
2314
|
+
for (const rawToolCall of delta.tool_calls) {
|
|
2315
|
+
if (!isRecord2(rawToolCall)) {
|
|
2316
|
+
continue;
|
|
2317
|
+
}
|
|
2318
|
+
const index = toFiniteNumber(rawToolCall.index);
|
|
2319
|
+
const toolIndex = index !== undefined ? Math.floor(index) : 0;
|
|
2320
|
+
const existing = state.get(toolIndex) ?? {
|
|
2321
|
+
index: toolIndex,
|
|
2322
|
+
argumentsText: ""
|
|
2323
|
+
};
|
|
2324
|
+
const id = pickString(rawToolCall.id);
|
|
2325
|
+
if (id) {
|
|
2326
|
+
existing.id = id;
|
|
2327
|
+
}
|
|
2328
|
+
const type = pickString(rawToolCall.type);
|
|
2329
|
+
if (type) {
|
|
2330
|
+
existing.type = type;
|
|
2331
|
+
}
|
|
2332
|
+
const functionCall = isRecord2(rawToolCall.function) ? rawToolCall.function : undefined;
|
|
2333
|
+
const name = pickString(functionCall?.name);
|
|
2334
|
+
if (name) {
|
|
2335
|
+
existing.name = `${existing.name ?? ""}${name}`;
|
|
2336
|
+
}
|
|
2337
|
+
const argumentsDelta = pickString(functionCall?.arguments);
|
|
2338
|
+
if (argumentsDelta) {
|
|
2339
|
+
existing.argumentsText += argumentsDelta;
|
|
2340
|
+
}
|
|
2341
|
+
state.set(toolIndex, existing);
|
|
2342
|
+
}
|
|
2343
|
+
}
|
|
2344
|
+
function buildOpenAIStreamToolCalls(state) {
|
|
2345
|
+
return [...state.values()].sort((a, b) => a.index - b.index).map((entry) => ({
|
|
2346
|
+
id: entry.id ?? "",
|
|
2347
|
+
type: entry.type ?? "function",
|
|
2348
|
+
name: entry.name,
|
|
2349
|
+
arguments: entry.argumentsText.length > 0 ? entry.argumentsText : {}
|
|
2350
|
+
}));
|
|
2351
|
+
}
|
|
2352
|
+
function buildOpenAIAssistantToolMessage(text, toolCalls) {
|
|
2353
|
+
return {
|
|
2354
|
+
role: "assistant",
|
|
2355
|
+
content: text,
|
|
2356
|
+
tool_calls: toolCalls.map((call) => ({
|
|
2357
|
+
id: call.id,
|
|
2358
|
+
type: "function",
|
|
2359
|
+
function: {
|
|
2360
|
+
name: call.name,
|
|
2361
|
+
arguments: typeof call.arguments === "string" ? call.arguments : JSON.stringify(call.arguments ?? {})
|
|
2362
|
+
}
|
|
2363
|
+
}))
|
|
2364
|
+
};
|
|
2365
|
+
}
|
|
2366
|
+
function pickResponsesStreamPayload(payload) {
|
|
2367
|
+
if (isRecord2(payload.response)) {
|
|
2368
|
+
return payload.response;
|
|
2369
|
+
}
|
|
2370
|
+
if ("output" in payload || "output_text" in payload || "status" in payload || "id" in payload) {
|
|
2371
|
+
return payload;
|
|
2372
|
+
}
|
|
2373
|
+
return;
|
|
2374
|
+
}
|
|
2375
|
+
function pickResponsesStreamTextDelta(payload) {
|
|
2376
|
+
const eventType = pickString(payload.type) ?? "";
|
|
2377
|
+
if (!eventType.includes("output_text.delta")) {
|
|
2378
|
+
return "";
|
|
2379
|
+
}
|
|
2380
|
+
const direct = pickString(payload.delta);
|
|
2381
|
+
if (direct) {
|
|
2382
|
+
return direct;
|
|
2383
|
+
}
|
|
2384
|
+
if (isRecord2(payload.delta)) {
|
|
2385
|
+
return pickString(payload.delta.text) ?? pickString(payload.delta.output_text) ?? "";
|
|
2386
|
+
}
|
|
2387
|
+
return "";
|
|
2388
|
+
}
|
|
2389
|
+
function pickResponsesStreamUsage(payload) {
|
|
2390
|
+
const direct = pickUsage(payload);
|
|
2391
|
+
if (direct) {
|
|
2392
|
+
return direct;
|
|
2393
|
+
}
|
|
2394
|
+
if (isRecord2(payload.response)) {
|
|
2395
|
+
return pickUsage(payload.response);
|
|
2396
|
+
}
|
|
2397
|
+
return;
|
|
2398
|
+
}
|
|
2399
|
+
function pickResponsesStreamFinishReason(payload) {
|
|
2400
|
+
const eventType = pickString(payload.type);
|
|
2401
|
+
if (eventType === "response.completed") {
|
|
2402
|
+
return "completed";
|
|
2403
|
+
}
|
|
2404
|
+
if (eventType === "response.failed") {
|
|
2405
|
+
return "failed";
|
|
2406
|
+
}
|
|
2407
|
+
const directStatus = pickString(payload.status);
|
|
2408
|
+
if (directStatus) {
|
|
2409
|
+
return directStatus;
|
|
2410
|
+
}
|
|
2411
|
+
if (isRecord2(payload.response)) {
|
|
2412
|
+
return pickString(payload.response.status);
|
|
2413
|
+
}
|
|
2414
|
+
return;
|
|
2415
|
+
}
|
|
2416
|
+
function collectResponsesStreamToolCalls(payload, state) {
|
|
2417
|
+
if (isRecord2(payload.response)) {
|
|
2418
|
+
collectResponsesStreamToolCallsFromOutput(payload.response.output, state);
|
|
2419
|
+
}
|
|
2420
|
+
collectResponsesStreamToolCallsFromOutput(payload.output, state);
|
|
2421
|
+
if (isRecord2(payload.item)) {
|
|
2422
|
+
const itemKey = pickString(payload.item_id) ?? pickString(payload.call_id);
|
|
2423
|
+
collectResponsesStreamToolCallsFromItem(payload.item, state, itemKey);
|
|
2424
|
+
}
|
|
2425
|
+
if (isRecord2(payload.output_item)) {
|
|
2426
|
+
const itemKey = pickString(payload.item_id) ?? pickString(payload.call_id);
|
|
2427
|
+
collectResponsesStreamToolCallsFromItem(payload.output_item, state, itemKey);
|
|
2428
|
+
}
|
|
2429
|
+
const eventType = pickString(payload.type) ?? "";
|
|
2430
|
+
if (eventType.includes("function_call_arguments.delta")) {
|
|
2431
|
+
const key = pickString(payload.item_id) ?? pickString(payload.call_id) ?? "function_call";
|
|
2432
|
+
const existing = state.get(key) ?? {
|
|
2433
|
+
key,
|
|
2434
|
+
argumentsText: ""
|
|
2435
|
+
};
|
|
2436
|
+
const delta = pickString(payload.delta) ?? (isRecord2(payload.delta) ? pickString(payload.delta.text) ?? pickString(payload.delta.arguments) : undefined) ?? pickString(payload.arguments_delta);
|
|
2437
|
+
if (delta) {
|
|
2438
|
+
existing.argumentsText += delta;
|
|
2439
|
+
}
|
|
2440
|
+
state.set(key, existing);
|
|
2441
|
+
}
|
|
2442
|
+
}
|
|
2443
|
+
function collectResponsesStreamToolCallsFromOutput(output, state) {
|
|
2444
|
+
if (!Array.isArray(output)) {
|
|
2445
|
+
return;
|
|
2446
|
+
}
|
|
2447
|
+
for (const item of output) {
|
|
2448
|
+
if (!isRecord2(item)) {
|
|
2449
|
+
continue;
|
|
2450
|
+
}
|
|
2451
|
+
collectResponsesStreamToolCallsFromItem(item, state);
|
|
2452
|
+
}
|
|
2453
|
+
}
|
|
2454
|
+
function collectResponsesStreamToolCallsFromItem(item, state, forcedKey) {
|
|
2455
|
+
const type = pickString(item.type);
|
|
2456
|
+
if (type !== "function_call" && !type?.includes("tool") && !type?.includes("mcp")) {
|
|
2457
|
+
return;
|
|
2458
|
+
}
|
|
2459
|
+
const key = forcedKey ?? pickString(item.call_id) ?? pickString(item.id) ?? `call_${state.size}`;
|
|
2460
|
+
const existing = state.get(key) ?? {
|
|
2461
|
+
key,
|
|
2462
|
+
argumentsText: ""
|
|
2463
|
+
};
|
|
2464
|
+
const callId = pickString(item.call_id) ?? pickString(item.id);
|
|
2465
|
+
if (callId) {
|
|
2466
|
+
existing.id = callId;
|
|
2467
|
+
}
|
|
2468
|
+
if (type) {
|
|
2469
|
+
existing.type = type;
|
|
2470
|
+
}
|
|
2471
|
+
const name = pickString(item.name);
|
|
2472
|
+
if (name) {
|
|
2473
|
+
existing.name = name;
|
|
2474
|
+
}
|
|
2475
|
+
const argumentsText = pickString(item.arguments);
|
|
2476
|
+
if (argumentsText && argumentsText.length >= existing.argumentsText.length) {
|
|
2477
|
+
existing.argumentsText = argumentsText;
|
|
2478
|
+
}
|
|
2479
|
+
state.set(key, existing);
|
|
2480
|
+
}
|
|
2481
|
+
function buildResponsesStreamToolCalls(state) {
|
|
2482
|
+
return [...state.values()].map((entry) => ({
|
|
2483
|
+
id: entry.id ?? entry.key,
|
|
2484
|
+
type: entry.type === "function_call" ? "function" : entry.type ?? "function",
|
|
2485
|
+
name: entry.name,
|
|
2486
|
+
arguments: entry.argumentsText.length > 0 ? entry.argumentsText : {}
|
|
2487
|
+
}));
|
|
2488
|
+
}
|
|
1963
2489
|
function pickResponsesText(payload) {
|
|
1964
2490
|
const outputText = payload.output_text;
|
|
1965
2491
|
if (typeof outputText === "string") {
|
|
@@ -2052,22 +2578,6 @@ function pickResponsesFinishReason(payload) {
|
|
|
2052
2578
|
}
|
|
2053
2579
|
return;
|
|
2054
2580
|
}
|
|
2055
|
-
async function streamViaComplete(callbacks, complete) {
|
|
2056
|
-
callbacks.onStart?.();
|
|
2057
|
-
const response = await complete();
|
|
2058
|
-
if (response.text.length > 0) {
|
|
2059
|
-
callbacks.onToken?.(response.text);
|
|
2060
|
-
}
|
|
2061
|
-
callbacks.onChunk?.({
|
|
2062
|
-
textDelta: response.text,
|
|
2063
|
-
raw: response.raw,
|
|
2064
|
-
done: true,
|
|
2065
|
-
usage: response.usage,
|
|
2066
|
-
finishReason: response.finishReason
|
|
2067
|
-
});
|
|
2068
|
-
callbacks.onComplete?.(response);
|
|
2069
|
-
return response;
|
|
2070
|
-
}
|
|
2071
2581
|
|
|
2072
2582
|
// src/providers/anthropic-compatible.ts
|
|
2073
2583
|
var DEFAULT_ANTHROPIC_MAX_TOKENS = 1024;
|
|
@@ -2086,7 +2596,7 @@ function createAnthropicCompatibleAdapter(options) {
|
|
|
2086
2596
|
},
|
|
2087
2597
|
async stream(request, callbacks = {}) {
|
|
2088
2598
|
if (hasMCPClients(request.mcpClients)) {
|
|
2089
|
-
return
|
|
2599
|
+
return streamWithMCPToolLoop(options, fetcher, path, request, callbacks);
|
|
2090
2600
|
}
|
|
2091
2601
|
const response = await fetcher(buildURL(options.baseURL, path), {
|
|
2092
2602
|
method: "POST",
|
|
@@ -2260,6 +2770,127 @@ async function completeWithMCPToolLoop(options, fetcher, path, request) {
|
|
|
2260
2770
|
toolExecutions: toolExecutions.length > 0 ? toolExecutions : undefined
|
|
2261
2771
|
};
|
|
2262
2772
|
}
|
|
2773
|
+
async function streamWithMCPToolLoop(options, fetcher, path, request, callbacks) {
|
|
2774
|
+
const maxToolRounds = normalizeMaxToolRounds(request.maxToolRounds ?? options.defaultMaxToolRounds);
|
|
2775
|
+
let messages = [{ role: "user", content: request.prompt }];
|
|
2776
|
+
let aggregatedUsage;
|
|
2777
|
+
let finishReason;
|
|
2778
|
+
let lastPayload;
|
|
2779
|
+
const toolCalls = [];
|
|
2780
|
+
const toolExecutions = [];
|
|
2781
|
+
callbacks.onStart?.();
|
|
2782
|
+
for (let round = 1;round <= maxToolRounds + 1; round += 1) {
|
|
2783
|
+
const mcpToolset = await resolveMCPToolset(request.mcpClients);
|
|
2784
|
+
const tools = toAnthropicTools(toProviderFunctionTools(mcpToolset));
|
|
2785
|
+
const response = await fetcher(buildURL(options.baseURL, path), {
|
|
2786
|
+
method: "POST",
|
|
2787
|
+
headers: buildHeaders2(options),
|
|
2788
|
+
body: JSON.stringify(cleanUndefined({
|
|
2789
|
+
...options.defaultBody,
|
|
2790
|
+
...request.body,
|
|
2791
|
+
model: options.model,
|
|
2792
|
+
system: request.systemPrompt,
|
|
2793
|
+
messages,
|
|
2794
|
+
temperature: request.temperature,
|
|
2795
|
+
max_tokens: resolveMaxTokens(request.maxTokens, options.defaultMaxTokens),
|
|
2796
|
+
tools,
|
|
2797
|
+
tool_choice: toAnthropicToolChoice(request.toolChoice),
|
|
2798
|
+
stream: true
|
|
2799
|
+
}))
|
|
2800
|
+
});
|
|
2801
|
+
if (!response.ok) {
|
|
2802
|
+
const message = await response.text();
|
|
2803
|
+
throw new Error(`HTTP ${response.status}: ${message}`);
|
|
2804
|
+
}
|
|
2805
|
+
let roundText = "";
|
|
2806
|
+
let roundUsage;
|
|
2807
|
+
let roundFinishReason;
|
|
2808
|
+
const streamedToolCalls = new Map;
|
|
2809
|
+
await consumeSSE(response, (data) => {
|
|
2810
|
+
if (data === "[DONE]") {
|
|
2811
|
+
return;
|
|
2812
|
+
}
|
|
2813
|
+
const json = safeJSONParse(data);
|
|
2814
|
+
if (!isRecord2(json)) {
|
|
2815
|
+
return;
|
|
2816
|
+
}
|
|
2817
|
+
lastPayload = json;
|
|
2818
|
+
const delta = pickAnthropicDelta(json);
|
|
2819
|
+
const chunkUsage = pickUsage2(json);
|
|
2820
|
+
const chunkFinishReason = pickFinishReason2(json);
|
|
2821
|
+
collectAnthropicStreamToolCalls(json, streamedToolCalls);
|
|
2822
|
+
roundUsage = mergeUsage(roundUsage, chunkUsage);
|
|
2823
|
+
if (chunkFinishReason) {
|
|
2824
|
+
roundFinishReason = chunkFinishReason;
|
|
2825
|
+
}
|
|
2826
|
+
if (delta) {
|
|
2827
|
+
roundText += delta;
|
|
2828
|
+
callbacks.onToken?.(delta);
|
|
2829
|
+
}
|
|
2830
|
+
if (delta || chunkUsage || chunkFinishReason) {
|
|
2831
|
+
const chunk = {
|
|
2832
|
+
textDelta: delta,
|
|
2833
|
+
raw: json,
|
|
2834
|
+
usage: chunkUsage,
|
|
2835
|
+
finishReason: chunkFinishReason
|
|
2836
|
+
};
|
|
2837
|
+
callbacks.onChunk?.(chunk);
|
|
2838
|
+
}
|
|
2839
|
+
});
|
|
2840
|
+
aggregatedUsage = mergeUsage(aggregatedUsage, roundUsage);
|
|
2841
|
+
if (roundFinishReason) {
|
|
2842
|
+
finishReason = roundFinishReason;
|
|
2843
|
+
}
|
|
2844
|
+
const calledTools = buildAnthropicStreamToolCalls(streamedToolCalls);
|
|
2845
|
+
if (calledTools.length === 0) {
|
|
2846
|
+
const out2 = {
|
|
2847
|
+
text: roundText,
|
|
2848
|
+
raw: lastPayload,
|
|
2849
|
+
usage: aggregatedUsage,
|
|
2850
|
+
finishReason,
|
|
2851
|
+
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
|
|
2852
|
+
toolExecutions: toolExecutions.length > 0 ? toolExecutions : undefined
|
|
2853
|
+
};
|
|
2854
|
+
callbacks.onComplete?.(out2);
|
|
2855
|
+
return out2;
|
|
2856
|
+
}
|
|
2857
|
+
if (round > maxToolRounds) {
|
|
2858
|
+
throw new Error(`Tool call loop exceeded maxToolRounds (${maxToolRounds}).`);
|
|
2859
|
+
}
|
|
2860
|
+
const toolResultContent = [];
|
|
2861
|
+
const outputs = await executeMCPToolCalls(calledTools, mcpToolset, {
|
|
2862
|
+
round,
|
|
2863
|
+
request,
|
|
2864
|
+
provider: "anthropic-compatible",
|
|
2865
|
+
model: options.model
|
|
2866
|
+
});
|
|
2867
|
+
toolCalls.push(...outputs.map((entry) => entry.call));
|
|
2868
|
+
toolExecutions.push(...outputs.map((entry) => entry.execution));
|
|
2869
|
+
for (const entry of outputs) {
|
|
2870
|
+
toolResultContent.push({
|
|
2871
|
+
type: "tool_result",
|
|
2872
|
+
tool_use_id: entry.call.id,
|
|
2873
|
+
...entry.call.error ? { is_error: true } : {},
|
|
2874
|
+
content: stringifyToolOutput(entry.call.error ? { error: entry.call.error } : entry.call.output)
|
|
2875
|
+
});
|
|
2876
|
+
}
|
|
2877
|
+
messages = [
|
|
2878
|
+
...messages,
|
|
2879
|
+
{ role: "assistant", content: buildAnthropicAssistantToolContent(roundText, calledTools) },
|
|
2880
|
+
{ role: "user", content: toolResultContent }
|
|
2881
|
+
];
|
|
2882
|
+
}
|
|
2883
|
+
const out = {
|
|
2884
|
+
text: "",
|
|
2885
|
+
raw: lastPayload,
|
|
2886
|
+
usage: aggregatedUsage,
|
|
2887
|
+
finishReason,
|
|
2888
|
+
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
|
|
2889
|
+
toolExecutions: toolExecutions.length > 0 ? toolExecutions : undefined
|
|
2890
|
+
};
|
|
2891
|
+
callbacks.onComplete?.(out);
|
|
2892
|
+
return out;
|
|
2893
|
+
}
|
|
2263
2894
|
function buildHeaders2(options) {
|
|
2264
2895
|
return {
|
|
2265
2896
|
"content-type": "application/json",
|
|
@@ -2322,6 +2953,83 @@ function pickAnthropicDelta(payload) {
|
|
|
2322
2953
|
}
|
|
2323
2954
|
return "";
|
|
2324
2955
|
}
|
|
2956
|
+
// Folds a single Anthropic SSE event into `state`, a Map keyed by content-block
// index that accumulates partial tool_use calls across streaming events.
// Events other than tool_use content_block_start / input_json_delta deltas are ignored.
function collectAnthropicStreamToolCalls(payload, state) {
  const eventType = pickString(payload.type);
  if (!eventType) {
    return;
  }
  // Get-or-create the accumulator entry for a content-block index.
  const ensureEntry = (index) => {
    const entry = state.get(index) ?? { index, argumentsText: "" };
    state.set(index, entry);
    return entry;
  };
  if (eventType === "content_block_start" && isRecord2(payload.content_block)) {
    const block = payload.content_block;
    if (pickString(block.type) !== "tool_use") {
      return;
    }
    const entry = ensureEntry(pickContentBlockIndex(payload.index));
    const blockId = pickString(block.id);
    if (blockId) {
      entry.id = blockId;
    }
    const blockName = pickString(block.name);
    if (blockName) {
      entry.name = blockName;
    }
    // Some servers send a full input object up front; keep it as a fallback
    // for when no JSON deltas arrive.
    if ("input" in block) {
      entry.input = block.input;
    }
    return;
  }
  if (eventType === "content_block_delta" && isRecord2(payload.delta)) {
    const delta = payload.delta;
    if (pickString(delta.type) !== "input_json_delta") {
      return;
    }
    const entry = ensureEntry(pickContentBlockIndex(payload.index));
    const partial = pickString(delta.partial_json);
    if (partial) {
      // Arguments stream as raw JSON text fragments; concatenate in order.
      entry.argumentsText += partial;
    }
  }
}
|
|
3002
|
+
// Normalizes a streamed content-block index to a non-negative-ish integer,
// defaulting to 0 when the value is absent or non-numeric.
function pickContentBlockIndex(value) {
  const numeric = toFiniteNumber(value);
  return numeric === undefined ? 0 : Math.floor(numeric);
}
|
|
3009
|
+
// Converts the streaming accumulator map into the ordered tool-call list
// exposed on the completion result. Entries are sorted by their original
// content-block index; streamed JSON text wins over a pre-supplied input object.
function buildAnthropicStreamToolCalls(state) {
  const entries = Array.from(state.values());
  entries.sort((left, right) => left.index - right.index);
  return entries.map((entry) => {
    const args = entry.argumentsText.length > 0 ? entry.argumentsText : entry.input ?? {};
    return {
      id: entry.id ?? "",
      type: "function",
      name: entry.name,
      arguments: args
    };
  });
}
|
|
3017
|
+
// Rebuilds the assistant message content for the follow-up request after tool
// execution: an optional leading text block plus one tool_use block per call.
// String-encoded arguments are parsed; anything that is not a plain object
// falls back to an empty input.
function buildAnthropicAssistantToolContent(text, toolCalls) {
  const content = text.length > 0 ? [{ type: "text", text }] : [];
  for (const call of toolCalls) {
    const args = typeof call.arguments === "string" ? parseToolArguments(call.arguments) : call.arguments;
    content.push({
      type: "tool_use",
      id: call.id,
      name: call.name,
      input: isRecord2(args) ? args : {}
    });
  }
  return content;
}
|
|
2325
3033
|
function pickUsage2(payload) {
|
|
2326
3034
|
const fromUsage = extractUsageObject(payload.usage);
|
|
2327
3035
|
if (fromUsage) {
|
|
@@ -2401,22 +3109,6 @@ function toAnthropicToolChoice(value) {
|
|
|
2401
3109
|
}
|
|
2402
3110
|
return value;
|
|
2403
3111
|
}
|
|
2404
|
-
// Fallback streaming adapter: runs the non-streaming `complete` call once and
// replays its result through the streaming callbacks as a single final chunk.
async function streamViaComplete2(callbacks, complete) {
  callbacks.onStart?.();
  const response = await complete();
  const { text, raw, usage, finishReason } = response;
  if (text.length > 0) {
    callbacks.onToken?.(text);
  }
  callbacks.onChunk?.({ textDelta: text, raw, done: true, usage, finishReason });
  callbacks.onComplete?.(response);
  return response;
}
|
|
2420
3112
|
|
|
2421
3113
|
// src/providers/registry.ts
|
|
2422
3114
|
class InMemoryProviderRegistry {
|
|
@@ -3899,7 +4591,8 @@ async function createMCPClient(options) {
|
|
|
3899
4591
|
return wrapMCPClient({
|
|
3900
4592
|
id: options.id,
|
|
3901
4593
|
client,
|
|
3902
|
-
transport
|
|
4594
|
+
transport,
|
|
4595
|
+
toolCallTimeoutMs: options.toolCallTimeoutMs
|
|
3903
4596
|
});
|
|
3904
4597
|
}
|
|
3905
4598
|
function wrapMCPClient(options) {
|
|
@@ -3919,7 +4612,8 @@ function wrapMCPClient(options) {
|
|
|
3919
4612
|
};
|
|
3920
4613
|
},
|
|
3921
4614
|
async callTool(params) {
|
|
3922
|
-
|
|
4615
|
+
const callOptions = options.toolCallTimeoutMs === undefined ? undefined : { timeout: options.toolCallTimeoutMs };
|
|
4616
|
+
return options.client.callTool(params, undefined, callOptions);
|
|
3923
4617
|
},
|
|
3924
4618
|
async close() {
|
|
3925
4619
|
await options.client.close();
|