@discomedia/utils 1.0.24 → 1.0.25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index-frontend.cjs +153 -116
- package/dist/index-frontend.cjs.map +1 -1
- package/dist/index-frontend.mjs +153 -116
- package/dist/index-frontend.mjs.map +1 -1
- package/dist/index.cjs +153 -116
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +153 -116
- package/dist/index.mjs.map +1 -1
- package/dist/package.json +2 -2
- package/dist/test.js +5128 -1032
- package/dist/test.js.map +1 -1
- package/dist/types/json-tools.d.ts +1 -3
- package/dist/types/json-tools.d.ts.map +1 -1
- package/dist/types/llm-config.d.ts.map +1 -1
- package/dist/types/llm-openai.d.ts +6 -0
- package/dist/types/llm-openai.d.ts.map +1 -1
- package/dist/types/types/llm-types.d.ts +1 -1
- package/dist/types/types/llm-types.d.ts.map +1 -1
- package/dist/types-frontend/json-tools.d.ts +1 -3
- package/dist/types-frontend/json-tools.d.ts.map +1 -1
- package/dist/types-frontend/llm-config.d.ts.map +1 -1
- package/dist/types-frontend/llm-openai.d.ts +6 -0
- package/dist/types-frontend/llm-openai.d.ts.map +1 -1
- package/dist/types-frontend/types/llm-types.d.ts +1 -1
- package/dist/types-frontend/types/llm-types.d.ts.map +1 -1
- package/package.json +2 -2
- package/dist/types/old-test.d.ts +0 -2
- package/dist/types/old-test.d.ts.map +0 -1
- package/dist/types-frontend/old-test.d.ts +0 -2
- package/dist/types-frontend/old-test.d.ts.map +0 -1
package/dist/index-frontend.cjs
CHANGED
@@ -249,7 +249,7 @@ const safeJSON = (text) => {
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
 
-const VERSION = '5.12.
+const VERSION = '5.12.1'; // x-release-please-version
 
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 const isRunningInBrowser = () => {
@@ -1882,8 +1882,119 @@ let Messages$1 = class Messages extends APIResource {
     }
 };
 
-function
-return
+function isChatCompletionFunctionTool(tool) {
+    return tool !== undefined && 'function' in tool && tool.function !== undefined;
+}
+function isAutoParsableResponseFormat(response_format) {
+    return response_format?.['$brand'] === 'auto-parseable-response-format';
+}
+function isAutoParsableTool$1(tool) {
+    return tool?.['$brand'] === 'auto-parseable-tool';
+}
+function maybeParseChatCompletion(completion, params) {
+    if (!params || !hasAutoParseableInput$1(params)) {
+        return {
+            ...completion,
+            choices: completion.choices.map((choice) => {
+                assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+                return {
+                    ...choice,
+                    message: {
+                        ...choice.message,
+                        parsed: null,
+                        ...(choice.message.tool_calls ?
+                            {
+                                tool_calls: choice.message.tool_calls,
+                            }
+                            : undefined),
+                    },
+                };
+            }),
+        };
+    }
+    return parseChatCompletion(completion, params);
+}
+function parseChatCompletion(completion, params) {
+    const choices = completion.choices.map((choice) => {
+        if (choice.finish_reason === 'length') {
+            throw new LengthFinishReasonError();
+        }
+        if (choice.finish_reason === 'content_filter') {
+            throw new ContentFilterFinishReasonError();
+        }
+        assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+        return {
+            ...choice,
+            message: {
+                ...choice.message,
+                ...(choice.message.tool_calls ?
+                    {
+                        tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? undefined,
+                    }
+                    : undefined),
+                parsed: choice.message.content && !choice.message.refusal ?
+                    parseResponseFormat(params, choice.message.content)
+                    : null,
+            },
+        };
+    });
+    return { ...completion, choices };
+}
+function parseResponseFormat(params, content) {
+    if (params.response_format?.type !== 'json_schema') {
+        return null;
+    }
+    if (params.response_format?.type === 'json_schema') {
+        if ('$parseRaw' in params.response_format) {
+            const response_format = params.response_format;
+            return response_format.$parseRaw(content);
+        }
+        return JSON.parse(content);
+    }
+    return null;
+}
+function parseToolCall$1(params, toolCall) {
+    const inputTool = params.tools?.find((inputTool) => isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name); // TS doesn't narrow based on isChatCompletionTool
+    return {
+        ...toolCall,
+        function: {
+            ...toolCall.function,
+            parsed_arguments: isAutoParsableTool$1(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)
+                : inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)
+                    : null,
+        },
+    };
+}
+function shouldParseToolCall(params, toolCall) {
+    if (!params || !('tools' in params) || !params.tools) {
+        return false;
+    }
+    const inputTool = params.tools?.find((inputTool) => isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name);
+    return (isChatCompletionFunctionTool(inputTool) &&
+        (isAutoParsableTool$1(inputTool) || inputTool?.function.strict || false));
+}
+function hasAutoParseableInput$1(params) {
+    if (isAutoParsableResponseFormat(params.response_format)) {
+        return true;
+    }
+    return (params.tools?.some((t) => isAutoParsableTool$1(t) || (t.type === 'function' && t.function.strict === true)) ?? false);
+}
+function assertToolCallsAreChatCompletionFunctionToolCalls(toolCalls) {
+    for (const toolCall of toolCalls || []) {
+        if (toolCall.type !== 'function') {
+            throw new OpenAIError(`Currently only \`function\` tool calls are supported; Received \`${toolCall.type}\``);
+        }
+    }
+}
+function validateInputTools(tools) {
+    for (const tool of tools ?? []) {
+        if (tool.type !== 'function') {
+            throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
+        }
+        if (tool.function.strict !== true) {
+            throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
+        }
+    }
 }
 
 const isAssistantMessage = (message) => {
@@ -2077,104 +2188,8 @@ _EventStream_connectedPromise = new WeakMap(), _EventStream_resolveConnectedProm
         return this._emit('error', new OpenAIError(String(error)));
     };
 
-function
-return
-}
-function isAutoParsableTool$1(tool) {
-    return tool?.['$brand'] === 'auto-parseable-tool';
-}
-function maybeParseChatCompletion(completion, params) {
-    if (!params || !hasAutoParseableInput$1(params)) {
-        return {
-            ...completion,
-            choices: completion.choices.map((choice) => ({
-                ...choice,
-                message: {
-                    ...choice.message,
-                    parsed: null,
-                    ...(choice.message.tool_calls ?
-                        {
-                            tool_calls: choice.message.tool_calls,
-                        }
-                        : undefined),
-                },
-            })),
-        };
-    }
-    return parseChatCompletion(completion, params);
-}
-function parseChatCompletion(completion, params) {
-    const choices = completion.choices.map((choice) => {
-        if (choice.finish_reason === 'length') {
-            throw new LengthFinishReasonError();
-        }
-        if (choice.finish_reason === 'content_filter') {
-            throw new ContentFilterFinishReasonError();
-        }
-        return {
-            ...choice,
-            message: {
-                ...choice.message,
-                ...(choice.message.tool_calls ?
-                    {
-                        tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? undefined,
-                    }
-                    : undefined),
-                parsed: choice.message.content && !choice.message.refusal ?
-                    parseResponseFormat(params, choice.message.content)
-                    : null,
-            },
-        };
-    });
-    return { ...completion, choices };
-}
-function parseResponseFormat(params, content) {
-    if (params.response_format?.type !== 'json_schema') {
-        return null;
-    }
-    if (params.response_format?.type === 'json_schema') {
-        if ('$parseRaw' in params.response_format) {
-            const response_format = params.response_format;
-            return response_format.$parseRaw(content);
-        }
-        return JSON.parse(content);
-    }
-    return null;
-}
-function parseToolCall$1(params, toolCall) {
-    const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
-    return {
-        ...toolCall,
-        function: {
-            ...toolCall.function,
-            parsed_arguments: isAutoParsableTool$1(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)
-                : inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)
-                    : null,
-        },
-    };
-}
-function shouldParseToolCall(params, toolCall) {
-    if (!params) {
-        return false;
-    }
-    const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
-    return isAutoParsableTool$1(inputTool) || inputTool?.function.strict || false;
-}
-function hasAutoParseableInput$1(params) {
-    if (isAutoParsableResponseFormat(params.response_format)) {
-        return true;
-    }
-    return (params.tools?.some((t) => isAutoParsableTool$1(t) || (t.type === 'function' && t.function.strict === true)) ?? false);
-}
-function validateInputTools(tools) {
-    for (const tool of tools ?? []) {
-        if (tool.type !== 'function') {
-            throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
-        }
-        if (tool.function.strict !== true) {
-            throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
-        }
-    }
+function isRunnableFunctionWithParse(fn) {
+    return typeof fn.parse === 'function';
 }
 
 var _AbstractChatCompletionRunner_instances, _AbstractChatCompletionRunner_getFinalContent, _AbstractChatCompletionRunner_getFinalMessage, _AbstractChatCompletionRunner_getFinalFunctionToolCall, _AbstractChatCompletionRunner_getFinalFunctionToolCallResult, _AbstractChatCompletionRunner_calculateTotalUsage, _AbstractChatCompletionRunner_validateParams, _AbstractChatCompletionRunner_stringifyFunctionCallResult;
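The two hunks above come from the bundled OpenAI SDK: the updated auto-parse helpers are re-added near the Messages class, and the old copies are removed further down the file. The practical change is that the parsing path now checks tool types explicitly before parsing. A minimal standalone sketch of the new guard (the predicate is copied from the added code; the sample tool objects are hypothetical):

```js
// Copied from the added SDK code above: only tools carrying a `function`
// payload are treated as parseable function tools.
function isChatCompletionFunctionTool(tool) {
    return tool !== undefined && 'function' in tool && tool.function !== undefined;
}

console.log(isChatCompletionFunctionTool({ type: 'function', function: { name: 'lookup' } })); // true
console.log(isChatCompletionFunctionTool({ type: 'custom', custom: { name: 'other' } }));      // false
```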
@@ -2300,7 +2315,7 @@ class AbstractChatCompletionRunner extends EventStream {
     async _runTools(client, params, options) {
         const role = 'tool';
         const { tool_choice = 'auto', stream, ...restParams } = params;
-        const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice?.function?.name;
+        const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice.type === 'function' && tool_choice?.function?.name;
         const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};
         // TODO(someday): clean this logic up
         const inputTools = params.tools.map((tool) => {
@@ -2418,7 +2433,7 @@ _AbstractChatCompletionRunner_instances = new WeakSet(), _AbstractChatCompletion
     for (let i = this.messages.length - 1; i >= 0; i--) {
         const message = this.messages[i];
         if (isAssistantMessage(message) && message?.tool_calls?.length) {
-            return message.tool_calls.at(-1)?.function;
+            return message.tool_calls.filter((x) => x.type === 'function').at(-1)?.function;
         }
     }
     return;
@@ -2896,7 +2911,7 @@ class ChatCompletionStream extends AbstractChatCompletionRunner {
             throw new Error('tool call snapshot missing `type`');
         }
         if (toolCallSnapshot.type === 'function') {
-            const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => tool
+            const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => isChatCompletionFunctionTool(tool) && tool.function.name === toolCallSnapshot.function.name); // TS doesn't narrow based on isChatCompletionTool
             this._emit('tool_calls.function.arguments.done', {
                 name: toolCallSnapshot.function.name,
                 index: toolCallIndex,
@@ -6706,7 +6721,7 @@ OpenAI.Evals = Evals;
 OpenAI.Containers = Containers;
 
 // llm-openai-config.ts
-const DEFAULT_MODEL
+const DEFAULT_MODEL = 'gpt-4.1-mini';
 /** Token costs in USD per 1M tokens. Last updated Feb 2025. */
 const openAiModelCosts = {
     'gpt-4o': {
@@ -6745,6 +6760,18 @@ const openAiModelCosts = {
         inputCost: 0.1 / 1_000_000,
         outputCost: 0.4 / 1_000_000,
     },
+    'gpt-5': {
+        inputCost: 1.25 / 1_000_000,
+        outputCost: 10 / 1_000_000,
+    },
+    'gpt-5-mini': {
+        inputCost: 0.25 / 1_000_000,
+        outputCost: 2 / 1_000_000,
+    },
+    'gpt-5-nano': {
+        inputCost: 0.05 / 1_000_000,
+        outputCost: 0.4 / 1_000_000,
+    },
     'o4-mini': {
         inputCost: 1.1 / 1_000_000,
         outputCost: 4.4 / 1_000_000,
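These new entries make the GPT-5 family priceable alongside the existing models. As a rough sanity check, a minimal sketch using only the per-token rates copied from the table above (the bundled calculateCost helper additionally folds in reasoning-token cost, per the next hunk):

```js
// Sketch only: per-token USD rates copied from the gpt-5-mini entry above.
const gpt5Mini = { inputCost: 0.25 / 1_000_000, outputCost: 2 / 1_000_000 };

const inputTokens = 12_000;
const outputTokens = 800;
const estimatedCost = inputTokens * gpt5Mini.inputCost + outputTokens * gpt5Mini.outputCost;

console.log(estimatedCost.toFixed(6)); // "0.004600" (USD)
```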
@@ -6812,7 +6839,6 @@ function calculateCost(provider, model, inputTokens, outputTokens, reasoningToke
     return inputCost + outputCost + reasoningCost;
 }
 
-const DEFAULT_MODEL = 'gpt-4.1-mini';
 /**
  * Fix a broken JSON string by attempting to extract and parse valid JSON content. This function is very lenient and will attempt to fix many types of JSON errors, including unbalanced brackets, missing or extra commas, improperly escaped $ signs, unquoted strings, trailing commas, missing closing brackets or braces, etc.
  * @param {string} jsonStr - The broken JSON string to fix
@@ -7057,9 +7083,7 @@ function initializeOpenAI(apiKey) {
     });
 }
 /**
- * Fixes broken JSON by sending it to
- * The GPT-4.1-mini model is a large language model that can understand and generate code,
- * including JSON. The returned JSON is the fixed version of the input JSON.
+ * Fixes broken JSON by sending it to OpenAI to fix it.
  * If the model fails to return valid JSON, an error is thrown.
  * @param jsonStr - the broken JSON to fix
  * @param apiKey - the OpenAI API key to use, or undefined to use the value of the OPENAI_API_KEY environment variable
@@ -7203,8 +7227,11 @@ const isSupportedModel = (model) => {
         'o3-mini',
         'gpt-4.1',
         'gpt-4.1-mini',
-        'o4-mini',
         'gpt-4.1-nano',
+        'gpt-5',
+        'gpt-5-mini',
+        'gpt-5-nano',
+        'o4-mini',
         'o3',
     ].includes(model);
 };
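With the whitelist extended, the GPT-5 family can be requested through the package's LLM entry points. A hypothetical usage sketch (the import path and export name are assumptions; the option names are taken from the makeLLMCall signature shown later in this diff, and OPENAI_API_KEY is assumed to be set):

```js
// Hypothetical usage sketch; export name and import path are assumptions.
const { makeLLMCall } = require('@discomedia/utils');

async function main() {
    const result = await makeLLMCall('Summarize this release in one sentence.', {
        model: 'gpt-5-mini',     // now passes the isSupportedModel check
        responseFormat: 'text',
    });
    console.log(result);
}

main().catch(console.error);
```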
@@ -7215,8 +7242,9 @@ const isSupportedModel = (model) => {
  */
 function supportsTemperature(model) {
     // Reasoning models don't support temperature
-
-
+    // GPT-5 models also do not support temperature
+    const reasoningAndGPT5Models = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3', 'gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
+    return !reasoningAndGPT5Models.includes(model);
 }
 /**
  * Checks if the given model is a reasoning model. Reasoning models have different tool choice constraints.
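The effect of the rewritten check, as a minimal standalone sketch (the model list is copied from the hunk above):

```js
// Standalone sketch of the updated check: reasoning models and the GPT-5 family
// are treated as not accepting a `temperature` parameter.
const reasoningAndGPT5Models = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3', 'gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
const supportsTemperature = (model) => !reasoningAndGPT5Models.includes(model);

console.log(supportsTemperature('gpt-4.1-mini')); // true  -> temperature can be sent
console.log(supportsTemperature('gpt-5-nano'));   // false -> temperature should be omitted
```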
@@ -7227,6 +7255,15 @@ function isReasoningModel(model) {
     const reasoningModels = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3'];
     return reasoningModels.includes(model);
 }
+/**
+ * Checks if the given model is a GPT-5 model. GPT-5 models don't support tool_choice other than 'auto'.
+ * @param model The model to check.
+ * @returns True if the model is a GPT-5 model, false otherwise.
+ */
+function isGPT5Model(model) {
+    const gpt5Models = ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
+    return gpt5Models.includes(model);
+}
 /**
  * Makes a call to OpenAI's Responses API for more advanced use cases with built-in tools.
  *
@@ -7254,7 +7291,7 @@ function isReasoningModel(model) {
  * @throws Error if the API call fails
  */
 const makeResponsesAPICall = async (input, options = {}) => {
-    const normalizedModel = normalizeModelName(options.model || DEFAULT_MODEL
+    const normalizedModel = normalizeModelName(options.model || DEFAULT_MODEL);
     const apiKey = options.apiKey || process.env.OPENAI_API_KEY;
     if (!apiKey) {
         throw new Error('OpenAI API key is not provided and OPENAI_API_KEY environment variable is not set');
@@ -7365,7 +7402,7 @@ const makeResponsesAPICall = async (input, options = {}) => {
  * });
  */
 async function makeLLMCall(input, options = {}) {
-    const { apiKey, model = DEFAULT_MODEL
+    const { apiKey, model = DEFAULT_MODEL, responseFormat = 'text', tools, useCodeInterpreter = false, useWebSearch = false, imageBase64, imageDetail = 'high', context, } = options;
     // Validate model
     const normalizedModel = normalizeModelName(model);
     if (!isSupportedModel(normalizedModel)) {
@@ -7457,8 +7494,8 @@ async function makeLLMCall(input, options = {}) {
     }
     if (useWebSearch) {
         responsesOptions.tools = [{ type: 'web_search_preview' }];
-        // For reasoning models, we can't force tool choice - they only support 'auto'
-        if (!isReasoningModel(normalizedModel)) {
+        // For reasoning models and GPT-5 models, we can't force tool choice - they only support 'auto'
+        if (!isReasoningModel(normalizedModel) && !isGPT5Model(normalizedModel)) {
             responsesOptions.tool_choice = { type: 'web_search_preview' };
         }
     }