@discomedia/utils 1.0.24 → 1.0.25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index-frontend.cjs +153 -116
- package/dist/index-frontend.cjs.map +1 -1
- package/dist/index-frontend.mjs +153 -116
- package/dist/index-frontend.mjs.map +1 -1
- package/dist/index.cjs +153 -116
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +153 -116
- package/dist/index.mjs.map +1 -1
- package/dist/package.json +2 -2
- package/dist/test.js +5128 -1032
- package/dist/test.js.map +1 -1
- package/dist/types/json-tools.d.ts +1 -3
- package/dist/types/json-tools.d.ts.map +1 -1
- package/dist/types/llm-config.d.ts.map +1 -1
- package/dist/types/llm-openai.d.ts +6 -0
- package/dist/types/llm-openai.d.ts.map +1 -1
- package/dist/types/types/llm-types.d.ts +1 -1
- package/dist/types/types/llm-types.d.ts.map +1 -1
- package/dist/types-frontend/json-tools.d.ts +1 -3
- package/dist/types-frontend/json-tools.d.ts.map +1 -1
- package/dist/types-frontend/llm-config.d.ts.map +1 -1
- package/dist/types-frontend/llm-openai.d.ts +6 -0
- package/dist/types-frontend/llm-openai.d.ts.map +1 -1
- package/dist/types-frontend/types/llm-types.d.ts +1 -1
- package/dist/types-frontend/types/llm-types.d.ts.map +1 -1
- package/package.json +2 -2
- package/dist/types/old-test.d.ts +0 -2
- package/dist/types/old-test.d.ts.map +0 -1
- package/dist/types-frontend/old-test.d.ts +0 -2
- package/dist/types-frontend/old-test.d.ts.map +0 -1
package/dist/index.cjs
CHANGED
@@ -2370,7 +2370,7 @@ const safeJSON = (text) => {
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
 
-const VERSION = '5.12.
+const VERSION = '5.12.1'; // x-release-please-version
 
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 const isRunningInBrowser = () => {
@@ -4003,8 +4003,119 @@ let Messages$1 = class Messages extends APIResource {
     }
 };
 
-function
-return
+function isChatCompletionFunctionTool(tool) {
+    return tool !== undefined && 'function' in tool && tool.function !== undefined;
+}
+function isAutoParsableResponseFormat(response_format) {
+    return response_format?.['$brand'] === 'auto-parseable-response-format';
+}
+function isAutoParsableTool$1(tool) {
+    return tool?.['$brand'] === 'auto-parseable-tool';
+}
+function maybeParseChatCompletion(completion, params) {
+    if (!params || !hasAutoParseableInput$1(params)) {
+        return {
+            ...completion,
+            choices: completion.choices.map((choice) => {
+                assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+                return {
+                    ...choice,
+                    message: {
+                        ...choice.message,
+                        parsed: null,
+                        ...(choice.message.tool_calls ?
+                            {
+                                tool_calls: choice.message.tool_calls,
+                            }
+                            : undefined),
+                    },
+                };
+            }),
+        };
+    }
+    return parseChatCompletion(completion, params);
+}
+function parseChatCompletion(completion, params) {
+    const choices = completion.choices.map((choice) => {
+        if (choice.finish_reason === 'length') {
+            throw new LengthFinishReasonError();
+        }
+        if (choice.finish_reason === 'content_filter') {
+            throw new ContentFilterFinishReasonError();
+        }
+        assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+        return {
+            ...choice,
+            message: {
+                ...choice.message,
+                ...(choice.message.tool_calls ?
+                    {
+                        tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? undefined,
+                    }
+                    : undefined),
+                parsed: choice.message.content && !choice.message.refusal ?
+                    parseResponseFormat(params, choice.message.content)
+                    : null,
+            },
+        };
+    });
+    return { ...completion, choices };
+}
+function parseResponseFormat(params, content) {
+    if (params.response_format?.type !== 'json_schema') {
+        return null;
+    }
+    if (params.response_format?.type === 'json_schema') {
+        if ('$parseRaw' in params.response_format) {
+            const response_format = params.response_format;
+            return response_format.$parseRaw(content);
+        }
+        return JSON.parse(content);
+    }
+    return null;
+}
+function parseToolCall$1(params, toolCall) {
+    const inputTool = params.tools?.find((inputTool) => isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name); // TS doesn't narrow based on isChatCompletionTool
+    return {
+        ...toolCall,
+        function: {
+            ...toolCall.function,
+            parsed_arguments: isAutoParsableTool$1(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)
+                : inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)
+                    : null,
+        },
+    };
+}
+function shouldParseToolCall(params, toolCall) {
+    if (!params || !('tools' in params) || !params.tools) {
+        return false;
+    }
+    const inputTool = params.tools?.find((inputTool) => isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name);
+    return (isChatCompletionFunctionTool(inputTool) &&
+        (isAutoParsableTool$1(inputTool) || inputTool?.function.strict || false));
+}
+function hasAutoParseableInput$1(params) {
+    if (isAutoParsableResponseFormat(params.response_format)) {
+        return true;
+    }
+    return (params.tools?.some((t) => isAutoParsableTool$1(t) || (t.type === 'function' && t.function.strict === true)) ?? false);
+}
+function assertToolCallsAreChatCompletionFunctionToolCalls(toolCalls) {
+    for (const toolCall of toolCalls || []) {
+        if (toolCall.type !== 'function') {
+            throw new OpenAIError(`Currently only \`function\` tool calls are supported; Received \`${toolCall.type}\``);
+        }
+    }
+}
+function validateInputTools(tools) {
+    for (const tool of tools ?? []) {
+        if (tool.type !== 'function') {
+            throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
+        }
+        if (tool.function.strict !== true) {
+            throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
+        }
+    }
 }
 
 const isAssistantMessage = (message) => {
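A minimal standalone sketch (not package code) of what the new isChatCompletionFunctionTool guard does for the parse helpers above: parseToolCall$1 and shouldParseToolCall now only match tools that actually carry a `function` payload, so other tool shapes are skipped. The two tool objects below are illustrative assumptions.

    // Replica of the guard added in this hunk, with hypothetical tool shapes.
    const isChatCompletionFunctionTool = (tool) =>
        tool !== undefined && 'function' in tool && tool.function !== undefined;

    const functionTool = {
        type: 'function',
        function: { name: 'get_weather', strict: true, parameters: { type: 'object', properties: {} } },
    };
    const otherTool = { type: 'custom', custom: { name: 'lookup' } }; // hypothetical non-function tool

    console.log(isChatCompletionFunctionTool(functionTool)); // true  -> eligible for argument parsing
    console.log(isChatCompletionFunctionTool(otherTool));    // false -> ignored when matching tool calls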
@@ -4198,104 +4309,8 @@ _EventStream_connectedPromise = new WeakMap(), _EventStream_resolveConnectedProm
     return this._emit('error', new OpenAIError(String(error)));
 };
 
-function
-return
-}
-function isAutoParsableTool$1(tool) {
-    return tool?.['$brand'] === 'auto-parseable-tool';
-}
-function maybeParseChatCompletion(completion, params) {
-    if (!params || !hasAutoParseableInput$1(params)) {
-        return {
-            ...completion,
-            choices: completion.choices.map((choice) => ({
-                ...choice,
-                message: {
-                    ...choice.message,
-                    parsed: null,
-                    ...(choice.message.tool_calls ?
-                        {
-                            tool_calls: choice.message.tool_calls,
-                        }
-                        : undefined),
-                },
-            })),
-        };
-    }
-    return parseChatCompletion(completion, params);
-}
-function parseChatCompletion(completion, params) {
-    const choices = completion.choices.map((choice) => {
-        if (choice.finish_reason === 'length') {
-            throw new LengthFinishReasonError();
-        }
-        if (choice.finish_reason === 'content_filter') {
-            throw new ContentFilterFinishReasonError();
-        }
-        return {
-            ...choice,
-            message: {
-                ...choice.message,
-                ...(choice.message.tool_calls ?
-                    {
-                        tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? undefined,
-                    }
-                    : undefined),
-                parsed: choice.message.content && !choice.message.refusal ?
-                    parseResponseFormat(params, choice.message.content)
-                    : null,
-            },
-        };
-    });
-    return { ...completion, choices };
-}
-function parseResponseFormat(params, content) {
-    if (params.response_format?.type !== 'json_schema') {
-        return null;
-    }
-    if (params.response_format?.type === 'json_schema') {
-        if ('$parseRaw' in params.response_format) {
-            const response_format = params.response_format;
-            return response_format.$parseRaw(content);
-        }
-        return JSON.parse(content);
-    }
-    return null;
-}
-function parseToolCall$1(params, toolCall) {
-    const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
-    return {
-        ...toolCall,
-        function: {
-            ...toolCall.function,
-            parsed_arguments: isAutoParsableTool$1(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)
-                : inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)
-                    : null,
-        },
-    };
-}
-function shouldParseToolCall(params, toolCall) {
-    if (!params) {
-        return false;
-    }
-    const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
-    return isAutoParsableTool$1(inputTool) || inputTool?.function.strict || false;
-}
-function hasAutoParseableInput$1(params) {
-    if (isAutoParsableResponseFormat(params.response_format)) {
-        return true;
-    }
-    return (params.tools?.some((t) => isAutoParsableTool$1(t) || (t.type === 'function' && t.function.strict === true)) ?? false);
-}
-function validateInputTools(tools) {
-    for (const tool of tools ?? []) {
-        if (tool.type !== 'function') {
-            throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
-        }
-        if (tool.function.strict !== true) {
-            throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
-        }
-    }
+function isRunnableFunctionWithParse(fn) {
+    return typeof fn.parse === 'function';
 }
 
 var _AbstractChatCompletionRunner_instances, _AbstractChatCompletionRunner_getFinalContent, _AbstractChatCompletionRunner_getFinalMessage, _AbstractChatCompletionRunner_getFinalFunctionToolCall, _AbstractChatCompletionRunner_getFinalFunctionToolCallResult, _AbstractChatCompletionRunner_calculateTotalUsage, _AbstractChatCompletionRunner_validateParams, _AbstractChatCompletionRunner_stringifyFunctionCallResult;
@@ -4421,7 +4436,7 @@ class AbstractChatCompletionRunner extends EventStream {
     async _runTools(client, params, options) {
         const role = 'tool';
         const { tool_choice = 'auto', stream, ...restParams } = params;
-        const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice?.function?.name;
+        const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice.type === 'function' && tool_choice?.function?.name;
         const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};
         // TODO(someday): clean this logic up
         const inputTools = params.tools.map((tool) => {
@@ -4539,7 +4554,7 @@ _AbstractChatCompletionRunner_instances = new WeakSet(), _AbstractChatCompletion
     for (let i = this.messages.length - 1; i >= 0; i--) {
         const message = this.messages[i];
         if (isAssistantMessage(message) && message?.tool_calls?.length) {
-            return message.tool_calls.at(-1)?.function;
+            return message.tool_calls.filter((x) => x.type === 'function').at(-1)?.function;
        }
    }
    return;
@@ -5017,7 +5032,7 @@ class ChatCompletionStream extends AbstractChatCompletionRunner {
             throw new Error('tool call snapshot missing `type`');
         }
         if (toolCallSnapshot.type === 'function') {
-            const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => tool
+            const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => isChatCompletionFunctionTool(tool) && tool.function.name === toolCallSnapshot.function.name); // TS doesn't narrow based on isChatCompletionTool
             this._emit('tool_calls.function.arguments.done', {
                 name: toolCallSnapshot.function.name,
                 index: toolCallIndex,
@@ -8827,7 +8842,7 @@ OpenAI.Evals = Evals;
 OpenAI.Containers = Containers;
 
 // llm-openai-config.ts
-const DEFAULT_MODEL
+const DEFAULT_MODEL = 'gpt-4.1-mini';
 /** Token costs in USD per 1M tokens. Last updated Feb 2025. */
 const openAiModelCosts = {
     'gpt-4o': {
@@ -8866,6 +8881,18 @@ const openAiModelCosts = {
         inputCost: 0.1 / 1_000_000,
         outputCost: 0.4 / 1_000_000,
     },
+    'gpt-5': {
+        inputCost: 1.25 / 1_000_000,
+        outputCost: 10 / 1_000_000,
+    },
+    'gpt-5-mini': {
+        inputCost: 0.25 / 1_000_000,
+        outputCost: 2 / 1_000_000,
+    },
+    'gpt-5-nano': {
+        inputCost: 0.05 / 1_000_000,
+        outputCost: 0.4 / 1_000_000,
+    },
     'o4-mini': {
         inputCost: 1.1 / 1_000_000,
         outputCost: 4.4 / 1_000_000,
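The new GPT-5 entries follow the table's existing convention of storing USD cost per single token (the per-1M price divided by 1_000_000). A back-of-envelope sketch of what that implies for a gpt-5-mini call (this is illustrative arithmetic, not the package's calculateCost implementation):

    // Assumed example: 10,000 input tokens and 2,000 output tokens on gpt-5-mini.
    const gpt5MiniInput = 0.25 / 1_000_000; // USD per input token
    const gpt5MiniOutput = 2 / 1_000_000;   // USD per output token
    const cost = 10_000 * gpt5MiniInput + 2_000 * gpt5MiniOutput;
    console.log(cost.toFixed(4)); // "0.0065" -> $0.0025 input + $0.0040 output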
@@ -8933,7 +8960,6 @@ function calculateCost(provider, model, inputTokens, outputTokens, reasoningToke
     return inputCost + outputCost + reasoningCost;
 }
 
-const DEFAULT_MODEL = 'gpt-4.1-mini';
 /**
  * Fix a broken JSON string by attempting to extract and parse valid JSON content. This function is very lenient and will attempt to fix many types of JSON errors, including unbalanced brackets, missing or extra commas, improperly escaped $ signs, unquoted strings, trailing commas, missing closing brackets or braces, etc.
  * @param {string} jsonStr - The broken JSON string to fix
@@ -9178,9 +9204,7 @@ function initializeOpenAI(apiKey) {
     });
 }
 /**
- * Fixes broken JSON by sending it to
- * The GPT-4.1-mini model is a large language model that can understand and generate code,
- * including JSON. The returned JSON is the fixed version of the input JSON.
+ * Fixes broken JSON by sending it to OpenAI to fix it.
  * If the model fails to return valid JSON, an error is thrown.
  * @param jsonStr - the broken JSON to fix
  * @param apiKey - the OpenAI API key to use, or undefined to use the value of the OPENAI_API_KEY environment variable
@@ -9324,8 +9348,11 @@ const isSupportedModel = (model) => {
         'o3-mini',
         'gpt-4.1',
         'gpt-4.1-mini',
-        'o4-mini',
         'gpt-4.1-nano',
+        'gpt-5',
+        'gpt-5-mini',
+        'gpt-5-nano',
+        'o4-mini',
         'o3',
     ].includes(model);
 };
@@ -9336,8 +9363,9 @@ const isSupportedModel = (model) => {
  */
 function supportsTemperature(model) {
     // Reasoning models don't support temperature
-
-
+    // GPT-5 models also do not support temperature
+    const reasoningAndGPT5Models = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3', 'gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
+    return !reasoningAndGPT5Models.includes(model);
 }
 /**
  * Checks if the given model is a reasoning model. Reasoning models have different tool choice constraints.
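The rewritten check now rejects the GPT-5 family alongside the o-series reasoning models. A standalone replica of the new logic, for illustration only (the shipped function is in the hunk above):

    // Model list copied from the + lines of this hunk.
    const noTemperatureModels = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3', 'gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
    const supportsTemp = (model) => !noTemperatureModels.includes(model);

    console.log(supportsTemp('gpt-4.1-mini')); // true  -> temperature is passed through
    console.log(supportsTemp('gpt-5-nano'));   // false -> temperature must be omitted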
@@ -9348,6 +9376,15 @@ function isReasoningModel(model) {
     const reasoningModels = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3'];
     return reasoningModels.includes(model);
 }
+/**
+ * Checks if the given model is a GPT-5 model. GPT-5 models don't support tool_choice other than 'auto'.
+ * @param model The model to check.
+ * @returns True if the model is a GPT-5 model, false otherwise.
+ */
+function isGPT5Model(model) {
+    const gpt5Models = ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
+    return gpt5Models.includes(model);
+}
 /**
  * Makes a call to OpenAI's Responses API for more advanced use cases with built-in tools.
  *
@@ -9375,7 +9412,7 @@ function isReasoningModel(model) {
  * @throws Error if the API call fails
  */
 const makeResponsesAPICall = async (input, options = {}) => {
-    const normalizedModel = normalizeModelName(options.model || DEFAULT_MODEL
+    const normalizedModel = normalizeModelName(options.model || DEFAULT_MODEL);
     const apiKey = options.apiKey || process.env.OPENAI_API_KEY;
     if (!apiKey) {
         throw new Error('OpenAI API key is not provided and OPENAI_API_KEY environment variable is not set');
@@ -9486,7 +9523,7 @@ const makeResponsesAPICall = async (input, options = {}) => {
  * });
  */
 async function makeLLMCall(input, options = {}) {
-    const { apiKey, model = DEFAULT_MODEL
+    const { apiKey, model = DEFAULT_MODEL, responseFormat = 'text', tools, useCodeInterpreter = false, useWebSearch = false, imageBase64, imageDetail = 'high', context, } = options;
     // Validate model
     const normalizedModel = normalizeModelName(model);
     if (!isSupportedModel(normalizedModel)) {
@@ -9578,8 +9615,8 @@ async function makeLLMCall(input, options = {}) {
     }
     if (useWebSearch) {
         responsesOptions.tools = [{ type: 'web_search_preview' }];
-        // For reasoning models, we can't force tool choice - they only support 'auto'
-        if (!isReasoningModel(normalizedModel)) {
+        // For reasoning models and GPT-5 models, we can't force tool choice - they only support 'auto'
+        if (!isReasoningModel(normalizedModel) && !isGPT5Model(normalizedModel)) {
             responsesOptions.tool_choice = { type: 'web_search_preview' };
         }
     }