@discomedia/utils 1.0.24 → 1.0.26
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index-frontend.cjs +194 -127
- package/dist/index-frontend.cjs.map +1 -1
- package/dist/index-frontend.mjs +194 -128
- package/dist/index-frontend.mjs.map +1 -1
- package/dist/index.cjs +323 -137
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +323 -138
- package/dist/index.mjs.map +1 -1
- package/dist/package.json +2 -2
- package/dist/test.js +5343 -1060
- package/dist/test.js.map +1 -1
- package/dist/types/index-frontend.d.ts +1 -1
- package/dist/types/index.d.ts +3 -1
- package/dist/types/index.d.ts.map +1 -1
- package/dist/types/json-tools.d.ts +1 -3
- package/dist/types/json-tools.d.ts.map +1 -1
- package/dist/types/llm-config.d.ts.map +1 -1
- package/dist/types/llm-deepseek.d.ts +1 -1
- package/dist/types/llm-deepseek.d.ts.map +1 -1
- package/dist/types/llm-images.d.ts.map +1 -1
- package/dist/types/llm-openai.d.ts +8 -2
- package/dist/types/llm-openai.d.ts.map +1 -1
- package/dist/types/llm-openrouter.d.ts +28 -0
- package/dist/types/llm-openrouter.d.ts.map +1 -0
- package/dist/types/misc-utils.d.ts.map +1 -1
- package/dist/types/types/llm-types.d.ts +27 -4
- package/dist/types/types/llm-types.d.ts.map +1 -1
- package/dist/types/types/logging-types.d.ts +1 -1
- package/dist/types/types/logging-types.d.ts.map +1 -1
- package/dist/types-frontend/index-frontend.d.ts +1 -1
- package/dist/types-frontend/index.d.ts +3 -1
- package/dist/types-frontend/index.d.ts.map +1 -1
- package/dist/types-frontend/json-tools.d.ts +1 -3
- package/dist/types-frontend/json-tools.d.ts.map +1 -1
- package/dist/types-frontend/llm-config.d.ts.map +1 -1
- package/dist/types-frontend/llm-deepseek.d.ts +1 -1
- package/dist/types-frontend/llm-deepseek.d.ts.map +1 -1
- package/dist/types-frontend/llm-images.d.ts.map +1 -1
- package/dist/types-frontend/llm-openai.d.ts +8 -2
- package/dist/types-frontend/llm-openai.d.ts.map +1 -1
- package/dist/types-frontend/llm-openrouter.d.ts +28 -0
- package/dist/types-frontend/llm-openrouter.d.ts.map +1 -0
- package/dist/types-frontend/misc-utils.d.ts.map +1 -1
- package/dist/types-frontend/types/llm-types.d.ts +27 -4
- package/dist/types-frontend/types/llm-types.d.ts.map +1 -1
- package/dist/types-frontend/types/logging-types.d.ts +1 -1
- package/dist/types-frontend/types/logging-types.d.ts.map +1 -1
- package/package.json +2 -2
- package/dist/types/old-test.d.ts +0 -2
- package/dist/types/old-test.d.ts.map +0 -1
- package/dist/types-frontend/old-test.d.ts +0 -2
- package/dist/types-frontend/old-test.d.ts.map +0 -1
package/dist/index.cjs
CHANGED

@@ -1021,8 +1021,28 @@ function dateTimeForGS(date) {
         .replace(/\./g, '/');
 }

+/**
+ * Type guard to check if a model is an OpenRouter model
+ */
+function isOpenRouterModel(model) {
+    const openRouterModels = [
+        'openai/gpt-5',
+        'openai/gpt-5-mini',
+        'openai/gpt-5-nano',
+        'openai/gpt-oss-120b',
+        'z.ai/glm-4.5',
+        'z.ai/glm-4.5-air',
+        'google/gemini-2.5-flash',
+        'google/gemini-2.5-flash-lite',
+        'deepseek/deepseek-r1-0528',
+        'deepseek/deepseek-chat-v3-0324',
+    ];
+    return openRouterModels.includes(model);
+}
+
 var Types = /*#__PURE__*/Object.freeze({
-    __proto__: null
+    __proto__: null,
+    isOpenRouterModel: isOpenRouterModel
 });

 // Utility function for debug logging
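The hunk above adds `isOpenRouterModel`, which a later hunk in this diff also re-exports from the package root (`exports.isOpenRouterModel`). A minimal usage sketch; the model id is one of the literals hard-coded in the guard, and the surrounding code is illustrative:

```js
const { isOpenRouterModel } = require('@discomedia/utils');

// Narrow an arbitrary model id string before choosing a routing path.
const candidate = 'openai/gpt-5-mini';
if (isOpenRouterModel(candidate)) {
  // Safe to send through the OpenRouter path added later in this diff.
  console.log(`${candidate} routes via OpenRouter`);
} else {
  console.log(`${candidate} falls back to a direct provider client`);
}
```
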
@@ -1114,29 +1134,31 @@ function hideApiKeyFromurl(url) {
  * @returns Structured error details.
  */
 function extractErrorDetails(error, response) {
-
+    const errMsg = error instanceof Error ? error.message : String(error);
+    const errName = error instanceof Error ? error.name : 'Error';
+    if (errName === 'TypeError' && errMsg.includes('fetch')) {
         return { type: 'NETWORK_ERROR', reason: 'Network connectivity issue', status: null };
     }
-    if (
-        const match =
+    if (errMsg.includes('HTTP error: 429')) {
+        const match = errMsg.match(/RATE_LIMIT: 429:(\d+)/);
         const retryAfter = match ? parseInt(match[1]) : undefined;
         return { type: 'RATE_LIMIT', reason: 'Rate limit exceeded', status: 429, retryAfter };
     }
-    if (
+    if (errMsg.includes('HTTP error: 401') || errMsg.includes('AUTH_ERROR: 401')) {
         return { type: 'AUTH_ERROR', reason: 'Authentication failed - invalid API key', status: 401 };
     }
-    if (
+    if (errMsg.includes('HTTP error: 403') || errMsg.includes('AUTH_ERROR: 403')) {
         return { type: 'AUTH_ERROR', reason: 'Access forbidden - insufficient permissions', status: 403 };
     }
-    if (
-        const status = parseInt(
+    if (errMsg.includes('SERVER_ERROR:')) {
+        const status = parseInt(errMsg.split('SERVER_ERROR: ')[1]) || 500;
         return { type: 'SERVER_ERROR', reason: `Server error (${status})`, status };
     }
-    if (
-        const status = parseInt(
+    if (errMsg.includes('CLIENT_ERROR:')) {
+        const status = parseInt(errMsg.split('CLIENT_ERROR: ')[1]) || 400;
         return { type: 'CLIENT_ERROR', reason: `Client error (${status})`, status };
     }
-    return { type: 'UNKNOWN', reason:
+    return { type: 'UNKNOWN', reason: errMsg || 'Unknown error', status: null };
 }
 /**
  * Fetches a resource with intelligent retry logic for handling transient errors.
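The rewritten `extractErrorDetails` classifies errors by inspecting the message text rather than fields that may be absent on the error object. Tracing the 429 branch by hand with a constructed input (not a fixture from the package):

```js
// extractErrorDetails is internal to dist/index.cjs; this traces its logic manually.
const err = new Error('HTTP error: 429 RATE_LIMIT: 429:30');
// errMsg.includes('HTTP error: 429')        → true, so the rate-limit branch runs
// errMsg.match(/RATE_LIMIT: 429:(\d+)/)[1]  → '30', so retryAfter === 30
// result: { type: 'RATE_LIMIT', reason: 'Rate limit exceeded', status: 429, retryAfter: 30 }
```
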
@@ -2370,7 +2392,7 @@ const safeJSON = (text) => {
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

-const VERSION = '5.12.
+const VERSION = '5.12.2'; // x-release-please-version

 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 const isRunningInBrowser = () => {
@@ -4003,8 +4025,119 @@ let Messages$1 = class Messages extends APIResource {
     }
 };

-function
-    return
+function isChatCompletionFunctionTool(tool) {
+    return tool !== undefined && 'function' in tool && tool.function !== undefined;
+}
+function isAutoParsableResponseFormat(response_format) {
+    return response_format?.['$brand'] === 'auto-parseable-response-format';
+}
+function isAutoParsableTool$1(tool) {
+    return tool?.['$brand'] === 'auto-parseable-tool';
+}
+function maybeParseChatCompletion(completion, params) {
+    if (!params || !hasAutoParseableInput$1(params)) {
+        return {
+            ...completion,
+            choices: completion.choices.map((choice) => {
+                assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+                return {
+                    ...choice,
+                    message: {
+                        ...choice.message,
+                        parsed: null,
+                        ...(choice.message.tool_calls ?
+                            {
+                                tool_calls: choice.message.tool_calls,
+                            }
+                            : undefined),
+                    },
+                };
+            }),
+        };
+    }
+    return parseChatCompletion(completion, params);
+}
+function parseChatCompletion(completion, params) {
+    const choices = completion.choices.map((choice) => {
+        if (choice.finish_reason === 'length') {
+            throw new LengthFinishReasonError();
+        }
+        if (choice.finish_reason === 'content_filter') {
+            throw new ContentFilterFinishReasonError();
+        }
+        assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+        return {
+            ...choice,
+            message: {
+                ...choice.message,
+                ...(choice.message.tool_calls ?
+                    {
+                        tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? undefined,
+                    }
+                    : undefined),
+                parsed: choice.message.content && !choice.message.refusal ?
+                    parseResponseFormat(params, choice.message.content)
+                    : null,
+            },
+        };
+    });
+    return { ...completion, choices };
+}
+function parseResponseFormat(params, content) {
+    if (params.response_format?.type !== 'json_schema') {
+        return null;
+    }
+    if (params.response_format?.type === 'json_schema') {
+        if ('$parseRaw' in params.response_format) {
+            const response_format = params.response_format;
+            return response_format.$parseRaw(content);
+        }
+        return JSON.parse(content);
+    }
+    return null;
+}
+function parseToolCall$1(params, toolCall) {
+    const inputTool = params.tools?.find((inputTool) => isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name); // TS doesn't narrow based on isChatCompletionTool
+    return {
+        ...toolCall,
+        function: {
+            ...toolCall.function,
+            parsed_arguments: isAutoParsableTool$1(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)
+                : inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)
+                    : null,
+        },
+    };
+}
+function shouldParseToolCall(params, toolCall) {
+    if (!params || !('tools' in params) || !params.tools) {
+        return false;
+    }
+    const inputTool = params.tools?.find((inputTool) => isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name);
+    return (isChatCompletionFunctionTool(inputTool) &&
+        (isAutoParsableTool$1(inputTool) || inputTool?.function.strict || false));
+}
+function hasAutoParseableInput$1(params) {
+    if (isAutoParsableResponseFormat(params.response_format)) {
+        return true;
+    }
+    return (params.tools?.some((t) => isAutoParsableTool$1(t) || (t.type === 'function' && t.function.strict === true)) ?? false);
+}
+function assertToolCallsAreChatCompletionFunctionToolCalls(toolCalls) {
+    for (const toolCall of toolCalls || []) {
+        if (toolCall.type !== 'function') {
+            throw new OpenAIError(`Currently only \`function\` tool calls are supported; Received \`${toolCall.type}\``);
+        }
+    }
+}
+function validateInputTools(tools) {
+    for (const tool of tools ?? []) {
+        if (tool.type !== 'function') {
+            throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
+        }
+        if (tool.function.strict !== true) {
+            throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
+        }
+    }
 }

 const isAssistantMessage = (message) => {
@@ -4198,104 +4331,8 @@ _EventStream_connectedPromise = new WeakMap(), _EventStream_resolveConnectedProm
         return this._emit('error', new OpenAIError(String(error)));
     };

-function
-    return
-}
-function isAutoParsableTool$1(tool) {
-    return tool?.['$brand'] === 'auto-parseable-tool';
-}
-function maybeParseChatCompletion(completion, params) {
-    if (!params || !hasAutoParseableInput$1(params)) {
-        return {
-            ...completion,
-            choices: completion.choices.map((choice) => ({
-                ...choice,
-                message: {
-                    ...choice.message,
-                    parsed: null,
-                    ...(choice.message.tool_calls ?
-                        {
-                            tool_calls: choice.message.tool_calls,
-                        }
-                        : undefined),
-                },
-            })),
-        };
-    }
-    return parseChatCompletion(completion, params);
-}
-function parseChatCompletion(completion, params) {
-    const choices = completion.choices.map((choice) => {
-        if (choice.finish_reason === 'length') {
-            throw new LengthFinishReasonError();
-        }
-        if (choice.finish_reason === 'content_filter') {
-            throw new ContentFilterFinishReasonError();
-        }
-        return {
-            ...choice,
-            message: {
-                ...choice.message,
-                ...(choice.message.tool_calls ?
-                    {
-                        tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? undefined,
-                    }
-                    : undefined),
-                parsed: choice.message.content && !choice.message.refusal ?
-                    parseResponseFormat(params, choice.message.content)
-                    : null,
-            },
-        };
-    });
-    return { ...completion, choices };
-}
-function parseResponseFormat(params, content) {
-    if (params.response_format?.type !== 'json_schema') {
-        return null;
-    }
-    if (params.response_format?.type === 'json_schema') {
-        if ('$parseRaw' in params.response_format) {
-            const response_format = params.response_format;
-            return response_format.$parseRaw(content);
-        }
-        return JSON.parse(content);
-    }
-    return null;
-}
-function parseToolCall$1(params, toolCall) {
-    const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
-    return {
-        ...toolCall,
-        function: {
-            ...toolCall.function,
-            parsed_arguments: isAutoParsableTool$1(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)
-                : inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)
-                    : null,
-        },
-    };
-}
-function shouldParseToolCall(params, toolCall) {
-    if (!params) {
-        return false;
-    }
-    const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
-    return isAutoParsableTool$1(inputTool) || inputTool?.function.strict || false;
-}
-function hasAutoParseableInput$1(params) {
-    if (isAutoParsableResponseFormat(params.response_format)) {
-        return true;
-    }
-    return (params.tools?.some((t) => isAutoParsableTool$1(t) || (t.type === 'function' && t.function.strict === true)) ?? false);
-}
-function validateInputTools(tools) {
-    for (const tool of tools ?? []) {
-        if (tool.type !== 'function') {
-            throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
-        }
-        if (tool.function.strict !== true) {
-            throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
-        }
-    }
+function isRunnableFunctionWithParse(fn) {
+    return typeof fn.parse === 'function';
 }

 var _AbstractChatCompletionRunner_instances, _AbstractChatCompletionRunner_getFinalContent, _AbstractChatCompletionRunner_getFinalMessage, _AbstractChatCompletionRunner_getFinalFunctionToolCall, _AbstractChatCompletionRunner_getFinalFunctionToolCallResult, _AbstractChatCompletionRunner_calculateTotalUsage, _AbstractChatCompletionRunner_validateParams, _AbstractChatCompletionRunner_stringifyFunctionCallResult;
@@ -4421,7 +4458,7 @@ class AbstractChatCompletionRunner extends EventStream {
     async _runTools(client, params, options) {
        const role = 'tool';
        const { tool_choice = 'auto', stream, ...restParams } = params;
-        const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice?.function?.name;
+        const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice.type === 'function' && tool_choice?.function?.name;
        const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};
        // TODO(someday): clean this logic up
        const inputTools = params.tools.map((tool) => {
@@ -4539,7 +4576,7 @@ _AbstractChatCompletionRunner_instances = new WeakSet(), _AbstractChatCompletion
     for (let i = this.messages.length - 1; i >= 0; i--) {
         const message = this.messages[i];
         if (isAssistantMessage(message) && message?.tool_calls?.length) {
-            return message.tool_calls.at(-1)?.function;
+            return message.tool_calls.filter((x) => x.type === 'function').at(-1)?.function;
         }
     }
     return;
@@ -5017,7 +5054,7 @@ class ChatCompletionStream extends AbstractChatCompletionRunner {
             throw new Error('tool call snapshot missing `type`');
         }
         if (toolCallSnapshot.type === 'function') {
-            const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => tool
+            const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => isChatCompletionFunctionTool(tool) && tool.function.name === toolCallSnapshot.function.name); // TS doesn't narrow based on isChatCompletionTool
             this._emit('tool_calls.function.arguments.done', {
                 name: toolCallSnapshot.function.name,
                 index: toolCallIndex,
@@ -8827,7 +8864,7 @@ OpenAI.Evals = Evals;
 OpenAI.Containers = Containers;

 // llm-openai-config.ts
-const DEFAULT_MODEL
+const DEFAULT_MODEL = 'gpt-4.1-mini';
 /** Token costs in USD per 1M tokens. Last updated Feb 2025. */
 const openAiModelCosts = {
     'gpt-4o': {
@@ -8866,6 +8903,18 @@ const openAiModelCosts = {
         inputCost: 0.1 / 1_000_000,
         outputCost: 0.4 / 1_000_000,
     },
+    'gpt-5': {
+        inputCost: 1.25 / 1_000_000,
+        outputCost: 10 / 1_000_000,
+    },
+    'gpt-5-mini': {
+        inputCost: 0.25 / 1_000_000,
+        outputCost: 2 / 1_000_000,
+    },
+    'gpt-5-nano': {
+        inputCost: 0.05 / 1_000_000,
+        outputCost: 0.4 / 1_000_000,
+    },
     'o4-mini': {
         inputCost: 1.1 / 1_000_000,
         outputCost: 4.4 / 1_000_000,
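With the three new pricing entries, GPT-5 costs flow through the same per-token arithmetic as the existing models. A worked example at the gpt-5-mini rates shown above ($0.25 per 1M input tokens, $2 per 1M output tokens):

```js
// 10,000 input tokens and 2,000 output tokens on gpt-5-mini:
const inputCost = 10_000 * (0.25 / 1_000_000); // $0.0025
const outputCost = 2_000 * (2 / 1_000_000);    // $0.0040
console.log((inputCost + outputCost).toFixed(4)); // '0.0065' USD total
```
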
@@ -8933,7 +8982,6 @@ function calculateCost(provider, model, inputTokens, outputTokens, reasoningToke
     return inputCost + outputCost + reasoningCost;
 }

-const DEFAULT_MODEL = 'gpt-4.1-mini';
 /**
  * Fix a broken JSON string by attempting to extract and parse valid JSON content. This function is very lenient and will attempt to fix many types of JSON errors, including unbalanced brackets, missing or extra commas, improperly escaped $ signs, unquoted strings, trailing commas, missing closing brackets or braces, etc.
  * @param {string} jsonStr - The broken JSON string to fix
|
|
|
9139
9187
|
return parse();
|
|
9140
9188
|
}
|
|
9141
9189
|
catch (error) {
|
|
9142
|
-
|
|
9190
|
+
const msg = error instanceof Error ? error.message : String(error);
|
|
9191
|
+
console.error(`Error parsing JSON at position ${index}: ${msg}`);
|
|
9143
9192
|
return null;
|
|
9144
9193
|
}
|
|
9145
9194
|
}
|
|
@@ -9178,9 +9227,7 @@ function initializeOpenAI(apiKey) {
     });
 }
 /**
- * Fixes broken JSON by sending it to
- * The GPT-4.1-mini model is a large language model that can understand and generate code,
- * including JSON. The returned JSON is the fixed version of the input JSON.
+ * Fixes broken JSON by sending it to OpenAI to fix it.
  * If the model fails to return valid JSON, an error is thrown.
  * @param jsonStr - the broken JSON to fix
  * @param apiKey - the OpenAI API key to use, or undefined to use the value of the OPENAI_API_KEY environment variable
@@ -9324,8 +9371,11 @@ const isSupportedModel = (model) => {
         'o3-mini',
         'gpt-4.1',
         'gpt-4.1-mini',
-        'o4-mini',
         'gpt-4.1-nano',
+        'gpt-5',
+        'gpt-5-mini',
+        'gpt-5-nano',
+        'o4-mini',
         'o3',
     ].includes(model);
 };
@@ -9336,8 +9386,9 @@ const isSupportedModel = (model) => {
  */
 function supportsTemperature(model) {
     // Reasoning models don't support temperature
-
-
+    // GPT-5 models also do not support temperature
+    const reasoningAndGPT5Models = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3', 'gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
+    return !reasoningAndGPT5Models.includes(model);
 }
 /**
  * Checks if the given model is a reasoning model. Reasoning models have different tool choice constraints.
@@ -9348,6 +9399,15 @@ function isReasoningModel(model) {
     const reasoningModels = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3'];
     return reasoningModels.includes(model);
 }
+/**
+ * Checks if the given model is a GPT-5 model. GPT-5 models don't support tool_choice other than 'auto'.
+ * @param model The model to check.
+ * @returns True if the model is a GPT-5 model, false otherwise.
+ */
+function isGPT5Model(model) {
+    const gpt5Models = ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
+    return gpt5Models.includes(model);
+}
 /**
  * Makes a call to OpenAI's Responses API for more advanced use cases with built-in tools.
  *
@@ -9375,7 +9435,7 @@ function isReasoningModel(model) {
  * @throws Error if the API call fails
  */
 const makeResponsesAPICall = async (input, options = {}) => {
-    const normalizedModel = normalizeModelName(options.model || DEFAULT_MODEL
+    const normalizedModel = normalizeModelName(options.model || DEFAULT_MODEL);
     const apiKey = options.apiKey || process.env.OPENAI_API_KEY;
     if (!apiKey) {
         throw new Error('OpenAI API key is not provided and OPENAI_API_KEY environment variable is not set');
@@ -9486,7 +9546,7 @@ const makeResponsesAPICall = async (input, options = {}) => {
  * });
  */
 async function makeLLMCall(input, options = {}) {
-    const { apiKey, model = DEFAULT_MODEL
+    const { apiKey, model = DEFAULT_MODEL, responseFormat = 'text', tools, useCodeInterpreter = false, useWebSearch = false, imageBase64, imageDetail = 'high', context, } = options;
     // Validate model
     const normalizedModel = normalizeModelName(model);
     if (!isSupportedModel(normalizedModel)) {
@@ -9578,8 +9638,8 @@ async function makeLLMCall(input, options = {}) {
     }
     if (useWebSearch) {
         responsesOptions.tools = [{ type: 'web_search_preview' }];
-        // For reasoning models, we can't force tool choice - they only support 'auto'
-        if (!isReasoningModel(normalizedModel)) {
+        // For reasoning models and GPT-5 models, we can't force tool choice - they only support 'auto'
+        if (!isReasoningModel(normalizedModel) && !isGPT5Model(normalizedModel)) {
             responsesOptions.tool_choice = { type: 'web_search_preview' };
         }
     }
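Combined with the `isGPT5Model` guard added earlier, the effect is that both reasoning models and GPT-5 models keep `tool_choice` on its default 'auto' while still receiving the web-search tool. A standalone sketch of that gating; the helpers are internal to the bundle, so they are re-declared here for illustration:

```js
// Re-declared from the hunks above for a runnable illustration:
const isReasoningModel = (m) => ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3'].includes(m);
const isGPT5Model = (m) => ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'].includes(m);

const responsesOptions = { tools: [{ type: 'web_search_preview' }] };
const model = 'gpt-5-mini';
if (!isReasoningModel(model) && !isGPT5Model(model)) {
  responsesOptions.tool_choice = { type: 'web_search_preview' };
}
console.log(responsesOptions); // no tool_choice set: GPT-5 stays on 'auto'
```
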
@@ -9672,7 +9732,13 @@ async function makeImagesCall(prompt, options = {}) {
         const enhancedResponse = {
             ...response,
             usage: {
-
+                // OpenAI Images response may not include usage details per image; preserve if present
+                ...(response.usage ?? {
+                    input_tokens: 0,
+                    input_tokens_details: { image_tokens: 0, text_tokens: 0 },
+                    output_tokens: 0,
+                    total_tokens: 0,
+                }),
                 provider: 'openai',
                 model: 'gpt-image-1',
                 cost,
@@ -9681,7 +9747,8 @@ async function makeImagesCall(prompt, options = {}) {
         return enhancedResponse;
     }
     catch (error) {
-
+        const message = error instanceof Error ? error.message : 'Unknown error';
+        throw new Error(`OpenAI Images API call failed: ${message}`);
     }
 }

@@ -9906,14 +9973,15 @@ const makeDeepseekCall = async (content, responseFormat = 'json', options = {})
     const completion = await createDeepseekCompletion(content, responseFormat, mergedOptions);
     // Handle tool calls similarly to OpenAI
     if (completion.tool_calls && completion.tool_calls.length > 0) {
+        const fnCalls = completion.tool_calls
+            .filter((tc) => tc.type === 'function')
+            .map((tc) => ({
+                id: tc.id,
+                name: tc.function.name,
+                arguments: JSON.parse(tc.function.arguments),
+            }));
         return {
-            response: {
-                tool_calls: completion.tool_calls.map((tc) => ({
-                    id: tc.id,
-                    name: tc.function.name,
-                    arguments: JSON.parse(tc.function.arguments),
-                })),
-            },
+            response: { tool_calls: fnCalls },
             usage: {
                 prompt_tokens: completion.usage.prompt_tokens,
                 completion_tokens: completion.usage.completion_tokens,
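The Deepseek branch now drops non-function tool call entries before mapping, instead of letting `tc.function` blow up on other entry types. A self-contained demo of the filter-and-map; the `tc` shapes mirror OpenAI-style tool call objects:

```js
const toolCalls = [
  { id: 'call_1', type: 'function', function: { name: 'get_weather', arguments: '{"city":"Berlin"}' } },
  { id: 'call_2', type: 'custom' }, // dropped by the new filter
];
const fnCalls = toolCalls
  .filter((tc) => tc.type === 'function')
  .map((tc) => ({ id: tc.id, name: tc.function.name, arguments: JSON.parse(tc.function.arguments) }));
console.log(fnCalls); // [ { id: 'call_1', name: 'get_weather', arguments: { city: 'Berlin' } } ]
```
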
@@ -9968,6 +10036,122 @@ const makeDeepseekCall = async (content, responseFormat = 'json', options = {})
     }
 };

+// llm-openrouter.ts
+// Map our ContextMessage to OpenAI chat message
+function mapContextToMessages(context) {
+    return context.map((msg) => {
+        const role = msg.role === 'developer' ? 'system' : msg.role;
+        return { role, content: msg.content };
+    });
+}
+function toOpenRouterModel(model) {
+    if (model && model.includes('/'))
+        return model;
+    const base = normalizeModelName(model || DEFAULT_MODEL);
+    return `openai/${base}`;
+}
+// Normalize model name for pricing
+function normalizeModelForPricing(model) {
+    if (!model)
+        return { provider: 'openai', coreModel: normalizeModelName(DEFAULT_MODEL) };
+    const [maybeProvider, maybeModel] = model.includes('/') ? model.split('/') : ['openai', model];
+    const provider = (maybeProvider === 'deepseek' ? 'deepseek' : 'openai');
+    const coreModel = normalizeModelName(maybeModel || model);
+    return { provider, coreModel };
+}
+/**
+ * Make a call through OpenRouter using the OpenAI Chat Completions-compatible API.
+ * Supports: JSON mode, model selection, message history, and tools.
+ */
+async function makeOpenRouterCall(input, options = {}) {
+    const { apiKey = process.env.OPENROUTER_API_KEY, model, responseFormat = 'text', tools, toolChoice, context, developerPrompt, temperature = 0.2, max_tokens, top_p, frequency_penalty, presence_penalty, stop, seed, referer = process.env.OPENROUTER_SITE_URL, title = process.env.OPENROUTER_SITE_NAME, } = options;
+    if (!apiKey) {
+        throw new Error('OpenRouter API key is not provided and OPENROUTER_API_KEY is not set');
+    }
+    const client = new OpenAI({
+        apiKey,
+        baseURL: 'https://openrouter.ai/api/v1',
+        defaultHeaders: {
+            ...(referer ? { 'HTTP-Referer': referer } : {}),
+            ...(title ? { 'X-Title': title } : {}),
+        },
+    });
+    const messages = [];
+    if (developerPrompt && developerPrompt.trim()) {
+        messages.push({ role: 'system', content: developerPrompt });
+    }
+    if (context && context.length > 0) {
+        messages.push(...mapContextToMessages(context));
+    }
+    messages.push({ role: 'user', content: input });
+    // Configure response_format
+    let response_format;
+    let parsingFormat = 'text';
+    if (responseFormat === 'json') {
+        response_format = { type: 'json_object' };
+        parsingFormat = 'json';
+    }
+    else if (typeof responseFormat === 'object') {
+        response_format = { type: 'json_object' };
+        parsingFormat = responseFormat;
+    }
+    const modelId = toOpenRouterModel(model);
+    const completion = await client.chat.completions.create({
+        model: modelId,
+        messages,
+        response_format,
+        tools,
+        tool_choice: toolChoice,
+        temperature,
+        max_tokens,
+        top_p,
+        frequency_penalty,
+        presence_penalty,
+        stop,
+        seed,
+    });
+    const choice = completion.choices && completion.choices.length > 0 ? completion.choices[0] : undefined;
+    const message = (choice && 'message' in choice ? choice.message : undefined);
+    const { provider: pricingProvider, coreModel } = normalizeModelForPricing(modelId);
+    const promptTokens = completion.usage?.prompt_tokens ?? 0;
+    const completionTokens = completion.usage?.completion_tokens ?? 0;
+    const cost = calculateCost(pricingProvider, coreModel, promptTokens, completionTokens);
+    // Tool calls branch: return empty string response and expose tool_calls on LLMResponse
+    const hasToolCalls = Array.isArray(message?.tool_calls) && message.tool_calls.length > 0;
+    if (hasToolCalls) {
+        const usageModel = isOpenRouterModel(modelId) ? modelId : DEFAULT_MODEL;
+        return {
+            response: '',
+            usage: {
+                prompt_tokens: promptTokens,
+                completion_tokens: completionTokens,
+                provider: 'openrouter',
+                model: usageModel,
+                cost,
+            },
+            tool_calls: message.tool_calls,
+        };
+    }
+    const rawText = typeof message?.content === 'string' ? message.content : '';
+    const parsed = await parseResponse(rawText, parsingFormat);
+    if (parsed === null) {
+        throw new Error('Failed to parse OpenRouter response');
+    }
+    // Ensure the model value conforms to LLMModel; otherwise fall back to DEFAULT_MODEL
+    const usageModel = isOpenRouterModel(modelId) ? modelId : DEFAULT_MODEL;
+    return {
+        response: parsed,
+        usage: {
+            prompt_tokens: promptTokens,
+            completion_tokens: completionTokens,
+            provider: 'openrouter',
+            model: usageModel,
+            cost,
+        },
+        ...(hasToolCalls ? { tool_calls: message.tool_calls } : {}),
+    };
+}
+
 /**
  * A class to measure performance of code execution.
  *
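A later hunk exposes this function as `disco.llm.open`. A minimal end-to-end sketch, assuming `OPENROUTER_API_KEY` is set in the environment; the option names come from the destructuring above and the result shape from the return statements:

```js
const { disco } = require('@discomedia/utils');

async function main() {
  const result = await disco.llm.open('List three sorting algorithms as JSON.', {
    model: 'openai/gpt-5-mini', // any 'provider/model' id is passed through verbatim
    responseFormat: 'json',     // sent as { type: 'json_object' }
    temperature: 0.2,
  });
  console.log(result.response);   // parsed JSON payload
  console.log(result.usage.cost); // computed from the pricing table via calculateCost
}

main().catch(console.error);
```
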
@@ -18277,6 +18461,7 @@ const disco = {
         call: makeLLMCall,
         seek: makeDeepseekCall,
         images: makeImagesCall,
+        open: makeOpenRouterCall,
     },
     polygon: {
         fetchTickerInfo: fetchTickerInfo,
@@ -18325,4 +18510,5 @@ const disco = {
 exports.AlpacaMarketDataAPI = AlpacaMarketDataAPI;
 exports.AlpacaTradingAPI = AlpacaTradingAPI;
 exports.disco = disco;
+exports.isOpenRouterModel = isOpenRouterModel;
 //# sourceMappingURL=index.cjs.map