@discomedia/utils 1.0.23 → 1.0.25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index-frontend.cjs +159 -122
- package/dist/index-frontend.cjs.map +1 -1
- package/dist/index-frontend.mjs +159 -122
- package/dist/index-frontend.mjs.map +1 -1
- package/dist/index.cjs +159 -122
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +159 -122
- package/dist/index.mjs.map +1 -1
- package/dist/package.json +4 -4
- package/dist/test.js +5129 -1037
- package/dist/test.js.map +1 -1
- package/dist/types/alpaca-market-data-api.d.ts.map +1 -1
- package/dist/types/alpaca-trading-api.d.ts +4 -9
- package/dist/types/alpaca-trading-api.d.ts.map +1 -1
- package/dist/types/json-tools.d.ts +1 -3
- package/dist/types/json-tools.d.ts.map +1 -1
- package/dist/types/llm-config.d.ts.map +1 -1
- package/dist/types/llm-openai.d.ts +6 -0
- package/dist/types/llm-openai.d.ts.map +1 -1
- package/dist/types/types/alpaca-types.d.ts +2 -8
- package/dist/types/types/alpaca-types.d.ts.map +1 -1
- package/dist/types/types/llm-types.d.ts +1 -1
- package/dist/types/types/llm-types.d.ts.map +1 -1
- package/dist/types-frontend/alpaca-market-data-api.d.ts.map +1 -1
- package/dist/types-frontend/alpaca-trading-api.d.ts +4 -9
- package/dist/types-frontend/alpaca-trading-api.d.ts.map +1 -1
- package/dist/types-frontend/json-tools.d.ts +1 -3
- package/dist/types-frontend/json-tools.d.ts.map +1 -1
- package/dist/types-frontend/llm-config.d.ts.map +1 -1
- package/dist/types-frontend/llm-openai.d.ts +6 -0
- package/dist/types-frontend/llm-openai.d.ts.map +1 -1
- package/dist/types-frontend/types/alpaca-types.d.ts +2 -8
- package/dist/types-frontend/types/alpaca-types.d.ts.map +1 -1
- package/dist/types-frontend/types/llm-types.d.ts +1 -1
- package/dist/types-frontend/types/llm-types.d.ts.map +1 -1
- package/package.json +4 -4
- package/dist/types/old-test.d.ts +0 -2
- package/dist/types/old-test.d.ts.map +0 -1
- package/dist/types-frontend/old-test.d.ts +0 -2
- package/dist/types-frontend/old-test.d.ts.map +0 -1
package/dist/index.cjs
CHANGED
@@ -2370,7 +2370,7 @@ const safeJSON = (text) => {
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
 
-const VERSION = '5.
+const VERSION = '5.12.1'; // x-release-please-version
 
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 const isRunningInBrowser = () => {
@@ -3225,11 +3225,7 @@ class Stream {
 done = true;
 continue;
 }
-if (sse.event === null ||
-    sse.event.startsWith('response.') ||
-    sse.event.startsWith('image_edit.') ||
-    sse.event.startsWith('image_generation.') ||
-    sse.event.startsWith('transcript.')) {
+if (sse.event === null || !sse.event.startsWith('thread.')) {
 let data;
 try {
 data = JSON.parse(sse.data);
@@ -4007,8 +4003,119 @@ let Messages$1 = class Messages extends APIResource {
 }
 };
 
-function
-return
+function isChatCompletionFunctionTool(tool) {
+return tool !== undefined && 'function' in tool && tool.function !== undefined;
+}
+function isAutoParsableResponseFormat(response_format) {
+return response_format?.['$brand'] === 'auto-parseable-response-format';
+}
+function isAutoParsableTool$1(tool) {
+return tool?.['$brand'] === 'auto-parseable-tool';
+}
+function maybeParseChatCompletion(completion, params) {
+if (!params || !hasAutoParseableInput$1(params)) {
+return {
+...completion,
+choices: completion.choices.map((choice) => {
+assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+return {
+...choice,
+message: {
+...choice.message,
+parsed: null,
+...(choice.message.tool_calls ?
+{
+tool_calls: choice.message.tool_calls,
+}
+: undefined),
+},
+};
+}),
+};
+}
+return parseChatCompletion(completion, params);
+}
+function parseChatCompletion(completion, params) {
+const choices = completion.choices.map((choice) => {
+if (choice.finish_reason === 'length') {
+throw new LengthFinishReasonError();
+}
+if (choice.finish_reason === 'content_filter') {
+throw new ContentFilterFinishReasonError();
+}
+assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+return {
+...choice,
+message: {
+...choice.message,
+...(choice.message.tool_calls ?
+{
+tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? undefined,
+}
+: undefined),
+parsed: choice.message.content && !choice.message.refusal ?
+parseResponseFormat(params, choice.message.content)
+: null,
+},
+};
+});
+return { ...completion, choices };
+}
+function parseResponseFormat(params, content) {
+if (params.response_format?.type !== 'json_schema') {
+return null;
+}
+if (params.response_format?.type === 'json_schema') {
+if ('$parseRaw' in params.response_format) {
+const response_format = params.response_format;
+return response_format.$parseRaw(content);
+}
+return JSON.parse(content);
+}
+return null;
+}
+function parseToolCall$1(params, toolCall) {
+const inputTool = params.tools?.find((inputTool) => isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name); // TS doesn't narrow based on isChatCompletionTool
+return {
+...toolCall,
+function: {
+...toolCall.function,
+parsed_arguments: isAutoParsableTool$1(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)
+: inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)
+: null,
+},
+};
+}
+function shouldParseToolCall(params, toolCall) {
+if (!params || !('tools' in params) || !params.tools) {
+return false;
+}
+const inputTool = params.tools?.find((inputTool) => isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name);
+return (isChatCompletionFunctionTool(inputTool) &&
+(isAutoParsableTool$1(inputTool) || inputTool?.function.strict || false));
+}
+function hasAutoParseableInput$1(params) {
+if (isAutoParsableResponseFormat(params.response_format)) {
+return true;
+}
+return (params.tools?.some((t) => isAutoParsableTool$1(t) || (t.type === 'function' && t.function.strict === true)) ?? false);
+}
+function assertToolCallsAreChatCompletionFunctionToolCalls(toolCalls) {
+for (const toolCall of toolCalls || []) {
+if (toolCall.type !== 'function') {
+throw new OpenAIError(`Currently only \`function\` tool calls are supported; Received \`${toolCall.type}\``);
+}
+}
+}
+function validateInputTools(tools) {
+for (const tool of tools ?? []) {
+if (tool.type !== 'function') {
+throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
+}
+if (tool.function.strict !== true) {
+throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
+}
+}
 }
 
 const isAssistantMessage = (message) => {
@@ -4202,104 +4309,8 @@ _EventStream_connectedPromise = new WeakMap(), _EventStream_resolveConnectedProm
 return this._emit('error', new OpenAIError(String(error)));
 };
 
-function
-return
-}
-function isAutoParsableTool$1(tool) {
-return tool?.['$brand'] === 'auto-parseable-tool';
-}
-function maybeParseChatCompletion(completion, params) {
-if (!params || !hasAutoParseableInput$1(params)) {
-return {
-...completion,
-choices: completion.choices.map((choice) => ({
-...choice,
-message: {
-...choice.message,
-parsed: null,
-...(choice.message.tool_calls ?
-{
-tool_calls: choice.message.tool_calls,
-}
-: undefined),
-},
-})),
-};
-}
-return parseChatCompletion(completion, params);
-}
-function parseChatCompletion(completion, params) {
-const choices = completion.choices.map((choice) => {
-if (choice.finish_reason === 'length') {
-throw new LengthFinishReasonError();
-}
-if (choice.finish_reason === 'content_filter') {
-throw new ContentFilterFinishReasonError();
-}
-return {
-...choice,
-message: {
-...choice.message,
-...(choice.message.tool_calls ?
-{
-tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? undefined,
-}
-: undefined),
-parsed: choice.message.content && !choice.message.refusal ?
-parseResponseFormat(params, choice.message.content)
-: null,
-},
-};
-});
-return { ...completion, choices };
-}
-function parseResponseFormat(params, content) {
-if (params.response_format?.type !== 'json_schema') {
-return null;
-}
-if (params.response_format?.type === 'json_schema') {
-if ('$parseRaw' in params.response_format) {
-const response_format = params.response_format;
-return response_format.$parseRaw(content);
-}
-return JSON.parse(content);
-}
-return null;
-}
-function parseToolCall$1(params, toolCall) {
-const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
-return {
-...toolCall,
-function: {
-...toolCall.function,
-parsed_arguments: isAutoParsableTool$1(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)
-: inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)
-: null,
-},
-};
-}
-function shouldParseToolCall(params, toolCall) {
-if (!params) {
-return false;
-}
-const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
-return isAutoParsableTool$1(inputTool) || inputTool?.function.strict || false;
-}
-function hasAutoParseableInput$1(params) {
-if (isAutoParsableResponseFormat(params.response_format)) {
-return true;
-}
-return (params.tools?.some((t) => isAutoParsableTool$1(t) || (t.type === 'function' && t.function.strict === true)) ?? false);
-}
-function validateInputTools(tools) {
-for (const tool of tools ?? []) {
-if (tool.type !== 'function') {
-throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
-}
-if (tool.function.strict !== true) {
-throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
-}
-}
+function isRunnableFunctionWithParse(fn) {
+return typeof fn.parse === 'function';
 }
 
 var _AbstractChatCompletionRunner_instances, _AbstractChatCompletionRunner_getFinalContent, _AbstractChatCompletionRunner_getFinalMessage, _AbstractChatCompletionRunner_getFinalFunctionToolCall, _AbstractChatCompletionRunner_getFinalFunctionToolCallResult, _AbstractChatCompletionRunner_calculateTotalUsage, _AbstractChatCompletionRunner_validateParams, _AbstractChatCompletionRunner_stringifyFunctionCallResult;
@@ -4425,7 +4436,7 @@ class AbstractChatCompletionRunner extends EventStream {
 async _runTools(client, params, options) {
 const role = 'tool';
 const { tool_choice = 'auto', stream, ...restParams } = params;
-const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice?.function?.name;
+const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice.type === 'function' && tool_choice?.function?.name;
 const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};
 // TODO(someday): clean this logic up
 const inputTools = params.tools.map((tool) => {
@@ -4543,7 +4554,7 @@ _AbstractChatCompletionRunner_instances = new WeakSet(), _AbstractChatCompletion
 for (let i = this.messages.length - 1; i >= 0; i--) {
 const message = this.messages[i];
 if (isAssistantMessage(message) && message?.tool_calls?.length) {
-return message.tool_calls.at(-1)?.function;
+return message.tool_calls.filter((x) => x.type === 'function').at(-1)?.function;
 }
 }
 return;
@@ -5021,7 +5032,7 @@ class ChatCompletionStream extends AbstractChatCompletionRunner {
 throw new Error('tool call snapshot missing `type`');
 }
 if (toolCallSnapshot.type === 'function') {
-const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => tool
+const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => isChatCompletionFunctionTool(tool) && tool.function.name === toolCallSnapshot.function.name); // TS doesn't narrow based on isChatCompletionTool
 this._emit('tool_calls.function.arguments.done', {
 name: toolCallSnapshot.function.name,
 index: toolCallIndex,
@@ -8831,7 +8842,7 @@ OpenAI.Evals = Evals;
 OpenAI.Containers = Containers;
 
 // llm-openai-config.ts
-const DEFAULT_MODEL
+const DEFAULT_MODEL = 'gpt-4.1-mini';
 /** Token costs in USD per 1M tokens. Last updated Feb 2025. */
 const openAiModelCosts = {
 'gpt-4o': {
@@ -8870,6 +8881,18 @@ const openAiModelCosts = {
 inputCost: 0.1 / 1_000_000,
 outputCost: 0.4 / 1_000_000,
 },
+'gpt-5': {
+inputCost: 1.25 / 1_000_000,
+outputCost: 10 / 1_000_000,
+},
+'gpt-5-mini': {
+inputCost: 0.25 / 1_000_000,
+outputCost: 2 / 1_000_000,
+},
+'gpt-5-nano': {
+inputCost: 0.05 / 1_000_000,
+outputCost: 0.4 / 1_000_000,
+},
 'o4-mini': {
 inputCost: 1.1 / 1_000_000,
 outputCost: 4.4 / 1_000_000,
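For orientation, here is a rough sketch of the per-call arithmetic the new GPT-5 pricing entries above imply. This is not code from the package; it only assumes the table stores USD cost per token (the per-1M prices divided by 1_000_000), which is what the entries show.

    // Hypothetical example using the gpt-5-mini rates added above.
    const inputCostPerToken = 0.25 / 1_000_000;  // $0.25 per 1M input tokens
    const outputCostPerToken = 2 / 1_000_000;    // $2.00 per 1M output tokens
    // A call with 10,000 input tokens and 2,000 output tokens:
    const totalUSD = 10_000 * inputCostPerToken + 2_000 * outputCostPerToken;
    console.log(totalUSD.toFixed(4)); // "0.0065", about $0.0065 per call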
@@ -8937,7 +8960,6 @@ function calculateCost(provider, model, inputTokens, outputTokens, reasoningToke
 return inputCost + outputCost + reasoningCost;
 }
 
-const DEFAULT_MODEL = 'gpt-4.1-mini';
 /**
 * Fix a broken JSON string by attempting to extract and parse valid JSON content. This function is very lenient and will attempt to fix many types of JSON errors, including unbalanced brackets, missing or extra commas, improperly escaped $ signs, unquoted strings, trailing commas, missing closing brackets or braces, etc.
 * @param {string} jsonStr - The broken JSON string to fix
@@ -9182,9 +9204,7 @@ function initializeOpenAI(apiKey) {
 });
 }
 /**
-* Fixes broken JSON by sending it to
-* The GPT-4.1-mini model is a large language model that can understand and generate code,
-* including JSON. The returned JSON is the fixed version of the input JSON.
+* Fixes broken JSON by sending it to OpenAI to fix it.
 * If the model fails to return valid JSON, an error is thrown.
 * @param jsonStr - the broken JSON to fix
 * @param apiKey - the OpenAI API key to use, or undefined to use the value of the OPENAI_API_KEY environment variable
@@ -9328,8 +9348,11 @@ const isSupportedModel = (model) => {
 'o3-mini',
 'gpt-4.1',
 'gpt-4.1-mini',
-'o4-mini',
 'gpt-4.1-nano',
+'gpt-5',
+'gpt-5-mini',
+'gpt-5-nano',
+'o4-mini',
 'o3',
 ].includes(model);
 };
@@ -9340,8 +9363,9 @@ const isSupportedModel = (model) => {
 */
 function supportsTemperature(model) {
 // Reasoning models don't support temperature
-
-
+// GPT-5 models also do not support temperature
+const reasoningAndGPT5Models = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3', 'gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
+return !reasoningAndGPT5Models.includes(model);
 }
 /**
 * Checks if the given model is a reasoning model. Reasoning models have different tool choice constraints.
@@ -9352,6 +9376,15 @@ function isReasoningModel(model) {
 const reasoningModels = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3'];
 return reasoningModels.includes(model);
 }
+/**
+* Checks if the given model is a GPT-5 model. GPT-5 models don't support tool_choice other than 'auto'.
+* @param model The model to check.
+* @returns True if the model is a GPT-5 model, false otherwise.
+*/
+function isGPT5Model(model) {
+const gpt5Models = ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
+return gpt5Models.includes(model);
+}
 /**
 * Makes a call to OpenAI's Responses API for more advanced use cases with built-in tools.
 *
@@ -9379,7 +9412,7 @@ function isReasoningModel(model) {
 * @throws Error if the API call fails
 */
 const makeResponsesAPICall = async (input, options = {}) => {
-const normalizedModel = normalizeModelName(options.model || DEFAULT_MODEL
+const normalizedModel = normalizeModelName(options.model || DEFAULT_MODEL);
 const apiKey = options.apiKey || process.env.OPENAI_API_KEY;
 if (!apiKey) {
 throw new Error('OpenAI API key is not provided and OPENAI_API_KEY environment variable is not set');
@@ -9490,7 +9523,7 @@ const makeResponsesAPICall = async (input, options = {}) => {
 * });
 */
 async function makeLLMCall(input, options = {}) {
-const { apiKey, model = DEFAULT_MODEL
+const { apiKey, model = DEFAULT_MODEL, responseFormat = 'text', tools, useCodeInterpreter = false, useWebSearch = false, imageBase64, imageDetail = 'high', context, } = options;
 // Validate model
 const normalizedModel = normalizeModelName(model);
 if (!isSupportedModel(normalizedModel)) {
@@ -9582,8 +9615,8 @@ async function makeLLMCall(input, options = {}) {
 }
 if (useWebSearch) {
 responsesOptions.tools = [{ type: 'web_search_preview' }];
-// For reasoning models, we can't force tool choice - they only support 'auto'
-if (!isReasoningModel(normalizedModel)) {
+// For reasoning models and GPT-5 models, we can't force tool choice - they only support 'auto'
+if (!isReasoningModel(normalizedModel) && !isGPT5Model(normalizedModel)) {
 responsesOptions.tool_choice = { type: 'web_search_preview' };
 }
 }
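Taken together with the isGPT5Model helper and the expanded isSupportedModel list above, a call exercising the new GPT-5 support could look roughly like the sketch below. The option names come from the makeLLMCall destructuring shown earlier in this diff; the prompt and the surrounding usage are illustrative only, not taken from the package's documentation.

    // Hypothetical usage sketch based on the hunks above.
    const result = await makeLLMCall('Summarize this filing in two sentences.', {
        model: 'gpt-5-mini',   // now accepted by isSupportedModel
        useWebSearch: true,    // tool_choice stays 'auto' for GPT-5 and reasoning models
    });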
@@ -16309,7 +16342,7 @@ class AlpacaMarketDataAPI extends require$$0$3.EventEmitter {
 hasMorePages = !!pageToken;
 // Enhanced logging with date range and progress info
 const dateRangeStr = earliestTimestamp && latestTimestamp
-? `${earliestTimestamp.toLocaleDateString('en-US', { timeZone: 'America/New_York' })} to ${latestTimestamp.toLocaleDateString('en-US', { timeZone: 'America/New_York' })}`
+? `${new Date(earliestTimestamp).toLocaleDateString('en-US', { timeZone: 'America/New_York' })} to ${new Date(latestTimestamp).toLocaleDateString('en-US', { timeZone: 'America/New_York' })}`
 : 'unknown range';
 log(`Page ${pageCount}: Fetched ${pageBarsCount.toLocaleString()} bars (total: ${totalBarsCount.toLocaleString()}) for ${symbols.length} symbols, date range: ${dateRangeStr}${hasMorePages ? ', more pages available' : ', complete'}`);
 // Prevent infinite loops
@@ -17621,6 +17654,10 @@ class AlpacaTradingAPI {
 queryParams.append('period', params.period);
 if (params.extended_hours !== undefined)
 queryParams.append('extended_hours', params.extended_hours.toString());
+if (params.start)
+queryParams.append('start', params.start);
+if (params.end)
+queryParams.append('end', params.end);
 if (params.date_end)
 queryParams.append('date_end', params.date_end);
 const response = await this.makeRequest(`/account/portfolio/history?${queryParams.toString()}`);
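For reference, a minimal sketch of the query string the new start/end branches above produce, assuming a params object that carries those fields (the public method wrapping this endpoint is not shown in this hunk):

    // Hypothetical illustration of the start/end handling added above.
    const params = { period: '1M', start: '2025-01-01', end: '2025-02-01' };
    const queryParams = new URLSearchParams();
    queryParams.append('period', params.period);
    if (params.start)
        queryParams.append('start', params.start);
    if (params.end)
        queryParams.append('end', params.end);
    console.log(`/account/portfolio/history?${queryParams.toString()}`);
    // -> /account/portfolio/history?period=1M&start=2025-01-01&end=2025-02-01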