@discomedia/utils 1.0.23 → 1.0.25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index-frontend.cjs +159 -122
- package/dist/index-frontend.cjs.map +1 -1
- package/dist/index-frontend.mjs +159 -122
- package/dist/index-frontend.mjs.map +1 -1
- package/dist/index.cjs +159 -122
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +159 -122
- package/dist/index.mjs.map +1 -1
- package/dist/package.json +4 -4
- package/dist/test.js +5129 -1037
- package/dist/test.js.map +1 -1
- package/dist/types/alpaca-market-data-api.d.ts.map +1 -1
- package/dist/types/alpaca-trading-api.d.ts +4 -9
- package/dist/types/alpaca-trading-api.d.ts.map +1 -1
- package/dist/types/json-tools.d.ts +1 -3
- package/dist/types/json-tools.d.ts.map +1 -1
- package/dist/types/llm-config.d.ts.map +1 -1
- package/dist/types/llm-openai.d.ts +6 -0
- package/dist/types/llm-openai.d.ts.map +1 -1
- package/dist/types/types/alpaca-types.d.ts +2 -8
- package/dist/types/types/alpaca-types.d.ts.map +1 -1
- package/dist/types/types/llm-types.d.ts +1 -1
- package/dist/types/types/llm-types.d.ts.map +1 -1
- package/dist/types-frontend/alpaca-market-data-api.d.ts.map +1 -1
- package/dist/types-frontend/alpaca-trading-api.d.ts +4 -9
- package/dist/types-frontend/alpaca-trading-api.d.ts.map +1 -1
- package/dist/types-frontend/json-tools.d.ts +1 -3
- package/dist/types-frontend/json-tools.d.ts.map +1 -1
- package/dist/types-frontend/llm-config.d.ts.map +1 -1
- package/dist/types-frontend/llm-openai.d.ts +6 -0
- package/dist/types-frontend/llm-openai.d.ts.map +1 -1
- package/dist/types-frontend/types/alpaca-types.d.ts +2 -8
- package/dist/types-frontend/types/alpaca-types.d.ts.map +1 -1
- package/dist/types-frontend/types/llm-types.d.ts +1 -1
- package/dist/types-frontend/types/llm-types.d.ts.map +1 -1
- package/package.json +4 -4
- package/dist/types/old-test.d.ts +0 -2
- package/dist/types/old-test.d.ts.map +0 -1
- package/dist/types-frontend/old-test.d.ts +0 -2
- package/dist/types-frontend/old-test.d.ts.map +0 -1
package/dist/index.mjs (CHANGED)
@@ -2368,7 +2368,7 @@ const safeJSON = (text) => {
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
 
-const VERSION = '5.
+const VERSION = '5.12.1'; // x-release-please-version
 
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 const isRunningInBrowser = () => {
@@ -3223,11 +3223,7 @@ class Stream {
                 done = true;
                 continue;
             }
-            if (sse.event === null ||
-                sse.event.startsWith('response.') ||
-                sse.event.startsWith('image_edit.') ||
-                sse.event.startsWith('image_generation.') ||
-                sse.event.startsWith('transcript.')) {
+            if (sse.event === null || !sse.event.startsWith('thread.')) {
                 let data;
                 try {
                     data = JSON.parse(sse.data);
@@ -4005,8 +4001,119 @@ let Messages$1 = class Messages extends APIResource {
     }
 };
 
-function
-return
+function isChatCompletionFunctionTool(tool) {
+    return tool !== undefined && 'function' in tool && tool.function !== undefined;
+}
+function isAutoParsableResponseFormat(response_format) {
+    return response_format?.['$brand'] === 'auto-parseable-response-format';
+}
+function isAutoParsableTool$1(tool) {
+    return tool?.['$brand'] === 'auto-parseable-tool';
+}
+function maybeParseChatCompletion(completion, params) {
+    if (!params || !hasAutoParseableInput$1(params)) {
+        return {
+            ...completion,
+            choices: completion.choices.map((choice) => {
+                assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+                return {
+                    ...choice,
+                    message: {
+                        ...choice.message,
+                        parsed: null,
+                        ...(choice.message.tool_calls ?
+                            {
+                                tool_calls: choice.message.tool_calls,
+                            }
+                            : undefined),
+                    },
+                };
+            }),
+        };
+    }
+    return parseChatCompletion(completion, params);
+}
+function parseChatCompletion(completion, params) {
+    const choices = completion.choices.map((choice) => {
+        if (choice.finish_reason === 'length') {
+            throw new LengthFinishReasonError();
+        }
+        if (choice.finish_reason === 'content_filter') {
+            throw new ContentFilterFinishReasonError();
+        }
+        assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+        return {
+            ...choice,
+            message: {
+                ...choice.message,
+                ...(choice.message.tool_calls ?
+                    {
+                        tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? undefined,
+                    }
+                    : undefined),
+                parsed: choice.message.content && !choice.message.refusal ?
+                    parseResponseFormat(params, choice.message.content)
+                    : null,
+            },
+        };
+    });
+    return { ...completion, choices };
+}
+function parseResponseFormat(params, content) {
+    if (params.response_format?.type !== 'json_schema') {
+        return null;
+    }
+    if (params.response_format?.type === 'json_schema') {
+        if ('$parseRaw' in params.response_format) {
+            const response_format = params.response_format;
+            return response_format.$parseRaw(content);
+        }
+        return JSON.parse(content);
+    }
+    return null;
+}
+function parseToolCall$1(params, toolCall) {
+    const inputTool = params.tools?.find((inputTool) => isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name); // TS doesn't narrow based on isChatCompletionTool
+    return {
+        ...toolCall,
+        function: {
+            ...toolCall.function,
+            parsed_arguments: isAutoParsableTool$1(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)
+                : inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)
+                    : null,
+        },
+    };
+}
+function shouldParseToolCall(params, toolCall) {
+    if (!params || !('tools' in params) || !params.tools) {
+        return false;
+    }
+    const inputTool = params.tools?.find((inputTool) => isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name);
+    return (isChatCompletionFunctionTool(inputTool) &&
+        (isAutoParsableTool$1(inputTool) || inputTool?.function.strict || false));
+}
+function hasAutoParseableInput$1(params) {
+    if (isAutoParsableResponseFormat(params.response_format)) {
+        return true;
+    }
+    return (params.tools?.some((t) => isAutoParsableTool$1(t) || (t.type === 'function' && t.function.strict === true)) ?? false);
+}
+function assertToolCallsAreChatCompletionFunctionToolCalls(toolCalls) {
+    for (const toolCall of toolCalls || []) {
+        if (toolCall.type !== 'function') {
+            throw new OpenAIError(`Currently only \`function\` tool calls are supported; Received \`${toolCall.type}\``);
+        }
+    }
+}
+function validateInputTools(tools) {
+    for (const tool of tools ?? []) {
+        if (tool.type !== 'function') {
+            throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
+        }
+        if (tool.function.strict !== true) {
+            throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
+        }
+    }
 }
 
 const isAssistantMessage = (message) => {
@@ -4200,104 +4307,8 @@ _EventStream_connectedPromise = new WeakMap(), _EventStream_resolveConnectedProm
         return this._emit('error', new OpenAIError(String(error)));
     };
 
-function
-return
-}
-function isAutoParsableTool$1(tool) {
-    return tool?.['$brand'] === 'auto-parseable-tool';
-}
-function maybeParseChatCompletion(completion, params) {
-    if (!params || !hasAutoParseableInput$1(params)) {
-        return {
-            ...completion,
-            choices: completion.choices.map((choice) => ({
-                ...choice,
-                message: {
-                    ...choice.message,
-                    parsed: null,
-                    ...(choice.message.tool_calls ?
-                        {
-                            tool_calls: choice.message.tool_calls,
-                        }
-                        : undefined),
-                },
-            })),
-        };
-    }
-    return parseChatCompletion(completion, params);
-}
-function parseChatCompletion(completion, params) {
-    const choices = completion.choices.map((choice) => {
-        if (choice.finish_reason === 'length') {
-            throw new LengthFinishReasonError();
-        }
-        if (choice.finish_reason === 'content_filter') {
-            throw new ContentFilterFinishReasonError();
-        }
-        return {
-            ...choice,
-            message: {
-                ...choice.message,
-                ...(choice.message.tool_calls ?
-                    {
-                        tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? undefined,
-                    }
-                    : undefined),
-                parsed: choice.message.content && !choice.message.refusal ?
-                    parseResponseFormat(params, choice.message.content)
-                    : null,
-            },
-        };
-    });
-    return { ...completion, choices };
-}
-function parseResponseFormat(params, content) {
-    if (params.response_format?.type !== 'json_schema') {
-        return null;
-    }
-    if (params.response_format?.type === 'json_schema') {
-        if ('$parseRaw' in params.response_format) {
-            const response_format = params.response_format;
-            return response_format.$parseRaw(content);
-        }
-        return JSON.parse(content);
-    }
-    return null;
-}
-function parseToolCall$1(params, toolCall) {
-    const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
-    return {
-        ...toolCall,
-        function: {
-            ...toolCall.function,
-            parsed_arguments: isAutoParsableTool$1(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)
-                : inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)
-                    : null,
-        },
-    };
-}
-function shouldParseToolCall(params, toolCall) {
-    if (!params) {
-        return false;
-    }
-    const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
-    return isAutoParsableTool$1(inputTool) || inputTool?.function.strict || false;
-}
-function hasAutoParseableInput$1(params) {
-    if (isAutoParsableResponseFormat(params.response_format)) {
-        return true;
-    }
-    return (params.tools?.some((t) => isAutoParsableTool$1(t) || (t.type === 'function' && t.function.strict === true)) ?? false);
-}
-function validateInputTools(tools) {
-    for (const tool of tools ?? []) {
-        if (tool.type !== 'function') {
-            throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
-        }
-        if (tool.function.strict !== true) {
-            throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
-        }
-    }
+function isRunnableFunctionWithParse(fn) {
+    return typeof fn.parse === 'function';
 }
 
 var _AbstractChatCompletionRunner_instances, _AbstractChatCompletionRunner_getFinalContent, _AbstractChatCompletionRunner_getFinalMessage, _AbstractChatCompletionRunner_getFinalFunctionToolCall, _AbstractChatCompletionRunner_getFinalFunctionToolCallResult, _AbstractChatCompletionRunner_calculateTotalUsage, _AbstractChatCompletionRunner_validateParams, _AbstractChatCompletionRunner_stringifyFunctionCallResult;
@@ -4423,7 +4434,7 @@ class AbstractChatCompletionRunner extends EventStream {
     async _runTools(client, params, options) {
         const role = 'tool';
         const { tool_choice = 'auto', stream, ...restParams } = params;
-        const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice?.function?.name;
+        const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice.type === 'function' && tool_choice?.function?.name;
         const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};
         // TODO(someday): clean this logic up
         const inputTools = params.tools.map((tool) => {
@@ -4541,7 +4552,7 @@ _AbstractChatCompletionRunner_instances = new WeakSet(), _AbstractChatCompletion
     for (let i = this.messages.length - 1; i >= 0; i--) {
         const message = this.messages[i];
         if (isAssistantMessage(message) && message?.tool_calls?.length) {
-            return message.tool_calls.at(-1)?.function;
+            return message.tool_calls.filter((x) => x.type === 'function').at(-1)?.function;
         }
     }
     return;
@@ -5019,7 +5030,7 @@ class ChatCompletionStream extends AbstractChatCompletionRunner {
             throw new Error('tool call snapshot missing `type`');
         }
         if (toolCallSnapshot.type === 'function') {
-            const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => tool
+            const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => isChatCompletionFunctionTool(tool) && tool.function.name === toolCallSnapshot.function.name); // TS doesn't narrow based on isChatCompletionTool
             this._emit('tool_calls.function.arguments.done', {
                 name: toolCallSnapshot.function.name,
                 index: toolCallIndex,
@@ -8829,7 +8840,7 @@ OpenAI.Evals = Evals;
 OpenAI.Containers = Containers;
 
 // llm-openai-config.ts
-const DEFAULT_MODEL
+const DEFAULT_MODEL = 'gpt-4.1-mini';
 /** Token costs in USD per 1M tokens. Last updated Feb 2025. */
 const openAiModelCosts = {
     'gpt-4o': {
@@ -8868,6 +8879,18 @@ const openAiModelCosts = {
         inputCost: 0.1 / 1_000_000,
         outputCost: 0.4 / 1_000_000,
     },
+    'gpt-5': {
+        inputCost: 1.25 / 1_000_000,
+        outputCost: 10 / 1_000_000,
+    },
+    'gpt-5-mini': {
+        inputCost: 0.25 / 1_000_000,
+        outputCost: 2 / 1_000_000,
+    },
+    'gpt-5-nano': {
+        inputCost: 0.05 / 1_000_000,
+        outputCost: 0.4 / 1_000_000,
+    },
     'o4-mini': {
         inputCost: 1.1 / 1_000_000,
         outputCost: 4.4 / 1_000_000,
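What the new pricing rows mean in practice: a minimal sketch of the cost arithmetic for a gpt-5-mini call. The calculateCost helper appears in the next hunk with the signature calculateCost(provider, model, inputTokens, outputTokens, reasoningTokens…); the 'openai' provider string and the zero reasoning-token argument below are assumptions, not taken from this diff.

// Sketch only: cost of 10,000 input tokens and 2,000 output tokens on gpt-5-mini,
// using the rates added above ($0.25 and $2 per 1M tokens).
const inputTokens = 10_000;
const outputTokens = 2_000;
const expected = inputTokens * (0.25 / 1_000_000) + outputTokens * (2 / 1_000_000);
console.log(expected); // 0.0065 USD
// Presumably equivalent via the package helper (provider string assumed):
// const cost = calculateCost('openai', 'gpt-5-mini', inputTokens, outputTokens, 0);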
@@ -8935,7 +8958,6 @@ function calculateCost(provider, model, inputTokens, outputTokens, reasoningToke
     return inputCost + outputCost + reasoningCost;
 }
 
-const DEFAULT_MODEL = 'gpt-4.1-mini';
 /**
  * Fix a broken JSON string by attempting to extract and parse valid JSON content. This function is very lenient and will attempt to fix many types of JSON errors, including unbalanced brackets, missing or extra commas, improperly escaped $ signs, unquoted strings, trailing commas, missing closing brackets or braces, etc.
  * @param {string} jsonStr - The broken JSON string to fix
@@ -9180,9 +9202,7 @@ function initializeOpenAI(apiKey) {
     });
 }
 /**
- * Fixes broken JSON by sending it to
- * The GPT-4.1-mini model is a large language model that can understand and generate code,
- * including JSON. The returned JSON is the fixed version of the input JSON.
+ * Fixes broken JSON by sending it to OpenAI to fix it.
  * If the model fails to return valid JSON, an error is thrown.
  * @param jsonStr - the broken JSON to fix
  * @param apiKey - the OpenAI API key to use, or undefined to use the value of the OPENAI_API_KEY environment variable
@@ -9326,8 +9346,11 @@ const isSupportedModel = (model) => {
         'o3-mini',
         'gpt-4.1',
         'gpt-4.1-mini',
-        'o4-mini',
         'gpt-4.1-nano',
+        'gpt-5',
+        'gpt-5-mini',
+        'gpt-5-nano',
+        'o4-mini',
         'o3',
     ].includes(model);
 };
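With the entries added above, the gpt-5 family now passes the package's model validation. A small sketch of the check makeLLMCall runs before issuing a request; it assumes normalizeModelName passes these names through unchanged and that both helpers are reachable from the caller's scope, neither of which this diff shows.

// Sketch only: the support check the new model names now satisfy.
for (const m of ['gpt-5', 'gpt-5-mini', 'gpt-5-nano']) {
    console.log(m, isSupportedModel(normalizeModelName(m))); // expected: true for all three in 1.0.25
}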
@@ -9338,8 +9361,9 @@ const isSupportedModel = (model) => {
 */
 function supportsTemperature(model) {
     // Reasoning models don't support temperature
-
-
+    // GPT-5 models also do not support temperature
+    const reasoningAndGPT5Models = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3', 'gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
+    return !reasoningAndGPT5Models.includes(model);
 }
 /**
  * Checks if the given model is a reasoning model. Reasoning models have different tool choice constraints.
@@ -9350,6 +9374,15 @@ function isReasoningModel(model) {
     const reasoningModels = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3'];
     return reasoningModels.includes(model);
 }
+/**
+ * Checks if the given model is a GPT-5 model. GPT-5 models don't support tool_choice other than 'auto'.
+ * @param model The model to check.
+ * @returns True if the model is a GPT-5 model, false otherwise.
+ */
+function isGPT5Model(model) {
+    const gpt5Models = ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
+    return gpt5Models.includes(model);
+}
 /**
  * Makes a call to OpenAI's Responses API for more advanced use cases with built-in tools.
  *
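Taken together, supportsTemperature and the new isGPT5Model let request-building code decide which parameters to send. A minimal illustrative sketch; the buildRequestOptions wrapper, the temperature value, and the allowForcedToolChoice flag are inventions for this example, and the diff does not show whether these helpers are exported from the package.

// Illustrative only: branch on the helpers added or changed in this version.
function buildRequestOptions(model) {
    const options = { model };
    if (supportsTemperature(model)) {
        options.temperature = 0.7; // arbitrary example value
    }
    if (!isReasoningModel(model) && !isGPT5Model(model)) {
        options.allowForcedToolChoice = true; // hypothetical flag, mirrors the tool_choice guard in makeLLMCall below
    }
    return options;
}
console.log(buildRequestOptions('gpt-5-mini')); // { model: 'gpt-5-mini' }: neither temperature nor forced tool choice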
@@ -9377,7 +9410,7 @@ function isReasoningModel(model) {
  * @throws Error if the API call fails
  */
 const makeResponsesAPICall = async (input, options = {}) => {
-    const normalizedModel = normalizeModelName(options.model || DEFAULT_MODEL
+    const normalizedModel = normalizeModelName(options.model || DEFAULT_MODEL);
     const apiKey = options.apiKey || process.env.OPENAI_API_KEY;
     if (!apiKey) {
         throw new Error('OpenAI API key is not provided and OPENAI_API_KEY environment variable is not set');
@@ -9488,7 +9521,7 @@ const makeResponsesAPICall = async (input, options = {}) => {
  * });
  */
 async function makeLLMCall(input, options = {}) {
-    const { apiKey, model = DEFAULT_MODEL
+    const { apiKey, model = DEFAULT_MODEL, responseFormat = 'text', tools, useCodeInterpreter = false, useWebSearch = false, imageBase64, imageDetail = 'high', context, } = options;
     // Validate model
     const normalizedModel = normalizeModelName(model);
     if (!isSupportedModel(normalizedModel)) {
@@ -9580,8 +9613,8 @@ async function makeLLMCall(input, options = {}) {
     }
     if (useWebSearch) {
         responsesOptions.tools = [{ type: 'web_search_preview' }];
-        // For reasoning models, we can't force tool choice - they only support 'auto'
-        if (!isReasoningModel(normalizedModel)) {
+        // For reasoning models and GPT-5 models, we can't force tool choice - they only support 'auto'
+        if (!isReasoningModel(normalizedModel) && !isGPT5Model(normalizedModel)) {
             responsesOptions.tool_choice = { type: 'web_search_preview' };
         }
     }
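A hedged usage sketch of the behavior changed above: with a GPT-5 model, makeLLMCall still attaches the web_search_preview tool but no longer forces tool_choice, leaving it at 'auto'. The prompt text and the way the result is consumed are illustrative; the option names come from the destructuring in the previous hunk.

// Sketch only: web search with a GPT-5 model; tool_choice stays 'auto' per the guard above.
const result = await makeLLMCall('Summarize the latest AI model pricing news.', {
    model: 'gpt-5-mini',
    useWebSearch: true,
    // apiKey omitted: makeResponsesAPICall falls back to process.env.OPENAI_API_KEY
});
console.log(result);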
@@ -16307,7 +16340,7 @@ class AlpacaMarketDataAPI extends EventEmitter {
             hasMorePages = !!pageToken;
             // Enhanced logging with date range and progress info
             const dateRangeStr = earliestTimestamp && latestTimestamp
-                ? `${earliestTimestamp.toLocaleDateString('en-US', { timeZone: 'America/New_York' })} to ${latestTimestamp.toLocaleDateString('en-US', { timeZone: 'America/New_York' })}`
+                ? `${new Date(earliestTimestamp).toLocaleDateString('en-US', { timeZone: 'America/New_York' })} to ${new Date(latestTimestamp).toLocaleDateString('en-US', { timeZone: 'America/New_York' })}`
                 : 'unknown range';
             log(`Page ${pageCount}: Fetched ${pageBarsCount.toLocaleString()} bars (total: ${totalBarsCount.toLocaleString()}) for ${symbols.length} symbols, date range: ${dateRangeStr}${hasMorePages ? ', more pages available' : ', complete'}`);
             // Prevent infinite loops
@@ -17619,6 +17652,10 @@ class AlpacaTradingAPI {
             queryParams.append('period', params.period);
         if (params.extended_hours !== undefined)
             queryParams.append('extended_hours', params.extended_hours.toString());
+        if (params.start)
+            queryParams.append('start', params.start);
+        if (params.end)
+            queryParams.append('end', params.end);
         if (params.date_end)
             queryParams.append('date_end', params.date_end);
         const response = await this.makeRequest(`/account/portfolio/history?${queryParams.toString()}`);
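The new start/end handling means portfolio-history callers can now pass explicit range timestamps alongside the existing period, extended_hours, and date_end fields. The method name and exact parameter types are not visible in this diff, so the call below is a guess at shape only.

// Hypothetical call shape; the portfolio-history method name is assumed, not shown in this diff.
const history = await tradingApi.getPortfolioHistory({
    period: '1M',
    extended_hours: false,
    start: '2025-01-02T09:30:00-05:00', // newly forwarded as the `start` query param
    end: '2025-01-31T16:00:00-05:00',   // newly forwarded as the `end` query param
});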