@discomedia/utils 1.0.23 → 1.0.25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index-frontend.cjs +159 -122
- package/dist/index-frontend.cjs.map +1 -1
- package/dist/index-frontend.mjs +159 -122
- package/dist/index-frontend.mjs.map +1 -1
- package/dist/index.cjs +159 -122
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +159 -122
- package/dist/index.mjs.map +1 -1
- package/dist/package.json +4 -4
- package/dist/test.js +5129 -1037
- package/dist/test.js.map +1 -1
- package/dist/types/alpaca-market-data-api.d.ts.map +1 -1
- package/dist/types/alpaca-trading-api.d.ts +4 -9
- package/dist/types/alpaca-trading-api.d.ts.map +1 -1
- package/dist/types/json-tools.d.ts +1 -3
- package/dist/types/json-tools.d.ts.map +1 -1
- package/dist/types/llm-config.d.ts.map +1 -1
- package/dist/types/llm-openai.d.ts +6 -0
- package/dist/types/llm-openai.d.ts.map +1 -1
- package/dist/types/types/alpaca-types.d.ts +2 -8
- package/dist/types/types/alpaca-types.d.ts.map +1 -1
- package/dist/types/types/llm-types.d.ts +1 -1
- package/dist/types/types/llm-types.d.ts.map +1 -1
- package/dist/types-frontend/alpaca-market-data-api.d.ts.map +1 -1
- package/dist/types-frontend/alpaca-trading-api.d.ts +4 -9
- package/dist/types-frontend/alpaca-trading-api.d.ts.map +1 -1
- package/dist/types-frontend/json-tools.d.ts +1 -3
- package/dist/types-frontend/json-tools.d.ts.map +1 -1
- package/dist/types-frontend/llm-config.d.ts.map +1 -1
- package/dist/types-frontend/llm-openai.d.ts +6 -0
- package/dist/types-frontend/llm-openai.d.ts.map +1 -1
- package/dist/types-frontend/types/alpaca-types.d.ts +2 -8
- package/dist/types-frontend/types/alpaca-types.d.ts.map +1 -1
- package/dist/types-frontend/types/llm-types.d.ts +1 -1
- package/dist/types-frontend/types/llm-types.d.ts.map +1 -1
- package/package.json +4 -4
- package/dist/types/old-test.d.ts +0 -2
- package/dist/types/old-test.d.ts.map +0 -1
- package/dist/types-frontend/old-test.d.ts +0 -2
- package/dist/types-frontend/old-test.d.ts.map +0 -1
package/dist/index-frontend.cjs
CHANGED
@@ -249,7 +249,7 @@ const safeJSON = (text) => {
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
 
-const VERSION = '5.
+const VERSION = '5.12.1'; // x-release-please-version
 
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 const isRunningInBrowser = () => {
@@ -1104,11 +1104,7 @@ class Stream {
             done = true;
             continue;
         }
-        if (sse.event === null ||
-            sse.event.startsWith('response.') ||
-            sse.event.startsWith('image_edit.') ||
-            sse.event.startsWith('image_generation.') ||
-            sse.event.startsWith('transcript.')) {
+        if (sse.event === null || !sse.event.startsWith('thread.')) {
             let data;
             try {
                 data = JSON.parse(sse.data);
@@ -1886,8 +1882,119 @@ let Messages$1 = class Messages extends APIResource {
     }
 };
 
-function
-return
+function isChatCompletionFunctionTool(tool) {
+    return tool !== undefined && 'function' in tool && tool.function !== undefined;
+}
+function isAutoParsableResponseFormat(response_format) {
+    return response_format?.['$brand'] === 'auto-parseable-response-format';
+}
+function isAutoParsableTool$1(tool) {
+    return tool?.['$brand'] === 'auto-parseable-tool';
+}
+function maybeParseChatCompletion(completion, params) {
+    if (!params || !hasAutoParseableInput$1(params)) {
+        return {
+            ...completion,
+            choices: completion.choices.map((choice) => {
+                assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+                return {
+                    ...choice,
+                    message: {
+                        ...choice.message,
+                        parsed: null,
+                        ...(choice.message.tool_calls ?
+                            {
+                                tool_calls: choice.message.tool_calls,
+                            }
+                            : undefined),
+                    },
+                };
+            }),
+        };
+    }
+    return parseChatCompletion(completion, params);
+}
+function parseChatCompletion(completion, params) {
+    const choices = completion.choices.map((choice) => {
+        if (choice.finish_reason === 'length') {
+            throw new LengthFinishReasonError();
+        }
+        if (choice.finish_reason === 'content_filter') {
+            throw new ContentFilterFinishReasonError();
+        }
+        assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+        return {
+            ...choice,
+            message: {
+                ...choice.message,
+                ...(choice.message.tool_calls ?
+                    {
+                        tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? undefined,
+                    }
+                    : undefined),
+                parsed: choice.message.content && !choice.message.refusal ?
+                    parseResponseFormat(params, choice.message.content)
+                    : null,
+            },
+        };
+    });
+    return { ...completion, choices };
+}
+function parseResponseFormat(params, content) {
+    if (params.response_format?.type !== 'json_schema') {
+        return null;
+    }
+    if (params.response_format?.type === 'json_schema') {
+        if ('$parseRaw' in params.response_format) {
+            const response_format = params.response_format;
+            return response_format.$parseRaw(content);
+        }
+        return JSON.parse(content);
+    }
+    return null;
+}
+function parseToolCall$1(params, toolCall) {
+    const inputTool = params.tools?.find((inputTool) => isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name); // TS doesn't narrow based on isChatCompletionTool
+    return {
+        ...toolCall,
+        function: {
+            ...toolCall.function,
+            parsed_arguments: isAutoParsableTool$1(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)
+                : inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)
+                : null,
+        },
+    };
+}
+function shouldParseToolCall(params, toolCall) {
+    if (!params || !('tools' in params) || !params.tools) {
+        return false;
+    }
+    const inputTool = params.tools?.find((inputTool) => isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name);
+    return (isChatCompletionFunctionTool(inputTool) &&
+        (isAutoParsableTool$1(inputTool) || inputTool?.function.strict || false));
+}
+function hasAutoParseableInput$1(params) {
+    if (isAutoParsableResponseFormat(params.response_format)) {
+        return true;
+    }
+    return (params.tools?.some((t) => isAutoParsableTool$1(t) || (t.type === 'function' && t.function.strict === true)) ?? false);
+}
+function assertToolCallsAreChatCompletionFunctionToolCalls(toolCalls) {
+    for (const toolCall of toolCalls || []) {
+        if (toolCall.type !== 'function') {
+            throw new OpenAIError(`Currently only \`function\` tool calls are supported; Received \`${toolCall.type}\``);
+        }
+    }
+}
+function validateInputTools(tools) {
+    for (const tool of tools ?? []) {
+        if (tool.type !== 'function') {
+            throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
+        }
+        if (tool.function.strict !== true) {
+            throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
+        }
+    }
 }
 
 const isAssistantMessage = (message) => {
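
Note: the block added above is the bundled OpenAI SDK's chat-completion parsing helpers; tool calls are now guarded by isChatCompletionFunctionTool, and parsed_arguments is only filled in for auto-parseable or strict function tools. A minimal sketch of that behavior, using hypothetical completion/params objects; these helpers are internal to the bundle and are not exported by @discomedia/utils:

    // Hypothetical input: a strict function tool plus a completion whose tool call
    // carries JSON arguments. With strict: true, parseToolCall$1 JSON.parses the
    // arguments into function.parsed_arguments; otherwise it would stay null.
    const params = {
        tools: [{ type: 'function', function: { name: 'get_weather', strict: true, parameters: {} } }],
    };
    const completion = {
        choices: [{
            finish_reason: 'tool_calls',
            message: {
                role: 'assistant', content: null, refusal: null,
                tool_calls: [{ type: 'function', id: 'call_1', function: { name: 'get_weather', arguments: '{"city":"Boston"}' } }],
            },
        }],
    };
    // maybeParseChatCompletion is bundle-internal; shown here only to illustrate the shape it produces.
    const parsed = maybeParseChatCompletion(completion, params);
    console.log(parsed.choices[0].message.tool_calls[0].function.parsed_arguments); // { city: 'Boston' }
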
@@ -2081,104 +2188,8 @@ _EventStream_connectedPromise = new WeakMap(), _EventStream_resolveConnectedProm
     return this._emit('error', new OpenAIError(String(error)));
 };
 
-function
-return
-}
-function isAutoParsableTool$1(tool) {
-    return tool?.['$brand'] === 'auto-parseable-tool';
-}
-function maybeParseChatCompletion(completion, params) {
-    if (!params || !hasAutoParseableInput$1(params)) {
-        return {
-            ...completion,
-            choices: completion.choices.map((choice) => ({
-                ...choice,
-                message: {
-                    ...choice.message,
-                    parsed: null,
-                    ...(choice.message.tool_calls ?
-                        {
-                            tool_calls: choice.message.tool_calls,
-                        }
-                        : undefined),
-                },
-            })),
-        };
-    }
-    return parseChatCompletion(completion, params);
-}
-function parseChatCompletion(completion, params) {
-    const choices = completion.choices.map((choice) => {
-        if (choice.finish_reason === 'length') {
-            throw new LengthFinishReasonError();
-        }
-        if (choice.finish_reason === 'content_filter') {
-            throw new ContentFilterFinishReasonError();
-        }
-        return {
-            ...choice,
-            message: {
-                ...choice.message,
-                ...(choice.message.tool_calls ?
-                    {
-                        tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? undefined,
-                    }
-                    : undefined),
-                parsed: choice.message.content && !choice.message.refusal ?
-                    parseResponseFormat(params, choice.message.content)
-                    : null,
-            },
-        };
-    });
-    return { ...completion, choices };
-}
-function parseResponseFormat(params, content) {
-    if (params.response_format?.type !== 'json_schema') {
-        return null;
-    }
-    if (params.response_format?.type === 'json_schema') {
-        if ('$parseRaw' in params.response_format) {
-            const response_format = params.response_format;
-            return response_format.$parseRaw(content);
-        }
-        return JSON.parse(content);
-    }
-    return null;
-}
-function parseToolCall$1(params, toolCall) {
-    const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
-    return {
-        ...toolCall,
-        function: {
-            ...toolCall.function,
-            parsed_arguments: isAutoParsableTool$1(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)
-                : inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)
-                : null,
-        },
-    };
-}
-function shouldParseToolCall(params, toolCall) {
-    if (!params) {
-        return false;
-    }
-    const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
-    return isAutoParsableTool$1(inputTool) || inputTool?.function.strict || false;
-}
-function hasAutoParseableInput$1(params) {
-    if (isAutoParsableResponseFormat(params.response_format)) {
-        return true;
-    }
-    return (params.tools?.some((t) => isAutoParsableTool$1(t) || (t.type === 'function' && t.function.strict === true)) ?? false);
-}
-function validateInputTools(tools) {
-    for (const tool of tools ?? []) {
-        if (tool.type !== 'function') {
-            throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
-        }
-        if (tool.function.strict !== true) {
-            throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
-        }
-    }
+function isRunnableFunctionWithParse(fn) {
+    return typeof fn.parse === 'function';
 }
 
 var _AbstractChatCompletionRunner_instances, _AbstractChatCompletionRunner_getFinalContent, _AbstractChatCompletionRunner_getFinalMessage, _AbstractChatCompletionRunner_getFinalFunctionToolCall, _AbstractChatCompletionRunner_getFinalFunctionToolCallResult, _AbstractChatCompletionRunner_calculateTotalUsage, _AbstractChatCompletionRunner_validateParams, _AbstractChatCompletionRunner_stringifyFunctionCallResult;
@@ -2304,7 +2315,7 @@ class AbstractChatCompletionRunner extends EventStream {
     async _runTools(client, params, options) {
         const role = 'tool';
         const { tool_choice = 'auto', stream, ...restParams } = params;
-        const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice?.function?.name;
+        const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice.type === 'function' && tool_choice?.function?.name;
         const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};
         // TODO(someday): clean this logic up
         const inputTools = params.tools.map((tool) => {
@@ -2422,7 +2433,7 @@ _AbstractChatCompletionRunner_instances = new WeakSet(), _AbstractChatCompletion
     for (let i = this.messages.length - 1; i >= 0; i--) {
         const message = this.messages[i];
         if (isAssistantMessage(message) && message?.tool_calls?.length) {
-            return message.tool_calls.at(-1)?.function;
+            return message.tool_calls.filter((x) => x.type === 'function').at(-1)?.function;
         }
     }
     return;
@@ -2900,7 +2911,7 @@ class ChatCompletionStream extends AbstractChatCompletionRunner {
         throw new Error('tool call snapshot missing `type`');
     }
     if (toolCallSnapshot.type === 'function') {
-        const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => tool
+        const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => isChatCompletionFunctionTool(tool) && tool.function.name === toolCallSnapshot.function.name); // TS doesn't narrow based on isChatCompletionTool
         this._emit('tool_calls.function.arguments.done', {
             name: toolCallSnapshot.function.name,
             index: toolCallIndex,
@@ -6710,7 +6721,7 @@ OpenAI.Evals = Evals;
 OpenAI.Containers = Containers;
 
 // llm-openai-config.ts
-const DEFAULT_MODEL
+const DEFAULT_MODEL = 'gpt-4.1-mini';
 /** Token costs in USD per 1M tokens. Last updated Feb 2025. */
 const openAiModelCosts = {
     'gpt-4o': {
@@ -6749,6 +6760,18 @@ const openAiModelCosts = {
         inputCost: 0.1 / 1_000_000,
         outputCost: 0.4 / 1_000_000,
     },
+    'gpt-5': {
+        inputCost: 1.25 / 1_000_000,
+        outputCost: 10 / 1_000_000,
+    },
+    'gpt-5-mini': {
+        inputCost: 0.25 / 1_000_000,
+        outputCost: 2 / 1_000_000,
+    },
+    'gpt-5-nano': {
+        inputCost: 0.05 / 1_000_000,
+        outputCost: 0.4 / 1_000_000,
+    },
     'o4-mini': {
         inputCost: 1.1 / 1_000_000,
         outputCost: 4.4 / 1_000_000,
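
Note: the entries added above store GPT-5 pricing as per-token rates (the divisor converts the published per-1M-token prices). A rough sketch of how such a table turns token counts into a dollar figure, using hypothetical usage numbers; the package's own calculateCost (next hunk) also folds in a reasoning-token cost:

    // Hypothetical usage: 12,000 input tokens and 3,500 output tokens on gpt-5-mini.
    const costs = { 'gpt-5-mini': { inputCost: 0.25 / 1_000_000, outputCost: 2 / 1_000_000 } };
    const usage = { model: 'gpt-5-mini', inputTokens: 12_000, outputTokens: 3_500 };
    const { inputCost, outputCost } = costs[usage.model];
    const totalUsd = usage.inputTokens * inputCost + usage.outputTokens * outputCost;
    console.log(totalUsd.toFixed(6)); // 0.010000 -> $0.003 input + $0.007 output
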
@@ -6816,7 +6839,6 @@ function calculateCost(provider, model, inputTokens, outputTokens, reasoningToke
     return inputCost + outputCost + reasoningCost;
 }
 
-const DEFAULT_MODEL = 'gpt-4.1-mini';
 /**
  * Fix a broken JSON string by attempting to extract and parse valid JSON content. This function is very lenient and will attempt to fix many types of JSON errors, including unbalanced brackets, missing or extra commas, improperly escaped $ signs, unquoted strings, trailing commas, missing closing brackets or braces, etc.
  * @param {string} jsonStr - The broken JSON string to fix
@@ -7061,9 +7083,7 @@ function initializeOpenAI(apiKey) {
     });
 }
 /**
- * Fixes broken JSON by sending it to
- * The GPT-4.1-mini model is a large language model that can understand and generate code,
- * including JSON. The returned JSON is the fixed version of the input JSON.
+ * Fixes broken JSON by sending it to OpenAI to fix it.
  * If the model fails to return valid JSON, an error is thrown.
  * @param jsonStr - the broken JSON to fix
  * @param apiKey - the OpenAI API key to use, or undefined to use the value of the OPENAI_API_KEY environment variable
@@ -7207,8 +7227,11 @@ const isSupportedModel = (model) => {
         'o3-mini',
         'gpt-4.1',
         'gpt-4.1-mini',
-        'o4-mini',
         'gpt-4.1-nano',
+        'gpt-5',
+        'gpt-5-mini',
+        'gpt-5-nano',
+        'o4-mini',
         'o3',
     ].includes(model);
 };
@@ -7219,8 +7242,9 @@ const isSupportedModel = (model) => {
  */
 function supportsTemperature(model) {
     // Reasoning models don't support temperature
-
-
+    // GPT-5 models also do not support temperature
+    const reasoningAndGPT5Models = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3', 'gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
+    return !reasoningAndGPT5Models.includes(model);
 }
 /**
  * Checks if the given model is a reasoning model. Reasoning models have different tool choice constraints.
@@ -7231,6 +7255,15 @@ function isReasoningModel(model) {
     const reasoningModels = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3'];
     return reasoningModels.includes(model);
 }
+/**
+ * Checks if the given model is a GPT-5 model. GPT-5 models don't support tool_choice other than 'auto'.
+ * @param model The model to check.
+ * @returns True if the model is a GPT-5 model, false otherwise.
+ */
+function isGPT5Model(model) {
+    const gpt5Models = ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
+    return gpt5Models.includes(model);
+}
 /**
  * Makes a call to OpenAI's Responses API for more advanced use cases with built-in tools.
  *
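
Note: together with supportsTemperature above, the new isGPT5Model helper gates request options: GPT-5 models get no temperature and no forced tool_choice. A minimal sketch of that gating; the request shape below is illustrative only, not the package's exact options object:

    // Only set temperature / forced tool_choice when the model allows it.
    const request = { model: 'gpt-5-mini', input: 'Summarize the latest filing.' };
    if (supportsTemperature(request.model)) {
        request.temperature = 0.2;
    }
    if (!isReasoningModel(request.model) && !isGPT5Model(request.model)) {
        request.tool_choice = { type: 'web_search_preview' };
    }
    // For 'gpt-5-mini' neither branch runs, so the request keeps the API defaults.
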
@@ -7258,7 +7291,7 @@ function isReasoningModel(model) {
  * @throws Error if the API call fails
  */
 const makeResponsesAPICall = async (input, options = {}) => {
-    const normalizedModel = normalizeModelName(options.model || DEFAULT_MODEL
+    const normalizedModel = normalizeModelName(options.model || DEFAULT_MODEL);
     const apiKey = options.apiKey || process.env.OPENAI_API_KEY;
     if (!apiKey) {
         throw new Error('OpenAI API key is not provided and OPENAI_API_KEY environment variable is not set');
@@ -7369,7 +7402,7 @@ const makeResponsesAPICall = async (input, options = {}) => {
  * });
  */
 async function makeLLMCall(input, options = {}) {
-    const { apiKey, model = DEFAULT_MODEL
+    const { apiKey, model = DEFAULT_MODEL, responseFormat = 'text', tools, useCodeInterpreter = false, useWebSearch = false, imageBase64, imageDetail = 'high', context, } = options;
     // Validate model
     const normalizedModel = normalizeModelName(model);
     if (!isSupportedModel(normalizedModel)) {
@@ -7461,8 +7494,8 @@ async function makeLLMCall(input, options = {}) {
     }
     if (useWebSearch) {
         responsesOptions.tools = [{ type: 'web_search_preview' }];
-        // For reasoning models, we can't force tool choice - they only support 'auto'
-        if (!isReasoningModel(normalizedModel)) {
+        // For reasoning models and GPT-5 models, we can't force tool choice - they only support 'auto'
+        if (!isReasoningModel(normalizedModel) && !isGPT5Model(normalizedModel)) {
             responsesOptions.tool_choice = { type: 'web_search_preview' };
         }
     }
@@ -13997,7 +14030,7 @@ class AlpacaMarketDataAPI extends require$$0$3.EventEmitter {
     hasMorePages = !!pageToken;
     // Enhanced logging with date range and progress info
     const dateRangeStr = earliestTimestamp && latestTimestamp
-        ? `${earliestTimestamp.toLocaleDateString('en-US', { timeZone: 'America/New_York' })} to ${latestTimestamp.toLocaleDateString('en-US', { timeZone: 'America/New_York' })}`
+        ? `${new Date(earliestTimestamp).toLocaleDateString('en-US', { timeZone: 'America/New_York' })} to ${new Date(latestTimestamp).toLocaleDateString('en-US', { timeZone: 'America/New_York' })}`
         : 'unknown range';
     log(`Page ${pageCount}: Fetched ${pageBarsCount.toLocaleString()} bars (total: ${totalBarsCount.toLocaleString()}) for ${symbols.length} symbols, date range: ${dateRangeStr}${hasMorePages ? ', more pages available' : ', complete'}`);
     // Prevent infinite loops
@@ -15309,6 +15342,10 @@ class AlpacaTradingAPI {
             queryParams.append('period', params.period);
         if (params.extended_hours !== undefined)
             queryParams.append('extended_hours', params.extended_hours.toString());
+        if (params.start)
+            queryParams.append('start', params.start);
+        if (params.end)
+            queryParams.append('end', params.end);
         if (params.date_end)
             queryParams.append('date_end', params.date_end);
         const response = await this.makeRequest(`/account/portfolio/history?${queryParams.toString()}`);
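
Note: the new start/end handling above follows the existing pattern of optional portfolio-history parameters: each is appended to the query string only when present. A small sketch of the resulting URL, using hypothetical parameter values:

    // Hypothetical call parameters for portfolio history.
    const params = { period: '1M', extended_hours: false, start: '2025-07-01', end: '2025-07-31' };
    const queryParams = new URLSearchParams();
    if (params.period) queryParams.append('period', params.period);
    if (params.extended_hours !== undefined) queryParams.append('extended_hours', params.extended_hours.toString());
    if (params.start) queryParams.append('start', params.start);
    if (params.end) queryParams.append('end', params.end);
    console.log(`/account/portfolio/history?${queryParams.toString()}`);
    // /account/portfolio/history?period=1M&extended_hours=false&start=2025-07-01&end=2025-07-31
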