@discomedia/utils 1.0.23 → 1.0.25
This diff compares the contents of package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the versions as they appear in their public registry.
- package/dist/index-frontend.cjs +159 -122
- package/dist/index-frontend.cjs.map +1 -1
- package/dist/index-frontend.mjs +159 -122
- package/dist/index-frontend.mjs.map +1 -1
- package/dist/index.cjs +159 -122
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +159 -122
- package/dist/index.mjs.map +1 -1
- package/dist/package.json +4 -4
- package/dist/test.js +5129 -1037
- package/dist/test.js.map +1 -1
- package/dist/types/alpaca-market-data-api.d.ts.map +1 -1
- package/dist/types/alpaca-trading-api.d.ts +4 -9
- package/dist/types/alpaca-trading-api.d.ts.map +1 -1
- package/dist/types/json-tools.d.ts +1 -3
- package/dist/types/json-tools.d.ts.map +1 -1
- package/dist/types/llm-config.d.ts.map +1 -1
- package/dist/types/llm-openai.d.ts +6 -0
- package/dist/types/llm-openai.d.ts.map +1 -1
- package/dist/types/types/alpaca-types.d.ts +2 -8
- package/dist/types/types/alpaca-types.d.ts.map +1 -1
- package/dist/types/types/llm-types.d.ts +1 -1
- package/dist/types/types/llm-types.d.ts.map +1 -1
- package/dist/types-frontend/alpaca-market-data-api.d.ts.map +1 -1
- package/dist/types-frontend/alpaca-trading-api.d.ts +4 -9
- package/dist/types-frontend/alpaca-trading-api.d.ts.map +1 -1
- package/dist/types-frontend/json-tools.d.ts +1 -3
- package/dist/types-frontend/json-tools.d.ts.map +1 -1
- package/dist/types-frontend/llm-config.d.ts.map +1 -1
- package/dist/types-frontend/llm-openai.d.ts +6 -0
- package/dist/types-frontend/llm-openai.d.ts.map +1 -1
- package/dist/types-frontend/types/alpaca-types.d.ts +2 -8
- package/dist/types-frontend/types/alpaca-types.d.ts.map +1 -1
- package/dist/types-frontend/types/llm-types.d.ts +1 -1
- package/dist/types-frontend/types/llm-types.d.ts.map +1 -1
- package/package.json +4 -4
- package/dist/types/old-test.d.ts +0 -2
- package/dist/types/old-test.d.ts.map +0 -1
- package/dist/types-frontend/old-test.d.ts +0 -2
- package/dist/types-frontend/old-test.d.ts.map +0 -1
package/dist/index-frontend.mjs CHANGED
@@ -247,7 +247,7 @@ const safeJSON = (text) => {
 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

-const VERSION = '5.
+const VERSION = '5.12.1'; // x-release-please-version

 // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 const isRunningInBrowser = () => {
@@ -1102,11 +1102,7 @@ class Stream {
                 done = true;
                 continue;
             }
-            if (sse.event === null ||
-                sse.event.startsWith('response.') ||
-                sse.event.startsWith('image_edit.') ||
-                sse.event.startsWith('image_generation.') ||
-                sse.event.startsWith('transcript.')) {
+            if (sse.event === null || !sse.event.startsWith('thread.')) {
                 let data;
                 try {
                     data = JSON.parse(sse.data);
@@ -1884,8 +1880,119 @@ let Messages$1 = class Messages extends APIResource {
     }
 };

-function
-return
+function isChatCompletionFunctionTool(tool) {
+    return tool !== undefined && 'function' in tool && tool.function !== undefined;
+}
+function isAutoParsableResponseFormat(response_format) {
+    return response_format?.['$brand'] === 'auto-parseable-response-format';
+}
+function isAutoParsableTool$1(tool) {
+    return tool?.['$brand'] === 'auto-parseable-tool';
+}
+function maybeParseChatCompletion(completion, params) {
+    if (!params || !hasAutoParseableInput$1(params)) {
+        return {
+            ...completion,
+            choices: completion.choices.map((choice) => {
+                assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+                return {
+                    ...choice,
+                    message: {
+                        ...choice.message,
+                        parsed: null,
+                        ...(choice.message.tool_calls ?
+                            {
+                                tool_calls: choice.message.tool_calls,
+                            }
+                            : undefined),
+                    },
+                };
+            }),
+        };
+    }
+    return parseChatCompletion(completion, params);
+}
+function parseChatCompletion(completion, params) {
+    const choices = completion.choices.map((choice) => {
+        if (choice.finish_reason === 'length') {
+            throw new LengthFinishReasonError();
+        }
+        if (choice.finish_reason === 'content_filter') {
+            throw new ContentFilterFinishReasonError();
+        }
+        assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+        return {
+            ...choice,
+            message: {
+                ...choice.message,
+                ...(choice.message.tool_calls ?
+                    {
+                        tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? undefined,
+                    }
+                    : undefined),
+                parsed: choice.message.content && !choice.message.refusal ?
+                    parseResponseFormat(params, choice.message.content)
+                    : null,
+            },
+        };
+    });
+    return { ...completion, choices };
+}
+function parseResponseFormat(params, content) {
+    if (params.response_format?.type !== 'json_schema') {
+        return null;
+    }
+    if (params.response_format?.type === 'json_schema') {
+        if ('$parseRaw' in params.response_format) {
+            const response_format = params.response_format;
+            return response_format.$parseRaw(content);
+        }
+        return JSON.parse(content);
+    }
+    return null;
+}
+function parseToolCall$1(params, toolCall) {
+    const inputTool = params.tools?.find((inputTool) => isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name); // TS doesn't narrow based on isChatCompletionTool
+    return {
+        ...toolCall,
+        function: {
+            ...toolCall.function,
+            parsed_arguments: isAutoParsableTool$1(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)
+                : inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)
+                    : null,
+        },
+    };
+}
+function shouldParseToolCall(params, toolCall) {
+    if (!params || !('tools' in params) || !params.tools) {
+        return false;
+    }
+    const inputTool = params.tools?.find((inputTool) => isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name);
+    return (isChatCompletionFunctionTool(inputTool) &&
+        (isAutoParsableTool$1(inputTool) || inputTool?.function.strict || false));
+}
+function hasAutoParseableInput$1(params) {
+    if (isAutoParsableResponseFormat(params.response_format)) {
+        return true;
+    }
+    return (params.tools?.some((t) => isAutoParsableTool$1(t) || (t.type === 'function' && t.function.strict === true)) ?? false);
+}
+function assertToolCallsAreChatCompletionFunctionToolCalls(toolCalls) {
+    for (const toolCall of toolCalls || []) {
+        if (toolCall.type !== 'function') {
+            throw new OpenAIError(`Currently only \`function\` tool calls are supported; Received \`${toolCall.type}\``);
+        }
+    }
+}
+function validateInputTools(tools) {
+    for (const tool of tools ?? []) {
+        if (tool.type !== 'function') {
+            throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
+        }
+        if (tool.function.strict !== true) {
+            throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
+        }
+    }
 }

 const isAssistantMessage = (message) => {
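The block above replaces the old parsing helpers with versions that explicitly narrow tool calls to `function` tools before parsing arguments. A minimal sketch of that behavior, using hypothetical inputs (these helpers are internal to the bundled file, so the sketch mirrors the logic rather than importing it):

```js
// Illustrative only: mirrors the parsed_arguments logic in the hunk above.
const inputTools = [
  { type: 'function', function: { name: 'get_weather', strict: true, parameters: {} } },
];

const toolCall = {
  type: 'function',
  function: { name: 'get_weather', arguments: '{"city":"Austin"}' },
};

// Only `function` tools are considered, and arguments are JSON-parsed
// only when the matching input tool is strict (or auto-parseable).
const match = inputTools.find(
  (t) => 'function' in t && t.function?.name === toolCall.function.name
);
const parsed_arguments = match?.function.strict
  ? JSON.parse(toolCall.function.arguments)
  : null;

console.log(parsed_arguments); // { city: 'Austin' }
```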
@@ -2079,104 +2186,8 @@ _EventStream_connectedPromise = new WeakMap(), _EventStream_resolveConnectedProm
         return this._emit('error', new OpenAIError(String(error)));
     };

-function
-return
-}
-function isAutoParsableTool$1(tool) {
-    return tool?.['$brand'] === 'auto-parseable-tool';
-}
-function maybeParseChatCompletion(completion, params) {
-    if (!params || !hasAutoParseableInput$1(params)) {
-        return {
-            ...completion,
-            choices: completion.choices.map((choice) => ({
-                ...choice,
-                message: {
-                    ...choice.message,
-                    parsed: null,
-                    ...(choice.message.tool_calls ?
-                        {
-                            tool_calls: choice.message.tool_calls,
-                        }
-                        : undefined),
-                },
-            })),
-        };
-    }
-    return parseChatCompletion(completion, params);
-}
-function parseChatCompletion(completion, params) {
-    const choices = completion.choices.map((choice) => {
-        if (choice.finish_reason === 'length') {
-            throw new LengthFinishReasonError();
-        }
-        if (choice.finish_reason === 'content_filter') {
-            throw new ContentFilterFinishReasonError();
-        }
-        return {
-            ...choice,
-            message: {
-                ...choice.message,
-                ...(choice.message.tool_calls ?
-                    {
-                        tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? undefined,
-                    }
-                    : undefined),
-                parsed: choice.message.content && !choice.message.refusal ?
-                    parseResponseFormat(params, choice.message.content)
-                    : null,
-            },
-        };
-    });
-    return { ...completion, choices };
-}
-function parseResponseFormat(params, content) {
-    if (params.response_format?.type !== 'json_schema') {
-        return null;
-    }
-    if (params.response_format?.type === 'json_schema') {
-        if ('$parseRaw' in params.response_format) {
-            const response_format = params.response_format;
-            return response_format.$parseRaw(content);
-        }
-        return JSON.parse(content);
-    }
-    return null;
-}
-function parseToolCall$1(params, toolCall) {
-    const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
-    return {
-        ...toolCall,
-        function: {
-            ...toolCall.function,
-            parsed_arguments: isAutoParsableTool$1(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)
-                : inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)
-                    : null,
-        },
-    };
-}
-function shouldParseToolCall(params, toolCall) {
-    if (!params) {
-        return false;
-    }
-    const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
-    return isAutoParsableTool$1(inputTool) || inputTool?.function.strict || false;
-}
-function hasAutoParseableInput$1(params) {
-    if (isAutoParsableResponseFormat(params.response_format)) {
-        return true;
-    }
-    return (params.tools?.some((t) => isAutoParsableTool$1(t) || (t.type === 'function' && t.function.strict === true)) ?? false);
-}
-function validateInputTools(tools) {
-    for (const tool of tools ?? []) {
-        if (tool.type !== 'function') {
-            throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
-        }
-        if (tool.function.strict !== true) {
-            throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
-        }
-    }
+function isRunnableFunctionWithParse(fn) {
+    return typeof fn.parse === 'function';
 }

 var _AbstractChatCompletionRunner_instances, _AbstractChatCompletionRunner_getFinalContent, _AbstractChatCompletionRunner_getFinalMessage, _AbstractChatCompletionRunner_getFinalFunctionToolCall, _AbstractChatCompletionRunner_getFinalFunctionToolCallResult, _AbstractChatCompletionRunner_calculateTotalUsage, _AbstractChatCompletionRunner_validateParams, _AbstractChatCompletionRunner_stringifyFunctionCallResult;
@@ -2302,7 +2313,7 @@ class AbstractChatCompletionRunner extends EventStream {
     async _runTools(client, params, options) {
         const role = 'tool';
         const { tool_choice = 'auto', stream, ...restParams } = params;
-        const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice?.function?.name;
+        const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice.type === 'function' && tool_choice?.function?.name;
         const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};
         // TODO(someday): clean this logic up
         const inputTools = params.tools.map((tool) => {
@@ -2420,7 +2431,7 @@ _AbstractChatCompletionRunner_instances = new WeakSet(), _AbstractChatCompletion
         for (let i = this.messages.length - 1; i >= 0; i--) {
             const message = this.messages[i];
             if (isAssistantMessage(message) && message?.tool_calls?.length) {
-                return message.tool_calls.at(-1)?.function;
+                return message.tool_calls.filter((x) => x.type === 'function').at(-1)?.function;
             }
         }
         return;
@@ -2898,7 +2909,7 @@ class ChatCompletionStream extends AbstractChatCompletionRunner {
             throw new Error('tool call snapshot missing `type`');
         }
         if (toolCallSnapshot.type === 'function') {
-            const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => tool
+            const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => isChatCompletionFunctionTool(tool) && tool.function.name === toolCallSnapshot.function.name); // TS doesn't narrow based on isChatCompletionTool
             this._emit('tool_calls.function.arguments.done', {
                 name: toolCallSnapshot.function.name,
                 index: toolCallIndex,
@@ -6708,7 +6719,7 @@ OpenAI.Evals = Evals;
 OpenAI.Containers = Containers;

 // llm-openai-config.ts
-const DEFAULT_MODEL
+const DEFAULT_MODEL = 'gpt-4.1-mini';
 /** Token costs in USD per 1M tokens. Last updated Feb 2025. */
 const openAiModelCosts = {
     'gpt-4o': {
@@ -6747,6 +6758,18 @@ const openAiModelCosts = {
         inputCost: 0.1 / 1_000_000,
         outputCost: 0.4 / 1_000_000,
     },
+    'gpt-5': {
+        inputCost: 1.25 / 1_000_000,
+        outputCost: 10 / 1_000_000,
+    },
+    'gpt-5-mini': {
+        inputCost: 0.25 / 1_000_000,
+        outputCost: 2 / 1_000_000,
+    },
+    'gpt-5-nano': {
+        inputCost: 0.05 / 1_000_000,
+        outputCost: 0.4 / 1_000_000,
+    },
     'o4-mini': {
         inputCost: 1.1 / 1_000_000,
         outputCost: 4.4 / 1_000_000,
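For orientation, the new entries price GPT-5 usage per token. A rough cost check using only the rates added above (plain arithmetic with hypothetical token counts, not a call into the package's `calculateCost`, whose provider and reasoning-token handling isn't shown in full here):

```js
// Rates from the table above, expressed in USD per token.
const gpt5MiniInput = 0.25 / 1_000_000;  // $0.25 per 1M input tokens
const gpt5MiniOutput = 2 / 1_000_000;    // $2.00 per 1M output tokens

// Hypothetical call: 10,000 input tokens and 2,000 output tokens.
const cost = 10_000 * gpt5MiniInput + 2_000 * gpt5MiniOutput;
console.log(cost.toFixed(4)); // "0.0065" -> about $0.0065 per call
```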
@@ -6814,7 +6837,6 @@ function calculateCost(provider, model, inputTokens, outputTokens, reasoningToke
     return inputCost + outputCost + reasoningCost;
 }

-const DEFAULT_MODEL = 'gpt-4.1-mini';
 /**
  * Fix a broken JSON string by attempting to extract and parse valid JSON content. This function is very lenient and will attempt to fix many types of JSON errors, including unbalanced brackets, missing or extra commas, improperly escaped $ signs, unquoted strings, trailing commas, missing closing brackets or braces, etc.
  * @param {string} jsonStr - The broken JSON string to fix
@@ -7059,9 +7081,7 @@ function initializeOpenAI(apiKey) {
     });
 }
 /**
- * Fixes broken JSON by sending it to
- * The GPT-4.1-mini model is a large language model that can understand and generate code,
- * including JSON. The returned JSON is the fixed version of the input JSON.
+ * Fixes broken JSON by sending it to OpenAI to fix it.
  * If the model fails to return valid JSON, an error is thrown.
  * @param jsonStr - the broken JSON to fix
  * @param apiKey - the OpenAI API key to use, or undefined to use the value of the OPENAI_API_KEY environment variable
@@ -7205,8 +7225,11 @@ const isSupportedModel = (model) => {
         'o3-mini',
         'gpt-4.1',
         'gpt-4.1-mini',
-        'o4-mini',
         'gpt-4.1-nano',
+        'gpt-5',
+        'gpt-5-mini',
+        'gpt-5-nano',
+        'o4-mini',
         'o3',
     ].includes(model);
 };
@@ -7217,8 +7240,9 @@ const isSupportedModel = (model) => {
  */
 function supportsTemperature(model) {
     // Reasoning models don't support temperature
-
-
+    // GPT-5 models also do not support temperature
+    const reasoningAndGPT5Models = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3', 'gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
+    return !reasoningAndGPT5Models.includes(model);
 }
 /**
  * Checks if the given model is a reasoning model. Reasoning models have different tool choice constraints.
@@ -7229,6 +7253,15 @@ function isReasoningModel(model) {
     const reasoningModels = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3'];
     return reasoningModels.includes(model);
 }
+/**
+ * Checks if the given model is a GPT-5 model. GPT-5 models don't support tool_choice other than 'auto'.
+ * @param model The model to check.
+ * @returns True if the model is a GPT-5 model, false otherwise.
+ */
+function isGPT5Model(model) {
+    const gpt5Models = ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
+    return gpt5Models.includes(model);
+}
 /**
  * Makes a call to OpenAI's Responses API for more advanced use cases with built-in tools.
  *
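The two helpers above gate temperature and tool-choice handling for the new models. A small sketch restating the same checks outside the bundle (they are internal to the built file, so this reimplements rather than imports them):

```js
// Re-statement of the checks introduced above, for illustration only.
const reasoningAndGPT5Models = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3', 'gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
const gpt5Models = ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'];

const supportsTemperature = (model) => !reasoningAndGPT5Models.includes(model);
const isGPT5Model = (model) => gpt5Models.includes(model);

console.log(supportsTemperature('gpt-4.1-mini')); // true
console.log(supportsTemperature('gpt-5-mini'));   // false
console.log(isGPT5Model('gpt-5-nano'));           // true
```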
@@ -7256,7 +7289,7 @@ function isReasoningModel(model) {
  * @throws Error if the API call fails
  */
 const makeResponsesAPICall = async (input, options = {}) => {
-    const normalizedModel = normalizeModelName(options.model || DEFAULT_MODEL
+    const normalizedModel = normalizeModelName(options.model || DEFAULT_MODEL);
     const apiKey = options.apiKey || process.env.OPENAI_API_KEY;
     if (!apiKey) {
         throw new Error('OpenAI API key is not provided and OPENAI_API_KEY environment variable is not set');
@@ -7367,7 +7400,7 @@ const makeResponsesAPICall = async (input, options = {}) => {
  * });
  */
 async function makeLLMCall(input, options = {}) {
-    const { apiKey, model = DEFAULT_MODEL
+    const { apiKey, model = DEFAULT_MODEL, responseFormat = 'text', tools, useCodeInterpreter = false, useWebSearch = false, imageBase64, imageDetail = 'high', context, } = options;
     // Validate model
     const normalizedModel = normalizeModelName(model);
     if (!isSupportedModel(normalizedModel)) {
@@ -7459,8 +7492,8 @@ async function makeLLMCall(input, options = {}) {
     }
     if (useWebSearch) {
         responsesOptions.tools = [{ type: 'web_search_preview' }];
-        // For reasoning models, we can't force tool choice - they only support 'auto'
-        if (!isReasoningModel(normalizedModel)) {
+        // For reasoning models and GPT-5 models, we can't force tool choice - they only support 'auto'
+        if (!isReasoningModel(normalizedModel) && !isGPT5Model(normalizedModel)) {
             responsesOptions.tool_choice = { type: 'web_search_preview' };
         }
     }
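In practice, a web-search call against a GPT-5 model now leaves `tool_choice` at its default instead of forcing `web_search_preview`. A hedged usage sketch, assuming `makeLLMCall` is exported from `@discomedia/utils` with the options destructured in the previous hunk (`model`, `useWebSearch`, `apiKey`); the return shape is not shown in this diff:

```js
import { makeLLMCall } from '@discomedia/utils';

// Assumed usage based on the option names shown in this diff; not an official example.
const result = await makeLLMCall('Summarize today\'s top market headlines.', {
  model: 'gpt-5-mini',              // newly supported model
  useWebSearch: true,               // adds { type: 'web_search_preview' } to tools
  apiKey: process.env.OPENAI_API_KEY,
});
// For gpt-5* (and reasoning) models the library no longer sets
// tool_choice = { type: 'web_search_preview' }; it stays on 'auto'.
console.log(result);
```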
@@ -13995,7 +14028,7 @@ class AlpacaMarketDataAPI extends EventEmitter {
             hasMorePages = !!pageToken;
             // Enhanced logging with date range and progress info
             const dateRangeStr = earliestTimestamp && latestTimestamp
-                ? `${earliestTimestamp.toLocaleDateString('en-US', { timeZone: 'America/New_York' })} to ${latestTimestamp.toLocaleDateString('en-US', { timeZone: 'America/New_York' })}`
+                ? `${new Date(earliestTimestamp).toLocaleDateString('en-US', { timeZone: 'America/New_York' })} to ${new Date(latestTimestamp).toLocaleDateString('en-US', { timeZone: 'America/New_York' })}`
                 : 'unknown range';
             log(`Page ${pageCount}: Fetched ${pageBarsCount.toLocaleString()} bars (total: ${totalBarsCount.toLocaleString()}) for ${symbols.length} symbols, date range: ${dateRangeStr}${hasMorePages ? ', more pages available' : ', complete'}`);
             // Prevent infinite loops
@@ -15307,6 +15340,10 @@ class AlpacaTradingAPI {
             queryParams.append('period', params.period);
         if (params.extended_hours !== undefined)
             queryParams.append('extended_hours', params.extended_hours.toString());
+        if (params.start)
+            queryParams.append('start', params.start);
+        if (params.end)
+            queryParams.append('end', params.end);
         if (params.date_end)
             queryParams.append('date_end', params.date_end);
         const response = await this.makeRequest(`/account/portfolio/history?${queryParams.toString()}`);
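The portfolio-history request builder now forwards optional `start` and `end` values. A sketch of the query string this produces (the surrounding params object is hypothetical; only the query construction is taken from the hunk above):

```js
// Mirrors the queryParams handling above for a hypothetical params object.
const params = {
  period: '1M',
  extended_hours: true,
  start: '2025-01-02T09:30:00-05:00', // newly forwarded
  end: '2025-01-31T16:00:00-05:00',   // newly forwarded
};

const queryParams = new URLSearchParams();
if (params.period) queryParams.append('period', params.period);
if (params.extended_hours !== undefined) queryParams.append('extended_hours', params.extended_hours.toString());
if (params.start) queryParams.append('start', params.start);
if (params.end) queryParams.append('end', params.end);

console.log(`/account/portfolio/history?${queryParams.toString()}`);
```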