@discomedia/utils 1.0.24 → 1.0.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -247,7 +247,7 @@ const safeJSON = (text) => {
  // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
  const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

- const VERSION = '5.12.0'; // x-release-please-version
+ const VERSION = '5.12.1'; // x-release-please-version

  // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
  const isRunningInBrowser = () => {
@@ -1880,8 +1880,119 @@ let Messages$1 = class Messages extends APIResource {
  }
  };

- function isRunnableFunctionWithParse(fn) {
- return typeof fn.parse === 'function';
+ function isChatCompletionFunctionTool(tool) {
+ return tool !== undefined && 'function' in tool && tool.function !== undefined;
+ }
+ function isAutoParsableResponseFormat(response_format) {
+ return response_format?.['$brand'] === 'auto-parseable-response-format';
+ }
+ function isAutoParsableTool$1(tool) {
+ return tool?.['$brand'] === 'auto-parseable-tool';
+ }
+ function maybeParseChatCompletion(completion, params) {
+ if (!params || !hasAutoParseableInput$1(params)) {
+ return {
+ ...completion,
+ choices: completion.choices.map((choice) => {
+ assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+ return {
+ ...choice,
+ message: {
+ ...choice.message,
+ parsed: null,
+ ...(choice.message.tool_calls ?
+ {
+ tool_calls: choice.message.tool_calls,
+ }
+ : undefined),
+ },
+ };
+ }),
+ };
+ }
+ return parseChatCompletion(completion, params);
+ }
+ function parseChatCompletion(completion, params) {
+ const choices = completion.choices.map((choice) => {
+ if (choice.finish_reason === 'length') {
+ throw new LengthFinishReasonError();
+ }
+ if (choice.finish_reason === 'content_filter') {
+ throw new ContentFilterFinishReasonError();
+ }
+ assertToolCallsAreChatCompletionFunctionToolCalls(choice.message.tool_calls);
+ return {
+ ...choice,
+ message: {
+ ...choice.message,
+ ...(choice.message.tool_calls ?
+ {
+ tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? undefined,
+ }
+ : undefined),
+ parsed: choice.message.content && !choice.message.refusal ?
+ parseResponseFormat(params, choice.message.content)
+ : null,
+ },
+ };
+ });
+ return { ...completion, choices };
+ }
+ function parseResponseFormat(params, content) {
+ if (params.response_format?.type !== 'json_schema') {
+ return null;
+ }
+ if (params.response_format?.type === 'json_schema') {
+ if ('$parseRaw' in params.response_format) {
+ const response_format = params.response_format;
+ return response_format.$parseRaw(content);
+ }
+ return JSON.parse(content);
+ }
+ return null;
+ }
+ function parseToolCall$1(params, toolCall) {
+ const inputTool = params.tools?.find((inputTool) => isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name); // TS doesn't narrow based on isChatCompletionTool
+ return {
+ ...toolCall,
+ function: {
+ ...toolCall.function,
+ parsed_arguments: isAutoParsableTool$1(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)
+ : inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)
+ : null,
+ },
+ };
+ }
+ function shouldParseToolCall(params, toolCall) {
+ if (!params || !('tools' in params) || !params.tools) {
+ return false;
+ }
+ const inputTool = params.tools?.find((inputTool) => isChatCompletionFunctionTool(inputTool) && inputTool.function?.name === toolCall.function.name);
+ return (isChatCompletionFunctionTool(inputTool) &&
+ (isAutoParsableTool$1(inputTool) || inputTool?.function.strict || false));
+ }
+ function hasAutoParseableInput$1(params) {
+ if (isAutoParsableResponseFormat(params.response_format)) {
+ return true;
+ }
+ return (params.tools?.some((t) => isAutoParsableTool$1(t) || (t.type === 'function' && t.function.strict === true)) ?? false);
+ }
+ function assertToolCallsAreChatCompletionFunctionToolCalls(toolCalls) {
+ for (const toolCall of toolCalls || []) {
+ if (toolCall.type !== 'function') {
+ throw new OpenAIError(`Currently only \`function\` tool calls are supported; Received \`${toolCall.type}\``);
+ }
+ }
+ }
+ function validateInputTools(tools) {
+ for (const tool of tools ?? []) {
+ if (tool.type !== 'function') {
+ throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
+ }
+ if (tool.function.strict !== true) {
+ throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
+ }
+ }
  }

  const isAssistantMessage = (message) => {
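The replacement helpers above only auto-parse strict function tools: validateInputTools throws unless tool.type === 'function' and tool.function.strict === true. A minimal sketch of a tool shape that would pass that check (the schema contents here are hypothetical, not taken from the package):

```ts
// Minimal sketch of the tool shape validateInputTools accepts (schema is hypothetical).
const weatherTool = {
  type: 'function' as const,
  function: {
    name: 'get_weather',
    strict: true, // without strict: true, validateInputTools throws
    parameters: {
      type: 'object',
      properties: { city: { type: 'string' } },
      required: ['city'],
      additionalProperties: false,
    },
  },
};

// validateInputTools([weatherTool]);        // passes: `function` type, strict: true
// validateInputTools([{ type: 'custom' }]); // throws: only `function` tools support auto-parsing
```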
@@ -2075,104 +2186,8 @@ _EventStream_connectedPromise = new WeakMap(), _EventStream_resolveConnectedProm
  return this._emit('error', new OpenAIError(String(error)));
  };

- function isAutoParsableResponseFormat(response_format) {
- return response_format?.['$brand'] === 'auto-parseable-response-format';
- }
- function isAutoParsableTool$1(tool) {
- return tool?.['$brand'] === 'auto-parseable-tool';
- }
- function maybeParseChatCompletion(completion, params) {
- if (!params || !hasAutoParseableInput$1(params)) {
- return {
- ...completion,
- choices: completion.choices.map((choice) => ({
- ...choice,
- message: {
- ...choice.message,
- parsed: null,
- ...(choice.message.tool_calls ?
- {
- tool_calls: choice.message.tool_calls,
- }
- : undefined),
- },
- })),
- };
- }
- return parseChatCompletion(completion, params);
- }
- function parseChatCompletion(completion, params) {
- const choices = completion.choices.map((choice) => {
- if (choice.finish_reason === 'length') {
- throw new LengthFinishReasonError();
- }
- if (choice.finish_reason === 'content_filter') {
- throw new ContentFilterFinishReasonError();
- }
- return {
- ...choice,
- message: {
- ...choice.message,
- ...(choice.message.tool_calls ?
- {
- tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? undefined,
- }
- : undefined),
- parsed: choice.message.content && !choice.message.refusal ?
- parseResponseFormat(params, choice.message.content)
- : null,
- },
- };
- });
- return { ...completion, choices };
- }
- function parseResponseFormat(params, content) {
- if (params.response_format?.type !== 'json_schema') {
- return null;
- }
- if (params.response_format?.type === 'json_schema') {
- if ('$parseRaw' in params.response_format) {
- const response_format = params.response_format;
- return response_format.$parseRaw(content);
- }
- return JSON.parse(content);
- }
- return null;
- }
- function parseToolCall$1(params, toolCall) {
- const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
- return {
- ...toolCall,
- function: {
- ...toolCall.function,
- parsed_arguments: isAutoParsableTool$1(inputTool) ? inputTool.$parseRaw(toolCall.function.arguments)
- : inputTool?.function.strict ? JSON.parse(toolCall.function.arguments)
- : null,
- },
- };
- }
- function shouldParseToolCall(params, toolCall) {
- if (!params) {
- return false;
- }
- const inputTool = params.tools?.find((inputTool) => inputTool.function?.name === toolCall.function.name);
- return isAutoParsableTool$1(inputTool) || inputTool?.function.strict || false;
- }
- function hasAutoParseableInput$1(params) {
- if (isAutoParsableResponseFormat(params.response_format)) {
- return true;
- }
- return (params.tools?.some((t) => isAutoParsableTool$1(t) || (t.type === 'function' && t.function.strict === true)) ?? false);
- }
- function validateInputTools(tools) {
- for (const tool of tools ?? []) {
- if (tool.type !== 'function') {
- throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
- }
- if (tool.function.strict !== true) {
- throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
- }
- }
+ function isRunnableFunctionWithParse(fn) {
+ return typeof fn.parse === 'function';
  }

  var _AbstractChatCompletionRunner_instances, _AbstractChatCompletionRunner_getFinalContent, _AbstractChatCompletionRunner_getFinalMessage, _AbstractChatCompletionRunner_getFinalFunctionToolCall, _AbstractChatCompletionRunner_getFinalFunctionToolCallResult, _AbstractChatCompletionRunner_calculateTotalUsage, _AbstractChatCompletionRunner_validateParams, _AbstractChatCompletionRunner_stringifyFunctionCallResult;
@@ -2298,7 +2313,7 @@ class AbstractChatCompletionRunner extends EventStream {
  async _runTools(client, params, options) {
  const role = 'tool';
  const { tool_choice = 'auto', stream, ...restParams } = params;
- const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice?.function?.name;
+ const singleFunctionToCall = typeof tool_choice !== 'string' && tool_choice.type === 'function' && tool_choice?.function?.name;
  const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};
  // TODO(someday): clean this logic up
  const inputTools = params.tools.map((tool) => {
@@ -2416,7 +2431,7 @@ _AbstractChatCompletionRunner_instances = new WeakSet(), _AbstractChatCompletion
  for (let i = this.messages.length - 1; i >= 0; i--) {
  const message = this.messages[i];
  if (isAssistantMessage(message) && message?.tool_calls?.length) {
- return message.tool_calls.at(-1)?.function;
+ return message.tool_calls.filter((x) => x.type === 'function').at(-1)?.function;
  }
  }
  return;
@@ -2894,7 +2909,7 @@ class ChatCompletionStream extends AbstractChatCompletionRunner {
  throw new Error('tool call snapshot missing `type`');
  }
  if (toolCallSnapshot.type === 'function') {
- const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => tool.type === 'function' && tool.function.name === toolCallSnapshot.function.name);
+ const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => isChatCompletionFunctionTool(tool) && tool.function.name === toolCallSnapshot.function.name); // TS doesn't narrow based on isChatCompletionTool
  this._emit('tool_calls.function.arguments.done', {
  name: toolCallSnapshot.function.name,
  index: toolCallIndex,
@@ -6704,7 +6719,7 @@ OpenAI.Evals = Evals;
  OpenAI.Containers = Containers;

  // llm-openai-config.ts
- const DEFAULT_MODEL$1 = 'gpt-4.1-mini';
+ const DEFAULT_MODEL = 'gpt-4.1-mini';
  /** Token costs in USD per 1M tokens. Last updated Feb 2025. */
  const openAiModelCosts = {
  'gpt-4o': {
@@ -6743,6 +6758,18 @@ const openAiModelCosts = {
  inputCost: 0.1 / 1_000_000,
  outputCost: 0.4 / 1_000_000,
  },
+ 'gpt-5': {
+ inputCost: 1.25 / 1_000_000,
+ outputCost: 10 / 1_000_000,
+ },
+ 'gpt-5-mini': {
+ inputCost: 0.25 / 1_000_000,
+ outputCost: 2 / 1_000_000,
+ },
+ 'gpt-5-nano': {
+ inputCost: 0.05 / 1_000_000,
+ outputCost: 0.4 / 1_000_000,
+ },
  'o4-mini': {
  inputCost: 1.1 / 1_000_000,
  outputCost: 4.4 / 1_000_000,
@@ -6810,7 +6837,6 @@ function calculateCost(provider, model, inputTokens, outputTokens, reasoningToke
  return inputCost + outputCost + reasoningCost;
  }

- const DEFAULT_MODEL = 'gpt-4.1-mini';
  /**
  * Fix a broken JSON string by attempting to extract and parse valid JSON content. This function is very lenient and will attempt to fix many types of JSON errors, including unbalanced brackets, missing or extra commas, improperly escaped $ signs, unquoted strings, trailing commas, missing closing brackets or braces, etc.
  * @param {string} jsonStr - The broken JSON string to fix
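As a rough sanity check of the new gpt-5 entries, the table stores USD per single token, so a call's cost is just token counts multiplied by the matching rates (the token counts below are made up for illustration):

```ts
// Back-of-envelope check of the gpt-5-mini rates added above (token counts are illustrative).
const gpt5MiniCosts = { inputCost: 0.25 / 1_000_000, outputCost: 2 / 1_000_000 };

const inputTokens = 12_000;
const outputTokens = 1_500;

const inputCost = inputTokens * gpt5MiniCosts.inputCost;    // 0.003 USD
const outputCost = outputTokens * gpt5MiniCosts.outputCost; // 0.003 USD

console.log((inputCost + outputCost).toFixed(6)); // "0.006000"
```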
@@ -7055,9 +7081,7 @@ function initializeOpenAI(apiKey) {
  });
  }
  /**
- * Fixes broken JSON by sending it to the OpenAI GPT-4.1-mini model as a chat completion.
- * The GPT-4.1-mini model is a large language model that can understand and generate code,
- * including JSON. The returned JSON is the fixed version of the input JSON.
+ * Fixes broken JSON by sending it to OpenAI to fix it.
  * If the model fails to return valid JSON, an error is thrown.
  * @param jsonStr - the broken JSON to fix
  * @param apiKey - the OpenAI API key to use, or undefined to use the value of the OPENAI_API_KEY environment variable
@@ -7201,8 +7225,11 @@ const isSupportedModel = (model) => {
  'o3-mini',
  'gpt-4.1',
  'gpt-4.1-mini',
- 'o4-mini',
  'gpt-4.1-nano',
+ 'gpt-5',
+ 'gpt-5-mini',
+ 'gpt-5-nano',
+ 'o4-mini',
  'o3',
  ].includes(model);
  };
@@ -7213,8 +7240,9 @@ const isSupportedModel = (model) => {
  */
  function supportsTemperature(model) {
  // Reasoning models don't support temperature
- const reasoningModels = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3'];
- return !reasoningModels.includes(model);
+ // GPT-5 models also do not support temperature
+ const reasoningAndGPT5Models = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3', 'gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
+ return !reasoningAndGPT5Models.includes(model);
  }
  /**
  * Checks if the given model is a reasoning model. Reasoning models have different tool choice constraints.
@@ -7225,6 +7253,15 @@ function isReasoningModel(model) {
  const reasoningModels = ['o1', 'o1-mini', 'o3-mini', 'o4-mini', 'o3'];
  return reasoningModels.includes(model);
  }
+ /**
+ * Checks if the given model is a GPT-5 model. GPT-5 models don't support tool_choice other than 'auto'.
+ * @param model The model to check.
+ * @returns True if the model is a GPT-5 model, false otherwise.
+ */
+ function isGPT5Model(model) {
+ const gpt5Models = ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'];
+ return gpt5Models.includes(model);
+ }
  /**
  * Makes a call to OpenAI's Responses API for more advanced use cases with built-in tools.
  *
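Taken together, supportsTemperature and the new isGPT5Model gate which request options a caller may set. A minimal sketch of that branching, assuming the helpers defined above are in scope (buildRequestOptions itself is hypothetical):

```ts
// Hypothetical caller-side branching on the helpers defined above.
function buildRequestOptions(model: string) {
  const opts: Record<string, unknown> = { model };
  if (supportsTemperature(model)) {
    opts.temperature = 0.2; // skipped for o-series reasoning models and gpt-5* models
  }
  if (!isReasoningModel(model) && !isGPT5Model(model)) {
    // only non-reasoning, non-GPT-5 models accept a forced tool_choice
    opts.tool_choice = { type: 'web_search_preview' };
  }
  return opts;
}

// buildRequestOptions('gpt-4.1-mini') -> { model, temperature, tool_choice }
// buildRequestOptions('gpt-5-mini')   -> { model } only
```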
@@ -7252,7 +7289,7 @@ function isReasoningModel(model) {
  * @throws Error if the API call fails
  */
  const makeResponsesAPICall = async (input, options = {}) => {
- const normalizedModel = normalizeModelName(options.model || DEFAULT_MODEL$1);
+ const normalizedModel = normalizeModelName(options.model || DEFAULT_MODEL);
  const apiKey = options.apiKey || process.env.OPENAI_API_KEY;
  if (!apiKey) {
  throw new Error('OpenAI API key is not provided and OPENAI_API_KEY environment variable is not set');
@@ -7363,7 +7400,7 @@ const makeResponsesAPICall = async (input, options = {}) => {
  * });
  */
  async function makeLLMCall(input, options = {}) {
- const { apiKey, model = DEFAULT_MODEL$1, responseFormat = 'text', tools, useCodeInterpreter = false, useWebSearch = false, imageBase64, imageDetail = 'high', context, } = options;
+ const { apiKey, model = DEFAULT_MODEL, responseFormat = 'text', tools, useCodeInterpreter = false, useWebSearch = false, imageBase64, imageDetail = 'high', context, } = options;
  // Validate model
  const normalizedModel = normalizeModelName(model);
  if (!isSupportedModel(normalizedModel)) {
@@ -7455,8 +7492,8 @@ async function makeLLMCall(input, options = {}) {
  }
  if (useWebSearch) {
  responsesOptions.tools = [{ type: 'web_search_preview' }];
- // For reasoning models, we can't force tool choice - they only support 'auto'
- if (!isReasoningModel(normalizedModel)) {
+ // For reasoning models and GPT-5 models, we can't force tool choice - they only support 'auto'
+ if (!isReasoningModel(normalizedModel) && !isGPT5Model(normalizedModel)) {
  responsesOptions.tool_choice = { type: 'web_search_preview' };
  }
  }
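With gpt-5, gpt-5-mini and gpt-5-nano now accepted by isSupportedModel, a call like the following should route through the new guards. This is a hypothetical usage sketch; the import path and return shape are assumptions rather than documented API:

```ts
// Hypothetical usage of the newly supported gpt-5 models (import path and return shape assumed).
import { makeLLMCall } from '@discomedia/utils';

const result = await makeLLMCall('Summarize the latest TypeScript release notes.', {
  model: 'gpt-5-mini',  // now accepted by isSupportedModel
  useWebSearch: true,   // tool_choice is left as 'auto' for gpt-5 models
  responseFormat: 'text',
});
console.log(result);
```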