@ai-sdk/openai 2.0.38 → 2.1.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,10 +1,9 @@
 import { LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2CallOptions, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
-import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
 import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z } from 'zod/v4';

 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
-declare const openaiChatLanguageModelOptions: z.ZodObject<{
+declare const openaiProviderOptions: z.ZodObject<{
 logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
 logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
 parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
@@ -34,7 +33,7 @@ declare const openaiChatLanguageModelOptions: z.ZodObject<{
 promptCacheKey: z.ZodOptional<z.ZodString>;
 safetyIdentifier: z.ZodOptional<z.ZodString>;
 }, z.core.$strip>;
-type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;
+type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;

 type OpenAIChatConfig = {
 provider: string;
@@ -248,214 +247,4 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
 }, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

-declare const codeInterpreterInputSchema: z.ZodObject<{
-code: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-containerId: z.ZodString;
-}, z.core.$strip>;
-declare const codeInterpreterOutputSchema: z.ZodObject<{
-outputs: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodDiscriminatedUnion<[z.ZodObject<{
-type: z.ZodLiteral<"logs">;
-logs: z.ZodString;
-}, z.core.$strip>, z.ZodObject<{
-type: z.ZodLiteral<"image">;
-url: z.ZodString;
-}, z.core.$strip>]>>>>;
-}, z.core.$strip>;
-declare const codeInterpreterArgsSchema: z.ZodObject<{
-container: z.ZodOptional<z.ZodUnion<readonly [z.ZodString, z.ZodObject<{
-fileIds: z.ZodOptional<z.ZodArray<z.ZodString>>;
-}, z.core.$strip>]>>;
-}, z.core.$strip>;
-type CodeInterpreterArgs = {
-/**
- * The code interpreter container.
- * Can be a container ID
- * or an object that specifies uploaded file IDs to make available to your code.
- */
-container?: string | {
-fileIds?: string[];
-};
-};
-declare const codeInterpreterToolFactory: _ai_sdk_provider_utils.ProviderDefinedToolFactoryWithOutputSchema<{
-/**
- * The code to run, or null if not available.
- */
-code?: string | null;
-/**
- * The ID of the container used to run the code.
- */
-containerId: string;
-}, {
-/**
- * The outputs generated by the code interpreter, such as logs or images.
- * Can be null if no outputs are available.
- */
-outputs?: Array<{
-type: "logs";
-/**
- * The logs output from the code interpreter.
- */
-logs: string;
-} | {
-type: "image";
-/**
- * The URL of the image output from the code interpreter.
- */
-url: string;
-}> | null;
-}, CodeInterpreterArgs>;
-declare const codeInterpreter: (args?: CodeInterpreterArgs) => _ai_sdk_provider_utils.Tool<{
-/**
- * The code to run, or null if not available.
- */
-code?: string | null;
-/**
- * The ID of the container used to run the code.
- */
-containerId: string;
-}, {
-/**
- * The outputs generated by the code interpreter, such as logs or images.
- * Can be null if no outputs are available.
- */
-outputs?: Array<{
-type: "logs";
-/**
- * The logs output from the code interpreter.
- */
-logs: string;
-} | {
-type: "image";
-/**
- * The URL of the image output from the code interpreter.
- */
-url: string;
-}> | null;
-}>;
-
-/**
- * A filter used to compare a specified attribute key to a given value using a defined comparison operation.
- */
-type OpenAIResponsesFileSearchToolComparisonFilter = {
-/**
- * The key to compare against the value.
- */
-key: string;
-/**
- * Specifies the comparison operator: eq, ne, gt, gte, lt, lte.
- */
-type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte';
-/**
- * The value to compare against the attribute key; supports string, number, or boolean types.
- */
-value: string | number | boolean;
-};
-/**
- * Combine multiple filters using and or or.
- */
-type OpenAIResponsesFileSearchToolCompoundFilter = {
-/**
- * Type of operation: and or or.
- */
-type: 'and' | 'or';
-/**
- * Array of filters to combine. Items can be ComparisonFilter or CompoundFilter.
- */
-filters: Array<OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter>;
-};
-
-declare const fileSearchArgsSchema: z.ZodObject<{
-vectorStoreIds: z.ZodArray<z.ZodString>;
-maxNumResults: z.ZodOptional<z.ZodNumber>;
-ranking: z.ZodOptional<z.ZodObject<{
-ranker: z.ZodOptional<z.ZodString>;
-scoreThreshold: z.ZodOptional<z.ZodNumber>;
-}, z.core.$strip>>;
-filters: z.ZodOptional<z.ZodUnion<readonly [z.ZodObject<{
-key: z.ZodString;
-type: z.ZodEnum<{
-lt: "lt";
-ne: "ne";
-eq: "eq";
-gt: "gt";
-gte: "gte";
-lte: "lte";
-}>;
-value: z.ZodUnion<readonly [z.ZodString, z.ZodNumber, z.ZodBoolean]>;
-}, z.core.$strip>, z.ZodType<any, unknown, z.core.$ZodTypeInternals<any, unknown>>]>>;
-}, z.core.$strip>;
-declare const fileSearchOutputSchema: z.ZodObject<{
-queries: z.ZodArray<z.ZodString>;
-results: z.ZodNullable<z.ZodArray<z.ZodObject<{
-attributes: z.ZodRecord<z.ZodString, z.ZodUnknown>;
-fileId: z.ZodString;
-filename: z.ZodString;
-score: z.ZodNumber;
-text: z.ZodString;
-}, z.core.$strip>>>;
-}, z.core.$strip>;
-declare const fileSearch: _ai_sdk_provider_utils.ProviderDefinedToolFactoryWithOutputSchema<{}, {
-/**
- * The search query to execute.
- */
-queries: string[];
-/**
- * The results of the file search tool call.
- */
-results: null | {
-/**
- * Set of 16 key-value pairs that can be attached to an object.
- * This can be useful for storing additional information about the object
- * in a structured format, and querying for objects via API or the dashboard.
- * Keys are strings with a maximum length of 64 characters.
- * Values are strings with a maximum length of 512 characters, booleans, or numbers.
- */
-attributes: Record<string, unknown>;
-/**
- * The unique ID of the file.
- */
-fileId: string;
-/**
- * The name of the file.
- */
-filename: string;
-/**
- * The relevance score of the file - a value between 0 and 1.
- */
-score: number;
-/**
- * The text that was retrieved from the file.
- */
-text: string;
-}[];
-}, {
-/**
- * List of vector store IDs to search through.
- */
-vectorStoreIds: string[];
-/**
- * Maximum number of search results to return. Defaults to 10.
- */
-maxNumResults?: number;
-/**
- * Ranking options for the search.
- */
-ranking?: {
-/**
- * The ranker to use for the file search.
- */
-ranker?: string;
-/**
- * The score threshold for the file search, a number between 0 and 1.
- * Numbers closer to 1 will attempt to return only the most relevant results,
- * but may return fewer results.
- */
-scoreThreshold?: number;
-};
-/**
- * A filter to apply.
- */
-filters?: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter;
-}>;
-
-export { OpenAIChatLanguageModel, type OpenAIChatLanguageModelOptions, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, codeInterpreter, codeInterpreterArgsSchema, codeInterpreterInputSchema, codeInterpreterOutputSchema, codeInterpreterToolFactory, fileSearch, fileSearchArgsSchema, fileSearchOutputSchema, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiChatLanguageModelOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions };
+export { OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, type OpenAIProviderOptions, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions, openaiProviderOptions };
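
Aside from the removed tool declarations, the net change in this declaration file is a rename: the chat provider-options schema openaiChatLanguageModelOptions and its inferred type OpenAIChatLanguageModelOptions become openaiProviderOptions and OpenAIProviderOptions. A minimal sketch of the corresponding consumer-side update follows; the '@ai-sdk/openai/internal' subpath is an assumption, since the diff does not name the entry point this file belongs to.

// Hypothetical import update; '@ai-sdk/openai/internal' is assumed, not shown in the diff.
// 2.0.38:
// import { openaiChatLanguageModelOptions, type OpenAIChatLanguageModelOptions } from '@ai-sdk/openai/internal';
// 2.1.0-beta.1:
import { openaiProviderOptions, type OpenAIProviderOptions } from '@ai-sdk/openai/internal';

// The schema shape itself is unchanged; only the identifiers were renamed.
const options: OpenAIProviderOptions = {
  parallelToolCalls: false,
  promptCacheKey: 'docs-example',
};
openaiProviderOptions.parse(options); // still a zod object schema (z.ZodObject)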
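
The code interpreter and file search declarations (the input/output/args schemas, codeInterpreterToolFactory, codeInterpreter, fileSearch, and the file-search filter types) are no longer exported from this entry point in 2.1.0-beta.1; the diff does not show whether they are still available elsewhere in the package. For reference, a sketch of how the removed factories were invoked, following only the removed argument types above (the ID values are placeholders):

// Based solely on the removed declarations; availability in 2.1.0-beta.1 is not shown here.
const codeInterpreterTool = codeInterpreter({
  container: { fileIds: ['file_abc'] }, // a container ID string was also accepted
});

const fileSearchTool = fileSearch({
  vectorStoreIds: ['vs_abc'],
  maxNumResults: 5,
  ranking: { scoreThreshold: 0.5 },
});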
@@ -27,20 +27,12 @@ __export(internal_exports, {
 OpenAIResponsesLanguageModel: () => OpenAIResponsesLanguageModel,
 OpenAISpeechModel: () => OpenAISpeechModel,
 OpenAITranscriptionModel: () => OpenAITranscriptionModel,
-codeInterpreter: () => codeInterpreter,
-codeInterpreterArgsSchema: () => codeInterpreterArgsSchema,
-codeInterpreterInputSchema: () => codeInterpreterInputSchema,
-codeInterpreterOutputSchema: () => codeInterpreterOutputSchema,
-codeInterpreterToolFactory: () => codeInterpreterToolFactory,
-fileSearch: () => fileSearch,
-fileSearchArgsSchema: () => fileSearchArgsSchema,
-fileSearchOutputSchema: () => fileSearchOutputSchema,
 hasDefaultResponseFormat: () => hasDefaultResponseFormat,
 modelMaxImagesPerCall: () => modelMaxImagesPerCall,
 openAITranscriptionProviderOptions: () => openAITranscriptionProviderOptions,
-openaiChatLanguageModelOptions: () => openaiChatLanguageModelOptions,
 openaiCompletionProviderOptions: () => openaiCompletionProviderOptions,
-openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions
+openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions,
+openaiProviderOptions: () => openaiProviderOptions
 });
 module.exports = __toCommonJS(internal_exports);

@@ -278,7 +270,7 @@ function mapOpenAIFinishReason(finishReason) {

 // src/chat/openai-chat-options.ts
 var import_v42 = require("zod/v4");
-var openaiChatLanguageModelOptions = import_v42.z.object({
+var openaiProviderOptions = import_v42.z.object({
 /**
  * Modify the likelihood of specified tokens appearing in the completion.
  *
@@ -460,7 +452,7 @@ var OpenAIChatLanguageModel = class {
 const openaiOptions = (_a = await (0, import_provider_utils3.parseProviderOptions)({
 provider: "openai",
 providerOptions,
-schema: openaiChatLanguageModelOptions
+schema: openaiProviderOptions
 })) != null ? _a : {};
 const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
 if (topK != null) {
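
The chat model still validates per-call options with parseProviderOptions, now against the renamed openaiProviderOptions schema. The following is illustrative only: it shows how such options are typically supplied from application code through the AI SDK core API (generateText and the providerOptions.openai key come from the 'ai' package, which is outside this diff).

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai('gpt-4o'),
  prompt: 'Say hello.',
  providerOptions: {
    // validated against openaiProviderOptions (formerly openaiChatLanguageModelOptions)
    openai: { parallelToolCalls: false, promptCacheKey: 'hello-v1' },
  },
});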
@@ -2145,6 +2137,7 @@ async function convertToOpenAIResponsesInput({
 });
 break;
 }
+// assistant tool result parts are from provider-executed tools:
 case "tool-result": {
 if (store) {
 input.push({ type: "item_reference", id: part.toolCallId });
@@ -2164,40 +2157,26 @@ async function convertToOpenAIResponsesInput({
 });
 const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
 if (reasoningId != null) {
-const reasoningMessage = reasoningMessages[reasoningId];
-if (store) {
-if (reasoningMessage === void 0) {
-input.push({ type: "item_reference", id: reasoningId });
-reasoningMessages[reasoningId] = {
-type: "reasoning",
-id: reasoningId,
-summary: []
-};
-}
+const existingReasoningMessage = reasoningMessages[reasoningId];
+const summaryParts = [];
+if (part.text.length > 0) {
+summaryParts.push({ type: "summary_text", text: part.text });
+} else if (existingReasoningMessage !== void 0) {
+warnings.push({
+type: "other",
+message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
+});
+}
+if (existingReasoningMessage === void 0) {
+reasoningMessages[reasoningId] = {
+type: "reasoning",
+id: reasoningId,
+encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
+summary: summaryParts
+};
+input.push(reasoningMessages[reasoningId]);
 } else {
-const summaryParts = [];
-if (part.text.length > 0) {
-summaryParts.push({
-type: "summary_text",
-text: part.text
-});
-} else if (reasoningMessage !== void 0) {
-warnings.push({
-type: "other",
-message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
-});
-}
-if (reasoningMessage === void 0) {
-reasoningMessages[reasoningId] = {
-type: "reasoning",
-id: reasoningId,
-encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
-summary: summaryParts
-};
-input.push(reasoningMessages[reasoningId]);
-} else {
-reasoningMessage.summary.push(...summaryParts);
-}
+existingReasoningMessage.summary.push(...summaryParts);
 }
 } else {
 warnings.push({
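
The reasoning branch no longer emits item_reference entries when store is enabled; every reasoning part is now converted into an inline reasoning item that carries encrypted_content, and parts sharing an itemId are merged into that item's summary. A simplified, standalone restatement of the new merging logic (the names below are local to this sketch, not package exports):

type ReasoningItem = {
  type: 'reasoning';
  id: string;
  encrypted_content?: string | null;
  summary: Array<{ type: 'summary_text'; text: string }>;
};

function appendReasoningPart(
  items: Record<string, ReasoningItem>,
  input: Array<ReasoningItem | object>,
  part: { text: string },
  options: { itemId: string; reasoningEncryptedContent?: string | null },
  warnings: Array<{ type: 'other'; message: string }>,
): void {
  const existing = items[options.itemId];
  const summaryParts: ReasoningItem['summary'] =
    part.text.length > 0 ? [{ type: 'summary_text', text: part.text }] : [];

  // An empty part may start a new reasoning item but cannot extend an existing one.
  if (part.text.length === 0 && existing !== undefined) {
    warnings.push({ type: 'other', message: 'Cannot append empty reasoning part.' });
    return;
  }

  if (existing === undefined) {
    const item: ReasoningItem = {
      type: 'reasoning',
      id: options.itemId,
      encrypted_content: options.reasoningEncryptedContent,
      summary: summaryParts,
    };
    items[options.itemId] = item;
    input.push(item); // always sent inline now, independent of the `store` setting
  } else {
    existing.summary.push(...summaryParts);
  }
}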
@@ -2297,9 +2276,6 @@ var codeInterpreterToolFactory = (0, import_provider_utils10.createProviderDefin
 inputSchema: codeInterpreterInputSchema,
 outputSchema: codeInterpreterOutputSchema
 });
-var codeInterpreter = (args = {}) => {
-return codeInterpreterToolFactory(args);
-};

 // src/tool/file-search.ts
 var import_provider_utils11 = require("@ai-sdk/provider-utils");
@@ -2950,7 +2926,7 @@ var OpenAIResponsesLanguageModel = class {
 ])
 ),
 service_tier: import_v418.z.string().nullish(),
-incomplete_details: import_v418.z.object({ reason: import_v418.z.string() }).nullish(),
+incomplete_details: import_v418.z.object({ reason: import_v418.z.string() }).nullable(),
 usage: usageSchema2
 })
 ),
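
Changing incomplete_details from .nullish() to .nullable() tightens the response schema: the field may still be null, but it may no longer be missing or undefined. A minimal sketch of the difference, using the same zod/v4 import as the bundle:

import { z } from 'zod/v4';

const loose = z.object({ incomplete_details: z.object({ reason: z.string() }).nullish() });
const strict = z.object({ incomplete_details: z.object({ reason: z.string() }).nullable() });

loose.parse({});                            // ok: key may be absent (or undefined)
strict.parse({ incomplete_details: null }); // ok: null is still accepted
strict.safeParse({}).success;               // false: the key must now be present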
@@ -3837,19 +3813,11 @@ var openaiResponsesProviderOptionsSchema = import_v418.z.object({
 OpenAIResponsesLanguageModel,
 OpenAISpeechModel,
 OpenAITranscriptionModel,
-codeInterpreter,
-codeInterpreterArgsSchema,
-codeInterpreterInputSchema,
-codeInterpreterOutputSchema,
-codeInterpreterToolFactory,
-fileSearch,
-fileSearchArgsSchema,
-fileSearchOutputSchema,
 hasDefaultResponseFormat,
 modelMaxImagesPerCall,
 openAITranscriptionProviderOptions,
-openaiChatLanguageModelOptions,
 openaiCompletionProviderOptions,
-openaiEmbeddingProviderOptions
+openaiEmbeddingProviderOptions,
+openaiProviderOptions
 });
 //# sourceMappingURL=index.js.map