@ai-sdk/openai 2.1.0-beta.5 → 2.1.0-beta.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +17 -0
- package/dist/index.d.mts +6 -6
- package/dist/index.d.ts +6 -6
- package/dist/index.js +37 -23
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +37 -23
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +225 -14
- package/dist/internal/index.d.ts +225 -14
- package/dist/internal/index.js +55 -22
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +47 -22
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +4 -4
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,22 @@
 # @ai-sdk/openai
 
+## 2.1.0-beta.7
+
+### Patch Changes
+
+- 77f2b20: enables code_interpreter and file_search capabilities in the Azure provider through the Responses API
+- 8dac895: feat: `LanguageModelV3`
+- 10c1322: fix: moved dependency `@ai-sdk/test-server` to devDependencies
+- Updated dependencies [8dac895]
+  - @ai-sdk/provider-utils@3.1.0-beta.5
+  - @ai-sdk/provider@2.1.0-beta.3
+
+## 2.1.0-beta.6
+
+### Patch Changes
+
+- fe49278: feat(provider/openai): only send item references for reasoning when store: true
+
 ## 2.1.0-beta.5
 
 ### Patch Changes
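The fe49278 entry (beta.6) changes how previously generated reasoning is replayed to the Responses API depending on the `store` setting, and 8dac895 moves the provider onto the `LanguageModelV3` specification. A minimal sketch of controlling `store` through provider options; the `generateText` call, model id, and prompt are illustrative assumptions rather than part of this diff:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch only: with store: true (the OpenAI default), the provider now sends
// lightweight item references for prior reasoning; with store: false, it
// replays the full reasoning items, including encrypted content.
const { text } = await generateText({
  model: openai.responses('gpt-4o'),
  providerOptions: {
    openai: { store: false },
  },
  prompt: 'Summarize the latest changes.',
});
console.log(text);
```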
package/dist/index.d.mts
CHANGED
@@ -1,4 +1,4 @@
-import { ProviderV3,
+import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
 import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
 import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z } from 'zod/v4';
@@ -245,23 +245,23 @@ type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string &
 type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
 
 interface OpenAIProvider extends ProviderV3 {
-    (modelId: OpenAIResponsesModelId):
+    (modelId: OpenAIResponsesModelId): LanguageModelV3;
     /**
     Creates an OpenAI model for text generation.
     */
-    languageModel(modelId: OpenAIResponsesModelId):
+    languageModel(modelId: OpenAIResponsesModelId): LanguageModelV3;
     /**
     Creates an OpenAI chat model for text generation.
     */
-    chat(modelId: OpenAIChatModelId):
+    chat(modelId: OpenAIChatModelId): LanguageModelV3;
    /**
     Creates an OpenAI responses API model for text generation.
     */
-    responses(modelId: OpenAIResponsesModelId):
+    responses(modelId: OpenAIResponsesModelId): LanguageModelV3;
     /**
     Creates an OpenAI completion model for text generation.
     */
-    completion(modelId: OpenAICompletionModelId):
+    completion(modelId: OpenAICompletionModelId): LanguageModelV3;
     /**
     Creates a model for text embeddings.
     */
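With this change, every model-creating entry point on `OpenAIProvider` is typed as returning `LanguageModelV3`. A short sketch of how the narrowed return type surfaces for a consumer; the model ids are illustrative:

```ts
import { openai } from '@ai-sdk/openai';
import type { LanguageModelV3 } from '@ai-sdk/provider';

// All of these factories now return LanguageModelV3.
const viaCall: LanguageModelV3 = openai('gpt-4o');                // callable provider form
const viaChat: LanguageModelV3 = openai.chat('gpt-4o');           // Chat Completions API
const viaResponses: LanguageModelV3 = openai.responses('gpt-4o'); // Responses API
const viaCompletion: LanguageModelV3 = openai.completion('gpt-3.5-turbo-instruct');
```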
package/dist/index.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import { ProviderV3,
+import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
 import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
 import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z } from 'zod/v4';
@@ -245,23 +245,23 @@ type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string &
 type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
 
 interface OpenAIProvider extends ProviderV3 {
-    (modelId: OpenAIResponsesModelId):
+    (modelId: OpenAIResponsesModelId): LanguageModelV3;
     /**
     Creates an OpenAI model for text generation.
     */
-    languageModel(modelId: OpenAIResponsesModelId):
+    languageModel(modelId: OpenAIResponsesModelId): LanguageModelV3;
     /**
     Creates an OpenAI chat model for text generation.
     */
-    chat(modelId: OpenAIChatModelId):
+    chat(modelId: OpenAIChatModelId): LanguageModelV3;
     /**
     Creates an OpenAI responses API model for text generation.
     */
-    responses(modelId: OpenAIResponsesModelId):
+    responses(modelId: OpenAIResponsesModelId): LanguageModelV3;
     /**
     Creates an OpenAI completion model for text generation.
     */
-    completion(modelId: OpenAICompletionModelId):
+    completion(modelId: OpenAICompletionModelId): LanguageModelV3;
     /**
     Creates a model for text embeddings.
     */
package/dist/index.js
CHANGED
@@ -415,7 +415,7 @@ function prepareChatTools({
 // src/chat/openai-chat-language-model.ts
 var OpenAIChatLanguageModel = class {
   constructor(modelId, config) {
-    this.specificationVersion = "
+    this.specificationVersion = "v3";
     this.supportedUrls = {
       "image/*": [/^https?:\/\/.*$/]
     };
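The same specification-version bump recurs for the completion and responses model classes in the hunks below. A trivial sketch of how the bundled value is observable at runtime (the model id is illustrative):

```ts
import { openai } from '@ai-sdk/openai';

// Chat, completion, and responses language models all report "v3" now.
const model = openai.chat('gpt-4o');
console.log(model.specificationVersion); // "v3"
```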
@@ -1224,7 +1224,7 @@ var openaiCompletionProviderOptions = import_v44.z.object({
 // src/completion/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
   constructor(modelId, config) {
-    this.specificationVersion = "
+    this.specificationVersion = "v3";
     this.supportedUrls = {
       // No URLs are supported for completion models.
     };
@@ -2088,26 +2088,40 @@ async function convertToOpenAIResponsesInput({
         });
         const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
         if (reasoningId != null) {
-          const
-
-
-
-
-
-
-
-
-
-          if (existingReasoningMessage === void 0) {
-            reasoningMessages[reasoningId] = {
-              type: "reasoning",
-              id: reasoningId,
-              encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
-              summary: summaryParts
-            };
-            input.push(reasoningMessages[reasoningId]);
+          const reasoningMessage = reasoningMessages[reasoningId];
+          if (store) {
+            if (reasoningMessage === void 0) {
+              input.push({ type: "item_reference", id: reasoningId });
+              reasoningMessages[reasoningId] = {
+                type: "reasoning",
+                id: reasoningId,
+                summary: []
+              };
+            }
           } else {
-
+            const summaryParts = [];
+            if (part.text.length > 0) {
+              summaryParts.push({
+                type: "summary_text",
+                text: part.text
+              });
+            } else if (reasoningMessage !== void 0) {
+              warnings.push({
+                type: "other",
+                message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
+              });
+            }
+            if (reasoningMessage === void 0) {
+              reasoningMessages[reasoningId] = {
+                type: "reasoning",
+                id: reasoningId,
+                encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
+                summary: summaryParts
+              };
+              input.push(reasoningMessages[reasoningId]);
+            } else {
+              reasoningMessage.summary.push(...summaryParts);
+            }
           }
         }
       } else {
         warnings.push({
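The reworked branch above produces two different input shapes for previously generated reasoning. A sketch of both payloads, with ids and text values that are purely illustrative:

```ts
// With store: true, only a reference to the server-side reasoning item is sent.
const itemReference = {
  type: 'item_reference',
  id: 'rs_123', // illustrative reasoning item id
};

// With store: false, the full reasoning item is replayed, including any
// encrypted content and the accumulated summary parts.
const replayedReasoning = {
  type: 'reasoning',
  id: 'rs_123',
  encrypted_content: 'gAAAA...', // illustrative placeholder
  summary: [{ type: 'summary_text', text: 'Reasoned about the request.' }],
};
```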
@@ -2360,7 +2374,7 @@ var LOGPROBS_SCHEMA = import_v415.z.array(
 );
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
-    this.specificationVersion = "
+    this.specificationVersion = "v3";
     this.supportedUrls = {
       "image/*": [/^https?:\/\/.*$/],
       "application/pdf": [/^https?:\/\/.*$/]
@@ -3870,7 +3884,7 @@ var openaiTranscriptionResponseSchema = import_v418.z.object({
 });
 
 // src/version.ts
-var VERSION = true ? "2.1.0-beta.
+var VERSION = true ? "2.1.0-beta.7" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {