@ai-sdk/openai 2.1.0-beta.12 → 2.1.0-beta.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +2 -2
- package/dist/index.d.ts +2 -2
- package/dist/index.js +13 -6
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +13 -6
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +2 -2
- package/dist/internal/index.d.ts +2 -2
- package/dist/internal/index.js +12 -5
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +12 -5
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/internal/index.mjs
CHANGED
```diff
@@ -1597,9 +1597,13 @@ import { z as z8 } from "zod/v4";
 var modelMaxImagesPerCall = {
   "dall-e-3": 1,
   "dall-e-2": 10,
-  "gpt-image-1": 10
+  "gpt-image-1": 10,
+  "gpt-image-1-mini": 10
 };
-var hasDefaultResponseFormat = /* @__PURE__ */ new Set(["gpt-image-1"]);
+var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
+  "gpt-image-1",
+  "gpt-image-1-mini"
+]);
 
 // src/image/openai-image-model.ts
 var OpenAIImageModel = class {
```
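The image-model hunk registers "gpt-image-1-mini" next to "gpt-image-1": it gets the same 10-images-per-call cap and joins the `hasDefaultResponseFormat` set (which, going by its name, marks models where the provider relies on the API's default response format instead of sending `response_format`). A minimal consumer-side sketch of what this enables, assuming the AI SDK v5 image API (`experimental_generateImage` from `ai`) rather than anything shown in this diff:

```ts
// Sketch only, not part of the diff. Assumes AI SDK v5 and an OPENAI_API_KEY in the env.
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';

const { image } = await generateImage({
  // "gpt-image-1-mini" becomes a recognized image model id with this release.
  model: openai.image('gpt-image-1-mini'),
  prompt: 'A watercolor lighthouse at dusk',
  n: 1, // modelMaxImagesPerCall allows up to 10 images per call for this model
});

console.log(`received ${image.base64.length} base64 characters`);
```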
```diff
@@ -3367,7 +3371,8 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({
           type: "tool-input-start",
           id: value.item.id,
-          toolName: webSearchToolName != null ? webSearchToolName : "web_search"
+          toolName: webSearchToolName != null ? webSearchToolName : "web_search",
+          providerExecuted: true
         });
       } else if (value.item.type === "computer_call") {
         ongoingToolCalls[value.output_index] = {
@@ -3377,7 +3382,8 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({
           type: "tool-input-start",
           id: value.item.id,
-          toolName: "computer_use"
+          toolName: "computer_use",
+          providerExecuted: true
         });
       } else if (value.item.type === "code_interpreter_call") {
         ongoingToolCalls[value.output_index] = {
@@ -3390,7 +3396,8 @@ var OpenAIResponsesLanguageModel = class {
         controller.enqueue({
           type: "tool-input-start",
           id: value.item.id,
-          toolName: "code_interpreter"
+          toolName: "code_interpreter",
+          providerExecuted: true
         });
         controller.enqueue({
           type: "tool-input-delta",
```
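The responses-model hunks all make the same change: `tool-input-start` stream parts for provider-executed tools (web search, computer use, code interpreter) now carry `providerExecuted: true`, letting consumers distinguish them from tool calls they must execute locally. A hedged sketch of reading that flag, assuming AI SDK v5's `streamText` fullStream part shape and the `openai.tools.webSearchPreview` helper (neither appears in this diff):

```ts
// Sketch only, not part of the diff. Tool helper name and stream-part shape
// are assumptions based on AI SDK v5 documentation, not on this release.
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

const result = streamText({
  model: openai.responses('gpt-4o'),
  tools: { web_search_preview: openai.tools.webSearchPreview({}) },
  prompt: 'Summarize the latest AI SDK release notes.',
});

for await (const part of result.fullStream) {
  if (part.type === 'tool-input-start') {
    // With this release, providerExecuted is true for tools OpenAI runs server-side.
    console.log(part.toolName, part.providerExecuted ? '(provider-executed)' : '');
  }
}
```

The `webSearchToolName != null ? webSearchToolName : "web_search"` fallback in the web-search hunk suggests the reported `toolName` follows however the web search tool was registered, defaulting to `web_search`.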