@ai-sdk/openai 2.0.0-alpha.10 → 2.0.0-alpha.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +19 -0
- package/dist/index.d.mts +3 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +61 -2
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +61 -2
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +12 -0
- package/dist/internal/index.d.ts +12 -0
- package/dist/internal/index.js +61 -2
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +61 -2
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,24 @@
 # @ai-sdk/openai
 
+## 2.0.0-alpha.12
+
+### Patch Changes
+
+- 2f542fa: Add reasoning-part-finish parts for reasoning models in the responses API
+- e2aceaf: feat: add raw chunk support
+- Updated dependencies [e2aceaf]
+  - @ai-sdk/provider@2.0.0-alpha.12
+  - @ai-sdk/provider-utils@3.0.0-alpha.12
+
+## 2.0.0-alpha.11
+
+### Patch Changes
+
+- 8d12da5: feat(provider/openai): add serviceTier option for flex processing
+- Updated dependencies [c1e6647]
+  - @ai-sdk/provider@2.0.0-alpha.11
+  - @ai-sdk/provider-utils@3.0.0-alpha.11
+
 ## 2.0.0-alpha.10
 
 ### Patch Changes
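Usage note: the serviceTier option added in 2.0.0-alpha.11 is exposed as an OpenAI provider option accepting "auto" or "flex" (see the schema and implementation diffs below). The following is a minimal sketch of opting into flex processing; it assumes the AI SDK v5 generateText API from the ai package, and the model and prompt are purely illustrative:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Request flex processing: cheaper, but with higher latency.
// Only o3 and o4-mini models are eligible (see the guard in index.js below).
const result = await generateText({
  model: openai.responses('o4-mini'),
  prompt: 'Summarize the latest changelog entry.',
  providerOptions: {
    openai: { serviceTier: 'flex' }, // 'auto' (default) or 'flex'
  },
});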
package/dist/index.d.mts
CHANGED
@@ -164,12 +164,14 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<["auto", "flex"]>>>;
 }, "strip", z.ZodTypeAny, {
     user?: string | null | undefined;
     parallelToolCalls?: boolean | null | undefined;
     reasoningEffort?: string | null | undefined;
     store?: boolean | null | undefined;
     metadata?: any;
+    serviceTier?: "auto" | "flex" | null | undefined;
     previousResponseId?: string | null | undefined;
     strictSchemas?: boolean | null | undefined;
     instructions?: string | null | undefined;
@@ -180,6 +182,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     reasoningEffort?: string | null | undefined;
     store?: boolean | null | undefined;
     metadata?: any;
+    serviceTier?: "auto" | "flex" | null | undefined;
     previousResponseId?: string | null | undefined;
     strictSchemas?: boolean | null | undefined;
     instructions?: string | null | undefined;
package/dist/index.d.ts
CHANGED
@@ -164,12 +164,14 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     strictSchemas: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
     instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<["auto", "flex"]>>>;
 }, "strip", z.ZodTypeAny, {
     user?: string | null | undefined;
     parallelToolCalls?: boolean | null | undefined;
     reasoningEffort?: string | null | undefined;
     store?: boolean | null | undefined;
     metadata?: any;
+    serviceTier?: "auto" | "flex" | null | undefined;
     previousResponseId?: string | null | undefined;
     strictSchemas?: boolean | null | undefined;
     instructions?: string | null | undefined;
@@ -180,6 +182,7 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
     reasoningEffort?: string | null | undefined;
     store?: boolean | null | undefined;
     metadata?: any;
+    serviceTier?: "auto" | "flex" | null | undefined;
     previousResponseId?: string | null | undefined;
     strictSchemas?: boolean | null | undefined;
     instructions?: string | null | undefined;
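For the unsupported-model path, the guard added in package/dist/index.js (next section) pushes a warning and strips service_tier instead of failing the request. A hedged sketch of what a caller might observe; the call shape is an assumption (AI SDK v5 generateText), while the warning fields come directly from the diff:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// gpt-4o-mini is outside the o3/o4-mini allow-list, so the provider is
// expected to drop service_tier from the request body and report a warning.
const { warnings } = await generateText({
  model: openai('gpt-4o-mini'),
  prompt: 'Hello',
  providerOptions: { openai: { serviceTier: 'flex' } },
});

console.log(warnings);
// -> [{ type: 'unsupported-setting', setting: 'serviceTier',
//       details: 'flex processing is only available for o3 and o4-mini models' }]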
package/dist/index.js
CHANGED
@@ -282,7 +282,14 @@ var openaiProviderOptions = import_zod.z.object({
    *
    * @default true
    */
-  structuredOutputs: import_zod.z.boolean().optional()
+  structuredOutputs: import_zod.z.boolean().optional(),
+  /**
+   * Service tier for the request. Set to 'flex' for 50% cheaper processing
+   * at the cost of increased latency. Only available for o3 and o4-mini models.
+   *
+   * @default 'auto'
+   */
+  serviceTier: import_zod.z.enum(["auto", "flex"]).optional()
 });
 
 // src/openai-error.ts
@@ -453,6 +460,7 @@ var OpenAIChatLanguageModel = class {
       metadata: openaiOptions.metadata,
       prediction: openaiOptions.prediction,
       reasoning_effort: openaiOptions.reasoningEffort,
+      service_tier: openaiOptions.serviceTier,
       // messages:
       messages
     };
@@ -526,6 +534,14 @@ var OpenAIChatLanguageModel = class {
         });
       }
     }
+    if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "serviceTier",
+        details: "flex processing is only available for o3 and o4-mini models"
+      });
+      baseArgs.service_tier = void 0;
+    }
     const {
       tools: openaiTools2,
       toolChoice: openaiToolChoice,
@@ -652,6 +668,9 @@ var OpenAIChatLanguageModel = class {
         },
         transform(chunk, controller) {
           var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+          if (options.includeRawChunks) {
+            controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+          }
           if (!chunk.success) {
             finishReason = "error";
             controller.enqueue({ type: "error", error: chunk.error });
@@ -897,6 +916,9 @@ var openaiChatChunkSchema = import_zod3.z.union([
 function isReasoningModel(modelId) {
   return modelId.startsWith("o");
 }
+function supportsFlexProcessing(modelId) {
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+}
 function getSystemMessageMode(modelId) {
   var _a, _b;
   if (!isReasoningModel(modelId)) {
@@ -1228,6 +1250,9 @@ var OpenAICompletionLanguageModel = class {
             controller.enqueue({ type: "stream-start", warnings });
           },
           transform(chunk, controller) {
+            if (options.includeRawChunks) {
+              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+            }
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -2012,6 +2037,7 @@ var OpenAIResponsesLanguageModel = class {
       store: openaiOptions == null ? void 0 : openaiOptions.store,
       user: openaiOptions == null ? void 0 : openaiOptions.user,
       instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
+      service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       // model-specific settings:
       ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
         reasoning: {
@@ -2045,6 +2071,14 @@ var OpenAIResponsesLanguageModel = class {
         });
       }
     }
+    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "serviceTier",
+        details: "flex processing is only available for o3 and o4-mini models"
+      });
+      delete baseArgs.service_tier;
+    }
     const {
       tools: openaiTools2,
       toolChoice: openaiToolChoice,
@@ -2232,6 +2266,7 @@ var OpenAIResponsesLanguageModel = class {
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
+    let lastReasoningSummaryIndex = null;
     return {
       stream: response.pipeThrough(
         new TransformStream({
@@ -2240,6 +2275,9 @@ var OpenAIResponsesLanguageModel = class {
           },
           transform(chunk, controller) {
             var _a, _b, _c, _d, _e, _f, _g, _h;
+            if (options.includeRawChunks) {
+              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+            }
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -2285,10 +2323,16 @@ var OpenAIResponsesLanguageModel = class {
                 text: value.delta
               });
             } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+              if (lastReasoningSummaryIndex !== null && value.summary_index !== lastReasoningSummaryIndex) {
+                controller.enqueue({ type: "reasoning-part-finish" });
+              }
+              lastReasoningSummaryIndex = value.summary_index;
               controller.enqueue({
                 type: "reasoning",
                 text: value.delta
               });
+            } else if (isResponseReasoningSummaryPartDoneChunk(value)) {
+              controller.enqueue({ type: "reasoning-part-finish" });
             } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
               ongoingToolCalls[value.output_index] = void 0;
               hasToolCalls = true;
@@ -2417,6 +2461,13 @@ var responseReasoningSummaryTextDeltaSchema = import_zod12.z.object({
   summary_index: import_zod12.z.number(),
   delta: import_zod12.z.string()
 });
+var responseReasoningSummaryPartDoneSchema = import_zod12.z.object({
+  type: import_zod12.z.literal("response.reasoning_summary_part.done"),
+  item_id: import_zod12.z.string(),
+  output_index: import_zod12.z.number(),
+  summary_index: import_zod12.z.number(),
+  part: import_zod12.z.unknown().nullish()
+});
 var openaiResponsesChunkSchema = import_zod12.z.union([
   textDeltaChunkSchema,
   responseFinishedChunkSchema,
@@ -2426,6 +2477,7 @@ var openaiResponsesChunkSchema = import_zod12.z.union([
   responseOutputItemAddedSchema,
   responseAnnotationAddedSchema,
   responseReasoningSummaryTextDeltaSchema,
+  responseReasoningSummaryPartDoneSchema,
   import_zod12.z.object({ type: import_zod12.z.string() }).passthrough()
   // fallback for unknown chunks
 ]);
@@ -2453,6 +2505,9 @@ function isResponseAnnotationAddedChunk(chunk) {
 function isResponseReasoningSummaryTextDeltaChunk(chunk) {
   return chunk.type === "response.reasoning_summary_text.delta";
 }
+function isResponseReasoningSummaryPartDoneChunk(chunk) {
+  return chunk.type === "response.reasoning_summary_part.done";
+}
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
@@ -2474,6 +2529,9 @@ function getResponsesModelConfig(modelId) {
     requiredAutoTruncation: false
   };
 }
+function supportsFlexProcessing2(modelId) {
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+}
 var openaiResponsesProviderOptionsSchema = import_zod12.z.object({
   metadata: import_zod12.z.any().nullish(),
   parallelToolCalls: import_zod12.z.boolean().nullish(),
@@ -2483,7 +2541,8 @@ var openaiResponsesProviderOptionsSchema = import_zod12.z.object({
   reasoningEffort: import_zod12.z.string().nullish(),
   strictSchemas: import_zod12.z.boolean().nullish(),
   instructions: import_zod12.z.string().nullish(),
-  reasoningSummary: import_zod12.z.string().nullish()
+  reasoningSummary: import_zod12.z.string().nullish(),
+  serviceTier: import_zod12.z.enum(["auto", "flex"]).nullish()
 });
 
 // src/openai-speech-model.ts
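Two stream-level additions are visible in the transforms above: raw provider chunks are forwarded as { type: "raw", rawValue } parts when options.includeRawChunks is set (e2aceaf), and the Responses API stream now emits { type: "reasoning-part-finish" } when a reasoning summary part completes or its summary_index changes (2f542fa). A hedged consumer-side sketch for the raw chunks, assuming the AI SDK v5 streamText API exposes a matching includeRawChunks flag and 'raw' parts on fullStream:

import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

// Assumption: includeRawChunks on streamText maps to options.includeRawChunks
// in the provider's doStream transform shown in the diff above.
const result = streamText({
  model: openai.responses('o4-mini'),
  prompt: 'Explain flex processing in two sentences.',
  includeRawChunks: true,
});

for await (const part of result.fullStream) {
  if (part.type === 'raw') {
    // Untransformed chunk exactly as received from the OpenAI API.
    console.log(part.rawValue);
  }
}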