@ai-sdk/openai 2.0.0-alpha.11 → 2.0.0-alpha.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +10 -0
- package/dist/index.js +27 -0
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +27 -0
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +27 -0
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +27 -0
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,15 @@
 # @ai-sdk/openai
 
+## 2.0.0-alpha.12
+
+### Patch Changes
+
+- 2f542fa: Add reasoning-part-finish parts for reasoning models in the responses API
+- e2aceaf: feat: add raw chunk support
+- Updated dependencies [e2aceaf]
+  - @ai-sdk/provider@2.0.0-alpha.12
+  - @ai-sdk/provider-utils@3.0.0-alpha.12
+
 ## 2.0.0-alpha.11
 
 ### Patch Changes
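
Both patch entries correspond to new provider-level stream parts that appear in the dist diffs below: `raw` parts carrying the unparsed provider chunk, and `reasoning-part-finish` parts marking the end of a reasoning summary part. A minimal consumption sketch; note that `includeRawChunks` on `streamText` and the exact part names surfaced on `fullStream` are assumptions about the matching `ai` core alpha, not something this package diff shows:

```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

// Assumption: the core alpha forwards `includeRawChunks` to the provider's
// doStream call and passes `raw` / `reasoning-part-finish` parts through.
const result = streamText({
  model: openai.responses('o3-mini'),
  prompt: 'Summarize the change in one sentence.',
  includeRawChunks: true,
});

for await (const part of result.fullStream) {
  if (part.type === 'raw') {
    // Untransformed Responses API event, useful for debugging or telemetry.
    console.debug('raw provider chunk:', part.rawValue);
  } else if (part.type === 'reasoning-part-finish') {
    // Boundary between reasoning summary parts (new in 2.0.0-alpha.12).
    console.log('[reasoning part finished]');
  }
}
```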
package/dist/index.js
CHANGED
@@ -668,6 +668,9 @@ var OpenAIChatLanguageModel = class {
           },
           transform(chunk, controller) {
             var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+            if (options.includeRawChunks) {
+              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+            }
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -1247,6 +1250,9 @@ var OpenAICompletionLanguageModel = class {
             controller.enqueue({ type: "stream-start", warnings });
           },
           transform(chunk, controller) {
+            if (options.includeRawChunks) {
+              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+            }
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -2260,6 +2266,7 @@ var OpenAIResponsesLanguageModel = class {
     let responseId = null;
     const ongoingToolCalls = {};
     let hasToolCalls = false;
+    let lastReasoningSummaryIndex = null;
     return {
       stream: response.pipeThrough(
         new TransformStream({
@@ -2268,6 +2275,9 @@ var OpenAIResponsesLanguageModel = class {
           },
           transform(chunk, controller) {
             var _a, _b, _c, _d, _e, _f, _g, _h;
+            if (options.includeRawChunks) {
+              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+            }
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
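
The same three-line guard is added to the chat, completion, and responses transforms above: when `options.includeRawChunks` is set, the untouched provider event is enqueued as a `raw` part before any validation, so consumers still see the wire data even when a chunk fails to parse. A standalone sketch of the pattern with toy types (not the SDK's own):

```ts
// Toy types standing in for the SDK's parsed-chunk and stream-part shapes.
type ParsedChunk =
  | { success: true; value: { text: string }; rawValue: unknown }
  | { success: false; error: unknown; rawValue: unknown };

type StreamPart =
  | { type: 'raw'; rawValue: unknown }
  | { type: 'text'; text: string }
  | { type: 'error'; error: unknown };

function makeTransform(options: { includeRawChunks: boolean }) {
  return new TransformStream<ParsedChunk, StreamPart>({
    transform(chunk, controller) {
      // Forward the unparsed provider event first, if requested.
      if (options.includeRawChunks) {
        controller.enqueue({ type: 'raw', rawValue: chunk.rawValue });
      }
      if (!chunk.success) {
        controller.enqueue({ type: 'error', error: chunk.error });
        return;
      }
      controller.enqueue({ type: 'text', text: chunk.value.text });
    },
  });
}
```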
@@ -2313,10 +2323,16 @@ var OpenAIResponsesLanguageModel = class {
                 text: value.delta
               });
             } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
+              if (lastReasoningSummaryIndex !== null && value.summary_index !== lastReasoningSummaryIndex) {
+                controller.enqueue({ type: "reasoning-part-finish" });
+              }
+              lastReasoningSummaryIndex = value.summary_index;
               controller.enqueue({
                 type: "reasoning",
                 text: value.delta
               });
+            } else if (isResponseReasoningSummaryPartDoneChunk(value)) {
+              controller.enqueue({ type: "reasoning-part-finish" });
             } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
               ongoingToolCalls[value.output_index] = void 0;
               hasToolCalls = true;
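
The `summary_index` bookkeeping above emits a `reasoning-part-finish` part whenever a reasoning summary delta arrives for a new summary part, and a `response.reasoning_summary_part.done` event closes the current one. A standalone trace of that logic (toy event shapes, not SDK types):

```ts
// Standalone trace of the summary_index bookkeeping shown above.
type ReasoningEvent =
  | { type: 'delta'; summary_index: number; delta: string }
  | { type: 'part_done' };

function emittedParts(events: ReasoningEvent[]): string[] {
  const parts: string[] = [];
  let lastReasoningSummaryIndex: number | null = null;

  for (const event of events) {
    if (event.type === 'delta') {
      // A new summary_index means the previous reasoning part is finished.
      if (lastReasoningSummaryIndex !== null && event.summary_index !== lastReasoningSummaryIndex) {
        parts.push('reasoning-part-finish');
      }
      lastReasoningSummaryIndex = event.summary_index;
      parts.push(`reasoning:${event.delta}`);
    } else {
      parts.push('reasoning-part-finish');
    }
  }
  return parts;
}

console.log(
  emittedParts([
    { type: 'delta', summary_index: 0, delta: 'First ' },
    { type: 'delta', summary_index: 0, delta: 'part.' },
    { type: 'delta', summary_index: 1, delta: 'Second part.' },
    { type: 'part_done' },
  ]),
);
// -> ['reasoning:First ', 'reasoning:part.', 'reasoning-part-finish',
//     'reasoning:Second part.', 'reasoning-part-finish']
```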
@@ -2445,6 +2461,13 @@ var responseReasoningSummaryTextDeltaSchema = import_zod12.z.object({
   summary_index: import_zod12.z.number(),
   delta: import_zod12.z.string()
 });
+var responseReasoningSummaryPartDoneSchema = import_zod12.z.object({
+  type: import_zod12.z.literal("response.reasoning_summary_part.done"),
+  item_id: import_zod12.z.string(),
+  output_index: import_zod12.z.number(),
+  summary_index: import_zod12.z.number(),
+  part: import_zod12.z.unknown().nullish()
+});
 var openaiResponsesChunkSchema = import_zod12.z.union([
   textDeltaChunkSchema,
   responseFinishedChunkSchema,
@@ -2454,6 +2477,7 @@ var openaiResponsesChunkSchema = import_zod12.z.union([
   responseOutputItemAddedSchema,
   responseAnnotationAddedSchema,
   responseReasoningSummaryTextDeltaSchema,
+  responseReasoningSummaryPartDoneSchema,
   import_zod12.z.object({ type: import_zod12.z.string() }).passthrough()
   // fallback for unknown chunks
 ]);
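
The new `responseReasoningSummaryPartDoneSchema` joins the chunk union ahead of the `passthrough()` fallback, so known `response.reasoning_summary_part.done` events are validated strictly while unknown event types still parse instead of erroring the stream. Roughly how such a union behaves, sketched with plain `zod` (simplified; the real schemas live in the compiled bundle above):

```ts
import { z } from 'zod';

// Simplified version of the chunk union: a typed member plus a passthrough fallback.
const reasoningSummaryPartDone = z.object({
  type: z.literal('response.reasoning_summary_part.done'),
  item_id: z.string(),
  output_index: z.number(),
  summary_index: z.number(),
  part: z.unknown().nullish(),
});

const chunk = z.union([
  reasoningSummaryPartDone,
  z.object({ type: z.string() }).passthrough(), // fallback for unknown chunks
]);

// Known event: parsed against the specific schema.
console.log(
  chunk.parse({
    type: 'response.reasoning_summary_part.done',
    item_id: 'rs_123',
    output_index: 0,
    summary_index: 1,
  }),
);

// Unknown event: accepted by the passthrough fallback instead of failing the stream.
console.log(chunk.parse({ type: 'response.some_future_event', payload: 42 }));
```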
@@ -2481,6 +2505,9 @@ function isResponseAnnotationAddedChunk(chunk) {
 function isResponseReasoningSummaryTextDeltaChunk(chunk) {
   return chunk.type === "response.reasoning_summary_text.delta";
 }
+function isResponseReasoningSummaryPartDoneChunk(chunk) {
+  return chunk.type === "response.reasoning_summary_part.done";
+}
 function getResponsesModelConfig(modelId) {
   if (modelId.startsWith("o")) {
     if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
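
The compiled guard is a plain predicate on `chunk.type`, mirroring the existing `isResponseReasoningSummaryTextDeltaChunk`. In TypeScript source this would typically be written as a type guard so the chunk union narrows; a hypothetical sketch (only the runtime check appears in this diff, the typing is an assumption):

```ts
// Hypothetical typed counterpart of the compiled guard above.
type ResponsesChunk =
  | {
      type: 'response.reasoning_summary_part.done';
      item_id: string;
      output_index: number;
      summary_index: number;
      part?: unknown;
    }
  | { type: string };

function isResponseReasoningSummaryPartDoneChunk(
  chunk: ResponsesChunk,
): chunk is Extract<ResponsesChunk, { type: 'response.reasoning_summary_part.done' }> {
  return chunk.type === 'response.reasoning_summary_part.done';
}
```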