@ai-sdk/openai 0.0.63 → 0.0.65
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.js +40 -22
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +40 -22
- package/dist/index.mjs.map +1 -1
- package/internal/dist/index.js +40 -22
- package/internal/dist/index.js.map +1 -1
- package/internal/dist/index.mjs +40 -22
- package/internal/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,17 @@
 # @ai-sdk/openai
 
+## 0.0.65
+
+### Patch Changes
+
+- e8aed44: Add OpenAI cached prompt tokens to experimental_providerMetadata for generateText and streamText
+
+## 0.0.64
+
+### Patch Changes
+
+- 5aa576d: feat (provider/openai): support store parameter for distillation
+
 ## 0.0.63
 
 ### Patch Changes
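Taken together, the two patches in this release range surface OpenAI's prompt-cache usage via `experimental_providerMetadata` and add an opt-in `store` flag for OpenAI's stored-completions (distillation) workflow. A minimal consumer-side sketch, assuming the AI SDK 3.x `generateText` signature of this era (model id and prompt are illustrative):

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

// Sketch: `store: true` asks OpenAI to retain the completion for
// distillation; `cachedPromptTokens` reports prompt tokens served from
// OpenAI's prompt cache (undefined when the API omits the field).
const { text, experimental_providerMetadata } = await generateText({
  model: openai("gpt-4o"),
  prompt: "Explain prompt caching in one sentence.",
  experimental_providerMetadata: { openai: { store: true } },
});

console.log(text);
console.log(experimental_providerMetadata?.openai?.cachedPromptTokens);
```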
package/dist/index.js
CHANGED
@@ -250,7 +250,7 @@ var OpenAIChatLanguageModel = class {
     seed,
     providerMetadata
   }) {
-    var _a, _b, _c;
+    var _a, _b, _c, _d, _e;
     const type = mode.type;
     const warnings = [];
     if (topK != null) {
@@ -296,6 +296,7 @@ var OpenAIChatLanguageModel = class {
       seed,
       // openai specific settings:
       max_completion_tokens: (_b = (_a = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _a.maxCompletionTokens) != null ? _b : void 0,
+      store: (_d = (_c = providerMetadata == null ? void 0 : providerMetadata.openai) == null ? void 0 : _c.store) != null ? _d : void 0,
       // response format:
       response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? { type: "json_object" } : void 0,
       // messages:
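De-minified, the added line is a plain optional read of `providerMetadata?.openai?.store` into the snake_case request body, mirroring the `maxCompletionTokens` line above it; most of the remaining hunks in this file are just the minifier renumbering its `_a…` temporaries to make room for the new reads. A sketch of the equivalent logic (the wrapper function and type are illustrative, not part of the package):

```ts
// Illustrative type and helper; only the field mapping mirrors the diff.
type OpenAIChatMetadata = {
  openai?: { maxCompletionTokens?: number; store?: boolean };
};

function openaiSpecificSettings(providerMetadata?: OpenAIChatMetadata) {
  return {
    max_completion_tokens:
      providerMetadata?.openai?.maxCompletionTokens ?? undefined,
    store: providerMetadata?.openai?.store ?? undefined, // new in 0.0.64
  };
}
```

Since `JSON.stringify` drops `undefined`-valued properties, requests that never set the flag serialize exactly as before.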
@@ -333,7 +334,7 @@ var OpenAIChatLanguageModel = class {
           json_schema: {
             schema: mode.schema,
             strict: true,
-            name: (_c = mode.name) != null ? _c : "response",
+            name: (_e = mode.name) != null ? _e : "response",
             description: mode.description
           }
         } : { type: "json_object" }
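The only change on this line is the renamed temporary (`_c` → `_e`), but the logic it carries is worth spelling out: OpenAI's structured-outputs `response_format` requires `json_schema.name`, so the provider falls back to `"response"` when the mode supplies none. A sketch of the payload being built (the helper is illustrative):

```ts
// Illustrative helper; the object shape mirrors the diff.
function jsonSchemaResponseFormat(mode: {
  schema: object;
  name?: string;
  description?: string;
}) {
  return {
    type: "json_schema" as const,
    json_schema: {
      schema: mode.schema,
      strict: true,
      name: mode.name ?? "response", // OpenAI requires a name here
      description: mode.description,
    },
  };
}
```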
@@ -383,7 +384,7 @@ var OpenAIChatLanguageModel = class {
     }
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r;
     const { args, warnings } = this.getArgs(options);
     const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
       url: this.config.url({
@@ -401,13 +402,18 @@ var OpenAIChatLanguageModel = class {
     });
     const { messages: rawPrompt, ...rawSettings } = args;
     const choice = response.choices[0];
-    const providerMetadata = ((_b = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? {
-      openai: {
-        reasoningTokens: (_d = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens
+    let providerMetadata;
+    if (((_b = (_a = response.usage) == null ? void 0 : _a.completion_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null || ((_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens_details) == null ? void 0 : _d.cached_tokens) != null) {
+      providerMetadata = { openai: {} };
+      if (((_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null) {
+        providerMetadata.openai.reasoningTokens = (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens_details) == null ? void 0 : _h.reasoning_tokens;
       }
-    } : void 0;
+      if (((_j = (_i = response.usage) == null ? void 0 : _i.prompt_tokens_details) == null ? void 0 : _j.cached_tokens) != null) {
+        providerMetadata.openai.cachedPromptTokens = (_l = (_k = response.usage) == null ? void 0 : _k.prompt_tokens_details) == null ? void 0 : _l.cached_tokens;
+      }
+    }
     return {
-      text: (_e = choice.message.content) != null ? _e : void 0,
+      text: (_m = choice.message.content) != null ? _m : void 0,
       toolCalls: this.settings.useLegacyFunctionCalling && choice.message.function_call ? [
         {
           toolCallType: "function",
@@ -415,7 +421,7 @@ var OpenAIChatLanguageModel = class {
           toolName: choice.message.function_call.name,
           args: choice.message.function_call.arguments
         }
-      ] : (_f = choice.message.tool_calls) == null ? void 0 : _f.map((toolCall) => {
+      ] : (_n = choice.message.tool_calls) == null ? void 0 : _n.map((toolCall) => {
        var _a2;
        return {
          toolCallType: "function",
@@ -426,8 +432,8 @@ var OpenAIChatLanguageModel = class {
       }),
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        promptTokens: (_p = (_o = response.usage) == null ? void 0 : _o.prompt_tokens) != null ? _p : NaN,
-        completionTokens: (_r = (_q = response.usage) == null ? void 0 : _q.completion_tokens) != null ? _r : NaN
+        promptTokens: (_p = (_o = response.usage) == null ? void 0 : _o.prompt_tokens) != null ? _p : NaN,
+        completionTokens: (_r = (_q = response.usage) == null ? void 0 : _q.completion_tokens) != null ? _r : NaN
       },
       rawCall: { rawPrompt, rawSettings },
       rawResponse: { headers: responseHeaders },
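De-minified, the rewritten block in `doGenerate` replaces the old reasoning-tokens-only ternary with an imperative build-up: the metadata object is created only when at least one usage detail is present, and each field is then attached independently. A standalone sketch of that mapping (type and function names are illustrative):

```ts
// Sketch: map OpenAI usage detail fields onto provider metadata,
// creating the object only when at least one detail exists.
type OpenAIUsage = {
  prompt_tokens_details?: { cached_tokens?: number | null } | null;
  completion_tokens_details?: { reasoning_tokens?: number | null } | null;
};

function toProviderMetadata(usage?: OpenAIUsage) {
  let providerMetadata: { openai: Record<string, number> } | undefined;
  if (
    usage?.completion_tokens_details?.reasoning_tokens != null ||
    usage?.prompt_tokens_details?.cached_tokens != null
  ) {
    providerMetadata = { openai: {} };
    if (usage?.completion_tokens_details?.reasoning_tokens != null) {
      providerMetadata.openai.reasoningTokens =
        usage.completion_tokens_details.reasoning_tokens;
    }
    if (usage?.prompt_tokens_details?.cached_tokens != null) {
      providerMetadata.openai.cachedPromptTokens =
        usage.prompt_tokens_details.cached_tokens;
    }
  }
  return providerMetadata;
}
```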
@@ -504,11 +510,12 @@ var OpenAIChatLanguageModel = class {
     let logprobs;
     let isFirstChunk = true;
     const { useLegacyFunctionCalling } = this.settings;
+    let providerMetadata;
     return {
       stream: response.pipeThrough(
         new TransformStream({
           transform(chunk, controller) {
-            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -532,6 +539,13 @@ var OpenAIChatLanguageModel = class {
                 promptTokens: (_a = value.usage.prompt_tokens) != null ? _a : void 0,
                 completionTokens: (_b = value.usage.completion_tokens) != null ? _b : void 0
               };
+              if (((_c = value.usage.prompt_tokens_details) == null ? void 0 : _c.cached_tokens) != null) {
+                providerMetadata = {
+                  openai: {
+                    cachedPromptTokens: (_d = value.usage.prompt_tokens_details) == null ? void 0 : _d.cached_tokens
+                  }
+                };
+              }
             }
             const choice = value.choices[0];
             if ((choice == null ? void 0 : choice.finish_reason) != null) {
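In the streaming path the usage chunk arrives at most once, so the cached token count is stashed in the `providerMetadata` variable hoisted outside the `TransformStream` (previous hunk) and replayed on the `finish` event below. For consumers that means the value only settles after the stream ends; a sketch assuming the AI SDK 3.x `streamText` result shape, where `experimental_providerMetadata` is promise-valued:

```ts
import { openai } from "@ai-sdk/openai";
import { streamText } from "ai";

// Sketch: cachedPromptTokens becomes known only when the finish event
// fires, so it sits behind a promise that settles after the stream ends.
const result = await streamText({
  model: openai("gpt-4o-mini"),
  prompt: "Summarize prompt caching in one sentence.",
});

for await (const delta of result.textStream) {
  process.stdout.write(delta);
}

const metadata = await result.experimental_providerMetadata;
console.log(metadata?.openai?.cachedPromptTokens);
```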
@@ -578,7 +592,7 @@ var OpenAIChatLanguageModel = class {
                     message: `Expected 'id' to be a string.`
                   });
                 }
-                if (((_c = toolCallDelta.function) == null ? void 0 : _c.name) == null) {
+                if (((_e = toolCallDelta.function) == null ? void 0 : _e.name) == null) {
                   throw new import_provider2.InvalidResponseDataError({
                     data: toolCallDelta,
                     message: `Expected 'function.name' to be a string.`
@@ -589,11 +603,11 @@ var OpenAIChatLanguageModel = class {
                   type: "function",
                   function: {
                     name: toolCallDelta.function.name,
-                    arguments: (_d = toolCallDelta.function.arguments) != null ? _d : ""
+                    arguments: (_f = toolCallDelta.function.arguments) != null ? _f : ""
                   }
                 };
                 const toolCall2 = toolCalls[index];
-                if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null) {
+                if (((_g = toolCall2.function) == null ? void 0 : _g.name) != null && ((_h = toolCall2.function) == null ? void 0 : _h.arguments) != null) {
                   if (toolCall2.function.arguments.length > 0) {
                     controller.enqueue({
                       type: "tool-call-delta",
@@ -607,7 +621,7 @@ var OpenAIChatLanguageModel = class {
                   controller.enqueue({
                     type: "tool-call",
                     toolCallType: "function",
-                    toolCallId: (_g = toolCall2.id) != null ? _g : (0, import_provider_utils3.generateId)(),
+                    toolCallId: (_i = toolCall2.id) != null ? _i : (0, import_provider_utils3.generateId)(),
                     toolName: toolCall2.function.name,
                     args: toolCall2.function.arguments
                   });
@@ -616,21 +630,21 @@ var OpenAIChatLanguageModel = class {
                   continue;
                 }
                 const toolCall = toolCalls[index];
-                if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
-                  toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
+                if (((_j = toolCallDelta.function) == null ? void 0 : _j.arguments) != null) {
+                  toolCall.function.arguments += (_l = (_k = toolCallDelta.function) == null ? void 0 : _k.arguments) != null ? _l : "";
                 }
                 controller.enqueue({
                   type: "tool-call-delta",
                   toolCallType: "function",
                   toolCallId: toolCall.id,
                   toolName: toolCall.function.name,
-                  argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
+                  argsTextDelta: (_m = toolCallDelta.function.arguments) != null ? _m : ""
                 });
-                if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
+                if (((_n = toolCall.function) == null ? void 0 : _n.name) != null && ((_o = toolCall.function) == null ? void 0 : _o.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
                   controller.enqueue({
                     type: "tool-call",
                     toolCallType: "function",
-                    toolCallId: (_n = toolCall.id) != null ? _n : (0, import_provider_utils3.generateId)(),
+                    toolCallId: (_p = toolCall.id) != null ? _p : (0, import_provider_utils3.generateId)(),
                     toolName: toolCall.function.name,
                     args: toolCall.function.arguments
                   });
@@ -647,7 +661,8 @@ var OpenAIChatLanguageModel = class {
             usage: {
               promptTokens: (_a = usage.promptTokens) != null ? _a : NaN,
               completionTokens: (_b = usage.completionTokens) != null ? _b : NaN
-            }
+            },
+            ...providerMetadata != null ? { providerMetadata } : {}
           });
         }
       })
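The conditional spread keeps the emitted `finish` part unchanged when no metadata was collected; only streams that actually saw `prompt_tokens_details` gain the extra key. The pattern in isolation (illustrative helper):

```ts
// Illustrative: spread in an optional key without ever emitting
// `providerMetadata: undefined`.
function finishPart(
  finishReason: string,
  usage: { promptTokens: number; completionTokens: number },
  providerMetadata?: { openai: Record<string, number> }
) {
  return {
    type: "finish" as const,
    finishReason,
    usage,
    ...(providerMetadata != null ? { providerMetadata } : {}),
  };
}
```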
@@ -661,6 +676,9 @@ var OpenAIChatLanguageModel = class {
 var openAITokenUsageSchema = import_zod2.z.object({
   prompt_tokens: import_zod2.z.number().nullish(),
   completion_tokens: import_zod2.z.number().nullish(),
+  prompt_tokens_details: import_zod2.z.object({
+    cached_tokens: import_zod2.z.number().nullish()
+  }).nullish(),
   completion_tokens_details: import_zod2.z.object({
     reasoning_tokens: import_zod2.z.number().nullish()
   }).nullish()
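Finally, the usage schema is widened so responses that carry the new `prompt_tokens_details` block validate, while older responses without it still pass because every field is `.nullish()`. The same schema with plain `zod` (only the import alias differs from the bundled `import_zod2`):

```ts
import { z } from "zod";

// Mirrors the widened usage schema from the diff; payloads with or
// without the detail blocks both validate.
const openAITokenUsageSchema = z.object({
  prompt_tokens: z.number().nullish(),
  completion_tokens: z.number().nullish(),
  prompt_tokens_details: z
    .object({ cached_tokens: z.number().nullish() })
    .nullish(),
  completion_tokens_details: z
    .object({ reasoning_tokens: z.number().nullish() })
    .nullish(),
});

// Both shapes parse:
openAITokenUsageSchema.parse({ prompt_tokens: 10, completion_tokens: 5 });
openAITokenUsageSchema.parse({
  prompt_tokens: 10,
  completion_tokens: 5,
  prompt_tokens_details: { cached_tokens: 8 },
});
```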