@ai-sdk/openai-compatible 1.0.2 → 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -0
- package/dist/index.js +33 -25
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +33 -25
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,18 @@
 # @ai-sdk/openai-compatible
 
+## 1.0.4
+
+### Patch Changes
+
+- 5f4c71f: feat (provider/openai-compatible): fall back to look for usage in choices
+- da314cd: chore (provider/openai-compatible): inline usage fallback logic
+
+## 1.0.3
+
+### Patch Changes
+
+- a0934f8: feat (provider/openai-compatible): look for reasoning in 'reasoning' field as well
+
 ## 1.0.2
 
 ### Patch Changes
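For context: the 1.0.4 usage fallback targets providers whose streaming chunks report token usage inside a choice rather than at the top level. An illustrative payload (field placement is the point; the names follow the OpenAI chat-completions wire format, the values are invented):

```ts
// Illustrative streaming chunk, not taken from the package source.
const chunk = {
  id: 'chatcmpl-123',
  choices: [
    {
      delta: { content: 'Hello' },
      // usage reported per choice instead of at the top level
      usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 },
    },
  ],
  usage: null, // absent here; 1.0.4 now falls back to choices[0].usage
};
```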
package/dist/index.js
CHANGED
@@ -374,7 +374,7 @@ var OpenAICompatibleChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
     const { args, warnings } = await this.getArgs({ ...options });
     const body = JSON.stringify(args);
     const {
@@ -401,7 +401,7 @@ var OpenAICompatibleChatLanguageModel = class {
     if (text != null && text.length > 0) {
       content.push({ type: "text", text });
     }
-    const reasoning = choice.message.reasoning_content;
+    const reasoning = (_a = choice.message.reasoning_content) != null ? _a : choice.message.reasoning;
     if (reasoning != null && reasoning.length > 0) {
       content.push({
         type: "reasoning",
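De-minified, the 1.0.3 change is a single nullish-coalescing fallback between the two reasoning field spellings. A minimal sketch, with simplified stand-ins for the package's internal message and content types:

```ts
// Hypothetical, simplified types standing in for the package's internals.
interface AssistantMessage {
  content?: string | null;
  reasoning_content?: string | null; // common openai-compatible spelling
  reasoning?: string | null; // used by some providers serving gpt-oss (#7866)
}

type ContentPart =
  | { type: 'text'; text: string }
  | { type: 'reasoning'; text: string };

function pushReasoning(message: AssistantMessage, content: ContentPart[]): void {
  // Prefer reasoning_content; fall back to the alternate field name.
  const reasoning = message.reasoning_content ?? message.reasoning;
  if (reasoning != null && reasoning.length > 0) {
    content.push({ type: 'reasoning', text: reasoning });
  }
}
```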
@@ -412,7 +412,7 @@ var OpenAICompatibleChatLanguageModel = class {
     for (const toolCall of choice.message.tool_calls) {
       content.push({
         type: "tool-call",
-        toolCallId: (_a = toolCall.id) != null ? _a : (0, import_provider_utils.generateId)(),
+        toolCallId: (_b = toolCall.id) != null ? _b : (0, import_provider_utils.generateId)(),
         toolName: toolCall.function.name,
         input: toolCall.function.arguments
       });
@@ -420,11 +420,11 @@ var OpenAICompatibleChatLanguageModel = class {
     }
     const providerMetadata = {
       [this.providerOptionsName]: {},
-      ...await ((_c = (_b = this.config.metadataExtractor) == null ? void 0 : _b.extractMetadata) == null ? void 0 : _c.call(_b, {
+      ...await ((_d = (_c = this.config.metadataExtractor) == null ? void 0 : _c.extractMetadata) == null ? void 0 : _d.call(_c, {
         parsedBody: rawResponse
       }))
     };
-    const completionTokenDetails = (_d = responseBody.usage) == null ? void 0 : _d.completion_tokens_details;
+    const completionTokenDetails = (_e = responseBody.usage) == null ? void 0 : _e.completion_tokens_details;
     if ((completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens) != null) {
       providerMetadata[this.providerOptionsName].acceptedPredictionTokens = completionTokenDetails == null ? void 0 : completionTokenDetails.accepted_prediction_tokens;
     }
@@ -435,11 +435,11 @@ var OpenAICompatibleChatLanguageModel = class {
       content,
       finishReason: mapOpenAICompatibleFinishReason(choice.finish_reason),
       usage: {
-        inputTokens: (_f = (_e = responseBody.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : void 0,
-        outputTokens: (_h = (_g = responseBody.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : void 0,
-        totalTokens: (_j = (_i = responseBody.usage) == null ? void 0 : _i.total_tokens) != null ? _j : void 0,
-        reasoningTokens: (_m = (_l = (_k = responseBody.usage) == null ? void 0 : _k.completion_tokens_details) == null ? void 0 : _l.reasoning_tokens) != null ? _m : void 0,
-        cachedInputTokens: (_p = (_o = (_n = responseBody.usage) == null ? void 0 : _n.prompt_tokens_details) == null ? void 0 : _o.cached_tokens) != null ? _p : void 0
+        inputTokens: (_g = (_f = responseBody.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+        outputTokens: (_i = (_h = responseBody.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0,
+        totalTokens: (_k = (_j = responseBody.usage) == null ? void 0 : _j.total_tokens) != null ? _k : void 0,
+        reasoningTokens: (_n = (_m = (_l = responseBody.usage) == null ? void 0 : _l.completion_tokens_details) == null ? void 0 : _m.reasoning_tokens) != null ? _n : void 0,
+        cachedInputTokens: (_q = (_p = (_o = responseBody.usage) == null ? void 0 : _o.prompt_tokens_details) == null ? void 0 : _p.cached_tokens) != null ? _q : void 0
       },
       providerMetadata,
       request: { body },
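Stripped of the compiled temporaries, the non-streaming usage block is a plain optional-chaining projection from the provider's snake_case usage object onto the SDK's camelCase fields. A hedged sketch (assumed types; the real shape is defined by the package's zod schemas):

```ts
// Assumed provider usage shape, mirroring the fields referenced in the diff.
interface ProviderUsage {
  prompt_tokens?: number | null;
  completion_tokens?: number | null;
  total_tokens?: number | null;
  prompt_tokens_details?: { cached_tokens?: number | null } | null;
  completion_tokens_details?: { reasoning_tokens?: number | null } | null;
}

function mapUsage(usage: ProviderUsage | null | undefined) {
  return {
    inputTokens: usage?.prompt_tokens ?? undefined,
    outputTokens: usage?.completion_tokens ?? undefined,
    totalTokens: usage?.total_tokens ?? undefined,
    reasoningTokens: usage?.completion_tokens_details?.reasoning_tokens ?? undefined,
    cachedInputTokens: usage?.prompt_tokens_details?.cached_tokens ?? undefined,
  };
}
```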
@@ -502,7 +502,7 @@ var OpenAICompatibleChatLanguageModel = class {
       },
       // TODO we lost type safety on Chunk, most likely due to the error schema. MUST FIX
       transform(chunk, controller) {
-        var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+        var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
         if (options.includeRawChunks) {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
@@ -525,14 +525,15 @@ var OpenAICompatibleChatLanguageModel = class {
            ...getResponseMetadata(value)
          });
        }
-        if (value.usage != null) {
+        const effectiveUsage = (_c = value.usage) != null ? _c : (_b = (_a2 = value.choices) == null ? void 0 : _a2[0]) == null ? void 0 : _b.usage;
+        if (effectiveUsage != null) {
          const {
            prompt_tokens,
            completion_tokens,
            total_tokens,
            prompt_tokens_details,
            completion_tokens_details
-          } = value.usage;
+          } = effectiveUsage;
          usage.promptTokens = prompt_tokens != null ? prompt_tokens : void 0;
          usage.completionTokens = completion_tokens != null ? completion_tokens : void 0;
          usage.totalTokens = total_tokens != null ? total_tokens : void 0;
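This is the headline 1.0.4 change: when a chunk carries no top-level `usage`, the stream transform now falls back to the `usage` reported on the first choice. A minimal sketch of just the selection logic, reusing the `ProviderUsage` shape assumed above:

```ts
// Assumed chunk shape; only the fields relevant to the fallback.
interface StreamChunk {
  usage?: ProviderUsage | null;
  choices?: Array<{ usage?: ProviderUsage | null }> | null;
}

function selectUsage(chunk: StreamChunk): ProviderUsage | undefined {
  // Prefer top-level usage, then the usage reported on the first choice.
  return chunk.usage ?? chunk.choices?.[0]?.usage ?? undefined;
}
```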
@@ -559,7 +560,8 @@ var OpenAICompatibleChatLanguageModel = class {
          return;
        }
        const delta = choice.delta;
-        if (delta.reasoning_content) {
+        const reasoningContent = (_d = delta.reasoning_content) != null ? _d : delta.reasoning;
+        if (reasoningContent) {
          if (!isActiveReasoning) {
            controller.enqueue({
              type: "reasoning-start",
@@ -570,7 +572,7 @@ var OpenAICompatibleChatLanguageModel = class {
          controller.enqueue({
            type: "reasoning-delta",
            id: "reasoning-0",
-            delta: delta.reasoning_content
+            delta: reasoningContent
          });
        }
        if (delta.content) {
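The streaming path mirrors the 1.0.3 doGenerate fallback: the delta's `reasoning_content` wins, `reasoning` is the fallback, and the chosen value drives both the truthiness check and the emitted `reasoning-delta`. A sketch with assumed types and a simplified enqueue callback standing in for the real stream controller:

```ts
// Assumed delta shape and stream-part types; simplified from the package.
interface Delta {
  reasoning_content?: string | null;
  reasoning?: string | null;
}

type StreamPart =
  | { type: 'reasoning-start'; id: string }
  | { type: 'reasoning-delta'; id: string; delta: string };

function emitReasoning(
  delta: Delta,
  enqueue: (part: StreamPart) => void,
  state: { isActiveReasoning: boolean },
): void {
  const reasoningContent = delta.reasoning_content ?? delta.reasoning;
  if (reasoningContent) {
    if (!state.isActiveReasoning) {
      enqueue({ type: 'reasoning-start', id: 'reasoning-0' });
      state.isActiveReasoning = true;
    }
    enqueue({ type: 'reasoning-delta', id: 'reasoning-0', delta: reasoningContent });
  }
}
```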
@@ -594,7 +596,7 @@ var OpenAICompatibleChatLanguageModel = class {
              message: `Expected 'id' to be a string.`
            });
          }
-          if (((_a2 = toolCallDelta.function) == null ? void 0 : _a2.name) == null) {
+          if (((_e = toolCallDelta.function) == null ? void 0 : _e.name) == null) {
            throw new import_provider3.InvalidResponseDataError({
              data: toolCallDelta,
              message: `Expected 'function.name' to be a string.`
@@ -610,12 +612,12 @@ var OpenAICompatibleChatLanguageModel = class {
              type: "function",
              function: {
                name: toolCallDelta.function.name,
-                arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+                arguments: (_f = toolCallDelta.function.arguments) != null ? _f : ""
              },
              hasFinished: false
            };
            const toolCall2 = toolCalls[index];
-            if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null) {
+            if (((_g = toolCall2.function) == null ? void 0 : _g.name) != null && ((_h = toolCall2.function) == null ? void 0 : _h.arguments) != null) {
              if (toolCall2.function.arguments.length > 0) {
                controller.enqueue({
                  type: "tool-input-start",
@@ -630,7 +632,7 @@ var OpenAICompatibleChatLanguageModel = class {
                });
                controller.enqueue({
                  type: "tool-call",
-                  toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils.generateId)(),
+                  toolCallId: (_i = toolCall2.id) != null ? _i : (0, import_provider_utils.generateId)(),
                  toolName: toolCall2.function.name,
                  input: toolCall2.function.arguments
                });
@@ -643,22 +645,22 @@ var OpenAICompatibleChatLanguageModel = class {
            if (toolCall.hasFinished) {
              continue;
            }
-            if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
-              toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+            if (((_j = toolCallDelta.function) == null ? void 0 : _j.arguments) != null) {
+              toolCall.function.arguments += (_l = (_k = toolCallDelta.function) == null ? void 0 : _k.arguments) != null ? _l : "";
            }
            controller.enqueue({
              type: "tool-input-delta",
              id: toolCall.id,
-              delta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+              delta: (_m = toolCallDelta.function.arguments) != null ? _m : ""
            });
-            if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils.isParsableJson)(toolCall.function.arguments)) {
+            if (((_n = toolCall.function) == null ? void 0 : _n.name) != null && ((_o = toolCall.function) == null ? void 0 : _o.arguments) != null && (0, import_provider_utils.isParsableJson)(toolCall.function.arguments)) {
              controller.enqueue({
                type: "tool-input-end",
                id: toolCall.id
              });
              controller.enqueue({
                type: "tool-call",
-                toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils.generateId)(),
+                toolCallId: (_p = toolCall.id) != null ? _p : (0, import_provider_utils.generateId)(),
                toolName: toolCall.function.name,
                input: toolCall.function.arguments
              });
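None of the tool-call hunks above change behavior: the `_a`…`_p` shifts are esbuild renumbering its optional-chaining temporaries after the new reasoning and usage fallbacks claimed earlier slots. The one recurring pattern worth naming is the tool-call id fallback, which uses `generateId` from `@ai-sdk/provider-utils` (a real export of that package) whenever a provider omits the id:

```ts
import { generateId } from '@ai-sdk/provider-utils';

// Providers are not required to send a tool call id,
// so a fresh one is synthesized when it is missing.
function resolveToolCallId(toolCall: { id?: string | null }): string {
  return toolCall.id ?? generateId();
}
```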
@@ -742,6 +744,7 @@ var OpenAICompatibleChatResponseSchema = import_v43.z.object({
    role: import_v43.z.literal("assistant").nullish(),
    content: import_v43.z.string().nullish(),
    reasoning_content: import_v43.z.string().nullish(),
+    reasoning: import_v43.z.string().nullish(),
    tool_calls: import_v43.z.array(
      import_v43.z.object({
        id: import_v43.z.string().nullish(),
@@ -767,7 +770,10 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union(
      delta: import_v43.z.object({
        role: import_v43.z.enum(["assistant"]).nullish(),
        content: import_v43.z.string().nullish(),
+        // Most openai-compatible models set `reasoning_content`, but some
+        // providers serving `gpt-oss` set `reasoning`. See #7866
        reasoning_content: import_v43.z.string().nullish(),
+        reasoning: import_v43.z.string().nullish(),
        tool_calls: import_v43.z.array(
          import_v43.z.object({
            index: import_v43.z.number(),
@@ -779,7 +785,9 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union(
          })
        ).nullish()
      }).nullish(),
-      finish_reason: import_v43.z.string().nullish()
+      finish_reason: import_v43.z.string().nullish(),
+      // Some providers report usage within each choice in streaming chunks
+      usage: openaiCompatibleTokenUsageSchema
    })
  ),
  usage: openaiCompatibleTokenUsageSchema
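Taken together, the two schema hunks make the chunk schema tolerant of both reasoning spellings and of per-choice usage. A condensed sketch of the relevant zod shape (field names from the diff; everything else abbreviated, and written against plain `zod` rather than the bundle's `import_v43` alias):

```ts
import { z } from 'zod';

// Abbreviated: only the fields this diff touches.
const tokenUsageSchema = z
  .object({
    prompt_tokens: z.number().nullish(),
    completion_tokens: z.number().nullish(),
    total_tokens: z.number().nullish(),
  })
  .nullish();

const chunkChoiceSchema = z.object({
  delta: z
    .object({
      content: z.string().nullish(),
      reasoning_content: z.string().nullish(), // common spelling
      reasoning: z.string().nullish(), // gpt-oss-style fallback (#7866)
    })
    .nullish(),
  finish_reason: z.string().nullish(),
  // Some providers report usage within each choice in streaming chunks.
  usage: tokenUsageSchema,
});
```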