@ai-sdk/openai 2.0.0-canary.13 → 2.0.0-canary.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +41 -0
- package/dist/index.d.mts +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.js +116 -25
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +116 -25
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +30 -4
- package/dist/internal/index.d.ts +30 -4
- package/dist/internal/index.js +116 -25
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +116 -25
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/internal/index.d.mts
CHANGED
@@ -11,6 +11,16 @@ declare const openaiProviderOptions: z.ZodObject<{
     * the GPT tokenizer) to an associated bias value from -100 to 100.
     */
    logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
+   /**
+    * Return the log probabilities of the tokens.
+    *
+    * Setting to true will return the log probabilities of the tokens that
+    * were generated.
+    *
+    * Setting to a number will return the log probabilities of the top n
+    * tokens that were generated.
+    */
+   logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
    /**
     * Whether to enable parallel function calling during tool use. Default to true.
     */
@@ -49,6 +59,7 @@ declare const openaiProviderOptions: z.ZodObject<{
 }, "strip", z.ZodTypeAny, {
    user?: string | undefined;
    logitBias?: Record<number, number> | undefined;
+   logprobs?: number | boolean | undefined;
    parallelToolCalls?: boolean | undefined;
    reasoningEffort?: "low" | "medium" | "high" | undefined;
    maxCompletionTokens?: number | undefined;
@@ -59,6 +70,7 @@ declare const openaiProviderOptions: z.ZodObject<{
 }, {
    user?: string | undefined;
    logitBias?: Record<number, number> | undefined;
+   logprobs?: number | boolean | undefined;
    parallelToolCalls?: boolean | undefined;
    reasoningEffort?: "low" | "medium" | "high" | undefined;
    maxCompletionTokens?: number | undefined;
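The new `logprobs` option accepts either a boolean or a number. A minimal usage sketch, assuming the canary AI SDK's `generateText` helper (model id and prompt are illustrative):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Request log probabilities for the top 2 candidate tokens per position.
// `logprobs: true` would return probabilities for the generated tokens only.
const result = await generateText({
  model: openai('gpt-4o-mini'),
  prompt: 'Hello!',
  providerOptions: { openai: { logprobs: 2 } },
});

// The provider surfaces the values through provider metadata
// (see the providerMetadata.openai.logprobs changes in index.js below).
console.log(result.providerMetadata?.openai?.logprobs);
```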
@@ -82,10 +94,12 @@ type OpenAIChatConfig = {
 declare class OpenAIChatLanguageModel implements LanguageModelV2 {
    readonly specificationVersion = "v2";
    readonly modelId: OpenAIChatModelId;
+   readonly supportedUrls: {
+       'image/*': RegExp[];
+   };
    private readonly config;
    constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig);
    get provider(): string;
-   getSupportedUrls(): Promise<Record<string, RegExp[]>>;
    private getArgs;
    doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
    doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
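Across all model classes, the async `getSupportedUrls()` method is replaced by a synchronous readonly `supportedUrls` property. A sketch of how a caller checks URL support after this change (the URL is illustrative):

```ts
import { openai } from '@ai-sdk/openai';

const model = openai.chat('gpt-4o');

// supportedUrls is now a plain readonly property instead of an async method:
const imagePatterns = model.supportedUrls['image/*'] ?? [];
const canPassUrl = imagePatterns.some(pattern =>
  pattern.test('https://example.com/cat.png'),
);
```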
@@ -121,14 +135,26 @@ declare const openaiCompletionProviderOptions: z.ZodObject<{
    monitor and detect abuse. Learn more.
    */
    user: z.ZodOptional<z.ZodString>;
+   /**
+   Return the log probabilities of the tokens. Including logprobs will increase
+   the response size and can slow down response times. However, it can
+   be useful to better understand how the model is behaving.
+   Setting to true will return the log probabilities of the tokens that
+   were generated.
+   Setting to a number will return the log probabilities of the top n
+   tokens that were generated.
+   */
+   logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
 }, "strip", z.ZodTypeAny, {
    user?: string | undefined;
    logitBias?: Record<string, number> | undefined;
+   logprobs?: number | boolean | undefined;
    echo?: boolean | undefined;
    suffix?: string | undefined;
 }, {
    user?: string | undefined;
    logitBias?: Record<string, number> | undefined;
+   logprobs?: number | boolean | undefined;
    echo?: boolean | undefined;
    suffix?: string | undefined;
 }>;
@@ -151,7 +177,7 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
    private get providerOptionsName();
    constructor(modelId: OpenAICompletionModelId, config: OpenAICompletionConfig);
    get provider(): string;
-   getSupportedUrls(): Promise<Record<string, RegExp[]>>;
+   readonly supportedUrls: Record<string, RegExp[]>;
    private getArgs;
    doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
    doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
@@ -220,7 +246,7 @@ declare class OpenAIImageModel implements ImageModelV2 {
    readonly modelId: OpenAIImageModelId;
    private readonly settings;
    private readonly config;
-   readonly specificationVersion = "
+   readonly specificationVersion = "v2";
    get maxImagesPerCall(): number;
    get provider(): string;
    constructor(modelId: OpenAIImageModelId, settings: OpenAIImageSettings, config: OpenAIImageModelConfig);
@@ -321,7 +347,7 @@ declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
    readonly modelId: OpenAIResponsesModelId;
    private readonly config;
    constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
-   getSupportedUrls(): Promise<Record<string, RegExp[]>>;
+   readonly supportedUrls: Record<string, RegExp[]>;
    get provider(): string;
    private getArgs;
    doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
package/dist/internal/index.d.ts
CHANGED
@@ -11,6 +11,16 @@ declare const openaiProviderOptions: z.ZodObject<{
     * the GPT tokenizer) to an associated bias value from -100 to 100.
     */
    logitBias: z.ZodOptional<z.ZodRecord<z.ZodNumber, z.ZodNumber>>;
+   /**
+    * Return the log probabilities of the tokens.
+    *
+    * Setting to true will return the log probabilities of the tokens that
+    * were generated.
+    *
+    * Setting to a number will return the log probabilities of the top n
+    * tokens that were generated.
+    */
+   logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
    /**
     * Whether to enable parallel function calling during tool use. Default to true.
     */
@@ -49,6 +59,7 @@ declare const openaiProviderOptions: z.ZodObject<{
 }, "strip", z.ZodTypeAny, {
    user?: string | undefined;
    logitBias?: Record<number, number> | undefined;
+   logprobs?: number | boolean | undefined;
    parallelToolCalls?: boolean | undefined;
    reasoningEffort?: "low" | "medium" | "high" | undefined;
    maxCompletionTokens?: number | undefined;
@@ -59,6 +70,7 @@ declare const openaiProviderOptions: z.ZodObject<{
 }, {
    user?: string | undefined;
    logitBias?: Record<number, number> | undefined;
+   logprobs?: number | boolean | undefined;
    parallelToolCalls?: boolean | undefined;
    reasoningEffort?: "low" | "medium" | "high" | undefined;
    maxCompletionTokens?: number | undefined;
@@ -82,10 +94,12 @@ type OpenAIChatConfig = {
 declare class OpenAIChatLanguageModel implements LanguageModelV2 {
    readonly specificationVersion = "v2";
    readonly modelId: OpenAIChatModelId;
+   readonly supportedUrls: {
+       'image/*': RegExp[];
+   };
    private readonly config;
    constructor(modelId: OpenAIChatModelId, config: OpenAIChatConfig);
    get provider(): string;
-   getSupportedUrls(): Promise<Record<string, RegExp[]>>;
    private getArgs;
    doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
    doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
@@ -121,14 +135,26 @@ declare const openaiCompletionProviderOptions: z.ZodObject<{
    monitor and detect abuse. Learn more.
    */
    user: z.ZodOptional<z.ZodString>;
+   /**
+   Return the log probabilities of the tokens. Including logprobs will increase
+   the response size and can slow down response times. However, it can
+   be useful to better understand how the model is behaving.
+   Setting to true will return the log probabilities of the tokens that
+   were generated.
+   Setting to a number will return the log probabilities of the top n
+   tokens that were generated.
+   */
+   logprobs: z.ZodOptional<z.ZodUnion<[z.ZodBoolean, z.ZodNumber]>>;
 }, "strip", z.ZodTypeAny, {
    user?: string | undefined;
    logitBias?: Record<string, number> | undefined;
+   logprobs?: number | boolean | undefined;
    echo?: boolean | undefined;
    suffix?: string | undefined;
 }, {
    user?: string | undefined;
    logitBias?: Record<string, number> | undefined;
+   logprobs?: number | boolean | undefined;
    echo?: boolean | undefined;
    suffix?: string | undefined;
 }>;
@@ -151,7 +177,7 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
    private get providerOptionsName();
    constructor(modelId: OpenAICompletionModelId, config: OpenAICompletionConfig);
    get provider(): string;
-   getSupportedUrls(): Promise<Record<string, RegExp[]>>;
+   readonly supportedUrls: Record<string, RegExp[]>;
    private getArgs;
    doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
    doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
@@ -220,7 +246,7 @@ declare class OpenAIImageModel implements ImageModelV2 {
    readonly modelId: OpenAIImageModelId;
    private readonly settings;
    private readonly config;
-   readonly specificationVersion = "
+   readonly specificationVersion = "v2";
    get maxImagesPerCall(): number;
    get provider(): string;
    constructor(modelId: OpenAIImageModelId, settings: OpenAIImageSettings, config: OpenAIImageModelConfig);
@@ -321,7 +347,7 @@ declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
    readonly modelId: OpenAIResponsesModelId;
    private readonly config;
    constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
-   getSupportedUrls(): Promise<Record<string, RegExp[]>>;
+   readonly supportedUrls: Record<string, RegExp[]>;
    get provider(): string;
    private getArgs;
    doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
package/dist/internal/index.js
CHANGED
@@ -246,6 +246,16 @@ var openaiProviderOptions = import_zod.z.object({
   * the GPT tokenizer) to an associated bias value from -100 to 100.
   */
  logitBias: import_zod.z.record(import_zod.z.coerce.number(), import_zod.z.number()).optional(),
+ /**
+  * Return the log probabilities of the tokens.
+  *
+  * Setting to true will return the log probabilities of the tokens that
+  * were generated.
+  *
+  * Setting to a number will return the log probabilities of the top n
+  * tokens that were generated.
+  */
+ logprobs: import_zod.z.union([import_zod.z.boolean(), import_zod.z.number()]).optional(),
  /**
   * Whether to enable parallel function calling during tool use. Default to true.
   */
@@ -363,17 +373,15 @@ function prepareTools({
 var OpenAIChatLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
+    this.supportedUrls = {
+      "image/*": [/^https?:\/\/.*$/]
+    };
     this.modelId = modelId;
     this.config = config;
   }
   get provider() {
     return this.config.provider;
   }
-  async getSupportedUrls() {
-    return {
-      "image/*": [/^https?:\/\/.*$/]
-    };
-  }
   async getArgs({
     prompt,
     maxOutputTokens,
@@ -422,6 +430,8 @@ var OpenAIChatLanguageModel = class {
       model: this.modelId,
       // model specific settings:
       logit_bias: openaiOptions.logitBias,
+      logprobs: openaiOptions.logprobs === true || typeof openaiOptions.logprobs === "number" ? true : void 0,
+      top_logprobs: typeof openaiOptions.logprobs === "number" ? openaiOptions.logprobs : typeof openaiOptions.logprobs === "boolean" ? openaiOptions.logprobs ? 0 : void 0 : void 0,
       user: openaiOptions.user,
       parallel_tool_calls: openaiOptions.parallelToolCalls,
       // standardized settings:
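Spelled out, the two ternaries above implement this mapping from the provider option onto the OpenAI chat request fields:

```ts
// Equivalent to the ternaries above: `true` requests logprobs for the
// generated tokens only (top_logprobs: 0), a number requests the top-n
// alternatives, and false/undefined omits both fields.
function mapChatLogprobs(option: boolean | number | undefined): {
  logprobs: true | undefined;
  top_logprobs: number | undefined;
} {
  if (typeof option === 'number') {
    return { logprobs: true, top_logprobs: option };
  }
  if (option === true) {
    return { logprobs: true, top_logprobs: 0 };
  }
  return { logprobs: undefined, top_logprobs: undefined };
}
```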
@@ -494,6 +504,20 @@ var OpenAIChatLanguageModel = class {
         message: "logitBias is not supported for reasoning models"
       });
     }
+    if (baseArgs.logprobs != null) {
+      baseArgs.logprobs = void 0;
+      warnings.push({
+        type: "other",
+        message: "logprobs is not supported for reasoning models"
+      });
+    }
+    if (baseArgs.top_logprobs != null) {
+      baseArgs.top_logprobs = void 0;
+      warnings.push({
+        type: "other",
+        message: "topLogprobs is not supported for reasoning models"
+      });
+    }
     if (baseArgs.max_tokens != null) {
       if (baseArgs.max_completion_tokens == null) {
         baseArgs.max_completion_tokens = baseArgs.max_tokens;
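On reasoning models the new options are stripped rather than rejected, and the caller sees this through the warnings array. A sketch, assuming `generateText` surfaces call warnings as in the canary SDK:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai('o4-mini'),
  prompt: 'Hi',
  providerOptions: { openai: { logprobs: true } },
});

// The request still succeeds; the dropped option is reported instead:
// { type: 'other', message: 'logprobs is not supported for reasoning models' }
console.log(result.warnings);
```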
@@ -529,7 +553,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -579,12 +603,15 @@ var OpenAIChatLanguageModel = class {
     if ((promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null) {
       providerMetadata.openai.cachedPromptTokens = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens;
     }
+    if (((_e = choice.logprobs) == null ? void 0 : _e.content) != null) {
+      providerMetadata.openai.logprobs = choice.logprobs.content;
+    }
     return {
       content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
       usage: {
-        inputTokens: (
-        outputTokens: (
+        inputTokens: (_g = (_f = response.usage) == null ? void 0 : _f.prompt_tokens) != null ? _g : void 0,
+        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.completion_tokens) != null ? _i : void 0
       },
       request: { body },
       response: {
@@ -819,6 +846,20 @@ var openaiChatResponseSchema = import_zod3.z.object({
         ).nullish()
       }),
       index: import_zod3.z.number(),
+      logprobs: import_zod3.z.object({
+        content: import_zod3.z.array(
+          import_zod3.z.object({
+            token: import_zod3.z.string(),
+            logprob: import_zod3.z.number(),
+            top_logprobs: import_zod3.z.array(
+              import_zod3.z.object({
+                token: import_zod3.z.string(),
+                logprob: import_zod3.z.number()
+              })
+            )
+          })
+        ).nullish()
+      }).nullish(),
       finish_reason: import_zod3.z.string().nullish()
     })
   ),
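Per this schema, chat-model entries surfaced under `providerMetadata.openai.logprobs` look like the following (values are illustrative):

```ts
const exampleChatLogprobs = [
  {
    token: 'Hello',
    logprob: -0.0003,
    top_logprobs: [
      { token: 'Hello', logprob: -0.0003 },
      { token: 'Hi', logprob: -8.1 },
    ],
  },
];
```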
@@ -838,7 +879,7 @@ var openaiChatChunkSchema = import_zod3.z.union([
           import_zod3.z.object({
             index: import_zod3.z.number(),
             id: import_zod3.z.string().nullish(),
-            type: import_zod3.z.literal("function").
+            type: import_zod3.z.literal("function").nullish(),
             function: import_zod3.z.object({
               name: import_zod3.z.string().nullish(),
               arguments: import_zod3.z.string().nullish()
@@ -846,7 +887,7 @@ var openaiChatChunkSchema = import_zod3.z.union([
             })
           ).nullish()
         }).nullish(),
-        finish_reason: import_zod3.z.string().
+        finish_reason: import_zod3.z.string().nullish(),
         index: import_zod3.z.number()
       })
     ),
@@ -877,11 +918,23 @@ var reasoningModels = {
   "o1-preview-2024-09-12": {
     systemMessageMode: "remove"
   },
+  o3: {
+    systemMessageMode: "developer"
+  },
+  "o3-2025-04-16": {
+    systemMessageMode: "developer"
+  },
   "o3-mini": {
     systemMessageMode: "developer"
   },
   "o3-mini-2025-01-31": {
     systemMessageMode: "developer"
+  },
+  "o4-mini": {
+    systemMessageMode: "developer"
+  },
+  "o4-mini-2025-04-16": {
+    systemMessageMode: "developer"
   }
 };
 
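`o3`, `o4-mini`, and their dated snapshots are now registered as reasoning models with `systemMessageMode: "developer"`, so a system prompt is forwarded as a `developer` message. Illustrative usage (the prompt is made up; `generateText` is the standard SDK entry point):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// The system prompt below is sent to the API with role "developer",
// because o4-mini is configured with systemMessageMode: "developer".
const { text } = await generateText({
  model: openai('o4-mini'),
  system: 'Answer in one sentence.',
  prompt: 'Why is the sky blue?',
});
```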
@@ -994,13 +1047,26 @@ var openaiCompletionProviderOptions = import_zod4.z.object({
   A unique identifier representing your end-user, which can help OpenAI to
   monitor and detect abuse. Learn more.
   */
-  user: import_zod4.z.string().optional()
+  user: import_zod4.z.string().optional(),
+  /**
+  Return the log probabilities of the tokens. Including logprobs will increase
+  the response size and can slow down response times. However, it can
+  be useful to better understand how the model is behaving.
+  Setting to true will return the log probabilities of the tokens that
+  were generated.
+  Setting to a number will return the log probabilities of the top n
+  tokens that were generated.
+  */
+  logprobs: import_zod4.z.union([import_zod4.z.boolean(), import_zod4.z.number()]).optional()
 });
 
 // src/openai-completion-language-model.ts
 var OpenAICompletionLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
+    this.supportedUrls = {
+      // No URLs are supported for completion models.
+    };
     this.modelId = modelId;
     this.config = config;
   }
@@ -1010,11 +1076,6 @@ var OpenAICompletionLanguageModel = class {
   get provider() {
     return this.config.provider;
   }
-  async getSupportedUrls() {
-    return {
-      // no supported urls for completion models
-    };
-  }
   async getArgs({
     prompt,
     maxOutputTokens,
@@ -1068,6 +1129,7 @@ var OpenAICompletionLanguageModel = class {
       // model specific settings:
       echo: openaiOptions.echo,
       logit_bias: openaiOptions.logitBias,
+      logprobs: (openaiOptions == null ? void 0 : openaiOptions.logprobs) === true ? 0 : (openaiOptions == null ? void 0 : openaiOptions.logprobs) === false ? void 0 : openaiOptions == null ? void 0 : openaiOptions.logprobs,
       suffix: openaiOptions.suffix,
       user: openaiOptions.user,
       // standardized settings:
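Note that the legacy completions endpoint takes a single numeric `logprobs` field, so the mapping differs from the chat model:

```ts
// Equivalent to the ternary above: `true` becomes 0 (logprobs for the
// generated tokens only), a number passes through as top-n, and
// false/undefined omits the field.
function mapCompletionLogprobs(
  option: boolean | number | undefined,
): number | undefined {
  if (option === true) return 0;
  if (option === false) return undefined;
  return option;
}
```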
@@ -1106,6 +1168,10 @@ var OpenAICompletionLanguageModel = class {
       fetch: this.config.fetch
     });
     const choice = response.choices[0];
+    const providerMetadata = { openai: {} };
+    if (choice.logprobs != null) {
+      providerMetadata.openai.logprobs = choice.logprobs;
+    }
     return {
       content: [{ type: "text", text: choice.text }],
       usage: {
@@ -1119,6 +1185,7 @@ var OpenAICompletionLanguageModel = class {
         headers: responseHeaders,
         body: rawResponse
       },
+      providerMetadata,
       warnings
     };
   }
@@ -1145,6 +1212,7 @@ var OpenAICompletionLanguageModel = class {
       fetch: this.config.fetch
     });
     let finishReason = "unknown";
+    const providerMetadata = { openai: {} };
     const usage = {
       inputTokens: void 0,
       outputTokens: void 0
@@ -1183,6 +1251,9 @@ var OpenAICompletionLanguageModel = class {
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
             finishReason = mapOpenAIFinishReason(choice.finish_reason);
           }
+          if ((choice == null ? void 0 : choice.logprobs) != null) {
+            providerMetadata.openai.logprobs = choice.logprobs;
+          }
           if ((choice == null ? void 0 : choice.text) != null) {
             controller.enqueue({
               type: "text",
@@ -1194,6 +1265,7 @@ var OpenAICompletionLanguageModel = class {
           controller.enqueue({
             type: "finish",
             finishReason,
+            providerMetadata,
             usage
           });
         }
@@ -1211,7 +1283,12 @@ var openaiCompletionResponseSchema = import_zod5.z.object({
   choices: import_zod5.z.array(
     import_zod5.z.object({
       text: import_zod5.z.string(),
-      finish_reason: import_zod5.z.string()
+      finish_reason: import_zod5.z.string(),
+      logprobs: import_zod5.z.object({
+        tokens: import_zod5.z.array(import_zod5.z.string()),
+        token_logprobs: import_zod5.z.array(import_zod5.z.number()),
+        top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
+      }).nullish()
     })
   ),
   usage: import_zod5.z.object({
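Per this schema, completion-model logprobs surfaced under `providerMetadata.openai.logprobs` keep the legacy endpoint's columnar shape (values are illustrative):

```ts
const exampleCompletionLogprobs = {
  tokens: ['Hello', ' world'],
  token_logprobs: [-0.01, -0.52],
  top_logprobs: [{ Hello: -0.01 }, { ' world': -0.52, '!': -1.9 }],
};
```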
@@ -1228,7 +1305,12 @@ var openaiCompletionChunkSchema = import_zod5.z.union([
       import_zod5.z.object({
         text: import_zod5.z.string(),
         finish_reason: import_zod5.z.string().nullish(),
-        index: import_zod5.z.number()
+        index: import_zod5.z.number(),
+        logprobs: import_zod5.z.object({
+          tokens: import_zod5.z.array(import_zod5.z.string()),
+          token_logprobs: import_zod5.z.array(import_zod5.z.number()),
+          top_logprobs: import_zod5.z.array(import_zod5.z.record(import_zod5.z.string(), import_zod5.z.number())).nullish()
+        }).nullish()
       })
     ),
     usage: import_zod5.z.object({
@@ -1345,7 +1427,7 @@ var OpenAIImageModel = class {
     this.modelId = modelId;
     this.settings = settings;
     this.config = config;
-    this.specificationVersion = "
+    this.specificationVersion = "v2";
   }
   get maxImagesPerCall() {
     var _a, _b;
@@ -1405,12 +1487,23 @@ var OpenAIImageModel = class {
         timestamp: currentDate,
         modelId: this.modelId,
         headers: responseHeaders
+      },
+      providerMetadata: {
+        openai: {
+          images: response.data.map(
+            (item) => item.revised_prompt ? {
+              revisedPrompt: item.revised_prompt
+            } : null
+          )
+        }
       }
     };
   }
 };
 var openaiImageResponseSchema = import_zod8.z.object({
-  data: import_zod8.z.array(
+  data: import_zod8.z.array(
+    import_zod8.z.object({ b64_json: import_zod8.z.string(), revised_prompt: import_zod8.z.string().optional() })
+  )
 });
 
 // src/openai-transcription-model.ts
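The image model now parses `revised_prompt` from the response and exposes it per image. A sketch of reading it, assuming the canary `experimental_generateImage` helper forwards provider metadata:

```ts
import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

const { providerMetadata } = await generateImage({
  model: openai.image('dall-e-3'),
  prompt: 'A watercolor fox',
});

// One entry per generated image; entries without a revised prompt are null.
console.log(providerMetadata?.openai?.images?.[0]?.revisedPrompt);
```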
@@ -1912,13 +2005,11 @@ function prepareResponsesTools({
 var OpenAIResponsesLanguageModel = class {
   constructor(modelId, config) {
     this.specificationVersion = "v2";
-    this.modelId = modelId;
-    this.config = config;
-  }
-  async getSupportedUrls() {
-    return {
+    this.supportedUrls = {
       "image/*": [/^https?:\/\/.*$/]
     };
+    this.modelId = modelId;
+    this.config = config;
   }
   get provider() {
     return this.config.provider;