@llmgateway/ai-sdk-provider 2.1.0 → 2.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +5 -8
- package/dist/index.d.ts +5 -8
- package/dist/index.js +17 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +17 -1
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +61 -65
- package/dist/internal/index.d.ts +61 -65
- package/dist/internal/index.js +43 -27
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +43 -27
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.d.mts
CHANGED

@@ -57,6 +57,65 @@ type LLMGatewayUsageAccounting = {
     };
 };

+type LLMGatewayCompletionSettings = {
+    /**
+    Modify the likelihood of specified tokens appearing in the completion.
+
+    Accepts a JSON object that maps tokens (specified by their token ID in
+    the GPT tokenizer) to an associated bias value from -100 to 100. You
+    can use this tokenizer tool to convert text to token IDs. Mathematically,
+    the bias is added to the logits generated by the model prior to sampling.
+    The exact effect will vary per model, but values between -1 and 1 should
+    decrease or increase likelihood of selection; values like -100 or 100
+    should result in a ban or exclusive selection of the relevant token.
+
+    As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+    token from being generated.
+    */
+    logitBias?: Record<number, number>;
+    /**
+    Return the log probabilities of the tokens. Including logprobs will increase
+    the response size and can slow down response times. However, it can
+    be useful to better understand how the model is behaving.
+
+    Setting to true will return the log probabilities of the tokens that
+    were generated.
+
+    Setting to a number will return the log probabilities of the top n
+    tokens that were generated.
+    */
+    logprobs?: boolean | number;
+    /**
+    The suffix that comes after a completion of inserted text.
+    */
+    suffix?: string;
+} & LLMGatewaySharedSettings;
+
+type LLMGatewayCompletionConfig = {
+    provider: string;
+    compatibility: 'strict' | 'compatible';
+    headers: () => Record<string, string | undefined>;
+    url: (options: {
+        modelId: string;
+        path: string;
+    }) => string;
+    fetch?: typeof fetch;
+    extraBody?: Record<string, unknown>;
+};
+declare class LLMGatewayCompletionLanguageModel implements LanguageModelV2 {
+    readonly specificationVersion: "v2";
+    readonly provider = "llmgateway";
+    readonly modelId: LLMGatewayChatModelId;
+    readonly supportedUrls: Record<string, RegExp[]>;
+    readonly defaultObjectGenerationMode: undefined;
+    readonly settings: LLMGatewayCompletionSettings;
+    private readonly config;
+    constructor(modelId: LLMGatewayChatModelId, settings: LLMGatewayCompletionSettings, config: LLMGatewayCompletionConfig);
+    private getArgs;
+    doGenerate(options: LanguageModelV2CallOptions): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
+    doStream(options: LanguageModelV2CallOptions): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
+}
+
 declare const providers: [{
     readonly id: "llmgateway";
     readonly name: "LLM Gateway";
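For reference, a minimal sketch of constructing the completion model added here. Only the constructor signature and the settings fields come from the declarations above; the internal subpath, the model id, the env var name, and the gateway URL are assumptions for illustration.

import { LLMGatewayCompletionLanguageModel } from "@llmgateway/ai-sdk-provider/internal"; // assumed subpath

const model = new LLMGatewayCompletionLanguageModel(
  "llmgateway/gpt-4o", // hypothetical model id
  {
    logitBias: { 50256: -100 }, // ban <|endoftext|> (GPT tokenizer id 50256), per the JSDoc above
    logprobs: 3,                // return log probabilities for the top 3 tokens
    suffix: "\n",               // text appended after an insert-style completion
  },
  {
    provider: "llmgateway",
    compatibility: "strict",
    headers: () => ({ Authorization: `Bearer ${process.env.LLMGATEWAY_API_KEY}` }), // assumed env var
    url: ({ path }) => `https://api.llmgateway.io/v1${path}`, // assumed base URL
  },
);
// model.doGenerate(options) / model.doStream(options) then accept LanguageModelV2CallOptions.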
@@ -246,7 +305,6 @@ declare const providers: [{
 }];

 type Provider = (typeof providers)[number]['id'];
-type Model = (typeof models)[number]['providers'][number]['modelName'];
 interface ProviderModelMapping {
     providerId: (typeof providers)[number]['id'];
     modelName: string;
@@ -2926,69 +2984,7 @@ declare const models: [{
     readonly jsonOutput: true;
 }];

-type
-type LLMGatewayCompletionModelId = (typeof models)[number]['id'] | `${Provider}/${ProviderModelName$1}`;
-type LLMGatewayCompletionSettings = {
-    /**
-    Modify the likelihood of specified tokens appearing in the completion.
-
-    Accepts a JSON object that maps tokens (specified by their token ID in
-    the GPT tokenizer) to an associated bias value from -100 to 100. You
-    can use this tokenizer tool to convert text to token IDs. Mathematically,
-    the bias is added to the logits generated by the model prior to sampling.
-    The exact effect will vary per model, but values between -1 and 1 should
-    decrease or increase likelihood of selection; values like -100 or 100
-    should result in a ban or exclusive selection of the relevant token.
-
-    As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
-    token from being generated.
-    */
-    logitBias?: Record<number, number>;
-    /**
-    Return the log probabilities of the tokens. Including logprobs will increase
-    the response size and can slow down response times. However, it can
-    be useful to better understand how the model is behaving.
-
-    Setting to true will return the log probabilities of the tokens that
-    were generated.
-
-    Setting to a number will return the log probabilities of the top n
-    tokens that were generated.
-    */
-    logprobs?: boolean | number;
-    /**
-    The suffix that comes after a completion of inserted text.
-    */
-    suffix?: string;
-} & LLMGatewaySharedSettings;
-
-type LLMGatewayCompletionConfig = {
-    provider: string;
-    compatibility: 'strict' | 'compatible';
-    headers: () => Record<string, string | undefined>;
-    url: (options: {
-        modelId: string;
-        path: string;
-    }) => string;
-    fetch?: typeof fetch;
-    extraBody?: Record<string, unknown>;
-};
-declare class LLMGatewayCompletionLanguageModel implements LanguageModelV2 {
-    readonly specificationVersion: "v2";
-    readonly provider = "llmgateway";
-    readonly modelId: LLMGatewayCompletionModelId;
-    readonly supportedUrls: Record<string, RegExp[]>;
-    readonly defaultObjectGenerationMode: undefined;
-    readonly settings: LLMGatewayCompletionSettings;
-    private readonly config;
-    constructor(modelId: LLMGatewayCompletionModelId, settings: LLMGatewayCompletionSettings, config: LLMGatewayCompletionConfig);
-    private getArgs;
-    doGenerate(options: LanguageModelV2CallOptions): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
-    doStream(options: LanguageModelV2CallOptions): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
-}
-
-type ProviderModelName = (typeof models)[number]['providers'][number]['modelName'];
-type LLMGatewayChatModelId = (typeof models)[number]['id'] | `${Provider}/${ProviderModelName}` | 'test-model';
+type LLMGatewayChatModelId = (typeof models)[number]['id'] | `${Provider}/${(typeof models)[number]['id']}` | 'test-model';
 type LLMGatewayChatSettings = {
     /**
     Modify the likelihood of specified tokens appearing in the completion.
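Note the model-id change in this hunk: in 2.1.0 the prefixed form of a chat model id was built from the provider-specific model name (ProviderModelName); 2.2.0 builds it from the gateway model id instead. A sketch of what the narrowed type accepts, assuming "llmgateway" is a provider id and "gpt-4o" is an id in the generated models tuple (subpath assumed):

import type { LLMGatewayChatModelId } from "@llmgateway/ai-sdk-provider/internal"; // assumed subpath

const bare: LLMGatewayChatModelId = "gpt-4o";                // (typeof models)[number]['id']
const prefixed: LLMGatewayChatModelId = "llmgateway/gpt-4o"; // `${Provider}/${(typeof models)[number]['id']}`
const testing: LLMGatewayChatModelId = "test-model";         // literal escape hatch for tests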
@@ -3080,4 +3076,4 @@ declare class LLMGatewayChatLanguageModel implements LanguageModelV2 {
     }>;
 }

-export { LLMGatewayChatLanguageModel, type LLMGatewayChatModelId, type LLMGatewayChatSettings, LLMGatewayCompletionLanguageModel, type
+export { LLMGatewayChatLanguageModel, type LLMGatewayChatModelId, type LLMGatewayChatSettings, LLMGatewayCompletionLanguageModel, type LLMGatewayCompletionSettings, type LLMGatewayProviderOptions, type LLMGatewaySharedSettings, type LLMGatewayUsageAccounting, type ModelDefinition, type Provider, type ProviderModelMapping, type StabilityLevel, models };
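The removed 2.1.0 lines in this file end mid-token (a bare "type" and an export list cut off after "type"), so the shipped declaration appears to have been truncated; 2.2.0 ships the complete export list. That makes the shared types importable directly, roughly like this (subpath assumed):

import type {
  LLMGatewayCompletionSettings,
  LLMGatewayUsageAccounting,
  Provider,
} from "@llmgateway/ai-sdk-provider/internal"; // assumed subpath

const settings: LLMGatewayCompletionSettings = { logprobs: true };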
package/dist/internal/index.d.ts
CHANGED

(The hunks for this file are identical to the package/dist/internal/index.d.mts hunks shown above; the CJS and ESM declaration files changed in lockstep.)
package/dist/internal/index.js
CHANGED

@@ -53,6 +53,32 @@ __export(index_exports, {
 });
 module.exports = __toCommonJS(index_exports);

+// src/schemas/reasoning-details.ts
+var import_v4 = require("zod/v4");
+var ReasoningDetailSummarySchema = import_v4.z.object({
+  type: import_v4.z.literal("reasoning.summary" /* Summary */),
+  summary: import_v4.z.string()
+});
+var ReasoningDetailEncryptedSchema = import_v4.z.object({
+  type: import_v4.z.literal("reasoning.encrypted" /* Encrypted */),
+  data: import_v4.z.string()
+});
+var ReasoningDetailTextSchema = import_v4.z.object({
+  type: import_v4.z.literal("reasoning.text" /* Text */),
+  text: import_v4.z.string().nullish(),
+  signature: import_v4.z.string().nullish()
+});
+var ReasoningDetailUnionSchema = import_v4.z.union([
+  ReasoningDetailSummarySchema,
+  ReasoningDetailEncryptedSchema,
+  ReasoningDetailTextSchema
+]);
+var ReasoningDetailsWithUnknownSchema = import_v4.z.union([
+  ReasoningDetailUnionSchema,
+  import_v4.z.unknown().transform(() => null)
+]);
+var ReasoningDetailArraySchema = import_v4.z.array(ReasoningDetailsWithUnknownSchema).transform((d) => d.filter((d2) => !!d2));
+
 // node_modules/.pnpm/@ai-sdk+provider@2.0.0-beta.1/node_modules/@ai-sdk/provider/dist/index.mjs
 var marker = "vercel.ai.error";
 var symbol = Symbol.for(marker);
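This hunk hoists the reasoning-detail schemas to the top of the bundle (the same block is removed from its old position in the next hunk). The union with z.unknown().transform(() => null) plus the array-level filter makes parsing tolerant: entries with an unrecognized shape are dropped instead of failing the whole parse. A standalone sketch of that behavior with zod v4:

import { z } from "zod/v4";

const Text = z.object({
  type: z.literal("reasoning.text"),
  text: z.string().nullish(),
  signature: z.string().nullish(),
});
// Any entry that matches no known shape falls through to the second
// union branch and parses as null...
const WithUnknown = z.union([Text, z.unknown().transform(() => null)]);
// ...and the array transform filters those nulls out.
const Details = z.array(WithUnknown).transform((d) => d.filter((x) => !!x));

console.log(Details.parse([
  { type: "reasoning.text", text: "step 1" },
  { type: "reasoning.future-kind" }, // unknown shape: dropped, not an error
]));
// -> [ { type: 'reasoning.text', text: 'step 1' } ]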
@@ -875,32 +901,6 @@ function convertUint8ArrayToBase64(array) {
   return btoa(latin1string);
 }

-// src/schemas/reasoning-details.ts
-var import_v4 = require("zod/v4");
-var ReasoningDetailSummarySchema = import_v4.z.object({
-  type: import_v4.z.literal("reasoning.summary" /* Summary */),
-  summary: import_v4.z.string()
-});
-var ReasoningDetailEncryptedSchema = import_v4.z.object({
-  type: import_v4.z.literal("reasoning.encrypted" /* Encrypted */),
-  data: import_v4.z.string()
-});
-var ReasoningDetailTextSchema = import_v4.z.object({
-  type: import_v4.z.literal("reasoning.text" /* Text */),
-  text: import_v4.z.string().nullish(),
-  signature: import_v4.z.string().nullish()
-});
-var ReasoningDetailUnionSchema = import_v4.z.union([
-  ReasoningDetailSummarySchema,
-  ReasoningDetailEncryptedSchema,
-  ReasoningDetailTextSchema
-]);
-var ReasoningDetailsWithUnknownSchema = import_v4.z.union([
-  ReasoningDetailUnionSchema,
-  import_v4.z.unknown().transform(() => null)
-]);
-var ReasoningDetailArraySchema = import_v4.z.array(ReasoningDetailsWithUnknownSchema).transform((d) => d.filter((d2) => !!d2));
-
 // src/schemas/error-response.ts
 var import_v42 = require("zod/v4");
 var LLMGatewayErrorResponseSchema = import_v42.z.object({
@@ -1310,6 +1310,7 @@ var LLMGatewayChatLanguageModel = class {
     tools,
     toolChoice
   }) {
+    var _a15;
     const baseArgs = __spreadValues(__spreadValues({
       // model id:
       model: this.modelId,
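The lone var _a15; hoisted here pairs with the new branch in the next hunk; it looks like the bundler's lowering of a nullish-coalescing expression (an assumption about the build setup, not confirmed by the package source). Roughly:

// Source-level reading of the emitted pattern (assumed):
const strict = responseFormat.schema.strict ?? true;
// compiles to:
// var _a15;
// const strict = (_a15 = responseFormat.schema.strict) != null ? _a15 : true;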
@@ -1338,6 +1339,19 @@ var LLMGatewayChatLanguageModel = class {
       usage: this.settings.usage
     }, this.config.extraBody), this.settings.extraBody);
     if ((responseFormat == null ? void 0 : responseFormat.type) === "json") {
+      if ("schema" in responseFormat && responseFormat.schema) {
+        return __spreadProps(__spreadValues({}, baseArgs), {
+          response_format: {
+            type: "json_schema",
+            json_schema: {
+              name: responseFormat.schema.name || "response",
+              description: responseFormat.schema.description,
+              schema: responseFormat.schema,
+              strict: (_a15 = responseFormat.schema.strict) != null ? _a15 : true
+            }
+          }
+        });
+      }
       return __spreadProps(__spreadValues({}, baseArgs), {
         response_format: { type: "json_object" }
       });
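With this branch, a JSON response format that carries a schema is forwarded as an OpenAI-style json_schema response_format (with strict defaulting to true) instead of the bare json_object mode. A hedged sketch of the request body this produces; the field names come from the diff above, while the input shape is an assumption:

// Hypothetical input from the AI SDK call:
const responseFormat = {
  type: "json" as const,
  schema: {
    type: "object",
    properties: { answer: { type: "string" } },
    required: ["answer"],
  } as { name?: string; description?: string; strict?: boolean; [k: string]: unknown },
};

// Shape of what getArgs now returns for it:
const args = {
  // ...baseArgs (model, messages, sampling settings, ...)
  response_format: {
    type: "json_schema",
    json_schema: {
      name: responseFormat.schema.name || "response", // falls back to "response"
      description: responseFormat.schema.description,
      schema: responseFormat.schema,
      strict: responseFormat.schema.strict ?? true,   // strict unless explicitly disabled
    },
  },
};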
@@ -1430,7 +1444,9 @@ var LLMGatewayChatLanguageModel = class {
         }
       }
       return null;
-    }).filter(
+    }).filter(
+      (p) => p !== null
+    ) : choice.message.reasoning ? [
      {
        type: "reasoning",
        text: choice.message.reasoning