@llmgateway/ai-sdk-provider 2.1.0 → 2.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -57,6 +57,65 @@ type LLMGatewayUsageAccounting = {
  };
  };
 
+ type LLMGatewayCompletionSettings = {
+ /**
+ Modify the likelihood of specified tokens appearing in the completion.
+
+ Accepts a JSON object that maps tokens (specified by their token ID in
+ the GPT tokenizer) to an associated bias value from -100 to 100. You
+ can use this tokenizer tool to convert text to token IDs. Mathematically,
+ the bias is added to the logits generated by the model prior to sampling.
+ The exact effect will vary per model, but values between -1 and 1 should
+ decrease or increase likelihood of selection; values like -100 or 100
+ should result in a ban or exclusive selection of the relevant token.
+
+ As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
+ token from being generated.
+ */
+ logitBias?: Record<number, number>;
+ /**
+ Return the log probabilities of the tokens. Including logprobs will increase
+ the response size and can slow down response times. However, it can
+ be useful to better understand how the model is behaving.
+
+ Setting to true will return the log probabilities of the tokens that
+ were generated.
+
+ Setting to a number will return the log probabilities of the top n
+ tokens that were generated.
+ */
+ logprobs?: boolean | number;
+ /**
+ The suffix that comes after a completion of inserted text.
+ */
+ suffix?: string;
+ } & LLMGatewaySharedSettings;
+
+ type LLMGatewayCompletionConfig = {
+ provider: string;
+ compatibility: 'strict' | 'compatible';
+ headers: () => Record<string, string | undefined>;
+ url: (options: {
+ modelId: string;
+ path: string;
+ }) => string;
+ fetch?: typeof fetch;
+ extraBody?: Record<string, unknown>;
+ };
+ declare class LLMGatewayCompletionLanguageModel implements LanguageModelV2 {
+ readonly specificationVersion: "v2";
+ readonly provider = "llmgateway";
+ readonly modelId: LLMGatewayChatModelId;
+ readonly supportedUrls: Record<string, RegExp[]>;
+ readonly defaultObjectGenerationMode: undefined;
+ readonly settings: LLMGatewayCompletionSettings;
+ private readonly config;
+ constructor(modelId: LLMGatewayChatModelId, settings: LLMGatewayCompletionSettings, config: LLMGatewayCompletionConfig);
+ private getArgs;
+ doGenerate(options: LanguageModelV2CallOptions): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
+ doStream(options: LanguageModelV2CallOptions): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
+ }
+
  declare const providers: [{
  readonly id: "llmgateway";
  readonly name: "LLM Gateway";
@@ -246,7 +305,6 @@ declare const providers: [{
  }];
 
  type Provider = (typeof providers)[number]['id'];
- type Model = (typeof models)[number]['providers'][number]['modelName'];
  interface ProviderModelMapping {
  providerId: (typeof providers)[number]['id'];
  modelName: string;
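2.1.1 drops the exported Model alias entirely (see also the export-list hunk further down). Downstream code that still needs it can rebuild the identical type locally from the models tuple, which remains exported:

import { models } from "@llmgateway/ai-sdk-provider";

// identical to the alias removed in the hunk above
type Model = (typeof models)[number]["providers"][number]["modelName"];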
@@ -2926,69 +2984,7 @@ declare const models: [{
  readonly jsonOutput: true;
  }];
 
- type ProviderModelName$1 = (typeof models)[number]['providers'][number]['modelName'];
- type LLMGatewayCompletionModelId = (typeof models)[number]['id'] | `${Provider}/${ProviderModelName$1}`;
- type LLMGatewayCompletionSettings = {
- /**
- Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in
- the GPT tokenizer) to an associated bias value from -100 to 100. You
- can use this tokenizer tool to convert text to token IDs. Mathematically,
- the bias is added to the logits generated by the model prior to sampling.
- The exact effect will vary per model, but values between -1 and 1 should
- decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
- token from being generated.
- */
- logitBias?: Record<number, number>;
- /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
- The suffix that comes after a completion of inserted text.
- */
- suffix?: string;
- } & LLMGatewaySharedSettings;
-
- type LLMGatewayCompletionConfig = {
- provider: string;
- compatibility: 'strict' | 'compatible';
- headers: () => Record<string, string | undefined>;
- url: (options: {
- modelId: string;
- path: string;
- }) => string;
- fetch?: typeof fetch;
- extraBody?: Record<string, unknown>;
- };
- declare class LLMGatewayCompletionLanguageModel implements LanguageModelV2 {
- readonly specificationVersion: "v2";
- readonly provider = "llmgateway";
- readonly modelId: LLMGatewayCompletionModelId;
- readonly supportedUrls: Record<string, RegExp[]>;
- readonly defaultObjectGenerationMode: undefined;
- readonly settings: LLMGatewayCompletionSettings;
- private readonly config;
- constructor(modelId: LLMGatewayCompletionModelId, settings: LLMGatewayCompletionSettings, config: LLMGatewayCompletionConfig);
- private getArgs;
- doGenerate(options: LanguageModelV2CallOptions): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
- doStream(options: LanguageModelV2CallOptions): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
- }
-
- type ProviderModelName = (typeof models)[number]['providers'][number]['modelName'];
- type LLMGatewayChatModelId = (typeof models)[number]['id'] | `${Provider}/${ProviderModelName}` | 'test-model';
+ type LLMGatewayChatModelId = (typeof models)[number]['id'] | `${Provider}/${(typeof models)[number]['id']}` | 'test-model';
  type LLMGatewayChatSettings = {
  /**
  Modify the likelihood of specified tokens appearing in the completion.
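The net effect of this hunk: the completion declarations move to the top of the file (as shown in the first hunk), LLMGatewayCompletionModelId and the ProviderModelName helpers disappear, and the `provider/model` half of LLMGatewayChatModelId now uses gateway model ids rather than provider-specific model names. A type-level sketch of what compiles after the change; the "openai/gpt-4o" id is illustrative, since the real unions come from the models and providers tuples not shown here:

import type { LLMGatewayChatModelId } from "@llmgateway/ai-sdk-provider";

const a: LLMGatewayChatModelId = "test-model";       // literal member of the union
// const b: LLMGatewayChatModelId = "openai/gpt-4o"; // valid only if "openai" is a provider id
//                                                   // and "gpt-4o" a gateway model id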
@@ -3080,4 +3076,4 @@ declare class LLMGatewayChatLanguageModel implements LanguageModelV2 {
  }>;
  }
 
- export { LLMGatewayChatLanguageModel, type LLMGatewayChatModelId, type LLMGatewayChatSettings, LLMGatewayCompletionLanguageModel, type LLMGatewayCompletionModelId, type LLMGatewayCompletionSettings, type LLMGatewayProviderOptions, type LLMGatewaySharedSettings, type LLMGatewayUsageAccounting, type Model, type ModelDefinition, type Provider, type ProviderModelMapping, type StabilityLevel, models };
+ export { LLMGatewayChatLanguageModel, type LLMGatewayChatModelId, type LLMGatewayChatSettings, LLMGatewayCompletionLanguageModel, type LLMGatewayCompletionSettings, type LLMGatewayProviderOptions, type LLMGatewaySharedSettings, type LLMGatewayUsageAccounting, type ModelDefinition, type Provider, type ProviderModelMapping, type StabilityLevel, models };
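Because LLMGatewayCompletionModelId (along with Model, covered earlier) leaves the public surface, importing it is a compile error after upgrading. A hedged migration sketch, on the assumption that LLMGatewayChatModelId is an acceptable stand-in; that matches the new constructor signature above, though the chat union additionally admits 'test-model':

import type { LLMGatewayChatModelId } from "@llmgateway/ai-sdk-provider";

// local stand-in for the removed export
type LLMGatewayCompletionModelId = LLMGatewayChatModelId;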
@@ -53,6 +53,32 @@ __export(index_exports, {
  });
  module.exports = __toCommonJS(index_exports);
 
+ // src/schemas/reasoning-details.ts
+ var import_v4 = require("zod/v4");
+ var ReasoningDetailSummarySchema = import_v4.z.object({
+ type: import_v4.z.literal("reasoning.summary" /* Summary */),
+ summary: import_v4.z.string()
+ });
+ var ReasoningDetailEncryptedSchema = import_v4.z.object({
+ type: import_v4.z.literal("reasoning.encrypted" /* Encrypted */),
+ data: import_v4.z.string()
+ });
+ var ReasoningDetailTextSchema = import_v4.z.object({
+ type: import_v4.z.literal("reasoning.text" /* Text */),
+ text: import_v4.z.string().nullish(),
+ signature: import_v4.z.string().nullish()
+ });
+ var ReasoningDetailUnionSchema = import_v4.z.union([
+ ReasoningDetailSummarySchema,
+ ReasoningDetailEncryptedSchema,
+ ReasoningDetailTextSchema
+ ]);
+ var ReasoningDetailsWithUnknownSchema = import_v4.z.union([
+ ReasoningDetailUnionSchema,
+ import_v4.z.unknown().transform(() => null)
+ ]);
+ var ReasoningDetailArraySchema = import_v4.z.array(ReasoningDetailsWithUnknownSchema).transform((d) => d.filter((d2) => !!d2));
+
  // node_modules/.pnpm/@ai-sdk+provider@2.0.0-beta.1/node_modules/@ai-sdk/provider/dist/index.mjs
  var marker = "vercel.ai.error";
  var symbol = Symbol.for(marker);
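This block is not new logic: the matching removal in the next hunk shows it was hoisted above the vendored @ai-sdk/provider code in the bundle. Its behavior is easiest to see in isolation: the trailing union member turns any unrecognized entry into null, and the array transform filters those out, so parsing reasoning details never rejects on unknown shapes. A self-contained sketch of that pattern, simplified to one known variant:

import { z } from "zod/v4";

const Text = z.object({
  type: z.literal("reasoning.text"),
  text: z.string().nullish(),
  signature: z.string().nullish(),
});
// unknown entries become null instead of failing the whole parse...
const Tolerant = z.union([Text, z.unknown().transform(() => null)]);
// ...and are then dropped from the parsed result
const Details = z.array(Tolerant).transform((items) => items.filter((x) => !!x));

console.log(
  Details.parse([
    { type: "reasoning.text", text: "step 1" },
    { type: "reasoning.someFutureVariant", data: 42 }, // silently dropped
  ]),
); // -> [ { type: "reasoning.text", text: "step 1" } ]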
@@ -875,32 +901,6 @@ function convertUint8ArrayToBase64(array) {
  return btoa(latin1string);
  }
 
- // src/schemas/reasoning-details.ts
- var import_v4 = require("zod/v4");
- var ReasoningDetailSummarySchema = import_v4.z.object({
- type: import_v4.z.literal("reasoning.summary" /* Summary */),
- summary: import_v4.z.string()
- });
- var ReasoningDetailEncryptedSchema = import_v4.z.object({
- type: import_v4.z.literal("reasoning.encrypted" /* Encrypted */),
- data: import_v4.z.string()
- });
- var ReasoningDetailTextSchema = import_v4.z.object({
- type: import_v4.z.literal("reasoning.text" /* Text */),
- text: import_v4.z.string().nullish(),
- signature: import_v4.z.string().nullish()
- });
- var ReasoningDetailUnionSchema = import_v4.z.union([
- ReasoningDetailSummarySchema,
- ReasoningDetailEncryptedSchema,
- ReasoningDetailTextSchema
- ]);
- var ReasoningDetailsWithUnknownSchema = import_v4.z.union([
- ReasoningDetailUnionSchema,
- import_v4.z.unknown().transform(() => null)
- ]);
- var ReasoningDetailArraySchema = import_v4.z.array(ReasoningDetailsWithUnknownSchema).transform((d) => d.filter((d2) => !!d2));
-
  // src/schemas/error-response.ts
  var import_v42 = require("zod/v4");
  var LLMGatewayErrorResponseSchema = import_v42.z.object({
@@ -1430,7 +1430,9 @@ var LLMGatewayChatLanguageModel = class {
  }
  }
  return null;
- }).filter((p) => p !== null) : choice.message.reasoning ? [
+ }).filter(
+ (p) => p !== null
+ ) : choice.message.reasoning ? [
  {
  type: "reasoning",
  text: choice.message.reasoning