@llmgateway/ai-sdk-provider 2.1.0 → 2.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +5 -8
- package/dist/index.d.ts +5 -8
- package/dist/index.js +3 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +3 -1
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +61 -65
- package/dist/internal/index.d.ts +61 -65
- package/dist/internal/index.js +29 -27
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +29 -27
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.d.mts
CHANGED
|
@@ -2736,8 +2736,7 @@ declare const models: [{
|
|
|
2736
2736
|
readonly jsonOutput: true;
|
|
2737
2737
|
}];
|
|
2738
2738
|
|
|
2739
|
-
type ProviderModelName$1 = (typeof models)[number]['providers'][number]['modelName'];
|
|
2740
|
-
type LLMGatewayChatModelId = (typeof models)[number]['id'] | `${Provider}/${ProviderModelName$1}` | 'test-model';
|
|
2739
|
+
type LLMGatewayChatModelId = (typeof models)[number]['id'] | `${Provider}/${(typeof models)[number]['id']}` | 'test-model';
|
|
2741
2740
|
type LLMGatewayChatSettings = {
|
|
2742
2741
|
/**
|
|
2743
2742
|
Modify the likelihood of specified tokens appearing in the completion.
|
|
@@ -2833,8 +2832,6 @@ type LLMGatewayUsageAccounting = {
|
|
|
2833
2832
|
};
|
|
2834
2833
|
};
|
|
2835
2834
|
|
|
2836
|
-
type ProviderModelName = (typeof models)[number]['providers'][number]['modelName'];
|
|
2837
|
-
type LLMGatewayCompletionModelId = (typeof models)[number]['id'] | `${Provider}/${ProviderModelName}`;
|
|
2838
2835
|
type LLMGatewayCompletionSettings = {
|
|
2839
2836
|
/**
|
|
2840
2837
|
Modify the likelihood of specified tokens appearing in the completion.
|
|
@@ -2935,12 +2932,12 @@ type LLMGatewayCompletionConfig = {
|
|
|
2935
2932
|
declare class LLMGatewayCompletionLanguageModel implements LanguageModelV2 {
|
|
2936
2933
|
readonly specificationVersion: "v2";
|
|
2937
2934
|
readonly provider = "llmgateway";
|
|
2938
|
-
readonly modelId: LLMGatewayCompletionModelId;
|
|
2935
|
+
readonly modelId: LLMGatewayChatModelId;
|
|
2939
2936
|
readonly supportedUrls: Record<string, RegExp[]>;
|
|
2940
2937
|
readonly defaultObjectGenerationMode: undefined;
|
|
2941
2938
|
readonly settings: LLMGatewayCompletionSettings;
|
|
2942
2939
|
private readonly config;
|
|
2943
|
-
constructor(modelId: LLMGatewayCompletionModelId, settings: LLMGatewayCompletionSettings, config: LLMGatewayCompletionConfig);
|
|
2940
|
+
constructor(modelId: LLMGatewayChatModelId, settings: LLMGatewayCompletionSettings, config: LLMGatewayCompletionConfig);
|
|
2944
2941
|
private getArgs;
|
|
2945
2942
|
doGenerate(options: LanguageModelV2CallOptions): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
|
|
2946
2943
|
doStream(options: LanguageModelV2CallOptions): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
|
|
@@ -2958,7 +2955,7 @@ interface LLMGatewayProvider extends LanguageModelV2 {
|
|
|
2958
2955
|
/**
|
|
2959
2956
|
Creates an LLMGateway completion model for text generation.
|
|
2960
2957
|
*/
|
|
2961
|
-
completion(modelId: LLMGatewayCompletionModelId, settings?: LLMGatewayCompletionSettings): LLMGatewayCompletionLanguageModel;
|
|
2958
|
+
completion(modelId: LLMGatewayChatModelId, settings?: LLMGatewayCompletionSettings): LLMGatewayCompletionLanguageModel;
|
|
2962
2959
|
}
|
|
2963
2960
|
interface LLMGatewayProviderSettings {
|
|
2964
2961
|
/**
|
|
@@ -3026,7 +3023,7 @@ declare class LLMGateway {
|
|
|
3026
3023
|
constructor(options?: LLMGatewayProviderSettings);
|
|
3027
3024
|
private get baseConfig();
|
|
3028
3025
|
chat(modelId: LLMGatewayChatModelId, settings?: LLMGatewayChatSettings): LLMGatewayChatLanguageModel;
|
|
3029
|
-
completion(modelId: LLMGatewayCompletionModelId, settings?: LLMGatewayCompletionSettings): LLMGatewayCompletionLanguageModel;
|
|
3026
|
+
completion(modelId: LLMGatewayChatModelId, settings?: LLMGatewayCompletionSettings): LLMGatewayCompletionLanguageModel;
|
|
3030
3027
|
}
|
|
3031
3028
|
|
|
3032
3029
|
export { LLMGateway, type LLMGatewayCompletionSettings, type LLMGatewayProvider, type LLMGatewayProviderOptions, type LLMGatewayProviderSettings, type LLMGatewaySharedSettings, type LLMGatewayUsageAccounting, createLLMGateway, llmgateway };
|
package/dist/index.d.ts
CHANGED
|
@@ -2736,8 +2736,7 @@ declare const models: [{
|
|
|
2736
2736
|
readonly jsonOutput: true;
|
|
2737
2737
|
}];
|
|
2738
2738
|
|
|
2739
|
-
type ProviderModelName$1 = (typeof models)[number]['providers'][number]['modelName'];
|
|
2740
|
-
type LLMGatewayChatModelId = (typeof models)[number]['id'] | `${Provider}/${ProviderModelName$1}` | 'test-model';
|
|
2739
|
+
type LLMGatewayChatModelId = (typeof models)[number]['id'] | `${Provider}/${(typeof models)[number]['id']}` | 'test-model';
|
|
2741
2740
|
type LLMGatewayChatSettings = {
|
|
2742
2741
|
/**
|
|
2743
2742
|
Modify the likelihood of specified tokens appearing in the completion.
|
|
@@ -2833,8 +2832,6 @@ type LLMGatewayUsageAccounting = {
|
|
|
2833
2832
|
};
|
|
2834
2833
|
};
|
|
2835
2834
|
|
|
2836
|
-
type ProviderModelName = (typeof models)[number]['providers'][number]['modelName'];
|
|
2837
|
-
type LLMGatewayCompletionModelId = (typeof models)[number]['id'] | `${Provider}/${ProviderModelName}`;
|
|
2838
2835
|
type LLMGatewayCompletionSettings = {
|
|
2839
2836
|
/**
|
|
2840
2837
|
Modify the likelihood of specified tokens appearing in the completion.
|
|
@@ -2935,12 +2932,12 @@ type LLMGatewayCompletionConfig = {
|
|
|
2935
2932
|
declare class LLMGatewayCompletionLanguageModel implements LanguageModelV2 {
|
|
2936
2933
|
readonly specificationVersion: "v2";
|
|
2937
2934
|
readonly provider = "llmgateway";
|
|
2938
|
-
readonly modelId: LLMGatewayCompletionModelId;
|
|
2935
|
+
readonly modelId: LLMGatewayChatModelId;
|
|
2939
2936
|
readonly supportedUrls: Record<string, RegExp[]>;
|
|
2940
2937
|
readonly defaultObjectGenerationMode: undefined;
|
|
2941
2938
|
readonly settings: LLMGatewayCompletionSettings;
|
|
2942
2939
|
private readonly config;
|
|
2943
|
-
constructor(modelId: LLMGatewayCompletionModelId, settings: LLMGatewayCompletionSettings, config: LLMGatewayCompletionConfig);
|
|
2940
|
+
constructor(modelId: LLMGatewayChatModelId, settings: LLMGatewayCompletionSettings, config: LLMGatewayCompletionConfig);
|
|
2944
2941
|
private getArgs;
|
|
2945
2942
|
doGenerate(options: LanguageModelV2CallOptions): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
|
|
2946
2943
|
doStream(options: LanguageModelV2CallOptions): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
|
|
@@ -2958,7 +2955,7 @@ interface LLMGatewayProvider extends LanguageModelV2 {
|
|
|
2958
2955
|
/**
|
|
2959
2956
|
Creates an LLMGateway completion model for text generation.
|
|
2960
2957
|
*/
|
|
2961
|
-
completion(modelId: LLMGatewayCompletionModelId, settings?: LLMGatewayCompletionSettings): LLMGatewayCompletionLanguageModel;
|
|
2958
|
+
completion(modelId: LLMGatewayChatModelId, settings?: LLMGatewayCompletionSettings): LLMGatewayCompletionLanguageModel;
|
|
2962
2959
|
}
|
|
2963
2960
|
interface LLMGatewayProviderSettings {
|
|
2964
2961
|
/**
|
|
@@ -3026,7 +3023,7 @@ declare class LLMGateway {
|
|
|
3026
3023
|
constructor(options?: LLMGatewayProviderSettings);
|
|
3027
3024
|
private get baseConfig();
|
|
3028
3025
|
chat(modelId: LLMGatewayChatModelId, settings?: LLMGatewayChatSettings): LLMGatewayChatLanguageModel;
|
|
3029
|
-
completion(modelId: LLMGatewayCompletionModelId, settings?: LLMGatewayCompletionSettings): LLMGatewayCompletionLanguageModel;
|
|
3026
|
+
completion(modelId: LLMGatewayChatModelId, settings?: LLMGatewayCompletionSettings): LLMGatewayCompletionLanguageModel;
|
|
3030
3027
|
}
|
|
3031
3028
|
|
|
3032
3029
|
export { LLMGateway, type LLMGatewayCompletionSettings, type LLMGatewayProvider, type LLMGatewayProviderOptions, type LLMGatewayProviderSettings, type LLMGatewaySharedSettings, type LLMGatewayUsageAccounting, createLLMGateway, llmgateway };
|
package/dist/index.js
CHANGED
|
@@ -1475,7 +1475,9 @@ var LLMGatewayChatLanguageModel = class {
|
|
|
1475
1475
|
}
|
|
1476
1476
|
}
|
|
1477
1477
|
return null;
|
|
1478
|
-
}).filter(
|
|
1478
|
+
}).filter(
|
|
1479
|
+
(p) => p !== null
|
|
1480
|
+
) : choice.message.reasoning ? [
|
|
1479
1481
|
{
|
|
1480
1482
|
type: "reasoning",
|
|
1481
1483
|
text: choice.message.reasoning
|