@openrouter/ai-sdk-provider 2.6.0 → 2.7.0
This diff reflects the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- package/dist/index.d.mts +34 -0
- package/dist/index.d.ts +34 -0
- package/dist/index.js +31 -28
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +31 -28
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +34 -0
- package/dist/internal/index.d.ts +34 -0
- package/dist/internal/index.js +30 -27
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +30 -27
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.d.mts
CHANGED
@@ -373,6 +373,40 @@ type OpenRouterSharedSettings = OpenRouterProviderOptions & {
         */
        include: boolean;
    };
+   /**
+    * Default temperature for model calls. Controls randomness in the output.
+    * Can be overridden at call time via generateText/streamText options.
+    * Range: 0 to 2, where 0 is deterministic and higher values are more random.
+    */
+   temperature?: number;
+   /**
+    * Default top-p (nucleus sampling) for model calls.
+    * Can be overridden at call time via generateText/streamText options.
+    * Range: 0 to 1.
+    */
+   topP?: number;
+   /**
+    * Default top-k sampling for model calls.
+    * Can be overridden at call time via generateText/streamText options.
+    */
+   topK?: number;
+   /**
+    * Default frequency penalty for model calls.
+    * Can be overridden at call time via generateText/streamText options.
+    * Range: -2 to 2.
+    */
+   frequencyPenalty?: number;
+   /**
+    * Default presence penalty for model calls.
+    * Can be overridden at call time via generateText/streamText options.
+    * Range: -2 to 2.
+    */
+   presencePenalty?: number;
+   /**
+    * Default maximum number of tokens to generate.
+    * Can be overridden at call time via generateText/streamText options.
+    */
+   maxTokens?: number;
 };
 /**
  * Usage accounting response
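
These six optional fields extend OpenRouterSharedSettings, so defaults can now be set once when a model instance is created and overridden per call. A minimal sketch of the intended usage (the two-argument provider call is this package's existing API; the model id and values are illustrative):

  import { createOpenRouter } from "@openrouter/ai-sdk-provider";
  import { generateText } from "ai";

  const openrouter = createOpenRouter({ apiKey: process.env.OPENROUTER_API_KEY });

  // Model-level defaults, new in 2.7.0 (field names from the typings above):
  const model = openrouter("openai/gpt-4o", {
    temperature: 0.7,
    topP: 0.9,
    maxTokens: 1024,
  });

  // Call-level options still take precedence when provided:
  const { text } = await generateText({
    model,
    prompt: "Say hello.",
    temperature: 0, // overrides the model-level 0.7 default
  });
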
package/dist/index.d.ts
CHANGED
@@ -373,6 +373,40 @@ type OpenRouterSharedSettings = OpenRouterProviderOptions & {
         */
        include: boolean;
    };
+   /**
+    * Default temperature for model calls. Controls randomness in the output.
+    * Can be overridden at call time via generateText/streamText options.
+    * Range: 0 to 2, where 0 is deterministic and higher values are more random.
+    */
+   temperature?: number;
+   /**
+    * Default top-p (nucleus sampling) for model calls.
+    * Can be overridden at call time via generateText/streamText options.
+    * Range: 0 to 1.
+    */
+   topP?: number;
+   /**
+    * Default top-k sampling for model calls.
+    * Can be overridden at call time via generateText/streamText options.
+    */
+   topK?: number;
+   /**
+    * Default frequency penalty for model calls.
+    * Can be overridden at call time via generateText/streamText options.
+    * Range: -2 to 2.
+    */
+   frequencyPenalty?: number;
+   /**
+    * Default presence penalty for model calls.
+    * Can be overridden at call time via generateText/streamText options.
+    * Range: -2 to 2.
+    */
+   presencePenalty?: number;
+   /**
+    * Default maximum number of tokens to generate.
+    * Can be overridden at call time via generateText/streamText options.
+    */
+   maxTokens?: number;
 };
 /**
  * Usage accounting response
package/dist/index.js
CHANGED
@@ -3385,7 +3385,7 @@ var OpenRouterChatLanguageModel = class {
     tools,
     toolChoice
   }) {
-    var _a16;
+    var _a16, _b16;
     const baseArgs = __spreadValues(__spreadValues({
       // model id:
       model: this.modelId,
@@ -3396,12 +3396,12 @@ var OpenRouterChatLanguageModel = class {
       top_logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
       user: this.settings.user,
       parallel_tool_calls: this.settings.parallelToolCalls,
-      // standardized settings:
-      max_tokens: maxOutputTokens,
-      temperature,
-      top_p: topP,
-      frequency_penalty: frequencyPenalty,
-      presence_penalty: presencePenalty,
+      // standardized settings (call-level options override model-level settings):
+      max_tokens: maxOutputTokens != null ? maxOutputTokens : this.settings.maxTokens,
+      temperature: temperature != null ? temperature : this.settings.temperature,
+      top_p: topP != null ? topP : this.settings.topP,
+      frequency_penalty: frequencyPenalty != null ? frequencyPenalty : this.settings.frequencyPenalty,
+      presence_penalty: presencePenalty != null ? presencePenalty : this.settings.presencePenalty,
       seed,
       stop: stopSequences,
       response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? responseFormat.schema != null ? {
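
Because the compiled fallback is x != null ? x : fallback (the transpiled form of ??), falsy-but-valid call options such as temperature: 0 still override a model-level default. A small sketch of that behavior (pick is a hypothetical helper, not part of the package):

  const pick = <T>(callValue: T | undefined, settingValue: T | undefined): T | undefined =>
    callValue != null ? callValue : settingValue;

  pick(0, 0.7);         // => 0   — an explicit call option wins even when falsy
  pick(undefined, 0.7); // => 0.7 — the model-level default applies
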
@@ -3414,7 +3414,7 @@ var OpenRouterChatLanguageModel = class {
         description: responseFormat.description
       })
     } : { type: "json_object" } : void 0,
-    top_k: topK,
+    top_k: topK != null ? topK : this.settings.topK,
     // messages:
     messages: convertToOpenRouterChatMessages(prompt),
     // OpenRouter specific settings:
@@ -3435,14 +3435,18 @@ var OpenRouterChatLanguageModel = class {
     const mappedTools = [];
     for (const tool2 of tools) {
       if (tool2.type === "function") {
-        mappedTools.push({
+        const openrouterOptions = (_b16 = tool2.providerOptions) == null ? void 0 : _b16.openrouter;
+        const eagerInputStreaming = openrouterOptions == null ? void 0 : openrouterOptions.eager_input_streaming;
+        mappedTools.push(__spreadValues({
           type: "function",
           function: {
             name: tool2.name,
             description: tool2.description,
             parameters: tool2.inputSchema
           }
-        });
+        }, eagerInputStreaming != null && {
+          eager_input_streaming: eagerInputStreaming
+        }));
       } else if (tool2.type === "provider") {
         mappedTools.push(mapProviderTool(tool2));
       }
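
This hunk reads a per-tool option from the tool's providerOptions and forwards it to the API as eager_input_streaming. A sketch of how a caller might set it, assuming the AI SDK passes tool-level providerOptions through as this mapping expects (the tool name and schema are illustrative):

  import { streamText, tool } from "ai";
  import { z } from "zod";

  const result = streamText({
    model, // an OpenRouter model instance, e.g. created as in the earlier sketch
    prompt: "Draft a README and save it.",
    tools: {
      writeFile: tool({
        description: "Write a file to disk",
        inputSchema: z.object({ path: z.string(), contents: z.string() }),
        providerOptions: {
          // stream partial tool-call arguments eagerly (OpenRouter-specific):
          openrouter: { eager_input_streaming: true },
        },
        execute: async ({ path, contents }) => `wrote ${path} (${contents.length} bytes)`,
      }),
    },
  });
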
@@ -3780,18 +3784,16 @@ var OpenRouterChatLanguageModel = class {
       return;
     }
     const delta = choice.delta;
-    const emitReasoningChunk = (chunkText, providerMetadata) => {
+    const emitReasoningChunk = (chunkText) => {
       if (!reasoningStarted) {
         reasoningId = generateId();
         controller.enqueue({
-          providerMetadata,
           type: "reasoning-start",
           id: reasoningId
         });
         reasoningStarted = true;
       }
       controller.enqueue({
-        providerMetadata,
         type: "reasoning-delta",
         delta: chunkText,
         id: reasoningId || generateId()
@@ -3813,15 +3815,10 @@ var OpenRouterChatLanguageModel = class {
         }
       }
       if (!textStarted) {
-        const reasoningMetadata = {
-          openrouter: {
-            reasoning_details: accumulatedReasoningDetails.map((d) => __spreadValues({}, d))
-          }
-        };
         for (const detail of delta.reasoning_details) {
           switch (detail.type) {
             case "reasoning.text" /* Text */: {
-              emitReasoningChunk(detail.text || "", reasoningMetadata);
+              emitReasoningChunk(detail.text || "");
               break;
             }
             case "reasoning.encrypted" /* Encrypted */: {
@@ -3829,7 +3826,7 @@ var OpenRouterChatLanguageModel = class {
             }
             case "reasoning.summary" /* Summary */: {
               if (detail.summary) {
-                emitReasoningChunk(detail.summary, reasoningMetadata);
+                emitReasoningChunk(detail.summary);
               }
               break;
             }
@@ -4118,6 +4115,12 @@ var OpenRouterChatLanguageModel = class {
     if (accumulatedFileAnnotations.length > 0) {
       openrouterMetadata.annotations = accumulatedFileAnnotations;
     }
+    if (usage.inputTokens.total === void 0 && openrouterUsage.promptTokens !== void 0) {
+      usage.inputTokens.total = openrouterUsage.promptTokens;
+    }
+    if (usage.outputTokens.total === void 0 && openrouterUsage.completionTokens !== void 0) {
+      usage.outputTokens.total = openrouterUsage.completionTokens;
+    }
     usage.raw = rawUsage;
     controller.enqueue({
       type: "finish",
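
Before emitting the finish event, this backfills missing token totals in the normalized usage object from OpenRouter's raw promptTokens/completionTokens counters. The same pattern in isolation (types simplified; field names mirror the compiled code):

  interface TokenBucket { total?: number }

  // Fill in a normalized total only when it is missing and a raw counter exists.
  function backfill(bucket: TokenBucket, raw?: number): void {
    if (bucket.total === undefined && raw !== undefined) {
      bucket.total = raw;
    }
  }

  const inputTokens: TokenBucket = {};
  backfill(inputTokens, 42); // inputTokens.total is now 42
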
@@ -4353,16 +4356,16 @@ var OpenRouterCompletionLanguageModel = class {
       logprobs: typeof this.settings.logprobs === "number" ? this.settings.logprobs : typeof this.settings.logprobs === "boolean" ? this.settings.logprobs ? 0 : void 0 : void 0,
       suffix: this.settings.suffix,
       user: this.settings.user,
-      // standardized settings:
-      max_tokens: maxOutputTokens,
-      temperature,
-      top_p: topP,
-      frequency_penalty: frequencyPenalty,
-      presence_penalty: presencePenalty,
+      // standardized settings (call-level options override model-level settings):
+      max_tokens: maxOutputTokens != null ? maxOutputTokens : this.settings.maxTokens,
+      temperature: temperature != null ? temperature : this.settings.temperature,
+      top_p: topP != null ? topP : this.settings.topP,
+      frequency_penalty: frequencyPenalty != null ? frequencyPenalty : this.settings.frequencyPenalty,
+      presence_penalty: presencePenalty != null ? presencePenalty : this.settings.presencePenalty,
       seed,
       stop: stopSequences,
       response_format: responseFormat,
-      top_k: topK,
+      top_k: topK != null ? topK : this.settings.topK,
       // prompt:
       prompt: completionPrompt,
       // OpenRouter specific settings:
@@ -4932,7 +4935,7 @@ function withUserAgentSuffix2(headers, ...userAgentSuffixParts) {
 }

 // src/version.ts
-var VERSION2 = false ? "0.0.0-test" : "2.6.0";
+var VERSION2 = false ? "0.0.0-test" : "2.7.0";

 // src/provider.ts
 function createOpenRouter(options = {}) {