@openrouter/ai-sdk-provider 2.2.5 → 2.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -2725,11 +2725,12 @@ function convertToOpenRouterChatMessages(prompt) {
2725
2725
  }
2726
2726
  finalReasoningDetails = uniqueDetails.length > 0 ? uniqueDetails : void 0;
2727
2727
  }
2728
+ const effectiveReasoning = reasoning && finalReasoningDetails ? reasoning : void 0;
2728
2729
  messages.push({
2729
2730
  role: "assistant",
2730
2731
  content: text,
2731
2732
  tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
2732
- reasoning: reasoning || void 0,
2733
+ reasoning: effectiveReasoning,
2733
2734
  reasoning_details: finalReasoningDetails,
2734
2735
  annotations: messageAnnotations,
2735
2736
  cache_control: getCacheControl(providerOptions)
@@ -3111,7 +3112,9 @@ var OpenRouterChatLanguageModel = class {
3111
3112
  // Provider routing settings:
3112
3113
  provider: this.settings.provider,
3113
3114
  // Debug settings:
3114
- debug: this.settings.debug
3115
+ debug: this.settings.debug,
3116
+ // Anthropic automatic caching:
3117
+ cache_control: this.settings.cache_control
3115
3118
  }, this.config.extraBody), this.settings.extraBody);
3116
3119
  if (tools && tools.length > 0) {
3117
3120
  const mappedTools = tools.filter(
@@ -3132,10 +3135,11 @@ var OpenRouterChatLanguageModel = class {
3132
3135
  return baseArgs;
3133
3136
  }
3134
3137
  async doGenerate(options) {
3135
- var _a16, _b16, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v;
3138
+ var _b16, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w;
3136
3139
  const providerOptions = options.providerOptions || {};
3137
3140
  const openrouterOptions = providerOptions.openrouter || {};
3138
- const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
3141
+ const _a16 = openrouterOptions, { cacheControl } = _a16, restOpenrouterOptions = __objRest(_a16, ["cacheControl"]);
3142
+ const args = __spreadValues(__spreadValues(__spreadValues({}, this.getArgs(options)), restOpenrouterOptions), cacheControl != null && !("cache_control" in restOpenrouterOptions) ? { cache_control: cacheControl } : {});
3139
3143
  const { value: responseValue, responseHeaders } = await postJsonToApi({
3140
3144
  url: this.config.url({
3141
3145
  path: "/chat/completions",
@@ -3172,7 +3176,7 @@ var OpenRouterChatLanguageModel = class {
3172
3176
  });
3173
3177
  }
3174
3178
  const usageInfo = response.usage ? computeTokenUsage(response.usage) : emptyUsage();
3175
- const reasoningDetails = (_a16 = choice.message.reasoning_details) != null ? _a16 : [];
3179
+ const reasoningDetails = (_b16 = choice.message.reasoning_details) != null ? _b16 : [];
3176
3180
  const reasoning = reasoningDetails.length > 0 ? reasoningDetails.map((detail) => {
3177
3181
  switch (detail.type) {
3178
3182
  case "reasoning.text" /* Text */: {
@@ -3241,9 +3245,9 @@ var OpenRouterChatLanguageModel = class {
3241
3245
  for (const toolCall of choice.message.tool_calls) {
3242
3246
  content.push({
3243
3247
  type: "tool-call",
3244
- toolCallId: (_b16 = toolCall.id) != null ? _b16 : generateId(),
3248
+ toolCallId: (_c = toolCall.id) != null ? _c : generateId(),
3245
3249
  toolName: toolCall.function.name,
3246
- input: (_c = toolCall.function.arguments) != null ? _c : "{}",
3250
+ input: (_d = toolCall.function.arguments) != null ? _d : "{}",
3247
3251
  providerMetadata: !reasoningDetailsAttachedToToolCall ? {
3248
3252
  openrouter: {
3249
3253
  reasoning_details: reasoningDetails
@@ -3270,19 +3274,19 @@ var OpenRouterChatLanguageModel = class {
3270
3274
  sourceType: "url",
3271
3275
  id: annotation.url_citation.url,
3272
3276
  url: annotation.url_citation.url,
3273
- title: (_d = annotation.url_citation.title) != null ? _d : "",
3277
+ title: (_e = annotation.url_citation.title) != null ? _e : "",
3274
3278
  providerMetadata: {
3275
3279
  openrouter: {
3276
- content: (_e = annotation.url_citation.content) != null ? _e : "",
3277
- startIndex: (_f = annotation.url_citation.start_index) != null ? _f : 0,
3278
- endIndex: (_g = annotation.url_citation.end_index) != null ? _g : 0
3280
+ content: (_f = annotation.url_citation.content) != null ? _f : "",
3281
+ startIndex: (_g = annotation.url_citation.start_index) != null ? _g : 0,
3282
+ endIndex: (_h = annotation.url_citation.end_index) != null ? _h : 0
3279
3283
  }
3280
3284
  }
3281
3285
  });
3282
3286
  }
3283
3287
  }
3284
3288
  }
3285
- const fileAnnotations = (_h = choice.message.annotations) == null ? void 0 : _h.filter(
3289
+ const fileAnnotations = (_i = choice.message.annotations) == null ? void 0 : _i.filter(
3286
3290
  (a) => a.type === "file"
3287
3291
  );
3288
3292
  const hasToolCalls = choice.message.tool_calls && choice.message.tool_calls.length > 0;
@@ -3290,7 +3294,7 @@ var OpenRouterChatLanguageModel = class {
3290
3294
  (d) => d.type === "reasoning.encrypted" /* Encrypted */ && d.data
3291
3295
  );
3292
3296
  const shouldOverrideFinishReason = hasToolCalls && hasEncryptedReasoning && choice.finish_reason === "stop";
3293
- const effectiveFinishReason = shouldOverrideFinishReason ? createFinishReason("tool-calls", (_i = choice.finish_reason) != null ? _i : void 0) : mapOpenRouterFinishReason(choice.finish_reason);
3297
+ const effectiveFinishReason = shouldOverrideFinishReason ? createFinishReason("tool-calls", (_j = choice.finish_reason) != null ? _j : void 0) : mapOpenRouterFinishReason(choice.finish_reason);
3294
3298
  return {
3295
3299
  content,
3296
3300
  finishReason: effectiveFinishReason,
@@ -3298,22 +3302,22 @@ var OpenRouterChatLanguageModel = class {
3298
3302
  warnings: [],
3299
3303
  providerMetadata: {
3300
3304
  openrouter: OpenRouterProviderMetadataSchema.parse({
3301
- provider: (_j = response.provider) != null ? _j : "",
3302
- reasoning_details: (_k = choice.message.reasoning_details) != null ? _k : [],
3305
+ provider: (_k = response.provider) != null ? _k : "",
3306
+ reasoning_details: (_l = choice.message.reasoning_details) != null ? _l : [],
3303
3307
  annotations: fileAnnotations && fileAnnotations.length > 0 ? fileAnnotations : void 0,
3304
3308
  usage: __spreadValues(__spreadValues(__spreadValues(__spreadValues({
3305
- promptTokens: (_l = usageInfo.inputTokens.total) != null ? _l : 0,
3306
- completionTokens: (_m = usageInfo.outputTokens.total) != null ? _m : 0,
3307
- totalTokens: ((_n = usageInfo.inputTokens.total) != null ? _n : 0) + ((_o = usageInfo.outputTokens.total) != null ? _o : 0)
3308
- }, ((_p = response.usage) == null ? void 0 : _p.cost) != null ? { cost: response.usage.cost } : {}), ((_r = (_q = response.usage) == null ? void 0 : _q.prompt_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? {
3309
+ promptTokens: (_m = usageInfo.inputTokens.total) != null ? _m : 0,
3310
+ completionTokens: (_n = usageInfo.outputTokens.total) != null ? _n : 0,
3311
+ totalTokens: ((_o = usageInfo.inputTokens.total) != null ? _o : 0) + ((_p = usageInfo.outputTokens.total) != null ? _p : 0)
3312
+ }, ((_q = response.usage) == null ? void 0 : _q.cost) != null ? { cost: response.usage.cost } : {}), ((_s = (_r = response.usage) == null ? void 0 : _r.prompt_tokens_details) == null ? void 0 : _s.cached_tokens) != null ? {
3309
3313
  promptTokensDetails: {
3310
3314
  cachedTokens: response.usage.prompt_tokens_details.cached_tokens
3311
3315
  }
3312
- } : {}), ((_t = (_s = response.usage) == null ? void 0 : _s.completion_tokens_details) == null ? void 0 : _t.reasoning_tokens) != null ? {
3316
+ } : {}), ((_u = (_t = response.usage) == null ? void 0 : _t.completion_tokens_details) == null ? void 0 : _u.reasoning_tokens) != null ? {
3313
3317
  completionTokensDetails: {
3314
3318
  reasoningTokens: response.usage.completion_tokens_details.reasoning_tokens
3315
3319
  }
3316
- } : {}), ((_v = (_u = response.usage) == null ? void 0 : _u.cost_details) == null ? void 0 : _v.upstream_inference_cost) != null ? {
3320
+ } : {}), ((_w = (_v = response.usage) == null ? void 0 : _v.cost_details) == null ? void 0 : _w.upstream_inference_cost) != null ? {
3317
3321
  costDetails: {
3318
3322
  upstreamInferenceCost: response.usage.cost_details.upstream_inference_cost
3319
3323
  }
@@ -3329,10 +3333,11 @@ var OpenRouterChatLanguageModel = class {
3329
3333
  };
3330
3334
  }
3331
3335
  async doStream(options) {
3332
- var _a16;
3336
+ var _b16;
3333
3337
  const providerOptions = options.providerOptions || {};
3334
3338
  const openrouterOptions = providerOptions.openrouter || {};
3335
- const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
3339
+ const _a16 = openrouterOptions, { cacheControl } = _a16, restOpenrouterOptions = __objRest(_a16, ["cacheControl"]);
3340
+ const args = __spreadValues(__spreadValues(__spreadValues({}, this.getArgs(options)), restOpenrouterOptions), cacheControl != null && !("cache_control" in restOpenrouterOptions) ? { cache_control: cacheControl } : {});
3336
3341
  const { value: response, responseHeaders } = await postJsonToApi({
3337
3342
  url: this.config.url({
3338
3343
  path: "/chat/completions",
@@ -3344,7 +3349,7 @@ var OpenRouterChatLanguageModel = class {
3344
3349
  // only include stream_options when in strict compatibility mode:
3345
3350
  stream_options: this.config.compatibility === "strict" ? __spreadValues({
3346
3351
  include_usage: true
3347
- }, ((_a16 = this.settings.usage) == null ? void 0 : _a16.include) ? { include_usage: true } : {}) : void 0
3352
+ }, ((_b16 = this.settings.usage) == null ? void 0 : _b16.include) ? { include_usage: true } : {}) : void 0
3348
3353
  }),
3349
3354
  failedResponseHandler: openrouterFailedResponseHandler,
3350
3355
  successfulResponseHandler: createEventSourceResponseHandler(
@@ -3384,7 +3389,7 @@ var OpenRouterChatLanguageModel = class {
3384
3389
  stream: response.pipeThrough(
3385
3390
  new TransformStream({
3386
3391
  transform(chunk, controller) {
3387
- var _a17, _b16, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u;
3392
+ var _a17, _b17, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u;
3388
3393
  if (options.includeRawChunks) {
3389
3394
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
3390
3395
  }
@@ -3421,7 +3426,7 @@ var OpenRouterChatLanguageModel = class {
3421
3426
  Object.assign(usage.outputTokens, computed.outputTokens);
3422
3427
  rawUsage = value.usage;
3423
3428
  const promptTokens = (_a17 = value.usage.prompt_tokens) != null ? _a17 : 0;
3424
- const completionTokens = (_b16 = value.usage.completion_tokens) != null ? _b16 : 0;
3429
+ const completionTokens = (_b17 = value.usage.completion_tokens) != null ? _b17 : 0;
3425
3430
  openrouterUsage.promptTokens = promptTokens;
3426
3431
  if (value.usage.prompt_tokens_details) {
3427
3432
  openrouterUsage.promptTokensDetails = {