@openrouter/ai-sdk-provider 0.6.0 → 0.7.0-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -37,8 +37,7 @@ const { text } = await generateText({

  ## Supported models

- This list is not a definitive list of models supported by OpenRouter, as it constantly changes as we add new models (and deprecate old ones) to our system.
- You can find the latest list of models supported by OpenRouter [here](https://openrouter.ai/models).
+ This list is not a definitive list of models supported by OpenRouter, as it constantly changes as we add new models (and deprecate old ones) to our system. You can find the latest list of models supported by OpenRouter [here](https://openrouter.ai/models).

  You can find the latest list of tool-supported models supported by OpenRouter [here](https://openrouter.ai/models?order=newest&supported_parameters=tools). (Note: This list may contain models that are not compatible with the AI SDK.)

@@ -166,7 +165,7 @@ The provider supports [OpenRouter usage accounting](https://openrouter.ai/docs/u
  const model = openrouter('openai/gpt-3.5-turbo', {
    usage: {
      include: true,
-   }
+   },
  });

  // Access usage accounting data
@@ -178,6 +177,9 @@ const result = await generateText({
  // Provider-specific usage details (available in providerMetadata)
  if (result.providerMetadata?.openrouter?.usage) {
    console.log('Cost:', result.providerMetadata.openrouter.usage.cost);
-   console.log('Total Tokens:', result.providerMetadata.openrouter.usage.totalTokens);
+   console.log(
+     'Total Tokens:',
+     result.providerMetadata.openrouter.usage.totalTokens,
+   );
  }
  ```
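The change above is only a formatting pass on the README's usage-accounting example. For context, here is a minimal end-to-end sketch of that feature, assuming the package's `createOpenRouter` factory and an `OPENROUTER_API_KEY` environment variable; the `usage.include` option and the `providerMetadata.openrouter.usage` fields are taken directly from the README:

```ts
// Sketch only: assumes createOpenRouter from this package, generateText from
// the `ai` package, and OPENROUTER_API_KEY in the environment.
import { createOpenRouter } from '@openrouter/ai-sdk-provider';
import { generateText } from 'ai';

const openrouter = createOpenRouter({ apiKey: process.env.OPENROUTER_API_KEY });

// Opt in to OpenRouter usage accounting on the model instance.
const model = openrouter('openai/gpt-3.5-turbo', {
  usage: { include: true },
});

const result = await generateText({ model, prompt: 'Hello!' });

// Provider-specific usage details are exposed via providerMetadata.
if (result.providerMetadata?.openrouter?.usage) {
  console.log('Cost:', result.providerMetadata.openrouter.usage.cost);
  console.log(
    'Total Tokens:',
    result.providerMetadata.openrouter.usage.totalTokens,
  );
}
```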
package/dist/index.d.mts CHANGED
@@ -56,8 +56,8 @@ type OpenRouterUsageAccounting = {
  cost?: number;
  };

- type OpenRouterChatModelId = string;
- type OpenRouterChatSettings = {
+ type OpenRouterCompletionModelId = string;
+ type OpenRouterCompletionSettings = {
  /**
  Modify the likelihood of specified tokens appearing in the completion.

@@ -71,7 +71,7 @@ type OpenRouterChatSettings = {

  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
  token from being generated.
- */
+ */
  logitBias?: Record<number, number>;
  /**
  Return the log probabilities of the tokens. Including logprobs will increase
@@ -83,21 +83,16 @@ type OpenRouterChatSettings = {

  Setting to a number will return the log probabilities of the top n
  tokens that were generated.
- */
+ */
  logprobs?: boolean | number;
  /**
- Whether to enable parallel function calling during tool use. Default to true.
+ The suffix that comes after a completion of inserted text.
  */
- parallelToolCalls?: boolean;
- /**
- A unique identifier representing your end-user, which can help OpenRouter to
- monitor and detect abuse. Learn more.
- */
- user?: string;
+ suffix?: string;
  } & OpenRouterSharedSettings;

- type OpenRouterCompletionModelId = string;
- type OpenRouterCompletionSettings = {
+ type OpenRouterChatModelId = string;
+ type OpenRouterChatSettings = {
  /**
  Modify the likelihood of specified tokens appearing in the completion.

@@ -111,7 +106,7 @@ type OpenRouterCompletionSettings = {

  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
  token from being generated.
- */
+ */
  logitBias?: Record<number, number>;
  /**
  Return the log probabilities of the tokens. Including logprobs will increase
@@ -123,12 +118,17 @@ type OpenRouterCompletionSettings = {

  Setting to a number will return the log probabilities of the top n
  tokens that were generated.
- */
+ */
  logprobs?: boolean | number;
  /**
- The suffix that comes after a completion of inserted text.
+ Whether to enable parallel function calling during tool use. Default to true.
  */
- suffix?: string;
+ parallelToolCalls?: boolean;
+ /**
+ A unique identifier representing your end-user, which can help OpenRouter to
+ monitor and detect abuse. Learn more.
+ */
+ user?: string;
  } & OpenRouterSharedSettings;

  type OpenRouterChatConfig = {
@@ -142,6 +142,8 @@ type OpenRouterChatConfig = {
  fetch?: typeof fetch;
  extraBody?: Record<string, unknown>;
  };
+ type DoGenerateOutput = Awaited<ReturnType<LanguageModelV1['doGenerate']>>;
+ type DoStreamOutput = Awaited<ReturnType<LanguageModelV1['doStream']>>;
  declare class OpenRouterChatLanguageModel implements LanguageModelV1 {
  readonly specificationVersion = "v1";
  readonly defaultObjectGenerationMode = "tool";
@@ -151,8 +153,8 @@ declare class OpenRouterChatLanguageModel implements LanguageModelV1 {
  constructor(modelId: OpenRouterChatModelId, settings: OpenRouterChatSettings, config: OpenRouterChatConfig);
  get provider(): string;
  private getArgs;
- doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
- doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
+ doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<DoGenerateOutput>;
+ doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<DoStreamOutput>;
  }

  type OpenRouterCompletionConfig = {
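Aside from reordering the chat and completion settings declarations (each keeps the same members), the substantive change in these typings is the extracted `DoGenerateOutput` and `DoStreamOutput` aliases. A type-level sketch, assuming `LanguageModelV1` from `@ai-sdk/provider` as these declarations already reference, showing the aliases are equivalent to the inline form used in 0.6.0:

```ts
// Type-level check only: the new aliases name the same types as the previous
// inline Awaited<ReturnType<...>> signatures, so callers see no change.
import type { LanguageModelV1 } from '@ai-sdk/provider';

type DoGenerateOutput = Awaited<ReturnType<LanguageModelV1['doGenerate']>>;
type DoStreamOutput = Awaited<ReturnType<LanguageModelV1['doStream']>>;

// Mutual-assignability helper; these constants only compile if the types match.
type Same<A, B> = [A] extends [B] ? ([B] extends [A] ? true : false) : false;
const doGenerateUnchanged: Same<
  DoGenerateOutput,
  Awaited<ReturnType<LanguageModelV1['doGenerate']>>
> = true;
const doStreamUnchanged: Same<
  DoStreamOutput,
  Awaited<ReturnType<LanguageModelV1['doStream']>>
> = true;
```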
package/dist/index.d.ts CHANGED
@@ -56,8 +56,8 @@ type OpenRouterUsageAccounting = {
  cost?: number;
  };

- type OpenRouterChatModelId = string;
- type OpenRouterChatSettings = {
+ type OpenRouterCompletionModelId = string;
+ type OpenRouterCompletionSettings = {
  /**
  Modify the likelihood of specified tokens appearing in the completion.

@@ -71,7 +71,7 @@ type OpenRouterChatSettings = {

  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
  token from being generated.
- */
+ */
  logitBias?: Record<number, number>;
  /**
  Return the log probabilities of the tokens. Including logprobs will increase
@@ -83,21 +83,16 @@ type OpenRouterChatSettings = {

  Setting to a number will return the log probabilities of the top n
  tokens that were generated.
- */
+ */
  logprobs?: boolean | number;
  /**
- Whether to enable parallel function calling during tool use. Default to true.
+ The suffix that comes after a completion of inserted text.
  */
- parallelToolCalls?: boolean;
- /**
- A unique identifier representing your end-user, which can help OpenRouter to
- monitor and detect abuse. Learn more.
- */
- user?: string;
+ suffix?: string;
  } & OpenRouterSharedSettings;

- type OpenRouterCompletionModelId = string;
- type OpenRouterCompletionSettings = {
+ type OpenRouterChatModelId = string;
+ type OpenRouterChatSettings = {
  /**
  Modify the likelihood of specified tokens appearing in the completion.

@@ -111,7 +106,7 @@ type OpenRouterCompletionSettings = {

  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
  token from being generated.
- */
+ */
  logitBias?: Record<number, number>;
  /**
  Return the log probabilities of the tokens. Including logprobs will increase
@@ -123,12 +118,17 @@ type OpenRouterCompletionSettings = {

  Setting to a number will return the log probabilities of the top n
  tokens that were generated.
- */
+ */
  logprobs?: boolean | number;
  /**
- The suffix that comes after a completion of inserted text.
+ Whether to enable parallel function calling during tool use. Default to true.
  */
- suffix?: string;
+ parallelToolCalls?: boolean;
+ /**
+ A unique identifier representing your end-user, which can help OpenRouter to
+ monitor and detect abuse. Learn more.
+ */
+ user?: string;
  } & OpenRouterSharedSettings;

  type OpenRouterChatConfig = {
@@ -142,6 +142,8 @@ type OpenRouterChatConfig = {
  fetch?: typeof fetch;
  extraBody?: Record<string, unknown>;
  };
+ type DoGenerateOutput = Awaited<ReturnType<LanguageModelV1['doGenerate']>>;
+ type DoStreamOutput = Awaited<ReturnType<LanguageModelV1['doStream']>>;
  declare class OpenRouterChatLanguageModel implements LanguageModelV1 {
  readonly specificationVersion = "v1";
  readonly defaultObjectGenerationMode = "tool";
@@ -151,8 +153,8 @@ declare class OpenRouterChatLanguageModel implements LanguageModelV1 {
  constructor(modelId: OpenRouterChatModelId, settings: OpenRouterChatSettings, config: OpenRouterChatConfig);
  get provider(): string;
  private getArgs;
- doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
- doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
+ doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<DoGenerateOutput>;
+ doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<DoStreamOutput>;
  }

  type OpenRouterCompletionConfig = {
package/dist/index.js CHANGED
@@ -58,10 +58,36 @@ module.exports = __toCommonJS(index_exports);
  // src/openrouter-facade.ts
  var import_provider_utils5 = require("@ai-sdk/provider-utils");

+ // src/schemas/reasoning-details.ts
+ var import_zod = require("zod");
+ var ReasoningDetailSummarySchema = import_zod.z.object({
+ type: import_zod.z.literal("reasoning.summary" /* Summary */),
+ summary: import_zod.z.string()
+ });
+ var ReasoningDetailEncryptedSchema = import_zod.z.object({
+ type: import_zod.z.literal("reasoning.encrypted" /* Encrypted */),
+ data: import_zod.z.string()
+ });
+ var ReasoningDetailTextSchema = import_zod.z.object({
+ type: import_zod.z.literal("reasoning.text" /* Text */),
+ text: import_zod.z.string().nullish(),
+ signature: import_zod.z.string().nullish()
+ });
+ var ReasoningDetailUnionSchema = import_zod.z.union([
+ ReasoningDetailSummarySchema,
+ ReasoningDetailEncryptedSchema,
+ ReasoningDetailTextSchema
+ ]);
+ var ReasoningDetailsWithUnknownSchema = import_zod.z.union([
+ ReasoningDetailUnionSchema,
+ import_zod.z.unknown().transform(() => null)
+ ]);
+ var ReasoningDetailArraySchema = import_zod.z.array(ReasoningDetailsWithUnknownSchema).transform((d) => d.filter((d2) => !!d2));
+
  // src/openrouter-chat-language-model.ts
  var import_provider = require("@ai-sdk/provider");
  var import_provider_utils3 = require("@ai-sdk/provider-utils");
- var import_zod2 = require("zod");
+ var import_zod3 = require("zod");

  // src/convert-to-openrouter-chat-messages.ts
  var import_provider_utils = require("@ai-sdk/provider-utils");
@@ -145,6 +171,8 @@ function convertToOpenRouterChatMessages(prompt) {
  }
  case "assistant": {
  let text = "";
+ let reasoning = "";
+ const reasoningDetails = [];
  const toolCalls = [];
  for (const part of content) {
  switch (part.type) {
@@ -163,10 +191,23 @@
  });
  break;
  }
+ case "reasoning": {
+ reasoning += part.text;
+ reasoningDetails.push({
+ type: "reasoning.text" /* Text */,
+ text: part.text,
+ signature: part.signature
+ });
+ break;
+ }
+ case "redacted-reasoning": {
+ reasoningDetails.push({
+ type: "reasoning.encrypted" /* Encrypted */,
+ data: part.data
+ });
+ break;
+ }
  case "file":
- // TODO: Handle reasoning and redacted-reasoning
- case "reasoning":
- case "redacted-reasoning":
  break;
  default: {
  const _exhaustiveCheck = part;
@@ -178,6 +219,8 @@ function convertToOpenRouterChatMessages(prompt) {
  role: "assistant",
  content: text,
  tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
+ reasoning: reasoning || void 0,
+ reasoning_details: reasoningDetails.length > 0 ? reasoningDetails : void 0,
  cache_control: getCacheControl(providerMetadata)
  });
  break;
@@ -234,13 +277,13 @@ function mapOpenRouterFinishReason(finishReason) {

  // src/openrouter-error.ts
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
- var import_zod = require("zod");
- var OpenRouterErrorResponseSchema = import_zod.z.object({
- error: import_zod.z.object({
- message: import_zod.z.string(),
- type: import_zod.z.string(),
- param: import_zod.z.any().nullable(),
- code: import_zod.z.string().nullable()
+ var import_zod2 = require("zod");
+ var OpenRouterErrorResponseSchema = import_zod2.z.object({
+ error: import_zod2.z.object({
+ message: import_zod2.z.string(),
+ type: import_zod2.z.string(),
+ param: import_zod2.z.any().nullable(),
+ code: import_zod2.z.string().nullable()
  })
  });
  var openrouterFailedResponseHandler = (0, import_provider_utils2.createJsonErrorResponseHandler)({
@@ -387,13 +430,56 @@ var OpenRouterChatLanguageModel = class {
  };
  }
  const hasProviderMetadata = Object.keys(providerMetadata).length > 0;
+ const reasoningDetails = (_h = choice.message.reasoning_details) != null ? _h : [];
+ const reasoning = reasoningDetails.length > 0 ? reasoningDetails.map((detail) => {
+ var _a2;
+ switch (detail.type) {
+ case "reasoning.text" /* Text */: {
+ if (detail.text) {
+ return {
+ type: "text",
+ text: detail.text,
+ signature: (_a2 = detail.signature) != null ? _a2 : void 0
+ };
+ }
+ break;
+ }
+ case "reasoning.summary" /* Summary */: {
+ if (detail.summary) {
+ return {
+ type: "text",
+ text: detail.summary
+ };
+ }
+ break;
+ }
+ case "reasoning.encrypted" /* Encrypted */: {
+ if (detail.data) {
+ return {
+ type: "redacted",
+ data: detail.data
+ };
+ }
+ break;
+ }
+ default: {
+ detail;
+ }
+ }
+ return null;
+ }).filter((p) => p !== null) : choice.message.reasoning ? [
+ {
+ type: "text",
+ text: choice.message.reasoning
+ }
+ ] : [];
  return __spreadValues({
  response: {
  id: response.id,
  modelId: response.model
  },
- text: (_h = choice.message.content) != null ? _h : void 0,
- reasoning: (_i = choice.message.reasoning) != null ? _i : void 0,
+ text: (_i = choice.message.content) != null ? _i : void 0,
+ reasoning,
  toolCalls: (_j = choice.message.tool_calls) == null ? void 0 : _j.map((toolCall) => {
  var _a2;
  return {
@@ -512,11 +598,56 @@ var OpenRouterChatLanguageModel = class {
  textDelta: delta.reasoning
  });
  }
+ if (delta.reasoning_details && delta.reasoning_details.length > 0) {
+ for (const detail of delta.reasoning_details) {
+ switch (detail.type) {
+ case "reasoning.text" /* Text */: {
+ if (detail.text) {
+ controller.enqueue({
+ type: "reasoning",
+ textDelta: detail.text
+ });
+ }
+ if (detail.signature) {
+ controller.enqueue({
+ type: "reasoning-signature",
+ signature: detail.signature
+ });
+ }
+ break;
+ }
+ case "reasoning.encrypted" /* Encrypted */: {
+ if (detail.data) {
+ controller.enqueue({
+ type: "redacted-reasoning",
+ data: detail.data
+ });
+ }
+ break;
+ }
+ case "reasoning.summary" /* Summary */: {
+ if (detail.summary) {
+ controller.enqueue({
+ type: "reasoning",
+ textDelta: detail.summary
+ });
+ }
+ break;
+ }
+ default: {
+ detail;
+ break;
+ }
+ }
+ }
+ }
  const mappedLogprobs = mapOpenRouterChatLogProbsOutput(
  choice == null ? void 0 : choice.logprobs
  );
  if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
+ if (logprobs === void 0) {
+ logprobs = [];
+ }
  logprobs.push(...mappedLogprobs);
  }
  if (delta.tool_calls != null) {
@@ -639,95 +770,97 @@ var OpenRouterChatLanguageModel = class {
  };
  }
  };
- var OpenRouterChatCompletionBaseResponseSchema = import_zod2.z.object({
- id: import_zod2.z.string().optional(),
- model: import_zod2.z.string().optional(),
- usage: import_zod2.z.object({
- prompt_tokens: import_zod2.z.number(),
- prompt_tokens_details: import_zod2.z.object({
- cached_tokens: import_zod2.z.number()
+ var OpenRouterChatCompletionBaseResponseSchema = import_zod3.z.object({
+ id: import_zod3.z.string().optional(),
+ model: import_zod3.z.string().optional(),
+ usage: import_zod3.z.object({
+ prompt_tokens: import_zod3.z.number(),
+ prompt_tokens_details: import_zod3.z.object({
+ cached_tokens: import_zod3.z.number()
  }).optional(),
- completion_tokens: import_zod2.z.number(),
- completion_tokens_details: import_zod2.z.object({
- reasoning_tokens: import_zod2.z.number()
+ completion_tokens: import_zod3.z.number(),
+ completion_tokens_details: import_zod3.z.object({
+ reasoning_tokens: import_zod3.z.number()
  }).optional(),
- total_tokens: import_zod2.z.number(),
- cost: import_zod2.z.number().optional()
+ total_tokens: import_zod3.z.number(),
+ cost: import_zod3.z.number().optional()
  }).nullish()
  });
  var OpenRouterNonStreamChatCompletionResponseSchema = OpenRouterChatCompletionBaseResponseSchema.extend({
- choices: import_zod2.z.array(
- import_zod2.z.object({
- message: import_zod2.z.object({
- role: import_zod2.z.literal("assistant"),
- content: import_zod2.z.string().nullable().optional(),
- reasoning: import_zod2.z.string().nullable().optional(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- id: import_zod2.z.string().optional().nullable(),
- type: import_zod2.z.literal("function"),
- function: import_zod2.z.object({
- name: import_zod2.z.string(),
- arguments: import_zod2.z.string()
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ message: import_zod3.z.object({
+ role: import_zod3.z.literal("assistant"),
+ content: import_zod3.z.string().nullable().optional(),
+ reasoning: import_zod3.z.string().nullable().optional(),
+ reasoning_details: ReasoningDetailArraySchema.nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ id: import_zod3.z.string().optional().nullable(),
+ type: import_zod3.z.literal("function"),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string(),
+ arguments: import_zod3.z.string()
  })
  })
  ).optional()
  }),
- index: import_zod2.z.number(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
+ index: import_zod3.z.number(),
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
  })
  )
  })
  ).nullable()
  }).nullable().optional(),
- finish_reason: import_zod2.z.string().optional().nullable()
+ finish_reason: import_zod3.z.string().optional().nullable()
  })
  )
  });
- var OpenRouterStreamChatCompletionChunkSchema = import_zod2.z.union([
+ var OpenRouterStreamChatCompletionChunkSchema = import_zod3.z.union([
  OpenRouterChatCompletionBaseResponseSchema.extend({
- choices: import_zod2.z.array(
- import_zod2.z.object({
- delta: import_zod2.z.object({
- role: import_zod2.z.enum(["assistant"]).optional(),
- content: import_zod2.z.string().nullish(),
- reasoning: import_zod2.z.string().nullish().optional(),
- tool_calls: import_zod2.z.array(
- import_zod2.z.object({
- index: import_zod2.z.number(),
- id: import_zod2.z.string().nullish(),
- type: import_zod2.z.literal("function").optional(),
- function: import_zod2.z.object({
- name: import_zod2.z.string().nullish(),
- arguments: import_zod2.z.string().nullish()
+ choices: import_zod3.z.array(
+ import_zod3.z.object({
+ delta: import_zod3.z.object({
+ role: import_zod3.z.enum(["assistant"]).optional(),
+ content: import_zod3.z.string().nullish(),
+ reasoning: import_zod3.z.string().nullish().optional(),
+ reasoning_details: ReasoningDetailArraySchema.nullish(),
+ tool_calls: import_zod3.z.array(
+ import_zod3.z.object({
+ index: import_zod3.z.number(),
+ id: import_zod3.z.string().nullish(),
+ type: import_zod3.z.literal("function").optional(),
+ function: import_zod3.z.object({
+ name: import_zod3.z.string().nullish(),
+ arguments: import_zod3.z.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: import_zod2.z.object({
- content: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number(),
- top_logprobs: import_zod2.z.array(
- import_zod2.z.object({
- token: import_zod2.z.string(),
- logprob: import_zod2.z.number()
+ logprobs: import_zod3.z.object({
+ content: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number(),
+ top_logprobs: import_zod3.z.array(
+ import_zod3.z.object({
+ token: import_zod3.z.string(),
+ logprob: import_zod3.z.number()
  })
  )
  })
  ).nullable()
  }).nullish(),
- finish_reason: import_zod2.z.string().nullable().optional(),
- index: import_zod2.z.number()
+ finish_reason: import_zod3.z.string().nullable().optional(),
+ index: import_zod3.z.number()
  })
  )
  }),
@@ -787,7 +920,7 @@ function prepareToolsAndToolChoice(mode) {
  // src/openrouter-completion-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
- var import_zod3 = require("zod");
+ var import_zod4 = require("zod");

  // src/convert-to-openrouter-completion-prompt.ts
  var import_provider2 = require("@ai-sdk/provider");
@@ -1117,7 +1250,9 @@ var OpenRouterCompletionLanguageModel = class {
  choice == null ? void 0 : choice.logprobs
  );
  if (mappedLogprobs == null ? void 0 : mappedLogprobs.length) {
- if (logprobs === void 0) logprobs = [];
+ if (logprobs === void 0) {
+ logprobs = [];
+ }
  logprobs.push(...mappedLogprobs);
  }
  },
@@ -1137,26 +1272,27 @@ var OpenRouterCompletionLanguageModel = class {
  };
  }
  };
- var OpenRouterCompletionChunkSchema = import_zod3.z.union([
- import_zod3.z.object({
- id: import_zod3.z.string().optional(),
- model: import_zod3.z.string().optional(),
- choices: import_zod3.z.array(
- import_zod3.z.object({
- text: import_zod3.z.string(),
- reasoning: import_zod3.z.string().nullish().optional(),
- finish_reason: import_zod3.z.string().nullish(),
- index: import_zod3.z.number(),
- logprobs: import_zod3.z.object({
- tokens: import_zod3.z.array(import_zod3.z.string()),
- token_logprobs: import_zod3.z.array(import_zod3.z.number()),
- top_logprobs: import_zod3.z.array(import_zod3.z.record(import_zod3.z.string(), import_zod3.z.number())).nullable()
+ var OpenRouterCompletionChunkSchema = import_zod4.z.union([
+ import_zod4.z.object({
+ id: import_zod4.z.string().optional(),
+ model: import_zod4.z.string().optional(),
+ choices: import_zod4.z.array(
+ import_zod4.z.object({
+ text: import_zod4.z.string(),
+ reasoning: import_zod4.z.string().nullish().optional(),
+ reasoning_details: ReasoningDetailArraySchema.nullish(),
+ finish_reason: import_zod4.z.string().nullish(),
+ index: import_zod4.z.number(),
+ logprobs: import_zod4.z.object({
+ tokens: import_zod4.z.array(import_zod4.z.string()),
+ token_logprobs: import_zod4.z.array(import_zod4.z.number()),
+ top_logprobs: import_zod4.z.array(import_zod4.z.record(import_zod4.z.string(), import_zod4.z.number())).nullable()
  }).nullable().optional()
  })
  ),
- usage: import_zod3.z.object({
- prompt_tokens: import_zod3.z.number(),
- completion_tokens: import_zod3.z.number()
+ usage: import_zod4.z.object({
+ prompt_tokens: import_zod4.z.number(),
+ completion_tokens: import_zod4.z.number()
  }).optional().nullable()
  }),
  OpenRouterErrorResponseSchema
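The core runtime addition in `dist/index.js` is `reasoning_details` support: a tolerant zod schema accepts the array (unrecognized entries are mapped to `null` and filtered out rather than failing the parse), and the chat model forwards recognized entries as `reasoning`, `reasoning-signature`, and `redacted-reasoning` stream parts, as the `controller.enqueue` calls above show. A standalone sketch of that tolerant-parse pattern, using hypothetical schema names but the same shapes as the added `ReasoningDetail*` schemas:

```ts
// Sketch of the forgiving reasoning_details parse: unknown entries become null
// instead of rejecting the whole response, then get filtered out of the array.
import { z } from 'zod';

const KnownDetail = z.union([
  z.object({ type: z.literal('reasoning.summary'), summary: z.string() }),
  z.object({ type: z.literal('reasoning.encrypted'), data: z.string() }),
  z.object({
    type: z.literal('reasoning.text'),
    text: z.string().nullish(),
    signature: z.string().nullish(),
  }),
]);

// Any entry that matches no known shape parses as null...
const DetailOrNull = z.union([KnownDetail, z.unknown().transform(() => null)]);

// ...and the array-level transform drops those nulls.
const DetailArray = z
  .array(DetailOrNull)
  .transform((details) => details.filter((d) => !!d));

// Example: the unrecognized "reasoning.future" entry is dropped, not fatal.
const parsed = DetailArray.parse([
  { type: 'reasoning.summary', summary: 'weighed the options' },
  { type: 'reasoning.future', payload: 123 },
]);
console.log(parsed.length); // 1
```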