@providerprotocol/ai 0.0.23 → 0.0.25

@@ -1,4 +1,4 @@
-import { g as Provider } from '../provider-DR1yins0.js';
+import { g as Provider } from '../provider-x4RocsnK.js';
 
 /**
  * OpenRouter-specific types for the Unified Provider Protocol.
@@ -9,6 +9,47 @@ import { g as Provider } from '../provider-DR1yins0.js';
  *
  * @module types
  */
+/**
+ * Reasoning configuration for OpenRouter.
+ *
+ * Controls reasoning effort and whether to include reasoning tokens in responses.
+ *
+ * @see {@link https://openrouter.ai/docs/guides/best-practices/reasoning-tokens}
+ */
+interface OpenRouterReasoningConfig {
+    /**
+     * Reasoning effort level.
+     *
+     * Controls how much compute the model spends on reasoning:
+     * - `'xhigh'`: ~95% reasoning budget
+     * - `'high'`: ~80% reasoning budget
+     * - `'medium'`: ~50% reasoning budget (default)
+     * - `'low'`: ~20% reasoning budget
+     * - `'minimal'`: ~10% reasoning budget
+     * - `'none'`: No reasoning
+     */
+    effort?: 'xhigh' | 'high' | 'medium' | 'low' | 'minimal' | 'none';
+    /**
+     * Maximum tokens for reasoning.
+     *
+     * For Anthropic: minimum 1024, maximum 128000.
+     * For Gemini: Maps to thinkingBudget.
+     */
+    max_tokens?: number;
+    /**
+     * Whether to exclude reasoning from the response.
+     *
+     * When true, the model uses reasoning internally but doesn't include
+     * reasoning tokens in the response.
+     */
+    exclude?: boolean;
+    /**
+     * Whether reasoning is enabled.
+     *
+     * Defaults to true when the reasoning parameter is present.
+     */
+    enabled?: boolean;
+}
 /**
  * Parameters for OpenRouter's Chat Completions API.
  *
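For orientation, a hedged sketch of the new config in use. Only `reasoning` and its fields come from the hunk above; the model-slug comments are illustrative, and nothing here is the package's calling API:

    // Sketch only: exercising the new OpenRouterReasoningConfig field.
    const withEffort: Pick<OpenRouterCompletionsParams, "reasoning"> = {
      reasoning: { effort: "high" }          // ~80% reasoning budget
    };
    const withBudget: Pick<OpenRouterCompletionsParams, "reasoning"> = {
      reasoning: { max_tokens: 4096 }        // explicit budget; thinkingBudget on Gemini
    };
    const hidden: Pick<OpenRouterCompletionsParams, "reasoning"> = {
      reasoning: { effort: "medium", exclude: true }  // reason internally, omit tokens
    };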
@@ -98,6 +139,24 @@ interface OpenRouterCompletionsParams {
         /** If true, returns the transformed request body sent to the provider */
         echo_upstream_body?: boolean;
     };
+    /**
+     * Reasoning configuration for thinking models.
+     *
+     * Controls how much reasoning effort the model uses and whether to include
+     * reasoning tokens in the response.
+     *
+     * @see {@link https://openrouter.ai/docs/guides/best-practices/reasoning-tokens}
+     */
+    reasoning?: OpenRouterReasoningConfig;
+    /**
+     * Legacy reasoning toggle (use `reasoning` instead).
+     *
+     * - `true`: Equivalent to `reasoning: {}`
+     * - `false`: Equivalent to `reasoning: { exclude: true }`
+     *
+     * @deprecated Use `reasoning` parameter instead
+     */
+    include_reasoning?: boolean;
 }
 /**
  * Image generation configuration for OpenRouter.
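The deprecated flag maps onto the structured config exactly as the JSDoc states. As a sketch of the two equivalences (shapes only, not package API):

    const legacyOn: Pick<OpenRouterCompletionsParams, "include_reasoning"> = { include_reasoning: true };
    const modernOn: Pick<OpenRouterCompletionsParams, "reasoning"> = { reasoning: {} };
    const legacyOff: Pick<OpenRouterCompletionsParams, "include_reasoning"> = { include_reasoning: false };
    const modernOff: Pick<OpenRouterCompletionsParams, "reasoning"> = { reasoning: { exclude: true } };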
@@ -11,10 +11,10 @@ import {
   isAssistantMessage,
   isToolResultMessage,
   isUserMessage
-} from "../chunk-MF5ETY5O.js";
+} from "../chunk-6AZVUI6H.js";
 import {
   parseSSEStream
-} from "../chunk-NWS5IKNR.js";
+} from "../chunk-TOJCZMVU.js";
 import {
   resolveApiKey
 } from "../chunk-55X3W2MN.js";
@@ -175,7 +175,8 @@ function transformMessage(message) {
   }
   if (isAssistantMessage(message)) {
     const validContent = filterValidContent(message.content);
-    const textContent = validContent.filter((c) => c.type === "text").map((c) => c.text).join("");
+    const nonReasoningContent = validContent.filter((c) => c.type !== "reasoning");
+    const textContent = nonReasoningContent.filter((c) => c.type === "text").map((c) => c.text).join("");
     const assistantMessage = {
       role: "assistant",
       content: textContent || null
@@ -190,6 +191,10 @@ function transformMessage(message) {
         }
       }));
     }
+    const openrouterMeta = message.metadata?.openrouter;
+    if (openrouterMeta?.reasoning_details && openrouterMeta.reasoning_details.length > 0) {
+      assistantMessage.reasoning_details = openrouterMeta.reasoning_details;
+    }
     return assistantMessage;
   }
   if (isToolResultMessage(message)) {
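The hunk above is the outbound half of a round trip: `transformResponse` (below) stashes the provider's `reasoning_details` in message metadata, and `transformMessage` now replays them verbatim when that assistant turn is sent back, preserving reasoning context across turns. A hedged sketch of an assistant turn carrying that metadata (the message literal is illustrative, not a type from this package):

    // Illustrative history entry; the metadata path is the one read above.
    const assistantTurn = {
      role: "assistant",
      content: [{ type: "text", text: "The answer is 42." }],
      metadata: {
        openrouter: {
          reasoning_details: [
            { type: "reasoning.text", text: "Check small cases first..." }
          ]
        }
      }
    };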
@@ -261,10 +266,20 @@ function transformResponse(data) {
   if (!choice) {
     throw new Error("No choices in OpenRouter response");
   }
-  const content = [];
+  const reasoningContent = [];
+  const textContent = [];
   let structuredData;
+  if (choice.message.reasoning_details && choice.message.reasoning_details.length > 0) {
+    for (const detail of choice.message.reasoning_details) {
+      if (detail.type === "reasoning.text" && detail.text) {
+        reasoningContent.push({ type: "reasoning", text: detail.text });
+      } else if (detail.type === "reasoning.summary" && detail.summary) {
+        reasoningContent.push({ type: "reasoning", text: detail.summary });
+      }
+    }
+  }
   if (choice.message.content) {
-    content.push({ type: "text", text: choice.message.content });
+    textContent.push({ type: "text", text: choice.message.content });
     try {
       structuredData = JSON.parse(choice.message.content);
     } catch {
@@ -274,10 +289,11 @@ function transformResponse(data) {
     for (const image of choice.message.images) {
       const imageBlock = parseGeneratedImage(image.image_url.url);
       if (imageBlock) {
-        content.push(imageBlock);
+        textContent.push(imageBlock);
       }
     }
   }
+  const content = [...reasoningContent, ...textContent];
   const toolCalls = [];
   if (choice.message.tool_calls) {
     for (const call of choice.message.tool_calls) {
@@ -303,7 +319,9 @@ function transformResponse(data) {
       openrouter: {
         model: data.model,
         finish_reason: choice.finish_reason,
-        system_fingerprint: data.system_fingerprint
+        system_fingerprint: data.system_fingerprint,
+        // Store reasoning_details for multi-turn context preservation
+        reasoning_details: choice.message.reasoning_details
       }
     }
   }
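Net effect of the three `transformResponse` hunks: `reasoning.text` and `reasoning.summary` details become `reasoning` content blocks ordered ahead of text and image blocks, and the raw details land in metadata for the replay described earlier. The detail-to-block mapping, isolated as a self-contained sketch (type names assumed):

    // Mirrors the detail handling in transformResponse; type names are assumed.
    type ReasoningDetail =
      | { type: "reasoning.text"; text?: string }
      | { type: "reasoning.summary"; summary?: string };

    function toReasoningBlocks(details: ReasoningDetail[]) {
      const blocks: Array<{ type: "reasoning"; text: string }> = [];
      for (const detail of details) {
        if (detail.type === "reasoning.text" && detail.text) {
          blocks.push({ type: "reasoning", text: detail.text });
        } else if (detail.type === "reasoning.summary" && detail.summary) {
          blocks.push({ type: "reasoning", text: detail.summary });
        }
      }
      return blocks;
    }
    // toReasoningBlocks([{ type: "reasoning.text", text: "Let me think..." }])
    //   -> [{ type: "reasoning", text: "Let me think..." }]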
@@ -357,6 +375,8 @@ function createStreamState() {
     id: "",
     model: "",
     text: "",
+    reasoning: "",
+    reasoningDetails: [],
     toolCalls: /* @__PURE__ */ new Map(),
     images: [],
     finishReason: null,
@@ -417,6 +437,26 @@ function transformStreamEvent(chunk, state) {
       state.images.push(image.image_url.url);
     }
   }
+  if (choice.delta.reasoning_details) {
+    for (const detail of choice.delta.reasoning_details) {
+      state.reasoningDetails.push(detail);
+      if (detail.type === "reasoning.text" && detail.text) {
+        state.reasoning += detail.text;
+        events.push({
+          type: StreamEventType.ReasoningDelta,
+          index: 0,
+          delta: { text: detail.text }
+        });
+      } else if (detail.type === "reasoning.summary" && detail.summary) {
+        state.reasoning += detail.summary;
+        events.push({
+          type: StreamEventType.ReasoningDelta,
+          index: 0,
+          delta: { text: detail.summary }
+        });
+      }
+    }
+  }
   if (choice.finish_reason) {
     state.finishReason = choice.finish_reason;
     events.push({ type: StreamEventType.MessageStop, index: 0, delta: {} });
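On the streaming side each detail now does double duty: it is accumulated in `state.reasoningDetails` for the final message and surfaced immediately as a `ReasoningDelta` event. A hedged consumer-side sketch; only `StreamEventType.ReasoningDelta` and its `delta.text` payload are confirmed by this diff, the iterable shape is assumed:

    // Hypothetical consumer; `stream` stands in for the package's event iterable.
    async function collectReasoning(stream: AsyncIterable<any>): Promise<string> {
      let reasoning = "";
      for await (const event of stream) {
        if (event.type === StreamEventType.ReasoningDelta) {
          reasoning += event.delta.text;  // thinking tokens, in arrival order
        }
      }
      return reasoning;
    }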
@@ -430,10 +470,14 @@ function transformStreamEvent(chunk, state) {
   return events;
 }
 function buildResponseFromState(state) {
-  const content = [];
+  const reasoningContent = [];
+  const textContent = [];
   let structuredData;
+  if (state.reasoning) {
+    reasoningContent.push({ type: "reasoning", text: state.reasoning });
+  }
   if (state.text) {
-    content.push({ type: "text", text: state.text });
+    textContent.push({ type: "text", text: state.text });
     try {
       structuredData = JSON.parse(state.text);
     } catch {
@@ -442,9 +486,10 @@ function buildResponseFromState(state) {
   for (const imageUrl of state.images) {
     const imageBlock = parseGeneratedImage(imageUrl);
     if (imageBlock) {
-      content.push(imageBlock);
+      textContent.push(imageBlock);
     }
   }
+  const content = [...reasoningContent, ...textContent];
   const toolCalls = [];
   for (const [, toolCall] of state.toolCalls) {
     let args = {};
@@ -469,7 +514,9 @@ function buildResponseFromState(state) {
     metadata: {
       openrouter: {
         model: state.model,
-        finish_reason: state.finishReason
+        finish_reason: state.finishReason,
+        // Store reasoning_details for multi-turn context preservation
+        reasoning_details: state.reasoningDetails.length > 0 ? state.reasoningDetails : void 0
       }
     }
   }
@@ -829,6 +876,18 @@ function transformMessage2(message) {
   }
   const openrouterMeta = message.metadata?.openrouter;
   const functionCallItems = openrouterMeta?.functionCallItems;
+  if (openrouterMeta?.reasoningEncryptedContent) {
+    try {
+      const reasoningData = JSON.parse(openrouterMeta.reasoningEncryptedContent);
+      items.push({
+        type: "reasoning",
+        id: reasoningData.id,
+        summary: reasoningData.summary,
+        encrypted_content: reasoningData.encrypted_content
+      });
+    } catch {
+    }
+  }
   if (functionCallItems && functionCallItems.length > 0) {
     for (const fc of functionCallItems) {
       items.push({
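This is the inbound half of the Responses-API round trip: `transformResponse2` (below) serializes the provider's reasoning item to JSON under `metadata.openrouter.reasoningEncryptedContent`, and this hunk parses it back into a `reasoning` input item so reasoning survives multi-turn use in stateless mode. The envelope's shape as a hedged sketch (field names from the hunks, the interface name is assumed):

    // JSON envelope stored in reasoningEncryptedContent; names per the hunks.
    interface ReasoningEnvelope {
      id: string;                                       // provider's reasoning item id
      summary: Array<{ type: string; text?: string }>;  // e.g. "summary_text" parts
      encrypted_content?: string;                       // opaque encrypted reasoning
    }
    const envelope: ReasoningEnvelope = JSON.parse(
      '{"id":"rs_123","summary":[],"encrypted_content":"gAAAA..."}'
    );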
@@ -912,17 +971,19 @@ function transformTool2(tool) {
   };
 }
 function transformResponse2(data) {
-  const content = [];
+  const reasoningContent = [];
+  const textContent = [];
   const toolCalls = [];
   const functionCallItems = [];
   let hadRefusal = false;
   let structuredData;
+  let reasoningEncryptedContent;
   for (const item of data.output) {
     if (item.type === "message") {
       const messageItem = item;
       for (const part of messageItem.content) {
         if (part.type === "output_text") {
-          content.push({ type: "text", text: part.text });
+          textContent.push({ type: "text", text: part.text });
           if (structuredData === void 0) {
             try {
               structuredData = JSON.parse(part.text);
@@ -930,7 +991,7 @@ function transformResponse2(data) {
             }
           }
         } else if (part.type === "refusal") {
-          content.push({ type: "text", text: part.refusal });
+          textContent.push({ type: "text", text: part.refusal });
           hadRefusal = true;
         }
       }
@@ -955,14 +1016,26 @@ function transformResponse2(data) {
     } else if (item.type === "image_generation_call") {
       const imageGen = item;
       if (imageGen.result) {
-        content.push({
+        textContent.push({
           type: "image",
           mimeType: "image/png",
           source: { type: "base64", data: imageGen.result }
         });
       }
+    } else if (item.type === "reasoning") {
+      const reasoningItem = item;
+      const reasoningText = reasoningItem.summary.filter((s) => s.type === "summary_text").map((s) => s.text).join("");
+      if (reasoningText) {
+        reasoningContent.push({ type: "reasoning", text: reasoningText });
+      }
+      reasoningEncryptedContent = JSON.stringify({
+        id: reasoningItem.id,
+        summary: reasoningItem.summary,
+        encrypted_content: reasoningItem.encrypted_content
+      });
     }
   }
+  const content = [...reasoningContent, ...textContent];
   const responseId = data.id || generateId();
   const message = new AssistantMessage(
     content,
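When a non-streaming Responses payload contains a `reasoning` output item, its `summary_text` parts are joined into one reasoning block (placed before text blocks via the spread above) and the whole item is serialized for the replay in `transformMessage2`. The extraction, isolated with illustrative data:

    // Mirrors the summary extraction in transformResponse2; data is illustrative.
    const reasoningItem = {
      id: "rs_123",
      summary: [
        { type: "summary_text", text: "Weighed both options; " },
        { type: "summary_text", text: "picked the simpler one." }
      ],
      encrypted_content: "gAAAA..."
    };
    const reasoningText = reasoningItem.summary
      .filter((s) => s.type === "summary_text")
      .map((s) => s.text)
      .join("");
    // -> "Weighed both options; picked the simpler one."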
@@ -975,7 +1048,9 @@ function transformResponse2(data) {
         status: data.status,
         // Store response_id for multi-turn tool calling
         response_id: responseId,
-        functionCallItems: functionCallItems.length > 0 ? functionCallItems : void 0
+        functionCallItems: functionCallItems.length > 0 ? functionCallItems : void 0,
+        // Store encrypted reasoning content for multi-turn context (stateless mode)
+        reasoningEncryptedContent
       }
     }
   }
@@ -1010,6 +1085,7 @@ function createStreamState2() {
     id: "",
     model: "",
     textByIndex: /* @__PURE__ */ new Map(),
+    reasoningByIndex: /* @__PURE__ */ new Map(),
     toolCalls: /* @__PURE__ */ new Map(),
     images: /* @__PURE__ */ new Map(),
     status: "in_progress",
@@ -1113,6 +1189,13 @@ function transformStreamEvent2(event, state) {
         if (imageGen.result) {
           state.images.set(event.output_index, imageGen.result);
         }
+      } else if (event.item.type === "reasoning") {
+        const reasoningItem = event.item;
+        state.reasoningEncryptedContent = JSON.stringify({
+          id: reasoningItem.id,
+          summary: reasoningItem.summary,
+          encrypted_content: reasoningItem.encrypted_content
+        });
       }
       events.push({
         type: StreamEventType.ContentBlockStop,
@@ -1193,13 +1276,16 @@ function transformStreamEvent2(event, state) {
       toolCall.arguments = event.arguments;
       break;
     }
-    case "response.reasoning.delta":
+    case "response.reasoning.delta": {
+      const currentReasoning = state.reasoningByIndex.get(0) ?? "";
+      state.reasoningByIndex.set(0, currentReasoning + event.delta);
       events.push({
         type: StreamEventType.ReasoningDelta,
        index: 0,
        delta: { text: event.delta }
      });
      break;
+    }
     case "error":
       break;
     default:
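Previously this case only emitted the delta event, so reasoning text never reached the final message built by `buildResponseFromState2`; it is now also accumulated in `state.reasoningByIndex`. The added braces are required for the new `const`: a lexical declaration directly under an unbraced `case` is scoped to the entire switch. A minimal illustration of that JavaScript rule:

    // Without the inner braces, two cases declaring the same `const` name
    // would clash, since both would share the switch's single block scope.
    function scopeDemo(kind: string): void {
      switch (kind) {
        case "a": {
          const acc = "first";   // scoped to this case's block
          void acc;
          break;
        }
        case "b": {
          const acc = "second";  // no redeclaration error
          void acc;
          break;
        }
      }
    }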
@@ -1208,11 +1294,17 @@ function transformStreamEvent2(event, state) {
   return events;
 }
 function buildResponseFromState2(state) {
-  const content = [];
+  const reasoningContent = [];
+  const textContent = [];
   let structuredData;
+  for (const [, reasoning] of state.reasoningByIndex) {
+    if (reasoning) {
+      reasoningContent.push({ type: "reasoning", text: reasoning });
+    }
+  }
   for (const [, text] of state.textByIndex) {
     if (text) {
-      content.push({ type: "text", text });
+      textContent.push({ type: "text", text });
       if (structuredData === void 0) {
         try {
           structuredData = JSON.parse(text);
@@ -1223,13 +1315,14 @@ function buildResponseFromState2(state) {
   }
   for (const [, imageData] of state.images) {
     if (imageData) {
-      content.push({
+      textContent.push({
         type: "image",
         mimeType: "image/png",
         source: { type: "base64", data: imageData }
       });
     }
   }
+  const content = [...reasoningContent, ...textContent];
   const toolCalls = [];
   const functionCallItems = [];
   for (const [, toolCall] of state.toolCalls) {
@@ -1269,7 +1362,9 @@ function buildResponseFromState2(state) {
         status: state.status,
         // Store response_id for multi-turn tool calling
        response_id: responseId,
-        functionCallItems: functionCallItems.length > 0 ? functionCallItems : void 0
+        functionCallItems: functionCallItems.length > 0 ? functionCallItems : void 0,
+        // Store encrypted reasoning content for multi-turn context (stateless mode)
+        reasoningEncryptedContent: state.reasoningEncryptedContent
       }
     }
   }