@effect/ai-openrouter 0.8.3 → 4.0.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/dist/Generated.d.ts +19505 -0
  2. package/dist/Generated.d.ts.map +1 -0
  3. package/dist/Generated.js +5115 -0
  4. package/dist/Generated.js.map +1 -0
  5. package/dist/OpenRouterClient.d.ts +116 -0
  6. package/dist/OpenRouterClient.d.ts.map +1 -0
  7. package/dist/OpenRouterClient.js +120 -0
  8. package/dist/OpenRouterClient.js.map +1 -0
  9. package/dist/{dts/OpenRouterConfig.d.ts → OpenRouterConfig.d.ts} +9 -9
  10. package/dist/OpenRouterConfig.d.ts.map +1 -0
  11. package/dist/{esm/OpenRouterConfig.js → OpenRouterConfig.js} +8 -5
  12. package/dist/OpenRouterConfig.js.map +1 -0
  13. package/dist/OpenRouterError.d.ts +83 -0
  14. package/dist/OpenRouterError.d.ts.map +1 -0
  15. package/dist/OpenRouterError.js +10 -0
  16. package/dist/OpenRouterError.js.map +1 -0
  17. package/dist/OpenRouterLanguageModel.d.ts +285 -0
  18. package/dist/OpenRouterLanguageModel.d.ts.map +1 -0
  19. package/dist/OpenRouterLanguageModel.js +1210 -0
  20. package/dist/OpenRouterLanguageModel.js.map +1 -0
  21. package/dist/index.d.ts +29 -0
  22. package/dist/index.d.ts.map +1 -0
  23. package/dist/index.js +30 -0
  24. package/dist/index.js.map +1 -0
  25. package/dist/internal/errors.d.ts +2 -0
  26. package/dist/internal/errors.d.ts.map +1 -0
  27. package/dist/internal/errors.js +347 -0
  28. package/dist/internal/errors.js.map +1 -0
  29. package/dist/{dts/internal → internal}/utilities.d.ts.map +1 -1
  30. package/dist/internal/utilities.js +77 -0
  31. package/dist/internal/utilities.js.map +1 -0
  32. package/package.json +45 -62
  33. package/src/Generated.ts +9312 -5435
  34. package/src/OpenRouterClient.ts +223 -304
  35. package/src/OpenRouterConfig.ts +14 -14
  36. package/src/OpenRouterError.ts +92 -0
  37. package/src/OpenRouterLanguageModel.ts +941 -570
  38. package/src/index.ts +20 -4
  39. package/src/internal/errors.ts +373 -0
  40. package/src/internal/utilities.ts +78 -11
  41. package/Generated/package.json +0 -6
  42. package/OpenRouterClient/package.json +0 -6
  43. package/OpenRouterConfig/package.json +0 -6
  44. package/OpenRouterLanguageModel/package.json +0 -6
  45. package/README.md +0 -5
  46. package/dist/cjs/Generated.js +0 -5813
  47. package/dist/cjs/Generated.js.map +0 -1
  48. package/dist/cjs/OpenRouterClient.js +0 -229
  49. package/dist/cjs/OpenRouterClient.js.map +0 -1
  50. package/dist/cjs/OpenRouterConfig.js +0 -30
  51. package/dist/cjs/OpenRouterConfig.js.map +0 -1
  52. package/dist/cjs/OpenRouterLanguageModel.js +0 -825
  53. package/dist/cjs/OpenRouterLanguageModel.js.map +0 -1
  54. package/dist/cjs/index.js +0 -16
  55. package/dist/cjs/index.js.map +0 -1
  56. package/dist/cjs/internal/utilities.js +0 -29
  57. package/dist/cjs/internal/utilities.js.map +0 -1
  58. package/dist/dts/Generated.d.ts +0 -11026
  59. package/dist/dts/Generated.d.ts.map +0 -1
  60. package/dist/dts/OpenRouterClient.d.ts +0 -407
  61. package/dist/dts/OpenRouterClient.d.ts.map +0 -1
  62. package/dist/dts/OpenRouterConfig.d.ts.map +0 -1
  63. package/dist/dts/OpenRouterLanguageModel.d.ts +0 -215
  64. package/dist/dts/OpenRouterLanguageModel.d.ts.map +0 -1
  65. package/dist/dts/index.d.ts +0 -17
  66. package/dist/dts/index.d.ts.map +0 -1
  67. package/dist/esm/Generated.js +0 -5457
  68. package/dist/esm/Generated.js.map +0 -1
  69. package/dist/esm/OpenRouterClient.js +0 -214
  70. package/dist/esm/OpenRouterClient.js.map +0 -1
  71. package/dist/esm/OpenRouterConfig.js.map +0 -1
  72. package/dist/esm/OpenRouterLanguageModel.js +0 -814
  73. package/dist/esm/OpenRouterLanguageModel.js.map +0 -1
  74. package/dist/esm/index.js +0 -17
  75. package/dist/esm/index.js.map +0 -1
  76. package/dist/esm/internal/utilities.js +0 -21
  77. package/dist/esm/internal/utilities.js.map +0 -1
  78. package/dist/esm/package.json +0 -4
  79. package/index/package.json +0 -6
  80. package/dist/{dts/internal → internal}/utilities.d.ts +0 -0
@@ -0,0 +1,1210 @@
1
+ /**
2
+ * @since 1.0.0
3
+ */
4
+ /** @effect-diagnostics preferSchemaOverJson:skip-file */
5
+ import * as Arr from "effect/Array";
6
+ import * as DateTime from "effect/DateTime";
7
+ import * as Effect from "effect/Effect";
8
+ import * as Base64 from "effect/encoding/Base64";
9
+ import { dual } from "effect/Function";
10
+ import * as Layer from "effect/Layer";
11
+ import * as Predicate from "effect/Predicate";
12
+ import * as Redactable from "effect/Redactable";
13
+ import * as SchemaAST from "effect/SchemaAST";
14
+ import * as ServiceMap from "effect/ServiceMap";
15
+ import * as Stream from "effect/Stream";
16
+ import * as AiError from "effect/unstable/ai/AiError";
17
+ import { toCodecAnthropic } from "effect/unstable/ai/AnthropicStructuredOutput";
18
+ import * as IdGenerator from "effect/unstable/ai/IdGenerator";
19
+ import * as LanguageModel from "effect/unstable/ai/LanguageModel";
20
+ import * as AiModel from "effect/unstable/ai/Model";
21
+ import { toCodecOpenAI } from "effect/unstable/ai/OpenAiStructuredOutput";
22
+ import { addGenAIAnnotations } from "effect/unstable/ai/Telemetry";
23
+ import * as Tool from "effect/unstable/ai/Tool";
24
+ import { ReasoningDetailsDuplicateTracker, resolveFinishReason } from "./internal/utilities.js";
25
+ import { OpenRouterClient } from "./OpenRouterClient.js";
26
+ // =============================================================================
27
+ // Configuration
28
+ // =============================================================================
29
+ /**
30
+ * Service definition for OpenRouter language model configuration.
31
+ *
32
+ * @since 1.0.0
33
+ * @category services
34
+ */
35
+ export class Config extends /*#__PURE__*/ServiceMap.Service()("@effect/ai-openrouter/OpenRouterLanguageModel/Config") {}
36
+ // =============================================================================
37
+ // Language Model
38
+ // =============================================================================
39
+ /**
40
+ * @since 1.0.0
41
+ * @category constructors
42
+ */
43
+ export const model = (model, config) => AiModel.make("openai", layer({
44
+ model,
45
+ config
46
+ }));
47
+ /**
48
+ * Creates an OpenRouter language model service.
49
+ *
50
+ * @since 1.0.0
51
+ * @category constructors
52
+ */
53
+ export const make = /*#__PURE__*/Effect.fnUntraced(function* ({
54
+ model,
55
+ config: providerConfig
56
+ }) {
57
+ const client = yield* OpenRouterClient;
58
+ const codecTransformer = getCodecTransformer(model);
59
+ const makeConfig = Effect.gen(function* () {
60
+ const services = yield* Effect.services();
61
+ return {
62
+ model,
63
+ ...providerConfig,
64
+ ...services.mapUnsafe.get(Config.key)
65
+ };
66
+ });
67
+ const makeRequest = Effect.fnUntraced(function* ({
68
+ config,
69
+ options
70
+ }) {
71
+ const messages = yield* prepareMessages({
72
+ options
73
+ });
74
+ const {
75
+ tools,
76
+ toolChoice
77
+ } = yield* prepareTools({
78
+ options,
79
+ transformer: codecTransformer
80
+ });
81
+ const responseFormat = yield* getResponseFormat({
82
+ config,
83
+ options,
84
+ transformer: codecTransformer
85
+ });
86
+ const request = {
87
+ ...config,
88
+ messages,
89
+ ...(Predicate.isNotUndefined(responseFormat) ? {
90
+ response_format: responseFormat
91
+ } : undefined),
92
+ ...(Predicate.isNotUndefined(tools) ? {
93
+ tools
94
+ } : undefined),
95
+ ...(Predicate.isNotUndefined(toolChoice) ? {
96
+ tool_choice: toolChoice
97
+ } : undefined)
98
+ };
99
+ return request;
100
+ });
101
+ return yield* LanguageModel.make({
102
+ generateText: Effect.fnUntraced(function* (options) {
103
+ const config = yield* makeConfig;
104
+ const request = yield* makeRequest({
105
+ config,
106
+ options
107
+ });
108
+ annotateRequest(options.span, request);
109
+ const [rawResponse, response] = yield* client.createChatCompletion(request);
110
+ annotateResponse(options.span, rawResponse);
111
+ return yield* makeResponse({
112
+ rawResponse,
113
+ response
114
+ });
115
+ }),
116
+ streamText: Effect.fnUntraced(function* (options) {
117
+ const config = yield* makeConfig;
118
+ const request = yield* makeRequest({
119
+ config,
120
+ options
121
+ });
122
+ annotateRequest(options.span, request);
123
+ const [response, stream] = yield* client.createChatCompletionStream(request);
124
+ return yield* makeStreamResponse({
125
+ response,
126
+ stream
127
+ });
128
+ }, (effect, options) => effect.pipe(Stream.unwrap, Stream.map(response => {
129
+ annotateStreamResponse(options.span, response);
130
+ return response;
131
+ })))
132
+ }).pipe(Effect.provideService(LanguageModel.CurrentCodecTransformer, codecTransformer));
133
+ });
134
+ /**
135
+ * Creates a layer for the OpenRouter language model.
136
+ *
137
+ * @since 1.0.0
138
+ * @category layers
139
+ */
140
+ export const layer = options => Layer.effect(LanguageModel.LanguageModel, make(options));
141
+ /**
142
+ * Provides config overrides for OpenRouter language model operations.
143
+ *
144
+ * @since 1.0.0
145
+ * @category configuration
146
+ */
147
+ export const withConfigOverride = /*#__PURE__*/dual(2, (self, overrides) => Effect.flatMap(Effect.serviceOption(Config), config => Effect.provideService(self, Config, {
148
+ ...(config._tag === "Some" ? config.value : {}),
149
+ ...overrides
150
+ })));
151
+ // =============================================================================
152
+ // Prompt Conversion
153
+ // =============================================================================
154
+ const prepareMessages = /*#__PURE__*/Effect.fnUntraced(function* ({
155
+ options
156
+ }) {
157
+ const messages = [];
158
+ const reasoningDetailsTracker = new ReasoningDetailsDuplicateTracker();
159
+ for (const message of options.prompt.content) {
160
+ switch (message.role) {
161
+ case "system":
162
+ {
163
+ const cache_control = getCacheControl(message);
164
+ messages.push({
165
+ role: "system",
166
+ content: [{
167
+ type: "text",
168
+ text: message.content,
169
+ ...(Predicate.isNotNull(cache_control) ? {
170
+ cache_control
171
+ } : undefined)
172
+ }]
173
+ });
174
+ break;
175
+ }
176
+ case "user":
177
+ {
178
+ const content = [];
179
+ // Get the message-level cache control
180
+ const messageCacheControl = getCacheControl(message);
181
+ if (message.content.length === 1 && message.content[0].type === "text") {
182
+ messages.push({
183
+ role: "user",
184
+ content: Predicate.isNotNull(messageCacheControl) ? [{
185
+ type: "text",
186
+ text: message.content[0].text,
187
+ cache_control: messageCacheControl
188
+ }] : message.content[0].text
189
+ });
190
+ break;
191
+ }
192
+ // Find the index of the last text part in the message content
193
+ let lastTextPartIndex = -1;
194
+ for (let i = message.content.length - 1; i >= 0; i--) {
195
+ if (message.content[i].type === "text") {
196
+ lastTextPartIndex = i;
197
+ break;
198
+ }
199
+ }
200
+ for (let index = 0; index < message.content.length; index++) {
201
+ const part = message.content[index];
202
+ const isLastTextPart = part.type === "text" && index === lastTextPartIndex;
203
+ const partCacheControl = getCacheControl(part);
204
+ switch (part.type) {
205
+ case "text":
206
+ {
207
+ const cache_control = Predicate.isNotNull(partCacheControl) ? partCacheControl : isLastTextPart ? messageCacheControl : null;
208
+ content.push({
209
+ type: "text",
210
+ text: part.text,
211
+ ...(Predicate.isNotNull(cache_control) ? {
212
+ cache_control
213
+ } : undefined)
214
+ });
215
+ break;
216
+ }
217
+ case "file":
218
+ {
219
+ if (part.mediaType.startsWith("image/")) {
220
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
221
+ content.push({
222
+ type: "image_url",
223
+ image_url: {
224
+ url: part.data instanceof URL ? part.data.toString() : part.data instanceof Uint8Array ? `data:${mediaType};base64,${Base64.encode(part.data)}` : part.data
225
+ },
226
+ ...(Predicate.isNotNull(partCacheControl) ? {
227
+ cache_control: partCacheControl
228
+ } : undefined)
229
+ });
230
+ break;
231
+ }
232
+ const options = part.options.openrouter;
233
+ const fileName = options?.fileName ?? part.fileName ?? "";
234
+ content.push({
235
+ type: "file",
236
+ file: {
237
+ filename: fileName,
238
+ file_data: part.data instanceof URL ? part.data.toString() : part.data instanceof Uint8Array ? `data:${part.mediaType};base64,${Base64.encode(part.data)}` : part.data
239
+ },
240
+ ...(Predicate.isNotNull(partCacheControl) ? {
241
+ cache_control: partCacheControl
242
+ } : undefined)
243
+ });
244
+ break;
245
+ }
246
+ }
247
+ }
248
+ messages.push({
249
+ role: "user",
250
+ content
251
+ });
252
+ break;
253
+ }
254
+ case "assistant":
255
+ {
256
+ let text = "";
257
+ let reasoning = "";
258
+ const toolCalls = [];
259
+ for (const part of message.content) {
260
+ switch (part.type) {
261
+ case "text":
262
+ {
263
+ text += part.text;
264
+ break;
265
+ }
266
+ case "reasoning":
267
+ {
268
+ reasoning += part.text;
269
+ break;
270
+ }
271
+ case "tool-call":
272
+ {
273
+ toolCalls.push({
274
+ type: "function",
275
+ id: part.id,
276
+ function: {
277
+ name: part.name,
278
+ arguments: JSON.stringify(part.params)
279
+ }
280
+ });
281
+ break;
282
+ }
283
+ default:
284
+ {
285
+ break;
286
+ }
287
+ }
288
+ }
289
+ const messageReasoningDetails = message.options.openrouter?.reasoningDetails;
290
+ // Use message-level reasoning details if available, otherwise find from parts
291
+ // Priority: message-level > first tool call > first reasoning part
292
+ // This prevents duplicate thinking blocks when Claude makes parallel tool calls
293
+ const candidateReasoningDetails = Predicate.isNotNullish(messageReasoningDetails) && Array.isArray(messageReasoningDetails) && messageReasoningDetails.length > 0 ? messageReasoningDetails : findFirstReasoningDetails(message.content);
294
+ // Deduplicate reasoning details across all messages to prevent "Duplicate
295
+ // item found with id" errors in multi-turn conversations.
296
+ let reasoningDetails = null;
297
+ if (Predicate.isNotNull(candidateReasoningDetails) && candidateReasoningDetails.length > 0) {
298
+ const uniqueReasoningDetails = [];
299
+ for (const detail of candidateReasoningDetails) {
300
+ if (reasoningDetailsTracker.upsert(detail)) {
301
+ uniqueReasoningDetails.push(detail);
302
+ }
303
+ }
304
+ if (uniqueReasoningDetails.length > 0) {
305
+ reasoningDetails = uniqueReasoningDetails;
306
+ }
307
+ }
308
+ messages.push({
309
+ role: "assistant",
310
+ content: text,
311
+ reasoning: reasoning.length > 0 ? reasoning : null,
312
+ ...(Predicate.isNotNull(reasoningDetails) ? {
313
+ reasoning_details: reasoningDetails
314
+ } : undefined),
315
+ ...(toolCalls.length > 0 ? {
316
+ tool_calls: toolCalls
317
+ } : undefined)
318
+ });
319
+ break;
320
+ }
321
+ case "tool":
322
+ {
323
+ for (const part of message.content) {
324
+ // Skip tool approval parts
325
+ if (part.type === "tool-approval-response") {
326
+ continue;
327
+ }
328
+ messages.push({
329
+ role: "tool",
330
+ tool_call_id: part.id,
331
+ content: JSON.stringify(part.result)
332
+ });
333
+ }
334
+ break;
335
+ }
336
+ }
337
+ }
338
+ return messages;
339
+ });
340
+ // =============================================================================
341
+ // HTTP Details
342
+ // =============================================================================
343
+ const buildHttpRequestDetails = request => ({
344
+ method: request.method,
345
+ url: request.url,
346
+ urlParams: Array.from(request.urlParams),
347
+ hash: request.hash,
348
+ headers: Redactable.redact(request.headers)
349
+ });
350
+ const buildHttpResponseDetails = response => ({
351
+ status: response.status,
352
+ headers: Redactable.redact(response.headers)
353
+ });
354
+ // =============================================================================
355
+ // Response Conversion
356
+ // =============================================================================
357
+ const makeResponse = /*#__PURE__*/Effect.fnUntraced(function* ({
358
+ rawResponse,
359
+ response
360
+ }) {
361
+ const idGenerator = yield* IdGenerator.IdGenerator;
362
+ const parts = [];
363
+ let hasToolCalls = false;
364
+ let hasEncryptedReasoning = false;
365
+ const createdAt = new Date(rawResponse.created * 1000);
366
+ parts.push({
367
+ type: "response-metadata",
368
+ id: rawResponse.id,
369
+ modelId: rawResponse.model,
370
+ timestamp: DateTime.formatIso(DateTime.fromDateUnsafe(createdAt)),
371
+ request: buildHttpRequestDetails(response.request)
372
+ });
373
+ const choice = rawResponse.choices[0];
374
+ if (Predicate.isUndefined(choice)) {
375
+ return yield* AiError.make({
376
+ module: "OpenRouterLanguageModel",
377
+ method: "makeResponse",
378
+ reason: new AiError.InvalidOutputError({
379
+ description: "Received response with empty choices"
380
+ })
381
+ });
382
+ }
383
+ const message = choice.message;
384
+ let finishReason = choice.finish_reason;
385
+ const reasoningDetails = message.reasoning_details;
386
+ if (Predicate.isNotNullish(reasoningDetails) && reasoningDetails.length > 0) {
387
+ for (const detail of reasoningDetails) {
388
+ switch (detail.type) {
389
+ case "reasoning.text":
390
+ {
391
+ if (Predicate.isNotNullish(detail.text) && detail.text.length > 0) {
392
+ parts.push({
393
+ type: "reasoning",
394
+ text: detail.text,
395
+ metadata: {
396
+ openrouter: {
397
+ reasoningDetails: [detail]
398
+ }
399
+ }
400
+ });
401
+ }
402
+ break;
403
+ }
404
+ case "reasoning.summary":
405
+ {
406
+ if (detail.summary.length > 0) {
407
+ parts.push({
408
+ type: "reasoning",
409
+ text: detail.summary,
410
+ metadata: {
411
+ openrouter: {
412
+ reasoningDetails: [detail]
413
+ }
414
+ }
415
+ });
416
+ }
417
+ break;
418
+ }
419
+ case "reasoning.encrypted":
420
+ {
421
+ if (detail.data.length > 0) {
422
+ hasEncryptedReasoning = true;
423
+ parts.push({
424
+ type: "reasoning",
425
+ text: "[REDACTED]",
426
+ metadata: {
427
+ openrouter: {
428
+ reasoningDetails: [detail]
429
+ }
430
+ }
431
+ });
432
+ }
433
+ break;
434
+ }
435
+ }
436
+ }
437
+ } else if (Predicate.isNotNullish(message.reasoning) && message.reasoning.length > 0) {
438
+ // message.reasoning fallback only when reasoning_details absent/empty
439
+ parts.push({
440
+ type: "reasoning",
441
+ text: message.reasoning
442
+ });
443
+ }
444
+ const content = message.content;
445
+ if (Predicate.isNotNullish(content)) {
446
+ if (typeof content === "string") {
447
+ if (content.length > 0) {
448
+ parts.push({
449
+ type: "text",
450
+ text: content
451
+ });
452
+ }
453
+ } else {
454
+ for (const item of content) {
455
+ if (item.type === "text") {
456
+ parts.push({
457
+ type: "text",
458
+ text: item.text
459
+ });
460
+ }
461
+ }
462
+ }
463
+ }
464
+ const toolCalls = message.tool_calls;
465
+ if (Predicate.isNotNullish(toolCalls) && toolCalls.length > 0) {
466
+ hasToolCalls = true;
467
+ for (let index = 0; index < toolCalls.length; index++) {
468
+ const toolCall = toolCalls[index];
469
+ const toolName = toolCall.function.name;
470
+ const toolParams = toolCall.function.arguments ?? "{}";
471
+ const params = yield* Effect.try({
472
+ try: () => Tool.unsafeSecureJsonParse(toolParams),
473
+ catch: cause => AiError.make({
474
+ module: "OpenRouterLanguageModel",
475
+ method: "makeResponse",
476
+ reason: new AiError.ToolParameterValidationError({
477
+ toolName,
478
+ toolParams: {},
479
+ description: `Failed to securely JSON parse tool parameters: ${cause}`
480
+ })
481
+ })
482
+ });
483
+ parts.push({
484
+ type: "tool-call",
485
+ id: toolCall.id,
486
+ name: toolName,
487
+ params,
488
+ // Only attach reasoning_details to the first tool call to avoid
489
+ // duplicating thinking blocks for parallel tool calls (Claude)
490
+ ...(index === 0 && Predicate.isNotNullish(reasoningDetails) && reasoningDetails.length > 0 ? {
491
+ metadata: {
492
+ openrouter: {
493
+ reasoningDetails
494
+ }
495
+ }
496
+ } : undefined)
497
+ });
498
+ }
499
+ }
500
+ const images = message.images;
501
+ if (Predicate.isNotNullish(images)) {
502
+ for (const image of images) {
503
+ const url = image.image_url.url;
504
+ if (url.startsWith("data:")) {
505
+ const mediaType = getMediaType(url, "image/jpeg");
506
+ const data = getBase64FromDataUrl(url);
507
+ parts.push({
508
+ type: "file",
509
+ mediaType,
510
+ data
511
+ });
512
+ } else {
513
+ const id = yield* idGenerator.generateId();
514
+ parts.push({
515
+ type: "source",
516
+ sourceType: "url",
517
+ id,
518
+ url,
519
+ title: ""
520
+ });
521
+ }
522
+ }
523
+ }
524
+ const annotations = choice.message.annotations;
525
+ if (Predicate.isNotNullish(annotations)) {
526
+ for (const annotation of annotations) {
527
+ if (annotation.type === "url_citation") {
528
+ parts.push({
529
+ type: "source",
530
+ sourceType: "url",
531
+ id: annotation.url_citation.url,
532
+ url: annotation.url_citation.url,
533
+ title: annotation.url_citation.title ?? "",
534
+ metadata: {
535
+ openrouter: {
536
+ ...(Predicate.isNotUndefined(annotation.url_citation.content) ? {
537
+ content: annotation.url_citation.content
538
+ } : undefined),
539
+ ...(Predicate.isNotUndefined(annotation.url_citation.start_index) ? {
540
+ startIndex: annotation.url_citation.start_index
541
+ } : undefined),
542
+ ...(Predicate.isNotUndefined(annotation.url_citation.end_index) ? {
543
+ endIndex: annotation.url_citation.end_index
544
+ } : undefined)
545
+ }
546
+ }
547
+ });
548
+ }
549
+ }
550
+ }
551
+ // Extract file annotations to expose in provider metadata
552
+ const fileAnnotations = annotations?.filter(annotation => {
553
+ return annotation.type === "file";
554
+ });
555
+ // Fix for Gemini 3 thoughtSignature: when there are tool calls with encrypted
556
+ // reasoning (thoughtSignature), the model returns 'stop' but expects continuation.
557
+ // Override to 'tool-calls' so the SDK knows to continue the conversation.
558
+ if (hasEncryptedReasoning && hasToolCalls && finishReason === "stop") {
559
+ finishReason = "tool_calls";
560
+ }
561
+ parts.push({
562
+ type: "finish",
563
+ reason: resolveFinishReason(finishReason),
564
+ usage: getUsage(rawResponse.usage),
565
+ response: buildHttpResponseDetails(response),
566
+ metadata: {
567
+ openrouter: {
568
+ systemFingerprint: rawResponse.system_fingerprint ?? null,
569
+ usage: rawResponse.usage ?? null,
570
+ ...(Predicate.isNotUndefined(fileAnnotations) && fileAnnotations.length > 0 ? {
571
+ annotations: fileAnnotations
572
+ } : undefined),
573
+ ...(Predicate.hasProperty(rawResponse, "provider") && Predicate.isString(rawResponse.provider) ? {
574
+ provider: rawResponse.provider
575
+ } : undefined)
576
+ }
577
+ }
578
+ });
579
+ return parts;
580
+ });
581
+ const makeStreamResponse = /*#__PURE__*/Effect.fnUntraced(function* ({
582
+ response,
583
+ stream
584
+ }) {
585
+ const idGenerator = yield* IdGenerator.IdGenerator;
586
+ let textStarted = false;
587
+ let reasoningStarted = false;
588
+ let responseMetadataEmitted = false;
589
+ let reasoningDetailsAttachedToToolCall = false;
590
+ let finishReason = "other";
591
+ let openRouterResponseId = undefined;
592
+ let activeReasoningId = undefined;
593
+ let activeTextId = undefined;
594
+ let totalToolCalls = 0;
595
+ const activeToolCalls = [];
596
+ // Track reasoning details to preserve for multi-turn conversations
597
+ const accumulatedReasoningDetails = [];
598
+ // Track file annotations to expose in provider metadata
599
+ const accumulatedFileAnnotations = [];
600
+ const usage = {
601
+ inputTokens: {
602
+ total: undefined,
603
+ uncached: undefined,
604
+ cacheRead: undefined,
605
+ cacheWrite: undefined
606
+ },
607
+ outputTokens: {
608
+ total: undefined,
609
+ text: undefined,
610
+ reasoning: undefined
611
+ }
612
+ };
613
+ return stream.pipe(Stream.mapEffect(Effect.fnUntraced(function* (event) {
614
+ const parts = [];
615
+ if (Predicate.isNotUndefined(event.error)) {
616
+ finishReason = "error";
617
+ parts.push({
618
+ type: "error",
619
+ error: event.error
620
+ });
621
+ }
622
+ if (Predicate.isNotUndefined(event.id) && !responseMetadataEmitted) {
623
+ const timestamp = yield* DateTime.now;
624
+ parts.push({
625
+ type: "response-metadata",
626
+ id: event.id,
627
+ modelId: event.model,
628
+ timestamp: DateTime.formatIso(timestamp),
629
+ request: buildHttpRequestDetails(response.request)
630
+ });
631
+ responseMetadataEmitted = true;
632
+ }
633
+ if (Predicate.isNotUndefined(event.usage)) {
634
+ const computed = getUsage(event.usage);
635
+ usage.inputTokens = computed.inputTokens;
636
+ usage.outputTokens = computed.outputTokens;
637
+ }
638
+ const choice = event.choices[0];
639
+ if (Predicate.isUndefined(choice)) {
640
+ return yield* AiError.make({
641
+ module: "OpenRouterLanguageModel",
642
+ method: "makeStreamResponse",
643
+ reason: new AiError.InvalidOutputError({
644
+ description: "Received response with empty choices"
645
+ })
646
+ });
647
+ }
648
+ if (Predicate.isNotNull(choice.finish_reason)) {
649
+ finishReason = resolveFinishReason(choice.finish_reason);
650
+ }
651
+ const delta = choice.delta;
652
+ if (Predicate.isNullish(delta)) {
653
+ return parts;
654
+ }
655
+ const emitReasoning = Effect.fnUntraced(function* (delta, metadata) {
656
+ if (!reasoningStarted) {
657
+ activeReasoningId = openRouterResponseId ?? (yield* idGenerator.generateId());
658
+ parts.push({
659
+ type: "reasoning-start",
660
+ id: activeReasoningId,
661
+ metadata
662
+ });
663
+ reasoningStarted = true;
664
+ }
665
+ parts.push({
666
+ type: "reasoning-delta",
667
+ id: activeReasoningId,
668
+ delta,
669
+ metadata
670
+ });
671
+ });
672
+ const reasoningDetails = delta.reasoning_details;
673
+ if (Predicate.isNotUndefined(reasoningDetails) && reasoningDetails.length > 0) {
674
+ // Accumulate reasoning_details to preserve for multi-turn conversations
675
+ // Merge consecutive reasoning.text items into a single entry
676
+ for (const detail of reasoningDetails) {
677
+ if (detail.type === "reasoning.text") {
678
+ const lastDetail = accumulatedReasoningDetails[accumulatedReasoningDetails.length - 1];
679
+ if (Predicate.isNotUndefined(lastDetail) && lastDetail.type === "reasoning.text") {
680
+ // Merge with the previous text detail
681
+ lastDetail.text = (lastDetail.text ?? "") + (detail.text ?? "");
682
+ lastDetail.signature = lastDetail.signature ?? detail.signature ?? null;
683
+ lastDetail.format = lastDetail.format ?? detail.format ?? null;
684
+ } else {
685
+ // Start a new text detail
686
+ accumulatedReasoningDetails.push({
687
+ ...detail
688
+ });
689
+ }
690
+ } else {
691
+ // Non-text details (encrypted, summary) are pushed as-is
692
+ accumulatedReasoningDetails.push(detail);
693
+ }
694
+ }
695
+ // Emit reasoning_details in providerMetadata for each delta chunk
696
+ // so users can accumulate them on their end before sending back
697
+ const metadata = {
698
+ openrouter: {
699
+ reasoningDetails
700
+ }
701
+ };
702
+ for (const detail of reasoningDetails) {
703
+ switch (detail.type) {
704
+ case "reasoning.text":
705
+ {
706
+ if (Predicate.isNotNullish(detail.text)) {
707
+ yield* emitReasoning(detail.text, metadata);
708
+ }
709
+ break;
710
+ }
711
+ case "reasoning.summary":
712
+ {
713
+ if (Predicate.isNotNullish(detail.summary)) {
714
+ yield* emitReasoning(detail.summary, metadata);
715
+ }
716
+ break;
717
+ }
718
+ case "reasoning.encrypted":
719
+ {
720
+ if (Predicate.isNotNullish(detail.data)) {
721
+ yield* emitReasoning("[REDACTED]", metadata);
722
+ }
723
+ break;
724
+ }
725
+ }
726
+ }
727
+ } else if (Predicate.isNotNullish(delta.reasoning)) {
728
+ yield* emitReasoning(delta.reasoning);
729
+ }
730
+ const content = delta.content;
731
+ if (Predicate.isNotNullish(content)) {
732
+ // If reasoning was previously active and now we're starting text content,
733
+ // we should end the reasoning first to maintain proper order
734
+ if (reasoningStarted && !textStarted) {
735
+ parts.push({
736
+ type: "reasoning-end",
737
+ id: activeReasoningId,
738
+ // Include accumulated reasoning_details so the we can update the
739
+ // reasoning part's provider metadata with the correct signature.
740
+ // The signature typically arrives in the last reasoning delta,
741
+ // but reasoning-start only carries the first delta's metadata.
742
+ metadata: accumulatedReasoningDetails.length > 0 ? {
743
+ openRouter: {
744
+ reasoningDetails: accumulatedReasoningDetails
745
+ }
746
+ } : undefined
747
+ });
748
+ reasoningStarted = false;
749
+ }
750
+ if (!textStarted) {
751
+ activeTextId = openRouterResponseId ?? (yield* idGenerator.generateId());
752
+ parts.push({
753
+ type: "text-start",
754
+ id: activeTextId
755
+ });
756
+ textStarted = true;
757
+ }
758
+ parts.push({
759
+ type: "text-delta",
760
+ id: activeTextId,
761
+ delta: content
762
+ });
763
+ }
764
+ const annotations = delta.annotations;
765
+ if (Predicate.isNotNullish(annotations)) {
766
+ for (const annotation of annotations) {
767
+ if (annotation.type === "url_citation") {
768
+ parts.push({
769
+ type: "source",
770
+ sourceType: "url",
771
+ id: annotation.url_citation.url,
772
+ url: annotation.url_citation.url,
773
+ title: annotation.url_citation.title ?? "",
774
+ metadata: {
775
+ openrouter: {
776
+ ...(Predicate.isNotUndefined(annotation.url_citation.content) ? {
777
+ content: annotation.url_citation.content
778
+ } : undefined),
779
+ ...(Predicate.isNotUndefined(annotation.url_citation.start_index) ? {
780
+ startIndex: annotation.url_citation.start_index
781
+ } : undefined),
782
+ ...(Predicate.isNotUndefined(annotation.url_citation.end_index) ? {
783
+ startIndex: annotation.url_citation.end_index
784
+ } : undefined)
785
+ }
786
+ }
787
+ });
788
+ } else if (annotation.type === "file") {
789
+ accumulatedFileAnnotations.push(annotation);
790
+ }
791
+ }
792
+ }
793
+ const toolCalls = delta.tool_calls;
794
+ if (Predicate.isNotNullish(toolCalls)) {
795
+ for (const toolCall of toolCalls) {
796
+ const index = toolCall.index ?? toolCalls.length - 1;
797
+ let activeToolCall = activeToolCalls[index];
798
+ // Tool call start - OpenRouter returns all information except the
799
+ // tool call parameters in the first chunk
800
+ if (Predicate.isUndefined(activeToolCall)) {
801
+ if (toolCall.type !== "function") {
802
+ return yield* AiError.make({
803
+ module: "OpenRouterLanguageModel",
804
+ method: "makeStreamResponse",
805
+ reason: new AiError.InvalidOutputError({
806
+ description: "Received tool call delta that was not of type: 'function'"
807
+ })
808
+ });
809
+ }
810
+ if (Predicate.isUndefined(toolCall.id)) {
811
+ return yield* AiError.make({
812
+ module: "OpenRouterLanguageModel",
813
+ method: "makeStreamResponse",
814
+ reason: new AiError.InvalidOutputError({
815
+ description: "Received tool call delta without a tool call identifier"
816
+ })
817
+ });
818
+ }
819
+ if (Predicate.isUndefined(toolCall.function?.name)) {
820
+ return yield* AiError.make({
821
+ module: "OpenRouterLanguageModel",
822
+ method: "makeStreamResponse",
823
+ reason: new AiError.InvalidOutputError({
824
+ description: "Received tool call delta without a tool call name"
825
+ })
826
+ });
827
+ }
828
+ activeToolCall = {
829
+ id: toolCall.id,
830
+ type: "function",
831
+ name: toolCall.function.name,
832
+ params: toolCall.function.arguments ?? ""
833
+ };
834
+ activeToolCalls[index] = activeToolCall;
835
+ parts.push({
836
+ type: "tool-params-start",
837
+ id: activeToolCall.id,
838
+ name: activeToolCall.name
839
+ });
840
+ // Emit a tool call delta part if parameters were also sent
841
+ if (activeToolCall.params.length > 0) {
842
+ parts.push({
843
+ type: "tool-params-delta",
844
+ id: activeToolCall.id,
845
+ delta: activeToolCall.params
846
+ });
847
+ }
848
+ } else {
849
+ // If an active tool call was found, update and emit the delta for
850
+ // the tool call's parameters
851
+ activeToolCall.params += toolCall.function?.arguments ?? "";
852
+ parts.push({
853
+ type: "tool-params-delta",
854
+ id: activeToolCall.id,
855
+ delta: activeToolCall.params
856
+ });
857
+ }
858
+ // Check if the tool call is complete
859
+ // @effect-diagnostics-next-line tryCatchInEffectGen:off
860
+ try {
861
+ const params = Tool.unsafeSecureJsonParse(activeToolCall.params);
862
+ parts.push({
863
+ type: "tool-params-end",
864
+ id: activeToolCall.id
865
+ });
866
+ parts.push({
867
+ type: "tool-call",
868
+ id: activeToolCall.id,
869
+ name: activeToolCall.name,
870
+ params,
871
+ // Only attach reasoning_details to the first tool call to avoid
872
+ // duplicating thinking blocks for parallel tool calls (Claude)
873
+ metadata: reasoningDetailsAttachedToToolCall ? undefined : {
874
+ openrouter: {
875
+ reasoningDetails: accumulatedReasoningDetails
876
+ }
877
+ }
878
+ });
879
+ reasoningDetailsAttachedToToolCall = true;
880
+ // Increment the total tool calls emitted by the stream and
881
+ // remove the active tool call
882
+ totalToolCalls += 1;
883
+ delete activeToolCalls[toolCall.index];
884
+ } catch {
885
+ // Tool call incomplete, continue parsing
886
+ continue;
887
+ }
888
+ }
889
+ }
890
+ const images = delta.images;
891
+ if (Predicate.isNotNullish(images)) {
892
+ for (const image of images) {
893
+ parts.push({
894
+ type: "file",
895
+ mediaType: getMediaType(image.image_url.url, "image/jpeg"),
896
+ data: getBase64FromDataUrl(image.image_url.url)
897
+ });
898
+ }
899
+ }
900
+ // Usage is only emitted by the last part of the stream, so we need to
901
+ // handle flushing any remaining text / reasoning / tool calls
902
+ if (Predicate.isNotUndefined(event.usage)) {
903
+ // Fix for Gemini 3 thoughtSignature: when there are tool calls with encrypted
904
+ // reasoning (thoughtSignature), the model returns 'stop' but expects continuation.
905
+ // Override to 'tool-calls' so the SDK knows to continue the conversation.
906
+ const hasEncryptedReasoning = accumulatedReasoningDetails.some(detail => detail.type === "reasoning.encrypted" && detail.data.length > 0);
907
+ if (totalToolCalls > 0 && hasEncryptedReasoning && finishReason === "stop") {
908
+ finishReason = resolveFinishReason("tool-calls");
909
+ }
910
+ // Forward any unsent tool calls if finish reason is 'tool-calls'
911
+ if (finishReason === "tool-calls") {
912
+ for (const toolCall of activeToolCalls) {
913
+ // Coerce invalid tool call parameters to an empty object
914
+ let params;
915
+ // @effect-diagnostics-next-line tryCatchInEffectGen:off
916
+ try {
917
+ params = Tool.unsafeSecureJsonParse(toolCall.params);
918
+ } catch {
919
+ params = {};
920
+ }
921
+ // Only attach reasoning_details to the first tool call to avoid
922
+ // duplicating thinking blocks for parallel tool calls (Claude)
923
+ parts.push({
924
+ type: "tool-call",
925
+ id: toolCall.id,
926
+ name: toolCall.name,
927
+ params,
928
+ metadata: reasoningDetailsAttachedToToolCall ? undefined : {
929
+ openrouter: {
930
+ reasoningDetails: accumulatedReasoningDetails
931
+ }
932
+ }
933
+ });
934
+ reasoningDetailsAttachedToToolCall = true;
935
+ }
936
+ }
937
+ // End reasoning first if it was started, to maintain proper order
938
+ if (reasoningStarted) {
939
+ parts.push({
940
+ type: "reasoning-end",
941
+ id: activeReasoningId,
942
+ // Include accumulated reasoning_details so that we can update the
943
+ // reasoning part's provider metadata with the correct signature,
944
+ metadata: accumulatedReasoningDetails.length > 0 ? {
945
+ openrouter: {
946
+ reasoningDetails: accumulatedReasoningDetails
947
+ }
948
+ } : undefined
949
+ });
950
+ }
951
+ if (textStarted) {
952
+ parts.push({
953
+ type: "text-end",
954
+ id: activeTextId
955
+ });
956
+ }
957
+ const metadata = {
958
+ openrouter: {
959
+ ...(Predicate.isNotNullish(event.system_fingerprint) ? {
960
+ systemFingerprint: event.system_fingerprint
961
+ } : undefined),
962
+ ...(Predicate.isNotUndefined(event.usage) ? {
963
+ usage: event.usage
964
+ } : undefined),
965
+ ...(Predicate.hasProperty(event, "provider") && Predicate.isString(event.provider) ? {
966
+ provider: event.provider
967
+ } : undefined),
968
+ ...(accumulatedFileAnnotations.length > 0 ? {
969
+ annotations: accumulatedFileAnnotations
970
+ } : undefined)
971
+ }
972
+ };
973
+ parts.push({
974
+ type: "finish",
975
+ reason: finishReason,
976
+ usage,
977
+ response: buildHttpResponseDetails(response),
978
+ metadata
979
+ });
980
+ }
981
+ return parts;
982
+ })), Stream.flattenIterable);
983
+ });
984
+ // =============================================================================
985
+ // Tool Conversion
986
+ // =============================================================================
987
/**
 * Converts the framework-level tool definitions in `options.tools` into the
 * OpenRouter (OpenAI-compatible) `tools` / `tool_choice` request fields.
 *
 * Fails with an `AiError` when provider-defined tools are present (they are
 * unsupported by this integration) or when a tool's parameter schema cannot
 * be converted into JSON Schema.
 */
const prepareTools = /*#__PURE__*/ Effect.fnUntraced(function* ({
  options,
  transformer
}) {
  // No tools requested: omit both request fields entirely.
  if (options.tools.length === 0) {
    return {
      tools: undefined,
      toolChoice: undefined
    };
  }
  // Provider-defined tools cannot be expressed through OpenRouter yet.
  if (options.tools.some((tool) => Tool.isProviderDefined(tool))) {
    return yield* AiError.make({
      module: "OpenRouterLanguageModel",
      method: "prepareTools",
      reason: new AiError.InvalidUserInputError({
        description: "Provider-defined tools are unsupported by the OpenRouter " + "provider integration at this time"
      })
    });
  }
  let tools = [];
  for (const tool of options.tools) {
    const description = Tool.getDescription(tool);
    const parameters = yield* tryJsonSchema(tool.parametersSchema, "prepareTools", transformer);
    const strict = Tool.getStrictMode(tool) ?? null;
    // Description is only attached when the tool actually declares one.
    const fn = {
      name: tool.name,
      parameters,
      strict,
      ...(Predicate.isNotUndefined(description) ? { description } : undefined)
    };
    tools.push({ type: "function", function: fn });
  }
  let toolChoice = undefined;
  switch (options.toolChoice) {
    case "none":
    case "auto":
    case "required":
      // The simple string modes map through unchanged.
      toolChoice = options.toolChoice;
      break;
    default:
      if ("tool" in options.toolChoice) {
        // Force the model to call one specific tool.
        toolChoice = {
          type: "function",
          function: {
            name: options.toolChoice.tool
          }
        };
      } else {
        // Restrict the advertised tools to the allowed subset and map the
        // subset's mode back onto the plain string form.
        const allowedTools = new Set(options.toolChoice.oneOf);
        tools = tools.filter((tool) => allowedTools.has(tool.function.name));
        toolChoice = options.toolChoice.mode === "required" ? "required" : "auto";
      }
  }
  return {
    tools,
    toolChoice
  };
});
+ // =============================================================================
1049
+ // Telemetry
1050
+ // =============================================================================
1051
/**
 * Records GenAI request attributes (model, sampling parameters, stop
 * sequences) for an OpenRouter chat call on the provided tracing span.
 */
const annotateRequest = (span, request) => {
  // `stop` may be a single value or an array; normalize and drop nullish entries.
  const stopSequences = Arr.ensure(request.stop).filter(Predicate.isNotNullish);
  addGenAIAnnotations(span, {
    system: "openrouter",
    operation: {
      name: "chat"
    },
    request: {
      model: request.model,
      temperature: request.temperature,
      topP: request.top_p,
      maxTokens: request.max_tokens,
      stopSequences
    }
  });
};
/**
 * Records GenAI response attributes (id, model, finish reasons, token usage)
 * from a non-streaming OpenRouter response on the provided tracing span.
 */
const annotateResponse = (span, response) => {
  // Collect the finish reason of every choice, skipping nullish values.
  const finishReasons = [];
  for (const choice of response.choices) {
    if (Predicate.isNotNullish(choice.finish_reason)) {
      finishReasons.push(choice.finish_reason);
    }
  }
  addGenAIAnnotations(span, {
    response: {
      id: response.id,
      model: response.model,
      finishReasons
    },
    usage: {
      inputTokens: response.usage?.prompt_tokens,
      outputTokens: response.usage?.completion_tokens
    }
  });
};
/**
 * Records GenAI response attributes from individual stream parts on the
 * provided tracing span: response metadata parts carry id/model, finish
 * parts carry the finish reason and token usage.
 */
const annotateStreamResponse = (span, part) => {
  switch (part.type) {
    case "response-metadata":
      addGenAIAnnotations(span, {
        response: {
          id: part.id,
          model: part.modelId
        }
      });
      break;
    case "finish":
      addGenAIAnnotations(span, {
        response: {
          finishReasons: [part.reason]
        },
        usage: {
          inputTokens: part.usage.inputTokens.total,
          outputTokens: part.usage.outputTokens.total
        }
      });
      break;
    default:
      // Other stream part types carry no span-worthy metadata.
      break;
  }
};
+ // =============================================================================
1101
+ // Internal Utilities
1102
+ // =============================================================================
1103
// Reads the OpenRouter cache-control setting from a prompt part's provider
// options, normalizing a missing value to `null`.
const getCacheControl = (part) => {
  const cacheControl = part.options.openrouter?.cacheControl;
  return cacheControl ?? null;
};
/**
 * Finds the first non-empty `reasoningDetails` array attached to a content
 * part's OpenRouter provider options, scanning parts in content order.
 *
 * Both tool-call parts (which carry the complete, accumulated reasoning
 * details) and reasoning parts (which carry delta reasoning details) are
 * inspected; returns `null` when no part carries reasoning details.
 *
 * The original duplicated the extraction logic across the two part types and
 * paired a redundant nullish check with `Array.isArray` (which already
 * rejects `null`/`undefined`); both are consolidated here with identical
 * behavior.
 */
const findFirstReasoningDetails = (content) => {
  for (const part of content) {
    if (part.type !== "tool-call" && part.type !== "reasoning") {
      continue;
    }
    const details = part.options.openrouter?.reasoningDetails;
    if (Array.isArray(details) && details.length > 0) {
      return details;
    }
  }
  return null;
};
/**
 * Picks the schema codec transformer appropriate for the target model
 * family, based on well-known model identifier prefixes. Falls back to the
 * generic default transformer for anything unrecognized.
 */
const getCodecTransformer = (model) => {
  const anthropicPrefixes = ["anthropic/", "claude-"];
  if (anthropicPrefixes.some((prefix) => model.startsWith(prefix))) {
    return toCodecAnthropic;
  }
  const openaiPrefixes = ["openai/", "gpt-", "o1-", "o3-", "o4-"];
  if (openaiPrefixes.some((prefix) => model.startsWith(prefix))) {
    return toCodecOpenAI;
  }
  return LanguageModel.defaultCodecTransformer;
};
/**
 * Builds an `AiError` describing a schema that could not be converted to
 * JSON Schema for the given method name.
 */
const unsupportedSchemaError = (error, method) => {
  // Prefer the Error message; stringify anything else that was thrown.
  const description = error instanceof Error ? error.message : String(error);
  return AiError.make({
    module: "OpenRouterLanguageModel",
    method,
    reason: new AiError.UnsupportedSchemaError({ description })
  });
};
/**
 * Attempts to derive a JSON Schema from an Effect schema, mapping any
 * conversion failure to an `UnsupportedSchemaError` tagged with the
 * calling method's name.
 */
const tryJsonSchema = (schema, method, transformer) => {
  const attempt = () => Tool.getJsonSchemaFromSchema(schema, { transformer });
  return Effect.try({
    try: attempt,
    catch: (error) => unsupportedSchemaError(error, method)
  });
};
/**
 * Builds the OpenRouter `response_format` request field. Produces a
 * `json_schema` response format when structured (JSON) output was requested,
 * otherwise yields `undefined` so the field is omitted from the request.
 */
const getResponseFormat = /*#__PURE__*/ Effect.fnUntraced(function* ({
  config,
  options,
  transformer
}) {
  const { responseFormat } = options;
  // Only structured-output requests need a response format.
  if (responseFormat.type !== "json") {
    return undefined;
  }
  const description = SchemaAST.resolveDescription(responseFormat.schema.ast);
  const jsonSchema = yield* tryJsonSchema(responseFormat.schema, "getResponseFormat", transformer);
  const json_schema = {
    name: responseFormat.objectName,
    schema: jsonSchema,
    strict: config.strictJsonSchema ?? null,
    // Description is only attached when the schema declares one.
    ...(Predicate.isNotUndefined(description) ? { description } : undefined)
  };
  return {
    type: "json_schema",
    json_schema
  };
});
/**
 * Extracts the media type from a data URL (e.g. `data:image/png;base64,...`).
 *
 * Per RFC 2397 the media type is terminated by either `;` (parameters or the
 * base64 marker) or `,` (start of the payload). The previous pattern only
 * stopped at `;`, so a parameter-less URL such as `data:image/png,...` would
 * leak the payload into the reported media type; the character class now
 * excludes both terminators.
 *
 * @param {string} dataUrl - The data URL to inspect.
 * @param {string} defaultMediaType - Fallback when no media type is present.
 * @returns {string} The detected media type, or the fallback.
 */
const getMediaType = (dataUrl, defaultMediaType) => {
  const match = dataUrl.match(/^data:([^;,]+)/);
  return match ? match[1] ?? defaultMediaType : defaultMediaType;
};
/**
 * Extracts the base64 payload from a `data:` URL, returning the input
 * unchanged when it is not a base64 data URL.
 *
 * The media-type portion of a data URL may carry parameters (e.g.
 * `data:text/plain;charset=utf-8;base64,...`), so everything up to the
 * `;base64,` marker preceding the payload must be accepted. The previous
 * pattern (`[^;]*`) rejected any URL with a media-type parameter and fell
 * back to returning the whole URL; `[^,]*` matches the full media type and
 * its parameters.
 *
 * @param {string} dataUrl - The data URL (or arbitrary string) to inspect.
 * @returns {string} The base64 payload, or the input when no match.
 */
const getBase64FromDataUrl = (dataUrl) => {
  const match = dataUrl.match(/^data:[^,]*;base64,(.+)$/);
  return match ? match[1] : dataUrl;
};
/**
 * Normalizes OpenRouter usage statistics into the SDK's token-usage shape.
 *
 * When no usage is reported, totals are zeroed and the per-category
 * breakdowns are left `undefined` (unknown). Robustness fix: the original
 * check only handled `undefined` and would throw on a `null` usage value;
 * both nullish cases are now treated as "no usage reported".
 *
 * @param usage - The raw `usage` object from an OpenRouter response, if any.
 * @returns Token usage with input (uncached/total/cacheRead/cacheWrite) and
 *   output (total/text/reasoning) breakdowns.
 */
const getUsage = (usage) => {
  if (usage == null) {
    return {
      inputTokens: {
        uncached: undefined,
        total: 0,
        cacheRead: undefined,
        cacheWrite: undefined
      },
      outputTokens: {
        total: 0,
        text: undefined,
        reasoning: undefined
      }
    };
  }
  const promptTokens = usage.prompt_tokens;
  const completionTokens = usage.completion_tokens;
  // Detail breakdowns default to 0 when the provider omits them.
  const cacheReadTokens = usage.prompt_tokens_details?.cached_tokens ?? 0;
  const cacheWriteTokens = usage.prompt_tokens_details?.cache_write_tokens ?? 0;
  const reasoningTokens = usage.completion_tokens_details?.reasoning_tokens ?? 0;
  return {
    inputTokens: {
      // Input tokens actually processed, i.e. not served from the prompt cache.
      uncached: promptTokens - cacheReadTokens,
      total: promptTokens,
      cacheRead: cacheReadTokens,
      cacheWrite: cacheWriteTokens
    },
    outputTokens: {
      total: completionTokens,
      // Visible text tokens = completion total minus hidden reasoning tokens.
      text: completionTokens - reasoningTokens,
      reasoning: reasoningTokens
    }
  };
};
+ //# sourceMappingURL=OpenRouterLanguageModel.js.map