@langchain/openrouter 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/LICENSE +21 -0
  2. package/README.md +174 -0
  3. package/dist/api-types.d.cts +368 -0
  4. package/dist/api-types.d.cts.map +1 -0
  5. package/dist/api-types.d.ts +368 -0
  6. package/dist/api-types.d.ts.map +1 -0
  7. package/dist/chat_models/index.cjs +401 -0
  8. package/dist/chat_models/index.cjs.map +1 -0
  9. package/dist/chat_models/index.d.cts +160 -0
  10. package/dist/chat_models/index.d.cts.map +1 -0
  11. package/dist/chat_models/index.d.ts +160 -0
  12. package/dist/chat_models/index.d.ts.map +1 -0
  13. package/dist/chat_models/index.js +401 -0
  14. package/dist/chat_models/index.js.map +1 -0
  15. package/dist/chat_models/types.d.cts +97 -0
  16. package/dist/chat_models/types.d.cts.map +1 -0
  17. package/dist/chat_models/types.d.ts +97 -0
  18. package/dist/chat_models/types.d.ts.map +1 -0
  19. package/dist/converters/messages.cjs +90 -0
  20. package/dist/converters/messages.cjs.map +1 -0
  21. package/dist/converters/messages.js +87 -0
  22. package/dist/converters/messages.js.map +1 -0
  23. package/dist/converters/tools.cjs +30 -0
  24. package/dist/converters/tools.cjs.map +1 -0
  25. package/dist/converters/tools.js +29 -0
  26. package/dist/converters/tools.js.map +1 -0
  27. package/dist/index.cjs +10 -0
  28. package/dist/index.d.cts +6 -0
  29. package/dist/index.d.ts +6 -0
  30. package/dist/index.js +5 -0
  31. package/dist/profiles.cjs +2570 -0
  32. package/dist/profiles.cjs.map +1 -0
  33. package/dist/profiles.d.cts +7 -0
  34. package/dist/profiles.d.cts.map +1 -0
  35. package/dist/profiles.d.ts +7 -0
  36. package/dist/profiles.d.ts.map +1 -0
  37. package/dist/profiles.js +2569 -0
  38. package/dist/profiles.js.map +1 -0
  39. package/dist/utils/errors.cjs +82 -0
  40. package/dist/utils/errors.cjs.map +1 -0
  41. package/dist/utils/errors.d.cts +68 -0
  42. package/dist/utils/errors.d.cts.map +1 -0
  43. package/dist/utils/errors.d.ts +68 -0
  44. package/dist/utils/errors.d.ts.map +1 -0
  45. package/dist/utils/errors.js +80 -0
  46. package/dist/utils/errors.js.map +1 -0
  47. package/dist/utils/stream.cjs +28 -0
  48. package/dist/utils/stream.cjs.map +1 -0
  49. package/dist/utils/stream.js +27 -0
  50. package/dist/utils/stream.js.map +1 -0
  51. package/dist/utils/structured_output.cjs +44 -0
  52. package/dist/utils/structured_output.cjs.map +1 -0
  53. package/dist/utils/structured_output.js +43 -0
  54. package/dist/utils/structured_output.js.map +1 -0
  55. package/package.json +86 -0
@@ -0,0 +1,401 @@
1
+ const require_messages = require('../converters/messages.cjs');
2
+ const require_tools = require('../converters/tools.cjs');
3
+ const require_errors = require('../utils/errors.cjs');
4
+ const require_structured_output = require('../utils/structured_output.cjs');
5
+ const require_stream = require('../utils/stream.cjs');
6
+ const require_profiles = require('../profiles.cjs');
7
+ let _langchain_core_language_models_chat_models = require("@langchain/core/language_models/chat_models");
8
+ let _langchain_core_messages = require("@langchain/core/messages");
9
+ let _langchain_core_outputs = require("@langchain/core/outputs");
10
+ let _langchain_core_runnables = require("@langchain/core/runnables");
11
+ let _langchain_core_utils_json_schema = require("@langchain/core/utils/json_schema");
12
+ let _langchain_core_utils_types = require("@langchain/core/utils/types");
13
+ let _langchain_core_output_parsers = require("@langchain/core/output_parsers");
14
+ let _langchain_core_output_parsers_openai_tools = require("@langchain/core/output_parsers/openai_tools");
15
+ let _langchain_core_utils_env = require("@langchain/core/utils/env");
16
+ let eventsource_parser_stream = require("eventsource-parser/stream");
17
+
18
+ //#region src/chat_models/index.ts
19
+ // Root of the OpenRouter REST API; can be overridden per-instance via the `baseURL` field.
+ const DEFAULT_BASE_URL = "https://openrouter.ai/api/v1";
20
/**
 * Chat model integration for the OpenRouter REST API.
 *
 * Requests go through the global `fetch` (no vendor SDK) and the class
 * supports tool calling, structured output, and SSE streaming. Any model
 * hosted on OpenRouter can be selected by passing its identifier (e.g.
 * `"anthropic/claude-4-sonnet"`) as the `model` param.
 */
var ChatOpenRouter = class extends _langchain_core_language_models_chat_models.BaseChatModel {
  static lc_name() {
    return "ChatOpenRouter";
  }
  lc_serializable = true;
  /** Secret fields mapped to the environment variable that can supply them. */
  get lc_secrets() {
    return { apiKey: "OPENROUTER_API_KEY" };
  }
  /** Serialized JSON may use `modelName` as an alias for `model`. */
  get lc_aliases() {
    return { modelName: "model" };
  }
  /** Option names overridable per-call through `.bind()` / `.withConfig()`. */
  get callKeys() {
    const openRouterKeys = [
      "tools",
      "tool_choice",
      "response_format",
      "seed",
      "strict",
      "temperature",
      "maxTokens",
      "topP",
      "topK",
      "frequencyPenalty",
      "presencePenalty",
      "repetitionPenalty",
      "minP",
      "topA",
      "stop",
      "logitBias",
      "topLogprobs",
      "user",
      "transforms",
      "models",
      "route",
      "provider",
      "plugins",
      "prediction"
    ];
    return [...super.callKeys, ...openRouterKeys];
  }
  /** Model identifier, e.g. `"anthropic/claude-4-sonnet"`. */
  model;
  /** OpenRouter API key; falls back to the `OPENROUTER_API_KEY` env var. */
  apiKey;
  /** API base URL; defaults to `"https://openrouter.ai/api/v1"`. */
  baseURL;
  /** Sampling temperature (0–2). */
  temperature;
  /** Upper bound on the number of generated tokens. */
  maxTokens;
  /** Nucleus-sampling probability cutoff. */
  topP;
  /** Top-K sampling: restrict choices to the K likeliest tokens. */
  topK;
  /** Additive penalty scaled by how often a token has appeared (−2 to 2). */
  frequencyPenalty;
  /** Additive penalty applied once a token has appeared at all (−2 to 2). */
  presencePenalty;
  /** Multiplicative penalty on logits of repeated tokens (0 to 2). */
  repetitionPenalty;
  /** Minimum probability a token needs to stay in the sampling pool. */
  minP;
  /** Top-A sampling threshold. */
  topA;
  /** Seed for deterministic generation. */
  seed;
  /** Sequences that terminate generation. */
  stop;
  /** Per-token logit biases applied during sampling. */
  logitBias;
  /** How many top log-probabilities to return per token. */
  topLogprobs;
  /** Stable end-user identifier, used for abuse detection. */
  user;
  /** OpenRouter-specific request transformations. */
  transforms;
  /** OpenRouter-specific model list used for routing. */
  models;
  /** OpenRouter-specific routing strategy. */
  route;
  /** OpenRouter-specific provider preferences and ordering. */
  provider;
  /** OpenRouter plugins to enable (e.g. web search). */
  plugins;
  /** Site URL — used for rankings on openrouter.ai. */
  siteUrl;
  /** Site/app name — used for rankings on openrouter.ai. */
  siteName;
  /** Extra params merged verbatim into the API request body. */
  modelKwargs;
  /** Whether streamed chunks carry token usage. Defaults to `true`. */
  streamUsage;
  constructor(fields) {
    super(fields);
    const { getEnvironmentVariable } = _langchain_core_utils_env;
    const resolvedKey = fields.apiKey ?? getEnvironmentVariable("OPENROUTER_API_KEY");
    if (!resolvedKey) {
      throw new require_errors.OpenRouterAuthError("OpenRouter API key is required. Get one at https://openrouter.ai/keys and set it via the `apiKey` parameter or the OPENROUTER_API_KEY environment variable.");
    }
    this.apiKey = resolvedKey;
    if (!fields.model) {
      throw new Error('ChatOpenRouter requires a `model` parameter, e.g. "openai/gpt-4o-mini".');
    }
    this.model = fields.model;
    this.baseURL = fields.baseURL ?? DEFAULT_BASE_URL;
    // Copy the optional generation/routing knobs straight off the fields bag.
    const passthroughKeys = [
      "temperature",
      "maxTokens",
      "topP",
      "topK",
      "frequencyPenalty",
      "presencePenalty",
      "repetitionPenalty",
      "minP",
      "topA",
      "seed",
      "stop",
      "logitBias",
      "topLogprobs",
      "user",
      "transforms",
      "models",
      "route",
      "provider",
      "plugins",
      "siteUrl",
      "siteName",
      "modelKwargs"
    ];
    for (const key of passthroughKeys) this[key] = fields[key];
    this.streamUsage = fields.streamUsage ?? true;
  }
  _llmType() {
    return "openrouter";
  }
  /** Static capability profile (context size, tool support, …) for the configured model. */
  get profile() {
    return require_profiles.default[this.model] ?? {};
  }
  /** Auth + content-type headers, plus the optional site-attribution headers. */
  buildHeaders() {
    const headers = {
      Authorization: `Bearer ${this.apiKey}`,
      "Content-Type": "application/json"
    };
    if (this.siteUrl) headers["HTTP-Referer"] = this.siteUrl;
    if (this.siteName) headers["X-Title"] = this.siteName;
    return headers;
  }
  /** Full URL of the chat-completions endpoint. */
  buildUrl() {
    return `${this.baseURL}/chat/completions`;
  }
  /**
   * Merges constructor-level defaults with per-call overrides into the API
   * request body (everything except `messages`, which is appended later).
   */
  invocationParams(options) {
    const formattedTools = options.tools ? require_tools.convertToolsToOpenRouter(options.tools, { strict: options.strict }) : undefined;
    const resolvedToolChoice = require_tools.formatToolChoice(options.tool_choice);
    return {
      model: this.model,
      temperature: options.temperature ?? this.temperature,
      max_tokens: options.maxTokens ?? this.maxTokens,
      top_p: options.topP ?? this.topP,
      top_k: options.topK ?? this.topK,
      frequency_penalty: options.frequencyPenalty ?? this.frequencyPenalty,
      presence_penalty: options.presencePenalty ?? this.presencePenalty,
      repetition_penalty: options.repetitionPenalty ?? this.repetitionPenalty,
      min_p: options.minP ?? this.minP,
      top_a: options.topA ?? this.topA,
      seed: options.seed ?? this.seed,
      stop: options.stop ?? this.stop,
      logit_bias: options.logitBias ?? this.logitBias,
      top_logprobs: options.topLogprobs ?? this.topLogprobs,
      user: options.user ?? this.user,
      tools: formattedTools,
      tool_choice: resolvedToolChoice,
      response_format: options.response_format,
      ...(options.prediction ? { prediction: options.prediction } : {}),
      transforms: options.transforms ?? this.transforms,
      models: options.models ?? this.models,
      route: options.route ?? this.route,
      provider: options.provider ?? this.provider,
      plugins: options.plugins ?? this.plugins,
      ...this.modelKwargs
    };
  }
  /** Metadata for LangSmith tracing (provider, model name, temperature, …). */
  getLsParams(options) {
    const params = this.invocationParams(options);
    return {
      ls_provider: "openrouter",
      ls_model_name: this.model,
      ls_model_type: "chat",
      ls_temperature: params.temperature ?? undefined,
      ls_max_tokens: params.max_tokens ?? undefined,
      ls_stop: options.stop
    };
  }
  /**
   * Non-streaming generation: sends one request and returns the complete
   * response with the generated message and token usage.
   */
  async _generate(messages, options, runManager) {
    const requestBody = {
      ...this.invocationParams(options),
      messages: require_messages.convertMessagesToOpenRouterParams(messages, this.model),
      stream: false
    };
    const response = await fetch(this.buildUrl(), {
      method: "POST",
      headers: this.buildHeaders(),
      body: JSON.stringify(requestBody),
      signal: options.signal
    });
    if (!response.ok) {
      throw await require_errors.OpenRouterError.fromResponse(response);
    }
    const payload = await response.json();
    const firstChoice = payload.choices[0];
    if (!firstChoice) {
      throw new require_errors.OpenRouterError("No choices returned in response.");
    }
    const message = require_messages.convertOpenRouterResponseToBaseMessage(firstChoice, payload);
    if (_langchain_core_messages.AIMessage.isInstance(message)) {
      message.usage_metadata = require_messages.convertUsageMetadata(payload.usage);
    }
    const text = typeof message.content === "string" ? message.content : "";
    await runManager?.handleLLMNewToken(text);
    return {
      generations: [{
        text,
        message,
        generationInfo: { finish_reason: firstChoice.finish_reason }
      }],
      llmOutput: { tokenUsage: payload.usage }
    };
  }
  /**
   * Streaming generation. Opens an SSE connection and yields one
   * `ChatGenerationChunk` per delta received from the API. The stream
   * pipeline is: raw bytes -> text -> SSE events -> JSON-parsed deltas.
   */
  async *_streamResponseChunks(messages, options, runManager) {
    const requestBody = {
      ...this.invocationParams(options),
      messages: require_messages.convertMessagesToOpenRouterParams(messages, this.model),
      stream: true
    };
    const response = await fetch(this.buildUrl(), {
      method: "POST",
      headers: this.buildHeaders(),
      body: JSON.stringify(requestBody),
      signal: options.signal
    });
    if (!response.ok) {
      throw await require_errors.OpenRouterError.fromResponse(response);
    }
    if (!response.body) return;
    const parsedStream = response.body
      .pipeThrough(new TextDecoderStream())
      .pipeThrough(new eventsource_parser_stream.EventSourceParserStream())
      .pipeThrough(new require_stream.OpenRouterJsonParseStream());
    const reader = parsedStream.getReader();
    let defaultRole;
    try {
      while (true) {
        const { done, value: event } = await reader.read();
        if (done) break;
        if (!event) continue;
        const choice = event.choices?.[0];
        if (!choice?.delta) continue;
        const chunk = require_messages.convertOpenRouterDeltaToBaseMessageChunk(choice.delta, event, defaultRole);
        // Later deltas may omit `role`; remember the last one seen for reuse.
        defaultRole = choice.delta.role ?? defaultRole;
        if (event.usage && this.streamUsage && _langchain_core_messages.AIMessageChunk.isInstance(chunk)) {
          chunk.usage_metadata = require_messages.convertUsageMetadata(event.usage);
        }
        const text = typeof chunk.content === "string" ? chunk.content : "";
        const generationInfo = {};
        if (choice.finish_reason) generationInfo.finish_reason = choice.finish_reason;
        yield new _langchain_core_outputs.ChatGenerationChunk({ message: chunk, text, generationInfo });
        await runManager?.handleLLMNewToken(text);
      }
    } finally {
      reader.releaseLock();
    }
  }
  /**
   * Returns a new Runnable with the given tools bound into every call;
   * shorthand for `.withConfig({ tools, ...kwargs })`.
   */
  bindTools(tools, kwargs) {
    return this.withConfig({ ...kwargs, tools });
  }
  /**
   * Returns a Runnable that constrains output to `outputSchema` (a Zod
   * schema or plain JSON Schema object). The extraction method — JSON
   * Schema response format, JSON mode, or function calling — is resolved
   * from the model's capabilities unless `config.method` overrides it.
   * With `config.includeRaw`, the result carries both the raw message and
   * the parsed value; `parsed` falls back to `null` if parsing throws.
   */
  withStructuredOutput(outputSchema, config) {
    const { toJsonSchema } = _langchain_core_utils_json_schema;
    const { isInteropZodSchema, getSchemaDescription } = _langchain_core_utils_types;
    const { StructuredOutputParser, JsonOutputParser } = _langchain_core_output_parsers;
    const { JsonOutputKeyToolsParser } = _langchain_core_output_parsers_openai_tools;
    const schema = outputSchema;
    const name = config?.name;
    const includeRaw = config?.includeRaw;
    const method = require_structured_output.resolveOpenRouterStructuredOutputMethod({
      model: this.model,
      method: config?.method,
      profile: this.profile,
      models: this.models,
      route: this.route
    });
    const asJsonSchema = toJsonSchema(schema);
    let llm;
    let outputParser;
    if (method === "jsonSchema") {
      const schemaName = name ?? "extract";
      llm = this.withConfig({
        response_format: {
          type: "json_schema",
          json_schema: {
            name: schemaName,
            description: getSchemaDescription(schema),
            schema: asJsonSchema,
            strict: config?.strict
          }
        },
        ls_structured_output_format: {
          kwargs: { method: "json_schema" },
          schema: {
            title: schemaName,
            description: getSchemaDescription(schema),
            ...asJsonSchema
          }
        }
      });
      outputParser = isInteropZodSchema(schema) ? StructuredOutputParser.fromZodSchema(schema) : new JsonOutputParser();
    } else if (method === "jsonMode") {
      llm = this.withConfig({
        response_format: { type: "json_object" },
        ls_structured_output_format: {
          kwargs: { method: "json_mode" },
          schema: {
            title: name ?? "extract",
            ...asJsonSchema
          }
        }
      });
      outputParser = isInteropZodSchema(schema) ? StructuredOutputParser.fromZodSchema(schema) : new JsonOutputParser();
    } else {
      // Function-calling fallback: expose the schema as a single forced tool.
      let functionName = name ?? "extract";
      if ("name" in schema) {
        functionName = schema.name;
      }
      const toolConfig = {
        tools: [{
          type: "function",
          function: {
            name: functionName,
            description: getSchemaDescription(schema) ?? "",
            parameters: asJsonSchema
          }
        }],
        tool_choice: {
          type: "function",
          function: { name: functionName }
        },
        ls_structured_output_format: {
          kwargs: { method: "function_calling" },
          schema: {
            title: functionName,
            ...asJsonSchema
          }
        }
      };
      if (config?.strict !== undefined) toolConfig.strict = config.strict;
      llm = this.withConfig(toolConfig);
      outputParser = isInteropZodSchema(schema) ? new JsonOutputKeyToolsParser({
        returnSingle: true,
        keyName: functionName,
        zodSchema: schema
      }) : new JsonOutputKeyToolsParser({
        returnSingle: true,
        keyName: functionName
      });
    }
    if (!includeRaw) {
      return llm.pipe(outputParser).withConfig({ runName: "ChatOpenRouterStructuredOutput" });
    }
    const assignParsed = _langchain_core_runnables.RunnablePassthrough.assign({ parsed: (input, cfg) => outputParser.invoke(input.raw, cfg) });
    const nullParsed = _langchain_core_runnables.RunnablePassthrough.assign({ parsed: () => null });
    const parsedWithFallback = assignParsed.withFallbacks({ fallbacks: [nullParsed] });
    return _langchain_core_runnables.RunnableSequence.from([{ raw: llm }, parsedWithFallback]).withConfig({ runName: "ChatOpenRouterStructuredOutput" });
  }
};
398
+
399
+ //#endregion
400
+ // Public CommonJS surface: the chat model class is this module's only export.
+ exports.ChatOpenRouter = ChatOpenRouter;
401
+ //# sourceMappingURL=index.cjs.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.cjs","names":["BaseChatModel","OpenRouterAuthError","PROFILES","convertToolsToOpenRouter","formatToolChoice","convertMessagesToOpenRouterParams","OpenRouterError","convertOpenRouterResponseToBaseMessage","AIMessage","convertUsageMetadata","EventSourceParserStream","OpenRouterJsonParseStream","convertOpenRouterDeltaToBaseMessageChunk","AIMessageChunk","ChatGenerationChunk","resolveOpenRouterStructuredOutputMethod","StructuredOutputParser","JsonOutputParser","JsonOutputKeyToolsParser","RunnablePassthrough","RunnableSequence"],"sources":["../../src/chat_models/index.ts"],"sourcesContent":["import {\n BaseChatModel,\n type BindToolsInput,\n type LangSmithParams,\n} from \"@langchain/core/language_models/chat_models\";\nimport type { ModelProfile } from \"@langchain/core/language_models/profile\";\nimport type {\n BaseLanguageModelInput,\n StructuredOutputMethodOptions,\n} from \"@langchain/core/language_models/base\";\nimport {\n AIMessage,\n AIMessageChunk,\n BaseMessage,\n} from \"@langchain/core/messages\";\nimport { ChatGenerationChunk, type ChatResult } from \"@langchain/core/outputs\";\nimport type { CallbackManagerForLLMRun } from \"@langchain/core/callbacks/manager\";\nimport {\n Runnable,\n RunnablePassthrough,\n RunnableSequence,\n} from \"@langchain/core/runnables\";\nimport { toJsonSchema } from \"@langchain/core/utils/json_schema\";\nimport {\n type InteropZodType,\n isInteropZodSchema,\n getSchemaDescription,\n} from \"@langchain/core/utils/types\";\nimport {\n JsonOutputParser,\n StructuredOutputParser,\n} from \"@langchain/core/output_parsers\";\nimport { JsonOutputKeyToolsParser } from \"@langchain/core/output_parsers/openai_tools\";\nimport { getEnvironmentVariable } from \"@langchain/core/utils/env\";\nimport { EventSourceParserStream } from \"eventsource-parser/stream\";\n\nimport type {\n ChatOpenRouterParams,\n ChatOpenRouterCallOptions,\n} from \"./types.js\";\nimport type { OpenAI as OpenAIClient } from 
\"openai\";\nimport type { OpenRouter } from \"../api-types.js\";\n\n/**\n * Full request body sent to the OpenRouter `/chat/completions` endpoint.\n *\n * Extends the base generation params with OpenRouter-specific sampling\n * knobs (`top_k`, `min_p`, etc.) and features (`prediction`, `transforms`)\n * that aren't part of the standard OpenAI spec.\n */\ntype OpenRouterRequestBody = Omit<\n OpenRouter.ChatGenerationParams,\n \"messages\"\n> & {\n messages: OpenAIClient.Chat.Completions.ChatCompletionMessageParam[];\n top_k?: number | null;\n repetition_penalty?: number | null;\n min_p?: number | null;\n top_a?: number | null;\n prediction?: { type: \"content\"; content: string };\n transforms?: string[];\n};\nimport {\n convertMessagesToOpenRouterParams,\n convertOpenRouterResponseToBaseMessage,\n convertOpenRouterDeltaToBaseMessageChunk,\n convertUsageMetadata,\n} from \"../converters/messages.js\";\nimport {\n convertToolsToOpenRouter,\n formatToolChoice,\n} from \"../converters/tools.js\";\nimport { OpenRouterError, OpenRouterAuthError } from \"../utils/errors.js\";\nimport { resolveOpenRouterStructuredOutputMethod } from \"../utils/structured_output.js\";\nimport { OpenRouterJsonParseStream } from \"../utils/stream.js\";\nimport PROFILES from \"../profiles.js\";\n\nconst DEFAULT_BASE_URL = \"https://openrouter.ai/api/v1\";\n\n/**\n * OpenRouter chat model integration.\n *\n * Talks directly to the OpenRouter REST API via `fetch` (no SDK dependency)\n * and supports tool calling, structured output, and streaming. Any model\n * available on OpenRouter can be used by passing its identifier (e.g.\n * `\"anthropic/claude-4-sonnet\"`) as the `model` param.\n */\nexport class ChatOpenRouter extends BaseChatModel<\n ChatOpenRouterCallOptions,\n AIMessageChunk\n> {\n static lc_name() {\n return \"ChatOpenRouter\";\n }\n\n lc_serializable = true;\n\n /** Maps secret fields to the environment variable they can be loaded from. 
*/\n get lc_secrets(): { [key: string]: string } | undefined {\n return {\n apiKey: \"OPENROUTER_API_KEY\",\n };\n }\n\n /** Allows serialized JSON to use `modelName` as an alias for `model`. */\n get lc_aliases(): Record<string, string> {\n return {\n modelName: \"model\",\n };\n }\n\n /** Fields that may be overridden per-call via `.bind()` / `.withConfig()`. */\n get callKeys(): string[] {\n return [\n ...super.callKeys,\n \"tools\",\n \"tool_choice\",\n \"response_format\",\n \"seed\",\n \"strict\",\n \"temperature\",\n \"maxTokens\",\n \"topP\",\n \"topK\",\n \"frequencyPenalty\",\n \"presencePenalty\",\n \"repetitionPenalty\",\n \"minP\",\n \"topA\",\n \"stop\",\n \"logitBias\",\n \"topLogprobs\",\n \"user\",\n \"transforms\",\n \"models\",\n \"route\",\n \"provider\",\n \"plugins\",\n \"prediction\",\n ];\n }\n\n /** Model identifier, e.g. `\"anthropic/claude-4-sonnet\"`. */\n model: string;\n\n /** OpenRouter API key. Falls back to the `OPENROUTER_API_KEY` env var. */\n apiKey: string;\n\n /** Base URL for the API. Defaults to `\"https://openrouter.ai/api/v1\"`. */\n baseURL: string;\n\n /** Sampling temperature (0–2). */\n temperature?: number;\n\n /** Maximum number of tokens to generate. */\n maxTokens?: number;\n\n /** Nucleus sampling cutoff probability. */\n topP?: number;\n\n /** Top-K sampling: only consider the K most likely tokens. */\n topK?: number;\n\n /** Additive penalty based on how often a token has appeared so far (−2 to 2). */\n frequencyPenalty?: number;\n\n /** Additive penalty based on whether a token has appeared at all (−2 to 2). */\n presencePenalty?: number;\n\n /** Multiplicative penalty applied to repeated token logits (0 to 2). */\n repetitionPenalty?: number;\n\n /** Minimum probability threshold for token sampling. */\n minP?: number;\n\n /** Top-A sampling threshold. */\n topA?: number;\n\n /** Random seed for deterministic generation. */\n seed?: number;\n\n /** Stop sequences that halt generation. 
*/\n stop?: string[];\n\n /** Token-level biases to apply during sampling. */\n logitBias?: Record<string, number>;\n\n /** Number of most-likely log-probabilities to return per token. */\n topLogprobs?: number;\n\n /** Stable identifier for end-users, used for abuse detection. */\n user?: string;\n\n /** OpenRouter-specific transformations to apply to the request. */\n transforms?: string[];\n\n /** OpenRouter-specific list of models for routing. */\n models?: string[];\n\n /** OpenRouter-specific routing strategy. */\n route?: \"fallback\";\n\n /** OpenRouter-specific provider preferences and ordering. */\n provider?: OpenRouter.ProviderPreferences;\n\n /** OpenRouter plugins to enable (e.g. web search). */\n plugins?: ChatOpenRouterParams[\"plugins\"];\n\n /** Your site URL — used for rankings on openrouter.ai. */\n siteUrl?: string;\n\n /** Your site/app name — used for rankings on openrouter.ai. */\n siteName?: string;\n\n /** Extra params passed through to the API body. */\n modelKwargs?: Record<string, unknown>;\n\n /** Whether to include token usage in streaming chunks. Defaults to `true`. */\n streamUsage: boolean;\n\n constructor(fields: ChatOpenRouterParams) {\n super(fields);\n const apiKey =\n fields.apiKey ?? getEnvironmentVariable(\"OPENROUTER_API_KEY\");\n if (!apiKey) {\n throw new OpenRouterAuthError(\n \"OpenRouter API key is required. Get one at https://openrouter.ai/keys and set it via the `apiKey` parameter or the OPENROUTER_API_KEY environment variable.\"\n );\n }\n this.apiKey = apiKey;\n if (!fields.model) {\n throw new Error(\n 'ChatOpenRouter requires a `model` parameter, e.g. \"openai/gpt-4o-mini\".'\n );\n }\n this.model = fields.model;\n this.baseURL = fields.baseURL ?? 
DEFAULT_BASE_URL;\n this.temperature = fields.temperature;\n this.maxTokens = fields.maxTokens;\n this.topP = fields.topP;\n this.topK = fields.topK;\n this.frequencyPenalty = fields.frequencyPenalty;\n this.presencePenalty = fields.presencePenalty;\n this.repetitionPenalty = fields.repetitionPenalty;\n this.minP = fields.minP;\n this.topA = fields.topA;\n this.seed = fields.seed;\n this.stop = fields.stop;\n this.logitBias = fields.logitBias;\n this.topLogprobs = fields.topLogprobs;\n this.user = fields.user;\n this.transforms = fields.transforms;\n this.models = fields.models;\n this.route = fields.route;\n this.provider = fields.provider;\n this.plugins = fields.plugins;\n this.siteUrl = fields.siteUrl;\n this.siteName = fields.siteName;\n this.modelKwargs = fields.modelKwargs;\n this.streamUsage = fields.streamUsage ?? true;\n }\n\n _llmType(): string {\n return \"openrouter\";\n }\n\n /** Static capability profile (context size, tool support, etc.) for the current model. */\n get profile(): ModelProfile {\n return PROFILES[this.model] ?? {};\n }\n\n /** Builds auth + content-type headers, plus optional site attribution headers. */\n private buildHeaders(): Record<string, string> {\n return {\n Authorization: `Bearer ${this.apiKey}`,\n \"Content-Type\": \"application/json\",\n ...(this.siteUrl ? { \"HTTP-Referer\": this.siteUrl } : {}),\n ...(this.siteName ? { \"X-Title\": this.siteName } : {}),\n };\n }\n\n /** Returns the full chat-completions endpoint URL. */\n private buildUrl(): string {\n return `${this.baseURL}/chat/completions`;\n }\n\n /**\n * Merges constructor-level defaults with per-call overrides into the\n * API request body (everything except `messages`, which is added later).\n */\n override invocationParams(\n options: this[\"ParsedCallOptions\"]\n ): Omit<OpenRouterRequestBody, \"messages\"> {\n const tools = options.tools\n ? 
convertToolsToOpenRouter(options.tools, { strict: options.strict })\n : undefined;\n const toolChoice = formatToolChoice(options.tool_choice);\n\n return {\n model: this.model,\n temperature: options.temperature ?? this.temperature,\n max_tokens: options.maxTokens ?? this.maxTokens,\n top_p: options.topP ?? this.topP,\n top_k: options.topK ?? this.topK,\n frequency_penalty: options.frequencyPenalty ?? this.frequencyPenalty,\n presence_penalty: options.presencePenalty ?? this.presencePenalty,\n repetition_penalty: options.repetitionPenalty ?? this.repetitionPenalty,\n min_p: options.minP ?? this.minP,\n top_a: options.topA ?? this.topA,\n seed: options.seed ?? this.seed,\n stop: options.stop ?? this.stop,\n logit_bias: options.logitBias ?? this.logitBias,\n top_logprobs: options.topLogprobs ?? this.topLogprobs,\n user: options.user ?? this.user,\n tools,\n tool_choice: toolChoice,\n response_format: options.response_format,\n ...(options.prediction ? { prediction: options.prediction } : {}),\n transforms: options.transforms ?? this.transforms,\n models: options.models ?? this.models,\n route: options.route ?? this.route,\n provider: options.provider ?? this.provider,\n plugins: options.plugins ?? this.plugins,\n ...this.modelKwargs,\n };\n }\n\n /** Returns metadata for LangSmith tracing (provider, model name, temperature, etc.). */\n override getLsParams(options: this[\"ParsedCallOptions\"]): LangSmithParams {\n const params = this.invocationParams(options);\n return {\n ls_provider: \"openrouter\",\n ls_model_name: this.model,\n ls_model_type: \"chat\",\n ls_temperature: params.temperature ?? undefined,\n ls_max_tokens: params.max_tokens ?? undefined,\n ls_stop: options.stop,\n };\n }\n\n /**\n * Non-streaming generation. 
Sends a single request and returns the\n * complete response with the generated message and token usage.\n */\n async _generate(\n messages: BaseMessage[],\n options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): Promise<ChatResult> {\n const body: OpenRouterRequestBody = {\n ...this.invocationParams(options),\n messages: convertMessagesToOpenRouterParams(messages, this.model),\n stream: false,\n };\n\n const response = await fetch(this.buildUrl(), {\n method: \"POST\",\n headers: this.buildHeaders(),\n body: JSON.stringify(body),\n signal: options.signal,\n });\n\n if (!response.ok) {\n throw await OpenRouterError.fromResponse(response);\n }\n\n const data: OpenRouter.ChatResponse = await response.json();\n const choice = data.choices[0];\n\n if (!choice) {\n throw new OpenRouterError(\"No choices returned in response.\");\n }\n\n const message = convertOpenRouterResponseToBaseMessage(choice, data);\n if (AIMessage.isInstance(message)) {\n message.usage_metadata = convertUsageMetadata(data.usage);\n }\n\n const text = typeof message.content === \"string\" ? message.content : \"\";\n\n await runManager?.handleLLMNewToken(text);\n\n return {\n generations: [\n {\n text,\n message,\n generationInfo: {\n finish_reason: choice.finish_reason,\n },\n },\n ],\n llmOutput: { tokenUsage: data.usage },\n };\n }\n\n /**\n * Streaming generation. Opens an SSE connection and yields one\n * `ChatGenerationChunk` per delta received from the API. 
The stream\n * pipeline is: raw bytes -> text -> SSE events -> JSON-parsed deltas.\n */\n async *_streamResponseChunks(\n messages: BaseMessage[],\n options: this[\"ParsedCallOptions\"],\n runManager?: CallbackManagerForLLMRun\n ): AsyncGenerator<ChatGenerationChunk> {\n const body: OpenRouterRequestBody = {\n ...this.invocationParams(options),\n messages: convertMessagesToOpenRouterParams(messages, this.model),\n stream: true,\n };\n\n const response = await fetch(this.buildUrl(), {\n method: \"POST\",\n headers: this.buildHeaders(),\n body: JSON.stringify(body),\n signal: options.signal,\n });\n\n if (!response.ok) {\n throw await OpenRouterError.fromResponse(response);\n }\n\n if (!response.body) {\n return;\n }\n\n const stream = response.body\n .pipeThrough(new TextDecoderStream())\n .pipeThrough(new EventSourceParserStream())\n .pipeThrough(new OpenRouterJsonParseStream());\n\n const reader = stream.getReader();\n let defaultRole: string | undefined;\n\n try {\n while (true) {\n const { done, value: data } = await reader.read();\n if (done) break;\n if (!data) continue;\n\n const choice = data.choices?.[0];\n if (!choice?.delta) continue;\n\n const chunk = convertOpenRouterDeltaToBaseMessageChunk(\n choice.delta,\n data,\n defaultRole\n );\n defaultRole = choice.delta.role ?? defaultRole;\n\n if (\n data.usage &&\n this.streamUsage &&\n AIMessageChunk.isInstance(chunk)\n ) {\n chunk.usage_metadata = convertUsageMetadata(data.usage);\n }\n\n const text = typeof chunk.content === \"string\" ? chunk.content : \"\";\n\n const generationChunk = new ChatGenerationChunk({\n message: chunk,\n text,\n generationInfo: {\n ...(choice.finish_reason\n ? 
{ finish_reason: choice.finish_reason }\n : {}),\n },\n });\n\n yield generationChunk;\n await runManager?.handleLLMNewToken(text);\n }\n } finally {\n reader.releaseLock();\n }\n }\n\n /**\n * Returns a new Runnable with the given tools bound into every call.\n * Equivalent to `.withConfig({ tools, ...kwargs })`.\n */\n override bindTools(\n tools: BindToolsInput[],\n kwargs?: Partial<ChatOpenRouterCallOptions>\n ): Runnable<\n BaseLanguageModelInput,\n AIMessageChunk,\n ChatOpenRouterCallOptions\n > {\n return this.withConfig({\n ...kwargs,\n tools,\n } as Partial<ChatOpenRouterCallOptions>);\n }\n\n /**\n * Returns a Runnable that forces the model to produce output conforming\n * to `outputSchema` (a Zod schema or plain JSON Schema object).\n *\n * The extraction strategy (JSON Schema response format, function calling,\n * or JSON mode) is chosen automatically based on model capabilities —\n * see {@link resolveOpenRouterStructuredOutputMethod}. You can override\n * this via `config.method`.\n *\n * When `config.includeRaw` is `true` the returned object contains both\n * the raw `BaseMessage` and the parsed output, with a fallback that\n * sets `parsed: null` if the parser throws.\n */\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<false>\n ): Runnable<BaseLanguageModelInput, RunOutput>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<true>\n ): Runnable<BaseLanguageModelInput, { raw: BaseMessage; 
parsed: RunOutput }>;\n\n withStructuredOutput<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput extends Record<string, any> = Record<string, any>\n >(\n outputSchema:\n | InteropZodType<RunOutput>\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | Record<string, any>,\n config?: StructuredOutputMethodOptions<boolean>\n ):\n | Runnable<BaseLanguageModelInput, RunOutput>\n | Runnable<BaseLanguageModelInput, { raw: BaseMessage; parsed: RunOutput }>;\n\n withStructuredOutput<\n RunOutput extends Record<string, unknown> = Record<string, unknown>\n >(\n outputSchema: InteropZodType<RunOutput> | Record<string, unknown>,\n config?: StructuredOutputMethodOptions<boolean>\n ) {\n let llm: Runnable<BaseLanguageModelInput>;\n let outputParser: Runnable<AIMessageChunk, RunOutput>;\n\n const { schema, name, includeRaw } = {\n ...config,\n schema: outputSchema,\n };\n\n const method = resolveOpenRouterStructuredOutputMethod({\n model: this.model,\n method: config?.method,\n profile: this.profile,\n models: this.models,\n route: this.route,\n });\n\n const asJsonSchema = toJsonSchema(schema);\n\n if (method === \"jsonSchema\") {\n const schemaName = name ?? \"extract\";\n llm = this.withConfig({\n response_format: {\n type: \"json_schema\",\n json_schema: {\n name: schemaName,\n description: getSchemaDescription(schema),\n schema: asJsonSchema,\n strict: config?.strict,\n },\n },\n ls_structured_output_format: {\n kwargs: { method: \"json_schema\" },\n schema: {\n title: schemaName,\n description: getSchemaDescription(schema),\n ...asJsonSchema,\n },\n },\n } as Partial<ChatOpenRouterCallOptions>);\n\n outputParser = isInteropZodSchema(schema)\n ? StructuredOutputParser.fromZodSchema(schema)\n : new JsonOutputParser<RunOutput>();\n } else if (method === \"jsonMode\") {\n llm = this.withConfig({\n response_format: { type: \"json_object\" },\n ls_structured_output_format: {\n kwargs: { method: \"json_mode\" },\n schema: { title: name ?? 
\"extract\", ...asJsonSchema },\n },\n } as Partial<ChatOpenRouterCallOptions>);\n\n outputParser = isInteropZodSchema(schema)\n ? StructuredOutputParser.fromZodSchema(schema)\n : new JsonOutputParser<RunOutput>();\n } else {\n let functionName = name ?? \"extract\";\n if (\"name\" in (schema as Record<string, unknown>)) {\n functionName = (schema as Record<string, unknown>).name as string;\n }\n\n llm = this.withConfig({\n tools: [\n {\n type: \"function\" as const,\n function: {\n name: functionName,\n description: getSchemaDescription(schema) ?? \"\",\n parameters: asJsonSchema,\n },\n },\n ],\n tool_choice: {\n type: \"function\" as const,\n function: { name: functionName },\n },\n ls_structured_output_format: {\n kwargs: { method: \"function_calling\" },\n schema: { title: functionName, ...asJsonSchema },\n },\n ...(config?.strict !== undefined ? { strict: config.strict } : {}),\n } as Partial<ChatOpenRouterCallOptions>);\n\n outputParser = isInteropZodSchema(schema)\n ? new JsonOutputKeyToolsParser({\n returnSingle: true,\n keyName: functionName,\n zodSchema: schema,\n })\n : new JsonOutputKeyToolsParser<RunOutput>({\n returnSingle: true,\n keyName: functionName,\n });\n }\n\n if (!includeRaw) {\n return llm.pipe(outputParser).withConfig({\n runName: \"ChatOpenRouterStructuredOutput\",\n }) as Runnable<BaseLanguageModelInput, RunOutput>;\n }\n\n const parserAssign = RunnablePassthrough.assign({\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n parsed: (input: any, config) => outputParser.invoke(input.raw, config),\n });\n const parserNone = RunnablePassthrough.assign({\n parsed: () => null,\n });\n const parsedWithFallback = parserAssign.withFallbacks({\n fallbacks: [parserNone],\n });\n return RunnableSequence.from<\n BaseLanguageModelInput,\n { raw: BaseMessage; parsed: RunOutput }\n >([{ raw: llm }, parsedWithFallback]).withConfig({\n runName: \"ChatOpenRouterStructuredOutput\",\n });\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AA6EA,MAAM,mBAAmB;;;;;;;;;AAUzB,IAAa,iBAAb,cAAoCA,0DAGlC;CACA,OAAO,UAAU;AACf,SAAO;;CAGT,kBAAkB;;CAGlB,IAAI,aAAoD;AACtD,SAAO,EACL,QAAQ,sBACT;;;CAIH,IAAI,aAAqC;AACvC,SAAO,EACL,WAAW,SACZ;;;CAIH,IAAI,WAAqB;AACvB,SAAO;GACL,GAAG,MAAM;GACT;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACD;;;CAIH;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;;CAGA;CAEA,YAAY,QAA8B;AACxC,QAAM,OAAO;EACb,MAAM,SACJ,OAAO,gEAAiC,qBAAqB;AAC/D,MAAI,CAAC,OACH,OAAM,IAAIC,mCACR,8JACD;AAEH,OAAK,SAAS;AACd,MAAI,CAAC,OAAO,MACV,OAAM,IAAI,MACR,4EACD;AAEH,OAAK,QAAQ,OAAO;AACpB,OAAK,UAAU,OAAO,WAAW;AACjC,OAAK,cAAc,OAAO;AAC1B,OAAK,YAAY,OAAO;AACxB,OAAK,OAAO,OAAO;AACnB,OAAK,OAAO,OAAO;AACnB,OAAK,mBAAmB,OAAO;AAC/B,OAAK,kBAAkB,OAAO;AAC9B,OAAK,oBAAoB,OAAO;AAChC,OAAK,OAAO,OAAO;AACnB,OAAK,OAAO,OAAO;AACnB,OAAK,OAAO,OAAO;AACnB,OAAK,OAAO,OAAO;AACnB,OAAK,YAAY,OAAO;AACxB,OAAK,cAAc,OAAO;AAC1B,OAAK,OAAO,OAAO;AACnB,OAAK,aAAa,OAAO;AACzB,OAAK,SAAS,OAAO;AACrB,OAAK,QAAQ,OAAO;AACpB,OAAK,WAAW,OAAO;AACvB,OAAK,UAAU,OAAO;AACtB,OAAK,UAAU,OAAO;AACtB,OAAK,WAAW,OAAO;AACvB,OAAK,cAAc,OAAO;AAC1B,OAAK,cAAc,OAAO,eAAe;;CAG3C,WAAmB;AACjB,SAAO;;;CAIT,IAAI,UAAwB;AAC1B,SAAOC,yBAAS,KAAK,UAAU,EAAE;;;CAInC,AAAQ,eAAuC;AAC7C,SAAO;GACL,eAAe,UAAU,KAAK;GAC9B,gBAAgB;GAChB,GAAI,KAAK,UAAU,EAAE,gBAAgB,KAAK,SAAS,GAAG,EAAE;GACxD,GAAI,KAAK,WAAW,EAAE,WAAW,KAAK,UAAU,GAAG,EAAE;GACtD;;;CAIH,AAAQ,WAAmB;AACzB,SAAO,GAAG,KAAK,QAAQ;;;;;;CAOzB,AAAS,iBACP,SACyC;EACzC,MAAM,QAAQ,QAAQ,QAClBC,uCAAyB,QAAQ,OAAO,EAAE,QAAQ,QAAQ,QAAQ,CAAC,GACnE;EACJ,MAAM,aAAaC,+BAAiB,QAAQ,YAAY;AAExD,SAAO;GACL,OAAO,KAAK;GACZ,aAAa,QAAQ,eAAe,KAAK;GACzC,YAAY,QAAQ,aAAa,KAAK;GACtC,OAAO,QAAQ,QAAQ,KAAK;GAC5B,OAAO,QAAQ,QAAQ,KAAK;GAC5B,mBAAmB,QAAQ,oBAAoB,KAAK;GACpD,kBAAkB,QAAQ,mBAAmB,KAAK;GAClD,oBAAoB,QAAQ,qBAAqB,KAAK;GACtD,OAAO,QAAQ,QAAQ,KAAK;GAC5B,OAAO,QAAQ,QAAQ,KAAK;GAC5B,MAAM,QAAQ,QAAQ,KAAK;GAC3B,MAAM,QAA
Q,QAAQ,KAAK;GAC3B,YAAY,QAAQ,aAAa,KAAK;GACtC,cAAc,QAAQ,eAAe,KAAK;GAC1C,MAAM,QAAQ,QAAQ,KAAK;GAC3B;GACA,aAAa;GACb,iBAAiB,QAAQ;GACzB,GAAI,QAAQ,aAAa,EAAE,YAAY,QAAQ,YAAY,GAAG,EAAE;GAChE,YAAY,QAAQ,cAAc,KAAK;GACvC,QAAQ,QAAQ,UAAU,KAAK;GAC/B,OAAO,QAAQ,SAAS,KAAK;GAC7B,UAAU,QAAQ,YAAY,KAAK;GACnC,SAAS,QAAQ,WAAW,KAAK;GACjC,GAAG,KAAK;GACT;;;CAIH,AAAS,YAAY,SAAqD;EACxE,MAAM,SAAS,KAAK,iBAAiB,QAAQ;AAC7C,SAAO;GACL,aAAa;GACb,eAAe,KAAK;GACpB,eAAe;GACf,gBAAgB,OAAO,eAAe;GACtC,eAAe,OAAO,cAAc;GACpC,SAAS,QAAQ;GAClB;;;;;;CAOH,MAAM,UACJ,UACA,SACA,YACqB;EACrB,MAAM,OAA8B;GAClC,GAAG,KAAK,iBAAiB,QAAQ;GACjC,UAAUC,mDAAkC,UAAU,KAAK,MAAM;GACjE,QAAQ;GACT;EAED,MAAM,WAAW,MAAM,MAAM,KAAK,UAAU,EAAE;GAC5C,QAAQ;GACR,SAAS,KAAK,cAAc;GAC5B,MAAM,KAAK,UAAU,KAAK;GAC1B,QAAQ,QAAQ;GACjB,CAAC;AAEF,MAAI,CAAC,SAAS,GACZ,OAAM,MAAMC,+BAAgB,aAAa,SAAS;EAGpD,MAAM,OAAgC,MAAM,SAAS,MAAM;EAC3D,MAAM,SAAS,KAAK,QAAQ;AAE5B,MAAI,CAAC,OACH,OAAM,IAAIA,+BAAgB,mCAAmC;EAG/D,MAAM,UAAUC,wDAAuC,QAAQ,KAAK;AACpE,MAAIC,mCAAU,WAAW,QAAQ,CAC/B,SAAQ,iBAAiBC,sCAAqB,KAAK,MAAM;EAG3D,MAAM,OAAO,OAAO,QAAQ,YAAY,WAAW,QAAQ,UAAU;AAErE,QAAM,YAAY,kBAAkB,KAAK;AAEzC,SAAO;GACL,aAAa,CACX;IACE;IACA;IACA,gBAAgB,EACd,eAAe,OAAO,eACvB;IACF,CACF;GACD,WAAW,EAAE,YAAY,KAAK,OAAO;GACtC;;;;;;;CAQH,OAAO,sBACL,UACA,SACA,YACqC;EACrC,MAAM,OAA8B;GAClC,GAAG,KAAK,iBAAiB,QAAQ;GACjC,UAAUJ,mDAAkC,UAAU,KAAK,MAAM;GACjE,QAAQ;GACT;EAED,MAAM,WAAW,MAAM,MAAM,KAAK,UAAU,EAAE;GAC5C,QAAQ;GACR,SAAS,KAAK,cAAc;GAC5B,MAAM,KAAK,UAAU,KAAK;GAC1B,QAAQ,QAAQ;GACjB,CAAC;AAEF,MAAI,CAAC,SAAS,GACZ,OAAM,MAAMC,+BAAgB,aAAa,SAAS;AAGpD,MAAI,CAAC,SAAS,KACZ;EAQF,MAAM,SALS,SAAS,KACrB,YAAY,IAAI,mBAAmB,CAAC,CACpC,YAAY,IAAII,mDAAyB,CAAC,CAC1C,YAAY,IAAIC,0CAA2B,CAAC,CAEzB,WAAW;EACjC,IAAI;AAEJ,MAAI;AACF,UAAO,MAAM;IACX,MAAM,EAAE,MAAM,OAAO,SAAS,MAAM,OAAO,MAAM;AACjD,QAAI,KAAM;AACV,QAAI,CAAC,KAAM;IAEX,MAAM,SAAS,KAAK,UAAU;AAC9B,QAAI,CAAC,QAAQ,MAAO;IAEpB,MAAM,QAAQC,0DACZ,OAAO,OACP,MACA,YACD;AACD,kBAAc,OAAO,MAAM,QAAQ;AAEnC,QACE,KAAK,SACL,KAAK,eACLC,wCAAe,WAAW,MAAM,CAEhC,OAAM,iBAAiBJ,sCAAqB,KAAK,MAAM;IAGzD,MAAM,OAAO,OAAO,MAAM,YAAY,WAA
W,MAAM,UAAU;AAYjE,UAVwB,IAAIK,4CAAoB;KAC9C,SAAS;KACT;KACA,gBAAgB,EACd,GAAI,OAAO,gBACP,EAAE,eAAe,OAAO,eAAe,GACvC,EAAE,EACP;KACF,CAAC;AAGF,UAAM,YAAY,kBAAkB,KAAK;;YAEnC;AACR,UAAO,aAAa;;;;;;;CAQxB,AAAS,UACP,OACA,QAKA;AACA,SAAO,KAAK,WAAW;GACrB,GAAG;GACH;GACD,CAAuC;;CAmD1C,qBAGE,cACA,QACA;EACA,IAAI;EACJ,IAAI;EAEJ,MAAM,EAAE,QAAQ,MAAM,eAAe;GACnC,GAAG;GACH,QAAQ;GACT;EAED,MAAM,SAASC,kEAAwC;GACrD,OAAO,KAAK;GACZ,QAAQ,QAAQ;GAChB,SAAS,KAAK;GACd,QAAQ,KAAK;GACb,OAAO,KAAK;GACb,CAAC;EAEF,MAAM,mEAA4B,OAAO;AAEzC,MAAI,WAAW,cAAc;GAC3B,MAAM,aAAa,QAAQ;AAC3B,SAAM,KAAK,WAAW;IACpB,iBAAiB;KACf,MAAM;KACN,aAAa;MACX,MAAM;MACN,mEAAkC,OAAO;MACzC,QAAQ;MACR,QAAQ,QAAQ;MACjB;KACF;IACD,6BAA6B;KAC3B,QAAQ,EAAE,QAAQ,eAAe;KACjC,QAAQ;MACN,OAAO;MACP,mEAAkC,OAAO;MACzC,GAAG;MACJ;KACF;IACF,CAAuC;AAExC,sEAAkC,OAAO,GACrCC,sDAAuB,cAAc,OAAO,GAC5C,IAAIC,iDAA6B;aAC5B,WAAW,YAAY;AAChC,SAAM,KAAK,WAAW;IACpB,iBAAiB,EAAE,MAAM,eAAe;IACxC,6BAA6B;KAC3B,QAAQ,EAAE,QAAQ,aAAa;KAC/B,QAAQ;MAAE,OAAO,QAAQ;MAAW,GAAG;MAAc;KACtD;IACF,CAAuC;AAExC,sEAAkC,OAAO,GACrCD,sDAAuB,cAAc,OAAO,GAC5C,IAAIC,iDAA6B;SAChC;GACL,IAAI,eAAe,QAAQ;AAC3B,OAAI,UAAW,OACb,gBAAgB,OAAmC;AAGrD,SAAM,KAAK,WAAW;IACpB,OAAO,CACL;KACE,MAAM;KACN,UAAU;MACR,MAAM;MACN,mEAAkC,OAAO,IAAI;MAC7C,YAAY;MACb;KACF,CACF;IACD,aAAa;KACX,MAAM;KACN,UAAU,EAAE,MAAM,cAAc;KACjC;IACD,6BAA6B;KAC3B,QAAQ,EAAE,QAAQ,oBAAoB;KACtC,QAAQ;MAAE,OAAO;MAAc,GAAG;MAAc;KACjD;IACD,GAAI,QAAQ,WAAW,SAAY,EAAE,QAAQ,OAAO,QAAQ,GAAG,EAAE;IAClE,CAAuC;AAExC,sEAAkC,OAAO,GACrC,IAAIC,qEAAyB;IAC3B,cAAc;IACd,SAAS;IACT,WAAW;IACZ,CAAC,GACF,IAAIA,qEAAoC;IACtC,cAAc;IACd,SAAS;IACV,CAAC;;AAGR,MAAI,CAAC,WACH,QAAO,IAAI,KAAK,aAAa,CAAC,WAAW,EACvC,SAAS,kCACV,CAAC;EAGJ,MAAM,eAAeC,8CAAoB,OAAO,EAE9C,SAAS,OAAY,WAAW,aAAa,OAAO,MAAM,KAAK,OAAO,EACvE,CAAC;EACF,MAAM,aAAaA,8CAAoB,OAAO,EAC5C,cAAc,MACf,CAAC;EACF,MAAM,qBAAqB,aAAa,cAAc,EACpD,WAAW,CAAC,WAAW,EACxB,CAAC;AACF,SAAOC,2CAAiB,KAGtB,CAAC,EAAE,KAAK,KAAK,EAAE,mBAAmB,CAAC,CAAC,WAAW,EAC/C,SAAS,kCACV,CAAC"}
@@ -0,0 +1,160 @@
1
+ import { OpenRouter } from "../api-types.cjs";
2
+ import { ChatOpenRouterCallOptions, ChatOpenRouterParams } from "./types.cjs";
3
+ import { BaseChatModel, BindToolsInput, LangSmithParams } from "@langchain/core/language_models/chat_models";
4
+ import { ModelProfile } from "@langchain/core/language_models/profile";
5
+ import { BaseLanguageModelInput, StructuredOutputMethodOptions } from "@langchain/core/language_models/base";
6
+ import { AIMessageChunk, BaseMessage } from "@langchain/core/messages";
7
+ import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
8
+ import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
9
+ import { Runnable } from "@langchain/core/runnables";
10
+ import { InteropZodType } from "@langchain/core/utils/types";
11
+ import { OpenAI } from "openai";
12
+
13
+ //#region src/chat_models/index.d.ts
14
+ /**
15
+ * Full request body sent to the OpenRouter `/chat/completions` endpoint.
16
+ *
17
+ * Extends the base generation params with OpenRouter-specific sampling
18
+ * knobs (`top_k`, `min_p`, etc.) and features (`prediction`, `transforms`)
19
+ * that aren't part of the standard OpenAI spec.
20
+ */
21
+ type OpenRouterRequestBody = Omit<OpenRouter.ChatGenerationParams, "messages"> & {
22
+ messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[];
23
+ top_k?: number | null;
24
+ repetition_penalty?: number | null;
25
+ min_p?: number | null;
26
+ top_a?: number | null;
27
+ prediction?: {
28
+ type: "content";
29
+ content: string;
30
+ };
31
+ transforms?: string[];
32
+ };
33
+ /**
34
+ * OpenRouter chat model integration.
35
+ *
36
+ * Talks directly to the OpenRouter REST API via `fetch` (no SDK dependency)
37
+ * and supports tool calling, structured output, and streaming. Any model
38
+ * available on OpenRouter can be used by passing its identifier (e.g.
39
+ * `"anthropic/claude-4-sonnet"`) as the `model` param.
40
+ */
41
+ declare class ChatOpenRouter extends BaseChatModel<ChatOpenRouterCallOptions, AIMessageChunk> {
42
+ static lc_name(): string;
43
+ lc_serializable: boolean;
44
+ /** Maps secret fields to the environment variable they can be loaded from. */
45
+ get lc_secrets(): {
46
+ [key: string]: string;
47
+ } | undefined;
48
+ /** Allows serialized JSON to use `modelName` as an alias for `model`. */
49
+ get lc_aliases(): Record<string, string>;
50
+ /** Fields that may be overridden per-call via `.bind()` / `.withConfig()`. */
51
+ get callKeys(): string[];
52
+ /** Model identifier, e.g. `"anthropic/claude-4-sonnet"`. */
53
+ model: string;
54
+ /** OpenRouter API key. Falls back to the `OPENROUTER_API_KEY` env var. */
55
+ apiKey: string;
56
+ /** Base URL for the API. Defaults to `"https://openrouter.ai/api/v1"`. */
57
+ baseURL: string;
58
+ /** Sampling temperature (0–2). */
59
+ temperature?: number;
60
+ /** Maximum number of tokens to generate. */
61
+ maxTokens?: number;
62
+ /** Nucleus sampling cutoff probability. */
63
+ topP?: number;
64
+ /** Top-K sampling: only consider the K most likely tokens. */
65
+ topK?: number;
66
+ /** Additive penalty based on how often a token has appeared so far (−2 to 2). */
67
+ frequencyPenalty?: number;
68
+ /** Additive penalty based on whether a token has appeared at all (−2 to 2). */
69
+ presencePenalty?: number;
70
+ /** Multiplicative penalty applied to repeated token logits (0 to 2). */
71
+ repetitionPenalty?: number;
72
+ /** Minimum probability threshold for token sampling. */
73
+ minP?: number;
74
+ /** Top-A sampling threshold. */
75
+ topA?: number;
76
+ /** Random seed for deterministic generation. */
77
+ seed?: number;
78
+ /** Stop sequences that halt generation. */
79
+ stop?: string[];
80
+ /** Token-level biases to apply during sampling. */
81
+ logitBias?: Record<string, number>;
82
+ /** Number of most-likely log-probabilities to return per token. */
83
+ topLogprobs?: number;
84
+ /** Stable identifier for end-users, used for abuse detection. */
85
+ user?: string;
86
+ /** OpenRouter-specific transformations to apply to the request. */
87
+ transforms?: string[];
88
+ /** OpenRouter-specific list of models for routing. */
89
+ models?: string[];
90
+ /** OpenRouter-specific routing strategy. */
91
+ route?: "fallback";
92
+ /** OpenRouter-specific provider preferences and ordering. */
93
+ provider?: OpenRouter.ProviderPreferences;
94
+ /** OpenRouter plugins to enable (e.g. web search). */
95
+ plugins?: ChatOpenRouterParams["plugins"];
96
+ /** Your site URL — used for rankings on openrouter.ai. */
97
+ siteUrl?: string;
98
+ /** Your site/app name — used for rankings on openrouter.ai. */
99
+ siteName?: string;
100
+ /** Extra params passed through to the API body. */
101
+ modelKwargs?: Record<string, unknown>;
102
+ /** Whether to include token usage in streaming chunks. Defaults to `true`. */
103
+ streamUsage: boolean;
104
+ constructor(fields: ChatOpenRouterParams);
105
+ _llmType(): string;
106
+ /** Static capability profile (context size, tool support, etc.) for the current model. */
107
+ get profile(): ModelProfile;
108
+ /** Builds auth + content-type headers, plus optional site attribution headers. */
109
+ private buildHeaders;
110
+ /** Returns the full chat-completions endpoint URL. */
111
+ private buildUrl;
112
+ /**
113
+ * Merges constructor-level defaults with per-call overrides into the
114
+ * API request body (everything except `messages`, which is added later).
115
+ */
116
+ invocationParams(options: this["ParsedCallOptions"]): Omit<OpenRouterRequestBody, "messages">;
117
+ /** Returns metadata for LangSmith tracing (provider, model name, temperature, etc.). */
118
+ getLsParams(options: this["ParsedCallOptions"]): LangSmithParams;
119
+ /**
120
+ * Non-streaming generation. Sends a single request and returns the
121
+ * complete response with the generated message and token usage.
122
+ */
123
+ _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
124
+ /**
125
+ * Streaming generation. Opens an SSE connection and yields one
126
+ * `ChatGenerationChunk` per delta received from the API. The stream
127
+ * pipeline is: raw bytes -> text -> SSE events -> JSON-parsed deltas.
128
+ */
129
+ _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
130
+ /**
131
+ * Returns a new Runnable with the given tools bound into every call.
132
+ * Equivalent to `.withConfig({ tools, ...kwargs })`.
133
+ */
134
+ bindTools(tools: BindToolsInput[], kwargs?: Partial<ChatOpenRouterCallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, ChatOpenRouterCallOptions>;
135
+ /**
136
+ * Returns a Runnable that forces the model to produce output conforming
137
+ * to `outputSchema` (a Zod schema or plain JSON Schema object).
138
+ *
139
+ * The extraction strategy (JSON Schema response format, function calling,
140
+ * or JSON mode) is chosen automatically based on model capabilities —
141
+ * see {@link resolveOpenRouterStructuredOutputMethod}. You can override
142
+ * this via `config.method`.
143
+ *
144
+ * When `config.includeRaw` is `true` the returned object contains both
145
+ * the raw `BaseMessage` and the parsed output, with a fallback that
146
+ * sets `parsed: null` if the parser throws.
147
+ */
148
+ withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;
149
+ withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {
150
+ raw: BaseMessage;
151
+ parsed: RunOutput;
152
+ }>;
153
+ withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: InteropZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<boolean>): Runnable<BaseLanguageModelInput, RunOutput> | Runnable<BaseLanguageModelInput, {
154
+ raw: BaseMessage;
155
+ parsed: RunOutput;
156
+ }>;
157
+ }
158
+ //#endregion
159
+ export { ChatOpenRouter };
160
+ //# sourceMappingURL=index.d.cts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.d.cts","names":[],"sources":["../../src/chat_models/index.ts"],"mappings":";;;;;;;;;;;;;;;AAyCkD;;;;;KAS7C,qBAAA,GAAwB,IAAA,CAC3B,UAAA,CAAW,oBAAA;EAGX,QAAA,EAAU,MAAA,CAAa,IAAA,CAAK,WAAA,CAAY,0BAAA;EACxC,KAAA;EACA,kBAAA;EACA,KAAA;EACA,KAAA;EACA,UAAA;IAAe,IAAA;IAAiB,OAAA;EAAA;EAChC,UAAA;AAAA;;;;;;;;;cA2BW,cAAA,SAAuB,aAAA,CAClC,yBAAA,EACA,cAAA;EAAA,OAEO,OAAA,CAAA;EAIP,eAAA;EAR0B;EAAA,IAWtB,UAAA,CAAA;IAAA,CAAiB,GAAA;EAAA;EAuFT;EAAA,IAhFR,UAAA,CAAA,GAAc,MAAA;EAqGR;EAAA,IA9FN,QAAA,CAAA;EA4GgB;EA7EpB,KAAA;EAqJQ;EAlJR,MAAA;EAsL0D;EAnL1D,OAAA;EAsMe;EAnMf,WAAA;EAoMG;EAjMH,SAAA;EAyPe;EAtPf,IAAA;EAuPG;EApPH,IAAA;EAoUmB;EAjUnB,gBAAA;EAmUE;EAhUF,eAAA;EAkUE;EA/TF,iBAAA;EAsVoB;EAnVpB,IAAA;EAsVqB;EAnVrB,IAAA;EAqVM;EAlVN,IAAA;EAoVY;EAjVZ,IAAA;EAiVG;EA9UH,SAAA,GAAY,MAAA;EAkV8B;EA/U1C,WAAA;EAkVM;EA/UN,IAAA;EAkVW;EA/UX,UAAA;EAgV2C;EA7U3C,MAAA;EA6UG;EA1UH,KAAA;EA8U0C;EA3U1C,QAAA,GAAW,UAAA,CAAW,mBAAA;EA8UhB;EA3UN,OAAA,GAAU,oBAAA;EA8UC;EA3UX,OAAA;EA6UqC;EA1UrC,QAAA;EA2Ua;EAxUb,WAAA,GAAc,MAAA;EAwUmD;EArUjE,WAAA;EAEA,WAAA,CAAY,MAAA,EAAQ,oBAAA;EA0CpB,QAAA,CAAA;EA/K+C;EAAA,IAoL3C,OAAA,CAAA,GAAW,YAAA;EAnLf;EAAA,QAwLQ,YAAA;EArLD;EAAA,QA+LC,QAAA;EAxLJ;;;;EAgMK,gBAAA,CACP,OAAA,8BACC,IAAA,CAAK,qBAAA;EArJR;EAyLS,WAAA,CAAY,OAAA,8BAAqC,eAAA;EAnL1D;;;;EAmMM,SAAA,CACJ,QAAA,EAAU,WAAA,IACV,OAAA,6BACA,UAAA,GAAa,wBAAA,GACZ,OAAA,CAAQ,UAAA;EAxLX;;;;;EA6OO,qBAAA,CACL,QAAA,EAAU,WAAA,IACV,OAAA,6BACA,UAAA,GAAa,wBAAA,GACZ,cAAA,CAAe,mBAAA;EA/NlB;;;;EA6SS,SAAA,CACP,KAAA,EAAO,cAAA,IACP,MAAA,GAAS,OAAA,CAAQ,yBAAA,IAChB,QAAA,CACD,sBAAA,EACA,cAAA,EACA,yBAAA;EAvSF;;;;;;;;;;;;;EA4TA,oBAAA,mBAEoB,MAAA,gBAAsB,MAAA,cAAA,CAExC,YAAA,EACI,cAAA,CAAe,SAAA,IAEf,MAAA,eACJ,MAAA,GAAS,6BAAA,UACR,QAAA,CAAS,sBAAA,EAAwB,SAAA;EAEpC,oBAAA,mBAEoB,MAAA,gBAAsB,MAAA,cAAA,CAExC,YAAA,EACI,cAAA,CAAe,SAAA,IAEf,MAAA,eACJ,MAAA,GAAS,6BAAA,SACR,QAAA,CAAS,sBAAA;IAA0B,GAAA,EAAK,WAAA;IAAa,MAAA,EAAQ,SAAA;EAAA;EAEhE,oBAAA,mBAEoB,MAAA,gBAAsB,MAAA,cAAA,CAExC,YAAA,EACI,cAAA,CAAe,SAAA,IAEf,MAAA,eACJ,MAAA,GAAS,6BAAA,YAEP,QAAA,CAAS,sB
AAA,EAAwB,SAAA,IACjC,QAAA,CAAS,sBAAA;IAA0B,GAAA,EAAK,WAAA;IAAa,MAAA,EAAQ,SAAA;EAAA;AAAA"}