@langchain/openrouter 0.1.0
- package/LICENSE +21 -0
- package/README.md +174 -0
- package/dist/api-types.d.cts +368 -0
- package/dist/api-types.d.cts.map +1 -0
- package/dist/api-types.d.ts +368 -0
- package/dist/api-types.d.ts.map +1 -0
- package/dist/chat_models/index.cjs +401 -0
- package/dist/chat_models/index.cjs.map +1 -0
- package/dist/chat_models/index.d.cts +160 -0
- package/dist/chat_models/index.d.cts.map +1 -0
- package/dist/chat_models/index.d.ts +160 -0
- package/dist/chat_models/index.d.ts.map +1 -0
- package/dist/chat_models/index.js +401 -0
- package/dist/chat_models/index.js.map +1 -0
- package/dist/chat_models/types.d.cts +97 -0
- package/dist/chat_models/types.d.cts.map +1 -0
- package/dist/chat_models/types.d.ts +97 -0
- package/dist/chat_models/types.d.ts.map +1 -0
- package/dist/converters/messages.cjs +90 -0
- package/dist/converters/messages.cjs.map +1 -0
- package/dist/converters/messages.js +87 -0
- package/dist/converters/messages.js.map +1 -0
- package/dist/converters/tools.cjs +30 -0
- package/dist/converters/tools.cjs.map +1 -0
- package/dist/converters/tools.js +29 -0
- package/dist/converters/tools.js.map +1 -0
- package/dist/index.cjs +10 -0
- package/dist/index.d.cts +6 -0
- package/dist/index.d.ts +6 -0
- package/dist/index.js +5 -0
- package/dist/profiles.cjs +2570 -0
- package/dist/profiles.cjs.map +1 -0
- package/dist/profiles.d.cts +7 -0
- package/dist/profiles.d.cts.map +1 -0
- package/dist/profiles.d.ts +7 -0
- package/dist/profiles.d.ts.map +1 -0
- package/dist/profiles.js +2569 -0
- package/dist/profiles.js.map +1 -0
- package/dist/utils/errors.cjs +82 -0
- package/dist/utils/errors.cjs.map +1 -0
- package/dist/utils/errors.d.cts +68 -0
- package/dist/utils/errors.d.cts.map +1 -0
- package/dist/utils/errors.d.ts +68 -0
- package/dist/utils/errors.d.ts.map +1 -0
- package/dist/utils/errors.js +80 -0
- package/dist/utils/errors.js.map +1 -0
- package/dist/utils/stream.cjs +28 -0
- package/dist/utils/stream.cjs.map +1 -0
- package/dist/utils/stream.js +27 -0
- package/dist/utils/stream.js.map +1 -0
- package/dist/utils/structured_output.cjs +44 -0
- package/dist/utils/structured_output.cjs.map +1 -0
- package/dist/utils/structured_output.js +43 -0
- package/dist/utils/structured_output.js.map +1 -0
- package/package.json +86 -0

package/dist/chat_models/types.d.cts
ADDED
@@ -0,0 +1,97 @@
import { OpenRouter } from "../api-types.cjs";
import { BaseChatModelCallOptions, BaseChatModelParams, BindToolsInput } from "@langchain/core/language_models/chat_models";

//#region src/chat_models/types.d.ts
type OpenRouterResponseFormat = OpenRouter.ChatGenerationParams["response_format"];
/**
 * Plugin configuration for OpenRouter plugins (e.g. web search).
 */
interface OpenRouterPlugin {
  id: string;
  [key: string]: any;
}
/**
 * Shared fields that can be set at construction time or overridden per-call.
 */
interface ChatOpenRouterFields {
  /** Sampling temperature (0–2). */
  temperature?: number;
  /** Maximum number of tokens to generate. */
  maxTokens?: number;
  /** Nucleus sampling cutoff probability. */
  topP?: number;
  /** Top-K sampling: only consider the K most likely tokens. */
  topK?: number;
  /** Additive penalty based on how often a token has appeared so far (−2 to 2). */
  frequencyPenalty?: number;
  /** Additive penalty based on whether a token has appeared at all (−2 to 2). */
  presencePenalty?: number;
  /** Multiplicative penalty applied to repeated token logits (0 to 2). */
  repetitionPenalty?: number;
  /** Minimum probability threshold for token sampling. */
  minP?: number;
  /** Top-A sampling threshold. */
  topA?: number;
  /** Random seed for deterministic generation. */
  seed?: number;
  /** Stop sequences that halt generation. */
  stop?: string[];
  /** Token-level biases to apply during sampling. */
  logitBias?: Record<string, number>;
  /** Number of most-likely log-probabilities to return per token. */
  topLogprobs?: number;
  /** OpenRouter-specific transformations to apply to the request. */
  transforms?: string[];
  /** OpenRouter-specific list of models for routing. */
  models?: string[];
  /** OpenRouter-specific routing strategy. */
  route?: "fallback";
  /** OpenRouter-specific provider preferences and ordering. */
  provider?: OpenRouter.ProviderPreferences;
  /** OpenRouter plugins to enable (e.g. web search). */
  plugins?: OpenRouterPlugin[];
}
/**
 * Constructor parameters for `ChatOpenRouter`.
 */
interface ChatOpenRouterParams extends BaseChatModelParams, ChatOpenRouterFields {
  /** Model identifier, e.g. "anthropic/claude-4-sonnet". */
  model?: string;
  /** OpenRouter API key. Falls back to `OPENROUTER_API_KEY` env var. */
  apiKey?: string;
  /** Base URL for the API. Defaults to "https://openrouter.ai/api/v1". */
  baseURL?: string;
  /** Your site URL — used for OpenRouter rankings / rate limits. */
  siteUrl?: string;
  /** Your site name — shown on the OpenRouter leaderboard. */
  siteName?: string;
  /** Stable identifier for end-users, used for abuse detection. */
  user?: string;
  /** Extra params passed through to the API body. */
  modelKwargs?: Record<string, unknown>;
  /** Whether to include usage in streaming chunks. Defaults to true. */
  streamUsage?: boolean;
}
/**
 * Per-call options for `ChatOpenRouter`.
 */
interface ChatOpenRouterCallOptions extends BaseChatModelCallOptions, ChatOpenRouterFields {
  /** Tool definitions to bind for this call. */
  tools?: BindToolsInput[];
  /** Response format constraint (text, JSON object, or JSON schema). */
  response_format?: OpenRouterResponseFormat;
  /** Whether tool schemas should use strict mode. */
  strict?: boolean;
  /** Predicted output content for latency optimization. */
  prediction?: {
    type: "content";
    content: string;
  };
  /** Stable identifier for end-users, used for abuse detection. */
  user?: string;
  /** Abort signal to cancel the request. */
  signal?: AbortSignal;
}
//#endregion
export { ChatOpenRouterCallOptions, ChatOpenRouterFields, ChatOpenRouterParams, OpenRouterPlugin, OpenRouterResponseFormat };
//# sourceMappingURL=types.d.cts.map

package/dist/chat_models/types.d.cts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"types.d.cts","names":[],"sources":["../../src/chat_models/types.ts"],"mappings":";;;;KAOY,wBAAA,GACV,UAAA,CAAW,oBAAA;AADb;;;AAAA,UAMiB,gBAAA;EACf,EAAA;EAAA,CAEC,GAAA;AAAA;;;;UAMc,oBAAA;EAAoB;EAEnC,WAAA;EAsBY;EApBZ,SAAA;EAgCU;EA9BV,IAAA;EA8B0B;EA5B1B,IAAA;EAJA;EAMA,gBAAA;EAFA;EAIA,eAAA;EAAA;EAEA,iBAAA;EAEA;EAAA,IAAA;EAIA;EAFA,IAAA;EAMA;EAJA,IAAA;EAMA;EAJA,IAAA;EAQA;EANA,SAAA,GAAY,MAAA;EAUZ;EARA,WAAA;EAQsB;EANtB,UAAA;EAQU;EANV,MAAA;EAM0B;EAJ1B,KAAA;EAWA;EATA,QAAA,GAAW,UAAA,CAAW,mBAAA;EAwBR;EAtBd,OAAA,GAAU,gBAAA;AAAA;;;;UAMK,oBAAA,SACP,mBAAA,EACN,oBAAA;EAEF;EAAA,KAAA;EAIA;EAFA,MAAA;EAMA;EAJA,OAAA;EAQA;EANA,OAAA;EAQA;EANA,QAAA;EAMW;EAJX,IAAA;EAWA;EATA,WAAA,GAAc,MAAA;EAYN;EAVR,WAAA;AAAA;;;;UAMe,yBAAA,SACP,wBAAA,EACN,oBAAA;EADM;EAGR,KAAA,GAAQ,cAAA;EAAR;EAEA,eAAA,GAAkB,wBAAA;EAAlB;EAEA,MAAA;EAAA;EAEA,UAAA;IAAe,IAAA;IAAiB,OAAA;EAAA;EAIhC;EAFA,IAAA;EAEoB;EAApB,MAAA,GAAS,WAAA;AAAA"}

package/dist/chat_models/types.d.ts
ADDED
@@ -0,0 +1,97 @@
import { OpenRouter } from "../api-types.js";
import { BaseChatModelCallOptions, BaseChatModelParams, BindToolsInput } from "@langchain/core/language_models/chat_models";

//#region src/chat_models/types.d.ts
type OpenRouterResponseFormat = OpenRouter.ChatGenerationParams["response_format"];
/**
 * Plugin configuration for OpenRouter plugins (e.g. web search).
 */
interface OpenRouterPlugin {
  id: string;
  [key: string]: any;
}
/**
 * Shared fields that can be set at construction time or overridden per-call.
 */
interface ChatOpenRouterFields {
  /** Sampling temperature (0–2). */
  temperature?: number;
  /** Maximum number of tokens to generate. */
  maxTokens?: number;
  /** Nucleus sampling cutoff probability. */
  topP?: number;
  /** Top-K sampling: only consider the K most likely tokens. */
  topK?: number;
  /** Additive penalty based on how often a token has appeared so far (−2 to 2). */
  frequencyPenalty?: number;
  /** Additive penalty based on whether a token has appeared at all (−2 to 2). */
  presencePenalty?: number;
  /** Multiplicative penalty applied to repeated token logits (0 to 2). */
  repetitionPenalty?: number;
  /** Minimum probability threshold for token sampling. */
  minP?: number;
  /** Top-A sampling threshold. */
  topA?: number;
  /** Random seed for deterministic generation. */
  seed?: number;
  /** Stop sequences that halt generation. */
  stop?: string[];
  /** Token-level biases to apply during sampling. */
  logitBias?: Record<string, number>;
  /** Number of most-likely log-probabilities to return per token. */
  topLogprobs?: number;
  /** OpenRouter-specific transformations to apply to the request. */
  transforms?: string[];
  /** OpenRouter-specific list of models for routing. */
  models?: string[];
  /** OpenRouter-specific routing strategy. */
  route?: "fallback";
  /** OpenRouter-specific provider preferences and ordering. */
  provider?: OpenRouter.ProviderPreferences;
  /** OpenRouter plugins to enable (e.g. web search). */
  plugins?: OpenRouterPlugin[];
}
/**
 * Constructor parameters for `ChatOpenRouter`.
 */
interface ChatOpenRouterParams extends BaseChatModelParams, ChatOpenRouterFields {
  /** Model identifier, e.g. "anthropic/claude-4-sonnet". */
  model?: string;
  /** OpenRouter API key. Falls back to `OPENROUTER_API_KEY` env var. */
  apiKey?: string;
  /** Base URL for the API. Defaults to "https://openrouter.ai/api/v1". */
  baseURL?: string;
  /** Your site URL — used for OpenRouter rankings / rate limits. */
  siteUrl?: string;
  /** Your site name — shown on the OpenRouter leaderboard. */
  siteName?: string;
  /** Stable identifier for end-users, used for abuse detection. */
  user?: string;
  /** Extra params passed through to the API body. */
  modelKwargs?: Record<string, unknown>;
  /** Whether to include usage in streaming chunks. Defaults to true. */
  streamUsage?: boolean;
}
/**
 * Per-call options for `ChatOpenRouter`.
 */
interface ChatOpenRouterCallOptions extends BaseChatModelCallOptions, ChatOpenRouterFields {
  /** Tool definitions to bind for this call. */
  tools?: BindToolsInput[];
  /** Response format constraint (text, JSON object, or JSON schema). */
  response_format?: OpenRouterResponseFormat;
  /** Whether tool schemas should use strict mode. */
  strict?: boolean;
  /** Predicted output content for latency optimization. */
  prediction?: {
    type: "content";
    content: string;
  };
  /** Stable identifier for end-users, used for abuse detection. */
  user?: string;
  /** Abort signal to cancel the request. */
  signal?: AbortSignal;
}
//#endregion
export { ChatOpenRouterCallOptions, ChatOpenRouterFields, ChatOpenRouterParams, OpenRouterPlugin, OpenRouterResponseFormat };
//# sourceMappingURL=types.d.ts.map

package/dist/chat_models/types.d.ts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"types.d.ts","names":[],"sources":["../../src/chat_models/types.ts"],"mappings":";;;;KAOY,wBAAA,GACV,UAAA,CAAW,oBAAA;AADb;;;AAAA,UAMiB,gBAAA;EACf,EAAA;EAAA,CAEC,GAAA;AAAA;;;;UAMc,oBAAA;EAAoB;EAEnC,WAAA;EAsBY;EApBZ,SAAA;EAgCU;EA9BV,IAAA;EA8B0B;EA5B1B,IAAA;EAJA;EAMA,gBAAA;EAFA;EAIA,eAAA;EAAA;EAEA,iBAAA;EAEA;EAAA,IAAA;EAIA;EAFA,IAAA;EAMA;EAJA,IAAA;EAMA;EAJA,IAAA;EAQA;EANA,SAAA,GAAY,MAAA;EAUZ;EARA,WAAA;EAQsB;EANtB,UAAA;EAQU;EANV,MAAA;EAM0B;EAJ1B,KAAA;EAWA;EATA,QAAA,GAAW,UAAA,CAAW,mBAAA;EAwBR;EAtBd,OAAA,GAAU,gBAAA;AAAA;;;;UAMK,oBAAA,SACP,mBAAA,EACN,oBAAA;EAEF;EAAA,KAAA;EAIA;EAFA,MAAA;EAMA;EAJA,OAAA;EAQA;EANA,OAAA;EAQA;EANA,QAAA;EAMW;EAJX,IAAA;EAWA;EATA,WAAA,GAAc,MAAA;EAYN;EAVR,WAAA;AAAA;;;;UAMe,yBAAA,SACP,wBAAA,EACN,oBAAA;EADM;EAGR,KAAA,GAAQ,cAAA;EAAR;EAEA,eAAA,GAAkB,wBAAA;EAAlB;EAEA,MAAA;EAAA;EAEA,UAAA;IAAe,IAAA;IAAiB,OAAA;EAAA;EAIhC;EAFA,IAAA;EAEoB;EAApB,MAAA,GAAS,WAAA;AAAA"}

package/dist/converters/messages.cjs
ADDED
@@ -0,0 +1,90 @@
let _langchain_openai = require("@langchain/openai");

//#region src/converters/messages.ts
/**
 * Convert an array of LangChain messages to the OpenRouter request format.
 *
 * Delegates to the OpenAI completions converter since OpenRouter's chat
 * API is wire-compatible with OpenAI's. This gives us full support for
 * standard content blocks, reasoning-model developer role mapping,
 * multi-modal inputs, and all edge cases handled upstream.
 */
function convertMessagesToOpenRouterParams(messages, model) {
  return (0, _langchain_openai.convertMessagesToCompletionsMessageParams)({
    messages,
    model
  });
}
/**
 * Convert a non-streaming OpenRouter response choice into a BaseMessage.
 *
 * Delegates to the OpenAI completions converter for tool call parsing,
 * multi-modal output handling, and audio support, then patches
 * response_metadata to reflect the OpenRouter provider.
 */
function convertOpenRouterResponseToBaseMessage(choice, rawResponse) {
  const message = (0, _langchain_openai.convertCompletionsMessageToBaseMessage)({
    message: choice.message,
    rawResponse
  });
  message.response_metadata = {
    ...message.response_metadata,
    model: rawResponse.model,
    model_provider: "openrouter",
    model_name: rawResponse.model,
    finish_reason: choice.finish_reason
  };
  return message;
}
/**
 * Convert a streaming delta into a BaseMessageChunk.
 *
 * Delegates to the OpenAI completions converter for tool call chunk
 * parsing, audio handling, and role-specific message types, then
 * patches response_metadata to reflect the OpenRouter provider.
 */
function convertOpenRouterDeltaToBaseMessageChunk(delta, rawChunk, defaultRole) {
  const chunk = (0, _langchain_openai.convertCompletionsDeltaToBaseMessageChunk)({
    delta,
    rawResponse: rawChunk,
    defaultRole: defaultRole ?? "assistant"
  });
  chunk.response_metadata = {
    ...chunk.response_metadata,
    model_provider: "openrouter"
  };
  return chunk;
}
/**
 * Convert OpenRouter usage info to LangChain's `UsageMetadata`,
 * including prompt/completion token detail breakdowns when available.
 */
function convertUsageMetadata(usage) {
  if (!usage) return void 0;
  const result = {
    input_tokens: usage.prompt_tokens,
    output_tokens: usage.completion_tokens,
    total_tokens: usage.total_tokens
  };
  const promptDetails = usage.prompt_tokens_details;
  if (promptDetails) {
    const input_token_details = {};
    if (promptDetails.cached_tokens != null) input_token_details.cache_read = promptDetails.cached_tokens;
    if (promptDetails.audio_tokens != null) input_token_details.audio = promptDetails.audio_tokens;
    if (Object.keys(input_token_details).length > 0) result.input_token_details = input_token_details;
  }
  const completionDetails = usage.completion_tokens_details;
  if (completionDetails) {
    const output_token_details = {};
    if (completionDetails.reasoning_tokens != null) output_token_details.reasoning = completionDetails.reasoning_tokens;
    if (Object.keys(output_token_details).length > 0) result.output_token_details = output_token_details;
  }
  return result;
}

//#endregion
exports.convertMessagesToOpenRouterParams = convertMessagesToOpenRouterParams;
exports.convertOpenRouterDeltaToBaseMessageChunk = convertOpenRouterDeltaToBaseMessageChunk;
exports.convertOpenRouterResponseToBaseMessage = convertOpenRouterResponseToBaseMessage;
exports.convertUsageMetadata = convertUsageMetadata;
//# sourceMappingURL=messages.cjs.map

package/dist/converters/messages.cjs.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"messages.cjs","names":[],"sources":["../../src/converters/messages.ts"],"sourcesContent":["import {\n BaseMessage,\n BaseMessageChunk,\n UsageMetadata,\n} from \"@langchain/core/messages\";\nimport {\n convertMessagesToCompletionsMessageParams,\n convertCompletionsMessageToBaseMessage,\n convertCompletionsDeltaToBaseMessageChunk,\n} from \"@langchain/openai\";\nimport type { OpenAI as OpenAIClient } from \"openai\";\nimport type { OpenRouter } from \"../api-types.js\";\n\n/**\n * The inner data shape of a streaming SSE chunk. Each parsed SSE event\n * contains this object directly (without the `data` wrapper that the\n * OpenAPI spec describes on `ChatStreamingResponseChunk`).\n */\nexport type StreamingChunkData = OpenRouter.ChatStreamingResponseChunk[\"data\"];\n\n// LangChain → OpenRouter\n/**\n * Convert an array of LangChain messages to the OpenRouter request format.\n *\n * Delegates to the OpenAI completions converter since OpenRouter's chat\n * API is wire-compatible with OpenAI's. This gives us full support for\n * standard content blocks, reasoning-model developer role mapping,\n * multi-modal inputs, and all edge cases handled upstream.\n */\nexport function convertMessagesToOpenRouterParams(\n messages: BaseMessage[],\n model?: string\n): OpenAIClient.Chat.Completions.ChatCompletionMessageParam[] {\n return convertMessagesToCompletionsMessageParams({\n messages,\n model,\n });\n}\n\n// OpenRouter → LangChain (non-streaming)\n/**\n * Convert a non-streaming OpenRouter response choice into a BaseMessage.\n *\n * Delegates to the OpenAI completions converter for tool call parsing,\n * multi-modal output handling, and audio support, then patches\n * response_metadata to reflect the OpenRouter provider.\n */\nexport function convertOpenRouterResponseToBaseMessage(\n choice: OpenRouter.ChatResponseChoice,\n rawResponse: OpenRouter.ChatResponse\n): BaseMessage {\n const message = convertCompletionsMessageToBaseMessage({\n message:\n choice.message as unknown as OpenAIClient.Chat.Completions.ChatCompletionMessage,\n rawResponse:\n rawResponse as unknown as OpenAIClient.Chat.Completions.ChatCompletion,\n });\n\n message.response_metadata = {\n ...message.response_metadata,\n model: rawResponse.model,\n model_provider: \"openrouter\",\n model_name: rawResponse.model,\n finish_reason: choice.finish_reason,\n };\n\n return message;\n}\n\n// OpenRouter → LangChain (streaming)\n/**\n * Convert a streaming delta into a BaseMessageChunk.\n *\n * Delegates to the OpenAI completions converter for tool call chunk\n * parsing, audio handling, and role-specific message types, then\n * patches response_metadata to reflect the OpenRouter provider.\n */\nexport function convertOpenRouterDeltaToBaseMessageChunk(\n delta: OpenRouter.ChatStreamingMessageChunk,\n rawChunk: StreamingChunkData,\n defaultRole?: string\n): BaseMessageChunk {\n const chunk = convertCompletionsDeltaToBaseMessageChunk({\n delta: delta as Record<string, unknown>,\n rawResponse:\n rawChunk as unknown as OpenAIClient.Chat.Completions.ChatCompletionChunk,\n defaultRole: (defaultRole ??\n \"assistant\") as OpenAIClient.Chat.ChatCompletionRole,\n });\n\n chunk.response_metadata = {\n ...chunk.response_metadata,\n model_provider: \"openrouter\",\n };\n\n return chunk;\n}\n\n// Usage metadata\n\n/**\n * Convert OpenRouter usage info to LangChain's `UsageMetadata`,\n * including prompt/completion token detail breakdowns when available.\n */\nexport function convertUsageMetadata(\n usage?: 
OpenRouter.ChatGenerationTokenUsage\n): UsageMetadata | undefined {\n if (!usage) return undefined;\n\n const result: UsageMetadata = {\n input_tokens: usage.prompt_tokens,\n output_tokens: usage.completion_tokens,\n total_tokens: usage.total_tokens,\n };\n\n const promptDetails = usage.prompt_tokens_details;\n if (promptDetails) {\n const input_token_details: Record<string, number> = {};\n if (promptDetails.cached_tokens != null)\n input_token_details.cache_read = promptDetails.cached_tokens;\n if (promptDetails.audio_tokens != null)\n input_token_details.audio = promptDetails.audio_tokens;\n if (Object.keys(input_token_details).length > 0)\n result.input_token_details = input_token_details;\n }\n\n const completionDetails = usage.completion_tokens_details;\n if (completionDetails) {\n const output_token_details: Record<string, number> = {};\n if (completionDetails.reasoning_tokens != null)\n output_token_details.reasoning = completionDetails.reasoning_tokens;\n if (Object.keys(output_token_details).length > 0)\n result.output_token_details = output_token_details;\n }\n\n return result;\n}\n"],"mappings":";;;;;;;;;;;AA6BA,SAAgB,kCACd,UACA,OAC4D;AAC5D,yEAAiD;EAC/C;EACA;EACD,CAAC;;;;;;;;;AAWJ,SAAgB,uCACd,QACA,aACa;CACb,MAAM,wEAAiD;EACrD,SACE,OAAO;EAEP;EACH,CAAC;AAEF,SAAQ,oBAAoB;EAC1B,GAAG,QAAQ;EACX,OAAO,YAAY;EACnB,gBAAgB;EAChB,YAAY,YAAY;EACxB,eAAe,OAAO;EACvB;AAED,QAAO;;;;;;;;;AAWT,SAAgB,yCACd,OACA,UACA,aACkB;CAClB,MAAM,yEAAkD;EAC/C;EACP,aACE;EACF,aAAc,eACZ;EACH,CAAC;AAEF,OAAM,oBAAoB;EACxB,GAAG,MAAM;EACT,gBAAgB;EACjB;AAED,QAAO;;;;;;AAST,SAAgB,qBACd,OAC2B;AAC3B,KAAI,CAAC,MAAO,QAAO;CAEnB,MAAM,SAAwB;EAC5B,cAAc,MAAM;EACpB,eAAe,MAAM;EACrB,cAAc,MAAM;EACrB;CAED,MAAM,gBAAgB,MAAM;AAC5B,KAAI,eAAe;EACjB,MAAM,sBAA8C,EAAE;AACtD,MAAI,cAAc,iBAAiB,KACjC,qBAAoB,aAAa,cAAc;AACjD,MAAI,cAAc,gBAAgB,KAChC,qBAAoB,QAAQ,cAAc;AAC5C,MAAI,OAAO,KAAK,oBAAoB,CAAC,SAAS,EAC5C,QAAO,sBAAsB;;CAGjC,MAAM,oBAAoB,MAAM;AAChC,KAAI,mBAAmB;EACrB,MAAM,uBAA+C,EAAE;AACvD,MAAI,kBAAkB,oBAAoB,KACxC,sBAAqB,YAAY,kBAAkB;AACrD,MAAI,OAAO,KAAK,qBAAqB,CAAC,SAAS,EAC7C,QAAO,uBAAuB;;AAGlC,QAAO"}

package/dist/converters/messages.js
ADDED
@@ -0,0 +1,87 @@
import { convertCompletionsDeltaToBaseMessageChunk, convertCompletionsMessageToBaseMessage, convertMessagesToCompletionsMessageParams } from "@langchain/openai";

//#region src/converters/messages.ts
/**
 * Convert an array of LangChain messages to the OpenRouter request format.
 *
 * Delegates to the OpenAI completions converter since OpenRouter's chat
 * API is wire-compatible with OpenAI's. This gives us full support for
 * standard content blocks, reasoning-model developer role mapping,
 * multi-modal inputs, and all edge cases handled upstream.
 */
function convertMessagesToOpenRouterParams(messages, model) {
  return convertMessagesToCompletionsMessageParams({
    messages,
    model
  });
}
/**
 * Convert a non-streaming OpenRouter response choice into a BaseMessage.
 *
 * Delegates to the OpenAI completions converter for tool call parsing,
 * multi-modal output handling, and audio support, then patches
 * response_metadata to reflect the OpenRouter provider.
 */
function convertOpenRouterResponseToBaseMessage(choice, rawResponse) {
  const message = convertCompletionsMessageToBaseMessage({
    message: choice.message,
    rawResponse
  });
  message.response_metadata = {
    ...message.response_metadata,
    model: rawResponse.model,
    model_provider: "openrouter",
    model_name: rawResponse.model,
    finish_reason: choice.finish_reason
  };
  return message;
}
/**
 * Convert a streaming delta into a BaseMessageChunk.
 *
 * Delegates to the OpenAI completions converter for tool call chunk
 * parsing, audio handling, and role-specific message types, then
 * patches response_metadata to reflect the OpenRouter provider.
 */
function convertOpenRouterDeltaToBaseMessageChunk(delta, rawChunk, defaultRole) {
  const chunk = convertCompletionsDeltaToBaseMessageChunk({
    delta,
    rawResponse: rawChunk,
    defaultRole: defaultRole ?? "assistant"
  });
  chunk.response_metadata = {
    ...chunk.response_metadata,
    model_provider: "openrouter"
  };
  return chunk;
}
/**
 * Convert OpenRouter usage info to LangChain's `UsageMetadata`,
 * including prompt/completion token detail breakdowns when available.
 */
function convertUsageMetadata(usage) {
  if (!usage) return void 0;
  const result = {
    input_tokens: usage.prompt_tokens,
    output_tokens: usage.completion_tokens,
    total_tokens: usage.total_tokens
  };
  const promptDetails = usage.prompt_tokens_details;
  if (promptDetails) {
    const input_token_details = {};
    if (promptDetails.cached_tokens != null) input_token_details.cache_read = promptDetails.cached_tokens;
    if (promptDetails.audio_tokens != null) input_token_details.audio = promptDetails.audio_tokens;
    if (Object.keys(input_token_details).length > 0) result.input_token_details = input_token_details;
  }
  const completionDetails = usage.completion_tokens_details;
  if (completionDetails) {
    const output_token_details = {};
    if (completionDetails.reasoning_tokens != null) output_token_details.reasoning = completionDetails.reasoning_tokens;
    if (Object.keys(output_token_details).length > 0) result.output_token_details = output_token_details;
  }
  return result;
}

//#endregion
export { convertMessagesToOpenRouterParams, convertOpenRouterDeltaToBaseMessageChunk, convertOpenRouterResponseToBaseMessage, convertUsageMetadata };
//# sourceMappingURL=messages.js.map

package/dist/converters/messages.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"messages.js","names":[],"sources":["../../src/converters/messages.ts"],"sourcesContent":["import {\n BaseMessage,\n BaseMessageChunk,\n UsageMetadata,\n} from \"@langchain/core/messages\";\nimport {\n convertMessagesToCompletionsMessageParams,\n convertCompletionsMessageToBaseMessage,\n convertCompletionsDeltaToBaseMessageChunk,\n} from \"@langchain/openai\";\nimport type { OpenAI as OpenAIClient } from \"openai\";\nimport type { OpenRouter } from \"../api-types.js\";\n\n/**\n * The inner data shape of a streaming SSE chunk. Each parsed SSE event\n * contains this object directly (without the `data` wrapper that the\n * OpenAPI spec describes on `ChatStreamingResponseChunk`).\n */\nexport type StreamingChunkData = OpenRouter.ChatStreamingResponseChunk[\"data\"];\n\n// LangChain → OpenRouter\n/**\n * Convert an array of LangChain messages to the OpenRouter request format.\n *\n * Delegates to the OpenAI completions converter since OpenRouter's chat\n * API is wire-compatible with OpenAI's. This gives us full support for\n * standard content blocks, reasoning-model developer role mapping,\n * multi-modal inputs, and all edge cases handled upstream.\n */\nexport function convertMessagesToOpenRouterParams(\n messages: BaseMessage[],\n model?: string\n): OpenAIClient.Chat.Completions.ChatCompletionMessageParam[] {\n return convertMessagesToCompletionsMessageParams({\n messages,\n model,\n });\n}\n\n// OpenRouter → LangChain (non-streaming)\n/**\n * Convert a non-streaming OpenRouter response choice into a BaseMessage.\n *\n * Delegates to the OpenAI completions converter for tool call parsing,\n * multi-modal output handling, and audio support, then patches\n * response_metadata to reflect the OpenRouter provider.\n */\nexport function convertOpenRouterResponseToBaseMessage(\n choice: OpenRouter.ChatResponseChoice,\n rawResponse: OpenRouter.ChatResponse\n): BaseMessage {\n const message = convertCompletionsMessageToBaseMessage({\n message:\n choice.message as unknown as OpenAIClient.Chat.Completions.ChatCompletionMessage,\n rawResponse:\n rawResponse as unknown as OpenAIClient.Chat.Completions.ChatCompletion,\n });\n\n message.response_metadata = {\n ...message.response_metadata,\n model: rawResponse.model,\n model_provider: \"openrouter\",\n model_name: rawResponse.model,\n finish_reason: choice.finish_reason,\n };\n\n return message;\n}\n\n// OpenRouter → LangChain (streaming)\n/**\n * Convert a streaming delta into a BaseMessageChunk.\n *\n * Delegates to the OpenAI completions converter for tool call chunk\n * parsing, audio handling, and role-specific message types, then\n * patches response_metadata to reflect the OpenRouter provider.\n */\nexport function convertOpenRouterDeltaToBaseMessageChunk(\n delta: OpenRouter.ChatStreamingMessageChunk,\n rawChunk: StreamingChunkData,\n defaultRole?: string\n): BaseMessageChunk {\n const chunk = convertCompletionsDeltaToBaseMessageChunk({\n delta: delta as Record<string, unknown>,\n rawResponse:\n rawChunk as unknown as OpenAIClient.Chat.Completions.ChatCompletionChunk,\n defaultRole: (defaultRole ??\n \"assistant\") as OpenAIClient.Chat.ChatCompletionRole,\n });\n\n chunk.response_metadata = {\n ...chunk.response_metadata,\n model_provider: \"openrouter\",\n };\n\n return chunk;\n}\n\n// Usage metadata\n\n/**\n * Convert OpenRouter usage info to LangChain's `UsageMetadata`,\n * including prompt/completion token detail breakdowns when available.\n */\nexport function convertUsageMetadata(\n usage?: 
OpenRouter.ChatGenerationTokenUsage\n): UsageMetadata | undefined {\n if (!usage) return undefined;\n\n const result: UsageMetadata = {\n input_tokens: usage.prompt_tokens,\n output_tokens: usage.completion_tokens,\n total_tokens: usage.total_tokens,\n };\n\n const promptDetails = usage.prompt_tokens_details;\n if (promptDetails) {\n const input_token_details: Record<string, number> = {};\n if (promptDetails.cached_tokens != null)\n input_token_details.cache_read = promptDetails.cached_tokens;\n if (promptDetails.audio_tokens != null)\n input_token_details.audio = promptDetails.audio_tokens;\n if (Object.keys(input_token_details).length > 0)\n result.input_token_details = input_token_details;\n }\n\n const completionDetails = usage.completion_tokens_details;\n if (completionDetails) {\n const output_token_details: Record<string, number> = {};\n if (completionDetails.reasoning_tokens != null)\n output_token_details.reasoning = completionDetails.reasoning_tokens;\n if (Object.keys(output_token_details).length > 0)\n result.output_token_details = output_token_details;\n }\n\n return result;\n}\n"],"mappings":";;;;;;;;;;;AA6BA,SAAgB,kCACd,UACA,OAC4D;AAC5D,QAAO,0CAA0C;EAC/C;EACA;EACD,CAAC;;;;;;;;;AAWJ,SAAgB,uCACd,QACA,aACa;CACb,MAAM,UAAU,uCAAuC;EACrD,SACE,OAAO;EAEP;EACH,CAAC;AAEF,SAAQ,oBAAoB;EAC1B,GAAG,QAAQ;EACX,OAAO,YAAY;EACnB,gBAAgB;EAChB,YAAY,YAAY;EACxB,eAAe,OAAO;EACvB;AAED,QAAO;;;;;;;;;AAWT,SAAgB,yCACd,OACA,UACA,aACkB;CAClB,MAAM,QAAQ,0CAA0C;EAC/C;EACP,aACE;EACF,aAAc,eACZ;EACH,CAAC;AAEF,OAAM,oBAAoB;EACxB,GAAG,MAAM;EACT,gBAAgB;EACjB;AAED,QAAO;;;;;;AAST,SAAgB,qBACd,OAC2B;AAC3B,KAAI,CAAC,MAAO,QAAO;CAEnB,MAAM,SAAwB;EAC5B,cAAc,MAAM;EACpB,eAAe,MAAM;EACrB,cAAc,MAAM;EACrB;CAED,MAAM,gBAAgB,MAAM;AAC5B,KAAI,eAAe;EACjB,MAAM,sBAA8C,EAAE;AACtD,MAAI,cAAc,iBAAiB,KACjC,qBAAoB,aAAa,cAAc;AACjD,MAAI,cAAc,gBAAgB,KAChC,qBAAoB,QAAQ,cAAc;AAC5C,MAAI,OAAO,KAAK,oBAAoB,CAAC,SAAS,EAC5C,QAAO,sBAAsB;;CAGjC,MAAM,oBAAoB,MAAM;AAChC,KAAI,mBAAmB;EACrB,MAAM,uBAA+C,EAAE;AACvD,MAAI,kBAAkB,oBAAoB,KACxC,sBAAqB,YAAY,kBAAkB;AACrD,MAAI,OAAO,KAAK,qBAAqB,CAAC,SAAS,EAC7C,QAAO,uBAAuB;;AAGlC,QAAO"}
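
The usage converter above is self-contained, so its mapping can be shown concretely. In this sketch the deep `dist/converters` import path is an assumption (the package.json `exports` map is not part of this excerpt); the input and expected output follow the implementation exactly as written above.

// Assumed deep import; adjust if the package only exposes the root entry point.
import { convertUsageMetadata } from "@langchain/openrouter/dist/converters/messages.js";

const usage = {
  prompt_tokens: 120,
  completion_tokens: 45,
  total_tokens: 165,
  prompt_tokens_details: { cached_tokens: 100 },
  completion_tokens_details: { reasoning_tokens: 12 },
};

// Per the implementation above, this yields LangChain UsageMetadata:
// {
//   input_tokens: 120,
//   output_tokens: 45,
//   total_tokens: 165,
//   input_token_details: { cache_read: 100 },
//   output_token_details: { reasoning: 12 },
// }
console.log(convertUsageMetadata(usage));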

package/dist/converters/tools.cjs
ADDED
@@ -0,0 +1,30 @@
let _langchain_core_utils_function_calling = require("@langchain/core/utils/function_calling");

//#region src/converters/tools.ts
/**
 * Convert LangChain tool inputs to the OpenRouter (OpenAI-compatible) format.
 */
function convertToolsToOpenRouter(tools, options) {
  return tools.map((tool) => {
    return (0, _langchain_core_utils_function_calling.convertToOpenAITool)(tool, { strict: options?.strict });
  });
}
/**
 * Convert a LangChain `ToolChoice` value to the OpenRouter wire format.
 */
function formatToolChoice(toolChoice) {
  if (toolChoice === void 0 || toolChoice === null) return void 0;
  if (toolChoice === "auto") return "auto";
  if (toolChoice === "none") return "none";
  if (toolChoice === "any" || toolChoice === "required") return "required";
  if (typeof toolChoice === "string") return {
    type: "function",
    function: { name: toolChoice }
  };
  return toolChoice;
}

//#endregion
exports.convertToolsToOpenRouter = convertToolsToOpenRouter;
exports.formatToolChoice = formatToolChoice;
//# sourceMappingURL=tools.cjs.map

package/dist/converters/tools.cjs.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"tools.cjs","names":[],"sources":["../../src/converters/tools.ts"],"sourcesContent":["import { convertToOpenAITool } from \"@langchain/core/utils/function_calling\";\nimport type { BindToolsInput } from \"@langchain/core/language_models/chat_models\";\nimport type { ToolDefinition } from \"@langchain/core/language_models/base\";\nimport type { OpenRouter } from \"../api-types.js\";\n\n/**\n * Convert LangChain tool inputs to the OpenRouter (OpenAI-compatible) format.\n */\nexport function convertToolsToOpenRouter(\n tools: BindToolsInput[],\n options?: { strict?: boolean }\n): OpenRouter.ToolDefinitionJson[] {\n return tools.map((tool) => {\n const converted: ToolDefinition = convertToOpenAITool(tool, {\n strict: options?.strict,\n });\n return converted as unknown as OpenRouter.ToolDefinitionJson;\n });\n}\n\n/**\n * Convert a LangChain `ToolChoice` value to the OpenRouter wire format.\n */\nexport function formatToolChoice(\n toolChoice?: string | Record<string, unknown>\n): OpenRouter.ToolChoiceOption | undefined {\n if (toolChoice === undefined || toolChoice === null) return undefined;\n if (toolChoice === \"auto\") return \"auto\";\n if (toolChoice === \"none\") return \"none\";\n if (toolChoice === \"any\" || toolChoice === \"required\") return \"required\";\n if (typeof toolChoice === \"string\") {\n return { type: \"function\", function: { name: toolChoice } };\n }\n return toolChoice as unknown as OpenRouter.ToolChoiceOption;\n}\n"],"mappings":";;;;;;AAQA,SAAgB,yBACd,OACA,SACiC;AACjC,QAAO,MAAM,KAAK,SAAS;AAIzB,yEAHsD,MAAM,EAC1D,QAAQ,SAAS,QAClB,CAAC;GAEF;;;;;AAMJ,SAAgB,iBACd,YACyC;AACzC,KAAI,eAAe,UAAa,eAAe,KAAM,QAAO;AAC5D,KAAI,eAAe,OAAQ,QAAO;AAClC,KAAI,eAAe,OAAQ,QAAO;AAClC,KAAI,eAAe,SAAS,eAAe,WAAY,QAAO;AAC9D,KAAI,OAAO,eAAe,SACxB,QAAO;EAAE,MAAM;EAAY,UAAU,EAAE,MAAM,YAAY;EAAE;AAE7D,QAAO"}

package/dist/converters/tools.js
ADDED
@@ -0,0 +1,29 @@
import { convertToOpenAITool } from "@langchain/core/utils/function_calling";

//#region src/converters/tools.ts
/**
 * Convert LangChain tool inputs to the OpenRouter (OpenAI-compatible) format.
 */
function convertToolsToOpenRouter(tools, options) {
  return tools.map((tool) => {
    return convertToOpenAITool(tool, { strict: options?.strict });
  });
}
/**
 * Convert a LangChain `ToolChoice` value to the OpenRouter wire format.
 */
function formatToolChoice(toolChoice) {
  if (toolChoice === void 0 || toolChoice === null) return void 0;
  if (toolChoice === "auto") return "auto";
  if (toolChoice === "none") return "none";
  if (toolChoice === "any" || toolChoice === "required") return "required";
  if (typeof toolChoice === "string") return {
    type: "function",
    function: { name: toolChoice }
  };
  return toolChoice;
}

//#endregion
export { convertToolsToOpenRouter, formatToolChoice };
//# sourceMappingURL=tools.js.map

package/dist/converters/tools.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"tools.js","names":[],"sources":["../../src/converters/tools.ts"],"sourcesContent":["import { convertToOpenAITool } from \"@langchain/core/utils/function_calling\";\nimport type { BindToolsInput } from \"@langchain/core/language_models/chat_models\";\nimport type { ToolDefinition } from \"@langchain/core/language_models/base\";\nimport type { OpenRouter } from \"../api-types.js\";\n\n/**\n * Convert LangChain tool inputs to the OpenRouter (OpenAI-compatible) format.\n */\nexport function convertToolsToOpenRouter(\n tools: BindToolsInput[],\n options?: { strict?: boolean }\n): OpenRouter.ToolDefinitionJson[] {\n return tools.map((tool) => {\n const converted: ToolDefinition = convertToOpenAITool(tool, {\n strict: options?.strict,\n });\n return converted as unknown as OpenRouter.ToolDefinitionJson;\n });\n}\n\n/**\n * Convert a LangChain `ToolChoice` value to the OpenRouter wire format.\n */\nexport function formatToolChoice(\n toolChoice?: string | Record<string, unknown>\n): OpenRouter.ToolChoiceOption | undefined {\n if (toolChoice === undefined || toolChoice === null) return undefined;\n if (toolChoice === \"auto\") return \"auto\";\n if (toolChoice === \"none\") return \"none\";\n if (toolChoice === \"any\" || toolChoice === \"required\") return \"required\";\n if (typeof toolChoice === \"string\") {\n return { type: \"function\", function: { name: toolChoice } };\n }\n return toolChoice as unknown as OpenRouter.ToolChoiceOption;\n}\n"],"mappings":";;;;;;AAQA,SAAgB,yBACd,OACA,SACiC;AACjC,QAAO,MAAM,KAAK,SAAS;AAIzB,SAHkC,oBAAoB,MAAM,EAC1D,QAAQ,SAAS,QAClB,CAAC;GAEF;;;;;AAMJ,SAAgB,iBACd,YACyC;AACzC,KAAI,eAAe,UAAa,eAAe,KAAM,QAAO;AAC5D,KAAI,eAAe,OAAQ,QAAO;AAClC,KAAI,eAAe,OAAQ,QAAO;AAClC,KAAI,eAAe,SAAS,eAAe,WAAY,QAAO;AAC9D,KAAI,OAAO,eAAe,SACxB,QAAO;EAAE,MAAM;EAAY,UAAU,EAAE,MAAM,YAAY;EAAE;AAE7D,QAAO"}

package/dist/index.cjs
ADDED
@@ -0,0 +1,10 @@
Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
const require_errors = require('./utils/errors.cjs');
const require_profiles = require('./profiles.cjs');
const require_index = require('./chat_models/index.cjs');

exports.ChatOpenRouter = require_index.ChatOpenRouter;
exports.OPENROUTER_MODEL_PROFILES = require_profiles;
exports.OpenRouterAuthError = require_errors.OpenRouterAuthError;
exports.OpenRouterError = require_errors.OpenRouterError;
exports.OpenRouterRateLimitError = require_errors.OpenRouterRateLimitError;

package/dist/index.d.cts
ADDED
@@ -0,0 +1,6 @@
import { OpenRouter } from "./api-types.cjs";
import { ChatOpenRouterCallOptions, ChatOpenRouterFields, ChatOpenRouterParams, OpenRouterPlugin, OpenRouterResponseFormat } from "./chat_models/types.cjs";
import { ChatOpenRouter } from "./chat_models/index.cjs";
import { OpenRouterAuthError, OpenRouterError, OpenRouterRateLimitError } from "./utils/errors.cjs";
import { PROFILES } from "./profiles.cjs";
export { ChatOpenRouter, type ChatOpenRouterCallOptions, type ChatOpenRouterFields, type ChatOpenRouterParams as ChatOpenRouterInput, PROFILES as OPENROUTER_MODEL_PROFILES, type OpenRouter, OpenRouterAuthError, OpenRouterError, type OpenRouterPlugin, OpenRouterRateLimitError, type OpenRouterResponseFormat };

package/dist/index.d.ts
ADDED
@@ -0,0 +1,6 @@
import { OpenRouter } from "./api-types.js";
import { ChatOpenRouterCallOptions, ChatOpenRouterFields, ChatOpenRouterParams, OpenRouterPlugin, OpenRouterResponseFormat } from "./chat_models/types.js";
import { ChatOpenRouter } from "./chat_models/index.js";
import { OpenRouterAuthError, OpenRouterError, OpenRouterRateLimitError } from "./utils/errors.js";
import { PROFILES } from "./profiles.js";
export { ChatOpenRouter, type ChatOpenRouterCallOptions, type ChatOpenRouterFields, type ChatOpenRouterParams as ChatOpenRouterInput, PROFILES as OPENROUTER_MODEL_PROFILES, type OpenRouter, OpenRouterAuthError, OpenRouterError, type OpenRouterPlugin, OpenRouterRateLimitError, type OpenRouterResponseFormat };

package/dist/index.js
ADDED
@@ -0,0 +1,5 @@
import { OpenRouterAuthError, OpenRouterError, OpenRouterRateLimitError } from "./utils/errors.js";
import PROFILES from "./profiles.js";
import { ChatOpenRouter } from "./chat_models/index.js";

export { ChatOpenRouter, PROFILES as OPENROUTER_MODEL_PROFILES, OpenRouterAuthError, OpenRouterError, OpenRouterRateLimitError };
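
Putting the entry points together, consumers import everything from the package root. The error-handling sketch below assumes only what the exports above guarantee (the class names); the fields those error classes carry live in `utils/errors`, which is not shown in this excerpt, and `OPENROUTER_MODEL_PROFILES` is likewise assumed to be a plain object keyed by model id.

import {
  ChatOpenRouter,
  OPENROUTER_MODEL_PROFILES,
  OpenRouterAuthError,
  OpenRouterError,
  OpenRouterRateLimitError,
} from "@langchain/openrouter";

// Assumption: a record keyed by model id (profiles.js is bundled but not shown here).
console.log(Object.keys(OPENROUTER_MODEL_PROFILES).length, "bundled model profiles");

const model = new ChatOpenRouter({ model: "anthropic/claude-4-sonnet" });

try {
  await model.invoke("ping");
} catch (err) {
  if (err instanceof OpenRouterRateLimitError) {
    console.warn("Rate limited by OpenRouter; retry later.");
  } else if (err instanceof OpenRouterAuthError) {
    console.warn("Check OPENROUTER_API_KEY.");
  } else if (err instanceof OpenRouterError) {
    console.error("OpenRouter request failed:", err);
  } else {
    throw err;
  }
}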