openrouter-pricing-mcp 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,244 @@
1
+ # OpenRouter MCP Server
2
+
3
+ [![npm version](https://img.shields.io/npm/v/openrouter-pricing-mcp.svg)](https://www.npmjs.com/package/openrouter-pricing-mcp)
4
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
5
+ [![Node.js](https://img.shields.io/badge/node-%3E%3D18.0.0-brightgreen.svg)](https://nodejs.org)
6
+ [![MCP](https://img.shields.io/badge/MCP-compatible-blueviolet.svg)](https://modelcontextprotocol.io)
7
+
8
+ A [Model Context Protocol (MCP)](https://modelcontextprotocol.io) server that provides **live model pricing data** directly from the [OpenRouter](https://openrouter.ai) API. Query, compare, and discover the cheapest AI models — right from your AI coding assistant.
9
+
10
+ Works with any MCP-compatible client: **Antigravity**, **Claude Desktop**, **Cursor**, and more.
11
+
12
+ ---
13
+
14
+ ## Table of Contents
15
+
16
+ - [Features](#features)
17
+ - [Quick Start](#quick-start)
18
+ - [Installation](#installation)
19
+ - [Antigravity](#antigravity)
20
+ - [Claude Desktop](#claude-desktop)
21
+ - [Cursor](#cursor)
22
+ - [Other MCP Clients](#other-mcp-clients)
23
+ - [Configuration](#configuration)
24
+ - [Available Tools](#available-tools)
25
+ - [Examples](#examples)
26
+ - [Development](#development)
27
+ - [Contributing](#contributing)
28
+ - [License](#license)
29
+
30
+ ---
31
+
32
+ ## Features
33
+
34
+ - 🔴 **Live Pricing** — Pulls real-time data from the OpenRouter `/api/v1/models` endpoint
35
+ - ⚡ **Intelligent Caching** — 5-minute in-memory cache for fast responses without hammering the API
36
+ - 🔑 **Optional API Key** — Works without an API key for public data; pass one via environment variable for authenticated access
37
+ - 🔍 **Fuzzy Search** — Mistype a model name? Get smart suggestions instead of a cryptic error
38
+ - 📊 **Table-Formatted Comparisons** — Side-by-side model comparisons in clean markdown tables
39
+ - 🆓 **Free Model Detection** — Free-tier models are clearly flagged
40
+
41
+ ---
42
+
43
+ ## Quick Start
44
+
45
+ No installation required — run directly with `npx`:
46
+
47
+ ```bash
48
+ npx -y openrouter-pricing-mcp
49
+ ```
50
+
51
+ ---
52
+
53
+ ## Installation
54
+
55
+ ### Antigravity
56
+
57
+ Add the following to your Antigravity MCP settings:
58
+
59
+ ```json
60
+ {
61
+ "mcpServers": {
62
+ "openrouter": {
63
+ "command": "npx",
64
+ "args": ["-y", "openrouter-pricing-mcp"]
65
+ }
66
+ }
67
+ }
68
+ ```
69
+
70
+ To pass an OpenRouter API key (optional):
71
+
72
+ ```json
73
+ {
74
+ "mcpServers": {
75
+ "openrouter": {
76
+ "command": "npx",
77
+ "args": ["-y", "openrouter-pricing-mcp"],
78
+ "env": {
79
+ "OPENROUTER_API_KEY": "sk-or-v1-your-key-here"
80
+ }
81
+ }
82
+ }
83
+ }
84
+ ```
85
+
86
+ ### Claude Desktop
87
+
88
+ Add to your `claude_desktop_config.json` (found in `~/Library/Application Support/Claude/` on macOS or `%APPDATA%\Claude\` on Windows):
89
+
90
+ ```json
91
+ {
92
+ "mcpServers": {
93
+ "openrouter": {
94
+ "command": "npx",
95
+ "args": ["-y", "openrouter-pricing-mcp"],
96
+ "env": {
97
+ "OPENROUTER_API_KEY": "sk-or-v1-your-key-here"
98
+ }
99
+ }
100
+ }
101
+ }
102
+ ```
103
+
104
+ ### Cursor
105
+
106
+ Add to your Cursor MCP configuration:
107
+
108
+ ```json
109
+ {
110
+ "mcpServers": {
111
+ "openrouter": {
112
+ "command": "npx",
113
+ "args": ["-y", "openrouter-pricing-mcp"],
114
+ "env": {
115
+ "OPENROUTER_API_KEY": "sk-or-v1-your-key-here"
116
+ }
117
+ }
118
+ }
119
+ }
120
+ ```
121
+
122
+ ### Other MCP Clients
123
+
124
+ This server communicates over **stdio** and is compatible with any MCP client. Use `npx -y openrouter-pricing-mcp` as the command, or install globally:
125
+
126
+ ```bash
127
+ npm install -g openrouter-pricing-mcp
128
+ openrouter-pricing-mcp
129
+ ```
130
+
131
+ ---
132
+
133
+ ## Configuration
134
+
135
+ | Environment Variable | Required | Description |
136
+ |---|---|---|
137
+ | `OPENROUTER_API_KEY` | No | Your OpenRouter API key. The models endpoint is public, so this is optional. Providing a key may give you higher rate limits. |
138
+
139
+ ---
140
+
141
+ ## Available Tools
142
+
143
+ | Tool | Description |
144
+ |---|---|
145
+ | `get_model_pricing` | Get detailed pricing for a specific model by its full ID |
146
+ | `list_all_models_pricing` | Browse all available models with pricing (supports limit) |
147
+ | `compare_model_costs` | Compare multiple models side-by-side in a markdown table |
148
+ | `get_cheapest_models` | Find the cheapest models sorted by prompt or completion cost |
149
+ | `find_models_by_context_length` | Discover models with a minimum context window size |
150
+
151
+ ---
152
+
153
+ ## Examples
154
+
155
+ ### Get pricing for a specific model
156
+
157
+ > *"How much does GPT-4o cost on OpenRouter?"*
158
+
159
+ The AI assistant calls `get_model_pricing` with `model_id: "openai/gpt-4o"` and returns:
160
+
161
+ ```
162
+ Model: GPT-4o (openai/gpt-4o)
163
+ Context Length: 128,000 tokens
164
+ Prompt Cost: $0.00000250 / token
165
+ Completion Cost: $0.00001000 / token
166
+ Image Cost: $0.00361300 / token
167
+ Request Cost: FREE
168
+ ```
169
+
170
+ ### Compare models
171
+
172
+ > *"Compare Claude Sonnet 4 vs GPT-4o vs Gemini 2.5 Pro"*
173
+
174
+ The AI assistant calls `compare_model_costs` and returns:
175
+
176
+ ```
177
+ | Model | Prompt Cost | Completion Cost | Context Length |
178
+ |-------|------------|----------------|----------------|
179
+ | openai/gpt-4o | $0.00000250 | $0.00001000 | 128,000 |
180
+ | anthropic/claude-sonnet-4 | $0.00000300 | $0.00001500 | 200,000 |
181
+ | google/gemini-2.5-pro-preview | $0.00000125 | $0.00001000 | 1,048,576 |
182
+ ```
183
+
184
+ ### Find cheapest models
185
+
186
+ > *"What are the 5 cheapest models?"*
187
+
188
+ The AI assistant calls `get_cheapest_models` and returns a ranked list, with free-tier models flagged with 🆓.
189
+
190
+ ### Find models by context length
191
+
192
+ > *"Which models support at least 200k tokens?"*
193
+
194
+ The AI assistant calls `find_models_by_context_length` with `min_context_length: 200000`.
195
+
196
+ ---
197
+
198
+ ## Development
199
+
200
+ ```bash
201
+ # Clone the repository
202
+ git clone https://github.com/Semicolon-D/openrouter-pricing-mcp.git
203
+ cd openrouter-pricing-mcp
204
+
205
+ # Install dependencies
206
+ npm install
207
+
208
+ # Build
209
+ npm run build
210
+
211
+ # Run tests
212
+ npm test
213
+
214
+ # Watch mode (auto-rebuild on changes)
215
+ npm run dev
216
+ ```
217
+
218
+ ### Project Structure
219
+
220
+ ```
221
+ openrouter-pricing-mcp/
222
+ ├── src/
223
+ │ ├── index.ts # MCP server + exported tool handlers
224
+ │ └── index.test.ts # Unit tests (23 tests, 7 suites)
225
+ ├── build/ # Compiled output (auto-generated)
226
+ ├── package.json
227
+ ├── tsconfig.json
228
+ ├── README.md
229
+ ├── CONTRIBUTING.md
230
+ ├── LICENSE
231
+ └── .gitignore
232
+ ```
233
+
234
+ ---
235
+
236
+ ## Contributing
237
+
238
+ Contributions are welcome! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
239
+
240
+ ---
241
+
242
+ ## License
243
+
244
+ This project is licensed under the [MIT License](LICENSE) — use it however you want, no strings attached.
package/build/index.js ADDED
@@ -0,0 +1,314 @@
#!/usr/bin/env node
// Node built-ins first, then the MCP SDK.
import { realpathSync } from "node:fs";
import { pathToFileURL } from "node:url";
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { CallToolRequestSchema, ListToolsRequestSchema, } from "@modelcontextprotocol/sdk/types.js";
// ─── Cache ───────────────────────────────────────────────────────────────────
// Module-level in-memory cache of the OpenRouter model list, shared by
// getModels() below.
let cachedModels = [];
let lastFetchTime = 0;
const CACHE_DURATION_MS = 5 * 60 * 1000; // 5 minutes

/** Test-only helper: drop the cached model list so the next call refetches. */
export function _resetCache() {
    cachedModels = [];
    lastFetchTime = 0;
}
/**
 * Return the model list, serving from the in-memory cache while it is still
 * fresh (< CACHE_DURATION_MS old). When a refresh fails, stale cached data is
 * preferred over surfacing the error.
 *
 * @param {Function} [fetchFn] - Injectable fetcher (overridden in tests).
 * @returns {Promise<Array>} Cached or freshly fetched model records.
 * @throws {Error} When fetching fails and no cached data exists.
 */
export async function getModels(fetchFn = fetchModelsFromAPI) {
    const now = Date.now();
    const cacheIsFresh = cachedModels.length > 0 && now - lastFetchTime < CACHE_DURATION_MS;
    if (cacheIsFresh) {
        return cachedModels;
    }
    try {
        cachedModels = await fetchFn();
        lastFetchTime = now;
    }
    catch (error) {
        // No fallback data available — propagate as a descriptive error.
        if (cachedModels.length === 0) {
            throw new Error(`Error fetching models from OpenRouter: ${error.message}`);
        }
        // Otherwise fall through and serve the stale cache.
    }
    return cachedModels;
}
/**
 * Fetch the full model list from the OpenRouter API.
 *
 * Sends OPENROUTER_API_KEY as a Bearer token when set; the endpoint is
 * public, so the key is optional.
 *
 * @returns {Promise<Array>} The `data` array from the API response.
 * @throws {Error} On a non-2xx HTTP response.
 */
async function fetchModelsFromAPI() {
    // Use API key if provided (optional — the models endpoint is public).
    const apiKey = process.env.OPENROUTER_API_KEY;
    const headers = apiKey
        ? { "Content-Type": "application/json", Authorization: `Bearer ${apiKey}` }
        : { "Content-Type": "application/json" };
    const response = await fetch("https://openrouter.ai/api/v1/models", { headers });
    if (!response.ok) {
        throw new Error(`Failed to fetch models: ${response.status} ${response.statusText}`);
    }
    const payload = await response.json();
    return payload.data;
}
// ─── Helpers ─────────────────────────────────────────────────────────────────
/**
 * Render a per-token cost string from the API for display.
 * Non-numeric input is returned unchanged, zero becomes "FREE", anything
 * else is shown in dollars with 8 decimal places.
 *
 * @param {string} costStr - Raw cost value from the OpenRouter pricing object.
 * @returns {string} Human-readable cost.
 */
export function formatCost(costStr) {
    const value = Number.parseFloat(costStr);
    if (Number.isNaN(value)) {
        return costStr;
    }
    return value === 0 ? "FREE" : `$${value.toFixed(8)}`;
}
// ─── Tool Handlers ───────────────────────────────────────────────────────────
/**
 * Handle the `get_model_pricing` tool.
 *
 * Looks up a model by exact ID and returns a formatted pricing summary. On a
 * miss, falls back to a case-insensitive substring search and suggests up to
 * five close matches.
 *
 * @param {Array} models - Model records from the OpenRouter /models endpoint.
 * @param {Object} args - Tool arguments; expects `model_id` (string).
 * @returns {{content: Array, isError?: boolean}} MCP tool result.
 */
export function handleGetModelPricing(models, args) {
    const modelId = String(args.model_id ?? "");
    const model = models.find((m) => m.id === modelId);
    if (!model) {
        // Attempt fuzzy match — but only for a non-empty query. Previously an
        // empty model_id matched every model (''.includes('') is true) and
        // produced a misleading "Did you mean" list of arbitrary models.
        const fuzzy = modelId
            ? models.filter((m) => m.id.toLowerCase().includes(modelId.toLowerCase()))
            : [];
        if (fuzzy.length > 0) {
            const suggestions = fuzzy.slice(0, 5).map((m) => m.id).join("\n ");
            return {
                content: [
                    {
                        type: "text",
                        text: `Model "${modelId}" not found. Did you mean one of these?\n ${suggestions}`,
                    },
                ],
                isError: true,
            };
        }
        return {
            content: [{ type: "text", text: `Model "${modelId}" not found.` }],
            isError: true,
        };
    }
    // Some models omit image/request pricing; fall back to "N/A" rather than
    // interpolating the literal string "undefined" into the report.
    return {
        content: [
            {
                type: "text",
                text: [
                    `Model: ${model.name} (${model.id})`,
                    `Context Length: ${model.context_length?.toLocaleString() ?? "N/A"} tokens`,
                    `Prompt Cost: ${formatCost(model.pricing.prompt)} / token`,
                    `Completion Cost: ${formatCost(model.pricing.completion)} / token`,
                    `Image Cost: ${formatCost(model.pricing.image ?? "N/A")} / token`,
                    `Request Cost: ${formatCost(model.pricing.request ?? "N/A")}`,
                ].join("\n"),
            },
        ],
    };
}
/**
 * Handle `list_all_models_pricing`: enumerate models with their prompt and
 * completion costs. `limit` is clamped to [1, 200] and defaults to 50.
 *
 * @param {Array} models - Model records.
 * @param {Object} args - Tool arguments; optional `limit` (number).
 * @returns {{content: Array}} MCP tool result.
 */
export function handleListAllModelsPricing(models, args) {
    const requested = Number(args.limit) || 50;
    const limit = Math.min(Math.max(requested, 1), 200);
    const shown = models.slice(0, limit);
    const rows = shown.map((m, i) => `${i + 1}. ${m.id} — Prompt: ${formatCost(m.pricing.prompt)}, Completion: ${formatCost(m.pricing.completion)}`);
    return {
        content: [
            {
                type: "text",
                text: `Showing ${shown.length} of ${models.length} models:\n\n${rows.join("\n")}`,
            },
        ],
    };
}
/**
 * Handle `compare_model_costs`: render a markdown table comparing the given
 * model IDs side-by-side. Unknown IDs are reported in a trailing warning
 * line; it is an error only when none of the IDs match.
 *
 * @param {Array} models - Model records.
 * @param {Object} args - Tool arguments; expects `model_ids` (string[]).
 * @returns {{content: Array, isError?: boolean}} MCP tool result.
 */
export function handleCompareModelCosts(models, args) {
    const modelIds = args.model_ids;
    if (!Array.isArray(modelIds) || modelIds.length === 0) {
        return {
            content: [{ type: "text", text: "model_ids must be a non-empty array of strings." }],
            isError: true,
        };
    }
    const found = [];
    const notFound = [];
    for (const id of modelIds) {
        const match = models.find((model) => model.id === id);
        if (match) {
            found.push(match);
        }
        else {
            notFound.push(id);
        }
    }
    if (found.length === 0) {
        return {
            content: [{ type: "text", text: `None of the specified models were found: ${notFound.join(", ")}` }],
            isError: true,
        };
    }
    const tableLines = [
        `| Model | Prompt Cost | Completion Cost | Context Length |`,
        `|-------|------------|----------------|----------------|`,
        ...found.map((m) => `| ${m.id} | ${formatCost(m.pricing.prompt)} | ${formatCost(m.pricing.completion)} | ${m.context_length?.toLocaleString() ?? "N/A"} |`),
    ];
    let text = tableLines.join("\n");
    if (notFound.length > 0) {
        text += `\n\n⚠️ Models not found: ${notFound.join(", ")}`;
    }
    return {
        content: [{ type: "text", text }],
    };
}
/**
 * Handle `get_cheapest_models`: rank models by the chosen pricing metric
 * ascending. `metric` is "prompt" unless "completion" is explicitly given;
 * `limit` is clamped to [1, 100] and defaults to 10. Zero-cost models get a
 * 🆓 flag.
 *
 * @param {Array} models - Model records.
 * @param {Object} args - Tool arguments; optional `limit`, `metric`.
 * @returns {{content: Array}} MCP tool result.
 */
export function handleGetCheapestModels(models, args) {
    const limit = Math.min(Math.max(Number(args.limit) || 10, 1), 100);
    const metric = String(args.metric) === "completion" ? "completion" : "prompt";
    // Drop models with non-numeric pricing before ranking.
    const priced = models.filter((m) => {
        const cost = parseFloat(m.pricing[metric]);
        return !isNaN(cost) && cost >= 0;
    });
    priced.sort((a, b) => parseFloat(a.pricing[metric]) - parseFloat(b.pricing[metric]));
    const ranked = priced.slice(0, limit);
    const rows = ranked.map((m, i) => {
        const freeFlag = parseFloat(m.pricing[metric]) === 0 ? " 🆓" : "";
        return `${i + 1}. ${m.id} — ${metric}: ${formatCost(m.pricing[metric])}${freeFlag}`;
    });
    return {
        content: [
            {
                type: "text",
                text: `Top ${ranked.length} cheapest models by ${metric} cost:\n\n${rows.join("\n")}`,
            },
        ],
    };
}
/**
 * Handle `find_models_by_context_length`: list models whose context window is
 * at least `min_context_length` tokens, largest first. `limit` is clamped to
 * [1, 100] and defaults to 20.
 *
 * @param {Array} models - Model records.
 * @param {Object} args - Tool arguments; expects `min_context_length`,
 *   optional `limit`.
 * @returns {{content: Array, isError?: boolean}} MCP tool result.
 */
export function handleFindModelsByContextLength(models, args) {
    const minContext = Number(args.min_context_length);
    // NaN is falsy here, so non-numeric input also hits this guard.
    if (!minContext || minContext <= 0) {
        return {
            content: [{ type: "text", text: "min_context_length must be a positive number." }],
            isError: true,
        };
    }
    const limit = Math.min(Math.max(Number(args.limit) || 20, 1), 100);
    const matching = models.filter((m) => m.context_length >= minContext);
    matching.sort((a, b) => b.context_length - a.context_length);
    const shown = matching.slice(0, limit);
    const rows = shown.map((m, i) => `${i + 1}. ${m.id} — Context: ${m.context_length.toLocaleString()} tokens, Prompt: ${formatCost(m.pricing.prompt)}`);
    return {
        content: [
            {
                type: "text",
                text: `Found ${matching.length} models supporting >= ${minContext.toLocaleString()} tokens (showing top ${shown.length}):\n\n${rows.join("\n")}`,
            },
        ],
    };
}
// ─── Tool Definitions ────────────────────────────────────────────────────────
// JSON-schema metadata advertised to MCP clients via tools/list.
// Tool names must stay in sync with the HANDLER_MAP dispatch table below.
export const TOOL_DEFINITIONS = [
    // Exact-ID lookup with fuzzy-match fallback.
    {
        name: "get_model_pricing",
        description: "Get pricing details for a specific OpenRouter model by its full ID (e.g. google/gemini-2.5-pro-preview). Includes prompt, completion, image, and request costs.",
        inputSchema: {
            type: "object",
            properties: {
                model_id: {
                    type: "string",
                    description: "The full ID of the model (e.g. openai/gpt-4o, anthropic/claude-sonnet-4)",
                },
            },
            required: ["model_id"],
        },
    },
    // Paginated catalog browse.
    {
        name: "list_all_models_pricing",
        description: "List all OpenRouter models and their pricing. Useful to discover available models and browse prices.",
        inputSchema: {
            type: "object",
            properties: {
                limit: {
                    type: "number",
                    description: "Limit the number of results returned (default: 50, max: 200)",
                },
            },
            required: [],
        },
    },
    // Side-by-side markdown table comparison.
    {
        name: "compare_model_costs",
        description: "Compare costs between multiple OpenRouter models side-by-side in a table format.",
        inputSchema: {
            type: "object",
            properties: {
                model_ids: {
                    type: "array",
                    items: { type: "string" },
                    description: "Array of model IDs to compare (e.g. [\"openai/gpt-4o\", \"anthropic/claude-sonnet-4\"])",
                },
            },
            required: ["model_ids"],
        },
    },
    // Cost-ascending ranking.
    {
        name: "get_cheapest_models",
        description: "Find the most cost-effective OpenRouter models, sorted by price. Free models are flagged with 🆓.",
        inputSchema: {
            type: "object",
            properties: {
                limit: {
                    type: "number",
                    description: "Number of results to return (default: 10, max: 100)",
                },
                metric: {
                    type: "string",
                    description: "Which metric to sort by: 'prompt' or 'completion' (default: 'prompt')",
                    enum: ["prompt", "completion"],
                },
            },
            required: [],
        },
    },
    // Context-window filter.
    {
        name: "find_models_by_context_length",
        description: "Find models that support at least a specific context window size (e.g., models with >= 128000 tokens).",
        inputSchema: {
            type: "object",
            properties: {
                min_context_length: {
                    type: "number",
                    description: "Minimum context length required in tokens",
                },
                limit: {
                    type: "number",
                    description: "Number of results (default: 20, max: 100)",
                },
            },
            required: ["min_context_length"],
        },
    },
];
// ─── Server Setup ────────────────────────────────────────────────────────────
// Dispatch table: maps each advertised tool name (see TOOL_DEFINITIONS) to
// its handler function.
const HANDLER_MAP = {
    get_model_pricing: handleGetModelPricing,
    list_all_models_pricing: handleListAllModelsPricing,
    compare_model_costs: handleCompareModelCosts,
    get_cheapest_models: handleGetCheapestModels,
    find_models_by_context_length: handleFindModelsByContextLength,
};
/**
 * Build the MCP server: advertises TOOL_DEFINITIONS on tools/list and routes
 * tools/call through HANDLER_MAP, fetching (possibly cached) model data for
 * each call.
 *
 * @returns {Server} A configured, not-yet-connected MCP server.
 */
function createServer() {
    const server = new Server({ name: "openrouter-pricing-mcp", version: "1.0.0" }, { capabilities: { tools: {} } });
    server.setRequestHandler(ListToolsRequestSchema, async () => ({
        tools: TOOL_DEFINITIONS,
    }));
    server.setRequestHandler(CallToolRequestSchema, async (request) => {
        const { name, arguments: args } = request.params;
        const handler = HANDLER_MAP[name];
        if (!handler) {
            throw new Error(`Unknown tool: ${name}`);
        }
        const models = await getModels();
        return handler(models, args ?? {});
    });
    return server;
}
/**
 * Entry point: create the MCP server and connect it over stdio.
 * Logs to stderr — stdout is reserved for the MCP protocol stream.
 */
async function main() {
    const server = createServer();
    const transport = new StdioServerTransport();
    await server.connect(transport);
    console.error("OpenRouter MCP Server running on stdio");
}
// Only start the server when this file is executed directly (not imported by tests).
// BUG FIX: the previous check compared import.meta.url against argv[1] with a
// string suffix test, which fails when the script is launched through the npm
// "bin" symlink (argv[1] is the symlink path while import.meta.url points at
// the resolved build/index.js), so the published CLI silently never started.
// Resolving the symlink and comparing canonical file:// URLs handles both the
// direct and symlinked invocations.
let isDirectRun = false;
if (process.argv[1]) {
    try {
        isDirectRun = import.meta.url === pathToFileURL(realpathSync(process.argv[1])).href;
    }
    catch {
        // argv[1] may not resolve to a real file (embedded runtimes, REPL);
        // treat that as "imported, do not auto-start".
        isDirectRun = false;
    }
}
if (isDirectRun) {
    main().catch((error) => {
        console.error("Fatal error in main():", error);
        process.exit(1);
    });
}
@@ -0,0 +1,193 @@
1
+ import { describe, it } from "node:test";
2
+ import assert from "node:assert/strict";
3
+ import { formatCost, handleGetModelPricing, handleListAllModelsPricing, handleCompareModelCosts, handleGetCheapestModels, handleFindModelsByContextLength, TOOL_DEFINITIONS, } from "./index.js";
4
// ─── Fixture Data ────────────────────────────────────────────────────────────
// Static stand-in for the OpenRouter /models payload. Covers a paid flagship,
// a free model, a very cheap model, and a huge-context model so every handler
// code path can be exercised deterministically.
const MOCK_MODELS = [
    {
        id: "openai/gpt-4o",
        name: "GPT-4o",
        pricing: { prompt: "0.0000025", completion: "0.00001", request: "0", image: "0.003613" },
        context_length: 128000,
    },
    {
        id: "anthropic/claude-sonnet-4",
        name: "Claude Sonnet 4",
        pricing: { prompt: "0.000003", completion: "0.000015", request: "0", image: "0.0048" },
        context_length: 200000,
    },
    {
        id: "google/gemini-2.5-pro-preview",
        name: "Gemini 2.5 Pro Preview",
        pricing: { prompt: "0.00000125", completion: "0.00001", request: "0", image: "0.000265" },
        context_length: 1048576,
    },
    {
        id: "meta-llama/llama-3-8b-instruct:free",
        name: "Llama 3 8B Instruct (Free)",
        pricing: { prompt: "0", completion: "0", request: "0", image: "0" },
        context_length: 8192,
    },
    {
        id: "mistralai/mistral-small",
        name: "Mistral Small",
        pricing: { prompt: "0.0000001", completion: "0.0000003", request: "0", image: "0" },
        context_length: 32000,
    },
];
// ─── formatCost ──────────────────────────────────────────────────────────────
describe("formatCost", () => {
    it("formats a normal cost", () => {
        assert.equal(formatCost("0.000003"), "$0.00000300");
    });
    it("returns FREE for zero-cost", () => {
        assert.equal(formatCost("0"), "FREE");
    });
    it("handles NaN gracefully by returning original string", () => {
        assert.equal(formatCost("N/A"), "N/A");
    });
    it("handles very small costs accurately", () => {
        assert.equal(formatCost("0.0000001"), "$0.00000010");
    });
});
// ─── get_model_pricing ──────────────────────────────────────────────────────
describe("handleGetModelPricing", () => {
    it("returns pricing for a known model", () => {
        const res = handleGetModelPricing(MOCK_MODELS, { model_id: "openai/gpt-4o" });
        assert.equal(res.isError, undefined);
        const body = res.content[0].text;
        // Name, ID, and formatted context length should all appear.
        assert.ok(body.includes("GPT-4o"));
        assert.ok(body.includes("openai/gpt-4o"));
        assert.ok(body.includes("128,000"));
    });
    it("returns fuzzy suggestions for a partial match", () => {
        const res = handleGetModelPricing(MOCK_MODELS, { model_id: "gpt-4" });
        assert.equal(res.isError, true);
        const body = res.content[0].text;
        assert.ok(body.includes("Did you mean"));
        assert.ok(body.includes("openai/gpt-4o"));
    });
    it("returns a clean error for a completely unknown model", () => {
        const res = handleGetModelPricing(MOCK_MODELS, { model_id: "nonexistent/model-xyz-999" });
        assert.equal(res.isError, true);
        assert.ok(res.content[0].text.includes("not found"));
    });
});
// ─── list_all_models_pricing ────────────────────────────────────────────────
describe("handleListAllModelsPricing", () => {
    it("returns a list limited to specified count", () => {
        const res = handleListAllModelsPricing(MOCK_MODELS, { limit: 2 });
        const body = res.content[0].text;
        assert.ok(body.includes("Showing 2 of 5 models"));
        assert.ok(body.includes("openai/gpt-4o"));
        assert.ok(body.includes("anthropic/claude-sonnet-4"));
        // Should NOT include the third model
        assert.ok(!body.includes("google/gemini"));
    });
    it("defaults to 50 when no limit is provided", () => {
        const res = handleListAllModelsPricing(MOCK_MODELS, {});
        // All 5 models should be shown since 5 < 50
        assert.ok(res.content[0].text.includes("Showing 5 of 5 models"));
    });
    it("clamps limit to max 200", () => {
        const res = handleListAllModelsPricing(MOCK_MODELS, { limit: 999 });
        assert.ok(res.content[0].text.includes("Showing 5 of 5 models"));
    });
});
// ─── compare_model_costs ────────────────────────────────────────────────────
describe("handleCompareModelCosts", () => {
    it("compares two models in a table format", () => {
        const res = handleCompareModelCosts(MOCK_MODELS, {
            model_ids: ["openai/gpt-4o", "anthropic/claude-sonnet-4"],
        });
        const body = res.content[0].text;
        assert.ok(body.includes("openai/gpt-4o"));
        assert.ok(body.includes("anthropic/claude-sonnet-4"));
        assert.ok(body.includes("|")); // table format
    });
    it("reports not-found models alongside found ones", () => {
        const res = handleCompareModelCosts(MOCK_MODELS, {
            model_ids: ["openai/gpt-4o", "fake/model"],
        });
        const body = res.content[0].text;
        assert.ok(body.includes("openai/gpt-4o"));
        assert.ok(body.includes("fake/model"));
        assert.ok(body.includes("⚠️"));
    });
    it("returns an error if no models are found", () => {
        const res = handleCompareModelCosts(MOCK_MODELS, {
            model_ids: ["fake/a", "fake/b"],
        });
        assert.equal(res.isError, true);
    });
    it("returns an error for invalid input", () => {
        const res = handleCompareModelCosts(MOCK_MODELS, { model_ids: "not-an-array" });
        assert.equal(res.isError, true);
    });
});
// ─── get_cheapest_models ────────────────────────────────────────────────────
describe("handleGetCheapestModels", () => {
    it("sorts models by prompt cost ascending", () => {
        const res = handleGetCheapestModels(MOCK_MODELS, { limit: 3, metric: "prompt" });
        const body = res.content[0].text;
        const ranked = body.split("\n").filter((line) => line.match(/^\d+\./));
        // First should be the free model (cost 0)
        assert.ok(ranked[0].includes("llama-3-8b"));
        assert.ok(ranked[0].includes("🆓"));
    });
    it("sorts by completion cost if specified", () => {
        const res = handleGetCheapestModels(MOCK_MODELS, { metric: "completion" });
        assert.ok(res.content[0].text.includes("completion"));
    });
    it("defaults to prompt and limit 10", () => {
        const res = handleGetCheapestModels(MOCK_MODELS, {});
        assert.ok(res.content[0].text.includes("prompt cost"));
    });
});
// ─── find_models_by_context_length ──────────────────────────────────────────
describe("handleFindModelsByContextLength", () => {
    it("filters models by min context length", () => {
        const res = handleFindModelsByContextLength(MOCK_MODELS, { min_context_length: 100000 });
        const body = res.content[0].text;
        // Only GPT-4o (128k), Claude (200k), and Gemini (1M) should match
        assert.ok(body.includes("3 models"));
        assert.ok(body.includes("google/gemini")); // should be first (largest context)
    });
    it("returns an error for invalid input", () => {
        const res = handleFindModelsByContextLength(MOCK_MODELS, { min_context_length: -5 });
        assert.equal(res.isError, true);
    });
    it("includes pricing in the output", () => {
        const res = handleFindModelsByContextLength(MOCK_MODELS, { min_context_length: 128000 });
        assert.ok(res.content[0].text.includes("Prompt:"));
    });
});
// ─── TOOL_DEFINITIONS ───────────────────────────────────────────────────────
describe("TOOL_DEFINITIONS", () => {
    it("exports exactly 5 tools", () => {
        assert.equal(TOOL_DEFINITIONS.length, 5);
    });
    it("all tools have a name, description, and inputSchema", () => {
        for (const tool of TOOL_DEFINITIONS) {
            assert.ok(tool.name, "Tool must have a name");
            assert.ok(tool.description, "Tool must have a description");
            assert.ok(tool.inputSchema, "Tool must have an inputSchema");
            assert.equal(tool.inputSchema.type, "object");
        }
    });
    it("tool names match known handler names", () => {
        const expectedNames = [
            "get_model_pricing",
            "list_all_models_pricing",
            "compare_model_costs",
            "get_cheapest_models",
            "find_models_by_context_length",
        ];
        assert.deepEqual(TOOL_DEFINITIONS.map((t) => t.name), expectedNames);
    });
});
package/package.json ADDED
@@ -0,0 +1,50 @@
1
+ {
2
+ "name": "openrouter-pricing-mcp",
3
+ "version": "1.0.0",
4
+ "description": "An MCP server providing live model pricing data from OpenRouter. Query, compare, and discover the cheapest AI models.",
5
+ "type": "module",
6
+ "main": "build/index.js",
7
+ "bin": {
8
+ "openrouter-pricing-mcp": "build/index.js"
9
+ },
10
+ "files": [
11
+ "build",
12
+ "README.md",
13
+ "LICENSE"
14
+ ],
15
+ "scripts": {
16
+ "build": "tsc",
17
+ "start": "node build/index.js",
18
+ "dev": "tsc --watch",
19
+ "test": "npm run build && node --test build/index.test.js",
20
+ "prepublishOnly": "npm run build && npm test"
21
+ },
22
+ "engines": {
23
+ "node": ">=18.0.0"
24
+ },
25
+ "keywords": [
26
+ "mcp",
27
+ "model-context-protocol",
28
+ "openrouter",
29
+ "ai",
30
+ "pricing",
31
+ "llm",
32
+ "claude",
33
+ "gpt",
34
+ "gemini",
35
+ "antigravity"
36
+ ],
37
+ "author": "",
38
+ "license": "MIT",
39
+ "repository": {
40
+ "type": "git",
41
+ "url": "https://github.com/Semicolon-D/openrouter-pricing-mcp.git"
42
+ },
43
+ "dependencies": {
44
+ "@modelcontextprotocol/sdk": "^1.5.0"
45
+ },
46
+ "devDependencies": {
47
+ "@types/node": "^22.0.0",
48
+ "typescript": "^5.0.0"
49
+ }
50
+ }