openrouter-pricing-mcp 1.0.2 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/build/index.js CHANGED
@@ -29,7 +29,6 @@ async function fetchModelsFromAPI() {
29
29
  const headers = {
30
30
  "Content-Type": "application/json",
31
31
  };
32
- // Use API key if provided (optional — the models endpoint is public)
33
32
  const apiKey = process.env.OPENROUTER_API_KEY;
34
33
  if (apiKey) {
35
34
  headers["Authorization"] = `Bearer ${apiKey}`;
@@ -44,20 +43,107 @@ async function fetchModelsFromAPI() {
44
43
  return data.data;
45
44
  }
46
45
  // ─── Helpers ─────────────────────────────────────────────────────────────────
47
- export function formatCost(costStr) {
46
+ const currencyFormat = new Intl.NumberFormat("en-US", {
47
+ style: "currency",
48
+ currency: "USD",
49
+ minimumFractionDigits: 2,
50
+ maximumFractionDigits: 4,
51
+ });
52
+ export function formatCostPer1M(costStr) {
48
53
  const cost = parseFloat(costStr);
49
54
  if (isNaN(cost))
50
55
  return costStr;
51
56
  if (cost === 0)
52
57
  return "FREE";
53
- return `$${cost.toFixed(8)}`;
58
+ return currencyFormat.format(cost * 1_000_000);
59
+ }
60
+ export function formatCostUnit(costStr) {
61
+ const cost = parseFloat(costStr);
62
+ if (isNaN(cost))
63
+ return costStr;
64
+ if (cost === 0)
65
+ return "FREE";
66
+ return currencyFormat.format(cost);
54
67
  }
55
68
  // ─── Tool Handlers ───────────────────────────────────────────────────────────
69
+ export function handleGetModelCapabilities(models, args) {
70
+ const modelId = String(args.model_id ?? "");
71
+ const model = models.find((m) => m.id === modelId);
72
+ if (!model) {
73
+ const fuzzy = models.filter((m) => m.id.toLowerCase().includes(modelId.toLowerCase()));
74
+ if (fuzzy.length > 0) {
75
+ const suggestions = fuzzy.slice(0, 5).map((m) => m.id).join("\n ");
76
+ return {
77
+ content: [
78
+ {
79
+ type: "text",
80
+ text: `Model "${modelId}" not found. Did you mean one of these?\n ${suggestions}`,
81
+ },
82
+ ],
83
+ isError: true,
84
+ };
85
+ }
86
+ return {
87
+ content: [{ type: "text", text: `Model "${modelId}" not found.` }],
88
+ isError: true,
89
+ };
90
+ }
91
+ const inputModalities = model.architecture?.input_modalities ?? [];
92
+ const outputModalities = model.architecture?.output_modalities ?? [];
93
+ const supportedParams = model.supported_parameters ?? [];
94
+ const maxCompletionTokens = model.top_provider?.max_completion_tokens;
95
+ const isModerated = model.top_provider?.is_moderated;
96
+ const tokenizer = model.architecture?.tokenizer;
97
+ const knowledgeCutoff = model.knowledge_cutoff;
98
+ // Derive capability flags from supported_parameters
99
+ const hasToolUse = supportedParams.includes("tools") || supportedParams.includes("tool_choice");
100
+ const hasReasoning = supportedParams.includes("reasoning") || supportedParams.includes("include_reasoning");
101
+ const hasStructuredOutput = supportedParams.includes("structured_outputs") || supportedParams.includes("response_format");
102
+ const hasVision = inputModalities.includes("image");
103
+ const hasAudioInput = inputModalities.includes("audio");
104
+ const hasAudioOutput = outputModalities.includes("audio");
105
+ const hasImageOutput = outputModalities.includes("image");
106
+ const lines = [
107
+ `Model: ${model.name} (${model.id})`,
108
+ ];
109
+ if (model.description) {
110
+ lines.push(``, `── Description ──`, model.description);
111
+ }
112
+ lines.push(``, `── Pricing ──`, `Prompt: ${formatCostPer1M(model.pricing.prompt)} / 1M tokens`, `Completion: ${formatCostPer1M(model.pricing.completion)} / 1M tokens`, model.pricing.image ? `Image: ${formatCostUnit(model.pricing.image)} / image` : "", model.pricing.request ? `Request: ${formatCostUnit(model.pricing.request)} / request` : "", ``, `── Context & Limits ──`, `Context Length: ${model.context_length?.toLocaleString() ?? "N/A"} tokens`);
113
+ if (maxCompletionTokens != null) {
114
+ lines.push(`Max Completion Tokens: ${maxCompletionTokens.toLocaleString()}`);
115
+ }
116
+ lines.push(``, `── Modalities ──`, `Input: ${inputModalities.length > 0 ? inputModalities.join(", ") : "text"}`, `Output: ${outputModalities.length > 0 ? outputModalities.join(", ") : "text"}`, ``, `── Capabilities ──`, `🔧 Tool Use (Function Calling): ${hasToolUse ? "✅ Yes" : "❌ No"}`, `🧠 Reasoning / Thinking: ${hasReasoning ? "✅ Yes" : "❌ No"}`, `📋 Structured Output (JSON): ${hasStructuredOutput ? "✅ Yes" : "❌ No"}`, `👁️ Vision (Image Input): ${hasVision ? "✅ Yes" : "❌ No"}`, `🎤 Audio Input: ${hasAudioInput ? "✅ Yes" : "❌ No"}`, `🔊 Audio Output: ${hasAudioOutput ? "✅ Yes" : "❌ No"}`, `🎨 Image Generation: ${hasImageOutput ? "✅ Yes" : "❌ No"}`);
117
+ if (tokenizer) {
118
+ lines.push(``, `── Technical Details ──`);
119
+ lines.push(`Tokenizer: ${tokenizer}`);
120
+ }
121
+ if (isModerated != null) {
122
+ if (!tokenizer)
123
+ lines.push(``, `── Technical Details ──`);
124
+ lines.push(`Content Moderated: ${isModerated ? "Yes" : "No"}`);
125
+ }
126
+ if (knowledgeCutoff) {
127
+ lines.push(`Knowledge Cutoff: ${knowledgeCutoff}`);
128
+ }
129
+ if (supportedParams.length > 0) {
130
+ lines.push(``, `── Supported Parameters ──`, supportedParams.join(", "));
131
+ }
132
+ // Filter out empty strings from conditional pushes
133
+ const filteredLines = lines.filter((l, i) => !(l === "" && i > 0 && lines[i - 1] === ""));
134
+ return {
135
+ content: [
136
+ {
137
+ type: "text",
138
+ text: filteredLines.join("\n"),
139
+ },
140
+ ],
141
+ };
142
+ }
56
143
  export function handleGetModelPricing(models, args) {
57
144
  const modelId = String(args.model_id ?? "");
58
145
  const model = models.find((m) => m.id === modelId);
59
146
  if (!model) {
60
- // Attempt fuzzy match
61
147
  const fuzzy = models.filter((m) => m.id.toLowerCase().includes(modelId.toLowerCase()));
62
148
  if (fuzzy.length > 0) {
63
149
  const suggestions = fuzzy.slice(0, 5).map((m) => m.id).join("\n ");
@@ -76,18 +162,21 @@ export function handleGetModelPricing(models, args) {
76
162
  isError: true,
77
163
  };
78
164
  }
165
+ const pricingLines = [
166
+ `Model: ${model.name} (${model.id})`,
167
+ ];
168
+ if (model.description) {
169
+ pricingLines.push(``, model.description);
170
+ }
171
+ pricingLines.push(``, `Context Length: ${model.context_length?.toLocaleString() ?? "N/A"} tokens`, `Prompt Cost: ${formatCostPer1M(model.pricing.prompt)} / 1M tokens`, `Completion Cost: ${formatCostPer1M(model.pricing.completion)} / 1M tokens`, `Image Cost: ${formatCostUnit(model.pricing.image ?? "0")} / image`, `Request Cost: ${formatCostUnit(model.pricing.request ?? "0")} / request`);
172
+ if (model.top_provider?.max_completion_tokens) {
173
+ pricingLines.push(`Max Completion Tokens: ${model.top_provider.max_completion_tokens.toLocaleString()}`);
174
+ }
79
175
  return {
80
176
  content: [
81
177
  {
82
178
  type: "text",
83
- text: [
84
- `Model: ${model.name} (${model.id})`,
85
- `Context Length: ${model.context_length?.toLocaleString() ?? "N/A"} tokens`,
86
- `Prompt Cost: ${formatCost(model.pricing.prompt)} / token`,
87
- `Completion Cost: ${formatCost(model.pricing.completion)} / token`,
88
- `Image Cost: ${formatCost(model.pricing.image)} / token`,
89
- `Request Cost: ${formatCost(model.pricing.request)}`,
90
- ].join("\n"),
179
+ text: pricingLines.join("\n"),
91
180
  },
92
181
  ],
93
182
  };
@@ -96,7 +185,7 @@ export function handleListAllModelsPricing(models, args) {
96
185
  const limit = Math.min(Math.max(Number(args.limit) || 50, 1), 200);
97
186
  const results = models.slice(0, limit);
98
187
  const formatted = results
99
- .map((m, i) => `${i + 1}. ${m.id} — Prompt: ${formatCost(m.pricing.prompt)}, Completion: ${formatCost(m.pricing.completion)}`)
188
+ .map((m, i) => `${i + 1}. ${m.id} — Prompt: ${formatCostPer1M(m.pricing.prompt)}/1M, Completion: ${formatCostPer1M(m.pricing.completion)}/1M`)
100
189
  .join("\n");
101
190
  return {
102
191
  content: [
@@ -130,9 +219,23 @@ export function handleCompareModelCosts(models, args) {
130
219
  isError: true,
131
220
  };
132
221
  }
133
- const header = `| Model | Prompt Cost | Completion Cost | Context Length |`;
134
- const separator = `|-------|------------|----------------|----------------|`;
135
- const rows = found.map((m) => `| ${m.id} | ${formatCost(m.pricing.prompt)} | ${formatCost(m.pricing.completion)} | ${m.context_length?.toLocaleString() ?? "N/A"} |`);
222
+ // Helper to derive capability flags
223
+ function getCapFlags(m) {
224
+ const params = m.supported_parameters ?? [];
225
+ const inp = m.architecture?.input_modalities ?? [];
226
+ return {
227
+ tools: params.includes("tools") || params.includes("tool_choice") ? "✅" : "❌",
228
+ reasoning: params.includes("reasoning") || params.includes("include_reasoning") ? "✅" : "❌",
229
+ vision: inp.includes("image") ? "✅" : "❌",
230
+ json: params.includes("structured_outputs") || params.includes("response_format") ? "✅" : "❌",
231
+ };
232
+ }
233
+ const header = `| Model | Prompt/1M | Completion/1M | Context | Tools | Reasoning | Vision | JSON |`;
234
+ const separator = `|-------|----------|--------------|---------|-------|-----------|--------|------|`;
235
+ const rows = found.map((m) => {
236
+ const caps = getCapFlags(m);
237
+ return `| ${m.id} | ${formatCostPer1M(m.pricing.prompt)} | ${formatCostPer1M(m.pricing.completion)} | ${m.context_length?.toLocaleString() ?? "N/A"} | ${caps.tools} | ${caps.reasoning} | ${caps.vision} | ${caps.json} |`;
238
+ });
136
239
  let text = `${header}\n${separator}\n${rows.join("\n")}`;
137
240
  if (notFound.length > 0) {
138
241
  text += `\n\n⚠️ Models not found: ${notFound.join(", ")}`;
@@ -152,13 +255,13 @@ export function handleGetCheapestModels(models, args) {
152
255
  .sort((a, b) => parseFloat(a.pricing[metric]) - parseFloat(b.pricing[metric]));
153
256
  const results = sorted.slice(0, limit);
154
257
  const formatted = results
155
- .map((m, i) => `${i + 1}. ${m.id} — ${metric}: ${formatCost(m.pricing[metric])}${parseFloat(m.pricing[metric]) === 0 ? " 🆓" : ""}`)
258
+ .map((m, i) => `${i + 1}. ${m.id} — ${metric}: ${formatCostPer1M(m.pricing[metric])}/1M${parseFloat(m.pricing[metric]) === 0 ? " 🆓" : ""}`)
156
259
  .join("\n");
157
260
  return {
158
261
  content: [
159
262
  {
160
263
  type: "text",
161
- text: `Top ${results.length} cheapest models by ${metric} cost:\n\n${formatted}`,
264
+ text: `Top ${results.length} cheapest models by ${metric} cost (per 1M tokens):\n\n${formatted}`,
162
265
  },
163
266
  ],
164
267
  };
@@ -177,7 +280,7 @@ export function handleFindModelsByContextLength(models, args) {
177
280
  .sort((a, b) => b.context_length - a.context_length);
178
281
  const results = matching.slice(0, limit);
179
282
  const formatted = results
180
- .map((m, i) => `${i + 1}. ${m.id} — Context: ${m.context_length.toLocaleString()} tokens, Prompt: ${formatCost(m.pricing.prompt)}`)
283
+ .map((m, i) => `${i + 1}. ${m.id} — Context: ${m.context_length.toLocaleString()} tokens, Prompt: ${formatCostPer1M(m.pricing.prompt)}/1M`)
181
284
  .join("\n");
182
285
  return {
183
286
  content: [
@@ -192,7 +295,7 @@ export function handleFindModelsByContextLength(models, args) {
192
295
  export const TOOL_DEFINITIONS = [
193
296
  {
194
297
  name: "get_model_pricing",
195
- description: "Get pricing details for a specific OpenRouter model by its full ID (e.g. google/gemini-2.5-pro-preview). Includes prompt, completion, image, and request costs.",
298
+ description: "Get pricing details and description for a specific OpenRouter model by its full ID (e.g. google/gemini-2.5-pro-preview). Includes prompt, completion, image, and request costs formatted per 1M tokens, plus model description and max output length.",
196
299
  inputSchema: {
197
300
  type: "object",
198
301
  properties: {
@@ -220,7 +323,7 @@ export const TOOL_DEFINITIONS = [
220
323
  },
221
324
  {
222
325
  name: "compare_model_costs",
223
- description: "Compare costs between multiple OpenRouter models side-by-side in a table format.",
326
+ description: "Compare costs and capabilities between multiple OpenRouter models side-by-side in a table format. Shows pricing, context length, and key capability flags (tools, reasoning, vision, JSON output).",
224
327
  inputSchema: {
225
328
  type: "object",
226
329
  properties: {
@@ -270,6 +373,20 @@ export const TOOL_DEFINITIONS = [
270
373
  required: ["min_context_length"],
271
374
  },
272
375
  },
376
+ {
377
+ name: "get_model_capabilities",
378
+ description: "Get a comprehensive model card for a specific OpenRouter model. Includes description, pricing, context limits, input/output modalities, capability flags (tool use, reasoning, vision, structured output, audio), technical details, and all supported parameters.",
379
+ inputSchema: {
380
+ type: "object",
381
+ properties: {
382
+ model_id: {
383
+ type: "string",
384
+ description: "The full ID of the model (e.g. openai/gpt-4o, anthropic/claude-sonnet-4)",
385
+ },
386
+ },
387
+ required: ["model_id"],
388
+ },
389
+ },
273
390
  ];
274
391
  // ─── Server Setup ────────────────────────────────────────────────────────────
275
392
  const HANDLER_MAP = {
@@ -278,9 +395,10 @@ const HANDLER_MAP = {
278
395
  compare_model_costs: handleCompareModelCosts,
279
396
  get_cheapest_models: handleGetCheapestModels,
280
397
  find_models_by_context_length: handleFindModelsByContextLength,
398
+ get_model_capabilities: handleGetModelCapabilities,
281
399
  };
282
400
  export function createServer() {
283
- const server = new Server({ name: "openrouter-pricing-mcp", version: "1.0.0" }, { capabilities: { tools: {} } });
401
+ const server = new Server({ name: "openrouter-pricing-mcp", version: "1.2.0" }, { capabilities: { tools: {} } });
284
402
  server.setRequestHandler(ListToolsRequestSchema, async () => ({
285
403
  tools: TOOL_DEFINITIONS,
286
404
  }));
@@ -1,19 +1,45 @@
1
1
  import { describe, it } from "node:test";
2
2
  import assert from "node:assert/strict";
3
- import { formatCost, handleGetModelPricing, handleListAllModelsPricing, handleCompareModelCosts, handleGetCheapestModels, handleFindModelsByContextLength, TOOL_DEFINITIONS, } from "./index.js";
3
+ import { formatCostPer1M, formatCostUnit, handleGetModelPricing, handleListAllModelsPricing, handleCompareModelCosts, handleGetCheapestModels, handleFindModelsByContextLength, handleGetModelCapabilities, TOOL_DEFINITIONS, } from "./index.js";
4
4
  // ─── Fixture Data ────────────────────────────────────────────────────────────
5
5
  const MOCK_MODELS = [
6
6
  {
7
7
  id: "openai/gpt-4o",
8
8
  name: "GPT-4o",
9
+ description: "GPT-4o is OpenAI's versatile flagship model with vision capabilities and strong reasoning.",
9
10
  pricing: { prompt: "0.0000025", completion: "0.00001", request: "0", image: "0.003613" },
10
11
  context_length: 128000,
12
+ architecture: {
13
+ modality: "text+image->text",
14
+ input_modalities: ["text", "image"],
15
+ output_modalities: ["text"],
16
+ tokenizer: "GPT",
17
+ },
18
+ top_provider: {
19
+ context_length: 128000,
20
+ max_completion_tokens: 16384,
21
+ is_moderated: true,
22
+ },
23
+ supported_parameters: ["max_tokens", "temperature", "tools", "tool_choice", "response_format", "structured_outputs"],
11
24
  },
12
25
  {
13
26
  id: "anthropic/claude-sonnet-4",
14
27
  name: "Claude Sonnet 4",
28
+ description: "Claude Sonnet 4 excels in coding and reasoning with improved precision and controllability.",
15
29
  pricing: { prompt: "0.000003", completion: "0.000015", request: "0", image: "0.0048" },
16
30
  context_length: 200000,
31
+ architecture: {
32
+ modality: "text+image->text",
33
+ input_modalities: ["text", "image"],
34
+ output_modalities: ["text"],
35
+ tokenizer: "Claude",
36
+ },
37
+ top_provider: {
38
+ context_length: 200000,
39
+ max_completion_tokens: 128000,
40
+ is_moderated: true,
41
+ },
42
+ supported_parameters: ["include_reasoning", "max_tokens", "reasoning", "response_format", "tools", "tool_choice", "structured_outputs"],
17
43
  },
18
44
  {
19
45
  id: "google/gemini-2.5-pro-preview",
@@ -35,18 +61,25 @@ const MOCK_MODELS = [
35
61
  },
36
62
  ];
37
63
  // ─── formatCost ──────────────────────────────────────────────────────────────
38
- describe("formatCost", () => {
39
- it("formats a normal cost", () => {
40
- assert.equal(formatCost("0.000003"), "$0.00000300");
64
+ describe("formatCostPer1M", () => {
65
+ it("formats a normal cost per 1M tokens properly", () => {
66
+ // 0.0000025 * 1M = 2.50
67
+ assert.equal(formatCostPer1M("0.0000025"), "$2.50");
41
68
  });
42
69
  it("returns FREE for zero-cost", () => {
43
- assert.equal(formatCost("0"), "FREE");
70
+ assert.equal(formatCostPer1M("0"), "FREE");
44
71
  });
45
72
  it("handles NaN gracefully by returning original string", () => {
46
- assert.equal(formatCost("N/A"), "N/A");
73
+ assert.equal(formatCostPer1M("N/A"), "N/A");
47
74
  });
48
75
  it("handles very small costs accurately", () => {
49
- assert.equal(formatCost("0.0000001"), "$0.00000010");
76
+ // 0.0000001 * 1M = 0.10
77
+ assert.equal(formatCostPer1M("0.0000001"), "$0.10");
78
+ });
79
+ });
80
+ describe("formatCostUnit", () => {
81
+ it("formats a per-unit cost nicely", () => {
82
+ assert.equal(formatCostUnit("0.003613"), "$0.0036");
50
83
  });
51
84
  });
52
85
  // ─── get_model_pricing ──────────────────────────────────────────────────────
@@ -58,6 +91,9 @@ describe("handleGetModelPricing", () => {
58
91
  assert.ok(text.includes("GPT-4o"));
59
92
  assert.ok(text.includes("openai/gpt-4o"));
60
93
  assert.ok(text.includes("128,000"));
94
+ assert.ok(text.includes("$2.50 / 1M")); // Checking the 1M formatting
95
+ assert.ok(text.includes("versatile flagship")); // description included
96
+ assert.ok(text.includes("16,384")); // max completion tokens
61
97
  });
62
98
  it("returns fuzzy suggestions for a partial match", () => {
63
99
  const result = handleGetModelPricing(MOCK_MODELS, { model_id: "gpt-4" });
@@ -105,6 +141,12 @@ describe("handleCompareModelCosts", () => {
105
141
  assert.ok(text.includes("openai/gpt-4o"));
106
142
  assert.ok(text.includes("anthropic/claude-sonnet-4"));
107
143
  assert.ok(text.includes("|")); // table format
144
+ // Should include capability columns
145
+ assert.ok(text.includes("Tools"));
146
+ assert.ok(text.includes("Reasoning"));
147
+ assert.ok(text.includes("Vision"));
148
+ assert.ok(text.includes("JSON"));
149
+ assert.ok(text.includes("✅")); // Both have tools
108
150
  });
109
151
  it("reports not-found models alongside found ones", () => {
110
152
  const result = handleCompareModelCosts(MOCK_MODELS, {
@@ -144,8 +186,8 @@ describe("handleGetCheapestModels", () => {
144
186
  it("defaults to prompt and limit 10", () => {
145
187
  const result = handleGetCheapestModels(MOCK_MODELS, {});
146
188
  const text = result.content[0].text;
147
- assert.ok(text.includes("prompt cost"));
148
- });
189
+ assert.ok(text.includes("prompt"));
190
+ }); // The default metric is "prompt", rendered as `prompt: $… /1M`, so the text is expected to include "prompt".
149
191
  });
150
192
  // ─── find_models_by_context_length ──────────────────────────────────────────
151
193
  describe("handleFindModelsByContextLength", () => {
@@ -166,10 +208,68 @@ describe("handleFindModelsByContextLength", () => {
166
208
  assert.ok(text.includes("Prompt:"));
167
209
  });
168
210
  });
211
+ // ─── get_model_capabilities ─────────────────────────────────────────────────
212
+ describe("handleGetModelCapabilities", () => {
213
+ it("returns capabilities for a known model with full metadata", () => {
214
+ const result = handleGetModelCapabilities(MOCK_MODELS, { model_id: "openai/gpt-4o" });
215
+ assert.equal(result.isError, undefined);
216
+ const text = result.content[0].text;
217
+ assert.ok(text.includes("GPT-4o"));
218
+ // Description section
219
+ assert.ok(text.includes("Description"));
220
+ assert.ok(text.includes("versatile flagship"));
221
+ // Pricing section
222
+ assert.ok(text.includes("Pricing"));
223
+ assert.ok(text.includes("$2.50 / 1M tokens"));
224
+ // Capabilities
225
+ assert.ok(text.includes("Tool Use"));
226
+ assert.ok(text.includes("✅ Yes")); // tool use should be yes
227
+ assert.ok(text.includes("Vision"));
228
+ assert.ok(text.includes("✅ Yes")); // vision should be yes (image input)
229
+ assert.ok(text.includes("Reasoning"));
230
+ assert.ok(text.includes("❌ No")); // GPT-4o doesn't have reasoning
231
+ assert.ok(text.includes("Tokenizer: GPT"));
232
+ assert.ok(text.includes("16,384")); // max completion tokens
233
+ assert.ok(text.includes("Content Moderated: Yes"));
234
+ });
235
+ it("returns capabilities for Claude with reasoning support", () => {
236
+ const result = handleGetModelCapabilities(MOCK_MODELS, { model_id: "anthropic/claude-sonnet-4" });
237
+ const text = result.content[0].text;
238
+ assert.ok(text.includes("Reasoning"));
239
+ assert.ok(text.includes("✅ Yes")); // Claude has reasoning
240
+ assert.ok(text.includes("Tokenizer: Claude"));
241
+ assert.ok(text.includes("128,000")); // max completion tokens
242
+ });
243
+ it("handles models with minimal metadata gracefully", () => {
244
+ const result = handleGetModelCapabilities(MOCK_MODELS, { model_id: "meta-llama/llama-3-8b-instruct:free" });
245
+ assert.equal(result.isError, undefined);
246
+ const text = result.content[0].text;
247
+ assert.ok(text.includes("Llama 3 8B"));
248
+ // Should show all capabilities as No since no supported_parameters
249
+ assert.ok(text.includes("❌ No"));
250
+ });
251
+ it("returns fuzzy suggestions for partial match", () => {
252
+ const result = handleGetModelCapabilities(MOCK_MODELS, { model_id: "gpt-4" });
253
+ assert.equal(result.isError, true);
254
+ assert.ok(result.content[0].text.includes("Did you mean"));
255
+ });
256
+ it("returns error for unknown model", () => {
257
+ const result = handleGetModelCapabilities(MOCK_MODELS, { model_id: "nonexistent/xyz" });
258
+ assert.equal(result.isError, true);
259
+ assert.ok(result.content[0].text.includes("not found"));
260
+ });
261
+ it("shows supported parameters list", () => {
262
+ const result = handleGetModelCapabilities(MOCK_MODELS, { model_id: "openai/gpt-4o" });
263
+ const text = result.content[0].text;
264
+ assert.ok(text.includes("Supported Parameters"));
265
+ assert.ok(text.includes("max_tokens"));
266
+ assert.ok(text.includes("tools"));
267
+ });
268
+ });
169
269
  // ─── TOOL_DEFINITIONS ───────────────────────────────────────────────────────
170
270
  describe("TOOL_DEFINITIONS", () => {
171
- it("exports exactly 5 tools", () => {
172
- assert.equal(TOOL_DEFINITIONS.length, 5);
271
+ it("exports exactly 6 tools", () => {
272
+ assert.equal(TOOL_DEFINITIONS.length, 6);
173
273
  });
174
274
  it("all tools have a name, description, and inputSchema", () => {
175
275
  for (const tool of TOOL_DEFINITIONS) {
@@ -186,6 +286,7 @@ describe("TOOL_DEFINITIONS", () => {
186
286
  "compare_model_costs",
187
287
  "get_cheapest_models",
188
288
  "find_models_by_context_length",
289
+ "get_model_capabilities",
189
290
  ];
190
291
  const actualNames = TOOL_DEFINITIONS.map((t) => t.name);
191
292
  assert.deepEqual(actualNames, knownNames);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "openrouter-pricing-mcp",
3
- "version": "1.0.2",
3
+ "version": "1.2.0",
4
4
  "description": "An MCP server providing live model pricing data from OpenRouter. Query, compare, and discover the cheapest AI models.",
5
5
  "type": "module",
6
6
  "main": "build/index.js",