converse-mcp-server 1.5.4 → 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +10 -2
- package/docs/PROVIDERS.md +53 -11
- package/package.json +1 -1
- package/src/config.js +2 -0
- package/src/providers/anthropic.js +6 -15
- package/src/providers/openrouter-endpoints-client.js +216 -0
- package/src/providers/openrouter.js +154 -6
- package/src/tools/chat.js +27 -4
- package/src/tools/consensus.js +27 -4
package/README.md
CHANGED
@@ -67,6 +67,8 @@ MAX_MCP_OUTPUT_TOKENS=200000
 # Optional: Provider-specific settings
 XAI_BASE_URL=https://api.x.ai/v1
 OPENROUTER_REFERER=https://github.com/FallDownTheSystem/converse
+OPENROUTER_TITLE=YourAppName
+OPENROUTER_DYNAMIC_MODELS=true
 ```

 ### 2. Get API Keys
@@ -102,6 +104,8 @@ There are several ways to add the Converse MCP Server to Claude:
         "DEEPSEEK_API_KEY": "your_key_here",
         "OPENROUTER_API_KEY": "your_key_here",
         "OPENROUTER_REFERER": "https://github.com/YourUsername/YourApp",
+        "OPENROUTER_DYNAMIC_MODELS": "true",
+        "OPENROUTER_TITLE": "YourAppName",
         "MAX_MCP_OUTPUT_TOKENS": "200000"
       }
     }
@@ -128,6 +132,8 @@ There are several ways to add the Converse MCP Server to Claude:
         "DEEPSEEK_API_KEY": "your_key_here",
         "OPENROUTER_API_KEY": "your_key_here",
         "OPENROUTER_REFERER": "https://github.com/YourUsername/YourApp",
+        "OPENROUTER_DYNAMIC_MODELS": "true",
+        "OPENROUTER_TITLE": "YourAppName",
         "MAX_MCP_OUTPUT_TOKENS": "200000"
       }
     }
@@ -197,13 +203,15 @@ For more detailed instructions, see the [official MCP configuration guide](https

 ### 1. Chat Tool

-General conversational AI with context and continuation support.
+General conversational AI with context and continuation support. Supports automatic model routing - simple names route by keyword, slash format (e.g., `anthropic/claude-3.5-sonnet`) checks native providers first before routing to OpenRouter.

 ```javascript
 // Example usage
 {
   "prompt": "How should I structure the authentication module for this Express.js API?",
-  "model": "gemini-2.5-flash",
+  "model": "gemini-2.5-flash", // Routes to Google
+  // "model": "anthropic/claude-3.5-sonnet", // Routes to OpenRouter (if enabled)
+  // "model": "openrouter/auto", // Auto-select best model
   "files": ["/path/to/src/auth.js", "/path/to/config.json"],
   "images": ["/path/to/architecture.png"],
   "temperature": 0.5,
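The two new variables surface as OpenRouter request headers. A minimal sketch of the mapping, mirroring `getCustomHeaders` in `package/src/providers/openrouter.js` later in this diff:

```javascript
// OPENROUTER_REFERER becomes the required HTTP-Referer header;
// OPENROUTER_TITLE becomes the optional X-Title tracking header.
const headers = {
  'HTTP-Referer': process.env.OPENROUTER_REFERER // required by OpenRouter
};
if (process.env.OPENROUTER_TITLE) {
  headers['X-Title'] = process.env.OPENROUTER_TITLE;
}
```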
package/docs/PROVIDERS.md
CHANGED
@@ -67,12 +67,20 @@ This guide documents all supported AI providers in the Converse MCP Server and t
 - **Environment Variables**:
   - `OPENROUTER_API_KEY` - Your API key
   - `OPENROUTER_REFERER` - Required referer URL (e.g., your GitHub repo)
--
+  - `OPENROUTER_TITLE` - Optional title for request tracking
+  - `OPENROUTER_DYNAMIC_MODELS` - Enable dynamic model discovery (default: false)
+- **Static Models**: Pre-configured models available without dynamic discovery
+  - `qwen/qwen3-235b-a22b-thinking-2507` - Qwen3 235B with thinking capabilities
+  - `qwen/qwen3-coder` - Qwen3 specialized for coding
+  - `moonshotai/kimi-k2` - Kimi K2 with 200K context window
+  - `openrouter/auto` - Auto-selects best model using NotDiamond routing
+- **Dynamic Models**: When `OPENROUTER_DYNAMIC_MODELS=true`, any model in `provider/model` format
   - `anthropic/claude-3.5-sonnet`
   - `openai/gpt-4-turbo`
   - `google/gemini-pro`
   - `mistralai/mistral-large`
   - `meta-llama/llama-3.1-405b-instruct`
+  - And many more - see [openrouter.ai/models](https://openrouter.ai/models)

 ## Configuration Examples

@@ -87,6 +95,10 @@ DEEPSEEK_API_KEY=your_deepseek_key_here
 # OpenRouter requires both API key and referer
 OPENROUTER_API_KEY=sk-or-your_key_here
 OPENROUTER_REFERER=https://github.com/YourUsername/YourApp
+# Optional: Enable dynamic model discovery to use any OpenRouter model
+OPENROUTER_DYNAMIC_MODELS=true
+# Optional: Add title for request tracking
+OPENROUTER_TITLE=YourAppName
 ```

 ### Claude Configuration (claude_desktop_config.json)
@@ -102,7 +114,9 @@ OPENROUTER_REFERER=https://github.com/YourUsername/YourApp
       "MISTRAL_API_KEY": "your_key_here",
       "DEEPSEEK_API_KEY": "your_key_here",
       "OPENROUTER_API_KEY": "your_key_here",
-      "OPENROUTER_REFERER": "https://github.com/YourUsername/YourApp"
+      "OPENROUTER_REFERER": "https://github.com/YourUsername/YourApp",
+      "OPENROUTER_DYNAMIC_MODELS": "true",
+      "OPENROUTER_TITLE": "YourAppName"
     }
   }
 }
@@ -133,17 +147,38 @@ All providers support streaming responses for real-time output.

 When using the chat or consensus tools, specify models using their identifiers:

+### Model Routing Logic
+
+1. **Simple Names**: Models without "/" are routed by keyword matching:
+   - Contains "gpt", "o1", "o3", "o4" → OpenAI
+   - Contains "claude", "opus", "sonnet", "haiku" → Anthropic
+   - Contains "gemini", "flash", "pro" → Google
+   - Contains "grok" → X.AI
+   - Contains "mistral", "magistral" → Mistral
+   - Contains "deepseek", "reasoner", "r1" → DeepSeek
+   - Contains "qwen", "kimi", "k2" → OpenRouter
+
+2. **Slash Format**: Models with "/" check native providers first:
+   - If exact model exists in a native provider → Routes to that provider
+   - If not found in any native provider → Routes to OpenRouter
+   - This allows using models like "anthropic/claude-3.5-sonnet" via OpenRouter
+
+3. **OpenRouter Auto**: Special aliases route to OpenRouter's auto-selection:
+   - "openrouter/auto", "openrouter auto", "auto router", "auto-router"
+
 ```javascript
 // Chat tool examples
 {
-  "model": "gpt-4o", // OpenAI
-  "model": "claude-opus-4", // Anthropic (auto-resolves)
-  "model": "sonnet", // Anthropic (
-  "model": "gemini-2.5-pro", // Google
-  "model": "grok-4-0709", // X.AI
-  "model": "mistral-large", // Mistral
-  "model": "deepseek-chat", // DeepSeek
-  "model": "anthropic/claude-3.5-sonnet" // OpenRouter
+  "model": "gpt-4o", // OpenAI (keyword match)
+  "model": "claude-opus-4", // Anthropic (keyword match, auto-resolves)
+  "model": "sonnet", // Anthropic (keyword match)
+  "model": "gemini-2.5-pro", // Google (keyword match)
+  "model": "grok-4-0709", // X.AI (keyword match)
+  "model": "mistral-large", // Mistral (keyword match)
+  "model": "deepseek-chat", // DeepSeek (keyword match)
+  "model": "anthropic/claude-3.5-sonnet", // OpenRouter (slash format, not in Anthropic)
+  "model": "qwen/qwen3-coder", // OpenRouter (static model)
+  "model": "openrouter/auto" // OpenRouter auto-selection
 }

 // Consensus tool with multiple providers
@@ -176,4 +211,11 @@ When using the chat or consensus tools, specify models using their identifiers:
 ### OpenRouter Compliance
 - The `OPENROUTER_REFERER` header is **required**
 - Use your application URL or GitHub repository
-- This helps OpenRouter track usage for compliance
+- This helps OpenRouter track usage for compliance
+
+### OpenRouter Dynamic Models
+- Enable with `OPENROUTER_DYNAMIC_MODELS=true`
+- First request to a new model may be slower (fetches capabilities)
+- Model capabilities are cached for 24 hours
+- Use any model from [openrouter.ai/models](https://openrouter.ai/models)
+- Models must use `provider/model` format (e.g., `meta-llama/llama-3.2-90b`)
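The routing rules documented above correspond to the `mapModelToProvider(model, providers)` helper updated in `package/src/tools/chat.js` and `consensus.js` later in this diff; a sketch of the expected outcomes (model names illustrative):

```javascript
mapModelToProvider('gpt-4o', providers);                      // 'openai'     (keyword "gpt")
mapModelToProvider('kimi', providers);                        // 'openrouter' (keyword "kimi")
mapModelToProvider('openrouter-auto', providers);             // 'openrouter' (auto alias)
mapModelToProvider('anthropic/claude-3.5-sonnet', providers); // 'openrouter' (slash ID, no native static match)
```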
package/package.json
CHANGED
package/src/config.js
CHANGED
@@ -86,6 +86,8 @@ const CONFIG_SCHEMA = {
   // Provider-specific configuration
   providers: {
     OPENROUTER_REFERER: { type: 'string', required: false, description: 'OpenRouter referer header for compliance' },
+    OPENROUTER_TITLE: { type: 'string', required: false, description: 'OpenRouter X-Title header for request tracking' },
+    OPENROUTER_DYNAMIC_MODELS: { type: 'boolean', default: false, description: 'Enable dynamic model discovery via OpenRouter endpoints API' },
   },


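The loader that turns these schema entries into runtime config is not part of this diff, but `openrouter.js` below reads the keys lowercased under `config.providers`, so the parsed result is expected to look roughly like this sketch (env names come from the schema; the string-to-boolean coercion is an assumption):

```javascript
// Hypothetical parsed shape consumed by the OpenRouter provider.
const config = {
  providers: {
    openrouterreferer: process.env.OPENROUTER_REFERER,
    openroutertitle: process.env.OPENROUTER_TITLE,
    // boolean entry: defaults to false when the variable is unset
    openrouterdynamicmodels: process.env.OPENROUTER_DYNAMIC_MODELS === 'true'
  }
};
```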
package/src/providers/anthropic.js
CHANGED
@@ -364,7 +364,9 @@ export const anthropicProvider = {
   apiKey: config.apiKeys.anthropic,
   defaultHeaders: {
     'anthropic-beta': betaHeaders.join(',')
-  }
+  },
+  // Increase timeout to 20 minutes for thinking models that may take longer
+  timeout: 20 * 60 * 1000
 });

 // Convert messages to Anthropic format (system messages are always cached)
@@ -383,15 +385,13 @@ export const anthropicProvider = {
   requestPayload.system = systemPrompt;
 }

-//
-// For Claude 4 series models, let the SDK use its defaults (32k for opus, 64k for sonnet)
+// Set max tokens - API requires this field
 if (maxTokens) {
   requestPayload.max_tokens = Math.min(maxTokens, modelConfig.maxOutputTokens || 8192);
-} else
-//
+} else {
+  // Use model's default max output tokens
   requestPayload.max_tokens = modelConfig.maxOutputTokens || 8192;
 }
-// For 4 series models without explicit maxTokens, don't set max_tokens - let SDK use defaults

 // Add thinking configuration for models that support it
 if (modelConfig.supportsThinking && reasoning_effort) {
@@ -523,15 +523,6 @@ export const anthropicProvider = {
   debugError(`[Anthropic] Error message:`, error.message);
   debugError(`[Anthropic] Error response:`, error.response);
   throw new AnthropicProviderError(`Context length exceeded for model: ${error.message}`, ErrorCodes.CONTEXT_LENGTH_EXCEEDED, error);
-} else if (error.message?.includes('Streaming is strongly recommended')) {
-  // This is just a warning from the SDK about long requests
-  debugLog(`[Anthropic] SDK streaming recommendation warning`);
-  debugError(`[Anthropic] Full error object:`, error);
-  // Check if there's an actual error response
-  if (error.response || error.status) {
-    debugError(`[Anthropic] Error response status:`, error.status);
-    debugError(`[Anthropic] Error response data:`, error.response);
-  }
 }

 // Generic error handling
package/src/providers/openrouter-endpoints-client.js
ADDED
@@ -0,0 +1,216 @@
+/**
+ * OpenRouter Endpoints API Client
+ *
+ * Handles fetching model capabilities from OpenRouter's endpoints API.
+ * Provides caching and error handling for dynamic model discovery.
+ */
+
+import { debugLog, debugError } from '../utils/console.js';
+
+/**
+ * Parse an OpenRouter model ID into author and slug components
+ * @param {string} modelId - Model ID in format "author/slug"
+ * @returns {{author: string, slug: string} | null} Parsed components or null if invalid
+ */
+function parseModelId(modelId) {
+  if (!modelId || typeof modelId !== 'string') {
+    return null;
+  }
+
+  const parts = modelId.split('/');
+  if (parts.length !== 2) {
+    return null;
+  }
+
+  const [author, slug] = parts;
+  if (!author || !slug) {
+    return null;
+  }
+
+  return { author, slug };
+}
+
+/**
+ * Convert endpoint data to model configuration format
+ * @param {Object} endpointData - Raw endpoint data from API
+ * @returns {Object} Model configuration object
+ */
+function convertEndpointToModelConfig(endpointData) {
+  const data = endpointData.data;
+  const modelId = data.id;
+
+  // Find the best endpoint (prefer primary providers)
+  const preferredProviders = ['Anthropic', 'OpenAI', 'Google', 'XAI'];
+  let selectedEndpoint = data.endpoints[0]; // Default to first
+
+  for (const endpoint of data.endpoints) {
+    if (preferredProviders.includes(endpoint.provider_name)) {
+      selectedEndpoint = endpoint;
+      break;
+    }
+  }
+
+  // Extract supported parameters
+  const supportedParams = selectedEndpoint.supported_parameters || [];
+
+  return {
+    modelName: modelId,
+    friendlyName: data.name || `${modelId} (via OpenRouter)`,
+    description: data.description || `Dynamic model: ${modelId}`,
+    contextWindow: selectedEndpoint.context_length || 8192,
+    maxOutputTokens: selectedEndpoint.max_completion_tokens || 4096,
+    supportsStreaming: true, // Most models support streaming
+    supportsImages: data.architecture?.input_modalities?.includes('image') || false,
+    supportsTemperature: supportedParams.includes('temperature'),
+    supportsWebSearch: false, // Not in API response, conservative default
+    supportsThinking: supportedParams.includes('reasoning'),
+    supportsTools: supportedParams.includes('tools'),
+    timeout: 300000, // 5 minutes default
+    isDynamic: true,
+    // Store additional metadata
+    metadata: {
+      architecture: data.architecture,
+      endpoints: data.endpoints,
+      pricing: selectedEndpoint.pricing,
+      selectedProvider: selectedEndpoint.provider_name,
+      maxPromptTokens: selectedEndpoint.max_prompt_tokens
+    }
+  };
+}
+
+/**
+ * Fetch model endpoints from OpenRouter API
+ * @param {string} modelId - Model ID in format "author/slug"
+ * @returns {Promise<Object|null>} Model configuration or null if not found
+ */
+export async function fetchModelEndpoints(modelId) {
+  const parsed = parseModelId(modelId);
+  if (!parsed) {
+    debugLog(`[OpenRouter Endpoints] Invalid model ID format: ${modelId}`);
+    return null;
+  }
+
+  const { author, slug } = parsed;
+  const url = `https://openrouter.ai/api/v1/models/${author}/${slug}/endpoints`;
+
+  try {
+    debugLog(`[OpenRouter Endpoints] Fetching endpoints for ${modelId}`);
+
+    const response = await fetch(url, {
+      method: 'GET',
+      headers: {
+        'Accept': 'application/json'
+      }
+    });
+
+    if (response.status === 404) {
+      debugLog(`[OpenRouter Endpoints] Model not found: ${modelId}`);
+      return null;
+    }
+
+    if (!response.ok) {
+      throw new Error(`HTTP ${response.status}: ${response.statusText}`);
+    }
+
+    const data = await response.json();
+
+    // Validate response structure
+    if (!data?.data?.id || !data?.data?.endpoints?.length) {
+      debugLog(`[OpenRouter Endpoints] Invalid response structure for ${modelId}`);
+      return null;
+    }
+
+    const modelConfig = convertEndpointToModelConfig(data);
+    debugLog(`[OpenRouter Endpoints] Successfully fetched config for ${modelId}`);
+
+    return modelConfig;
+
+  } catch (error) {
+    debugError(`[OpenRouter Endpoints] Error fetching ${modelId}:`, error);
+    return null;
+  }
+}
+
+/**
+ * Create a simple in-memory cache for model endpoints
+ */
+export function createEndpointsCache() {
+  const cache = new Map();
+  const DEFAULT_TTL = 24 * 60 * 60 * 1000; // 24 hours
+  const FAILED_TTL = 5 * 60 * 1000; // 5 minutes for failed requests
+
+  return {
+    /**
+     * Get a cached value
+     * @param {string} key - Cache key
+     * @returns {{found: boolean, value: any}} Cache result
+     */
+    get(key) {
+      const entry = cache.get(key);
+      if (!entry) {
+        return { found: false, value: null };
+      }
+
+      if (Date.now() > entry.expiry) {
+        cache.delete(key);
+        return { found: false, value: null };
+      }
+
+      return { found: true, value: entry.value };
+    },
+
+    /**
+     * Set a cached value
+     * @param {string} key - Cache key
+     * @param {Object|null} value - Value to cache
+     * @param {boolean} isFailure - Whether this is a failed request
+     */
+    set(key, value, isFailure = false) {
+      const ttl = isFailure ? FAILED_TTL : DEFAULT_TTL;
+      cache.set(key, {
+        value,
+        expiry: Date.now() + ttl
+      });
+    },

+    /**
+     * Clear the entire cache
+     */
+    clear() {
+      cache.clear();
+    },
+
+    /**
+     * Get cache size
+     * @returns {number} Number of cached entries
+     */
+    size() {
+      return cache.size;
+    }
+  };
+}
+
+// Create a singleton cache instance
+export const endpointsCache = createEndpointsCache();
+
+/**
+ * Fetch model endpoints with caching
+ * @param {string} modelId - Model ID in format "author/slug"
+ * @returns {Promise<Object|null>} Model configuration or null if not found
+ */
+export async function fetchModelEndpointsWithCache(modelId) {
+  // Check cache first
+  const cached = endpointsCache.get(modelId);
+  if (cached.found) {
+    debugLog(`[OpenRouter Endpoints] Using cached config for ${modelId}`);
+    return cached.value;
+  }
+
+  // Fetch from API
+  const config = await fetchModelEndpoints(modelId);
+
+  // Cache the result (including null for not found)
+  endpointsCache.set(modelId, config, config === null);
+
+  return config;
+}
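A brief usage sketch of the new client (both exports are defined above; the model ID is illustrative):

```javascript
import { fetchModelEndpointsWithCache, endpointsCache } from './openrouter-endpoints-client.js';

// First call hits https://openrouter.ai/api/v1/models/{author}/{slug}/endpoints;
// a repeat call within 24 hours is served from the in-memory cache.
const cfg = await fetchModelEndpointsWithCache('meta-llama/llama-3.1-405b-instruct');
if (cfg) {
  console.log(cfg.contextWindow, cfg.supportsThinking, cfg.metadata.selectedProvider);
}

// Not-found and error results are cached as null, but only for 5 minutes (FAILED_TTL).
console.log(endpointsCache.size()); // 1
```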
package/src/providers/openrouter.js
CHANGED
@@ -11,6 +11,7 @@
 import { createOpenAICompatibleProvider } from './openai-compatible.js';
 import { debugLog } from '../utils/console.js';
 import { ProviderError, ErrorCodes } from './interface.js';
+import { fetchModelEndpointsWithCache } from './openrouter-endpoints-client.js';

 // Define supported OpenRouter models with their capabilities
 // Only including the three specific models requested
@@ -54,6 +55,19 @@ const SUPPORTED_MODELS = {
     timeout: 300000,
     description: 'Moonshot AI Kimi K2 with extended context window',
     aliases: ['kimi-k2', 'moonshot-kimi', 'kimi k2', 'kimi', 'moonshot kimi', 'moonshot-k2', 'k2']
+  },
+  'openrouter/auto': {
+    modelName: 'openrouter/auto',
+    friendlyName: 'OpenRouter Auto (via NotDiamond)',
+    contextWindow: 128000, // Safe default for auto-routing
+    maxOutputTokens: 8192, // Safe default
+    supportsStreaming: true,
+    supportsImages: false, // Conservative default
+    supportsTemperature: true,
+    supportsWebSearch: false,
+    timeout: 300000,
+    description: 'Auto-selects the best model for your prompt using NotDiamond routing',
+    aliases: ['openrouter auto', 'auto router', 'auto-router', 'openrouter-auto']
   }
 };

@@ -84,12 +98,14 @@ function getCustomHeaders(config) {
   const headers = {};

   // REQUIRED: HTTP-Referer header for compliance
-  //
-  const referer = config?.providers?.openrouterreferer ||
+  // Handle both camelCase (from tests) and lowercase (from config.js) keys
+  const referer = config?.providers?.openrouterreferer ||
+    config?.providers?.openrouterReferer ||
+    'https://github.com/FallDownTheSystem/converse';
   headers['HTTP-Referer'] = referer;

   // Optional: X-Title header for request tracking
-  const title = config?.providers?.openroutertitle;
+  const title = config?.providers?.openroutertitle || config?.providers?.openrouterTitle;
   if (title) {
     headers['X-Title'] = title;
   }
@@ -169,18 +185,150 @@ export const openrouterProvider = createOpenAICompatibleProvider({
   }
 });

-
+/**
+ * Check if a model string follows OpenRouter's provider/model format
+ */
+function isOpenRouterModelFormat(modelName) {
+  return typeof modelName === 'string' && modelName.includes('/');
+}
+
+/**
+ * Create a dynamic model configuration from minimal information
+ */
+function createDynamicModelConfig(modelName) {
+  return {
+    modelName,
+    friendlyName: `${modelName} (via OpenRouter)`,
+    contextWindow: 8192, // Safe default
+    maxOutputTokens: 4096, // Safe default
+    supportsStreaming: true,
+    supportsImages: false, // Conservative default
+    supportsTemperature: true,
+    supportsWebSearch: false,
+    timeout: 300000,
+    description: `Dynamic model: ${modelName}`,
+    isDynamic: true // Flag to identify dynamic models
+  };
+}
+
+// Store for dynamically discovered models
+const dynamicModels = new Map();
+
+// Override methods to support dynamic models
+const originalGetSupportedModels = openrouterProvider.getSupportedModels;
+openrouterProvider.getSupportedModels = function() {
+  const staticModels = originalGetSupportedModels.call(this);
+
+  // Merge dynamic models if any exist
+  if (dynamicModels.size > 0) {
+    const allModels = { ...staticModels };
+    for (const [modelName, config] of dynamicModels) {
+      allModels[modelName] = config;
+    }
+    return allModels;
+  }
+
+  return staticModels;
+};
+
+// Create an async version of getModelConfig for API fetching
+openrouterProvider.getModelConfigAsync = async function(modelName) {
+  // First check static models
+  const staticConfig = this.getModelConfig(modelName);
+  if (staticConfig && !staticConfig.isDynamic) {
+    return staticConfig;
+  }
+
+  // Check if already in dynamic models cache
+  if (dynamicModels.has(modelName)) {
+    return dynamicModels.get(modelName);
+  }
+
+  // If dynamic models are enabled and model follows format, fetch from API
+  const config = this._lastConfig || {};
+  const dynamicModelsEnabled = config?.providers?.openrouterdynamicmodels ||
+    config?.providers?.openrouterDynamicModels;
+  if (dynamicModelsEnabled && isOpenRouterModelFormat(modelName)) {
+    debugLog(`[OpenRouter] Fetching dynamic model config for: ${modelName}`);
+
+    // Fetch from API with caching
+    const apiConfig = await fetchModelEndpointsWithCache(modelName);
+
+    if (apiConfig) {
+      // Store in dynamic models cache
+      dynamicModels.set(modelName, apiConfig);
+      return apiConfig;
+    } else {
+      // Model not found on API, create default config to avoid repeated lookups
+      const defaultConfig = createDynamicModelConfig(modelName);
+      defaultConfig.notFoundOnApi = true;
+      dynamicModels.set(modelName, defaultConfig);
+      return defaultConfig;
+    }
+  }
+
+  return null;
+};
+
+const originalGetModelConfig = openrouterProvider.getModelConfig;
+openrouterProvider.getModelConfig = function(modelName) {
+  // First check static models
+  const staticConfig = originalGetModelConfig.call(this, modelName);
+  if (staticConfig) {
+    return staticConfig;
+  }
+
+  // Check dynamic models
+  if (dynamicModels.has(modelName)) {
+    return dynamicModels.get(modelName);
+  }
+
+  // For synchronous calls, create default config if dynamic models enabled
+  const config = this._lastConfig || {};
+  const dynamicModelsEnabled = config?.providers?.openrouterdynamicmodels ||
+    config?.providers?.openrouterDynamicModels;
+  if (dynamicModelsEnabled && isOpenRouterModelFormat(modelName)) {
+    // Note: This is a fallback for synchronous calls
+    // The async version should be preferred for accurate model info
+    const dynamicConfig = createDynamicModelConfig(modelName);
+    dynamicConfig.needsApiUpdate = true;
+    return dynamicConfig;
+  }
+
+  return null;
+};
+
+// Override the invoke method to add dynamic headers and model support
 const originalInvoke = openrouterProvider.invoke;
 openrouterProvider.invoke = async function(messages, options = {}) {
+  // Store config for use in getModelConfig
+  this._lastConfig = options.config;
+
   // Validate referer configuration
-  //
-  if (!options.config?.providers?.openrouterreferer) {
+  // Handle both camelCase (from tests) and lowercase (from config.js) keys
+  if (!options.config?.providers?.openrouterreferer && !options.config?.providers?.openrouterReferer) {
     throw new OpenRouterProviderError(
       'OpenRouter requires HTTP-Referer header. Please set OPENROUTER_REFERER in your environment',
       ErrorCodes.INVALID_REQUEST
     );
   }

+  // Check if we need to fetch dynamic model config
+  const modelName = options.model;
+  if (modelName) {
+    const existingConfig = this.getModelConfig(modelName);
+
+    // If the model needs API update, fetch it now
+    if (existingConfig?.needsApiUpdate) {
+      const dynamicModelsEnabled = options.config?.providers?.openrouterdynamicmodels ||
+        options.config?.providers?.openrouterDynamicModels;
+      if (dynamicModelsEnabled) {
+        debugLog(`[OpenRouter] Fetching API config for model: ${modelName}`);
+        await this.getModelConfigAsync(modelName);
+      }
+    }
+  }
+
   // Create a modified config with custom headers
   const modifiedOptions = {
     ...options,
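Putting the overrides together, the dynamic-model path through `invoke` works roughly as follows (options shape inferred from the referer validation above; the model ID is illustrative):

```javascript
const options = {
  model: 'mistralai/mistral-large', // not in the static SUPPORTED_MODELS
  config: {
    providers: {
      openrouterreferer: 'https://github.com/YourUsername/YourApp',
      openrouterdynamicmodels: true
    }
  }
};

// invoke() stores options.config in _lastConfig; the synchronous
// getModelConfig() then returns a placeholder flagged needsApiUpdate,
// and invoke() awaits getModelConfigAsync() to replace it with real
// capabilities from the endpoints API before sending the request.
await openrouterProvider.invoke(messages, options);
```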
package/src/tools/chat.js
CHANGED
@@ -159,7 +159,7 @@ export async function chatTool(args, dependencies) {
   } else {
     // Use specified provider/model
     // Try to map model to provider
-    providerName = mapModelToProvider(model);
+    providerName = mapModelToProvider(model, providers);
     selectedProvider = providers[providerName];

     if (!selectedProvider) {
@@ -282,7 +282,7 @@ function resolveAutoModel(model, providerName) {
   return defaults[providerName] || 'gpt-4o-mini';
 }

-function mapModelToProvider(model) {
+function mapModelToProvider(model, providers) {
   const modelLower = model.toLowerCase();

   // Handle "auto" - default to OpenAI
@@ -290,6 +290,30 @@ function mapModelToProvider(model) {
     return 'openai';
   }

+  // Check OpenRouter-specific patterns first
+  if (modelLower === 'openrouter auto' || modelLower === 'auto router' ||
+      modelLower === 'auto-router' || modelLower === 'openrouter-auto') {
+    return 'openrouter';
+  }
+
+  // If model contains "/", check if native provider supports it
+  if (modelLower.includes('/')) {
+    // Check each provider to see if they have this exact model
+    for (const [providerName, provider] of Object.entries(providers)) {
+      if (provider && provider.getModelConfig) {
+        const modelConfig = provider.getModelConfig(model);
+        if (modelConfig && !modelConfig.isDynamic && !modelConfig.needsApiUpdate) {
+          // Model exists in this provider's static list
+          return providerName;
+        }
+      }
+    }
+    // No native provider has this model, route to OpenRouter
+    return 'openrouter';
+  }
+
+  // For non-slash models, use keyword matching as before
+
   // OpenAI models
   if (modelLower.includes('gpt') || modelLower.includes('o1') ||
       modelLower.includes('o3') || modelLower.includes('o4')) {
@@ -326,8 +350,7 @@ function mapModelToProvider(model) {

   // OpenRouter models (specific model patterns)
   if (modelLower.includes('qwen') || modelLower.includes('kimi') ||
-      modelLower.includes('moonshot') || modelLower === 'k2' ||
-      modelLower.includes('/')) { // OpenRouter uses provider/model format
+      modelLower.includes('moonshot') || modelLower === 'k2') {
     return 'openrouter';
   }

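One consequence of the new slash-format branch worth noting: a native provider's static list now takes precedence over OpenRouter for slash IDs, instead of every "/" model falling through to the removed keyword clause. A sketch with a hypothetical provider set:

```javascript
// Suppose providers.google statically listed 'google/gemini-pro' (hypothetical):
mapModelToProvider('google/gemini-pro', providers);    // 'google' (native static match wins)
mapModelToProvider('acme/brand-new-model', providers); // 'openrouter' (no native match)
```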
package/src/tools/consensus.js
CHANGED
@@ -154,7 +154,7 @@ export async function consensusTool(args, dependencies) {
   }

   const modelName = modelSpec.model;
-  const providerName = mapModelToProvider(modelName);
+  const providerName = mapModelToProvider(modelName, providers);
   const resolvedModelName = resolveAutoModel(modelName, providerName);
   const provider = providers[providerName];

@@ -416,7 +416,7 @@ function resolveAutoModel(model, providerName) {
   return defaults[providerName] || 'o3';
 }

-function mapModelToProvider(model) {
+function mapModelToProvider(model, providers) {
   const modelLower = model.toLowerCase();

   // Handle "auto" - default to OpenAI
@@ -424,6 +424,30 @@ function mapModelToProvider(model) {
     return 'openai';
   }

+  // Check OpenRouter-specific patterns first
+  if (modelLower === 'openrouter auto' || modelLower === 'auto router' ||
+      modelLower === 'auto-router' || modelLower === 'openrouter-auto') {
+    return 'openrouter';
+  }
+
+  // If model contains "/", check if native provider supports it
+  if (modelLower.includes('/')) {
+    // Check each provider to see if they have this exact model
+    for (const [providerName, provider] of Object.entries(providers)) {
+      if (provider && provider.getModelConfig) {
+        const modelConfig = provider.getModelConfig(model);
+        if (modelConfig && !modelConfig.isDynamic && !modelConfig.needsApiUpdate) {
+          // Model exists in this provider's static list
+          return providerName;
+        }
+      }
+    }
+    // No native provider has this model, route to OpenRouter
+    return 'openrouter';
+  }
+
+  // For non-slash models, use keyword matching as before
+
   // OpenAI models
   if (modelLower.includes('gpt') || modelLower.includes('o1') ||
       modelLower.includes('o3') || modelLower.includes('o4')) {
@@ -460,8 +484,7 @@ function mapModelToProvider(model) {

   // OpenRouter models (specific model patterns)
   if (modelLower.includes('qwen') || modelLower.includes('kimi') ||
-      modelLower.includes('moonshot') || modelLower === 'k2' ||
-      modelLower.includes('/')) { // OpenRouter uses provider/model format
+      modelLower.includes('moonshot') || modelLower === 'k2') {
     return 'openrouter';
   }
