mcp-ts-template 1.1.7 → 1.1.9

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
package/README.md CHANGED
@@ -3,7 +3,7 @@
  [![TypeScript](https://img.shields.io/badge/TypeScript-^5.8.3-blue.svg)](https://www.typescriptlang.org/)
  [![Model Context Protocol SDK](https://img.shields.io/badge/MCP%20SDK-1.11.0-green.svg)](https://github.com/modelcontextprotocol/typescript-sdk)
  [![MCP Spec Version](https://img.shields.io/badge/MCP%20Spec-2025--03--26-lightgrey.svg)](https://github.com/modelcontextprotocol/modelcontextprotocol/blob/main/docs/specification/2025-03-26/changelog.mdx)
- [![Version](https://img.shields.io/badge/Version-1.1.7-blue.svg)](./CHANGELOG.md)
+ [![Version](https://img.shields.io/badge/Version-1.1.9-blue.svg)](./CHANGELOG.md)
  [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
  [![Status](https://img.shields.io/badge/Status-Stable-green.svg)](https://github.com/cyanheads/mcp-ts-template/issues)
  [![GitHub](https://img.shields.io/github/stars/cyanheads/mcp-ts-template?style=social)](https://github.com/cyanheads/mcp-ts-template)
@@ -90,17 +90,26 @@ Get the example server running in minutes:

  Configure the MCP server's behavior using these environment variables:

- | Variable | Description | Default |
- | :-------------------- | :-------------------------------------------------------------------------------------------------- | :------------------------------------- |
- | `MCP_TRANSPORT_TYPE` | Server transport: `stdio` or `http`. | `stdio` |
- | `MCP_HTTP_PORT` | Port for the HTTP server (if `MCP_TRANSPORT_TYPE=http`). | `3010` |
- | `MCP_HTTP_HOST` | Host address for the HTTP server (if `MCP_TRANSPORT_TYPE=http`). | `127.0.0.1` |
- | `MCP_ALLOWED_ORIGINS` | Comma-separated allowed origins for CORS (if `MCP_TRANSPORT_TYPE=http`). | (none) |
- | `MCP_SERVER_NAME` | Optional server name (used in MCP initialization). | (from package.json) |
- | `MCP_SERVER_VERSION` | Optional server version (used in MCP initialization). | (from package.json) |
- | `MCP_LOG_LEVEL` | Server logging level (`debug`, `info`, `warning`, `error`, etc.). | `info` |
- | `NODE_ENV` | Runtime environment (`development`, `production`). | `development` |
- | `MCP_AUTH_SECRET_KEY` | **Required for HTTP transport.** Secret key (min 32 chars) for signing/verifying auth tokens (JWT). | (none - **MUST be set in production**) |
+ | Variable | Description | Default |
+ | :------------------------ | :-------------------------------------------------------------------------------------------------- | :----------------------------------------- |
+ | `MCP_TRANSPORT_TYPE` | Server transport: `stdio` or `http`. | `stdio` |
+ | `MCP_HTTP_PORT` | Port for the HTTP server (if `MCP_TRANSPORT_TYPE=http`). | `3010` |
+ | `MCP_HTTP_HOST` | Host address for the HTTP server (if `MCP_TRANSPORT_TYPE=http`). | `127.0.0.1` |
+ | `MCP_ALLOWED_ORIGINS` | Comma-separated allowed origins for CORS (if `MCP_TRANSPORT_TYPE=http`). | (none) |
+ | `MCP_SERVER_NAME` | Optional server name (used in MCP initialization). | (from package.json) |
+ | `MCP_SERVER_VERSION` | Optional server version (used in MCP initialization). | (from package.json) |
+ | `MCP_LOG_LEVEL` | Server logging level (`debug`, `info`, `warning`, `error`, etc.). | `info` |
+ | `NODE_ENV` | Runtime environment (`development`, `production`). | `development` |
+ | `MCP_AUTH_SECRET_KEY` | **Required for HTTP transport.** Secret key (min 32 chars) for signing/verifying auth tokens (JWT). | (none - **MUST be set in production**) |
+ | `OPENROUTER_APP_URL` | URL of the application (used by the OpenRouter service for the HTTP Referer header). | `https://caseyjhand.com` |
+ | `OPENROUTER_APP_NAME` | Name of the application (used by the OpenRouter service for the X-Title header). | `mcp-ts-template` |
+ | `OPENROUTER_API_KEY` | API key for the OpenRouter.ai service. Optional, but the service will be unconfigured without it. | (none) |
+ | `LLM_DEFAULT_MODEL` | Default model to use for LLM calls via OpenRouter. | `google/gemini-2.5-flash-preview:thinking` |
+ | `LLM_DEFAULT_TEMPERATURE` | Default temperature for LLM calls (0-2). Optional. | (none) |
+ | `LLM_DEFAULT_TOP_P` | Default top_p for LLM calls (0-1). Optional. | (none) |
+ | `LLM_DEFAULT_MAX_TOKENS` | Default max_tokens for LLM calls. Optional. | (none) |
+ | `LLM_DEFAULT_TOP_K` | Default top_k for LLM calls (non-negative integer). Optional. | (none) |
+ | `LLM_DEFAULT_MIN_P` | Default min_p for LLM calls (0-1). Optional. | (none) |

  **Note on HTTP Port Retries:** If the `MCP_HTTP_PORT` is busy, the server automatically tries the next port (up to 15 times).

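Of these, `MCP_AUTH_SECRET_KEY` is the only setting with a hard production requirement. A minimal startup guard that mirrors the table's rule (an illustrative sketch, not code shipped in this package):

```ts
// Hypothetical guard mirroring the table's rule that MCP_AUTH_SECRET_KEY
// (min 32 chars) MUST be set when using the HTTP transport in production.
const { NODE_ENV, MCP_TRANSPORT_TYPE, MCP_AUTH_SECRET_KEY } = process.env;

if (
  NODE_ENV === "production" &&
  MCP_TRANSPORT_TYPE === "http" &&
  (MCP_AUTH_SECRET_KEY ?? "").length < 32
) {
  throw new Error(
    "MCP_AUTH_SECRET_KEY must be at least 32 characters when using the HTTP transport in production.",
  );
}
```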
@@ -211,6 +220,7 @@ This project is licensed under the Apache License 2.0. See the [LICENSE](LICENSE
  | **Utilities (Security)** | IdGenerator | Generates unique IDs (prefixed or UUIDs). | `src/utils/security/idGenerator.ts` |
  | | RateLimiter | Request throttling based on keys. | `src/utils/security/rateLimiter.ts` |
  | | Sanitization | Input validation/cleaning (HTML, paths, URLs, numbers, JSON) & log redaction (`validator`, `sanitize-html`). | `src/utils/security/sanitization.ts` |
+ | **Services** | OpenRouter Provider | Service for interacting with the OpenRouter API via the OpenAI SDK compatibility layer. | `src/services/openRouterProvider.ts` |
  | **Type Safety** | Global Types | Shared TypeScript definitions for consistent interfaces (Errors, MCP types). | `src/types-global/` |
  | | Zod Schemas | Used for robust validation of configuration files and tool/resource inputs. | Throughout (`config`, `mcp-client`, tools, etc.) |
  | **Error Handling** | Pattern-Based Classification | Automatically categorize errors based on message patterns. | `src/utils/internal/errorHandler.ts` |
package/dist/config/index.d.ts CHANGED
@@ -58,6 +58,15 @@ export declare const config: {
       * Default: undefined (Auth middleware should throw error if not set in production)
       */
      mcpAuthSecretKey: string | undefined;
+     openrouterAppUrl: string;
+     openrouterAppName: string;
+     openrouterApiKey: string | undefined;
+     llmDefaultModel: string;
+     llmDefaultTemperature: number | undefined;
+     llmDefaultTopP: number | undefined;
+     llmDefaultMaxTokens: number | undefined;
+     llmDefaultTopK: number | undefined;
+     llmDefaultMinP: number | undefined;
  };
  /**
   * The configured logging level for the application.
package/dist/config/index.js CHANGED
@@ -32,6 +32,16 @@ const EnvSchema = z.object({
      MCP_HTTP_HOST: z.string().default('127.0.0.1'),
      MCP_ALLOWED_ORIGINS: z.string().optional(), // Comma-separated string
      MCP_AUTH_SECRET_KEY: z.string().min(32, "MCP_AUTH_SECRET_KEY must be at least 32 characters long for security").optional(), // Secret for signing/verifying tokens
+     // OpenRouter and LLM specific configurations
+     OPENROUTER_APP_URL: z.string().url("OPENROUTER_APP_URL must be a valid URL").optional(),
+     OPENROUTER_APP_NAME: z.string().optional(),
+     OPENROUTER_API_KEY: z.string().optional(),
+     LLM_DEFAULT_MODEL: z.string().default('google/gemini-2.5-flash-preview:thinking'),
+     LLM_DEFAULT_TEMPERATURE: z.coerce.number().min(0).max(2).optional(),
+     LLM_DEFAULT_TOP_P: z.coerce.number().min(0).max(1).optional(),
+     LLM_DEFAULT_MAX_TOKENS: z.coerce.number().int().positive().optional(),
+     LLM_DEFAULT_TOP_K: z.coerce.number().int().nonnegative().optional(), // top_k can be 0
+     LLM_DEFAULT_MIN_P: z.coerce.number().min(0).max(1).optional(),
  });
  // Parse and validate environment variables
  const parsedEnv = EnvSchema.safeParse(process.env);
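Every numeric `LLM_DEFAULT_*` entry above uses `z.coerce.number()`, which converts the raw `process.env` string before applying the range checks. A standalone sketch of that coercion behavior (not part of the package):

```ts
import { z } from "zod";

// Same pattern as LLM_DEFAULT_TEMPERATURE above: coerce the env string,
// enforce the 0-2 range, and allow the variable to be absent entirely.
const Temperature = z.coerce.number().min(0).max(2).optional();

console.log(Temperature.parse("0.7")); // 0.7 — the string is coerced to a number
console.log(Temperature.parse(undefined)); // undefined — optional, no default applied
// Temperature.parse("3.5") would throw a ZodError: 3.5 exceeds max(2).
```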
@@ -105,6 +115,16 @@ export const config = {
       * Default: undefined (Auth middleware should throw error if not set in production)
       */
      mcpAuthSecretKey: env.MCP_AUTH_SECRET_KEY,
+     // OpenRouter and LLM specific properties
+     openrouterAppUrl: env.OPENROUTER_APP_URL || 'http://localhost:3000', // Default if not set
+     openrouterAppName: env.OPENROUTER_APP_NAME || pkg.name || 'MCP TS App', // Default if not set
+     openrouterApiKey: env.OPENROUTER_API_KEY, // No default, service handles if missing
+     llmDefaultModel: env.LLM_DEFAULT_MODEL,
+     llmDefaultTemperature: env.LLM_DEFAULT_TEMPERATURE,
+     llmDefaultTopP: env.LLM_DEFAULT_TOP_P,
+     llmDefaultMaxTokens: env.LLM_DEFAULT_MAX_TOKENS,
+     llmDefaultTopK: env.LLM_DEFAULT_TOP_K,
+     llmDefaultMinP: env.LLM_DEFAULT_MIN_P,
      // Note: mcpClient configuration is loaded separately via src/mcp-client/configLoader.ts
      // Note: Logger-specific configurations (LOG_FILE_PATH, LOG_MAX_FILES, etc.)
      // are typically handled directly within the logger utility (src/utils/internal/logger.ts)
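Note that `openrouterAppUrl` and `openrouterAppName` fall back with `||` rather than a Zod `.default()`, so the chain resolves when the config object is built. A trivial sketch of the `openrouterAppName` chain, with hypothetical stand-ins for the validated env and the parsed package.json:

```ts
// Hypothetical stand-ins for the validated env and the parsed package.json.
const env: { OPENROUTER_APP_NAME?: string } = {};
const pkg: { name?: string } = { name: "mcp-ts-template" };

// Same fallback chain as above: env var, then package name, then a literal.
const openrouterAppName = env.OPENROUTER_APP_NAME || pkg.name || "MCP TS App";
console.log(openrouterAppName); // "mcp-ts-template"
```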
package/dist/services/openRouterProvider.d.ts ADDED
@@ -0,0 +1,43 @@
+ import { ChatCompletion, ChatCompletionChunk, ChatCompletionCreateParamsNonStreaming, ChatCompletionCreateParamsStreaming } from 'openai/resources/chat/completions';
+ import { Stream } from 'openai/streaming';
+ import { OperationContext, RequestContext } from '../utils/internal/requestContext.js';
+ type OpenRouterChatParams = (ChatCompletionCreateParamsNonStreaming | ChatCompletionCreateParamsStreaming) & {
+     top_k?: number;
+     min_p?: number;
+     transforms?: string[];
+     models?: string[];
+     route?: 'fallback';
+     provider?: Record<string, any>;
+ };
+ /**
+  * Service class for interacting with the OpenRouter API using the OpenAI SDK compatibility.
+  */
+ declare class OpenRouterProvider {
+     private client?;
+     readonly status: 'unconfigured' | 'initializing' | 'ready' | 'error';
+     private initializationError;
+     constructor(apiKey: string | undefined, context?: OperationContext);
+     private checkReady;
+     /**
+      * Creates a chat completion using the OpenRouter API. Can return a stream or a single response.
+      * @param params - The parameters for the chat completion request, potentially including OpenRouter-specific fields and stream option.
+      * @param context - The request context for logging and error handling.
+      * @returns A promise that resolves with the chat completion response or an async iterable stream.
+      * @throws {McpError} If the service is not ready, the API call fails, or returns an error.
+      */
+     chatCompletion(params: OpenRouterChatParams, // Use the defined type here
+     context: RequestContext): Promise<ChatCompletion | Stream<ChatCompletionChunk>>;
+     /**
+      * Lists available models from OpenRouter.
+      * Note: The standard OpenAI SDK doesn't have a direct equivalent for listing models
+      * from a custom base URL like OpenRouter's `/models` endpoint.
+      * This method uses fetch directly.
+      * @param context - The request context for logging.
+      * @returns A promise that resolves with the list of models.
+      * @throws {McpError} If the service is not ready or the API call fails.
+      */
+     listModels(context: RequestContext): Promise<any>;
+ }
+ declare const openRouterProviderInstance: OpenRouterProvider;
+ export { openRouterProviderInstance as openRouterProvider };
+ export type { OpenRouterProvider };
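Going by this declaration file, a consumer would call the exported singleton roughly as follows. The import path and the shape of the context object are illustrative assumptions; the real `RequestContext` type lives in `src/utils/internal/requestContext.ts`:

```ts
import { openRouterProvider } from "./services/openRouterProvider.js"; // path assumed

// Minimal stand-in for a RequestContext; the real type may require more fields.
const context = { requestId: "demo-1" } as any;

if (openRouterProvider.status === "ready") {
  const result = await openRouterProvider.chatCompletion(
    {
      model: "google/gemini-2.5-flash-preview:thinking", // runtime falls back to LLM_DEFAULT_MODEL
      messages: [{ role: "user", content: "Summarize MCP in one sentence." }],
      top_k: 40, // OpenRouter-specific field from OpenRouterChatParams
    },
    context,
  );
  // Without stream: true, the promise resolves to a ChatCompletion.
  if ("choices" in result) {
    console.log(result.choices[0]?.message?.content);
  }
}
```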
package/dist/services/openRouterProvider.js ADDED
@@ -0,0 +1,269 @@
+ import OpenAI from 'openai';
+ import { config } from '../config/index.js';
+ import { BaseErrorCode, McpError } from '../types-global/errors.js';
+ import { ErrorHandler } from '../utils/internal/errorHandler.js';
+ import { logger } from '../utils/internal/logger.js';
+ import { sanitization } from '../utils/security/sanitization.js';
+ import { rateLimiter } from '../utils/security/rateLimiter.js';
+ // Use the updated config properties
+ const YOUR_SITE_URL = config.openrouterAppUrl;
+ const YOUR_SITE_NAME = config.openrouterAppName;
+ /**
+  * Service class for interacting with the OpenRouter API using the OpenAI SDK compatibility.
+  */
+ class OpenRouterProvider {
+     constructor(apiKey, context) {
+         this.initializationError = null;
+         const opContext = context || { operation: 'OpenRouterProvider.constructor' };
+         this.status = 'initializing'; // Start in initializing state
+         if (!apiKey) {
+             this.status = 'unconfigured';
+             logger.warning('OPENROUTER_API_KEY is not set. OpenRouter service is not configured.', { ...opContext, service: 'OpenRouterProvider' });
+             return; // Stop initialization
+         }
+         try {
+             this.client = new OpenAI({
+                 baseURL: 'https://openrouter.ai/api/v1',
+                 apiKey: apiKey,
+                 defaultHeaders: {
+                     'HTTP-Referer': YOUR_SITE_URL, // Use config value
+                     'X-Title': YOUR_SITE_NAME, // Use config value
+                 },
+             });
+             this.status = 'ready';
+             logger.info('OpenRouter Service Initialized and Ready', { ...opContext, service: 'OpenRouterProvider' });
+         }
+         catch (error) {
+             this.status = 'error';
+             this.initializationError = error instanceof Error ? error : new Error(String(error));
+             logger.error('Failed to initialize OpenRouter client', { ...opContext, service: 'OpenRouterProvider', error: this.initializationError.message });
+             // Don't throw here, let status indicate failure
+         }
+     }
+     checkReady(operation, context) {
+         if (this.status !== 'ready') {
+             let errorCode = BaseErrorCode.SERVICE_UNAVAILABLE;
+             let message = `OpenRouter service is not available (status: ${this.status}).`;
+             if (this.status === 'unconfigured') {
+                 errorCode = BaseErrorCode.CONFIGURATION_ERROR;
+                 message = 'OpenRouter service is not configured (missing API key).';
+             }
+             else if (this.status === 'error') {
+                 errorCode = BaseErrorCode.INITIALIZATION_FAILED;
+                 message = `OpenRouter service failed to initialize: ${this.initializationError?.message || 'Unknown error'}`;
+             }
+             logger.error(`[${operation}] Attempted to use OpenRouter service when not ready.`, { ...context, status: this.status });
+             throw new McpError(errorCode, message, { operation, status: this.status, cause: this.initializationError });
+         }
+         if (!this.client) {
+             // This should theoretically not happen if status is 'ready', but belts and suspenders
+             logger.error(`[${operation}] Service status is ready, but client is missing.`, { ...context });
+             throw new McpError(BaseErrorCode.INTERNAL_ERROR, 'Internal inconsistency: OpenRouter client is missing despite ready status.', { operation });
+         }
+     }
+     /**
+      * Creates a chat completion using the OpenRouter API. Can return a stream or a single response.
+      * @param params - The parameters for the chat completion request, potentially including OpenRouter-specific fields and stream option.
+      * @param context - The request context for logging and error handling.
+      * @returns A promise that resolves with the chat completion response or an async iterable stream.
+      * @throws {McpError} If the service is not ready, the API call fails, or returns an error.
+      */
+     async chatCompletion(params, // Use the defined type here
+     context) {
+         const operation = 'OpenRouterProvider.chatCompletion';
+         this.checkReady(operation, context); // Check if service is ready
+         // Determine if streaming is requested
+         const isStreaming = params.stream === true;
+         // Explicitly pick known standard OpenAI params and apply defaults
+         // Use the appropriate type based on streaming
+         const standardParams = {
+             model: params.model || config.llmDefaultModel,
+             messages: params.messages,
+             // Only include standard params if they exist in the input or have a default from config
+             ...(params.temperature !== undefined || config.llmDefaultTemperature !== undefined ? { temperature: params.temperature ?? config.llmDefaultTemperature } : {}),
+             ...(params.top_p !== undefined || config.llmDefaultTopP !== undefined ? { top_p: params.top_p ?? config.llmDefaultTopP } : {}),
+             // Only include penalties if explicitly provided in params, ignore config defaults for these
+             ...(params.presence_penalty !== undefined ? { presence_penalty: params.presence_penalty } : {}),
+             ...(params.max_tokens !== undefined || config.llmDefaultMaxTokens !== undefined ? { max_tokens: params.max_tokens ?? config.llmDefaultMaxTokens } : {}),
+             ...(params.stream !== undefined && { stream: params.stream }), // Keep stream param if provided
+             ...(params.tools !== undefined && { tools: params.tools }),
+             ...(params.tool_choice !== undefined && { tool_choice: params.tool_choice }),
+             ...(params.response_format !== undefined && { response_format: params.response_format }),
+             ...(params.stop !== undefined && { stop: params.stop }),
+             ...(params.seed !== undefined && { seed: params.seed }),
+             // Only include penalties if explicitly provided in params, ignore config defaults for these
+             ...(params.frequency_penalty !== undefined ? { frequency_penalty: params.frequency_penalty } : {}),
+             ...(params.logit_bias !== undefined && { logit_bias: params.logit_bias }),
+             // Add other standard OpenAI params here if needed, checking params object first
+         };
+         // Collect remaining/non-standard params for extra_body
+         const extraBody = {};
+         const standardKeys = new Set(Object.keys(standardParams)); // Use Set for faster lookups
+         standardKeys.add('messages'); // Ensure messages isn't added to extraBody
+         for (const key in params) {
+             // Ensure the key is actually a property of params before checking standardKeys
+             if (Object.prototype.hasOwnProperty.call(params, key) && !standardKeys.has(key)) {
+                 extraBody[key] = params[key];
+             }
+         }
+         // Apply defaults for known non-standard params if they weren't provided in input `params`
+         // Check if the key exists in extraBody before applying default from config
+         if (extraBody.top_k === undefined && config.llmDefaultTopK !== undefined) {
+             extraBody.top_k = config.llmDefaultTopK;
+         }
+         if (extraBody.min_p === undefined && config.llmDefaultMinP !== undefined) {
+             extraBody.min_p = config.llmDefaultMinP;
+         }
+         // Note: If params explicitly included top_k: null or min_p: null, the loop above would have added them.
+         // This logic correctly applies defaults only if the key is entirely absent.
+         // Combine for logging
+         const allEffectiveParams = { ...standardParams, ...extraBody };
+         const sanitizedParams = sanitization.sanitizeForLogging(allEffectiveParams);
+         logger.info(`[${operation}] Request received`, { ...context, params: sanitizedParams, streaming: isStreaming });
+         // --- BEGIN SPEED PRIORITIZATION ---
+         // Ensure provider routing prioritizes throughput for faster responses.
+         // If a provider preference is already set, merge 'sort' if not present.
+         // If no provider preference exists, create it.
+         if (extraBody.provider && typeof extraBody.provider === 'object') {
+             if (!extraBody.provider.sort) {
+                 extraBody.provider.sort = 'throughput';
+                 logger.debug(`[${operation}] Merged 'sort: throughput' into existing provider preferences.`, context);
+             }
+             else {
+                 logger.debug(`[${operation}] Provider 'sort' preference already exists, respecting provided value: ${extraBody.provider.sort}`, context);
+             }
+         }
+         else if (extraBody.provider === undefined) {
+             // Only add if 'provider' is completely undefined, not if it's null or another type
+             extraBody.provider = { sort: 'throughput' };
+             logger.debug(`[${operation}] Added 'provider: { sort: 'throughput' }' preferences.`, context);
+         }
+         // --- END SPEED PRIORITIZATION ---
+         // Apply rate limiting before making the API call
+         const rateLimitKey = context.requestId || 'openrouter_default_key';
+         try {
+             rateLimiter.check(rateLimitKey, context);
+             logger.debug(`[${operation}] Rate limit check passed`, { ...context, key: rateLimitKey });
+         }
+         catch (error) {
+             // If rate limit check fails, log and re-throw the McpError (RATE_LIMITED)
+             logger.warning(`[${operation}] Rate limit exceeded`, { ...context, key: rateLimitKey, error: error instanceof Error ? error.message : String(error) });
+             throw error; // Re-throw the McpError from rateLimiter.check()
+         }
+         // Use tryCatch for error handling, but return type depends on streaming
+         return await ErrorHandler.tryCatch(async () => {
+             // Ensure client is defined (checkReady should guarantee this, but TS needs reassurance)
+             if (!this.client)
+                 throw new Error("Client missing despite ready status");
+             // Prepare the final parameters for the API call
+             // Cast to the correct type based on streaming flag
+             const apiParams = { ...standardParams };
+             if (Object.keys(extraBody).length > 0) {
+                 // Pass non-standard params via extra_body
+                 apiParams.extra_body = extraBody;
+             }
+             try {
+                 if (isStreaming) {
+                     // Call with streaming true
+                     const stream = await this.client.chat.completions.create(apiParams // Cast to streaming type
+                     );
+                     logger.info(`[${operation}] Streaming request successful`, { ...context, model: apiParams.model });
+                     return stream;
+                 }
+                 else {
+                     // Call with streaming false (or default)
+                     const completion = await this.client.chat.completions.create(apiParams // Cast to non-streaming type
+                     );
+                     logger.info(`[${operation}] Non-streaming request successful`, { ...context, model: apiParams.model });
+                     // Consider sanitizing completion if logging full response
+                     // logger.debug(`[${operation}] Response data`, { ...context, response: completion });
+                     return completion;
+                 }
+             }
+             catch (error) {
+                 // Catch specific OpenAI/API errors if possible, otherwise treat as generic error
+                 logger.error(`[${operation}] API call failed`, { ...context, error: error.message, status: error.status });
+                 // Map API errors to McpError types using correct codes and constructor signature (code, message, details?)
+                 const errorDetails = { providerStatus: error.status, providerMessage: error.message, cause: error?.cause };
+                 if (error.status === 401) {
+                     throw new McpError(BaseErrorCode.UNAUTHORIZED, `OpenRouter authentication failed`, errorDetails);
+                 }
+                 else if (error.status === 429) {
+                     throw new McpError(BaseErrorCode.RATE_LIMITED, `OpenRouter rate limit exceeded`, errorDetails);
+                 }
+                 else if (error.status === 402) {
+                     // Using FORBIDDEN as the closest match for payment required
+                     throw new McpError(BaseErrorCode.FORBIDDEN, `OpenRouter insufficient credits or payment required`, errorDetails);
+                 }
+                 // Throw a generic internal error for other API statuses
+                 throw new McpError(BaseErrorCode.INTERNAL_ERROR, `OpenRouter API error (${error.status})`, errorDetails);
+             }
+         }, {
+             operation,
+             context,
+             input: sanitizedParams, // Log sanitized input
+             errorCode: BaseErrorCode.INTERNAL_ERROR, // Default error code if unexpected error occurs
+         });
+     }
+     // Removed getNextAction and its private helper methods:
+     // - buildSystemPrompt
+     // - formatToolsForPrompt
+     // - formatConversationHistory
+     // - parseAndValidateAction
+     /**
+      * Lists available models from OpenRouter.
+      * Note: The standard OpenAI SDK doesn't have a direct equivalent for listing models
+      * from a custom base URL like OpenRouter's `/models` endpoint.
+      * This method uses fetch directly.
+      * @param context - The request context for logging.
+      * @returns A promise that resolves with the list of models.
+      * @throws {McpError} If the service is not ready or the API call fails.
+      */
+     async listModels(context) {
+         const operation = 'OpenRouterProvider.listModels';
+         this.checkReady(operation, context); // Check if service is ready
+         logger.info(`[${operation}] Request received`, context);
+         return await ErrorHandler.tryCatch(async () => {
+             // No need to check this.client here, checkReady handles it
+             try {
+                 const response = await fetch('https://openrouter.ai/api/v1/models', {
+                     method: 'GET',
+                     headers: {
+                         // No API key needed for listing models as per OpenRouter docs
+                         'Content-Type': 'application/json',
+                     },
+                 });
+                 if (!response.ok) {
+                     const errorBody = await response.text();
+                     const errorDetails = { status: response.status, body: errorBody };
+                     logger.error(`[${operation}] Failed to list models`, { ...context, ...errorDetails });
+                     // Context is not passed to McpError constructor directly.
+                     throw new McpError(BaseErrorCode.INTERNAL_ERROR, `OpenRouter list models failed (${response.status})`, errorDetails);
+                 }
+                 const models = await response.json();
+                 logger.info(`[${operation}] Successfully listed models`, context);
+                 // logger.debug(`[${operation}] Models data`, { ...context, count: models?.data?.length });
+                 return models;
+             }
+             catch (error) {
+                 logger.error(`[${operation}] Error listing models`, { ...context, error: error.message });
+                 if (error instanceof McpError) {
+                     throw error; // Re-throw McpErrors directly
+                 }
+                 // Use SERVICE_UNAVAILABLE for network/fetch errors
+                 // Context is not passed to McpError constructor directly.
+                 throw new McpError(BaseErrorCode.SERVICE_UNAVAILABLE, `Network error listing OpenRouter models: ${error.message}`, { cause: error });
+             }
+         }, {
+             operation,
+             context,
+             errorCode: BaseErrorCode.INTERNAL_ERROR, // Default error code if unexpected error occurs
+         });
+     }
+ }
+ // Initialize and export the service instance.
+ // It reads the API key from the config/environment variables.
+ // The instance will always exist, but its status indicates if it's usable.
+ const openRouterProviderInstance = new OpenRouterProvider(config.openrouterApiKey);
+ // Export the guaranteed instance. Consumers should check its status or handle errors from its methods.
+ export { openRouterProviderInstance as openRouterProvider };
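When `stream: true` is passed, the method resolves to the OpenAI SDK's `Stream<ChatCompletionChunk>`, which is async-iterable. A consumption sketch under the same import-path and context assumptions as above:

```ts
import { openRouterProvider } from "./services/openRouterProvider.js"; // path assumed

const context = { requestId: "demo-2" } as any; // stand-in for a RequestContext

const result = await openRouterProvider.chatCompletion(
  {
    model: "google/gemini-2.5-flash-preview:thinking",
    messages: [{ role: "user", content: "Stream a haiku." }],
    stream: true, // selects the Stream<ChatCompletionChunk> return path
  },
  context,
);

if (Symbol.asyncIterator in result) {
  for await (const chunk of result) {
    // Each chunk carries an incremental delta rather than a full message.
    process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
  }
}
```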
package/dist/types-global/errors.d.ts CHANGED
@@ -27,7 +27,9 @@ export declare enum BaseErrorCode {
      /** An error occurred, but the specific cause is unknown or cannot be categorized. */
      UNKNOWN_ERROR = "UNKNOWN_ERROR",
      /** An error occurred during the loading or validation of configuration data. */
-     CONFIGURATION_ERROR = "CONFIGURATION_ERROR"
+     CONFIGURATION_ERROR = "CONFIGURATION_ERROR",
+     /** An error occurred during the initialization phase of a service or module. */
+     INITIALIZATION_FAILED = "INITIALIZATION_FAILED"
  }
  /**
   * Custom error class for MCP-specific errors.
package/dist/types-global/errors.js CHANGED
@@ -29,6 +29,8 @@ export var BaseErrorCode;
      BaseErrorCode["UNKNOWN_ERROR"] = "UNKNOWN_ERROR";
      /** An error occurred during the loading or validation of configuration data. */
      BaseErrorCode["CONFIGURATION_ERROR"] = "CONFIGURATION_ERROR";
+     /** An error occurred during the initialization phase of a service or module. */
+     BaseErrorCode["INITIALIZATION_FAILED"] = "INITIALIZATION_FAILED";
  })(BaseErrorCode || (BaseErrorCode = {}));
  /**
   * Custom error class for MCP-specific errors.
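With the new `INITIALIZATION_FAILED` member, callers can distinguish a missing API key (`CONFIGURATION_ERROR`, per `checkReady` above) from a failed client construction. A sketch of such a branch, assuming `McpError` exposes the code it was constructed with as a `code` property:

```ts
import { BaseErrorCode, McpError } from "./types-global/errors.js"; // path assumed

// Hypothetical helper translating the two setup-related codes into advice.
function explainSetupError(err: unknown): string {
  if (err instanceof McpError) {
    if (err.code === BaseErrorCode.CONFIGURATION_ERROR) {
      return "OpenRouter is unconfigured: set OPENROUTER_API_KEY.";
    }
    if (err.code === BaseErrorCode.INITIALIZATION_FAILED) {
      return "The OpenRouter client failed to initialize; check the startup logs.";
    }
    return `MCP error: ${err.code}`;
  }
  return String(err);
}
```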
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "mcp-ts-template",
-   "version": "1.1.7",
+   "version": "1.1.9",
    "description": "TypeScript template for building Model Context Protocol (MCP) servers & clients. Features production-ready utilities, stdio/HTTP transports (with JWT auth), examples, and type safety. Ideal starting point for creating MCP-based applications.",
    "main": "dist/index.js",
    "files": [
@@ -30,9 +30,9 @@
      "inspector": "mcp-inspector --config mcp.json --server mcp-ts-template"
    },
    "dependencies": {
-     "@modelcontextprotocol/sdk": "^1.11.0",
+     "@modelcontextprotocol/sdk": "^1.11.2",
      "@types/jsonwebtoken": "^9.0.9",
-     "@types/node": "^22.15.15",
+     "@types/node": "^22.15.17",
      "@types/sanitize-html": "^2.16.0",
      "@types/validator": "13.15.0",
      "chrono-node": "^2.8.0",
@@ -40,7 +40,7 @@
      "express": "^5.1.0",
      "ignore": "^7.0.4",
      "jsonwebtoken": "^9.0.2",
-     "openai": "^4.97.0",
+     "openai": "^4.98.0",
      "partial-json": "^0.1.7",
      "sanitize-html": "^2.16.0",
      "tiktoken": "^1.0.21",