@gcnv/gcnv-mcp-server 1.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33) hide show
  1. package/GEMINI.md +200 -0
  2. package/LICENSE +201 -0
  3. package/README.md +374 -0
  4. package/build/index.js +185 -0
  5. package/build/logger.js +19 -0
  6. package/build/registry/register-tools.js +101 -0
  7. package/build/registry/tool-registry.js +27 -0
  8. package/build/tools/active-directory-tools.js +124 -0
  9. package/build/tools/backup-policy-tools.js +140 -0
  10. package/build/tools/backup-tools.js +178 -0
  11. package/build/tools/backup-vault-tools.js +147 -0
  12. package/build/tools/handlers/active-directory-handler.js +321 -0
  13. package/build/tools/handlers/backup-handler.js +451 -0
  14. package/build/tools/handlers/backup-policy-handler.js +275 -0
  15. package/build/tools/handlers/backup-vault-handler.js +370 -0
  16. package/build/tools/handlers/kms-config-handler.js +327 -0
  17. package/build/tools/handlers/operation-handler.js +254 -0
  18. package/build/tools/handlers/quota-rule-handler.js +411 -0
  19. package/build/tools/handlers/replication-handler.js +504 -0
  20. package/build/tools/handlers/snapshot-handler.js +320 -0
  21. package/build/tools/handlers/storage-pool-handler.js +346 -0
  22. package/build/tools/handlers/volume-handler.js +353 -0
  23. package/build/tools/kms-config-tools.js +162 -0
  24. package/build/tools/operation-tools.js +64 -0
  25. package/build/tools/quota-rule-tools.js +166 -0
  26. package/build/tools/replication-tools.js +227 -0
  27. package/build/tools/snapshot-tools.js +124 -0
  28. package/build/tools/storage-pool-tools.js +215 -0
  29. package/build/tools/volume-tools.js +216 -0
  30. package/build/types/tool.js +1 -0
  31. package/build/utils/netapp-client-factory.js +53 -0
  32. package/gemini-extension.json +12 -0
  33. package/package.json +66 -0
package/README.md ADDED
@@ -0,0 +1,374 @@
1
+ # Google Cloud NetApp Volumes MCP Server
2
+
3
+ This is a Model Context Protocol (MCP) server for managing Google Cloud NetApp Volumes resources. It provides a set of tools for creating, retrieving, listing, updating, and deleting storage pools, volumes, snapshots, backup vaults, backups, and related resources in Google Cloud NetApp Volumes.
4
+
5
+ ## Overview
6
+
7
+ The Google Cloud NetApp Volumes MCP Server is built using the Model Context Protocol SDK and provides a set of tools for interacting with Google Cloud NetApp Volumes resources. It supports storage pool, volume, snapshot, backup vault, backup, and replication management, as well as management of long-running operations.
8
+
9
+ ## Features
10
+
11
+ - **Storage Pool Management**:
12
+ - Create new storage pools with configurable capacity, service level, and network settings
13
+ - List storage pools with pagination and filtering
14
+ - Get detailed information about specific storage pools
15
+ - Update storage pool properties (capacity, description, labels)
16
+ - Delete storage pools
17
+
18
+ - **Volume Management**:
19
+ - Create new volumes within storage pools with configurable capacity and protocols
20
+ - List volumes with pagination and filtering
21
+ - Get detailed information about specific volumes, including mount points
22
+ - Update volume properties (capacity, description, labels, export policy)
23
+ - Delete volumes
24
+
25
+ - **Snapshot Management**:
26
+ - Create new snapshots for volumes
27
+ - List snapshots for a specific volume
28
+ - Get detailed information about specific snapshots
29
+ - Delete snapshots when they are no longer needed
30
+ - Revert volumes to previous snapshots
31
+
32
+ - **Backup Vault Management**:
33
+ - Create new backup vaults for storing backups
34
+ - List backup vaults with pagination and filtering
35
+ - Get detailed information about specific backup vaults
36
+ - Update backup vault properties (description, labels)
37
+ - Delete backup vaults when they are no longer needed
38
+
39
+ - **Backup Management**:
40
+ - Create new backups of volumes in backup vaults
41
+ - List backups in a specific backup vault
42
+ - Get detailed information about specific backups
43
+ - Delete backups when they are no longer needed
44
+ - Restore backups to new or existing volumes
45
+
46
+ - **Replication Management**:
47
+ - Create, list, get, update, stop, resume, reverse direction, sync, and establish peering for replications
48
+ - Replication is only supported between specific region pairs for Standard/Premium/Extreme, or within the same region group for Flex. Always validate the requested source/destination regions against the official matrix before creating a replication. See the Google Cloud NetApp Volumes replication guide: https://docs.cloud.google.com/netapp/volumes/docs/protect-data/about-volume-replication
49
+ - When creating a replication, the destination volume is auto-created by specifying only the destination storage pool. Users can also choose a replication schedule (`EVERY_10_MINUTES`, `HOURLY`, or `DAILY`; defaults to `HOURLY`).
50
+
51
+ - **Long-running Operations Management**:
52
+ - Get details of an operation by ID
53
+ - Cancel in-progress operations
54
+ - List operations with filtering and pagination
55
+
56
+ ## Prerequisites
57
+
58
+ - Node.js 16 or higher
59
+ - Google Cloud project with NetApp Volumes API enabled
60
+ - Google Cloud authentication credentials
61
+
62
+ ## Installation
63
+
64
+ If you just want to run the published package (no local build), use:
65
+
66
+ ```bash
67
+ npx @gcnv/gcnv-mcp-server@latest --transport stdio
68
+ ```
69
+
70
+ Then configure `gemini-extension.json` (or your linked extension) to call the same command. To work from source, follow the steps below.
71
+
72
+ 1. Clone this repository:
73
+
74
+ ```bash
75
+ git clone <repository-url>
76
+ cd GCNV-MCP-LOCAL
77
+ ```
78
+
79
+ 2. Install dependencies:
80
+
81
+ ```bash
82
+ npm install
83
+ ```
84
+
85
+ 3. Build the project (required when working from source or before publishing):
86
+
87
+ ```bash
88
+ npm run build
89
+ ```
90
+
91
+ 4. Link the Gemini extension so the CLI can launch the MCP server over stdio:
92
+
93
+ ```bash
94
+ gemini extension link .
95
+ ```
96
+
97
+ 5. Confirm the extension is registered and ready. The MCP server should appear in the list:
98
+
99
+ ```bash
100
+ gemini mcp list
101
+ ```
102
+
103
+ > Gemini automatically forks the MCP server whenever a linked extension needs it, so once the build output exists (or the published package is available via `npx`) and the extension is linked, no manual `npm start` is required for normal usage.
104
+
105
+ ## Google Cloud Authentication
106
+
107
+ Ensure you have valid Google Cloud credentials set up before invoking tools:
108
+
109
+ - Set the `GOOGLE_APPLICATION_CREDENTIALS` environment variable pointing to a service account key file, or
110
+ - Use Application Default Credentials (ADC) with `gcloud auth application-default login`
111
+
112
+ ## Usage
113
+
114
+ ### Starting the Server
115
+
116
+ The MCP server supports both **stdio** (default) and **HTTP/SSE** transports. The transport mode can be controlled via command-line flags.
117
+
118
+ #### Stdio Transport (Default)
119
+
120
+ The stdio transport is the default mode and is launched by Gemini CLI when a linked extension requires it.
121
+
122
+ - After running `gemini extension link .`, you can verify that Gemini sees the server with `gemini mcp list`.
123
+ - Trigger any MCP interaction from Gemini (for example, invoke a registered tool) and the CLI will spawn the `gcnv-mcp` process automatically.
124
+ - For manual debugging you can run `npm start` or `npm run start:stdio`, which starts the stdio transport and waits for a client connection on stdin/stdout.
125
+
126
+ #### HTTP/SSE Transport
127
+
128
+ The server can also run as an HTTP server using Server-Sent Events (SSE) for MCP communication.
129
+
130
+ **Basic Usage:**
131
+
132
+ ```bash
133
+ # Start HTTP server on default port 3000
134
+ npm run start:http
135
+
136
+ # Or with explicit transport flag
137
+ npm start -- --transport http
138
+
139
+ # Start HTTP server on custom port
140
+ npm start -- --transport http --port 8080
141
+
142
+ # Short form
143
+ npm start -- -t http -p 8080
144
+ ```
145
+
146
+ **Command-Line Options:**
147
+
148
+ - `--transport` or `-t`: Transport mode (`stdio` or `http`). Default: `stdio`
149
+ - `--port` or `-p`: HTTP server port (only used with HTTP transport). Default: `3000`
150
+
151
+ **HTTP Endpoint:**
152
+ When running in HTTP mode, the server listens on:
153
+
154
+ - `http://localhost:<port>/message` - SSE endpoint for MCP communication
155
+
156
+ **Development Mode:**
157
+
158
+ ```bash
159
+ # Build and start with stdio (default)
160
+ npm run dev
161
+
162
+ # Build and start with HTTP transport
163
+ npm run dev:http
164
+ ```
165
+
166
+ ### Available Tools
167
+
168
+ The server exposes the following tools through the MCP interface:
169
+
170
+ #### Storage Pool Tools
171
+
172
+ 1. **storage_pool_create** - Create a new storage pool
173
+ - Inputs: projectId, location, storagePoolId, capacityGib, serviceLevel, description (optional), labels (optional), networkConfig (optional)
174
+
175
+ 2. **storage_pool_delete** - Delete an existing storage pool
176
+ - Inputs: projectId, location, storagePoolId, force (optional)
177
+
178
+ 3. **storage_pool_get** - Get details about a specific storage pool
179
+ - Inputs: projectId, location, storagePoolId
180
+
181
+ 4. **storage_pool_list** - List all storage pools in a project/location
182
+ - Inputs: projectId, location, filter (optional), pageSize (optional), pageToken (optional)
183
+
184
+ 5. **storage_pool_update** - Update a storage pool's properties
185
+ - Inputs: projectId, location, storagePoolId, capacityGib (optional), description (optional), labels (optional)
186
+
187
+ #### Operation Tools
188
+
189
+ 1. **operation_get** - Get details of a long-running operation
190
+ - Inputs: operationName (the full name of the operation)
191
+
192
+ 2. **operation_cancel** - Cancel an in-progress operation
193
+ - Inputs: operationName (the full name of the operation)
194
+
195
+ 3. **operation_list** - List operations in a project/location
196
+ - Inputs: projectId, location, filter (optional), pageSize (optional), pageToken (optional)
197
+
198
+ #### Volume Tools
199
+
200
+ 1. **volume_create** - Create a new volume in a storage pool
201
+ - Inputs: projectId, location, storagePoolId, volumeId, capacityGib, shareProtocols, description (optional), labels (optional), exportPolicy (optional)
202
+
203
+ 2. **volume_delete** - Delete an existing volume
204
+ - Inputs: projectId, location, storagePoolId, volumeId, force (optional)
205
+
206
+ 3. **volume_get** - Get details about a specific volume
207
+ - Inputs: projectId, location, storagePoolId, volumeId
208
+
209
+ 4. **volume_list** - List all volumes in a storage pool
210
+ - Inputs: projectId, location, storagePoolId, filter (optional), pageSize (optional), pageToken (optional)
211
+
212
+ 5. **volume_update** - Update a volume's properties
213
+ - Inputs: projectId, location, storagePoolId, volumeId, capacityGib (optional), description (optional), labels (optional), exportPolicy (optional)
214
+
215
+ #### Snapshot Tools
216
+
217
+ 1. **snapshot_create** - Create a new snapshot of a volume
218
+ - Inputs: projectId, location, storagePoolId, volumeId, snapshotId, description (optional)
219
+
220
+ 2. **snapshot_delete** - Delete an existing snapshot
221
+ - Inputs: projectId, location, storagePoolId, volumeId, snapshotId
222
+
223
+ 3. **snapshot_get** - Get details about a specific snapshot
224
+ - Inputs: projectId, location, storagePoolId, volumeId, snapshotId
225
+
226
+ 4. **snapshot_list** - List all snapshots for a volume
227
+ - Inputs: projectId, location, storagePoolId, volumeId, filter (optional), pageSize (optional), pageToken (optional)
228
+
229
+ 5. **snapshot_revert_volume** - Revert a volume to a specific snapshot
230
+ - Inputs: projectId, location, storagePoolId, volumeId, snapshotId
231
+
232
+ #### Backup Vault Tools
233
+
234
+ 1. **gcnv_backup_vault_create** - Create a new backup vault
235
+ - Inputs: projectId, location, backupVaultId, description (optional), labels (optional)
236
+
237
+ 2. **gcnv_backup_vault_delete** - Delete an existing backup vault
238
+ - Inputs: projectId, location, backupVaultId, force (optional)
239
+
240
+ 3. **gcnv_backup_vault_get** - Get details about a specific backup vault
241
+ - Inputs: projectId, location, backupVaultId
242
+
243
+ 4. **gcnv_backup_vault_list** - List all backup vaults in a project and location
244
+ - Inputs: projectId, location, filter (optional), pageSize (optional), pageToken (optional)
245
+
246
+ 5. **gcnv_backup_vault_update** - Update a backup vault's properties
247
+ - Inputs: projectId, location, backupVaultId, description (optional), labels (optional)
248
+
249
+ #### Backup Tools
250
+
251
+ 1. **gcnv_backup_create** - Create a new backup of a volume
252
+ - Inputs: projectId, location, backupVaultId, backupId, volumeName, description (optional), labels (optional)
253
+
254
+ 2. **gcnv_backup_delete** - Delete an existing backup
255
+ - Inputs: projectId, location, backupVaultId, backupId
256
+
257
+ 3. **gcnv_backup_get** - Get details about a specific backup
258
+ - Inputs: projectId, location, backupVaultId, backupId
259
+
260
+ 4. **gcnv_backup_list** - List all backups in a backup vault
261
+ - Inputs: projectId, location, backupVaultId, filter (optional), pageSize (optional), pageToken (optional)
262
+
263
+ 5. **gcnv_backup_restore** - Restore a backup to a new or existing volume
264
+ - Inputs: projectId, location, backupVaultId, backupId, targetStoragePoolId, targetVolumeId, restoreOption
265
+
266
+ ## Architecture
267
+
268
+ The project follows a modular architecture:
269
+
270
+ - **Server**: MCP server supporting both stdio and HTTP/SSE transports
271
+ - **Stdio Transport**: Default mode, links directly with Gemini CLI and other stdio-based MCP clients
272
+ - **HTTP/SSE Transport**: HTTP server mode for web-based MCP clients and remote access
273
+ - **Tools**: Defined using Zod schemas for input validation
274
+ - **Handlers**: Implementation for each tool's functionality
275
+ - **Factory Pattern**: Uses a factory for managing NetApp client instances with caching
276
+
277
+ ## Integrating with Chat AI Applications (e.g., Gemini)
278
+
279
+ To use the MCP server with Gemini CLI or other MCP-aware clients:
280
+
281
+ 1. **Link the Extension**
282
+ After building the project from source (or when relying on the published package via `npx @gcnv/gcnv-mcp-server@latest`), register the extension with the Gemini CLI. This enables Gemini to fork the stdio-based server on demand.
283
+
284
+ ```bash
285
+ gemini extension link .
286
+ ```
287
+
288
+ 2. **(Optional) Customize the Extension**
289
+ Edit `gemini-extension.json` if you need to pass environment variables or adjust the command/arguments that Gemini executes when launching the MCP server.
290
+
291
+ 3. **Verify the Registration**
292
+ Confirm that Gemini recognizes the MCP server:
293
+
294
+ ```bash
295
+ gemini mcp list
296
+ ```
297
+
298
+ 4. **Invoke Tools via Chat**
299
+ Trigger MCP interactions from Gemini. When a chat session or CLI command references the `gcnv-mcp` server, Gemini starts the `gcnv-mcp` CLI (from the published package via `npx`, or from your local `build/index.js` when linked from source) and communicates with it over stdio (default).
300
+ No extra launch step is necessary—the CLI takes care of process lifecycle each time the server is needed.
301
+
302
+ **Note**: For HTTP transport mode, you'll need to manually start the server and configure your MCP client to connect to the HTTP endpoint instead of using stdio.
303
+
304
+ 5. **Maintain Authentication**
305
+ Ensure the MCP process has access to Google Cloud credentials as outlined in the prerequisites.
306
+
307
+ For other chat AI applications, follow their documentation for linking stdio-based MCP servers; most can reuse the `gemini-extension.json` structure as a template.
308
+
309
+ ### Key Components
310
+
311
+ - `src/index.ts` - Main server setup and entry point
312
+ - `src/registry/register-tools.ts` - Tool registration
313
+ - `src/tools/storage-pool-tools.ts` - Storage pool tool definitions with schemas
314
+ - `src/tools/volume-tools.ts` - Volume tool definitions with schemas
315
+ - `src/tools/snapshot-tools.ts` - Snapshot tool definitions with schemas
316
+ - `src/tools/operation-tools.ts` - Operation tool definitions with schemas
317
+ - `src/tools/backup-vault-tools.ts` - Backup vault tool definitions with schemas
318
+ - `src/tools/backup-tools.ts` - Backup tool definitions with schemas
319
+ - `src/tools/handlers/storage-pool-handler.ts` - Storage pool tool implementation
320
+ - `src/tools/handlers/volume-handler.ts` - Volume tool implementation
321
+ - `src/tools/handlers/snapshot-handler.ts` - Snapshot tool implementation
322
+ - `src/tools/handlers/operation-handler.ts` - Operation tool implementation
323
+ - `src/tools/handlers/backup-vault-handler.ts` - Backup vault tool implementation
324
+ - `src/tools/handlers/backup-handler.ts` - Backup tool implementation
325
+ - `src/utils/netapp-client-factory.ts` - Factory for NetApp client creation
326
+
327
+ ## Development
328
+
329
+ ### Adding New Tools
330
+
331
+ 1. Define the tool schema in a new or existing file in the `src/tools` directory
332
+ 2. Implement the handler in the `src/tools/handlers` directory
333
+ 3. Register the tool in `src/registry/register-tools.ts`
334
+
335
+ ### Project Structure
336
+
337
+ ```plaintext
338
+ src/
339
+ ├── index.ts # Main entry point
340
+ ├── registry/
341
+ │ └── register-tools.ts # Tool registration
342
+ ├── tools/
343
+ │ ├── storage-pool-tools.ts # Storage pool tool definitions
344
+ │ ├── volume-tools.ts # Volume tool definitions
345
+ │ ├── snapshot-tools.ts # Snapshot tool definitions
346
+ │ ├── operation-tools.ts # Operation tool definitions
347
+ │ ├── backup-vault-tools.ts # Backup vault tool definitions
348
+ │ ├── backup-tools.ts # Backup tool definitions
349
+ │ └── handlers/
350
+ │ ├── storage-pool-handler.ts # Storage pool tool handlers
351
+ │ ├── volume-handler.ts # Volume tool handlers
352
+ │ ├── snapshot-handler.ts # Snapshot tool handlers
353
+ │ ├── operation-handler.ts # Operation tool handlers
354
+ │ ├── backup-vault-handler.ts # Backup vault tool handlers
355
+ │ └── backup-handler.ts # Backup tool handlers
356
+ ├── types/
357
+ │ └── tool.ts # TypeScript interfaces
358
+ └── utils/
359
+ └── netapp-client-factory.ts # NetApp client factory
360
+ ```
361
+
362
+ ## Dependencies
363
+
364
+ - `@modelcontextprotocol/sdk` - MCP server implementation
365
+ - `@google-cloud/netapp` - Google Cloud NetApp Volumes client library
366
+ - `zod` - Schema validation library
367
+
368
+ ## License
369
+
370
+ ISC
371
+
372
+ ## Contributing
373
+
374
+ Contributions are welcome! Please feel free to submit a Pull Request.
package/build/index.js ADDED
@@ -0,0 +1,185 @@
1
+ #!/usr/bin/env node
2
+ import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
3
+ import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
4
+ import { SSEServerTransport } from '@modelcontextprotocol/sdk/server/sse.js';
5
+ import { registerAllTools } from './registry/register-tools.js';
6
+ import { logger } from './logger.js';
7
+ const log = logger.child({});
8
/**
 * Serve MCP over the stdio transport and stay blocked until the transport
 * closes or raises a fatal error.
 *
 * @param {McpServer} mcpServer - Server instance with tools already registered.
 * @returns {Promise<void>} Resolves when the transport closes; rejects on a
 *     fatal transport error (benign stdin parse errors are swallowed).
 */
async function startStdioTransport(mcpServer) {
    const transport = new StdioServerTransport();
    await mcpServer.connect(transport);
    log.info('MCP Server listening on stdio');
    return new Promise((resolve, reject) => {
        // Preserve any handlers the SDK installed so we can chain into them.
        const chainedClose = transport.onclose;
        const chainedError = transport.onerror;
        // The stdio transport raises a SyntaxError when stdin carries a blank
        // line or otherwise malformed JSON (e.g. a user pressing Enter).
        // Such errors are non-fatal and must not take the server down.
        const isBenignParseError = (error) =>
            error instanceof SyntaxError &&
            (error.message ?? '').includes('Unexpected end of JSON input');
        transport.onclose = () => {
            chainedClose?.();
            resolve();
        };
        transport.onerror = (error) => {
            chainedError?.(error);
            if (isBenignParseError(error)) {
                log.debug({ err: error }, 'Ignored malformed/empty JSON on stdio');
                return;
            }
            log.error({ err: error }, 'Error on stdio transport');
            reject(error);
        };
    });
}
35
/**
 * Serve MCP over HTTP using Server-Sent Events (SSE).
 *
 * Routing:
 *   GET  /message             — opens an SSE stream; a fresh McpServer is built
 *                               per connection and bound to a new SSEServerTransport.
 *   POST /message?sessionId=… — delivers a client message to the transport that
 *                               owns that session.
 *
 * @param {McpServer} mcpServerTemplate - NOTE(review): currently unused; each
 *     GET connection constructs its own McpServer instead of reusing this one.
 * @param {number} [port=3000] - TCP port to listen on.
 * @returns {Promise<void>} Resolves when the HTTP server closes (including
 *     graceful shutdown on SIGINT/SIGTERM); rejects on a server error.
 */
async function startHttpTransport(mcpServerTemplate, port = 3000) {
    const http = await import('http');
    // Store transports by session ID to route POST requests
    const transports = new Map();
    const server = http.createServer((req, res) => {
        // Handle GET request to establish SSE stream
        if (req.method === 'GET' && req.url === '/message') {
            // `void` marks this async IIFE as intentionally fire-and-forget;
            // errors are handled inside rather than surfacing as rejections.
            void (async () => {
                try {
                    // Create a new server instance for each connection to avoid conflicts
                    const connectionServer = new McpServer({
                        name: 'gcnv-mcp',
                        version: '1.0.0',
                    });
                    // Register tools for this connection
                    registerAllTools(connectionServer);
                    // Create transport with the response object (not the server)
                    const transport = new SSEServerTransport('/message', res);
                    // Store transport by session ID for POST message routing
                    const sessionId = transport.sessionId;
                    transports.set(sessionId, transport);
                    // Set up cleanup handler so closed sessions do not leak map entries
                    transport.onclose = () => {
                        transports.delete(sessionId);
                    };
                    // Connect the server to the transport (this automatically calls start())
                    await connectionServer.connect(transport);
                }
                catch (error) {
                    log.error({ err: error }, 'Error handling HTTP connection');
                    // Only write a response if the SSE stream has not started yet.
                    if (!res.headersSent) {
                        res.writeHead(500, { 'Content-Type': 'text/plain' });
                        res.end('Internal Server Error');
                    }
                }
            })();
        }
        // Handle POST request to receive messages
        else if (req.method === 'POST' && req.url?.startsWith('/message')) {
            try {
                // Extract session ID from query parameter
                const url = new URL(req.url, `http://${req.headers.host || 'localhost'}`);
                const sessionId = url.searchParams.get('sessionId');
                if (!sessionId) {
                    res.writeHead(400, { 'Content-Type': 'text/plain' });
                    res.end('Missing sessionId parameter');
                    return;
                }
                const transport = transports.get(sessionId);
                if (!transport) {
                    res.writeHead(404, { 'Content-Type': 'text/plain' });
                    res.end('Session not found');
                    return;
                }
                // Parse request body by accumulating the raw chunks
                let body = '';
                req.on('data', (chunk) => {
                    body += chunk.toString();
                });
                req.on('end', () => {
                    void (async () => {
                        try {
                            // Empty body is passed through as undefined rather than parsed.
                            const parsedBody = body ? JSON.parse(body) : undefined;
                            await transport.handlePostMessage(req, res, parsedBody);
                        }
                        catch (error) {
                            log.error({ err: error }, 'Error handling POST message');
                            if (!res.headersSent) {
                                res.writeHead(500, { 'Content-Type': 'text/plain' });
                                res.end('Error handling request');
                            }
                        }
                    })();
                });
            }
            catch (error) {
                log.error({ err: error }, 'Error handling POST request');
                if (!res.headersSent) {
                    res.writeHead(500, { 'Content-Type': 'text/plain' });
                    res.end('Internal Server Error');
                }
            }
        }
        else {
            // Anything other than GET/POST on /message is not served.
            res.writeHead(404, { 'Content-Type': 'text/plain' });
            res.end('Not Found');
        }
    });
    server.listen(port, () => {
        log.info({ port }, `MCP Server listening on http://localhost:${port}/message`);
    });
    // Keep the returned promise pending until the server actually shuts down.
    await new Promise((resolve, reject) => {
        server.on('close', () => {
            resolve();
        });
        server.on('error', (error) => {
            reject(error);
        });
        // Handle graceful shutdown on the usual termination signals.
        process.on('SIGINT', () => {
            server.close(() => {
                resolve();
            });
        });
        process.on('SIGTERM', () => {
            server.close(() => {
                resolve();
            });
        });
    });
}
146
/**
 * Parse the CLI flags that select the transport.
 *
 * Supported flags, in both `--flag value` and GNU-style `--flag=value` forms:
 *   --transport | -t : 'stdio' (default) or 'http'
 *   --port      | -p : HTTP server port (only meaningful with the http transport)
 *
 * Unknown flags and invalid values are silently ignored, matching the
 * original behavior; a separate value token is only consumed when it is
 * actually accepted, so recovery from a missing value still works
 * (e.g. `--transport --port 8080` still sets the port).
 *
 * @returns {{ transport: 'stdio' | 'http', port: number | undefined }}
 */
function parseArgs() {
    const args = process.argv.slice(2);
    let transport = 'stdio';
    let port;
    for (let i = 0; i < args.length; i++) {
        let flag = args[i];
        let inlineValue;
        // Split `--flag=value` into its flag and inline value parts.
        const eq = flag.indexOf('=');
        if (flag.startsWith('--') && eq !== -1) {
            inlineValue = flag.slice(eq + 1);
            flag = flag.slice(0, eq);
        }
        if (flag === '--transport' || flag === '-t') {
            const value = inlineValue ?? args[i + 1];
            if (value === 'http' || value === 'stdio') {
                transport = value;
                if (inlineValue === undefined) {
                    i++; // consume the separate value token
                }
            }
        }
        else if (flag === '--port' || flag === '-p') {
            // Number.parseInt(undefined, 10) yields NaN, which is rejected below.
            const value = Number.parseInt(inlineValue ?? args[i + 1], 10);
            if (!Number.isNaN(value)) {
                port = value;
                if (inlineValue === undefined) {
                    i++; // consume the separate value token
                }
            }
        }
    }
    return { transport, port };
}
168
/**
 * Entry point: build the MCP server, register every tool, then hand control
 * to the transport selected on the command line (stdio by default).
 */
async function main() {
    const { transport, port } = parseArgs();
    const mcpServer = new McpServer({
        name: 'gcnv-mcp',
        version: '1.0.0',
    });
    registerAllTools(mcpServer);
    // Pick the runner for the requested transport; both resolve only when
    // the server has fully stopped.
    const run = transport === 'http'
        ? () => startHttpTransport(mcpServer, port)
        : () => startStdioTransport(mcpServer);
    await run();
}
main().catch((error) => {
    // Any unhandled failure is fatal: log it and exit non-zero so process
    // supervisors (and the Gemini CLI) can notice the crash.
    log.fatal({ err: error }, 'Fatal server error');
    process.exit(1);
});
@@ -0,0 +1,19 @@
1
import pino from 'pino';

// Treat anything other than an explicit 'production' NODE_ENV as development.
const isDev = process.env.NODE_ENV !== 'production';

/**
 * Build the log sink. Both branches write to file descriptor 2 (stderr) so
 * that stdout stays reserved for stdio MCP protocol traffic.
 */
function createDestination() {
    if (isDev) {
        // Pretty, colorized, human-readable output during development.
        return pino.transport({
            target: 'pino-pretty',
            options: {
                colorize: true,
                translateTime: 'SYS:standard',
                destination: 2,
            },
        });
    }
    // Raw JSON lines in production.
    return pino.destination({ dest: 2 });
}

export const logger = pino({
    // LOG_LEVEL wins when set; otherwise debug in dev, info in production.
    level: process.env.LOG_LEVEL || (isDev ? 'debug' : 'info'),
    base: { service: 'gcnv-mcp' },
    timestamp: pino.stdTimeFunctions.isoTime,
    // Keep credentials and auth headers out of the log stream.
    redact: ['request.auth', 'request.token', 'headers.authorization'],
}, createDestination());