@juspay/neurolink 8.28.0 → 8.30.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/README.md +23 -2
- package/dist/adapters/video/vertexVideoHandler.d.ts +12 -2
- package/dist/adapters/video/vertexVideoHandler.js +12 -2
- package/dist/core/baseProvider.d.ts +19 -0
- package/dist/core/baseProvider.js +174 -0
- package/dist/index.d.ts +3 -3
- package/dist/index.js +7 -1
- package/dist/lib/adapters/video/vertexVideoHandler.d.ts +12 -2
- package/dist/lib/adapters/video/vertexVideoHandler.js +12 -2
- package/dist/lib/core/baseProvider.d.ts +19 -0
- package/dist/lib/core/baseProvider.js +174 -0
- package/dist/lib/index.d.ts +3 -3
- package/dist/lib/index.js +7 -1
- package/dist/lib/mcp/auth/index.d.ts +6 -0
- package/dist/lib/mcp/auth/index.js +12 -0
- package/dist/lib/mcp/auth/oauthClientProvider.d.ts +93 -0
- package/dist/lib/mcp/auth/oauthClientProvider.js +326 -0
- package/dist/lib/mcp/auth/tokenStorage.d.ts +56 -0
- package/dist/lib/mcp/auth/tokenStorage.js +135 -0
- package/dist/lib/mcp/externalServerManager.d.ts +5 -1
- package/dist/lib/mcp/externalServerManager.js +84 -22
- package/dist/lib/mcp/httpRateLimiter.d.ts +152 -0
- package/dist/lib/mcp/httpRateLimiter.js +365 -0
- package/dist/lib/mcp/httpRetryHandler.d.ts +62 -0
- package/dist/lib/mcp/httpRetryHandler.js +154 -0
- package/dist/lib/mcp/index.d.ts +5 -0
- package/dist/lib/mcp/index.js +8 -0
- package/dist/lib/mcp/mcpClientFactory.d.ts +25 -2
- package/dist/lib/mcp/mcpClientFactory.js +206 -10
- package/dist/lib/mcp/toolRegistry.d.ts +1 -2
- package/dist/lib/mcp/toolRegistry.js +1 -5
- package/dist/lib/neurolink.js +3 -0
- package/dist/lib/providers/amazonBedrock.js +4 -1
- package/dist/lib/providers/ollama.js +4 -1
- package/dist/lib/sdk/toolRegistration.d.ts +3 -25
- package/dist/lib/types/cli.d.ts +42 -42
- package/dist/lib/types/externalMcp.d.ts +55 -3
- package/dist/lib/types/externalMcp.js +0 -1
- package/dist/lib/types/generateTypes.d.ts +37 -0
- package/dist/lib/types/hitlTypes.d.ts +38 -0
- package/dist/lib/types/index.d.ts +6 -8
- package/dist/lib/types/index.js +4 -4
- package/dist/lib/types/mcpTypes.d.ts +235 -27
- package/dist/lib/types/providers.d.ts +16 -16
- package/dist/lib/types/sdkTypes.d.ts +2 -2
- package/dist/lib/types/tools.d.ts +42 -3
- package/dist/lib/types/utilities.d.ts +19 -0
- package/dist/mcp/auth/index.d.ts +6 -0
- package/dist/mcp/auth/index.js +11 -0
- package/dist/mcp/auth/oauthClientProvider.d.ts +93 -0
- package/dist/mcp/auth/oauthClientProvider.js +325 -0
- package/dist/mcp/auth/tokenStorage.d.ts +56 -0
- package/dist/mcp/auth/tokenStorage.js +134 -0
- package/dist/mcp/externalServerManager.d.ts +5 -1
- package/dist/mcp/externalServerManager.js +84 -22
- package/dist/mcp/httpRateLimiter.d.ts +152 -0
- package/dist/mcp/httpRateLimiter.js +364 -0
- package/dist/mcp/httpRetryHandler.d.ts +62 -0
- package/dist/mcp/httpRetryHandler.js +153 -0
- package/dist/mcp/index.d.ts +5 -0
- package/dist/mcp/index.js +8 -0
- package/dist/mcp/mcpClientFactory.d.ts +25 -2
- package/dist/mcp/mcpClientFactory.js +206 -10
- package/dist/mcp/toolRegistry.d.ts +1 -2
- package/dist/mcp/toolRegistry.js +1 -5
- package/dist/neurolink.js +3 -0
- package/dist/providers/amazonBedrock.js +4 -1
- package/dist/providers/ollama.js +4 -1
- package/dist/sdk/toolRegistration.d.ts +3 -25
- package/dist/types/cli.d.ts +42 -42
- package/dist/types/externalMcp.d.ts +55 -3
- package/dist/types/externalMcp.js +0 -1
- package/dist/types/generateTypes.d.ts +37 -0
- package/dist/types/hitlTypes.d.ts +38 -0
- package/dist/types/index.d.ts +6 -8
- package/dist/types/index.js +4 -4
- package/dist/types/mcpTypes.d.ts +235 -27
- package/dist/types/providers.d.ts +16 -16
- package/dist/types/sdkTypes.d.ts +2 -2
- package/dist/types/tools.d.ts +42 -3
- package/dist/types/utilities.d.ts +19 -0
- package/package.json +2 -1
|
@@ -0,0 +1,365 @@
|
|
|
/**
 * HTTP Rate Limiter for MCP HTTP Transport
 * Implements token bucket algorithm for rate limiting
 * Provides fault tolerance and prevents server overload
 */
import { mcpLogger } from "../utils/logger.js";
/**
 * Default rate limit configuration
 * Provides sensible defaults for most MCP HTTP transport use cases
 *
 * NOTE(review): HTTPRateLimiter only logs requestsPerWindow / windowMs /
 * useTokenBucket in its constructor; the token-bucket fields (refillRate,
 * maxBurst) are what actually govern pacing — confirm the window fields are
 * consumed elsewhere.
 */
export const DEFAULT_RATE_LIMIT_CONFIG = {
    requestsPerWindow: 60, // nominal requests per window (logged only, see note above)
    windowMs: 60000, // window length in ms (logged only, see note above)
    useTokenBucket: true, // flag logged at construction time
    refillRate: 1, // tokens added to the bucket per second (steady-state rate)
    maxBurst: 10, // bucket capacity: maximum tokens, i.e. maximum burst size
};
|
|
/**
 * HTTPRateLimiter
 * Implements token bucket algorithm for rate limiting HTTP requests
 *
 * The token bucket algorithm works as follows:
 * - Tokens are added to the bucket at a fixed rate (refillRate per second)
 * - Each request consumes one token
 * - If no tokens are available, the request must wait
 * - Maximum tokens are capped at maxBurst to allow controlled bursting
 */
export class HTTPRateLimiter {
    // Current token balance; fractional values occur because refills are
    // computed from elapsed wall-clock time.
    tokens;
    // Epoch-ms timestamp of the last refill that actually added tokens.
    lastRefill;
    // Effective configuration: DEFAULT_RATE_LIMIT_CONFIG merged with overrides.
    config;
    // FIFO queue of { resolve, reject } pairs for callers awaiting a token.
    waitQueue = [];
    // True while a processQueue() drain loop is running (prevents duplicates).
    processingQueue = false;
    /**
     * @param config - Partial overrides merged over DEFAULT_RATE_LIMIT_CONFIG.
     *                 The bucket starts full (tokens = maxBurst).
     */
    constructor(config = {}) {
        this.config = { ...DEFAULT_RATE_LIMIT_CONFIG, ...config };
        this.tokens = this.config.maxBurst;
        this.lastRefill = Date.now();
        mcpLogger.debug(`[HTTPRateLimiter] Initialized with config:`, {
            requestsPerWindow: this.config.requestsPerWindow,
            windowMs: this.config.windowMs,
            useTokenBucket: this.config.useTokenBucket,
            refillRate: this.config.refillRate,
            maxBurst: this.config.maxBurst,
        });
    }
    /**
     * Refill tokens based on elapsed time since last refill
     * Tokens are added at the configured refillRate (tokens per second)
     */
    refillTokens() {
        const now = Date.now();
        const elapsedMs = now - this.lastRefill;
        const elapsedSeconds = elapsedMs / 1000;
        // Calculate tokens to add based on elapsed time and refill rate
        const tokensToAdd = elapsedSeconds * this.config.refillRate;
        if (tokensToAdd >= 1) {
            // Only refill if at least one token should be added.
            // lastRefill only advances on an actual refill, so fractional
            // accrual is not lost between frequent calls.
            const previousTokens = this.tokens;
            this.tokens = Math.min(this.config.maxBurst, this.tokens + tokensToAdd);
            this.lastRefill = now;
            if (this.tokens > previousTokens) {
                mcpLogger.debug(`[HTTPRateLimiter] Refilled tokens: ${previousTokens.toFixed(2)} -> ${this.tokens.toFixed(2)} (+${tokensToAdd.toFixed(2)})`);
            }
        }
    }
    /**
     * Acquire a token, waiting if necessary
     * This is the primary method for rate-limited operations
     *
     * NOTE(review): a new caller can grab a freshly refilled token via
     * tryAcquire() even while earlier callers sit in waitQueue — queued
     * requests can be overtaken. Confirm this queue-jumping is acceptable.
     *
     * @returns Promise that resolves when a token is acquired
     * @throws Error if the wait queue is too long
     */
    async acquire() {
        // First, try to acquire without waiting
        if (this.tryAcquire()) {
            return;
        }
        // Add to wait queue
        return new Promise((resolve, reject) => {
            this.waitQueue.push({ resolve, reject });
            mcpLogger.debug(`[HTTPRateLimiter] Request queued, queue length: ${this.waitQueue.length}`);
            // Start processing the queue if not already processing
            if (!this.processingQueue) {
                this.processQueue();
            }
        });
    }
    /**
     * Process the wait queue, granting tokens as they become available
     * Runs until the queue is empty; only one drain loop is active at a time
     * (guarded by processingQueue).
     */
    async processQueue() {
        if (this.processingQueue) {
            return;
        }
        this.processingQueue = true;
        while (this.waitQueue.length > 0) {
            // Refill tokens
            this.refillTokens();
            // If we have tokens, grant to next waiter
            if (this.tokens >= 1) {
                const waiter = this.waitQueue.shift();
                if (waiter) {
                    this.tokens -= 1;
                    mcpLogger.debug(`[HTTPRateLimiter] Token granted from queue, remaining: ${this.tokens.toFixed(2)}, queue: ${this.waitQueue.length}`);
                    waiter.resolve();
                }
            }
            else {
                // Calculate wait time until next token is available
                const tokensNeeded = 1 - this.tokens;
                const waitTimeMs = (tokensNeeded / this.config.refillRate) * 1000;
                // Floor of 10ms avoids a hot spin when the deficit is tiny.
                const actualWait = Math.max(10, Math.ceil(waitTimeMs));
                mcpLogger.debug(`[HTTPRateLimiter] Waiting ${actualWait}ms for token refill`);
                // Wait for the calculated time
                await this.sleep(actualWait);
            }
        }
        this.processingQueue = false;
    }
    /**
     * Sleep helper function
     * @param ms - Delay in milliseconds before the returned promise resolves
     */
    sleep(ms) {
        return new Promise((resolve) => setTimeout(resolve, ms));
    }
    /**
     * Try to acquire a token without waiting
     *
     * @returns true if a token was acquired, false otherwise
     */
    tryAcquire() {
        // Refill tokens based on elapsed time
        this.refillTokens();
        // Check if we have tokens available
        if (this.tokens >= 1) {
            this.tokens -= 1;
            mcpLogger.debug(`[HTTPRateLimiter] Token acquired, remaining: ${this.tokens.toFixed(2)}`);
            return true;
        }
        mcpLogger.debug(`[HTTPRateLimiter] No tokens available, current: ${this.tokens.toFixed(2)}`);
        return false;
    }
    /**
     * Handle rate limit response headers from server
     * Parses Retry-After header and returns wait time in milliseconds
     *
     * @param headers - Response headers from the server (must expose a
     *                  Headers-like .get(name) method)
     * @returns Wait time in milliseconds, or 0 if no rate limit headers found
     */
    handleRateLimitResponse(headers) {
        // Check for Retry-After header (standard HTTP 429 response)
        const retryAfter = headers.get("Retry-After");
        if (retryAfter) {
            // Retry-After can be either a number of seconds or an HTTP-date
            const seconds = parseInt(retryAfter, 10);
            if (!isNaN(seconds)) {
                // It's a number of seconds
                const waitTimeMs = seconds * 1000;
                mcpLogger.info(`[HTTPRateLimiter] Server requested retry after ${seconds} seconds`);
                return waitTimeMs;
            }
            else {
                // Try to parse as HTTP-date
                const retryDate = new Date(retryAfter);
                if (!isNaN(retryDate.getTime())) {
                    // Clamp to 0 in case the date is already in the past.
                    const waitTimeMs = Math.max(0, retryDate.getTime() - Date.now());
                    mcpLogger.info(`[HTTPRateLimiter] Server requested retry at ${retryDate.toISOString()} (${waitTimeMs}ms)`);
                    return waitTimeMs;
                }
            }
        }
        // Check for X-RateLimit-Reset header (common non-standard header)
        const rateLimitReset = headers.get("X-RateLimit-Reset");
        if (rateLimitReset) {
            const resetTimestamp = parseInt(rateLimitReset, 10);
            if (!isNaN(resetTimestamp)) {
                // Could be Unix timestamp (seconds) or milliseconds; values
                // above 1e12 are treated as already being in milliseconds.
                const resetTime = resetTimestamp > 1e12 ? resetTimestamp : resetTimestamp * 1000;
                const waitTimeMs = Math.max(0, resetTime - Date.now());
                mcpLogger.info(`[HTTPRateLimiter] Rate limit resets at ${new Date(resetTime).toISOString()} (${waitTimeMs}ms)`);
                return waitTimeMs;
            }
        }
        // Check for X-RateLimit-Remaining header
        const remaining = headers.get("X-RateLimit-Remaining");
        if (remaining === "0") {
            // No remaining requests, use default backoff
            const defaultBackoffMs = 1000;
            mcpLogger.info(`[HTTPRateLimiter] Rate limit exhausted, using default backoff: ${defaultBackoffMs}ms`);
            return defaultBackoffMs;
        }
        return 0;
    }
    /**
     * Get the number of remaining tokens
     *
     * @returns Current number of available tokens (refreshed before reading)
     */
    getRemainingTokens() {
        this.refillTokens();
        return this.tokens;
    }
    /**
     * Reset the rate limiter to initial state
     * Useful for testing or when server indicates rate limits have been reset
     * Pending waiters are rejected, not resolved.
     */
    reset() {
        this.tokens = this.config.maxBurst;
        this.lastRefill = Date.now();
        // Reject all pending waiters
        while (this.waitQueue.length > 0) {
            const waiter = this.waitQueue.shift();
            if (waiter) {
                waiter.reject(new Error("Rate limiter was reset"));
            }
        }
        mcpLogger.info(`[HTTPRateLimiter] Reset to initial state, tokens: ${this.tokens}`);
    }
    /**
     * Get current rate limiter statistics
     * @returns Snapshot { tokens, maxBurst, refillRate, queueLength, lastRefill }
     */
    getStats() {
        this.refillTokens();
        return {
            tokens: this.tokens,
            maxBurst: this.config.maxBurst,
            refillRate: this.config.refillRate,
            queueLength: this.waitQueue.length,
            lastRefill: new Date(this.lastRefill),
        };
    }
    /**
     * Update configuration dynamically
     * Useful when server provides rate limit information
     * Mutates the live config in place (shallow merge).
     */
    updateConfig(config) {
        Object.assign(this.config, config);
        mcpLogger.info(`[HTTPRateLimiter] Configuration updated:`, config);
    }
    /**
     * Get current configuration
     * @returns Shallow copy, so callers cannot mutate internal state
     */
    getConfig() {
        return { ...this.config };
    }
}
|
|
/**
 * RateLimiterManager
 * Manages multiple rate limiters for different servers
 * Each server can have its own rate limiting configuration
 */
export class RateLimiterManager {
    // serverId -> HTTPRateLimiter registry.
    limiters = new Map();
    /**
     * Get or create a rate limiter for a server
     *
     * @param serverId - Unique identifier for the server
     * @param config - Optional configuration for the rate limiter
     * @returns HTTPRateLimiter instance for the server
     */
    getLimiter(serverId, config) {
        const existing = this.limiters.get(serverId);
        if (existing) {
            // Refresh the existing limiter's configuration when one is given.
            if (config) {
                existing.updateConfig(config);
            }
            return existing;
        }
        const created = new HTTPRateLimiter(config);
        this.limiters.set(serverId, created);
        mcpLogger.debug(`[RateLimiterManager] Created rate limiter for server: ${serverId}`);
        return created;
    }
    /**
     * Check if a rate limiter exists for a server
     *
     * @param serverId - Unique identifier for the server
     * @returns true if a rate limiter exists for the server
     */
    hasLimiter(serverId) {
        return this.limiters.has(serverId);
    }
    /**
     * Remove a rate limiter for a server
     *
     * @param serverId - Unique identifier for the server
     */
    removeLimiter(serverId) {
        const target = this.limiters.get(serverId);
        if (!target) {
            return;
        }
        target.reset(); // Clean up any pending operations
        this.limiters.delete(serverId);
        mcpLogger.debug(`[RateLimiterManager] Removed rate limiter for server: ${serverId}`);
    }
    /**
     * Get all server IDs with active rate limiters
     *
     * @returns Array of server IDs
     */
    getServerIds() {
        return [...this.limiters.keys()];
    }
    /**
     * Get statistics for all rate limiters
     *
     * @returns Record of server IDs to their rate limiter statistics
     */
    getAllStats() {
        const result = {};
        for (const [serverId, limiter] of this.limiters.entries()) {
            result[serverId] = limiter.getStats();
        }
        return result;
    }
    /**
     * Reset all rate limiters
     */
    resetAll() {
        this.limiters.forEach((limiter) => limiter.reset());
        mcpLogger.info("[RateLimiterManager] Reset all rate limiters");
    }
    /**
     * Destroy all rate limiters and clean up resources
     * This should be called during application shutdown
     */
    destroyAll() {
        this.limiters.forEach((limiter) => limiter.reset());
        this.limiters.clear();
        mcpLogger.info("[RateLimiterManager] Destroyed all rate limiters");
    }
    /**
     * Get health summary for all rate limiters
     * @returns Aggregate view: limiter count, servers with waiting requests,
     *          total queued requests, and mean available tokens.
     */
    getHealthSummary() {
        const queuedServers = [];
        let queuedTotal = 0;
        let tokenSum = 0;
        for (const [serverId, limiter] of this.limiters.entries()) {
            const snapshot = limiter.getStats();
            tokenSum += snapshot.tokens;
            if (snapshot.queueLength > 0) {
                queuedServers.push(serverId);
                queuedTotal += snapshot.queueLength;
            }
        }
        const count = this.limiters.size;
        return {
            totalLimiters: count,
            serversWithQueuedRequests: queuedServers,
            totalQueuedRequests: queuedTotal,
            averageTokensAvailable: count > 0 ? tokenSum / count : 0,
        };
    }
}
|
|
/**
 * Global rate limiter manager instance
 * Use this for application-wide rate limiting management
 * Module-level singleton: every importer shares the same registry.
 */
export const globalRateLimiterManager = new RateLimiterManager();
//# sourceMappingURL=httpRateLimiter.js.map
|
|
@@ -0,0 +1,62 @@
|
|
|
/**
 * HTTP Retry Handler for MCP Transport
 *
 * Provides retry logic with exponential backoff and jitter
 * specifically designed for HTTP-based MCP transport connections.
 */
import type { HTTPRetryConfig } from "../types/mcpTypes.js";
/**
 * Default HTTP retry configuration
 * Used whenever a caller omits the optional config parameter below.
 */
export declare const DEFAULT_HTTP_RETRY_CONFIG: HTTPRetryConfig;
/**
 * Check if an HTTP status code is retryable based on configuration
 *
 * @param status - HTTP status code to check
 * @param config - HTTP retry configuration (defaults to DEFAULT_HTTP_RETRY_CONFIG)
 * @returns True if the status code should trigger a retry
 */
export declare function isRetryableStatusCode(status: number, config?: HTTPRetryConfig): boolean;
/**
 * Check if an error is retryable for HTTP operations
 *
 * Considers:
 * - Network errors (ECONNRESET, ENOTFOUND, ECONNREFUSED, ETIMEDOUT)
 * - Timeout errors
 * - HTTP status codes in the retryable list
 * - Fetch/network-related errors
 *
 * @param error - Error to check (non-object values are never retryable)
 * @param config - HTTP retry configuration (optional)
 * @returns True if the error is retryable
 */
export declare function isRetryableHTTPError(error: unknown, config?: HTTPRetryConfig): boolean;
/**
 * Execute an HTTP operation with retry logic
 *
 * Implements exponential backoff with jitter to avoid thundering herd problems.
 * Uses the calculateBackoffDelay function from the core retry handler for
 * consistent delay calculation across the codebase.
 *
 * @param operation - Async operation to execute with retries
 * @param config - Partial HTTP retry configuration (merged with defaults)
 * @returns Result of the operation
 * @throws Last error if all retry attempts fail
 *
 * @example
 * ```typescript
 * const result = await withHTTPRetry(
 *   async () => {
 *     const response = await fetch(url);
 *     if (!response.ok) {
 *       const error = new Error(`HTTP ${response.status}`) as Error & { status: number };
 *       error.status = response.status;
 *       throw error;
 *     }
 *     return response.json();
 *   },
 *   { maxAttempts: 5, initialDelay: 500 }
 * );
 * ```
 */
export declare function withHTTPRetry<T>(operation: () => Promise<T>, config?: Partial<HTTPRetryConfig>): Promise<T>;
|
@@ -0,0 +1,154 @@
|
|
|
/**
 * HTTP Retry Handler for MCP Transport
 *
 * Provides retry logic with exponential backoff and jitter
 * specifically designed for HTTP-based MCP transport connections.
 */
import { calculateBackoffDelay } from "../utils/retryHandler.js";
import { logger } from "../utils/logger.js";
/**
 * Default HTTP retry configuration
 * Status codes cover request timeout (408), throttling (429), and the
 * transient 5xx family (500/502/503/504).
 */
export const DEFAULT_HTTP_RETRY_CONFIG = {
    maxAttempts: 3, // total attempts, including the initial call
    initialDelay: 1000, // ms before the first retry
    maxDelay: 30000, // upper bound on any backoff delay (ms)
    backoffMultiplier: 2, // exponential growth factor per attempt
    retryableStatusCodes: [408, 429, 500, 502, 503, 504],
};
|
|
/**
 * Sleep utility for retry delays
 * Resolves (with no value) after the given number of milliseconds.
 */
function sleep(ms) {
    return new Promise((done) => {
        setTimeout(done, ms);
    });
}
|
|
/**
 * Check if an HTTP status code is retryable based on configuration
 *
 * @param status - HTTP status code to check
 * @param config - HTTP retry configuration
 * @returns True if the status code should trigger a retry
 */
export function isRetryableStatusCode(status, config = DEFAULT_HTTP_RETRY_CONFIG) {
    return config.retryableStatusCodes.some((code) => code === status);
}
|
|
/**
 * Check if an error is retryable for HTTP operations
 *
 * Considers:
 * - Network errors (ECONNRESET, ENOTFOUND, ECONNREFUSED, ETIMEDOUT)
 * - Timeout errors
 * - HTTP status codes in the retryable list
 * - Fetch/network-related errors
 *
 * @param error - Error to check
 * @param config - HTTP retry configuration (optional)
 * @returns True if the error is retryable
 */
export function isRetryableHTTPError(error, config = DEFAULT_HTTP_RETRY_CONFIG) {
    // Non-object values (strings, null, numbers) carry no retry signal.
    if (!error || typeof error !== "object") {
        return false;
    }
    const err = error;
    // Timeout-style failures: browser/undici names and Node error codes.
    if (err.name === "TimeoutError" || err.name === "AbortError") {
        return true;
    }
    if (err.code === "TIMEOUT" || err.code === "ETIMEDOUT") {
        return true;
    }
    // Low-level network failures reported via Node error codes.
    const networkCodes = [
        "ECONNRESET",
        "ENOTFOUND",
        "ECONNREFUSED",
        "ECONNABORTED",
        "EPIPE",
        "ENETUNREACH",
        "EHOSTUNREACH",
    ];
    if (networkCodes.includes(err.code)) {
        return true;
    }
    // fetch() surfaces network problems as TypeError with a descriptive message.
    if (err.name === "TypeError" && typeof err.message === "string") {
        const text = err.message.toLowerCase();
        const hints = ["fetch", "network", "connection"];
        if (hints.some((hint) => text.includes(hint))) {
            return true;
        }
    }
    // HTTP status attached directly to the error object.
    if (typeof err.status === "number") {
        return isRetryableStatusCode(err.status, config);
    }
    // HTTP status carried on an attached response object.
    if (err.response &&
        typeof err.response === "object" &&
        typeof err.response.status === "number") {
        return isRetryableStatusCode(err.response.status, config);
    }
    // 'statusCode' is an alternative property name used by some HTTP clients.
    if (typeof err.statusCode === "number") {
        return isRetryableStatusCode(err.statusCode, config);
    }
    return false;
}
|
|
/**
 * Execute an HTTP operation with retry logic
 *
 * Implements exponential backoff with jitter to avoid thundering herd problems.
 * Uses the calculateBackoffDelay function from the core retry handler for
 * consistent delay calculation across the codebase.
 *
 * @param operation - Async operation to execute with retries
 * @param config - Partial HTTP retry configuration (merged with defaults)
 * @returns Result of the operation
 * @throws Last error if all retry attempts fail
 *
 * @example
 * ```typescript
 * const result = await withHTTPRetry(
 *   async () => {
 *     const response = await fetch(url);
 *     if (!response.ok) {
 *       const error = new Error(`HTTP ${response.status}`) as Error & { status: number };
 *       error.status = response.status;
 *       throw error;
 *     }
 *     return response.json();
 *   },
 *   { maxAttempts: 5, initialDelay: 500 }
 * );
 * ```
 */
export async function withHTTPRetry(operation, config = {}) {
    // Caller overrides win over the module defaults (shallow merge).
    const mergedConfig = {
        ...DEFAULT_HTTP_RETRY_CONFIG,
        ...config,
    };
    let lastError;
    // attempt is 1-based; maxAttempts counts the initial call too.
    for (let attempt = 1; attempt <= mergedConfig.maxAttempts; attempt++) {
        try {
            return await operation();
        }
        catch (error) {
            lastError = error;
            // Don't retry if it's the last attempt
            if (attempt === mergedConfig.maxAttempts) {
                logger.debug(`HTTP retry: All ${mergedConfig.maxAttempts} attempts exhausted`);
                break;
            }
            // Check if we should retry this error
            if (!isRetryableHTTPError(error, mergedConfig)) {
                logger.debug(`HTTP retry: Non-retryable error encountered`, error instanceof Error ? error.message : String(error));
                break;
            }
            // Calculate delay using the shared backoff calculation
            // (final `true` argument enables jitter — TODO confirm against
            // calculateBackoffDelay's signature in utils/retryHandler.js)
            const delay = calculateBackoffDelay(attempt, mergedConfig.initialDelay, mergedConfig.backoffMultiplier, mergedConfig.maxDelay, true);
            const errorMessage = error instanceof Error ? error.message : String(error);
            logger.warn(`HTTP retry: Attempt ${attempt}/${mergedConfig.maxAttempts} failed: ${errorMessage}. Retrying in ${Math.round(delay)}ms...`);
            await sleep(delay);
        }
    }
    // All attempts failed or a non-retryable error broke the loop.
    throw lastError;
}
//# sourceMappingURL=httpRetryHandler.js.map
|
package/dist/lib/mcp/index.d.ts
CHANGED
|
@@ -5,6 +5,11 @@
|
|
|
5
5
|
*/
|
|
6
6
|
import type { McpMetadata } from "../types/mcpTypes.js";
|
|
7
7
|
export { mcpLogger } from "../utils/logger.js";
|
|
8
|
+
export type { RateLimitConfig, HTTPRetryConfig, OAuthTokens, TokenStorage, MCPOAuthConfig, OAuthClientInformation, AuthorizationUrlResult, TokenExchangeRequest, } from "../types/mcpTypes.js";
|
|
9
|
+
export { HTTPRateLimiter, RateLimiterManager, globalRateLimiterManager, DEFAULT_RATE_LIMIT_CONFIG, } from "./httpRateLimiter.js";
|
|
10
|
+
export { DEFAULT_HTTP_RETRY_CONFIG, isRetryableStatusCode, isRetryableHTTPError, withHTTPRetry, } from "./httpRetryHandler.js";
|
|
11
|
+
export { InMemoryTokenStorage, FileTokenStorage, isTokenExpired, calculateExpiresAt, NeuroLinkOAuthProvider, createOAuthProviderFromConfig, } from "./auth/index.js";
|
|
12
|
+
export { MCPCircuitBreaker, CircuitBreakerManager, globalCircuitBreakerManager, } from "./mcpCircuitBreaker.js";
|
|
8
13
|
/**
|
|
9
14
|
* Initialize the MCP ecosystem - simplified
|
|
10
15
|
*/
|
package/dist/lib/mcp/index.js
CHANGED
|
@@ -1,4 +1,12 @@
|
|
|
1
1
|
export { mcpLogger } from "../utils/logger.js";
|
|
2
|
+
// HTTP Rate Limiter
|
|
3
|
+
export { HTTPRateLimiter, RateLimiterManager, globalRateLimiterManager, DEFAULT_RATE_LIMIT_CONFIG, } from "./httpRateLimiter.js";
|
|
4
|
+
// HTTP Retry Handler
|
|
5
|
+
export { DEFAULT_HTTP_RETRY_CONFIG, isRetryableStatusCode, isRetryableHTTPError, withHTTPRetry, } from "./httpRetryHandler.js";
|
|
6
|
+
// OAuth Authentication
|
|
7
|
+
export { InMemoryTokenStorage, FileTokenStorage, isTokenExpired, calculateExpiresAt, NeuroLinkOAuthProvider, createOAuthProviderFromConfig, } from "./auth/index.js";
|
|
8
|
+
// Circuit Breaker
|
|
9
|
+
export { MCPCircuitBreaker, CircuitBreakerManager, globalCircuitBreakerManager, } from "./mcpCircuitBreaker.js";
|
|
2
10
|
/**
|
|
3
11
|
* Initialize the MCP ecosystem - simplified
|
|
4
12
|
*/
|
|
@@ -1,12 +1,13 @@
|
|
|
1
1
|
/**
|
|
2
2
|
* MCP Client Factory
|
|
3
3
|
* Creates and manages MCP clients for external servers
|
|
4
|
-
* Supports stdio, SSE, and
|
|
4
|
+
* Supports stdio, SSE, WebSocket, and HTTP transports
|
|
5
|
+
* Enhanced with retry, rate limiting, and OAuth 2.1 support
|
|
5
6
|
*/
|
|
6
7
|
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
|
|
7
8
|
import type { Transport } from "@modelcontextprotocol/sdk/shared/transport.js";
|
|
8
9
|
import type { ClientCapabilities } from "@modelcontextprotocol/sdk/types.js";
|
|
9
|
-
import { ChildProcess } from "child_process";
|
|
10
|
+
import { type ChildProcess } from "child_process";
|
|
10
11
|
import type { MCPTransportType } from "../types/externalMcp.js";
|
|
11
12
|
import type { MCPServerInfo, MCPClientResult } from "../types/mcpTypes.js";
|
|
12
13
|
/**
|
|
@@ -18,6 +19,7 @@ export declare class MCPClientFactory {
|
|
|
18
19
|
private static readonly DEFAULT_CAPABILITIES;
|
|
19
20
|
/**
|
|
20
21
|
* Create an MCP client for the given server configuration
|
|
22
|
+
* Enhanced with retry logic, rate limiting, and circuit breaker protection
|
|
21
23
|
*/
|
|
22
24
|
static createClient(config: MCPServerInfo, timeout?: number): Promise<MCPClientResult>;
|
|
23
25
|
/**
|
|
@@ -40,6 +42,27 @@ export declare class MCPClientFactory {
|
|
|
40
42
|
* Create WebSocket transport
|
|
41
43
|
*/
|
|
42
44
|
private static createWebSocketTransport;
|
|
45
|
+
/**
|
|
46
|
+
* Create HTTP transport (Streamable HTTP)
|
|
47
|
+
* Enhanced with OAuth 2.1, rate limiting, and configurable timeouts
|
|
48
|
+
*/
|
|
49
|
+
private static createHTTPTransport;
|
|
50
|
+
/**
|
|
51
|
+
* Create a fetch wrapper with timeout support
|
|
52
|
+
*/
|
|
53
|
+
private static createFetchWithTimeout;
|
|
54
|
+
/**
|
|
55
|
+
* Create an enhanced fetch function with timeout and optional retry
|
|
56
|
+
*/
|
|
57
|
+
private static createEnhancedFetch;
|
|
58
|
+
/**
|
|
59
|
+
* Set up OAuth provider if configured
|
|
60
|
+
*/
|
|
61
|
+
private static setupAuthProvider;
|
|
62
|
+
/**
|
|
63
|
+
* Get authorization header based on auth configuration
|
|
64
|
+
*/
|
|
65
|
+
private static getAuthorizationHeader;
|
|
43
66
|
/**
|
|
44
67
|
* Perform MCP handshake and get server capabilities
|
|
45
68
|
*/
|