@juspay/neurolink 9.60.0 → 9.61.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -300,14 +300,42 @@ exit 127
300
300
  writeFileSync(TRAMPOLINE_PATH, script, { mode: 0o755 });
301
301
  chmodSync(TRAMPOLINE_PATH, 0o755);
302
302
  }
303
+ /**
304
+ * Check whether a pnpm binary can install into the global store.
305
+ *
306
+ * Multiple pnpm major versions can coexist (e.g., standalone v8 + nvm v10).
307
+ * They use different store layouts (`store/v3` vs `store/v10`), so a
308
+ * pnpm that passes `--version` may still fail `pnpm add -g` with
309
+ * ERR_PNPM_UNEXPECTED_STORE. We detect this by running `pnpm root -g` and
310
+ * checking whether the resolved global root directory actually exists on disk.
311
+ */
312
+ function canInstallGlobally(pnpmPath) {
313
+ try {
314
+ const { execFileSync } = _require("node:child_process");
315
+ const { existsSync } = _require("fs");
316
+ const globalRoot = execFileSync(pnpmPath, ["root", "-g"], {
317
+ encoding: "utf8",
318
+ timeout: 10_000,
319
+ stdio: ["ignore", "pipe", "ignore"],
320
+ }).trim();
321
+ // If the global root exists, this pnpm version is compatible with
322
+ // the current store layout and can install packages there.
323
+ return !!globalRoot && existsSync(globalRoot);
324
+ }
325
+ catch {
326
+ return false;
327
+ }
328
+ }
303
329
  /**
304
330
  * Resolve the `pnpm` binary defensively.
305
331
  *
306
- * Tries multiple candidates in order of preference and validates each by
307
- * running `--version`. Returns the first one that actually works, along
308
- * with a list of all candidates tried (for diagnostics). This defends
309
- * against environments where `which pnpm` returns a broken shim or an
310
- * incompatible version.
332
+ * Tries multiple candidates in order of preference. Each candidate must:
333
+ * 1. Respond to `pnpm --version` (binary works)
334
+ * 2. Have a compatible global store (`pnpm root -g` points to an existing dir)
335
+ *
336
+ * This defends against environments with multiple pnpm major versions
337
+ * (e.g., standalone v8 + nvm v10) where the wrong one would fail with
338
+ * ERR_PNPM_UNEXPECTED_STORE on `pnpm add -g`.
311
339
  *
312
340
  * Honors `NEUROLINK_PNPM_PATH` as an escape hatch.
313
341
  */
@@ -348,17 +376,32 @@ function resolveFullPnpmPath() {
348
376
  seen.add(p);
349
377
  return true;
350
378
  });
351
- // Probe each candidate
379
+ // Probe each candidate: must pass --version AND have a compatible global store
352
380
  const tried = unique.map((path) => {
353
381
  const version = probeBinVersion(path);
354
- return { path, version, working: version !== undefined };
382
+ const working = version !== undefined;
383
+ const globalStoreOk = working ? canInstallGlobally(path) : false;
384
+ return { path, version, working, globalStoreOk };
355
385
  });
356
- const working = tried.find((r) => r.working);
357
- if (working) {
386
+ // Prefer a candidate that can actually install globally
387
+ const fullyWorking = tried.find((r) => r.working && r.globalStoreOk);
388
+ if (fullyWorking) {
389
+ return {
390
+ bin: fullyWorking.path,
391
+ resolved: true,
392
+ version: fullyWorking.version,
393
+ tried,
394
+ };
395
+ }
396
+ // Fall back to any candidate that at least responds to --version
397
+ // (better than nothing — the install may still fail, but will be
398
+ // caught and suppressed by the caller)
399
+ const anyWorking = tried.find((r) => r.working);
400
+ if (anyWorking) {
358
401
  return {
359
- bin: working.path,
402
+ bin: anyWorking.path,
360
403
  resolved: true,
361
- version: working.version,
404
+ version: anyWorking.version,
362
405
  tried,
363
406
  };
364
407
  }
@@ -17,10 +17,21 @@ export const PDF_IMAGE_GENERATION_MODELS = [
17
17
  ];
18
18
  // Global Location Models
19
19
  // Models that require global location configuration (uses aiplatform.googleapis.com instead of region-specific endpoints)
20
+ // Includes Gemini 3.x text and image models, which are only available via the global endpoint on Vertex AI.
21
+ // IMAGE_GENERATION_MODELS is spread in to keep the two lists from drifting:
22
+ // any new image-gen model added there is automatically routed to global here.
20
23
  export const GLOBAL_LOCATION_MODELS = [
21
- "gemini-3-pro-image-preview",
22
- "gemini-2.5-flash-image",
23
- "gemini-3.1-flash-image-preview",
24
+ // Image generation (sourced from IMAGE_GENERATION_MODELS)
25
+ ...IMAGE_GENERATION_MODELS,
26
+ // Gemini 3.1 text models (global-only)
27
+ "gemini-3.1-pro-preview",
28
+ "gemini-3.1-flash-lite-preview",
29
+ "gemini-3.1-pro-preview-customtools",
30
+ // Gemini 3 text models (global-only)
31
+ "gemini-3-pro-preview",
32
+ "gemini-3-pro-preview-11-2025",
33
+ "gemini-3-pro-latest",
34
+ "gemini-3-flash-preview",
24
35
  ];
25
36
  // Core AI Generation Defaults
26
37
  export const DEFAULT_MAX_TOKENS = undefined; // Unlimited by default - let providers decide their own limits
@@ -17,10 +17,21 @@ export const PDF_IMAGE_GENERATION_MODELS = [
17
17
  ];
18
18
  // Global Location Models
19
19
  // Models that require global location configuration (uses aiplatform.googleapis.com instead of region-specific endpoints)
20
+ // Includes Gemini 3.x text and image models, which are only available via the global endpoint on Vertex AI.
21
+ // IMAGE_GENERATION_MODELS is spread in to keep the two lists from drifting:
22
+ // any new image-gen model added there is automatically routed to global here.
20
23
  export const GLOBAL_LOCATION_MODELS = [
21
- "gemini-3-pro-image-preview",
22
- "gemini-2.5-flash-image",
23
- "gemini-3.1-flash-image-preview",
24
+ // Image generation (sourced from IMAGE_GENERATION_MODELS)
25
+ ...IMAGE_GENERATION_MODELS,
26
+ // Gemini 3.1 text models (global-only)
27
+ "gemini-3.1-pro-preview",
28
+ "gemini-3.1-flash-lite-preview",
29
+ "gemini-3.1-pro-preview-customtools",
30
+ // Gemini 3 text models (global-only)
31
+ "gemini-3-pro-preview",
32
+ "gemini-3-pro-preview-11-2025",
33
+ "gemini-3-pro-latest",
34
+ "gemini-3-flash-preview",
24
35
  ];
25
36
  // Core AI Generation Defaults
26
37
  export const DEFAULT_MAX_TOKENS = undefined; // Unlimited by default - let providers decide their own limits
@@ -5,6 +5,9 @@
5
5
  import { randomBytes, createHash } from "crypto";
6
6
  import { InMemoryTokenStorage, isTokenExpired, calculateExpiresAt, } from "./tokenStorage.js";
7
7
  import { logger } from "../../utils/logger.js";
8
+ import { withTimeout } from "../../utils/errorHandling.js";
9
+ /** Default timeout for OAuth token operations (30 seconds) */
10
+ const OAUTH_TOKEN_TIMEOUT_MS = 30000;
8
11
  /**
9
12
  * NeuroLink OAuth Provider for MCP HTTP Transport
10
13
  * Handles OAuth 2.1 authentication flow with optional PKCE support
@@ -147,15 +150,15 @@ export class NeuroLinkOAuthProvider {
147
150
  if (codeVerifier) {
148
151
  body.set("code_verifier", codeVerifier);
149
152
  }
150
- // Request tokens
151
- const response = await fetch(this.config.tokenUrl, {
153
+ // Request tokens with timeout protection
154
+ const response = await withTimeout(fetch(this.config.tokenUrl, {
152
155
  method: "POST",
153
156
  headers: {
154
157
  "Content-Type": "application/x-www-form-urlencoded",
155
158
  Accept: "application/json",
156
159
  },
157
160
  body: body.toString(),
158
- });
161
+ }), OAUTH_TOKEN_TIMEOUT_MS, new Error(`OAuth token exchange timed out after ${OAUTH_TOKEN_TIMEOUT_MS}ms`));
159
162
  if (!response.ok) {
160
163
  const errorText = await response.text();
161
164
  throw new Error(`Token exchange failed: ${response.status} ${response.statusText} - ${errorText}`);
@@ -186,14 +189,15 @@ export class NeuroLinkOAuthProvider {
186
189
  if (this.config.clientSecret) {
187
190
  body.set("client_secret", this.config.clientSecret);
188
191
  }
189
- const response = await fetch(this.config.tokenUrl, {
192
+ // Refresh tokens with timeout protection
193
+ const response = await withTimeout(fetch(this.config.tokenUrl, {
190
194
  method: "POST",
191
195
  headers: {
192
196
  "Content-Type": "application/x-www-form-urlencoded",
193
197
  Accept: "application/json",
194
198
  },
195
199
  body: body.toString(),
196
- });
200
+ }), OAUTH_TOKEN_TIMEOUT_MS, new Error(`OAuth token refresh timed out after ${OAUTH_TOKEN_TIMEOUT_MS}ms`));
197
201
  if (!response.ok) {
198
202
  const errorText = await response.text();
199
203
  throw new Error(`Token refresh failed: ${response.status} ${response.statusText} - ${errorText}`);
@@ -227,13 +231,14 @@ export class NeuroLinkOAuthProvider {
227
231
  body.set("client_secret", this.config.clientSecret);
228
232
  }
229
233
  try {
230
- await fetch(revocationUrl, {
234
+ // Revoke tokens with timeout protection
235
+ await withTimeout(fetch(revocationUrl, {
231
236
  method: "POST",
232
237
  headers: {
233
238
  "Content-Type": "application/x-www-form-urlencoded",
234
239
  },
235
240
  body: body.toString(),
236
- });
241
+ }), OAUTH_TOKEN_TIMEOUT_MS, new Error(`OAuth token revocation timed out after ${OAUTH_TOKEN_TIMEOUT_MS}ms`));
237
242
  }
238
243
  catch (error) {
239
244
  logger.warn(`[NeuroLinkOAuthProvider] Token revocation failed: ${error instanceof Error ? error.message : String(error)}`);
@@ -2,12 +2,54 @@
2
2
  * Provider-specific token limit utilities
3
3
  * Provides safe maxTokens values based on provider and model capabilities
4
4
  */
5
- import { PROVIDER_MAX_TOKENS } from "../core/constants.js";
5
+ import { PROVIDER_MAX_TOKENS, IMAGE_GENERATION_MODELS, } from "../core/constants.js";
6
6
  import { logger } from "./logger.js";
7
+ // Gemini 3 models and Gemini 2.5 image models have a hard limit of 32768 output tokens
8
+ const GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS = 32768;
9
+ /**
10
+ * Check if a model has the restricted 32768 output token limit
11
+ * This applies to:
12
+ * - All Gemini 3 models (gemini-3-flash, gemini-3-pro, etc.)
13
+ * - All Gemini 2.5 image generation models (gemini-2.5-flash-image)
14
+ */
15
+ function hasRestrictedOutputLimit(model) {
16
+ if (!model) {
17
+ return false;
18
+ }
19
+ // Check for Gemini 3 models
20
+ if (model.includes("gemini-3")) {
21
+ return true;
22
+ }
23
+ // Check for image generation models (includes gemini-2.5-flash-image)
24
+ if (IMAGE_GENERATION_MODELS.some((m) => model.includes(m))) {
25
+ return true;
26
+ }
27
+ return false;
28
+ }
7
29
  /**
8
30
  * Get the safe maximum tokens for a provider and model
9
31
  */
10
32
  export function getSafeMaxTokens(provider, model, requestedMaxTokens) {
33
+ // CRITICAL: Gemini 3 models AND image generation models have a hard limit of 32768 output tokens
34
+ // This check must happen FIRST, before any other logic, because these models
35
+ // will reject requests with maxOutputTokens > 32768
36
+ const isRestrictedModel = hasRestrictedOutputLimit(model);
37
+ if (isRestrictedModel) {
38
+ // Explicit undefined/null check so a caller-supplied 0 is preserved
39
+ // (truthy checks would treat 0 as "unset" and silently fall back to the cap).
40
+ if (requestedMaxTokens !== undefined &&
41
+ requestedMaxTokens !== null &&
42
+ requestedMaxTokens > GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS) {
43
+ logger.warn(`Requested maxTokens ${requestedMaxTokens} exceeds ${model} limit of ${GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS}. Using ${GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS} instead.`);
44
+ return GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS;
45
+ }
46
+ // If no maxTokens specified, use the restricted limit as default
47
+ if (requestedMaxTokens === undefined || requestedMaxTokens === null) {
48
+ return GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS;
49
+ }
50
+ // Otherwise, use the requested value (it's within limits, including 0)
51
+ return requestedMaxTokens;
52
+ }
11
53
  // Get provider-specific limits
12
54
  const providerLimits = PROVIDER_MAX_TOKENS[provider];
13
55
  if (!providerLimits) {
@@ -5,6 +5,9 @@
5
5
  import { randomBytes, createHash } from "crypto";
6
6
  import { InMemoryTokenStorage, isTokenExpired, calculateExpiresAt, } from "./tokenStorage.js";
7
7
  import { logger } from "../../utils/logger.js";
8
+ import { withTimeout } from "../../utils/errorHandling.js";
9
+ /** Default timeout for OAuth token operations (30 seconds) */
10
+ const OAUTH_TOKEN_TIMEOUT_MS = 30000;
8
11
  /**
9
12
  * NeuroLink OAuth Provider for MCP HTTP Transport
10
13
  * Handles OAuth 2.1 authentication flow with optional PKCE support
@@ -147,15 +150,15 @@ export class NeuroLinkOAuthProvider {
147
150
  if (codeVerifier) {
148
151
  body.set("code_verifier", codeVerifier);
149
152
  }
150
- // Request tokens
151
- const response = await fetch(this.config.tokenUrl, {
153
+ // Request tokens with timeout protection
154
+ const response = await withTimeout(fetch(this.config.tokenUrl, {
152
155
  method: "POST",
153
156
  headers: {
154
157
  "Content-Type": "application/x-www-form-urlencoded",
155
158
  Accept: "application/json",
156
159
  },
157
160
  body: body.toString(),
158
- });
161
+ }), OAUTH_TOKEN_TIMEOUT_MS, new Error(`OAuth token exchange timed out after ${OAUTH_TOKEN_TIMEOUT_MS}ms`));
159
162
  if (!response.ok) {
160
163
  const errorText = await response.text();
161
164
  throw new Error(`Token exchange failed: ${response.status} ${response.statusText} - ${errorText}`);
@@ -186,14 +189,15 @@ export class NeuroLinkOAuthProvider {
186
189
  if (this.config.clientSecret) {
187
190
  body.set("client_secret", this.config.clientSecret);
188
191
  }
189
- const response = await fetch(this.config.tokenUrl, {
192
+ // Refresh tokens with timeout protection
193
+ const response = await withTimeout(fetch(this.config.tokenUrl, {
190
194
  method: "POST",
191
195
  headers: {
192
196
  "Content-Type": "application/x-www-form-urlencoded",
193
197
  Accept: "application/json",
194
198
  },
195
199
  body: body.toString(),
196
- });
200
+ }), OAUTH_TOKEN_TIMEOUT_MS, new Error(`OAuth token refresh timed out after ${OAUTH_TOKEN_TIMEOUT_MS}ms`));
197
201
  if (!response.ok) {
198
202
  const errorText = await response.text();
199
203
  throw new Error(`Token refresh failed: ${response.status} ${response.statusText} - ${errorText}`);
@@ -227,13 +231,14 @@ export class NeuroLinkOAuthProvider {
227
231
  body.set("client_secret", this.config.clientSecret);
228
232
  }
229
233
  try {
230
- await fetch(revocationUrl, {
234
+ // Revoke tokens with timeout protection
235
+ await withTimeout(fetch(revocationUrl, {
231
236
  method: "POST",
232
237
  headers: {
233
238
  "Content-Type": "application/x-www-form-urlencoded",
234
239
  },
235
240
  body: body.toString(),
236
- });
241
+ }), OAUTH_TOKEN_TIMEOUT_MS, new Error(`OAuth token revocation timed out after ${OAUTH_TOKEN_TIMEOUT_MS}ms`));
237
242
  }
238
243
  catch (error) {
239
244
  logger.warn(`[NeuroLinkOAuthProvider] Token revocation failed: ${error instanceof Error ? error.message : String(error)}`);
@@ -2,12 +2,54 @@
2
2
  * Provider-specific token limit utilities
3
3
  * Provides safe maxTokens values based on provider and model capabilities
4
4
  */
5
- import { PROVIDER_MAX_TOKENS } from "../core/constants.js";
5
+ import { PROVIDER_MAX_TOKENS, IMAGE_GENERATION_MODELS, } from "../core/constants.js";
6
6
  import { logger } from "./logger.js";
7
+ // Gemini 3 models and Gemini 2.5 image models have a hard limit of 32768 output tokens
8
+ const GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS = 32768;
9
+ /**
10
+ * Check if a model has the restricted 32768 output token limit
11
+ * This applies to:
12
+ * - All Gemini 3 models (gemini-3-flash, gemini-3-pro, etc.)
13
+ * - All Gemini 2.5 image generation models (gemini-2.5-flash-image)
14
+ */
15
+ function hasRestrictedOutputLimit(model) {
16
+ if (!model) {
17
+ return false;
18
+ }
19
+ // Check for Gemini 3 models
20
+ if (model.includes("gemini-3")) {
21
+ return true;
22
+ }
23
+ // Check for image generation models (includes gemini-2.5-flash-image)
24
+ if (IMAGE_GENERATION_MODELS.some((m) => model.includes(m))) {
25
+ return true;
26
+ }
27
+ return false;
28
+ }
7
29
  /**
8
30
  * Get the safe maximum tokens for a provider and model
9
31
  */
10
32
  export function getSafeMaxTokens(provider, model, requestedMaxTokens) {
33
+ // CRITICAL: Gemini 3 models AND image generation models have a hard limit of 32768 output tokens
34
+ // This check must happen FIRST, before any other logic, because these models
35
+ // will reject requests with maxOutputTokens > 32768
36
+ const isRestrictedModel = hasRestrictedOutputLimit(model);
37
+ if (isRestrictedModel) {
38
+ // Explicit undefined/null check so a caller-supplied 0 is preserved
39
+ // (truthy checks would treat 0 as "unset" and silently fall back to the cap).
40
+ if (requestedMaxTokens !== undefined &&
41
+ requestedMaxTokens !== null &&
42
+ requestedMaxTokens > GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS) {
43
+ logger.warn(`Requested maxTokens ${requestedMaxTokens} exceeds ${model} limit of ${GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS}. Using ${GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS} instead.`);
44
+ return GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS;
45
+ }
46
+ // If no maxTokens specified, use the restricted limit as default
47
+ if (requestedMaxTokens === undefined || requestedMaxTokens === null) {
48
+ return GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS;
49
+ }
50
+ // Otherwise, use the requested value (it's within limits, including 0)
51
+ return requestedMaxTokens;
52
+ }
11
53
  // Get provider-specific limits
12
54
  const providerLimits = PROVIDER_MAX_TOKENS[provider];
13
55
  if (!providerLimits) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@juspay/neurolink",
3
- "version": "9.60.0",
3
+ "version": "9.61.0",
4
4
  "packageManager": "pnpm@10.15.1",
5
5
  "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 13 providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
6
6
  "author": {