@juspay/neurolink 9.60.1 → 9.61.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -17,10 +17,21 @@ export const PDF_IMAGE_GENERATION_MODELS = [
 ];
 // Global Location Models
 // Models that require global location configuration (uses aiplatform.googleapis.com instead of region-specific endpoints)
+// Includes Gemini 3.x text and image models, which are only available via the global endpoint on Vertex AI.
+// IMAGE_GENERATION_MODELS is spread in to keep the two lists from drifting:
+// any new image-gen model added there is automatically routed to global here.
 export const GLOBAL_LOCATION_MODELS = [
-    "gemini-3-pro-image-preview",
-    "gemini-2.5-flash-image",
-    "gemini-3.1-flash-image-preview",
+    // Image generation (sourced from IMAGE_GENERATION_MODELS)
+    ...IMAGE_GENERATION_MODELS,
+    // Gemini 3.1 text models (global-only)
+    "gemini-3.1-pro-preview",
+    "gemini-3.1-flash-lite-preview",
+    "gemini-3.1-pro-preview-customtools",
+    // Gemini 3 text models (global-only)
+    "gemini-3-pro-preview",
+    "gemini-3-pro-preview-11-2025",
+    "gemini-3-pro-latest",
+    "gemini-3-flash-preview",
 ];
 // Core AI Generation Defaults
 export const DEFAULT_MAX_TOKENS = undefined; // Unlimited by default - let providers decide their own limits
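The comment above says global-location models must hit aiplatform.googleapis.com rather than a regional host. As a purely illustrative sketch of that routing (getVertexBaseUrl, its region parameter, and the import path are hypothetical, not the package's actual API):

    // Hypothetical sketch, not part of the package: route global-only models
    // to the location-agnostic endpoint described in the comment above.
    import { GLOBAL_LOCATION_MODELS } from "./core/constants.js"; // path assumed

    function getVertexBaseUrl(model, region) {
        // Substring match, mirroring the model.includes(...) checks this
        // package uses elsewhere (see hasRestrictedOutputLimit below).
        if (GLOBAL_LOCATION_MODELS.some((m) => model.includes(m))) {
            return "https://aiplatform.googleapis.com";
        }
        return `https://${region}-aiplatform.googleapis.com`;
    }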
@@ -5,6 +5,9 @@
 import { randomBytes, createHash } from "crypto";
 import { InMemoryTokenStorage, isTokenExpired, calculateExpiresAt, } from "./tokenStorage.js";
 import { logger } from "../../utils/logger.js";
+import { withTimeout } from "../../utils/errorHandling.js";
+/** Default timeout for OAuth token operations (30 seconds) */
+const OAUTH_TOKEN_TIMEOUT_MS = 30000;
 /**
  * NeuroLink OAuth Provider for MCP HTTP Transport
  * Handles OAuth 2.1 authentication flow with optional PKCE support
@@ -147,15 +150,15 @@ export class NeuroLinkOAuthProvider {
         if (codeVerifier) {
             body.set("code_verifier", codeVerifier);
         }
-        // Request tokens
-        const response = await fetch(this.config.tokenUrl, {
+        // Request tokens with timeout protection
+        const response = await withTimeout(fetch(this.config.tokenUrl, {
             method: "POST",
             headers: {
                 "Content-Type": "application/x-www-form-urlencoded",
                 Accept: "application/json",
             },
             body: body.toString(),
-        });
+        }), OAUTH_TOKEN_TIMEOUT_MS, new Error(`OAuth token exchange timed out after ${OAUTH_TOKEN_TIMEOUT_MS}ms`));
         if (!response.ok) {
             const errorText = await response.text();
             throw new Error(`Token exchange failed: ${response.status} ${response.statusText} - ${errorText}`);
@@ -186,14 +189,15 @@ export class NeuroLinkOAuthProvider {
         if (this.config.clientSecret) {
             body.set("client_secret", this.config.clientSecret);
         }
-        const response = await fetch(this.config.tokenUrl, {
+        // Refresh tokens with timeout protection
+        const response = await withTimeout(fetch(this.config.tokenUrl, {
             method: "POST",
             headers: {
                 "Content-Type": "application/x-www-form-urlencoded",
                 Accept: "application/json",
             },
             body: body.toString(),
-        });
+        }), OAUTH_TOKEN_TIMEOUT_MS, new Error(`OAuth token refresh timed out after ${OAUTH_TOKEN_TIMEOUT_MS}ms`));
         if (!response.ok) {
             const errorText = await response.text();
             throw new Error(`Token refresh failed: ${response.status} ${response.statusText} - ${errorText}`);
@@ -227,13 +231,14 @@ export class NeuroLinkOAuthProvider {
             body.set("client_secret", this.config.clientSecret);
         }
         try {
-            await fetch(revocationUrl, {
+            // Revoke tokens with timeout protection
+            await withTimeout(fetch(revocationUrl, {
                 method: "POST",
                 headers: {
                     "Content-Type": "application/x-www-form-urlencoded",
                 },
                 body: body.toString(),
-            });
+            }), OAUTH_TOKEN_TIMEOUT_MS, new Error(`OAuth token revocation timed out after ${OAUTH_TOKEN_TIMEOUT_MS}ms`));
         }
         catch (error) {
             logger.warn(`[NeuroLinkOAuthProvider] Token revocation failed: ${error instanceof Error ? error.message : String(error)}`);
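All three token endpoints (exchange, refresh, revocation) now share a 30-second guard through withTimeout(promise, ms, error). The helper itself lives in utils/errorHandling.js and is not part of this diff; a minimal sketch consistent with the call signature used above:

    // Sketch only, assuming the (promise, ms, error) signature seen in the
    // calls above - the real utils/errorHandling.js may differ.
    function withTimeout(promise, ms, timeoutError) {
        let timer;
        const deadline = new Promise((_, reject) => {
            // Reject with the caller-supplied Error once the deadline passes.
            timer = setTimeout(() => reject(timeoutError), ms);
        });
        // Whichever settles first wins; always clear the timer so a resolved
        // fetch doesn't leave a stray timeout pending.
        return Promise.race([promise, deadline]).finally(() => clearTimeout(timer));
    }

Note that this pattern rejects the awaiting caller but does not abort the in-flight request; cancelling the fetch itself would require wiring an AbortController through to fetch's signal option.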
@@ -2,12 +2,54 @@
  * Provider-specific token limit utilities
  * Provides safe maxTokens values based on provider and model capabilities
  */
-import { PROVIDER_MAX_TOKENS } from "../core/constants.js";
+import { PROVIDER_MAX_TOKENS, IMAGE_GENERATION_MODELS, } from "../core/constants.js";
 import { logger } from "./logger.js";
+// Gemini 3 models and Gemini 2.5 image models have a hard limit of 32768 output tokens
+const GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS = 32768;
+/**
+ * Check if a model has the restricted 32768 output token limit
+ * This applies to:
+ * - All Gemini 3 models (gemini-3-flash, gemini-3-pro, etc.)
+ * - All Gemini 2.5 image generation models (gemini-2.5-flash-image)
+ */
+function hasRestrictedOutputLimit(model) {
+    if (!model) {
+        return false;
+    }
+    // Check for Gemini 3 models
+    if (model.includes("gemini-3")) {
+        return true;
+    }
+    // Check for image generation models (includes gemini-2.5-flash-image)
+    if (IMAGE_GENERATION_MODELS.some((m) => model.includes(m))) {
+        return true;
+    }
+    return false;
+}
 /**
  * Get the safe maximum tokens for a provider and model
  */
 export function getSafeMaxTokens(provider, model, requestedMaxTokens) {
+    // CRITICAL: Gemini 3 models AND image generation models have a hard limit of 32768 output tokens
+    // This check must happen FIRST, before any other logic, because these models
+    // will reject requests with maxOutputTokens > 32768
+    const isRestrictedModel = hasRestrictedOutputLimit(model);
+    if (isRestrictedModel) {
+        // Explicit undefined/null check so a caller-supplied 0 is preserved
+        // (truthy checks would treat 0 as "unset" and silently fall back to the cap).
+        if (requestedMaxTokens !== undefined &&
+            requestedMaxTokens !== null &&
+            requestedMaxTokens > GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS) {
+            logger.warn(`Requested maxTokens ${requestedMaxTokens} exceeds ${model} limit of ${GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS}. Using ${GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS} instead.`);
+            return GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS;
+        }
+        // If no maxTokens specified, use the restricted limit as default
+        if (requestedMaxTokens === undefined || requestedMaxTokens === null) {
+            return GEMINI_RESTRICTED_MAX_OUTPUT_TOKENS;
+        }
+        // Otherwise, use the requested value (it's within limits, including 0)
+        return requestedMaxTokens;
+    }
     // Get provider-specific limits
     const providerLimits = PROVIDER_MAX_TOKENS[provider];
     if (!providerLimits) {
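The net effect on callers, as hypothetical invocations (the "vertex" provider string is illustrative; only the model name drives the restricted branch):

    getSafeMaxTokens("vertex", "gemini-3-flash-preview", 100000);    // 32768, warning logged
    getSafeMaxTokens("vertex", "gemini-3-flash-preview", undefined); // 32768 (restricted default)
    getSafeMaxTokens("vertex", "gemini-3-flash-preview", 0);         // 0 (explicit 0 preserved)
    getSafeMaxTokens("vertex", "gemini-2.5-flash-image", 40000);     // 32768 (image-gen model)
    getSafeMaxTokens("vertex", "gemini-2.0-flash", 40000);           // falls through to PROVIDER_MAX_TOKENS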
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@juspay/neurolink",
-  "version": "9.60.1",
+  "version": "9.61.0",
   "packageManager": "pnpm@10.15.1",
   "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 13 providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
   "author": {