@juspay/neurolink 7.33.3 → 7.33.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/CHANGELOG.md +6 -0
  2. package/dist/cli/commands/config.d.ts +3 -4
  3. package/dist/cli/commands/config.js +2 -3
  4. package/dist/core/baseProvider.js +26 -1
  5. package/dist/core/constants.d.ts +12 -3
  6. package/dist/core/constants.js +22 -6
  7. package/dist/core/factory.js +19 -0
  8. package/dist/factories/providerRegistry.js +2 -0
  9. package/dist/lib/core/baseProvider.js +26 -1
  10. package/dist/lib/core/constants.d.ts +12 -3
  11. package/dist/lib/core/constants.js +22 -6
  12. package/dist/lib/core/factory.js +19 -0
  13. package/dist/lib/factories/providerRegistry.js +2 -0
  14. package/dist/lib/mcp/servers/aiProviders/aiWorkflowTools.js +2 -2
  15. package/dist/lib/providers/amazonBedrock.js +2 -2
  16. package/dist/lib/providers/anthropic.js +3 -12
  17. package/dist/lib/providers/anthropicBaseProvider.js +1 -2
  18. package/dist/lib/providers/azureOpenai.js +49 -8
  19. package/dist/lib/providers/googleAiStudio.js +3 -3
  20. package/dist/lib/providers/googleVertex.js +2 -2
  21. package/dist/lib/providers/huggingFace.js +1 -2
  22. package/dist/lib/providers/litellm.js +1 -2
  23. package/dist/lib/providers/mistral.js +2 -2
  24. package/dist/lib/providers/ollama.js +7 -8
  25. package/dist/lib/providers/openAI.js +2 -2
  26. package/dist/lib/providers/openaiCompatible.js +5 -2
  27. package/dist/lib/providers/sagemaker/language-model.d.ts +5 -0
  28. package/dist/lib/providers/sagemaker/language-model.js +9 -1
  29. package/dist/lib/utils/providerHealth.js +7 -3
  30. package/dist/lib/utils/tokenLimits.d.ts +2 -2
  31. package/dist/lib/utils/tokenLimits.js +10 -3
  32. package/dist/mcp/servers/aiProviders/aiWorkflowTools.js +2 -2
  33. package/dist/providers/amazonBedrock.js +2 -2
  34. package/dist/providers/anthropic.js +3 -12
  35. package/dist/providers/anthropicBaseProvider.js +1 -2
  36. package/dist/providers/azureOpenai.js +49 -8
  37. package/dist/providers/googleAiStudio.js +3 -3
  38. package/dist/providers/googleVertex.js +2 -2
  39. package/dist/providers/huggingFace.js +1 -2
  40. package/dist/providers/litellm.js +1 -2
  41. package/dist/providers/mistral.js +2 -2
  42. package/dist/providers/ollama.js +7 -8
  43. package/dist/providers/openAI.js +2 -2
  44. package/dist/providers/openaiCompatible.js +5 -2
  45. package/dist/providers/sagemaker/language-model.d.ts +5 -0
  46. package/dist/providers/sagemaker/language-model.js +9 -1
  47. package/dist/utils/providerHealth.js +7 -3
  48. package/dist/utils/tokenLimits.d.ts +2 -2
  49. package/dist/utils/tokenLimits.js +10 -3
  50. package/package.json +1 -1
@@ -5,7 +5,7 @@ import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
 import { AuthenticationError, InvalidModelError, NetworkError, ProviderError, RateLimitError, } from "../types/errors.js";
-import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
+import { DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createOpenAIConfig, getProviderModel, } from "../utils/providerConfig.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
@@ -97,7 +97,7 @@ export class OpenAIProvider extends BaseProvider {
             model: this.model,
             messages: messages,
             temperature: options.temperature,
-            maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+            maxTokens: options.maxTokens, // No default limit - unlimited unless specified
             tools,
             maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
             toolChoice: shouldUseTools ? "auto" : "none",
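
This is the substantive change of the release: `maxTokens` now passes through as-is instead of being coerced to a package-wide default. A minimal sketch of the before/after semantics, with an illustrative `DEFAULT_MAX_TOKENS` value (the real constant's value is not shown in this diff):

```typescript
// Illustrative only: the actual DEFAULT_MAX_TOKENS value is not part of this diff.
const DEFAULT_MAX_TOKENS = 8192;

// Old behavior: `||` substitutes the default whenever maxTokens is
// undefined (or 0), silently capping every request without a limit.
function oldMaxTokens(maxTokens?: number): number {
  return maxTokens || DEFAULT_MAX_TOKENS;
}

// New behavior: undefined passes through, so the provider or model
// default applies and no hidden cap is imposed.
function newMaxTokens(maxTokens?: number): number | undefined {
  return maxTokens;
}

console.log(oldMaxTokens(undefined)); // 8192 - hidden cap
console.log(newMaxTokens(undefined)); // undefined - uncapped
```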
@@ -3,7 +3,6 @@ import { streamText } from "ai";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
-import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Constants
@@ -163,7 +162,7 @@ export class OpenAICompatibleProvider extends BaseProvider {
             prompt: options.input.text,
             system: options.systemPrompt,
             temperature: options.temperature,
-            maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+            maxTokens: options.maxTokens, // No default limit - unlimited unless specified
             tools: options.tools,
             toolChoice: "auto",
             abortSignal: timeoutController?.controller.signal,
@@ -207,12 +206,16 @@
         const modelsUrl = new URL("/v1/models", this.config.baseURL).toString();
         logger.debug(`Fetching available models from: ${modelsUrl}`);
         const proxyFetch = createProxyFetch();
+        const controller = new AbortController();
+        const t = setTimeout(() => controller.abort(), 5000);
         const response = await proxyFetch(modelsUrl, {
             headers: {
                 Authorization: `Bearer ${this.config.apiKey}`,
                 "Content-Type": "application/json",
             },
+            signal: controller.signal,
         });
+        clearTimeout(t);
         if (!response.ok) {
             logger.warn(`Models endpoint returned ${response.status}: ${response.statusText}`);
             return this.getFallbackModels();
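
The added lines implement a standard fetch timeout: schedule `abort()` on a timer, pass the controller's signal to the request, and clear the timer once the response arrives. A self-contained sketch of the same pattern (URL and 5-second timeout are placeholders); a `try/finally` additionally guarantees the timer is cleared when the request throws:

```typescript
// Sketch of the AbortController timeout pattern used above.
async function fetchWithTimeout(url: string, timeoutMs = 5000): Promise<Response> {
  const controller = new AbortController();
  // abort() rejects the in-flight fetch with an AbortError.
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    return await fetch(url, { signal: controller.signal });
  } finally {
    clearTimeout(timer); // never leave a stray abort pending
  }
}
```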
@@ -9,6 +9,11 @@ import type { SageMakerConfig, SageMakerModelConfig } from "./types.js";
 import type { ConnectivityResult } from "../../types/typeAliases.js";
 /**
  * SageMaker Language Model implementing LanguageModelV1 interface
+ *
+ * Token Limit Behavior:
+ * - When maxTokens is undefined, SageMaker uses the model's default token limits
+ * - When maxTokens is specified, it sets max_new_tokens parameter explicitly
+ * - This aligns with the unlimited-by-default token policy across all providers
  */
 export declare class SageMakerLanguageModel implements LanguageModelV1 {
     readonly specificationVersion = "v1";
@@ -75,6 +75,11 @@ const DEFAULT_MAX_CONCURRENCY = 10;
 const DEFAULT_MIN_CONCURRENCY = 1;
 /**
  * SageMaker Language Model implementing LanguageModelV1 interface
+ *
+ * Token Limit Behavior:
+ * - When maxTokens is undefined, SageMaker uses the model's default token limits
+ * - When maxTokens is specified, it sets max_new_tokens parameter explicitly
+ * - This aligns with the unlimited-by-default token policy across all providers
  */
 export class SageMakerLanguageModel {
     specificationVersion = "v1";
@@ -345,7 +350,10 @@ export class SageMakerLanguageModel {
         const request = {
             inputs: promptText,
             parameters: {
-                max_new_tokens: options.maxTokens || 512,
+                // Only include max_new_tokens if explicitly specified; let SageMaker use model defaults otherwise
+                ...(options.maxTokens !== undefined
+                    ? { max_new_tokens: options.maxTokens }
+                    : {}),
                 temperature: options.temperature || 0.7,
                 top_p: options.topP || 0.9,
                 stop: options.stopSequences || [],
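
The conditional spread omits the key entirely rather than sending `max_new_tokens: undefined` in the payload; the old `options.maxTokens || 512` also coerced an absent value into a hard cap. A minimal sketch of the pattern in isolation (option names mirror the diff; the defaults are illustrative):

```typescript
interface GenOptions {
  maxTokens?: number;
  temperature?: number;
}

// Build a payload that only carries keys the caller actually set:
// spreading {} contributes nothing; spreading the object adds the key.
function buildParameters(options: GenOptions) {
  return {
    ...(options.maxTokens !== undefined
      ? { max_new_tokens: options.maxTokens }
      : {}),
    temperature: options.temperature ?? 0.7,
  };
}

console.log(buildParameters({}));                 // { temperature: 0.7 }
console.log(buildParameters({ maxTokens: 256 })); // { max_new_tokens: 256, temperature: 0.7 }
```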
@@ -639,9 +639,13 @@ export class ProviderHealthChecker {
             healthStatus.configurationIssues.push("Invalid AZURE_OPENAI_ENDPOINT format");
             healthStatus.recommendations.push("Set AZURE_OPENAI_ENDPOINT to a valid URL (e.g., https://your-resource.openai.azure.com/)");
         }
-        if (!process.env.AZURE_OPENAI_DEPLOYMENT_NAME) {
-            healthStatus.configurationIssues.push("AZURE_OPENAI_DEPLOYMENT_NAME not set");
-            healthStatus.recommendations.push("Set AZURE_OPENAI_DEPLOYMENT_NAME to your deployment name");
+        // Check for deployment name using the SAME logic as the Azure provider
+        const deploymentName = process.env.AZURE_OPENAI_MODEL ||
+            process.env.AZURE_OPENAI_DEPLOYMENT ||
+            process.env.AZURE_OPENAI_DEPLOYMENT_ID;
+        if (!deploymentName) {
+            healthStatus.configurationIssues.push("No Azure deployment specified");
+            healthStatus.recommendations.push("Set one of: AZURE_OPENAI_MODEL, AZURE_OPENAI_DEPLOYMENT, or AZURE_OPENAI_DEPLOYMENT_ID");
         }
     }
     /**
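
The old check looked for `AZURE_OPENAI_DEPLOYMENT_NAME`, while the provider resolves the deployment from `AZURE_OPENAI_MODEL`, `AZURE_OPENAI_DEPLOYMENT`, or `AZURE_OPENAI_DEPLOYMENT_ID`, so a correctly configured setup could be flagged as broken. A sketch of that lookup as a shared helper (the helper itself is hypothetical; the package inlines the chain as shown above):

```typescript
// Hypothetical helper: a single source of truth for the deployment
// lookup keeps the provider and the health checker in agreement.
function resolveAzureDeployment(env = process.env): string | undefined {
  return (
    env.AZURE_OPENAI_MODEL ||
    env.AZURE_OPENAI_DEPLOYMENT ||
    env.AZURE_OPENAI_DEPLOYMENT_ID ||
    undefined
  );
}

if (!resolveAzureDeployment()) {
  console.warn("Set one of: AZURE_OPENAI_MODEL, AZURE_OPENAI_DEPLOYMENT, or AZURE_OPENAI_DEPLOYMENT_ID");
}
```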
@@ -6,13 +6,13 @@ import { PROVIDER_MAX_TOKENS } from "../core/constants.js";
 /**
  * Get the safe maximum tokens for a provider and model
  */
-export declare function getSafeMaxTokens(provider: keyof typeof PROVIDER_MAX_TOKENS | string, model?: string, requestedMaxTokens?: number): number;
+export declare function getSafeMaxTokens(provider: keyof typeof PROVIDER_MAX_TOKENS | string, model?: string, requestedMaxTokens?: number): number | undefined;
 /**
  * Validate if maxTokens is safe for a provider/model combination
  */
 export declare function validateMaxTokens(provider: keyof typeof PROVIDER_MAX_TOKENS | string, model?: string, maxTokens?: number): {
     isValid: boolean;
-    recommendedMaxTokens: number;
+    recommendedMaxTokens?: number;
     warning?: string;
 };
 /**
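
Both signatures now admit `undefined`, meaning "no limit known; pass the request through". Callers that previously clamped against the return value need a guard; an illustrative call site (not from the package):

```typescript
// Illustrative call site adapting to the widened return type.
const safe = getSafeMaxTokens("openai", "gpt-4o", 4096); // number | undefined
const maxTokens = safe !== undefined ? Math.min(4096, safe) : undefined;
```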
@@ -2,7 +2,7 @@
  * Provider-specific token limit utilities
  * Provides safe maxTokens values based on provider and model capabilities
  */
-import { PROVIDER_MAX_TOKENS, DEFAULT_MAX_TOKENS } from "../core/constants.js";
+import { PROVIDER_MAX_TOKENS } from "../core/constants.js";
 import { logger } from "./logger.js";
 /**
  * Get the safe maximum tokens for a provider and model
@@ -11,8 +11,8 @@ export function getSafeMaxTokens(provider, model, requestedMaxTokens) {
     // Get provider-specific limits
     const providerLimits = PROVIDER_MAX_TOKENS[provider];
     if (!providerLimits) {
-        logger.warn(`Unknown provider ${provider}, using default maxTokens limit`);
-        return Math.min(requestedMaxTokens || DEFAULT_MAX_TOKENS, PROVIDER_MAX_TOKENS.default);
+        logger.warn(`Unknown provider ${provider}, no token limits enforced`);
+        return requestedMaxTokens || undefined; // No default limit for unknown providers
     }
     // Get model-specific limit or provider default
     let maxLimit;
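
For an unknown provider, the request now passes through instead of being clamped to `PROVIDER_MAX_TOKENS.default`. Note the `|| undefined` idiom: since `0` is falsy, a requested limit of `0` also collapses to `undefined`. A reduction of just this branch (assumes nothing about the rest of the function):

```typescript
// Illustrative reduction of the unknown-provider branch.
function unknownProviderLimit(requestedMaxTokens?: number): number | undefined {
  return requestedMaxTokens || undefined; // 0 is falsy, so 0 becomes undefined too
}

console.log(unknownProviderLimit());     // undefined - no cap enforced
console.log(unknownProviderLimit(2048)); // 2048 - honored as-is
console.log(unknownProviderLimit(0));    // undefined, not 0
```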
@@ -54,6 +54,13 @@ export function validateMaxTokens(provider, model, maxTokens) {
             recommendedMaxTokens: safeMaxTokens,
         };
     }
+    // If no limits are defined, validation always passes
+    if (safeMaxTokens === undefined) {
+        return {
+            isValid: true,
+            recommendedMaxTokens: maxTokens,
+        };
+    }
     const isValid = maxTokens <= safeMaxTokens;
     return {
         isValid,
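
With the early return in place, validation becomes a pass-through whenever `getSafeMaxTokens` found no limit. A hedged usage sketch (the provider name is a placeholder; the result shape follows the `.d.ts` change above):

```typescript
// Unknown provider: no limit table exists, so any request validates.
const result = validateMaxTokens("my-custom-endpoint", undefined, 32768);
// Expected shape: { isValid: true, recommendedMaxTokens: 32768 }
if (!result.isValid && result.warning) {
  console.warn(result.warning);
}
```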
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@juspay/neurolink",
-  "version": "7.33.3",
+  "version": "7.33.4",
   "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
   "author": {
     "name": "Juspay Technologies",