@juspay/neurolink 7.15.0 → 7.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,15 @@
+ ## [7.17.0](https://github.com/juspay/neurolink/compare/v7.16.0...v7.17.0) (2025-08-19)
+
+ ### Features
+
+ - **(proxy):** implement comprehensive enterprise proxy support with testing ([0dd124b](https://github.com/juspay/neurolink/commit/0dd124b75826f4581a608e4d62acc05e827cbc1d))
+
+ ## [7.16.0](https://github.com/juspay/neurolink/compare/v7.15.0...v7.16.0) (2025-08-19)
+
+ ### Features
+
+ - **(cli):** Add validate provider config support in CLI ([2e8d6ad](https://github.com/juspay/neurolink/commit/2e8d6ad6475bf24f67f61a76d33689f323821b70))
+
  ## [7.15.0](https://github.com/juspay/neurolink/compare/v7.14.8...v7.15.0) (2025-08-19)

  ### Features
package/README.md CHANGED
@@ -62,6 +62,7 @@ npx @juspay/neurolink sagemaker benchmark my-endpoint # Performance testing
  - **🏭 Factory Pattern Architecture** - Unified provider management through BaseProvider inheritance
  - **🔧 Tools-First Design** - All providers include built-in tool support without additional configuration
  - **🔗 LiteLLM Integration** - **100+ models** from all major providers through unified interface
+ - **🏢 Enterprise Proxy Support** - Comprehensive corporate proxy support with MCP compatibility
  - **🏗️ Enterprise Architecture** - Production-ready with clean abstractions
  - **🔄 Configuration Management** - Flexible provider configuration with automatic backups
  - **✅ Type Safety** - Industry-standard TypeScript interfaces
@@ -404,6 +405,7 @@ npx @juspay/neurolink generate "Weather in Tokyo now" --provider vertex
  - ⚡ **Automatic Fallback** - Never fail when providers are down, intelligent provider switching
  - 🖥️ **CLI + SDK** - Use from command line or integrate programmatically with TypeScript support
  - 🛡️ **Production Ready** - Enterprise-grade error handling, performance optimization, extracted from production
+ - 🏢 **Enterprise Proxy Support** - Comprehensive corporate proxy support with zero configuration
  - ✅ **External MCP Integration** - Model Context Protocol with built-in tools + full external MCP server support
  - 🔍 **Smart Model Resolution** - Fuzzy matching, aliases, and capability-based search across all providers
  - 🏠 **Local AI Support** - Run completely offline with Ollama or through LiteLLM proxy
@@ -45,6 +45,10 @@ export declare class CLICommandFactory {
   * Create config commands
   */
  static createConfigCommands(): CommandModule;
+ /**
+ * Create validate command
+ */
+ static createValidateCommand(): CommandModule;
  /**
   * Create get-best-provider command
   */
@@ -1,4 +1,5 @@
  import { NeuroLink } from "../../lib/neurolink.js";
+ import { configManager } from "../commands/config.js";
  import { ContextFactory, } from "../../lib/types/contextTypes.js";
  import { ModelsCommandFactory } from "../commands/models.js";
  import { MCPCommandFactory } from "../commands/mcp.js";
@@ -453,15 +454,12 @@ export class CLICommandFactory {
  builder: (yargs) => {
  return yargs
  .command("init", "Interactive configuration setup wizard", (y) => this.buildOptions(y), async (_argv) => {
- const { configManager } = await import("../commands/config.js");
  await configManager.initInteractive();
  })
  .command("show", "Display current configuration", (y) => this.buildOptions(y), async (_argv) => {
- const { configManager } = await import("../commands/config.js");
  configManager.showConfig();
  })
  .command("validate", "Validate current configuration", (y) => this.buildOptions(y), async (_argv) => {
- const { configManager } = await import("../commands/config.js");
  const result = configManager.validateConfig();
  if (result.valid) {
  logger.always(chalk.green("✅ Configuration is valid"));
@@ -473,7 +471,6 @@ export class CLICommandFactory {
  }
  })
  .command("reset", "Reset configuration to defaults", (y) => this.buildOptions(y), async (_argv) => {
- const { configManager } = await import("../commands/config.js");
  configManager.resetConfig();
  })
  .command("export", "Export current configuration", (y) => this.buildOptions(y), (argv) => this.executeConfigExport(argv))
@@ -482,6 +479,27 @@
  handler: () => { }, // No-op handler as subcommands handle everything
  };
  }
+ /**
+ * Create validate command
+ */
+ static createValidateCommand() {
+ return {
+ command: "validate",
+ describe: "Validate current configuration (alias for 'config validate')",
+ builder: (yargs) => this.buildOptions(yargs),
+ handler: async (_argv) => {
+ const result = configManager.validateConfig();
+ if (result.valid) {
+ logger.always(chalk.green("✅ Configuration is valid"));
+ }
+ else {
+ logger.always(chalk.red("❌ Configuration has errors:"));
+ result.errors.forEach((error) => logger.always(` • ${error}`));
+ throw new Error("Configuration is invalid. See errors above.");
+ }
+ },
+ };
+ }
  /**
   * Create get-best-provider command
   */
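Per the describe string above, the new top-level command is a plain alias, so these two invocations should be interchangeable:

```bash
npx @juspay/neurolink config validate   # existing subcommand
npx @juspay/neurolink validate          # top-level alias added in 7.16.0
```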
package/dist/cli/index.js CHANGED
@@ -225,6 +225,8 @@ const cli = yargs(args)
  .command(CLICommandFactory.createConfigCommands())
  // Get Best Provider Command - Using CLICommandFactory
  .command(CLICommandFactory.createBestProviderCommand())
+ // Validate Command (alias for config validate)
+ .command(CLICommandFactory.createValidateCommand())
  // Completion Command - Using CLICommandFactory
  .command(CLICommandFactory.createCompletionCommand());
  // Add Ollama Commands
@@ -1,4 +1,4 @@
- import { anthropic } from "@ai-sdk/anthropic";
+ import { createAnthropic } from "@ai-sdk/anthropic";
  import { streamText } from "ai";
  import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
@@ -6,6 +6,7 @@ import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
  import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
  import { validateApiKey, createAnthropicConfig, getProviderModel, } from "../utils/providerConfig.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
+ import { createProxyFetch } from "../proxy/proxyFetch.js";
  // Configuration helpers - now using consolidated utility
  const getAnthropicApiKey = () => {
  return validateApiKey(createAnthropicConfig());
@@ -21,11 +22,14 @@ export class AnthropicProvider extends BaseProvider {
  model;
  constructor(modelName, sdk) {
  super(modelName, "anthropic", sdk);
- // Initialize Anthropic model with API key validation
+ // Initialize Anthropic model with API key validation and proxy support
  const apiKey = getAnthropicApiKey();
- // Set Anthropic API key as environment variable (required by @ai-sdk/anthropic)
- process.env.ANTHROPIC_API_KEY = apiKey;
- // Initialize Anthropic with proper configuration
+ // Create Anthropic instance with proxy fetch
+ const anthropic = createAnthropic({
+ apiKey: apiKey,
+ fetch: createProxyFetch(),
+ });
+ // Initialize Anthropic model with proxy-aware instance
  this.model = anthropic(this.modelName || getDefaultAnthropicModel());
  logger.debug("Anthropic Provider v2 initialized", {
  modelName: this.modelName,
@@ -4,6 +4,7 @@ import { BaseProvider } from "../core/baseProvider.js";
  import { validateApiKey, createAzureAPIKeyConfig, createAzureEndpointConfig, } from "../utils/providerConfig.js";
  import { logger } from "../utils/logger.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
+ import { createProxyFetch } from "../proxy/proxyFetch.js";
  export class AzureOpenAIProvider extends BaseProvider {
  apiKey;
  resourceName;
@@ -31,11 +32,12 @@ export class AzureOpenAIProvider extends BaseProvider {
  if (!this.resourceName) {
  validateApiKey(createAzureEndpointConfig());
  }
- // Create the Azure provider instance
+ // Create the Azure provider instance with proxy support
  this.azureProvider = createAzure({
  resourceName: this.resourceName,
  apiKey: this.apiKey,
  apiVersion: this.apiVersion,
+ fetch: createProxyFetch(),
  });
  logger.debug("Azure Vercel Provider initialized", {
  deployment: this.deployment,
@@ -7,6 +7,7 @@ import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
  import { ModelConfigurationManager } from "../core/modelConfiguration.js";
  import { validateApiKey, createVertexProjectConfig, createGoogleAuthConfig, } from "../utils/providerConfig.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
+ import { createProxyFetch } from "../proxy/proxyFetch.js";
  // Cache for anthropic module to avoid repeated imports
  let _createVertexAnthropic = null;
  let _anthropicImportAttempted = false;
@@ -51,11 +52,12 @@ const hasGoogleCredentials = () => {
  (process.env.GOOGLE_AUTH_CLIENT_EMAIL &&
  process.env.GOOGLE_AUTH_PRIVATE_KEY));
  };
- // Enhanced Vertex settings creation with authentication fallback
+ // Enhanced Vertex settings creation with authentication fallback and proxy support
  const createVertexSettings = () => {
  const baseSettings = {
  project: getVertexProjectId(),
  location: getVertexLocation(),
+ fetch: createProxyFetch(),
  };
  // Check for principal account authentication first (recommended for production)
  if (process.env.GOOGLE_APPLICATION_CREDENTIALS) {
@@ -6,6 +6,7 @@ import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
  import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
  import { validateApiKey, createHuggingFaceConfig, getProviderModel, } from "../utils/providerConfig.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
+ import { createProxyFetch } from "../proxy/proxyFetch.js";
  // Configuration helpers - now using consolidated utility
  const getHuggingFaceApiKey = () => {
  return validateApiKey(createHuggingFaceConfig());
@@ -24,10 +25,11 @@ export class HuggingFaceProvider extends BaseProvider {
  super(modelName, "huggingface");
  // Get API key and validate
  const apiKey = getHuggingFaceApiKey();
- // Create HuggingFace provider using unified router endpoint (2025)
+ // Create HuggingFace provider using unified router endpoint (2025) with proxy support
  const huggingface = createOpenAI({
  apiKey: apiKey,
  baseURL: "https://router.huggingface.co/v1",
+ fetch: createProxyFetch(),
  });
  // Initialize model
  this.model = huggingface(this.modelName);
@@ -7,6 +7,7 @@ import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
  import { validateApiKey, createMistralConfig, getProviderModel, } from "../utils/providerConfig.js";
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
+ import { createProxyFetch } from "../proxy/proxyFetch.js";
  // Configuration helpers - now using consolidated utility
  const getMistralApiKey = () => {
  return validateApiKey(createMistralConfig());
@@ -26,10 +27,11 @@ export class MistralProvider extends BaseProvider {
  ? sdk
  : undefined;
  super(modelName, "mistral", validatedNeurolink);
- // Initialize Mistral model with API key validation
+ // Initialize Mistral model with API key validation and proxy support
  const apiKey = getMistralApiKey();
  const mistral = createMistral({
  apiKey: apiKey,
+ fetch: createProxyFetch(),
  });
  this.model = mistral(this.modelName);
  logger.debug("Mistral Provider v2 initialized", {
@@ -3,6 +3,7 @@ import { logger } from "../utils/logger.js";
  import { TimeoutError } from "../utils/timeout.js";
  import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
  import { modelConfig } from "../core/modelConfiguration.js";
+ import { createProxyFetch } from "../proxy/proxyFetch.js";
  // Model version constants (configurable via environment)
  const DEFAULT_OLLAMA_MODEL = "llama3.1:8b";
  const FALLBACK_OLLAMA_MODEL = "llama3.2:latest"; // Used when primary model fails
@@ -26,6 +27,8 @@ const getDefaultOllamaModel = () => {
  const getOllamaTimeout = () => {
  return parseInt(process.env.OLLAMA_TIMEOUT || "60000", 10);
  };
+ // Create proxy-aware fetch instance
+ const proxyFetch = createProxyFetch();
  // Custom LanguageModelV1 implementation for Ollama
  class OllamaLanguageModel {
  specificationVersion = "v1";
@@ -61,7 +64,7 @@ class OllamaLanguageModel {
  // Debug: Log what's being sent to Ollama
  logger.debug("[OllamaLanguageModel] Messages:", JSON.stringify(messages, null, 2));
  logger.debug("[OllamaLanguageModel] Converted Prompt:", JSON.stringify(prompt));
- const response = await fetch(`${this.baseUrl}/api/generate`, {
+ const response = await proxyFetch(`${this.baseUrl}/api/generate`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
@@ -108,7 +111,7 @@ class OllamaLanguageModel {
  const messages = options
  .messages || [];
  const prompt = this.convertMessagesToPrompt(messages);
- const response = await fetch(`${this.baseUrl}/api/generate`, {
+ const response = await proxyFetch(`${this.baseUrl}/api/generate`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
@@ -325,7 +328,7 @@ export class OllamaProvider extends BaseProvider {
  : []),
  { role: "user", content: options.input.text },
  ];
- const response = await fetch(`${this.baseUrl}/v1/chat/completions`, {
+ const response = await proxyFetch(`${this.baseUrl}/v1/chat/completions`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
@@ -366,7 +369,7 @@ export class OllamaProvider extends BaseProvider {
  * Fallback for non-tool scenarios or when chat API is unavailable
  */
  async executeStreamWithoutTools(options, analysisSchema) {
- const response = await fetch(`${this.baseUrl}/api/generate`, {
+ const response = await proxyFetch(`${this.baseUrl}/api/generate`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
@@ -568,7 +571,7 @@ export class OllamaProvider extends BaseProvider {
  // Use traditional AbortController for better compatibility
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), 5000);
- const response = await fetch(`${this.baseUrl}/api/version`, {
+ const response = await proxyFetch(`${this.baseUrl}/api/version`, {
  method: "GET",
  signal: controller.signal,
  });
@@ -589,7 +592,7 @@ export class OllamaProvider extends BaseProvider {
  */
  async getAvailableModels() {
  try {
- const response = await fetch(`${this.baseUrl}/api/tags`);
+ const response = await proxyFetch(`${this.baseUrl}/api/tags`);
  if (!response.ok) {
  throw new Error(`Failed to fetch models: ${response.status}`);
  }
@@ -1,4 +1,4 @@
- import { openai } from "@ai-sdk/openai";
+ import { createOpenAI } from "@ai-sdk/openai";
  import { streamText } from "ai";
  import { AIProviderName } from "../core/types.js";
  import { BaseProvider } from "../core/baseProvider.js";
@@ -8,6 +8,7 @@ import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
  import { validateApiKey, createOpenAIConfig, getProviderModel, } from "../utils/providerConfig.js";
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
+ import { createProxyFetch } from "../proxy/proxyFetch.js";
  // Configuration helpers - now using consolidated utility
  const getOpenAIApiKey = () => {
  return validateApiKey(createOpenAIConfig());
@@ -23,8 +24,11 @@ export class OpenAIProvider extends BaseProvider {
  model;
  constructor(modelName, neurolink) {
  super(modelName || getOpenAIModel(), AIProviderName.OPENAI, neurolink);
- // Set OpenAI API key as environment variable (required by @ai-sdk/openai)
- process.env.OPENAI_API_KEY = getOpenAIApiKey();
+ // Initialize OpenAI provider with proxy support
+ const openai = createOpenAI({
+ apiKey: getOpenAIApiKey(),
+ fetch: createProxyFetch(),
+ });
  // Initialize model
  this.model = openai(this.modelName);
  logger.debug("OpenAIProviderV2 initialized", {
@@ -14,5 +14,6 @@ export declare function getProxyStatus(): {
  enabled: boolean;
  httpProxy: string | null;
  httpsProxy: string | null;
+ noProxy: string | null;
  method: string;
  };
@@ -57,10 +57,12 @@ export function createProxyFetch() {
  export function getProxyStatus() {
  const httpsProxy = process.env.HTTPS_PROXY || process.env.https_proxy;
  const httpProxy = process.env.HTTP_PROXY || process.env.http_proxy;
+ const noProxy = process.env.NO_PROXY || process.env.no_proxy;
  return {
  enabled: !!(httpsProxy || httpProxy),
  httpProxy: httpProxy || null,
  httpsProxy: httpsProxy || null,
+ noProxy: noProxy || null,
  method: "undici-proxy-agent",
  };
  }
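The hunk above only shows `getProxyStatus()` gaining NO_PROXY awareness, but the `method: "undici-proxy-agent"` field suggests the shape of `createProxyFetch()` itself: a fetch wrapper backed by undici's `ProxyAgent`. A minimal sketch of that pattern, assuming undici and omitting NO_PROXY handling (an illustration, not the package's exact source):

```js
// Hypothetical sketch: proxy-aware fetch built on undici's ProxyAgent.
import { ProxyAgent, fetch as undiciFetch } from "undici";

export function createProxyFetch() {
  const proxyUrl =
    process.env.HTTPS_PROXY ||
    process.env.https_proxy ||
    process.env.HTTP_PROXY ||
    process.env.http_proxy;
  if (!proxyUrl) {
    // No proxy configured: return the platform fetch unchanged.
    return globalThis.fetch;
  }
  const dispatcher = new ProxyAgent(proxyUrl);
  // undici's fetch accepts a `dispatcher` option that tunnels the
  // request through the proxy (CONNECT for HTTPS targets).
  return (input, init = {}) => undiciFetch(input, { ...init, dispatcher });
}
```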
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@juspay/neurolink",
- "version": "7.15.0",
+ "version": "7.17.0",
  "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
  "author": {
  "name": "Juspay Technologies",