@juspay/neurolink 7.16.0 → 7.17.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/README.md +2 -0
- package/dist/lib/providers/anthropic.js +9 -5
- package/dist/lib/providers/azureOpenai.js +3 -1
- package/dist/lib/providers/googleVertex.js +3 -1
- package/dist/lib/providers/huggingFace.js +3 -1
- package/dist/lib/providers/mistral.js +3 -1
- package/dist/lib/providers/ollama.js +9 -6
- package/dist/lib/providers/openAI.js +7 -3
- package/dist/lib/proxy/proxyFetch.d.ts +1 -0
- package/dist/lib/proxy/proxyFetch.js +2 -0
- package/dist/providers/anthropic.js +9 -5
- package/dist/providers/azureOpenai.js +3 -1
- package/dist/providers/googleVertex.js +3 -1
- package/dist/providers/huggingFace.js +3 -1
- package/dist/providers/mistral.js +3 -1
- package/dist/providers/ollama.js +9 -6
- package/dist/providers/openAI.js +7 -3
- package/dist/proxy/proxyFetch.d.ts +1 -0
- package/dist/proxy/proxyFetch.js +2 -0
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,3 +1,9 @@
+## [7.17.0](https://github.com/juspay/neurolink/compare/v7.16.0...v7.17.0) (2025-08-19)
+
+### Features
+
+- **(proxy):** implement comprehensive enterprise proxy support with testing ([0dd124b](https://github.com/juspay/neurolink/commit/0dd124b75826f4581a608e4d62acc05e827cbc1d))
+
 ## [7.16.0](https://github.com/juspay/neurolink/compare/v7.15.0...v7.16.0) (2025-08-19)
 
 ### Features
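The single feature in this release is the proxy-aware fetch wiring shown in the provider diffs below. The diff does not include the body of createProxyFetch itself, but the "undici-proxy-agent" method string reported by getProxyStatus (see package/dist/lib/proxy/proxyFetch.js) suggests it wraps undici's ProxyAgent. A minimal sketch of that approach, assuming the standard proxy environment variables — this is not the package's actual implementation:

import { ProxyAgent, fetch as undiciFetch } from "undici";

export function createProxyFetch() {
  // Resolve the proxy URL from the conventional environment variables.
  const proxyUrl =
    process.env.HTTPS_PROXY ||
    process.env.https_proxy ||
    process.env.HTTP_PROXY ||
    process.env.http_proxy;
  if (!proxyUrl) {
    // No proxy configured: behave exactly like plain fetch.
    return undiciFetch;
  }
  const dispatcher = new ProxyAgent(proxyUrl);
  // Drop-in fetch replacement that routes every request through the proxy.
  return (input, init = {}) => undiciFetch(input, { ...init, dispatcher });
}

NO_PROXY handling is omitted from this sketch; the real module reports a noProxy field (see the proxyFetch.js hunk below), so it presumably also bypasses the proxy for matching hosts.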
package/README.md
CHANGED
@@ -62,6 +62,7 @@ npx @juspay/neurolink sagemaker benchmark my-endpoint # Performance testing
 - **🏭 Factory Pattern Architecture** - Unified provider management through BaseProvider inheritance
 - **🔧 Tools-First Design** - All providers include built-in tool support without additional configuration
 - **🔗 LiteLLM Integration** - **100+ models** from all major providers through unified interface
+- **🏢 Enterprise Proxy Support** - Comprehensive corporate proxy support with MCP compatibility
 - **🏗️ Enterprise Architecture** - Production-ready with clean abstractions
 - **🔄 Configuration Management** - Flexible provider configuration with automatic backups
 - **✅ Type Safety** - Industry-standard TypeScript interfaces
@@ -404,6 +405,7 @@ npx @juspay/neurolink generate "Weather in Tokyo now" --provider vertex
 - ⚡ **Automatic Fallback** - Never fail when providers are down, intelligent provider switching
 - 🖥️ **CLI + SDK** - Use from command line or integrate programmatically with TypeScript support
 - 🛡️ **Production Ready** - Enterprise-grade error handling, performance optimization, extracted from production
+- 🏢 **Enterprise Proxy Support** - Comprehensive corporate proxy support with zero configuration
 - ✅ **External MCP Integration** - Model Context Protocol with built-in tools + full external MCP server support
 - 🔍 **Smart Model Resolution** - Fuzzy matching, aliases, and capability-based search across all providers
 - 🏠 **Local AI Support** - Run completely offline with Ollama or through LiteLLM proxy
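"Zero configuration" here appears to mean the proxy is picked up from the environment rather than from CLI flags or SDK options: when HTTPS_PROXY/HTTP_PROXY are set, provider calls in this release are routed through them. For example, reusing the README's own command with an illustrative proxy URL:

HTTPS_PROXY=http://proxy.example.com:8080 npx @juspay/neurolink generate "Weather in Tokyo now" --provider vertex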
package/dist/lib/providers/anthropic.js
CHANGED
@@ -1,4 +1,4 @@
-import {
+import { createAnthropic } from "@ai-sdk/anthropic";
 import { streamText } from "ai";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
@@ -6,6 +6,7 @@ import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
 import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createAnthropicConfig, getProviderModel, } from "../utils/providerConfig.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers - now using consolidated utility
 const getAnthropicApiKey = () => {
 return validateApiKey(createAnthropicConfig());
@@ -21,11 +22,14 @@ export class AnthropicProvider extends BaseProvider {
 model;
 constructor(modelName, sdk) {
 super(modelName, "anthropic", sdk);
-// Initialize Anthropic model with API key validation
+// Initialize Anthropic model with API key validation and proxy support
 const apiKey = getAnthropicApiKey();
-//
-
-
+// Create Anthropic instance with proxy fetch
+const anthropic = createAnthropic({
+apiKey: apiKey,
+fetch: createProxyFetch(),
+});
+// Initialize Anthropic model with proxy-aware instance
 this.model = anthropic(this.modelName || getDefaultAnthropicModel());
 logger.debug("Anthropic Provider v2 initialized", {
 modelName: this.modelName,
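The same pattern repeats across the Vercel AI SDK based providers in this release (Anthropic, Azure OpenAI, Google Vertex, Hugging Face, Mistral, OpenAI): each provider factory accepts a custom fetch, so passing createProxyFetch() makes every outbound request proxy-aware without touching the call sites. A standalone sketch of the pattern — the API key env var and model id are illustrative, not the package's own config helpers, and the import path is the package's internal relative path:

import { createAnthropic } from "@ai-sdk/anthropic";
import { createProxyFetch } from "../proxy/proxyFetch.js"; // internal relative path, as in the diff

const anthropic = createAnthropic({
  apiKey: process.env.ANTHROPIC_API_KEY, // assumes the key is set in the environment
  fetch: createProxyFetch(), // proxy-aware drop-in for the global fetch
});
const model = anthropic("claude-3-5-sonnet-latest"); // any supported Anthropic model id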
package/dist/lib/providers/azureOpenai.js
CHANGED
@@ -4,6 +4,7 @@ import { BaseProvider } from "../core/baseProvider.js";
 import { validateApiKey, createAzureAPIKeyConfig, createAzureEndpointConfig, } from "../utils/providerConfig.js";
 import { logger } from "../utils/logger.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { createProxyFetch } from "../proxy/proxyFetch.js";
 export class AzureOpenAIProvider extends BaseProvider {
 apiKey;
 resourceName;
@@ -31,11 +32,12 @@ export class AzureOpenAIProvider extends BaseProvider {
 if (!this.resourceName) {
 validateApiKey(createAzureEndpointConfig());
 }
-// Create the Azure provider instance
+// Create the Azure provider instance with proxy support
 this.azureProvider = createAzure({
 resourceName: this.resourceName,
 apiKey: this.apiKey,
 apiVersion: this.apiVersion,
+fetch: createProxyFetch(),
 });
 logger.debug("Azure Vercel Provider initialized", {
 deployment: this.deployment,
package/dist/lib/providers/googleVertex.js
CHANGED
@@ -7,6 +7,7 @@ import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { ModelConfigurationManager } from "../core/modelConfiguration.js";
 import { validateApiKey, createVertexProjectConfig, createGoogleAuthConfig, } from "../utils/providerConfig.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Cache for anthropic module to avoid repeated imports
 let _createVertexAnthropic = null;
 let _anthropicImportAttempted = false;
@@ -51,11 +52,12 @@ const hasGoogleCredentials = () => {
 (process.env.GOOGLE_AUTH_CLIENT_EMAIL &&
 process.env.GOOGLE_AUTH_PRIVATE_KEY));
 };
-// Enhanced Vertex settings creation with authentication fallback
+// Enhanced Vertex settings creation with authentication fallback and proxy support
 const createVertexSettings = () => {
 const baseSettings = {
 project: getVertexProjectId(),
 location: getVertexLocation(),
+fetch: createProxyFetch(),
 };
 // Check for principal account authentication first (recommended for production)
 if (process.env.GOOGLE_APPLICATION_CREDENTIALS) {
package/dist/lib/providers/huggingFace.js
CHANGED
@@ -6,6 +6,7 @@ import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
 import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { validateApiKey, createHuggingFaceConfig, getProviderModel, } from "../utils/providerConfig.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers - now using consolidated utility
 const getHuggingFaceApiKey = () => {
 return validateApiKey(createHuggingFaceConfig());
@@ -24,10 +25,11 @@ export class HuggingFaceProvider extends BaseProvider {
 super(modelName, "huggingface");
 // Get API key and validate
 const apiKey = getHuggingFaceApiKey();
-// Create HuggingFace provider using unified router endpoint (2025)
+// Create HuggingFace provider using unified router endpoint (2025) with proxy support
 const huggingface = createOpenAI({
 apiKey: apiKey,
 baseURL: "https://router.huggingface.co/v1",
+fetch: createProxyFetch(),
 });
 // Initialize model
 this.model = huggingface(this.modelName);
package/dist/lib/providers/mistral.js
CHANGED
@@ -7,6 +7,7 @@ import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createMistralConfig, getProviderModel, } from "../utils/providerConfig.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers - now using consolidated utility
 const getMistralApiKey = () => {
 return validateApiKey(createMistralConfig());
@@ -26,10 +27,11 @@ export class MistralProvider extends BaseProvider {
 ? sdk
 : undefined;
 super(modelName, "mistral", validatedNeurolink);
-// Initialize Mistral model with API key validation
+// Initialize Mistral model with API key validation and proxy support
 const apiKey = getMistralApiKey();
 const mistral = createMistral({
 apiKey: apiKey,
+fetch: createProxyFetch(),
 });
 this.model = mistral(this.modelName);
 logger.debug("Mistral Provider v2 initialized", {
package/dist/lib/providers/ollama.js
CHANGED
@@ -3,6 +3,7 @@ import { logger } from "../utils/logger.js";
 import { TimeoutError } from "../utils/timeout.js";
 import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { modelConfig } from "../core/modelConfiguration.js";
+import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Model version constants (configurable via environment)
 const DEFAULT_OLLAMA_MODEL = "llama3.1:8b";
 const FALLBACK_OLLAMA_MODEL = "llama3.2:latest"; // Used when primary model fails
@@ -26,6 +27,8 @@ const getDefaultOllamaModel = () => {
 const getOllamaTimeout = () => {
 return parseInt(process.env.OLLAMA_TIMEOUT || "60000", 10);
 };
+// Create proxy-aware fetch instance
+const proxyFetch = createProxyFetch();
 // Custom LanguageModelV1 implementation for Ollama
 class OllamaLanguageModel {
 specificationVersion = "v1";
@@ -61,7 +64,7 @@ class OllamaLanguageModel {
 // Debug: Log what's being sent to Ollama
 logger.debug("[OllamaLanguageModel] Messages:", JSON.stringify(messages, null, 2));
 logger.debug("[OllamaLanguageModel] Converted Prompt:", JSON.stringify(prompt));
-const response = await
+const response = await proxyFetch(`${this.baseUrl}/api/generate`, {
 method: "POST",
 headers: { "Content-Type": "application/json" },
 body: JSON.stringify({
@@ -108,7 +111,7 @@ class OllamaLanguageModel {
 const messages = options
 .messages || [];
 const prompt = this.convertMessagesToPrompt(messages);
-const response = await
+const response = await proxyFetch(`${this.baseUrl}/api/generate`, {
 method: "POST",
 headers: { "Content-Type": "application/json" },
 body: JSON.stringify({
@@ -325,7 +328,7 @@ export class OllamaProvider extends BaseProvider {
 : []),
 { role: "user", content: options.input.text },
 ];
-const response = await
+const response = await proxyFetch(`${this.baseUrl}/v1/chat/completions`, {
 method: "POST",
 headers: { "Content-Type": "application/json" },
 body: JSON.stringify({
@@ -366,7 +369,7 @@ export class OllamaProvider extends BaseProvider {
 * Fallback for non-tool scenarios or when chat API is unavailable
 */
 async executeStreamWithoutTools(options, analysisSchema) {
-const response = await
+const response = await proxyFetch(`${this.baseUrl}/api/generate`, {
 method: "POST",
 headers: { "Content-Type": "application/json" },
 body: JSON.stringify({
@@ -568,7 +571,7 @@ export class OllamaProvider extends BaseProvider {
 // Use traditional AbortController for better compatibility
 const controller = new AbortController();
 const timeoutId = setTimeout(() => controller.abort(), 5000);
-const response = await
+const response = await proxyFetch(`${this.baseUrl}/api/version`, {
 method: "GET",
 signal: controller.signal,
 });
@@ -589,7 +592,7 @@ export class OllamaProvider extends BaseProvider {
 */
 async getAvailableModels() {
 try {
-const response = await
+const response = await proxyFetch(`${this.baseUrl}/api/tags`);
 if (!response.ok) {
 throw new Error(`Failed to fetch models: ${response.status}`);
 }
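Ollama is the odd one out: it talks to the local Ollama REST API directly instead of going through an AI SDK factory, so the change swaps each bare fetch call for a single module-level proxyFetch. A usage sketch of that drop-in behaviour, under the assumption of a locally running Ollama on its default port (the env-var fallback and base URL are illustrative, not taken from the diff):

import { createProxyFetch } from "../proxy/proxyFetch.js"; // internal relative path, as in the diff

const proxyFetch = createProxyFetch();
const baseUrl = process.env.OLLAMA_BASE_URL || "http://localhost:11434"; // assumed default Ollama port

// Same call shape as getAvailableModels() above: list locally installed models.
const response = await proxyFetch(`${baseUrl}/api/tags`);
if (!response.ok) {
  throw new Error(`Failed to fetch models: ${response.status}`);
}
const { models } = await response.json(); // Ollama's /api/tags returns { models: [...] }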
package/dist/lib/providers/openAI.js
CHANGED
@@ -1,4 +1,4 @@
-import {
+import { createOpenAI } from "@ai-sdk/openai";
 import { streamText } from "ai";
 import { AIProviderName } from "../core/types.js";
 import { BaseProvider } from "../core/baseProvider.js";
@@ -8,6 +8,7 @@ import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createOpenAIConfig, getProviderModel, } from "../utils/providerConfig.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers - now using consolidated utility
 const getOpenAIApiKey = () => {
 return validateApiKey(createOpenAIConfig());
@@ -23,8 +24,11 @@ export class OpenAIProvider extends BaseProvider {
 model;
 constructor(modelName, neurolink) {
 super(modelName || getOpenAIModel(), AIProviderName.OPENAI, neurolink);
-//
-
+// Initialize OpenAI provider with proxy support
+const openai = createOpenAI({
+apiKey: getOpenAIApiKey(),
+fetch: createProxyFetch(),
+});
 // Initialize model
 this.model = openai(this.modelName);
 logger.debug("OpenAIProviderV2 initialized", {
package/dist/lib/proxy/proxyFetch.js
CHANGED
@@ -57,10 +57,12 @@ export function createProxyFetch() {
 export function getProxyStatus() {
 const httpsProxy = process.env.HTTPS_PROXY || process.env.https_proxy;
 const httpProxy = process.env.HTTP_PROXY || process.env.http_proxy;
+const noProxy = process.env.NO_PROXY || process.env.no_proxy;
 return {
 enabled: !!(httpsProxy || httpProxy),
 httpProxy: httpProxy || null,
 httpsProxy: httpsProxy || null,
+noProxy: noProxy || null,
 method: "undici-proxy-agent",
 };
 }
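The only visible change to the proxy module itself is that getProxyStatus() now also surfaces NO_PROXY. With the relevant variables set (values illustrative), the returned status would look like:

process.env.HTTPS_PROXY = "http://proxy.example.com:8080";
process.env.NO_PROXY = "localhost,127.0.0.1";
// getProxyStatus() then returns:
// {
//   enabled: true,
//   httpProxy: null,
//   httpsProxy: "http://proxy.example.com:8080",
//   noProxy: "localhost,127.0.0.1",
//   method: "undici-proxy-agent",
// }

Note that enabled is still derived only from HTTPS_PROXY/HTTP_PROXY; NO_PROXY is reported but does not toggle the flag.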
package/dist/providers/anthropic.js
CHANGED
@@ -1,4 +1,4 @@
-import {
+import { createAnthropic } from "@ai-sdk/anthropic";
 import { streamText } from "ai";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
@@ -6,6 +6,7 @@ import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
 import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createAnthropicConfig, getProviderModel, } from "../utils/providerConfig.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers - now using consolidated utility
 const getAnthropicApiKey = () => {
 return validateApiKey(createAnthropicConfig());
@@ -21,11 +22,14 @@ export class AnthropicProvider extends BaseProvider {
 model;
 constructor(modelName, sdk) {
 super(modelName, "anthropic", sdk);
-// Initialize Anthropic model with API key validation
+// Initialize Anthropic model with API key validation and proxy support
 const apiKey = getAnthropicApiKey();
-//
-
-
+// Create Anthropic instance with proxy fetch
+const anthropic = createAnthropic({
+apiKey: apiKey,
+fetch: createProxyFetch(),
+});
+// Initialize Anthropic model with proxy-aware instance
 this.model = anthropic(this.modelName || getDefaultAnthropicModel());
 logger.debug("Anthropic Provider v2 initialized", {
 modelName: this.modelName,
package/dist/providers/azureOpenai.js
CHANGED
@@ -4,6 +4,7 @@ import { BaseProvider } from "../core/baseProvider.js";
 import { validateApiKey, createAzureAPIKeyConfig, createAzureEndpointConfig, } from "../utils/providerConfig.js";
 import { logger } from "../utils/logger.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { createProxyFetch } from "../proxy/proxyFetch.js";
 export class AzureOpenAIProvider extends BaseProvider {
 apiKey;
 resourceName;
@@ -31,11 +32,12 @@ export class AzureOpenAIProvider extends BaseProvider {
 if (!this.resourceName) {
 validateApiKey(createAzureEndpointConfig());
 }
-// Create the Azure provider instance
+// Create the Azure provider instance with proxy support
 this.azureProvider = createAzure({
 resourceName: this.resourceName,
 apiKey: this.apiKey,
 apiVersion: this.apiVersion,
+fetch: createProxyFetch(),
 });
 logger.debug("Azure Vercel Provider initialized", {
 deployment: this.deployment,
package/dist/providers/googleVertex.js
CHANGED
@@ -7,6 +7,7 @@ import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { ModelConfigurationManager } from "../core/modelConfiguration.js";
 import { validateApiKey, createVertexProjectConfig, createGoogleAuthConfig, } from "../utils/providerConfig.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Cache for anthropic module to avoid repeated imports
 let _createVertexAnthropic = null;
 let _anthropicImportAttempted = false;
@@ -51,11 +52,12 @@ const hasGoogleCredentials = () => {
 (process.env.GOOGLE_AUTH_CLIENT_EMAIL &&
 process.env.GOOGLE_AUTH_PRIVATE_KEY));
 };
-// Enhanced Vertex settings creation with authentication fallback
+// Enhanced Vertex settings creation with authentication fallback and proxy support
 const createVertexSettings = () => {
 const baseSettings = {
 project: getVertexProjectId(),
 location: getVertexLocation(),
+fetch: createProxyFetch(),
 };
 // Check for principal account authentication first (recommended for production)
 if (process.env.GOOGLE_APPLICATION_CREDENTIALS) {
package/dist/providers/huggingFace.js
CHANGED
@@ -6,6 +6,7 @@ import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
 import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { validateApiKey, createHuggingFaceConfig, getProviderModel, } from "../utils/providerConfig.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers - now using consolidated utility
 const getHuggingFaceApiKey = () => {
 return validateApiKey(createHuggingFaceConfig());
@@ -24,10 +25,11 @@ export class HuggingFaceProvider extends BaseProvider {
 super(modelName, "huggingface");
 // Get API key and validate
 const apiKey = getHuggingFaceApiKey();
-// Create HuggingFace provider using unified router endpoint (2025)
+// Create HuggingFace provider using unified router endpoint (2025) with proxy support
 const huggingface = createOpenAI({
 apiKey: apiKey,
 baseURL: "https://router.huggingface.co/v1",
+fetch: createProxyFetch(),
 });
 // Initialize model
 this.model = huggingface(this.modelName);
package/dist/providers/mistral.js
CHANGED
@@ -7,6 +7,7 @@ import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createMistralConfig, getProviderModel, } from "../utils/providerConfig.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers - now using consolidated utility
 const getMistralApiKey = () => {
 return validateApiKey(createMistralConfig());
@@ -26,10 +27,11 @@ export class MistralProvider extends BaseProvider {
 ? sdk
 : undefined;
 super(modelName, "mistral", validatedNeurolink);
-// Initialize Mistral model with API key validation
+// Initialize Mistral model with API key validation and proxy support
 const apiKey = getMistralApiKey();
 const mistral = createMistral({
 apiKey: apiKey,
+fetch: createProxyFetch(),
 });
 this.model = mistral(this.modelName);
 logger.debug("Mistral Provider v2 initialized", {
package/dist/providers/ollama.js
CHANGED
@@ -3,6 +3,7 @@ import { logger } from "../utils/logger.js";
 import { TimeoutError } from "../utils/timeout.js";
 import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { modelConfig } from "../core/modelConfiguration.js";
+import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Model version constants (configurable via environment)
 const DEFAULT_OLLAMA_MODEL = "llama3.1:8b";
 const FALLBACK_OLLAMA_MODEL = "llama3.2:latest"; // Used when primary model fails
@@ -26,6 +27,8 @@ const getDefaultOllamaModel = () => {
 const getOllamaTimeout = () => {
 return parseInt(process.env.OLLAMA_TIMEOUT || "60000", 10);
 };
+// Create proxy-aware fetch instance
+const proxyFetch = createProxyFetch();
 // Custom LanguageModelV1 implementation for Ollama
 class OllamaLanguageModel {
 specificationVersion = "v1";
@@ -61,7 +64,7 @@ class OllamaLanguageModel {
 // Debug: Log what's being sent to Ollama
 logger.debug("[OllamaLanguageModel] Messages:", JSON.stringify(messages, null, 2));
 logger.debug("[OllamaLanguageModel] Converted Prompt:", JSON.stringify(prompt));
-const response = await
+const response = await proxyFetch(`${this.baseUrl}/api/generate`, {
 method: "POST",
 headers: { "Content-Type": "application/json" },
 body: JSON.stringify({
@@ -108,7 +111,7 @@ class OllamaLanguageModel {
 const messages = options
 .messages || [];
 const prompt = this.convertMessagesToPrompt(messages);
-const response = await
+const response = await proxyFetch(`${this.baseUrl}/api/generate`, {
 method: "POST",
 headers: { "Content-Type": "application/json" },
 body: JSON.stringify({
@@ -325,7 +328,7 @@ export class OllamaProvider extends BaseProvider {
 : []),
 { role: "user", content: options.input.text },
 ];
-const response = await
+const response = await proxyFetch(`${this.baseUrl}/v1/chat/completions`, {
 method: "POST",
 headers: { "Content-Type": "application/json" },
 body: JSON.stringify({
@@ -366,7 +369,7 @@ export class OllamaProvider extends BaseProvider {
 * Fallback for non-tool scenarios or when chat API is unavailable
 */
 async executeStreamWithoutTools(options, analysisSchema) {
-const response = await
+const response = await proxyFetch(`${this.baseUrl}/api/generate`, {
 method: "POST",
 headers: { "Content-Type": "application/json" },
 body: JSON.stringify({
@@ -568,7 +571,7 @@ export class OllamaProvider extends BaseProvider {
 // Use traditional AbortController for better compatibility
 const controller = new AbortController();
 const timeoutId = setTimeout(() => controller.abort(), 5000);
-const response = await
+const response = await proxyFetch(`${this.baseUrl}/api/version`, {
 method: "GET",
 signal: controller.signal,
 });
@@ -589,7 +592,7 @@ export class OllamaProvider extends BaseProvider {
 */
 async getAvailableModels() {
 try {
-const response = await
+const response = await proxyFetch(`${this.baseUrl}/api/tags`);
 if (!response.ok) {
 throw new Error(`Failed to fetch models: ${response.status}`);
 }
package/dist/providers/openAI.js
CHANGED
@@ -1,4 +1,4 @@
-import {
+import { createOpenAI } from "@ai-sdk/openai";
 import { streamText } from "ai";
 import { AIProviderName } from "../core/types.js";
 import { BaseProvider } from "../core/baseProvider.js";
@@ -8,6 +8,7 @@ import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createOpenAIConfig, getProviderModel, } from "../utils/providerConfig.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers - now using consolidated utility
 const getOpenAIApiKey = () => {
 return validateApiKey(createOpenAIConfig());
@@ -23,8 +24,11 @@ export class OpenAIProvider extends BaseProvider {
 model;
 constructor(modelName, neurolink) {
 super(modelName || getOpenAIModel(), AIProviderName.OPENAI, neurolink);
-//
-
+// Initialize OpenAI provider with proxy support
+const openai = createOpenAI({
+apiKey: getOpenAIApiKey(),
+fetch: createProxyFetch(),
+});
 // Initialize model
 this.model = openai(this.modelName);
 logger.debug("OpenAIProviderV2 initialized", {
package/dist/proxy/proxyFetch.js
CHANGED
@@ -57,10 +57,12 @@ export function createProxyFetch() {
 export function getProxyStatus() {
 const httpsProxy = process.env.HTTPS_PROXY || process.env.https_proxy;
 const httpProxy = process.env.HTTP_PROXY || process.env.http_proxy;
+const noProxy = process.env.NO_PROXY || process.env.no_proxy;
 return {
 enabled: !!(httpsProxy || httpProxy),
 httpProxy: httpProxy || null,
 httpsProxy: httpsProxy || null,
+noProxy: noProxy || null,
 method: "undici-proxy-agent",
 };
 }
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
 "name": "@juspay/neurolink",
-"version": "7.16.0",
+"version": "7.17.0",
 "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
 "author": {
 "name": "Juspay Technologies",