@juspay/neurolink 7.27.0 → 7.28.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/CHANGELOG.md +13 -0
  2. package/dist/cli/commands/config.d.ts +3 -3
  3. package/dist/cli/commands/ollama.d.ts +3 -0
  4. package/dist/cli/commands/ollama.js +288 -0
  5. package/dist/cli/factories/ollamaCommandFactory.d.ts +4 -0
  6. package/dist/cli/factories/ollamaCommandFactory.js +86 -93
  7. package/dist/cli/utils/ollamaUtils.d.ts +24 -0
  8. package/dist/cli/utils/ollamaUtils.js +161 -0
  9. package/dist/lib/mcp/toolDiscoveryService.js +1 -1
  10. package/dist/lib/neurolink.js +1099 -56
  11. package/dist/lib/providers/amazonBedrock.d.ts +2 -2
  12. package/dist/lib/providers/amazonBedrock.js +16 -7
  13. package/dist/lib/providers/googleVertex.d.ts +28 -3
  14. package/dist/lib/providers/googleVertex.js +1132 -84
  15. package/dist/lib/providers/litellm.d.ts +1 -1
  16. package/dist/lib/providers/litellm.js +7 -4
  17. package/dist/lib/providers/openaiCompatible.d.ts +1 -1
  18. package/dist/lib/providers/openaiCompatible.js +7 -4
  19. package/dist/lib/proxy/proxyFetch.js +124 -2
  20. package/dist/lib/utils/providerHealth.d.ts +57 -1
  21. package/dist/lib/utils/providerHealth.js +638 -33
  22. package/dist/lib/utils/transformationUtils.js +3 -3
  23. package/dist/mcp/toolDiscoveryService.js +1 -1
  24. package/dist/neurolink.js +1099 -56
  25. package/dist/providers/amazonBedrock.d.ts +2 -2
  26. package/dist/providers/amazonBedrock.js +16 -7
  27. package/dist/providers/googleVertex.d.ts +28 -3
  28. package/dist/providers/googleVertex.js +1132 -84
  29. package/dist/providers/litellm.d.ts +1 -1
  30. package/dist/providers/litellm.js +7 -4
  31. package/dist/providers/openaiCompatible.d.ts +1 -1
  32. package/dist/providers/openaiCompatible.js +7 -4
  33. package/dist/proxy/proxyFetch.js +124 -2
  34. package/dist/utils/providerHealth.d.ts +57 -1
  35. package/dist/utils/providerHealth.js +638 -33
  36. package/dist/utils/transformationUtils.js +3 -3
  37. package/package.json +1 -1
package/dist/lib/providers/litellm.d.ts
@@ -28,7 +28,7 @@ export declare class LiteLLMProvider extends BaseProvider {
      * Provider-specific streaming implementation
      * Note: This is only used when tools are disabled
      */
-    protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
+    protected executeStream(options: StreamOptions, _analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
     /**
      * Get available models from LiteLLM proxy server
      * Dynamically fetches from /v1/models endpoint with caching and fallback
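
The only substantive change here is the underscore prefix on the unused parameter. A minimal sketch of the convention this relies on (the rule names below are the usual TypeScript/ESLint ones, not something this diff configures):

```typescript
// Under tsc's `noUnusedParameters` (or ESLint's no-unused-vars with an
// argsIgnorePattern of "^_"), a leading underscore marks a parameter that is
// accepted for signature compatibility but deliberately ignored.
function executeStream(options: string, _analysisSchema?: unknown): string {
  return options; // _analysisSchema is never read; the prefix silences the lint
}
```
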
package/dist/lib/providers/litellm.js
@@ -2,11 +2,12 @@ import { createOpenAI } from "@ai-sdk/openai";
 import { streamText } from "ai";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
-import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
+import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
 import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { getProviderModel } from "../utils/providerConfig.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers
 const getLiteLLMConfig = () => {
     return {
@@ -51,6 +52,7 @@ export class LiteLLMProvider extends BaseProvider {
         const customOpenAI = createOpenAI({
             baseURL: config.baseURL,
             apiKey: config.apiKey,
+            fetch: createProxyFetch(),
         });
         this.model = customOpenAI(this.modelName || getDefaultLiteLLMModel());
         logger.debug("LiteLLM Provider initialized", {
@@ -113,7 +115,7 @@ export class LiteLLMProvider extends BaseProvider {
      * Provider-specific streaming implementation
      * Note: This is only used when tools are disabled
      */
-    async executeStream(options, analysisSchema) {
+    async executeStream(options, _analysisSchema) {
         this.validateStreamOptions(options);
         const startTime = Date.now();
         const timeout = this.getTimeout(options);
@@ -121,7 +123,7 @@ export class LiteLLMProvider extends BaseProvider {
         try {
             // Build message array from options
             const messages = buildMessagesArray(options);
-            const result = await streamText({
+            const result = streamText({
                 model: this.model,
                 messages: messages,
                 temperature: options.temperature,
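
Dropping the `await` matches newer Vercel AI SDK behavior, where `streamText` returns its result object immediately and streaming starts in the background; only the streams on the result are awaited. A hedged sketch of the consuming pattern (the model id is illustrative):

```typescript
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

async function demo(): Promise<void> {
  // streamText returns a StreamTextResult synchronously; no `await` on the call
  const result = streamText({
    model: openai("gpt-4o-mini"), // illustrative model id
    prompt: "Say hello",
  });
  // The async iteration is where the streaming actually happens
  for await (const chunk of result.textStream) {
    process.stdout.write(chunk);
  }
}
```
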
@@ -217,7 +219,8 @@ export class LiteLLMProvider extends BaseProvider {
         const timeoutId = setTimeout(() => controller.abort(), 5000); // 5 second timeout
         try {
             logger.debug(`[${functionTag}] Fetching models from ${modelsUrl}`);
-            const response = await fetch(modelsUrl, {
+            const proxyFetch = createProxyFetch();
+            const response = await proxyFetch(modelsUrl, {
                 method: "GET",
                 headers: {
                     Authorization: `Bearer ${config.apiKey}`,
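
Taken together, the litellm.js changes route all provider traffic through one proxy-aware fetch: the `createOpenAI` client receives it via the `fetch` option, and the `/v1/models` discovery call uses it directly. A sketch of the wiring (environment variable names and model id are illustrative, not from the diff):

```typescript
import { createOpenAI } from "@ai-sdk/openai";
import { createProxyFetch } from "./proxy/proxyFetch.js"; // path per the package's dist layout

const litellm = createOpenAI({
  baseURL: process.env.LITELLM_BASE_URL, // illustrative env var
  apiKey: process.env.LITELLM_API_KEY,   // illustrative env var
  fetch: createProxyFetch(),             // honors HTTPS_PROXY / HTTP_PROXY when set
});
const model = litellm("gpt-4o-mini");    // placeholder model id
```
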
package/dist/lib/providers/openaiCompatible.d.ts
@@ -29,7 +29,7 @@ export declare class OpenAICompatibleProvider extends BaseProvider {
      * Provider-specific streaming implementation
      * Note: This is only used when tools are disabled
      */
-    protected executeStream(options: StreamOptions, analysisSchema?: ZodUnknownSchema | Schema<unknown>): Promise<StreamResult>;
+    protected executeStream(options: StreamOptions, _analysisSchema?: ZodUnknownSchema | Schema<unknown>): Promise<StreamResult>;
     /**
      * Get available models from OpenAI Compatible endpoint
      *
package/dist/lib/providers/openaiCompatible.js
@@ -2,9 +2,10 @@ import { createOpenAI } from "@ai-sdk/openai";
 import { streamText } from "ai";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
-import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
+import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
 import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
+import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Constants
 const FALLBACK_OPENAI_COMPATIBLE_MODEL = "gpt-3.5-turbo";
 // Configuration helpers
@@ -51,6 +52,7 @@ export class OpenAICompatibleProvider extends BaseProvider {
         this.customOpenAI = createOpenAI({
             baseURL: this.config.baseURL,
             apiKey: this.config.apiKey,
+            fetch: createProxyFetch(),
         });
         logger.debug("OpenAI Compatible Provider initialized", {
             modelName: this.modelName,
@@ -149,14 +151,14 @@ export class OpenAICompatibleProvider extends BaseProvider {
      * Provider-specific streaming implementation
      * Note: This is only used when tools are disabled
      */
-    async executeStream(options, analysisSchema) {
+    async executeStream(options, _analysisSchema) {
         this.validateStreamOptions(options);
         const startTime = Date.now();
         const timeout = this.getTimeout(options);
         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {
             const model = await this.getAISDKModel();
-            const result = await streamText({
+            const result = streamText({
                 model,
                 prompt: options.input.text,
                 system: options.systemPrompt,
@@ -204,7 +206,8 @@ export class OpenAICompatibleProvider extends BaseProvider {
         try {
             const modelsUrl = new URL("/v1/models", this.config.baseURL).toString();
             logger.debug(`Fetching available models from: ${modelsUrl}`);
-            const response = await fetch(modelsUrl, {
+            const proxyFetch = createProxyFetch();
+            const response = await proxyFetch(modelsUrl, {
                 headers: {
                     Authorization: `Bearer ${this.config.apiKey}`,
                     "Content-Type": "application/json",
package/dist/lib/proxy/proxyFetch.js
@@ -10,6 +10,28 @@ import { logger } from "../utils/logger.js";
 export function createProxyFetch() {
     const httpsProxy = process.env.HTTPS_PROXY || process.env.https_proxy;
     const httpProxy = process.env.HTTP_PROXY || process.env.http_proxy;
+    // EXHAUSTIVE LOGGING: Capture ALL proxy-related environment variables BEFORE configuration
+    logger.debug("[Proxy Fetch] 🔍 EXHAUSTIVE_PROXY_ENV_BEFORE", {
+        // Original proxy config function calls
+        proxyHostFunction: "getProxyHost equivalent",
+        proxyPortFunction: "getProxyPort equivalent",
+        // Raw environment variables BEFORE any changes
+        originalHttpProxy: process.env.HTTP_PROXY || "NOT_SET",
+        originalHttpsProxy: process.env.HTTPS_PROXY || "NOT_SET",
+        originalAllProxy: process.env.ALL_PROXY || "NOT_SET",
+        originalNoProxy: process.env.NO_PROXY || "NOT_SET",
+        // Node.js specific proxy variables
+        originalNodejsHttpProxy: process.env.nodejs_http_proxy || "NOT_SET",
+        originalNodejsHttpsProxy: process.env.nodejs_https_proxy || "NOT_SET",
+        // All potential proxy-related environment variables
+        allProxyRelatedEnvVars: Object.keys(process.env)
+            .filter((key) => key.toLowerCase().includes("proxy"))
+            .reduce((acc, key) => {
+            acc[key] = process.env[key] || "NOT_SET";
+            return acc;
+        }, {}),
+        message: "EXHAUSTIVE proxy environment capture BEFORE configuration",
+    });
     // If no proxy configured, return standard fetch
     if (!httpsProxy && !httpProxy) {
         logger.debug("[Proxy Fetch] No proxy environment variables found - using standard fetch");
@@ -20,34 +42,134 @@ export function createProxyFetch() {
     logger.debug(`[Proxy Fetch] HTTPS_PROXY: ${httpsProxy || "not set"}`);
     // Return proxy-aware fetch function
     return async (input, init) => {
+        const requestId = `req-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
+        logger.debug(`[Proxy Fetch] 🚀 EXHAUSTIVE REQUEST START`, {
+            requestId,
+            input: typeof input === "string"
+                ? input
+                : input instanceof URL
+                    ? input.href
+                    : input.url,
+            timestamp: new Date().toISOString(),
+            httpProxy: httpProxy || "NOT_SET",
+            httpsProxy: httpsProxy || "NOT_SET",
+            initHeaders: init?.headers || "NO_HEADERS",
+            initMethod: init?.method || "GET",
+        });
         try {
             // Dynamic import undici to avoid build issues
             const undici = await import("undici");
             const { ProxyAgent } = undici;
+            logger.debug(`[Proxy Fetch] 🔧 EXHAUSTIVE UNDICI IMPORT SUCCESS`, {
+                requestId,
+                hasUndici: !!undici,
+                hasProxyAgent: !!ProxyAgent,
+                undiciType: typeof undici,
+                proxyAgentType: typeof ProxyAgent,
+                timestamp: new Date().toISOString(),
+            });
             const url = typeof input === "string"
                 ? new URL(input)
                 : input instanceof URL
                     ? input
                     : new URL(input.url);
             const proxyUrl = url.protocol === "https:" ? httpsProxy : httpProxy;
+            logger.debug(`[Proxy Fetch] 🔗 EXHAUSTIVE URL ANALYSIS`, {
+                requestId,
+                urlString: url.href,
+                urlHostname: url.hostname,
+                urlProtocol: url.protocol,
+                urlPort: url.port,
+                urlPathname: url.pathname,
+                selectedProxyUrl: proxyUrl,
+                timestamp: new Date().toISOString(),
+            });
             if (proxyUrl) {
                 logger.debug(`[Proxy Fetch] Creating ProxyAgent for ${url.hostname} via ${proxyUrl}`);
+                logger.debug(`[Proxy Fetch] 🎯 EXHAUSTIVE PROXY AGENT CREATION`, {
+                    requestId,
+                    proxyUrl,
+                    targetHostname: url.hostname,
+                    targetProtocol: url.protocol,
+                    aboutToCreateProxyAgent: true,
+                    timestamp: new Date().toISOString(),
+                });
                 // Create ProxyAgent
                 const dispatcher = new ProxyAgent(proxyUrl);
+                logger.debug(`[Proxy Fetch] ✅ EXHAUSTIVE PROXY AGENT CREATED`, {
+                    requestId,
+                    hasDispatcher: !!dispatcher,
+                    dispatcherType: typeof dispatcher,
+                    dispatcherConstructor: dispatcher?.constructor?.name || "unknown",
+                    timestamp: new Date().toISOString(),
+                });
+                logger.debug(`[Proxy Fetch] 🌐 EXHAUSTIVE UNDICI FETCH CALL`, {
+                    requestId,
+                    aboutToCallUndici: true,
+                    inputType: typeof input,
+                    hasInit: !!init,
+                    hasDispatcher: !!dispatcher,
+                    timestamp: new Date().toISOString(),
+                });
                 // Use undici fetch with dispatcher
-                const response = await undici.fetch(input, {
-                    ...init,
+                // Handle Request objects by extracting URL and merging properties
+                let fetchInput;
+                let fetchInit = { ...init };
+                if (input instanceof Request) {
+                    fetchInput = input.url;
+                    // Merge Request properties into init
+                    fetchInit = {
+                        method: input.method,
+                        headers: input.headers,
+                        body: input.body,
+                        ...init, // Allow init to override Request properties
+                    };
+                }
+                else {
+                    fetchInput = input;
+                }
+                const response = await undici.fetch(fetchInput, {
+                    ...fetchInit,
                     dispatcher: dispatcher,
                 });
+                logger.debug(`[Proxy Fetch] 🎉 EXHAUSTIVE UNDICI FETCH SUCCESS`, {
+                    requestId,
+                    hasResponse: !!response,
+                    responseStatus: response?.status,
+                    responseStatusText: response?.statusText,
+                    responseHeaders: response?.headers
+                        ? Object.fromEntries(response.headers.entries())
+                        : "NO_HEADERS",
+                    responseOk: response?.ok,
+                    responseType: response?.type,
+                    responseUrl: response?.url,
+                    timestamp: new Date().toISOString(),
+                });
                 logger.debug(`[Proxy Fetch] ✅ Request proxied successfully to ${url.hostname}`);
                 return response; // undici.fetch returns compatible Response type
             }
         }
         catch (error) {
             const errorMessage = error instanceof Error ? error.message : String(error);
+            logger.debug(`[Proxy Fetch] 💥 EXHAUSTIVE ERROR ANALYSIS`, {
+                requestId,
+                error: errorMessage,
+                errorType: error instanceof Error ? error.constructor.name : typeof error,
+                errorStack: error instanceof Error ? error.stack : undefined,
+                errorCode: error?.code || "NO_CODE",
+                errorSyscall: error?.syscall || "NO_SYSCALL",
+                errorAddress: error?.address || "NO_ADDRESS",
+                errorPort: error?.port || "NO_PORT",
+                timestamp: new Date().toISOString(),
+            });
             logger.warn(`[Proxy Fetch] Proxy failed (${errorMessage}), falling back to direct connection`);
         }
         // Fallback to standard fetch
+        logger.debug(`[Proxy Fetch] 🔄 EXHAUSTIVE FALLBACK TO STANDARD FETCH`, {
+            requestId,
+            fallbackReason: "Either no proxy URL or error occurred",
+            timestamp: new Date().toISOString(),
+        });
         return fetch(input, init);
     };
 }
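
The functional change buried in the logging is the `Request`-unwrapping branch: instead of passing a `Request` instance straight to `undici.fetch` (which can reject objects created by a different fetch implementation), the code forwards the URL plus the extracted method, headers, and body. A minimal sketch of the same pattern, assuming an installed `undici`; the proxy URL is a placeholder, and the `duplex` line is an addition undici requires for stream bodies:

```typescript
import { fetch as undiciFetch, ProxyAgent } from "undici";

type UndiciInit = NonNullable<Parameters<typeof undiciFetch>[1]>;

export async function proxiedFetch(input: string | URL | Request, init: UndiciInit = {}) {
  const dispatcher = new ProxyAgent("http://proxy.example:3128"); // placeholder proxy
  if (input instanceof Request) {
    // Forward the URL and extracted fields rather than the Request itself
    return undiciFetch(input.url, {
      method: input.method,
      headers: Object.fromEntries(input.headers),
      body: input.body as UndiciInit["body"],
      duplex: input.body ? "half" : undefined, // undici needs this for stream bodies
      ...init, // explicit init still wins, matching the diff's merge order
      dispatcher,
    });
  }
  return undiciFetch(input.toString(), { ...init, dispatcher });
}
```
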
package/dist/lib/utils/providerHealth.d.ts
@@ -80,13 +80,69 @@ export declare class ProviderHealthChecker {
      * Get cached health status if still valid
      */
     private static getCachedHealth;
+    /**
+     * Check if Vertex AI supports Anthropic models (dual provider architecture)
+     */
+    static checkVertexAnthropicSupport(): Promise<{
+        isSupported: boolean;
+        hasCreateVertexAnthropic: boolean;
+        hasCorrectTypes: boolean;
+        hasValidProject: boolean;
+        hasRegionalSupport: boolean;
+        hasNetworkAccess: boolean;
+        hasAnthropicModels: boolean;
+        authentication: {
+            isValid: boolean;
+            method: string;
+            issues: string[];
+        };
+        projectConfiguration: {
+            isValid: boolean;
+            projectId: string | undefined;
+            region: string | undefined;
+            issues: string[];
+        };
+        modelSupport: {
+            availableModels: string[];
+            recommendedModels: string[];
+            deprecatedModels: string[];
+        };
+        recommendations: string[];
+        troubleshooting: string[];
+    }>;
+    /**
+     * Validate Vertex AI authentication configuration
+     */
+    private static validateVertexAuthentication;
+    /**
+     * Validate Vertex AI project configuration
+     */
+    private static validateVertexProjectConfiguration;
+    /**
+     * Check if the specified region supports Anthropic models
+     */
+    private static checkVertexRegionalSupport;
+    /**
+     * Check network connectivity to Vertex AI endpoints
+     */
+    private static checkVertexNetworkConnectivity;
+    /**
+     * Check if Anthropic model integration is enabled in the project
+     */
+    private static checkAnthropicModelIntegration;
+    /**
+     * Initialize health checks in the background (NON-BLOCKING)
+     * Starts background health monitoring without blocking initialization
+     */
+    static initializeBackgroundHealthChecks(): void;
     /**
      * Clear health cache for a provider or all providers
      */
     static clearHealthCache(providerName?: AIProviderName): void;
     /**
-     * Get the best healthy provider from a list of options
+     * Get the best healthy provider from a list of options (NON-BLOCKING)
      * Prioritizes healthy providers over configured but unhealthy ones
+     * Uses fast, cached health checks to avoid blocking initialization
      */
     static getBestHealthyProvider(preferredProviders?: string[]): Promise<string | null>;
     /**
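
The new `checkVertexAnthropicSupport` diagnostic returns a structured report rather than a boolean, which makes it usable for both gating and troubleshooting output. A hypothetical usage sketch; the root import is an assumption, since this diff only shows the declaration in `dist/lib/utils/providerHealth.d.ts`:

```typescript
import { ProviderHealthChecker } from "@juspay/neurolink"; // assumed re-export

const report = await ProviderHealthChecker.checkVertexAnthropicSupport();
if (!report.isSupported) {
  console.warn("Vertex Anthropic models unavailable:");
  for (const step of report.troubleshooting) {
    console.warn(`  - ${step}`);
  }
} else {
  console.log("Recommended models:", report.modelSupport.recommendedModels.join(", "));
}
```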