@juspay/neurolink 7.33.3 → 7.33.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/CHANGELOG.md +6 -0
  2. package/dist/cli/commands/config.d.ts +3 -4
  3. package/dist/cli/commands/config.js +2 -3
  4. package/dist/core/baseProvider.js +26 -1
  5. package/dist/core/constants.d.ts +12 -3
  6. package/dist/core/constants.js +22 -6
  7. package/dist/core/factory.js +19 -0
  8. package/dist/factories/providerRegistry.js +2 -0
  9. package/dist/lib/core/baseProvider.js +26 -1
  10. package/dist/lib/core/constants.d.ts +12 -3
  11. package/dist/lib/core/constants.js +22 -6
  12. package/dist/lib/core/factory.js +19 -0
  13. package/dist/lib/factories/providerRegistry.js +2 -0
  14. package/dist/lib/mcp/servers/aiProviders/aiWorkflowTools.js +2 -2
  15. package/dist/lib/providers/amazonBedrock.js +2 -2
  16. package/dist/lib/providers/anthropic.js +3 -12
  17. package/dist/lib/providers/anthropicBaseProvider.js +1 -2
  18. package/dist/lib/providers/azureOpenai.js +49 -8
  19. package/dist/lib/providers/googleAiStudio.js +3 -3
  20. package/dist/lib/providers/googleVertex.js +2 -2
  21. package/dist/lib/providers/huggingFace.js +1 -2
  22. package/dist/lib/providers/litellm.js +1 -2
  23. package/dist/lib/providers/mistral.js +2 -2
  24. package/dist/lib/providers/ollama.js +7 -8
  25. package/dist/lib/providers/openAI.js +2 -2
  26. package/dist/lib/providers/openaiCompatible.js +5 -2
  27. package/dist/lib/providers/sagemaker/language-model.d.ts +5 -0
  28. package/dist/lib/providers/sagemaker/language-model.js +9 -1
  29. package/dist/lib/utils/providerHealth.js +7 -3
  30. package/dist/lib/utils/tokenLimits.d.ts +2 -2
  31. package/dist/lib/utils/tokenLimits.js +10 -3
  32. package/dist/mcp/servers/aiProviders/aiWorkflowTools.js +2 -2
  33. package/dist/providers/amazonBedrock.js +2 -2
  34. package/dist/providers/anthropic.js +3 -12
  35. package/dist/providers/anthropicBaseProvider.js +1 -2
  36. package/dist/providers/azureOpenai.js +49 -8
  37. package/dist/providers/googleAiStudio.js +3 -3
  38. package/dist/providers/googleVertex.js +2 -2
  39. package/dist/providers/huggingFace.js +1 -2
  40. package/dist/providers/litellm.js +1 -2
  41. package/dist/providers/mistral.js +2 -2
  42. package/dist/providers/ollama.js +7 -8
  43. package/dist/providers/openAI.js +2 -2
  44. package/dist/providers/openaiCompatible.js +5 -2
  45. package/dist/providers/sagemaker/language-model.d.ts +5 -0
  46. package/dist/providers/sagemaker/language-model.js +9 -1
  47. package/dist/utils/providerHealth.js +7 -3
  48. package/dist/utils/tokenLimits.d.ts +2 -2
  49. package/dist/utils/tokenLimits.js +10 -3
  50. package/package.json +1 -1
package/CHANGELOG.md CHANGED
@@ -1,3 +1,9 @@
+ ## [7.33.4](https://github.com/juspay/neurolink/compare/v7.33.3...v7.33.4) (2025-09-04)
+
+ ### Bug Fixes
+
+ - **(azure):** resolve provider initialization and streaming issues ([f35114b](https://github.com/juspay/neurolink/commit/f35114bcf29ee23c3b9abefcb99c49f7a0533507))
+
  ## [7.33.3](https://github.com/juspay/neurolink/compare/v7.33.2...v7.33.3) (2025-09-04)
 
  ## [7.33.2](https://github.com/juspay/neurolink/compare/v7.33.1...v7.33.2) (2025-09-04)
package/dist/cli/commands/config.d.ts CHANGED
@@ -238,7 +238,7 @@ declare const ConfigSchema: z.ZodObject<{
  preferences: z.ZodDefault<z.ZodObject<{
  outputFormat: z.ZodDefault<z.ZodEnum<["text", "json", "yaml"]>>;
  temperature: z.ZodDefault<z.ZodNumber>;
- maxTokens: z.ZodDefault<z.ZodNumber>;
+ maxTokens: z.ZodOptional<z.ZodNumber>;
  enableLogging: z.ZodDefault<z.ZodBoolean>;
  enableCaching: z.ZodDefault<z.ZodBoolean>;
  cacheStrategy: z.ZodDefault<z.ZodEnum<["memory", "file", "redis"]>>;
@@ -246,7 +246,6 @@ declare const ConfigSchema: z.ZodObject<{
  enableAnalyticsByDefault: z.ZodDefault<z.ZodBoolean>;
  enableEvaluationByDefault: z.ZodDefault<z.ZodBoolean>;
  }, "strip", z.ZodTypeAny, {
- maxTokens: number;
  temperature: number;
  outputFormat: "text" | "json" | "yaml";
  enableLogging: boolean;
@@ -254,6 +253,7 @@ declare const ConfigSchema: z.ZodObject<{
  cacheStrategy: "memory" | "redis" | "file";
  enableAnalyticsByDefault: boolean;
  enableEvaluationByDefault: boolean;
+ maxTokens?: number | undefined;
  defaultEvaluationDomain?: string | undefined;
  }, {
  maxTokens?: number | undefined;
@@ -508,7 +508,6 @@ declare const ConfigSchema: z.ZodObject<{
  defaultProvider: "openai" | "anthropic" | "vertex" | "google-ai" | "bedrock" | "azure" | "huggingface" | "ollama" | "mistral" | "auto";
  profiles: Record<string, any>;
  preferences: {
- maxTokens: number;
  temperature: number;
  outputFormat: "text" | "json" | "yaml";
  enableLogging: boolean;
@@ -516,6 +515,7 @@ declare const ConfigSchema: z.ZodObject<{
  cacheStrategy: "memory" | "redis" | "file";
  enableAnalyticsByDefault: boolean;
  enableEvaluationByDefault: boolean;
+ maxTokens?: number | undefined;
  defaultEvaluationDomain?: string | undefined;
  };
  domains: {
@@ -713,7 +713,6 @@ export declare class ConfigManager {
  * Mistral AI provider setup
  */
  private setupMistral;
- /**
  /**
  * Get current configuration
  */
package/dist/cli/commands/config.js CHANGED
@@ -110,7 +110,7 @@ const ConfigSchema = z.object({
  .number()
  .min(CLI_LIMITS.maxTokens.min)
  .max(CLI_LIMITS.maxTokens.max)
- .default(CLI_LIMITS.maxTokens.default),
+ .optional(), // No default limit
  enableLogging: z.boolean().default(false),
  enableCaching: z.boolean().default(true),
  cacheStrategy: z.enum(["memory", "file", "redis"]).default("memory"),
@@ -726,7 +726,6 @@ export class ConfigManager {
  ]);
  this.config.providers.mistral = answers;
  }
- /**
  /**
  * Get current configuration
  */
@@ -749,7 +748,7 @@ export class ConfigManager {
  logger.always(` Default Provider: ${chalk.white(this.config.defaultProvider)}`);
  logger.always(` Output Format: ${chalk.white(this.config.preferences.outputFormat)}`);
  logger.always(` Temperature: ${chalk.white(this.config.preferences.temperature)}`);
- logger.always(` Max Tokens: ${chalk.white(this.config.preferences.maxTokens)}`);
+ logger.always(` Max Tokens: ${chalk.white(this.config.preferences.maxTokens ?? "Provider default (no cap)")}`);
  logger.always(` Default Evaluation Domain: ${chalk.white(this.config.preferences.defaultEvaluationDomain || "None")}`);
  logger.always(` Analytics by Default: ${chalk.white(this.config.preferences.enableAnalyticsByDefault)}`);
  logger.always(` Evaluation by Default: ${chalk.white(this.config.preferences.enableEvaluationByDefault)}`);
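The net effect of the two config changes above: `maxTokens` is still bounds-checked when present, but parsing no longer injects a default. A minimal sketch of the new behavior, with the `CLI_LIMITS` bounds inlined (zod assumed, as in the source):

```js
import { z } from "zod";

// Same shape as the preferences schema above, reduced to the relevant fields.
const preferences = z.object({
  temperature: z.number().default(0.7),
  maxTokens: z.number().min(1).max(50000).optional(), // was .default(8192)
});

preferences.parse({}); // => { temperature: 0.7 } — maxTokens stays absent
preferences.parse({ maxTokens: 4096 }); // => { temperature: 0.7, maxTokens: 4096 }
```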
package/dist/core/baseProvider.js CHANGED
@@ -168,6 +168,31 @@ export class BaseProvider {
  ...(options.tools || {}), // Include external tools passed from NeuroLink
  }
  : {};
+ // DEBUG: Log detailed tool information for generate
+ logger.debug("BaseProvider Generate - Tool Loading Debug", {
+ provider: this.providerName,
+ shouldUseTools,
+ baseToolsProvided: !!baseTools,
+ baseToolCount: baseTools ? Object.keys(baseTools).length : 0,
+ finalToolCount: tools ? Object.keys(tools).length : 0,
+ toolNames: tools ? Object.keys(tools).slice(0, 10) : [],
+ disableTools: options.disableTools,
+ supportsTools: this.supportsTools(),
+ externalToolsCount: options.tools
+ ? Object.keys(options.tools).length
+ : 0,
+ });
+ if (tools && Object.keys(tools).length > 0) {
+ logger.debug("BaseProvider Generate - First 5 Tools Detail", {
+ provider: this.providerName,
+ tools: Object.keys(tools)
+ .slice(0, 5)
+ .map((name) => ({
+ name,
+ description: tools[name]?.description?.substring(0, 100),
+ })),
+ });
+ }
  logger.debug(`[BaseProvider.generate] Tools for ${this.providerName}:`, {
  directTools: getKeyCount(baseTools),
  directToolNames: getKeysAsString(baseTools),
@@ -187,7 +212,7 @@ export class BaseProvider {
  maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
  toolChoice: shouldUseTools ? "auto" : "none",
  temperature: options.temperature,
- maxTokens: options.maxTokens || 8192,
+ maxTokens: options.maxTokens, // No default limit - unlimited unless specified
  });
  // Accumulate the streamed content
  let accumulatedContent = "";
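Downstream, the Vercel AI SDK treats an `undefined` `maxTokens` like an omitted option, so the model's own output ceiling applies instead of a hardcoded 8192. A hedged sketch of the resulting call shape (provider, model, and prompt are placeholders, not NeuroLink internals):

```js
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await streamText({
  model: openai("gpt-4o-mini"),
  prompt: "Summarize the release notes.",
  maxTokens: undefined, // no cap requested: the provider default applies
});
for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}
```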
package/dist/core/constants.d.ts CHANGED
@@ -2,7 +2,7 @@
  * Central configuration constants for NeuroLink
  * Single source of truth for all default values
  */
- export declare const DEFAULT_MAX_TOKENS = 8192;
+ export declare const DEFAULT_MAX_TOKENS: undefined;
  export declare const DEFAULT_TEMPERATURE = 0.7;
  export declare const DEFAULT_TIMEOUT = 30000;
  export declare const DEFAULT_MAX_STEPS = 5;
@@ -67,6 +67,15 @@ export declare const PROVIDER_MAX_TOKENS: {
  "anthropic.claude-3-5-sonnet-20240620-v1:0": number;
  default: number;
  };
+ azure: {
+ "gpt-4o": number;
+ "gpt-4o-mini": number;
+ "gpt-4.1": number;
+ "gpt-3.5-turbo": number;
+ "gpt-4": number;
+ "gpt-4-turbo": number;
+ default: number;
+ };
  ollama: {
  default: number;
  };
@@ -79,7 +88,7 @@ export declare const CLI_LIMITS: {
  maxTokens: {
  min: number;
  max: number;
- default: number;
+ default: undefined;
  };
  temperature: {
  min: number;
@@ -99,6 +108,6 @@ export declare const SYSTEM_LIMITS: {
  DEFAULT_BACKOFF_MULTIPLIER: number;
  };
  export declare const ENV_DEFAULTS: {
- maxTokens: number;
+ maxTokens: number | undefined;
  temperature: number;
  };
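These declarations back the token-limit helpers (`tokenLimits.d.ts`/`tokenLimits.js` in the file list). A hypothetical lookup sketch showing how the new `azure` table would typically be consulted — exact model first, then the provider-wide `default`:

```js
import { PROVIDER_MAX_TOKENS } from "./constants.js";

// Illustrative helper, not the package's actual tokenLimits implementation.
function lookupMaxTokens(provider, model) {
  const table = PROVIDER_MAX_TOKENS[provider];
  return table[model] ?? table.default;
}

lookupMaxTokens("azure", "gpt-4o"); // 16384
lookupMaxTokens("azure", "my-custom-deployment"); // 8192 (azure default)
```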
package/dist/core/constants.js CHANGED
@@ -3,7 +3,7 @@
  * Single source of truth for all default values
  */
  // Core AI Generation Defaults
- export const DEFAULT_MAX_TOKENS = 8192; // Changed from 10000 to fix Anthropic error
+ export const DEFAULT_MAX_TOKENS = undefined; // Unlimited by default - let providers decide their own limits
  export const DEFAULT_TEMPERATURE = 0.7;
  export const DEFAULT_TIMEOUT = 30000;
  export const DEFAULT_MAX_STEPS = 5; // Default multi-turn tool execution steps
@@ -72,6 +72,15 @@ export const PROVIDER_MAX_TOKENS = {
  "anthropic.claude-3-5-sonnet-20240620-v1:0": 4096,
  default: 4096,
  },
+ azure: {
+ "gpt-4o": 16384,
+ "gpt-4o-mini": 16384,
+ "gpt-4.1": 16384,
+ "gpt-3.5-turbo": 4096,
+ "gpt-4": 8192,
+ "gpt-4-turbo": 4096,
+ default: 8192, // Azure OpenAI generally supports similar limits to OpenAI
+ },
  ollama: {
  default: 8192, // Ollama typically supports higher limits
  },
@@ -85,7 +94,7 @@ export const CLI_LIMITS = {
  maxTokens: {
  min: 1,
  max: 50000,
- default: DEFAULT_MAX_TOKENS,
+ default: undefined, // No default limit - unlimited by default
  },
  temperature: {
  min: 0,
@@ -112,10 +121,17 @@ export const SYSTEM_LIMITS = {
  };
  // Environment Variable Support (for future use)
  export const ENV_DEFAULTS = {
- maxTokens: process.env.NEUROLINK_DEFAULT_MAX_TOKENS
- ? parseInt(process.env.NEUROLINK_DEFAULT_MAX_TOKENS, 10)
- : DEFAULT_MAX_TOKENS,
+ maxTokens: (() => {
+ if (!process.env.NEUROLINK_DEFAULT_MAX_TOKENS) {
+ return undefined;
+ }
+ const n = parseInt(process.env.NEUROLINK_DEFAULT_MAX_TOKENS, 10);
+ return Number.isFinite(n) ? n : undefined;
+ })(),
  temperature: process.env.NEUROLINK_DEFAULT_TEMPERATURE
- ? parseFloat(process.env.NEUROLINK_DEFAULT_TEMPERATURE)
+ ? (() => {
+ const t = parseFloat(process.env.NEUROLINK_DEFAULT_TEMPERATURE);
+ return Number.isFinite(t) ? t : DEFAULT_TEMPERATURE;
+ })()
  : DEFAULT_TEMPERATURE,
  };
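The `ENV_DEFAULTS` rewrite also hardens parsing: previously an unset variable fell back to 8192 and a non-numeric value flowed through as `NaN`; now both degrade to `undefined` (no cap). The same guard, extracted for illustration:

```js
// Mirrors the maxTokens IIFE above.
function parseMaxTokensEnv(raw) {
  if (!raw) {
    return undefined; // unset/empty -> no default cap
  }
  const n = parseInt(raw, 10);
  return Number.isFinite(n) ? n : undefined;
}

parseMaxTokensEnv(undefined); // undefined (was 8192)
parseMaxTokensEnv("4096"); // 4096
parseMaxTokensEnv("abc"); // undefined (was NaN)
```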
package/dist/core/factory.js CHANGED
@@ -111,6 +111,25 @@ export class AIProviderFactory {
  logger.debug(`[${functionTag}] No Vertex environment variables found (VERTEX_MODEL)`);
  }
  }
+ else if (providerName.toLowerCase().includes("azure")) {
+ const envModel = process.env.AZURE_OPENAI_MODEL ||
+ process.env.AZURE_OPENAI_DEPLOYMENT ||
+ process.env.AZURE_OPENAI_DEPLOYMENT_ID;
+ if (envModel) {
+ resolvedModelName = envModel;
+ logger.debug(`[${functionTag}] Environment variable found for Azure`, {
+ envVariable: process.env.AZURE_OPENAI_MODEL
+ ? "AZURE_OPENAI_MODEL"
+ : process.env.AZURE_OPENAI_DEPLOYMENT
+ ? "AZURE_OPENAI_DEPLOYMENT"
+ : "AZURE_OPENAI_DEPLOYMENT_ID",
+ resolvedModel: envModel,
+ });
+ }
+ else {
+ logger.debug(`[${functionTag}] No Azure environment variables found (AZURE_OPENAI_MODEL, AZURE_OPENAI_DEPLOYMENT, AZURE_OPENAI_DEPLOYMENT_ID)`);
+ }
+ }
  else {
  logger.debug(`[${functionTag}] Provider ${providerName} - no environment variable check implemented`);
  }
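The new branch gives Azure the same environment-variable resolution the Vertex branch already had; the first defined variable wins. The precedence, extracted as a standalone sketch:

```js
// AZURE_OPENAI_MODEL > AZURE_OPENAI_DEPLOYMENT > AZURE_OPENAI_DEPLOYMENT_ID.
// Note that `||` also skips empty strings, treating them as unset.
function resolveAzureModelFromEnv(env = process.env) {
  return (
    env.AZURE_OPENAI_MODEL ||
    env.AZURE_OPENAI_DEPLOYMENT ||
    env.AZURE_OPENAI_DEPLOYMENT_ID ||
    undefined
  );
}
```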
package/dist/factories/providerRegistry.js CHANGED
@@ -49,6 +49,8 @@ export class ProviderRegistry {
  const { AzureOpenAIProvider } = await import("../providers/azureOpenai.js");
  return new AzureOpenAIProvider(modelName);
  }, process.env.AZURE_MODEL ||
+ process.env.AZURE_OPENAI_MODEL ||
+ process.env.AZURE_OPENAI_DEPLOYMENT ||
  process.env.AZURE_OPENAI_DEPLOYMENT_ID ||
  "gpt-4o-mini", ["azure", "azureOpenai"]);
  // Register Google Vertex AI provider
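In practice this means any of the newly recognized variables can pin the deployment before the registry lazily constructs the provider. A hypothetical usage sketch (the import path is illustrative):

```js
process.env.AZURE_OPENAI_DEPLOYMENT = "my-gpt4o-deployment";

const { AzureOpenAIProvider } = await import("./providers/azureOpenai.js");
const provider = new AzureOpenAIProvider(); // resolves to "my-gpt4o-deployment"
```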
package/dist/lib/core/baseProvider.js CHANGED
@@ -168,6 +168,31 @@ export class BaseProvider {
  ...(options.tools || {}), // Include external tools passed from NeuroLink
  }
  : {};
+ // DEBUG: Log detailed tool information for generate
+ logger.debug("BaseProvider Generate - Tool Loading Debug", {
+ provider: this.providerName,
+ shouldUseTools,
+ baseToolsProvided: !!baseTools,
+ baseToolCount: baseTools ? Object.keys(baseTools).length : 0,
+ finalToolCount: tools ? Object.keys(tools).length : 0,
+ toolNames: tools ? Object.keys(tools).slice(0, 10) : [],
+ disableTools: options.disableTools,
+ supportsTools: this.supportsTools(),
+ externalToolsCount: options.tools
+ ? Object.keys(options.tools).length
+ : 0,
+ });
+ if (tools && Object.keys(tools).length > 0) {
+ logger.debug("BaseProvider Generate - First 5 Tools Detail", {
+ provider: this.providerName,
+ tools: Object.keys(tools)
+ .slice(0, 5)
+ .map((name) => ({
+ name,
+ description: tools[name]?.description?.substring(0, 100),
+ })),
+ });
+ }
  logger.debug(`[BaseProvider.generate] Tools for ${this.providerName}:`, {
  directTools: getKeyCount(baseTools),
  directToolNames: getKeysAsString(baseTools),
@@ -187,7 +212,7 @@ export class BaseProvider {
  maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
  toolChoice: shouldUseTools ? "auto" : "none",
  temperature: options.temperature,
- maxTokens: options.maxTokens || 8192,
+ maxTokens: options.maxTokens, // No default limit - unlimited unless specified
  });
  // Accumulate the streamed content
  let accumulatedContent = "";
package/dist/lib/core/constants.d.ts CHANGED
@@ -2,7 +2,7 @@
  * Central configuration constants for NeuroLink
  * Single source of truth for all default values
  */
- export declare const DEFAULT_MAX_TOKENS = 8192;
+ export declare const DEFAULT_MAX_TOKENS: undefined;
  export declare const DEFAULT_TEMPERATURE = 0.7;
  export declare const DEFAULT_TIMEOUT = 30000;
  export declare const DEFAULT_MAX_STEPS = 5;
@@ -67,6 +67,15 @@ export declare const PROVIDER_MAX_TOKENS: {
  "anthropic.claude-3-5-sonnet-20240620-v1:0": number;
  default: number;
  };
+ azure: {
+ "gpt-4o": number;
+ "gpt-4o-mini": number;
+ "gpt-4.1": number;
+ "gpt-3.5-turbo": number;
+ "gpt-4": number;
+ "gpt-4-turbo": number;
+ default: number;
+ };
  ollama: {
  default: number;
  };
@@ -79,7 +88,7 @@ export declare const CLI_LIMITS: {
  maxTokens: {
  min: number;
  max: number;
- default: number;
+ default: undefined;
  };
  temperature: {
  min: number;
@@ -99,6 +108,6 @@ export declare const SYSTEM_LIMITS: {
  DEFAULT_BACKOFF_MULTIPLIER: number;
  };
  export declare const ENV_DEFAULTS: {
- maxTokens: number;
+ maxTokens: number | undefined;
  temperature: number;
  };
package/dist/lib/core/constants.js CHANGED
@@ -3,7 +3,7 @@
  * Single source of truth for all default values
  */
  // Core AI Generation Defaults
- export const DEFAULT_MAX_TOKENS = 8192; // Changed from 10000 to fix Anthropic error
+ export const DEFAULT_MAX_TOKENS = undefined; // Unlimited by default - let providers decide their own limits
  export const DEFAULT_TEMPERATURE = 0.7;
  export const DEFAULT_TIMEOUT = 30000;
  export const DEFAULT_MAX_STEPS = 5; // Default multi-turn tool execution steps
@@ -72,6 +72,15 @@ export const PROVIDER_MAX_TOKENS = {
  "anthropic.claude-3-5-sonnet-20240620-v1:0": 4096,
  default: 4096,
  },
+ azure: {
+ "gpt-4o": 16384,
+ "gpt-4o-mini": 16384,
+ "gpt-4.1": 16384,
+ "gpt-3.5-turbo": 4096,
+ "gpt-4": 8192,
+ "gpt-4-turbo": 4096,
+ default: 8192, // Azure OpenAI generally supports similar limits to OpenAI
+ },
  ollama: {
  default: 8192, // Ollama typically supports higher limits
  },
@@ -85,7 +94,7 @@ export const CLI_LIMITS = {
  maxTokens: {
  min: 1,
  max: 50000,
- default: DEFAULT_MAX_TOKENS,
+ default: undefined, // No default limit - unlimited by default
  },
  temperature: {
  min: 0,
@@ -112,10 +121,17 @@ export const SYSTEM_LIMITS = {
  };
  // Environment Variable Support (for future use)
  export const ENV_DEFAULTS = {
- maxTokens: process.env.NEUROLINK_DEFAULT_MAX_TOKENS
- ? parseInt(process.env.NEUROLINK_DEFAULT_MAX_TOKENS, 10)
- : DEFAULT_MAX_TOKENS,
+ maxTokens: (() => {
+ if (!process.env.NEUROLINK_DEFAULT_MAX_TOKENS) {
+ return undefined;
+ }
+ const n = parseInt(process.env.NEUROLINK_DEFAULT_MAX_TOKENS, 10);
+ return Number.isFinite(n) ? n : undefined;
+ })(),
  temperature: process.env.NEUROLINK_DEFAULT_TEMPERATURE
- ? parseFloat(process.env.NEUROLINK_DEFAULT_TEMPERATURE)
+ ? (() => {
+ const t = parseFloat(process.env.NEUROLINK_DEFAULT_TEMPERATURE);
+ return Number.isFinite(t) ? t : DEFAULT_TEMPERATURE;
+ })()
  : DEFAULT_TEMPERATURE,
  };
package/dist/lib/core/factory.js CHANGED
@@ -111,6 +111,25 @@ export class AIProviderFactory {
  logger.debug(`[${functionTag}] No Vertex environment variables found (VERTEX_MODEL)`);
  }
  }
+ else if (providerName.toLowerCase().includes("azure")) {
+ const envModel = process.env.AZURE_OPENAI_MODEL ||
+ process.env.AZURE_OPENAI_DEPLOYMENT ||
+ process.env.AZURE_OPENAI_DEPLOYMENT_ID;
+ if (envModel) {
+ resolvedModelName = envModel;
+ logger.debug(`[${functionTag}] Environment variable found for Azure`, {
+ envVariable: process.env.AZURE_OPENAI_MODEL
+ ? "AZURE_OPENAI_MODEL"
+ : process.env.AZURE_OPENAI_DEPLOYMENT
+ ? "AZURE_OPENAI_DEPLOYMENT"
+ : "AZURE_OPENAI_DEPLOYMENT_ID",
+ resolvedModel: envModel,
+ });
+ }
+ else {
+ logger.debug(`[${functionTag}] No Azure environment variables found (AZURE_OPENAI_MODEL, AZURE_OPENAI_DEPLOYMENT, AZURE_OPENAI_DEPLOYMENT_ID)`);
+ }
+ }
  else {
  logger.debug(`[${functionTag}] Provider ${providerName} - no environment variable check implemented`);
  }
package/dist/lib/factories/providerRegistry.js CHANGED
@@ -49,6 +49,8 @@ export class ProviderRegistry {
  const { AzureOpenAIProvider } = await import("../providers/azureOpenai.js");
  return new AzureOpenAIProvider(modelName);
  }, process.env.AZURE_MODEL ||
+ process.env.AZURE_OPENAI_MODEL ||
+ process.env.AZURE_OPENAI_DEPLOYMENT ||
  process.env.AZURE_OPENAI_DEPLOYMENT_ID ||
  "gpt-4o-mini", ["azure", "azureOpenai"]);
  // Register Google Vertex AI provider
package/dist/lib/mcp/servers/aiProviders/aiWorkflowTools.js CHANGED
@@ -146,7 +146,7 @@ Return ONLY a valid JSON object with this exact structure:
  Generate 3-5 comprehensive test cases covering the requested types.`;
  const result = await provider.generate({
  prompt: prompt,
- maxTokens: Math.floor(DEFAULT_MAX_TOKENS * 1.2),
+ maxTokens: 10000, // High limit for complex analysis
  temperature: 0.3, // Lower temperature for more consistent structured output
  });
  if (!result || !result.content) {
@@ -360,7 +360,7 @@ Return ONLY a valid JSON object with this exact structure:
  Focus on creating accurate, useful documentation that explains the code's purpose, parameters, return values, and usage patterns.`;
  const result = await provider.generate({
  prompt: prompt,
- maxTokens: Math.floor(DEFAULT_MAX_TOKENS * 1.2),
+ maxTokens: 10000, // High limit for complex analysis
  temperature: 0.3, // Moderate temperature for creative but structured documentation
  });
  if (!result || !result.content) {
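The switch to a literal here is not cosmetic: with `DEFAULT_MAX_TOKENS` now `undefined`, the old expression would evaluate to `NaN`:

```js
const DEFAULT_MAX_TOKENS = undefined;
Math.floor(DEFAULT_MAX_TOKENS * 1.2); // NaN — an invalid maxTokens value

// Hence the explicit cap for these long structured-output calls:
const maxTokens = 10000;
```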
package/dist/lib/providers/amazonBedrock.js CHANGED
@@ -157,7 +157,7 @@ export class AmazonBedrockProvider extends BaseProvider {
  },
  ],
  inferenceConfig: {
- maxTokens: options.maxTokens || 4096,
+ maxTokens: options.maxTokens, // No default limit - unlimited unless specified
  temperature: options.temperature || 0.7,
  },
  };
@@ -718,7 +718,7 @@ export class AmazonBedrockProvider extends BaseProvider {
  },
  ],
  inferenceConfig: {
- maxTokens: options.maxTokens || 4096,
+ maxTokens: options.maxTokens, // No default limit - unlimited unless specified
  temperature: options.temperature || 0.7,
  },
  };
package/dist/lib/providers/anthropic.js CHANGED
@@ -5,7 +5,7 @@ import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
  import { AuthenticationError, NetworkError, ProviderError, RateLimitError, } from "../types/errors.js";
- import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
+ import { DEFAULT_MAX_STEPS } from "../core/constants.js";
  import { validateApiKey, createAnthropicConfig, getProviderModel, } from "../utils/providerConfig.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
  import { createProxyFetch } from "../proxy/proxyFetch.js";
@@ -98,7 +98,7 @@ export class AnthropicProvider extends BaseProvider {
  model: this.model,
  messages: messages,
  temperature: options.temperature,
- maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+ maxTokens: options.maxTokens, // No default limit - unlimited unless specified
  tools,
  maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
  toolChoice: shouldUseTools ? "auto" : "none",
@@ -111,22 +111,13 @@
  // Full tool support is now available with real streaming
  const toolCalls = [];
  const toolResults = [];
- const usage = await result.usage;
- const finishReason = await result.finishReason;
  return {
  stream: transformedStream,
  provider: this.providerName,
  model: this.modelName,
  toolCalls, // ✅ Include tool calls in stream result
  toolResults, // ✅ Include tool results in stream result
- usage: usage
- ? {
- input: usage.promptTokens || 0,
- output: usage.completionTokens || 0,
- total: usage.totalTokens || 0,
- }
- : undefined,
- finishReason: finishReason || undefined,
+ // Note: omit usage/finishReason to avoid blocking streaming; compute asynchronously if needed.
  };
  }
  catch (error) {
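Context for the removal above: on a `streamText` result, `usage` and `finishReason` are promises that only settle once the stream ends, so awaiting them before returning stalled the stream's start. A sketch of reading them after consumption instead (model and prompt are placeholders):

```js
import { streamText } from "ai";
import { anthropic } from "@ai-sdk/anthropic";

const result = await streamText({
  model: anthropic("claude-3-5-sonnet-20240620"),
  prompt: "Hello",
});

// Consume (or hand off) the stream first...
for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}

// ...then read the late-settling promises without blocking streaming.
console.log(await result.usage, await result.finishReason);
```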
package/dist/lib/providers/anthropicBaseProvider.js CHANGED
@@ -4,7 +4,6 @@ import { AnthropicModels } from "../types/index.js";
  import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
- import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
  import { validateApiKey, createAnthropicBaseConfig, } from "../utils/providerConfig.js";
  /**
  * Anthropic provider implementation using BaseProvider pattern
@@ -70,7 +69,7 @@ export class AnthropicProviderV2 extends BaseProvider {
  prompt: options.input.text,
  system: options.systemPrompt,
  temperature: options.temperature,
- maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+ maxTokens: options.maxTokens, // No default limit - unlimited unless specified
  tools: options.tools,
  toolChoice: "auto",
  abortSignal: timeoutController?.controller.signal,
package/dist/lib/providers/azureOpenai.js CHANGED
@@ -6,6 +6,7 @@ import { validateApiKey, createAzureAPIKeyConfig, createAzureEndpointConfig, } f
  import { logger } from "../utils/logger.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
  import { createProxyFetch } from "../proxy/proxyFetch.js";
+ import { DEFAULT_MAX_STEPS } from "../core/constants.js";
  export class AzureOpenAIProvider extends BaseProvider {
  apiKey;
  resourceName;
@@ -19,9 +20,11 @@ export class AzureOpenAIProvider extends BaseProvider {
  this.resourceName = endpoint
  .replace("https://", "")
  .replace(/\/+$/, "") // Remove trailing slashes
- .replace(".openai.azure.com", "");
+ .replace(".openai.azure.com", "")
+ .replace(".cognitiveservices.azure.com", "");
  this.deployment =
  modelName ||
+ process.env.AZURE_OPENAI_MODEL ||
  process.env.AZURE_OPENAI_DEPLOYMENT ||
  process.env.AZURE_OPENAI_DEPLOYMENT_ID ||
  "gpt-4o";
@@ -34,6 +37,7 @@ export class AzureOpenAIProvider extends BaseProvider {
  validateApiKey(createAzureEndpointConfig());
  }
  // Create the Azure provider instance with proxy support
+ // Let the Azure SDK handle all URL construction automatically
  this.azureProvider = createAzure({
  resourceName: this.resourceName,
  apiKey: this.apiKey,
@@ -73,20 +77,57 @@ export class AzureOpenAIProvider extends BaseProvider {
  // executeGenerate removed - BaseProvider handles all generation with tools
  async executeStream(options, _analysisSchema) {
  try {
+ // Get ALL available tools (direct + MCP + external from options) - EXACTLY like BaseProvider
+ const shouldUseTools = !options.disableTools && this.supportsTools();
+ const baseTools = shouldUseTools ? await this.getAllTools() : {};
+ const tools = shouldUseTools
+ ? {
+ ...baseTools,
+ ...(options.tools || {}), // Include external tools passed from NeuroLink
+ }
+ : undefined;
+ // DEBUG: Log detailed tool information
+ logger.debug("Azure Stream - Tool Loading Debug", {
+ shouldUseTools,
+ baseToolsProvided: !!baseTools,
+ baseToolCount: baseTools ? Object.keys(baseTools).length : 0,
+ finalToolCount: tools ? Object.keys(tools).length : 0,
+ toolNames: tools ? Object.keys(tools).slice(0, 10) : [],
+ disableTools: options.disableTools,
+ supportsTools: this.supportsTools(),
+ externalToolsCount: options.tools
+ ? Object.keys(options.tools).length
+ : 0,
+ });
+ if (tools && Object.keys(tools).length > 0) {
+ logger.debug("Azure Stream - First 5 Tools Detail", {
+ tools: Object.keys(tools)
+ .slice(0, 5)
+ .map((name) => ({
+ name,
+ description: tools[name]?.description?.substring(0, 100),
+ })),
+ });
+ }
  // Build message array from options
  const messages = buildMessagesArray(options);
  const stream = await streamText({
  model: this.azureProvider(this.deployment),
  messages: messages,
- maxTokens: options.maxTokens || 1000,
- temperature: options.temperature || 0.7,
+ ...(options.maxTokens !== null && options.maxTokens !== undefined
+ ? { maxTokens: options.maxTokens }
+ : {}),
+ ...(options.temperature !== null && options.temperature !== undefined
+ ? { temperature: options.temperature }
+ : {}),
+ tools,
+ toolChoice: shouldUseTools ? "auto" : "none",
+ maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
  });
+ // Transform string stream to content object stream using BaseProvider method
+ const transformedStream = this.createTextStream(stream);
  return {
- stream: (async function* () {
- for await (const chunk of stream.textStream) {
- yield { content: chunk };
- }
- })(),
+ stream: transformedStream,
  provider: "azure",
  model: this.deployment,
  metadata: {
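Two details of the Azure fix are worth isolating: endpoint normalization now also accepts `*.cognitiveservices.azure.com` hosts, and optional settings are spread in only when the caller supplied them (previously defaulted to 1000 tokens / 0.7 temperature). Both patterns, reduced to standalone sketches:

```js
// Endpoint -> resourceName normalization, as in the constructor above.
function toResourceName(endpoint) {
  return endpoint
    .replace("https://", "")
    .replace(/\/+$/, "")
    .replace(".openai.azure.com", "")
    .replace(".cognitiveservices.azure.com", "");
}
toResourceName("https://acme.openai.azure.com/"); // "acme"
toResourceName("https://acme.cognitiveservices.azure.com"); // "acme" (newly supported)

// Conditional spread: include a setting only when provided.
function optionalSettings({ maxTokens, temperature } = {}) {
  return {
    ...(maxTokens != null ? { maxTokens } : {}),
    ...(temperature != null ? { temperature } : {}),
  };
}
optionalSettings({}); // {} — lets the deployment apply its own defaults
optionalSettings({ maxTokens: 2048 }); // { maxTokens: 2048 }
```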
package/dist/lib/providers/googleAiStudio.js CHANGED
@@ -5,12 +5,12 @@ import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
  import { AuthenticationError, NetworkError, ProviderError, RateLimitError, } from "../types/errors.js";
- import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
+ import { DEFAULT_MAX_STEPS } from "../core/constants.js";
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
  // Create Google GenAI client
  async function createGoogleGenAIClient(apiKey) {
- const mod = await import("@google/generative-ai");
+ const mod = await import("@google/genai");
  const ctor = mod.GoogleGenAI;
  if (!ctor) {
  throw new Error("@google/genai does not export GoogleGenAI");
@@ -96,7 +96,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
  model,
  messages: messages,
  temperature: options.temperature,
- maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+ maxTokens: options.maxTokens, // No default limit - unlimited unless specified
  tools,
  maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
  toolChoice: shouldUseTools ? "auto" : "none",