@juspay/neurolink 7.33.2 → 7.33.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. package/CHANGELOG.md +8 -0
  2. package/dist/cli/commands/config.d.ts +3 -4
  3. package/dist/cli/commands/config.js +2 -3
  4. package/dist/constants/index.d.ts +192 -0
  5. package/dist/constants/index.js +195 -0
  6. package/dist/constants/performance.d.ts +366 -0
  7. package/dist/constants/performance.js +389 -0
  8. package/dist/constants/retry.d.ts +224 -0
  9. package/dist/constants/retry.js +266 -0
  10. package/dist/constants/timeouts.d.ts +225 -0
  11. package/dist/constants/timeouts.js +182 -0
  12. package/dist/constants/tokens.d.ts +234 -0
  13. package/dist/constants/tokens.js +314 -0
  14. package/dist/core/baseProvider.js +26 -1
  15. package/dist/core/constants.d.ts +12 -3
  16. package/dist/core/constants.js +22 -6
  17. package/dist/core/factory.js +19 -0
  18. package/dist/core/types.d.ts +268 -0
  19. package/dist/core/types.js +153 -0
  20. package/dist/factories/providerRegistry.js +2 -0
  21. package/dist/lib/constants/index.d.ts +192 -0
  22. package/dist/lib/constants/index.js +195 -0
  23. package/dist/lib/constants/performance.d.ts +366 -0
  24. package/dist/lib/constants/performance.js +389 -0
  25. package/dist/lib/constants/retry.d.ts +224 -0
  26. package/dist/lib/constants/retry.js +266 -0
  27. package/dist/lib/constants/timeouts.d.ts +225 -0
  28. package/dist/lib/constants/timeouts.js +182 -0
  29. package/dist/lib/constants/tokens.d.ts +234 -0
  30. package/dist/lib/constants/tokens.js +314 -0
  31. package/dist/lib/core/baseProvider.js +26 -1
  32. package/dist/lib/core/constants.d.ts +12 -3
  33. package/dist/lib/core/constants.js +22 -6
  34. package/dist/lib/core/factory.js +19 -0
  35. package/dist/lib/core/types.d.ts +268 -0
  36. package/dist/lib/core/types.js +153 -0
  37. package/dist/lib/factories/providerRegistry.js +2 -0
  38. package/dist/lib/mcp/servers/aiProviders/aiWorkflowTools.js +2 -2
  39. package/dist/lib/models/modelRegistry.d.ts +1 -1
  40. package/dist/lib/models/modelRegistry.js +63 -37
  41. package/dist/lib/neurolink.js +35 -34
  42. package/dist/lib/providers/amazonBedrock.js +2 -2
  43. package/dist/lib/providers/anthropic.js +3 -12
  44. package/dist/lib/providers/anthropicBaseProvider.js +1 -2
  45. package/dist/lib/providers/azureOpenai.d.ts +1 -1
  46. package/dist/lib/providers/azureOpenai.js +51 -9
  47. package/dist/lib/providers/googleAiStudio.js +3 -3
  48. package/dist/lib/providers/googleVertex.js +2 -2
  49. package/dist/lib/providers/huggingFace.js +1 -2
  50. package/dist/lib/providers/litellm.js +1 -2
  51. package/dist/lib/providers/mistral.js +2 -2
  52. package/dist/lib/providers/ollama.js +7 -8
  53. package/dist/lib/providers/openAI.js +2 -2
  54. package/dist/lib/providers/openaiCompatible.js +5 -2
  55. package/dist/lib/providers/sagemaker/language-model.d.ts +5 -0
  56. package/dist/lib/providers/sagemaker/language-model.js +9 -1
  57. package/dist/lib/utils/providerConfig.d.ts +25 -0
  58. package/dist/lib/utils/providerConfig.js +24 -3
  59. package/dist/lib/utils/providerHealth.d.ts +1 -1
  60. package/dist/lib/utils/providerHealth.js +47 -36
  61. package/dist/lib/utils/providerSetupMessages.js +7 -6
  62. package/dist/lib/utils/providerUtils.js +16 -24
  63. package/dist/lib/utils/tokenLimits.d.ts +2 -2
  64. package/dist/lib/utils/tokenLimits.js +10 -3
  65. package/dist/mcp/servers/aiProviders/aiWorkflowTools.js +2 -2
  66. package/dist/models/modelRegistry.d.ts +1 -1
  67. package/dist/models/modelRegistry.js +63 -37
  68. package/dist/neurolink.js +35 -34
  69. package/dist/providers/amazonBedrock.js +2 -2
  70. package/dist/providers/anthropic.js +3 -12
  71. package/dist/providers/anthropicBaseProvider.js +1 -2
  72. package/dist/providers/azureOpenai.d.ts +1 -1
  73. package/dist/providers/azureOpenai.js +51 -9
  74. package/dist/providers/googleAiStudio.js +3 -3
  75. package/dist/providers/googleVertex.js +2 -2
  76. package/dist/providers/huggingFace.js +1 -2
  77. package/dist/providers/litellm.js +1 -2
  78. package/dist/providers/mistral.js +2 -2
  79. package/dist/providers/ollama.js +7 -8
  80. package/dist/providers/openAI.js +2 -2
  81. package/dist/providers/openaiCompatible.js +5 -2
  82. package/dist/providers/sagemaker/language-model.d.ts +5 -0
  83. package/dist/providers/sagemaker/language-model.js +9 -1
  84. package/dist/utils/providerConfig.d.ts +25 -0
  85. package/dist/utils/providerConfig.js +24 -3
  86. package/dist/utils/providerHealth.d.ts +1 -1
  87. package/dist/utils/providerHealth.js +47 -36
  88. package/dist/utils/providerSetupMessages.js +7 -6
  89. package/dist/utils/providerUtils.js +16 -24
  90. package/dist/utils/tokenLimits.d.ts +2 -2
  91. package/dist/utils/tokenLimits.js +10 -3
  92. package/package.json +1 -1
@@ -4,7 +4,7 @@ import { streamText, Output, } from "ai";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
-import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
+import { DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { ModelConfigurationManager } from "../core/modelConfiguration.js";
 import { validateApiKey, createVertexProjectConfig, createGoogleAuthConfig, } from "../utils/providerConfig.js";
 import fs from "fs";
@@ -945,7 +945,7 @@ export class GoogleVertexProvider extends BaseProvider {
     // This avoids hardcoded model-specific logic and repeated config lookups
     const shouldSetMaxTokens = this.shouldSetMaxTokensCached(modelName);
     const maxTokens = shouldSetMaxTokens
-        ? options.maxTokens || DEFAULT_MAX_TOKENS
+        ? options.maxTokens // No default limit
        : undefined;
     // Build complete stream options with proper typing
     let streamOptions = {
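This hunk and the similar ones below drop the DEFAULT_MAX_TOKENS fallback: when the caller leaves maxTokens unset, the provider now receives undefined and the model's own default applies instead of a hardcoded cap. A minimal TypeScript sketch of the behavioral difference (the constant's value is assumed for illustration, not taken from the package):

const DEFAULT_MAX_TOKENS = 8192; // assumed value, for illustration only

// Old behavior: an unset maxTokens was silently replaced by the default cap.
function oldResolve(maxTokens?: number): number {
  return maxTokens || DEFAULT_MAX_TOKENS;
}

// New behavior: undefined passes through, so the provider/model default applies.
function newResolve(maxTokens?: number): number | undefined {
  return maxTokens;
}

console.log(oldResolve(undefined)); // 8192: hardcoded cap
console.log(newResolve(undefined)); // undefined: provider decides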
@@ -3,7 +3,6 @@ import { streamText, } from "ai";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
-import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { validateApiKey, createHuggingFaceConfig, getProviderModel, } from "../utils/providerConfig.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
@@ -120,7 +119,7 @@ export class HuggingFaceProvider extends BaseProvider {
     model: this.model,
     messages: messages,
     temperature: options.temperature,
-    maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+    maxTokens: options.maxTokens, // No default limit - unlimited unless specified
     tools: streamOptions.tools, // Tools format conversion handled by prepareStreamOptions
     toolChoice: streamOptions.toolChoice, // Tool choice handled by prepareStreamOptions
     abortSignal: timeoutController?.controller.signal,
@@ -3,7 +3,6 @@ import { streamText } from "ai";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
-import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { getProviderModel } from "../utils/providerConfig.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
@@ -127,7 +126,7 @@ export class LiteLLMProvider extends BaseProvider {
     model: this.model,
     messages: messages,
     temperature: options.temperature,
-    maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+    maxTokens: options.maxTokens, // No default limit - unlimited unless specified
     tools: options.tools,
     toolChoice: "auto",
     abortSignal: timeoutController?.controller.signal,
@@ -3,7 +3,7 @@ import { streamText } from "ai";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
-import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
+import { DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createMistralConfig, getProviderModel, } from "../utils/providerConfig.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
@@ -54,7 +54,7 @@ export class MistralProvider extends BaseProvider {
     model: this.model,
     messages: messages,
     temperature: options.temperature,
-    maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+    maxTokens: options.maxTokens, // No default limit - unlimited unless specified
     tools,
     maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
     toolChoice: shouldUseTools ? "auto" : "none",
@@ -1,6 +1,5 @@
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
-import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { modelConfig } from "../core/modelConfiguration.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
 import { TimeoutError } from "../utils/timeout.js";
@@ -88,10 +87,10 @@ class OllamaLanguageModel {
     return {
         text: data.response,
         usage: {
-            promptTokens: data.prompt_eval_count || this.estimateTokens(prompt),
-            completionTokens: data.eval_count || this.estimateTokens(data.response),
-            totalTokens: (data.prompt_eval_count || this.estimateTokens(prompt)) +
-                (data.eval_count || this.estimateTokens(data.response)),
+            promptTokens: data.prompt_eval_count ?? this.estimateTokens(prompt),
+            completionTokens: data.eval_count ?? this.estimateTokens(String(data.response ?? "")),
+            totalTokens: (data.prompt_eval_count ?? this.estimateTokens(prompt)) +
+                (data.eval_count ?? this.estimateTokens(String(data.response ?? ""))),
         },
         finishReason: "stop",
         rawCall: {
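The switch from || to ?? changes how a reported count of 0 is handled: || treats 0 as falsy and replaces it with an estimate, while ?? falls back only on null or undefined. A self-contained sketch of the difference (estimateTokens here is a stand-in, not the package's implementation):

// Stand-in estimator: roughly four characters per token.
const estimateTokens = (text: string): number => Math.ceil(text.length / 4);

const reported = 0; // Ollama legitimately reported zero prompt tokens

const withOr = reported || estimateTokens("some prompt");      // 3: real 0 discarded
const withNullish = reported ?? estimateTokens("some prompt"); // 0: real count kept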
@@ -271,7 +270,7 @@
  * @returns true for supported models, false for unsupported models
  */
 supportsTools() {
-    const modelName = this.modelName.toLowerCase();
+    const modelName = (this.modelName ?? getDefaultOllamaModel()).toLowerCase();
     // Get tool-capable models from configuration
     const ollamaConfig = modelConfig.getProviderConfiguration("ollama");
     const toolCapableModels = ollamaConfig?.modelBehavior?.toolCapableModels || [];
@@ -340,7 +339,7 @@
         tool_choice: "auto",
         stream: true,
         temperature: options.temperature,
-        max_tokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+        max_tokens: options.maxTokens,
     }),
     signal: createAbortSignalWithTimeout(this.timeout),
 });
@@ -381,7 +380,7 @@
         stream: true,
         options: {
             temperature: options.temperature,
-            num_predict: options.maxTokens || DEFAULT_MAX_TOKENS,
+            num_predict: options.maxTokens,
         },
     }),
     signal: createAbortSignalWithTimeout(this.timeout),
@@ -5,7 +5,7 @@ import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
 import { AuthenticationError, InvalidModelError, NetworkError, ProviderError, RateLimitError, } from "../types/errors.js";
-import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
+import { DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createOpenAIConfig, getProviderModel, } from "../utils/providerConfig.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 import { buildMessagesArray } from "../utils/messageBuilder.js";
@@ -97,7 +97,7 @@ export class OpenAIProvider extends BaseProvider {
     model: this.model,
     messages: messages,
     temperature: options.temperature,
-    maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+    maxTokens: options.maxTokens, // No default limit - unlimited unless specified
     tools,
     maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
     toolChoice: shouldUseTools ? "auto" : "none",
@@ -3,7 +3,6 @@ import { streamText } from "ai";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
-import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Constants
@@ -163,7 +162,7 @@ export class OpenAICompatibleProvider extends BaseProvider {
     prompt: options.input.text,
     system: options.systemPrompt,
     temperature: options.temperature,
-    maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+    maxTokens: options.maxTokens, // No default limit - unlimited unless specified
     tools: options.tools,
     toolChoice: "auto",
     abortSignal: timeoutController?.controller.signal,
@@ -207,12 +206,16 @@
     const modelsUrl = new URL("/v1/models", this.config.baseURL).toString();
     logger.debug(`Fetching available models from: ${modelsUrl}`);
     const proxyFetch = createProxyFetch();
+    const controller = new AbortController();
+    const t = setTimeout(() => controller.abort(), 5000);
     const response = await proxyFetch(modelsUrl, {
         headers: {
             Authorization: `Bearer ${this.config.apiKey}`,
             "Content-Type": "application/json",
         },
+        signal: controller.signal,
     });
+    clearTimeout(t);
     if (!response.ok) {
         logger.warn(`Models endpoint returned ${response.status}: ${response.statusText}`);
         return this.getFallbackModels();
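The added AbortController wires a five-second timeout into the models request. A standalone sketch of the same pattern, with clearTimeout moved into a finally block so the timer is also released when the request rejects (the URL and timeout value are placeholders):

async function fetchWithTimeout(url: string, timeoutMs = 5000): Promise<Response> {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs); // abort on deadline
  try {
    return await fetch(url, { signal: controller.signal });
  } finally {
    clearTimeout(timer); // always clear the timer, even on error or abort
  }
}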
@@ -9,6 +9,11 @@ import type { SageMakerConfig, SageMakerModelConfig } from "./types.js";
 import type { ConnectivityResult } from "../../types/typeAliases.js";
 /**
  * SageMaker Language Model implementing LanguageModelV1 interface
+ *
+ * Token Limit Behavior:
+ * - When maxTokens is undefined, SageMaker uses the model's default token limits
+ * - When maxTokens is specified, it sets max_new_tokens parameter explicitly
+ * - This aligns with the unlimited-by-default token policy across all providers
  */
 export declare class SageMakerLanguageModel implements LanguageModelV1 {
     readonly specificationVersion = "v1";
@@ -75,6 +75,11 @@ const DEFAULT_MAX_CONCURRENCY = 10;
 const DEFAULT_MIN_CONCURRENCY = 1;
 /**
  * SageMaker Language Model implementing LanguageModelV1 interface
+ *
+ * Token Limit Behavior:
+ * - When maxTokens is undefined, SageMaker uses the model's default token limits
+ * - When maxTokens is specified, it sets max_new_tokens parameter explicitly
+ * - This aligns with the unlimited-by-default token policy across all providers
  */
 export class SageMakerLanguageModel {
     specificationVersion = "v1";
@@ -345,7 +350,10 @@ export class SageMakerLanguageModel {
 const request = {
     inputs: promptText,
     parameters: {
-        max_new_tokens: options.maxTokens || 512,
+        // Only include max_new_tokens if explicitly specified; let SageMaker use model defaults otherwise
+        ...(options.maxTokens !== undefined
+            ? { max_new_tokens: options.maxTokens }
+            : {}),
         temperature: options.temperature || 0.7,
         top_p: options.topP || 0.9,
         stop: options.stopSequences || [],
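The conditional spread adds max_new_tokens to the parameters object only when the caller supplied a value, rather than always sending an explicit 512. A minimal sketch of the idiom (the function is illustrative, not the package's code):

function buildParameters(maxTokens?: number, temperature = 0.7) {
  return {
    // Key appears only when a value was provided; otherwise the endpoint default applies.
    ...(maxTokens !== undefined ? { max_new_tokens: maxTokens } : {}),
    temperature,
  };
}

console.log(buildParameters());    // { temperature: 0.7 }
console.log(buildParameters(256)); // { max_new_tokens: 256, temperature: 0.7 }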
@@ -15,6 +15,31 @@ export interface ProviderConfigOptions {
     instructions: string[];
     fallbackEnvVars?: string[];
 }
+/**
+ * API key format validation patterns (extracted from advanced validation system)
+ * Exported for use across the codebase to replace scattered regex patterns
+ */
+export declare const API_KEY_FORMATS: Record<string, RegExp>;
+/**
+ * API key length constants to replace scattered magic numbers
+ */
+export declare const API_KEY_LENGTHS: {
+    readonly OPENAI_MIN: 48;
+    readonly ANTHROPIC_MIN: 95;
+    readonly HUGGINGFACE_EXACT: 37;
+    readonly AZURE_MIN: 32;
+    readonly MISTRAL_EXACT: 32;
+    readonly AWS_ACCESS_KEY: 20;
+    readonly GOOGLE_AI_EXACT: 39;
+};
+/**
+ * Project ID format validation (for Google Cloud)
+ */
+export declare const PROJECT_ID_FORMAT: {
+    readonly MIN_LENGTH: 6;
+    readonly MAX_LENGTH: 30;
+    readonly PATTERN: RegExp;
+};
 /**
  * Enhanced validation result with format checking
  */
@@ -6,16 +6,37 @@
  */
 /**
  * API key format validation patterns (extracted from advanced validation system)
+ * Exported for use across the codebase to replace scattered regex patterns
  */
-const API_KEY_FORMATS = {
+export const API_KEY_FORMATS = {
     openai: /^sk-[A-Za-z0-9]{48,}$/,
     anthropic: /^sk-ant-[A-Za-z0-9\-_]{95,}$/,
     "google-ai": /^AIza[A-Za-z0-9\-_]{35}$/,
     huggingface: /^hf_[A-Za-z0-9]{37}$/,
     mistral: /^[A-Za-z0-9]{32}$/,
-    azure: /^[A-Za-z0-9]{32,}$/,
+    azure: /^[A-Za-z0-9]{32}$/,
     aws: /^[A-Z0-9]{20}$/, // Access Key ID format
-    googleVertex: /^[A-Za-z0-9\-_]{1,}$/, // Project ID format
+    bedrock: /^[A-Z0-9]{20}$/, // AWS access key ID: 20 uppercase alphanumerics
+};
+/**
+ * API key length constants to replace scattered magic numbers
+ */
+export const API_KEY_LENGTHS = {
+    OPENAI_MIN: 48, // OpenAI API keys minimum length
+    ANTHROPIC_MIN: 95, // Anthropic API keys minimum length
+    HUGGINGFACE_EXACT: 37, // HuggingFace tokens exact length
+    AZURE_MIN: 32, // Azure OpenAI API keys minimum length
+    MISTRAL_EXACT: 32, // Mistral API keys exact length
+    AWS_ACCESS_KEY: 20, // AWS access key ID exact length
+    GOOGLE_AI_EXACT: 39, // Google AI Studio keys exact length (with AIza prefix)
+};
+/**
+ * Project ID format validation (for Google Cloud)
+ */
+export const PROJECT_ID_FORMAT = {
+    MIN_LENGTH: 6, // Minimum project ID length
+    MAX_LENGTH: 30, // Maximum project ID length
+    PATTERN: /^[a-z][a-z0-9-]{4,28}[a-z0-9]$/, // Google Cloud project ID format
 };
 /**
  * Validates API key format for a specific provider
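With the patterns and length constants now exported, validation code elsewhere can share one source of truth instead of re-inlining regexes. A hedged usage sketch (the import path assumes the dist layout shown above):

import { API_KEY_FORMATS, API_KEY_LENGTHS } from "./providerConfig.js";

// Shared regex replaces scattered /^sk-[A-Za-z0-9]{48,}$/ literals.
const looksLikeOpenAIKey = (key: string): boolean => API_KEY_FORMATS.openai.test(key);

// Length constants replace magic numbers in cheap pre-checks.
const passesLengthGate = (key: string): boolean => key.length >= API_KEY_LENGTHS.OPENAI_MIN;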
@@ -2,7 +2,7 @@
  * Provider Health Checking System
  * Prevents 500 errors by validating provider availability and configuration
  */
-import { AIProviderName } from "../types/index.js";
+import { AIProviderName } from "../core/types.js";
 export interface ProviderHealthStatus {
     provider: AIProviderName;
     isHealthy: boolean;
@@ -3,7 +3,8 @@
  * Prevents 500 errors by validating provider availability and configuration
  */
 import { logger } from "./logger.js";
-import { AIProviderName } from "../types/index.js";
+import { AIProviderName, OpenAIModels, GoogleAIModels, AnthropicModels, BedrockModels, } from "../core/types.js";
+import { API_KEY_LENGTHS, PROJECT_ID_FORMAT } from "./providerConfig.js";
 import { basename } from "path";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
 export class ProviderHealthChecker {
@@ -402,17 +403,19 @@ export class ProviderHealthChecker {
     static validateApiKeyFormat(providerName, apiKey) {
         switch (providerName) {
             case AIProviderName.ANTHROPIC:
-                return apiKey.startsWith("sk-ant-") && apiKey.length > 20;
+                return (apiKey.startsWith("sk-ant-") &&
+                    apiKey.length >= API_KEY_LENGTHS.ANTHROPIC_MIN);
             case AIProviderName.OPENAI:
-                return apiKey.startsWith("sk-") && apiKey.length > 20;
+                return (apiKey.startsWith("sk-") &&
+                    apiKey.length >= API_KEY_LENGTHS.OPENAI_MIN);
             case AIProviderName.GOOGLE_AI:
-                return apiKey.length > 20; // Basic length check
+                return apiKey.length >= API_KEY_LENGTHS.GOOGLE_AI_EXACT; // Basic length check
             case AIProviderName.VERTEX:
                 return apiKey.endsWith(".json") || apiKey.includes("type"); // JSON key format
             case AIProviderName.BEDROCK:
-                return apiKey.length >= 20; // AWS access key length
+                return apiKey.length >= API_KEY_LENGTHS.AWS_ACCESS_KEY; // AWS access key length
             case AIProviderName.AZURE:
-                return apiKey.length >= 32; // Azure OpenAI API key length
+                return apiKey.length >= API_KEY_LENGTHS.AZURE_MIN; // Azure OpenAI API key length
             case AIProviderName.OLLAMA:
                 return true; // Ollama usually doesn't require specific format
             default:
@@ -604,14 +607,14 @@ export class ProviderHealthChecker {
     static checkBedrockModels(healthStatus) {
         const bedrockModel = process.env.BEDROCK_MODEL || process.env.BEDROCK_MODEL_ID;
         const supportedModels = [
-            "anthropic.claude-3-sonnet-20240229-v1:0",
-            "anthropic.claude-3-haiku-20240307-v1:0",
-            "anthropic.claude-3-opus-20240229-v1:0",
+            BedrockModels.CLAUDE_3_SONNET,
+            BedrockModels.CLAUDE_3_HAIKU,
+            BedrockModels.CLAUDE_3_5_SONNET,
             "anthropic.claude-v2:1",
             "amazon.titan-text-express-v1",
         ];
         if (!bedrockModel) {
-            healthStatus.recommendations.push("Set BEDROCK_MODEL or BEDROCK_MODEL_ID for faster startup (e.g., anthropic.claude-3-sonnet-20240229-v1:0)");
+            healthStatus.recommendations.push(`Set BEDROCK_MODEL or BEDROCK_MODEL_ID for faster startup (e.g., ${BedrockModels.CLAUDE_3_SONNET})`);
         }
         else if (!supportedModels.includes(bedrockModel)) {
             healthStatus.recommendations.push(`Consider using a popular Bedrock model: ${supportedModels.slice(0, 3).join(", ")}`);
@@ -636,9 +639,13 @@
         healthStatus.configurationIssues.push("Invalid AZURE_OPENAI_ENDPOINT format");
         healthStatus.recommendations.push("Set AZURE_OPENAI_ENDPOINT to a valid URL (e.g., https://your-resource.openai.azure.com/)");
     }
-    if (!process.env.AZURE_OPENAI_DEPLOYMENT_NAME) {
-        healthStatus.configurationIssues.push("AZURE_OPENAI_DEPLOYMENT_NAME not set");
-        healthStatus.recommendations.push("Set AZURE_OPENAI_DEPLOYMENT_NAME to your deployment name");
+    // Check for deployment name using the SAME logic as the Azure provider
+    const deploymentName = process.env.AZURE_OPENAI_MODEL ||
+        process.env.AZURE_OPENAI_DEPLOYMENT ||
+        process.env.AZURE_OPENAI_DEPLOYMENT_ID;
+    if (!deploymentName) {
+        healthStatus.configurationIssues.push("No Azure deployment specified");
+        healthStatus.recommendations.push("Set one of: AZURE_OPENAI_MODEL, AZURE_OPENAI_DEPLOYMENT, or AZURE_OPENAI_DEPLOYMENT_ID");
     }
 }
 /**
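The health check now derives the deployment name from the same environment-variable fallback chain the Azure provider uses, so the two code paths cannot disagree. A small sketch of factoring that chain into one place (the helper name is hypothetical, not part of the package):

// Hypothetical shared helper mirroring the fallback chain shown in the hunk above.
function resolveAzureDeployment(env: NodeJS.ProcessEnv = process.env): string | undefined {
  return (
    env.AZURE_OPENAI_MODEL ||
    env.AZURE_OPENAI_DEPLOYMENT ||
    env.AZURE_OPENAI_DEPLOYMENT_ID ||
    undefined
  );
}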
@@ -658,39 +665,44 @@
         switch (providerName) {
             case AIProviderName.ANTHROPIC:
                 return [
-                    "claude-3-5-sonnet-20241022",
-                    "claude-3-haiku-20240307",
-                    "claude-3-opus-20240229",
+                    AnthropicModels.CLAUDE_3_5_SONNET,
+                    AnthropicModels.CLAUDE_3_HAIKU,
+                    AnthropicModels.CLAUDE_3_OPUS,
                 ];
             case AIProviderName.OPENAI:
-                return ["gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo"];
+                return [
+                    OpenAIModels.GPT_4O,
+                    OpenAIModels.GPT_4O_MINI,
+                    OpenAIModels.GPT_3_5_TURBO,
+                ];
             case AIProviderName.GOOGLE_AI:
-                return ["gemini-1.5-pro", "gemini-1.5-flash", "gemini-pro"];
+                return [
+                    GoogleAIModels.GEMINI_1_5_PRO,
+                    GoogleAIModels.GEMINI_1_5_FLASH,
+                    GoogleAIModels.GEMINI_2_5_PRO,
+                ];
             case AIProviderName.VERTEX:
                 return [
                     // Google models (via vertex provider)
-                    "gemini-2.5-pro",
-                    "gemini-2.5-flash",
-                    "gemini-2.5-flash-lite",
-                    "gemini-2.0-flash-001",
-                    "gemini-1.5-pro",
-                    "gemini-1.5-flash",
+                    GoogleAIModels.GEMINI_2_5_PRO,
+                    GoogleAIModels.GEMINI_2_5_FLASH,
+                    GoogleAIModels.GEMINI_2_5_FLASH_LITE,
+                    GoogleAIModels.GEMINI_2_0_FLASH_001,
+                    GoogleAIModels.GEMINI_1_5_PRO,
+                    GoogleAIModels.GEMINI_1_5_FLASH,
                     // Anthropic models (via vertexAnthropic provider)
                     "claude-sonnet-4@20250514",
                     "claude-opus-4@20250514",
-                    "claude-3-5-sonnet-20241022",
-                    "claude-3-5-haiku-20241022",
-                    "claude-3-sonnet-20240229",
-                    "claude-3-haiku-20240307",
-                    "claude-3-opus-20240229",
+                    AnthropicModels.CLAUDE_3_5_SONNET,
+                    AnthropicModels.CLAUDE_3_5_HAIKU,
+                    AnthropicModels.CLAUDE_3_SONNET,
+                    AnthropicModels.CLAUDE_3_HAIKU,
+                    AnthropicModels.CLAUDE_3_OPUS,
                 ];
             case AIProviderName.BEDROCK:
-                return [
-                    "anthropic.claude-3-sonnet-20240229-v1:0",
-                    "anthropic.claude-3-haiku-20240307-v1:0",
-                ];
+                return [BedrockModels.CLAUDE_3_SONNET, BedrockModels.CLAUDE_3_HAIKU];
             case AIProviderName.AZURE:
-                return ["gpt-4o", "gpt-4o-mini", "gpt-35-turbo"];
+                return [OpenAIModels.GPT_4O, OpenAIModels.GPT_4O_MINI, "gpt-35-turbo"];
             case AIProviderName.OLLAMA:
                 return ["llama3.2:latest", "llama3.1:latest", "mistral:latest"];
             default:
@@ -951,8 +963,7 @@ export class ProviderHealthChecker {
     if (projectId) {
         result.projectId = projectId;
         // Validate project ID format
-        const projectIdPattern = /^[a-z][a-z0-9-]{4,28}[a-z0-9]$/;
-        if (projectIdPattern.test(projectId)) {
+        if (PROJECT_ID_FORMAT.PATTERN.test(projectId)) {
             result.isValid = true;
         }
         else {
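Both this health check and providerUtils.js (below) now test Google Cloud project IDs against the single exported PROJECT_ID_FORMAT. A quick check of what the pattern accepts (the regex literal mirrors the exported PATTERN):

const PATTERN = /^[a-z][a-z0-9-]{4,28}[a-z0-9]$/; // mirrors PROJECT_ID_FORMAT.PATTERN

console.log(PATTERN.test("my-gcp-project-42")); // true
console.log(PATTERN.test("Bad_Project"));       // false: uppercase and underscore
console.log(PATTERN.test("short"));             // false: fewer than 6 characters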
@@ -2,6 +2,7 @@
  * Enhanced Provider Setup Messages
  * Provides detailed setup instructions for AI providers
  */
+import { OpenAIModels, GoogleAIModels, AnthropicModels, APIVersions, } from "../core/types.js";
 /**
  * Generate enhanced error message with setup instructions
  */
@@ -12,7 +13,7 @@ export function getProviderSetupMessage(provider, missingVars) {
         envVars: [
             'OPENAI_API_KEY="sk-proj-your-openai-api-key"',
             "# Optional:",
-            'OPENAI_MODEL="gpt-4o"',
+            `OPENAI_MODEL="${OpenAIModels.GPT_4O}"`,
             'OPENAI_BASE_URL="https://api.openai.com"',
         ],
     },
@@ -21,7 +22,7 @@
         envVars: [
             'ANTHROPIC_API_KEY="sk-ant-api03-your-anthropic-key"',
             "# Optional:",
-            'ANTHROPIC_MODEL="claude-3-5-sonnet-20241022"',
+            `ANTHROPIC_MODEL="${AnthropicModels.CLAUDE_3_5_SONNET}"`,
         ],
     },
     "google-ai": {
@@ -29,7 +30,7 @@
         envVars: [
             'GOOGLE_AI_API_KEY="AIza-your-google-ai-api-key"',
             "# Optional:",
-            'GOOGLE_AI_MODEL="gemini-2.5-pro"',
+            `GOOGLE_AI_MODEL="${GoogleAIModels.GEMINI_2_5_PRO}"`,
         ],
     },
     vertex: {
@@ -39,7 +40,7 @@
             'GOOGLE_VERTEX_PROJECT="your-gcp-project-id"',
             'GOOGLE_VERTEX_LOCATION="us-central1"',
             "# Optional:",
-            'VERTEX_MODEL="gemini-2.5-pro"',
+            `VERTEX_MODEL="${GoogleAIModels.GEMINI_2_5_PRO}"`,
         ],
     },
     bedrock: {
@@ -61,8 +62,8 @@
             'AZURE_OPENAI_ENDPOINT="https://your-resource.openai.azure.com/"',
             'AZURE_OPENAI_DEPLOYMENT_ID="your-deployment-name"',
             "# Optional:",
-            'AZURE_MODEL="gpt-4o"',
-            'AZURE_API_VERSION="2024-02-15-preview"',
+            `AZURE_MODEL="${OpenAIModels.GPT_4O}"`,
+            `AZURE_API_VERSION="${APIVersions.AZURE_STABLE}"`,
         ],
     },
     huggingface: {
@@ -5,6 +5,7 @@
 import { AIProviderFactory } from "../core/factory.js";
 import { logger } from "./logger.js";
 import { ProviderHealthChecker } from "./providerHealth.js";
+import { API_KEY_FORMATS, API_KEY_LENGTHS, PROJECT_ID_FORMAT, } from "./providerConfig.js";
 /**
  * Get the best available provider based on real-time availability checks
  * Enhanced version consolidated from providerUtils-fixed.ts
@@ -123,15 +124,6 @@ async function isProviderAvailable(providerName) {
         return false;
     }
 }
-/**
- * Google Cloud Project ID validation regex
- * Format requirements:
- * - Must start with a lowercase letter
- * - Can contain lowercase letters, numbers, and hyphens
- * - Must end with a lowercase letter or number
- * - Total length must be 6-30 characters
- */
-const GOOGLE_CLOUD_PROJECT_ID_REGEX = /^[a-z][a-z0-9-]{4,28}[a-z0-9]$/;
 /**
  * Validate environment variable values for a provider
  * Addresses GitHub Copilot comment about adding environment variable validation
@@ -166,7 +158,7 @@ export function validateProviderEnvVars(provider) {
             validateAnthropicCredentials(result);
             break;
         case "azure":
-        case "azureOpenai":
+        case "azureopenai":
             validateAzureCredentials(result);
             break;
         case "google-ai":
@@ -240,7 +232,7 @@ function validateVertexCredentials(result) {
     if (!projectId) {
         result.missingVars.push("GOOGLE_CLOUD_PROJECT_ID (or variant)");
     }
-    else if (!GOOGLE_CLOUD_PROJECT_ID_REGEX.test(projectId)) {
+    else if (!PROJECT_ID_FORMAT.PATTERN.test(projectId)) {
         result.invalidVars.push("Project ID format invalid (must be 6-30 lowercase letters, digits, hyphens)");
     }
     if (!hasCredentials) {
@@ -259,8 +251,8 @@ function validateOpenAICredentials(result) {
     if (!apiKey) {
         result.missingVars.push("OPENAI_API_KEY");
     }
-    else if (!/^sk-[A-Za-z0-9]{48,}$/.test(apiKey)) {
-        result.invalidVars.push("OPENAI_API_KEY (should start with 'sk-' followed by 48+ characters)");
+    else if (!API_KEY_FORMATS.openai.test(apiKey)) {
+        result.invalidVars.push(`OPENAI_API_KEY (should start with 'sk-' followed by ${API_KEY_LENGTHS.OPENAI_MIN}+ characters)`);
     }
 }
 /**
@@ -271,8 +263,8 @@ function validateAnthropicCredentials(result) {
     if (!apiKey) {
         result.missingVars.push("ANTHROPIC_API_KEY");
     }
-    else if (!/^sk-ant-[A-Za-z0-9-_]{95,}$/.test(apiKey)) {
-        result.invalidVars.push("ANTHROPIC_API_KEY (should start with 'sk-ant-' followed by 95+ characters)");
+    else if (!API_KEY_FORMATS.anthropic.test(apiKey)) {
+        result.invalidVars.push(`ANTHROPIC_API_KEY (should start with 'sk-ant-' followed by ${API_KEY_LENGTHS.ANTHROPIC_MIN}+ characters)`);
     }
 }
 /**
@@ -284,8 +276,8 @@ function validateAzureCredentials(result) {
     if (!apiKey) {
         result.missingVars.push("AZURE_OPENAI_API_KEY");
     }
-    else if (!/^[a-f0-9]{32}$/.test(apiKey)) {
-        result.invalidVars.push("AZURE_OPENAI_API_KEY (should be 32 hexadecimal characters)");
+    else if (!API_KEY_FORMATS.azure.test(apiKey)) {
+        result.invalidVars.push(`AZURE_OPENAI_API_KEY (should be at least ${API_KEY_LENGTHS.AZURE_MIN} alphanumeric characters)`);
     }
     if (!endpoint) {
         result.missingVars.push("AZURE_OPENAI_ENDPOINT");
302
294
  if (!apiKey) {
303
295
  result.missingVars.push("GOOGLE_AI_API_KEY (or GOOGLE_GENERATIVE_AI_API_KEY)");
304
296
  }
305
- else if (!/^[A-Za-z0-9_-]{39}$/.test(apiKey)) {
306
- result.invalidVars.push("GOOGLE_AI_API_KEY (should be 39 alphanumeric characters with dashes/underscores)");
297
+ else if (!API_KEY_FORMATS["google-ai"].test(apiKey)) {
298
+ result.invalidVars.push(`GOOGLE_AI_API_KEY (should be ${API_KEY_LENGTHS.GOOGLE_AI_EXACT} alphanumeric characters with dashes/underscores)`);
307
299
  }
308
300
  }
309
301
  /**
@@ -314,8 +306,8 @@ function validateHuggingFaceCredentials(result) {
     if (!apiKey) {
         result.missingVars.push("HUGGINGFACE_API_KEY (or HF_TOKEN)");
     }
-    else if (!/^hf_[A-Za-z0-9]{37}$/.test(apiKey)) {
-        result.invalidVars.push("HUGGINGFACE_API_KEY (should start with 'hf_' followed by 37 characters)");
+    else if (!API_KEY_FORMATS.huggingface.test(apiKey)) {
+        result.invalidVars.push(`HUGGINGFACE_API_KEY (should start with 'hf_' followed by ${API_KEY_LENGTHS.HUGGINGFACE_EXACT} characters)`);
     }
 }
 /**
@@ -326,8 +318,8 @@ function validateMistralCredentials(result) {
     if (!apiKey) {
         result.missingVars.push("MISTRAL_API_KEY");
     }
-    else if (!/^[A-Za-z0-9]{32,}$/.test(apiKey)) {
-        result.invalidVars.push("MISTRAL_API_KEY (should be 32+ alphanumeric characters)");
+    else if (!API_KEY_FORMATS.mistral.test(apiKey)) {
+        result.invalidVars.push(`MISTRAL_API_KEY (should be ${API_KEY_LENGTHS.MISTRAL_EXACT} alphanumeric characters)`);
     }
 }
 /**
@@ -381,7 +373,7 @@ export function hasProviderEnvVars(provider) {
         case "claude":
             return !!process.env.ANTHROPIC_API_KEY;
         case "azure":
-        case "azureOpenai":
+        case "azureopenai":
             return !!process.env.AZURE_OPENAI_API_KEY;
         case "google-ai":
         case "google-studio":
@@ -6,13 +6,13 @@ import { PROVIDER_MAX_TOKENS } from "../core/constants.js";
 /**
  * Get the safe maximum tokens for a provider and model
  */
-export declare function getSafeMaxTokens(provider: keyof typeof PROVIDER_MAX_TOKENS | string, model?: string, requestedMaxTokens?: number): number;
+export declare function getSafeMaxTokens(provider: keyof typeof PROVIDER_MAX_TOKENS | string, model?: string, requestedMaxTokens?: number): number | undefined;
 /**
  * Validate if maxTokens is safe for a provider/model combination
  */
 export declare function validateMaxTokens(provider: keyof typeof PROVIDER_MAX_TOKENS | string, model?: string, maxTokens?: number): {
     isValid: boolean;
-    recommendedMaxTokens: number;
+    recommendedMaxTokens?: number;
     warning?: string;
 };
 /**
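Since getSafeMaxTokens may now return undefined, callers have to narrow the result before treating it as a number. A consumer sketch with a stub implementation (the stub is a pass-through for illustration, not the package's logic):

// Stub mirroring the new declaration; the real logic lives in tokenLimits.js.
function getSafeMaxTokens(
  provider: string,
  model?: string,
  requestedMaxTokens?: number,
): number | undefined {
  return requestedMaxTokens;
}

const cap = getSafeMaxTokens("openai", "gpt-4o");
if (cap === undefined) {
  console.log("No cap resolved: let the provider/model default apply");
} else {
  console.log(`Capping completion at ${cap} tokens`); // cap narrowed to number
}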