@juspay/neurolink 7.33.2 → 7.33.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. package/CHANGELOG.md +8 -0
  2. package/dist/cli/commands/config.d.ts +3 -4
  3. package/dist/cli/commands/config.js +2 -3
  4. package/dist/constants/index.d.ts +192 -0
  5. package/dist/constants/index.js +195 -0
  6. package/dist/constants/performance.d.ts +366 -0
  7. package/dist/constants/performance.js +389 -0
  8. package/dist/constants/retry.d.ts +224 -0
  9. package/dist/constants/retry.js +266 -0
  10. package/dist/constants/timeouts.d.ts +225 -0
  11. package/dist/constants/timeouts.js +182 -0
  12. package/dist/constants/tokens.d.ts +234 -0
  13. package/dist/constants/tokens.js +314 -0
  14. package/dist/core/baseProvider.js +26 -1
  15. package/dist/core/constants.d.ts +12 -3
  16. package/dist/core/constants.js +22 -6
  17. package/dist/core/factory.js +19 -0
  18. package/dist/core/types.d.ts +268 -0
  19. package/dist/core/types.js +153 -0
  20. package/dist/factories/providerRegistry.js +2 -0
  21. package/dist/lib/constants/index.d.ts +192 -0
  22. package/dist/lib/constants/index.js +195 -0
  23. package/dist/lib/constants/performance.d.ts +366 -0
  24. package/dist/lib/constants/performance.js +389 -0
  25. package/dist/lib/constants/retry.d.ts +224 -0
  26. package/dist/lib/constants/retry.js +266 -0
  27. package/dist/lib/constants/timeouts.d.ts +225 -0
  28. package/dist/lib/constants/timeouts.js +182 -0
  29. package/dist/lib/constants/tokens.d.ts +234 -0
  30. package/dist/lib/constants/tokens.js +314 -0
  31. package/dist/lib/core/baseProvider.js +26 -1
  32. package/dist/lib/core/constants.d.ts +12 -3
  33. package/dist/lib/core/constants.js +22 -6
  34. package/dist/lib/core/factory.js +19 -0
  35. package/dist/lib/core/types.d.ts +268 -0
  36. package/dist/lib/core/types.js +153 -0
  37. package/dist/lib/factories/providerRegistry.js +2 -0
  38. package/dist/lib/mcp/servers/aiProviders/aiWorkflowTools.js +2 -2
  39. package/dist/lib/models/modelRegistry.d.ts +1 -1
  40. package/dist/lib/models/modelRegistry.js +63 -37
  41. package/dist/lib/neurolink.js +35 -34
  42. package/dist/lib/providers/amazonBedrock.js +2 -2
  43. package/dist/lib/providers/anthropic.js +3 -12
  44. package/dist/lib/providers/anthropicBaseProvider.js +1 -2
  45. package/dist/lib/providers/azureOpenai.d.ts +1 -1
  46. package/dist/lib/providers/azureOpenai.js +51 -9
  47. package/dist/lib/providers/googleAiStudio.js +3 -3
  48. package/dist/lib/providers/googleVertex.js +2 -2
  49. package/dist/lib/providers/huggingFace.js +1 -2
  50. package/dist/lib/providers/litellm.js +1 -2
  51. package/dist/lib/providers/mistral.js +2 -2
  52. package/dist/lib/providers/ollama.js +7 -8
  53. package/dist/lib/providers/openAI.js +2 -2
  54. package/dist/lib/providers/openaiCompatible.js +5 -2
  55. package/dist/lib/providers/sagemaker/language-model.d.ts +5 -0
  56. package/dist/lib/providers/sagemaker/language-model.js +9 -1
  57. package/dist/lib/utils/providerConfig.d.ts +25 -0
  58. package/dist/lib/utils/providerConfig.js +24 -3
  59. package/dist/lib/utils/providerHealth.d.ts +1 -1
  60. package/dist/lib/utils/providerHealth.js +47 -36
  61. package/dist/lib/utils/providerSetupMessages.js +7 -6
  62. package/dist/lib/utils/providerUtils.js +16 -24
  63. package/dist/lib/utils/tokenLimits.d.ts +2 -2
  64. package/dist/lib/utils/tokenLimits.js +10 -3
  65. package/dist/mcp/servers/aiProviders/aiWorkflowTools.js +2 -2
  66. package/dist/models/modelRegistry.d.ts +1 -1
  67. package/dist/models/modelRegistry.js +63 -37
  68. package/dist/neurolink.js +35 -34
  69. package/dist/providers/amazonBedrock.js +2 -2
  70. package/dist/providers/anthropic.js +3 -12
  71. package/dist/providers/anthropicBaseProvider.js +1 -2
  72. package/dist/providers/azureOpenai.d.ts +1 -1
  73. package/dist/providers/azureOpenai.js +51 -9
  74. package/dist/providers/googleAiStudio.js +3 -3
  75. package/dist/providers/googleVertex.js +2 -2
  76. package/dist/providers/huggingFace.js +1 -2
  77. package/dist/providers/litellm.js +1 -2
  78. package/dist/providers/mistral.js +2 -2
  79. package/dist/providers/ollama.js +7 -8
  80. package/dist/providers/openAI.js +2 -2
  81. package/dist/providers/openaiCompatible.js +5 -2
  82. package/dist/providers/sagemaker/language-model.d.ts +5 -0
  83. package/dist/providers/sagemaker/language-model.js +9 -1
  84. package/dist/utils/providerConfig.d.ts +25 -0
  85. package/dist/utils/providerConfig.js +24 -3
  86. package/dist/utils/providerHealth.d.ts +1 -1
  87. package/dist/utils/providerHealth.js +47 -36
  88. package/dist/utils/providerSetupMessages.js +7 -6
  89. package/dist/utils/providerUtils.js +16 -24
  90. package/dist/utils/tokenLimits.d.ts +2 -2
  91. package/dist/utils/tokenLimits.js +10 -3
  92. package/package.json +1 -1
@@ -111,6 +111,25 @@ export class AIProviderFactory {
              logger.debug(`[${functionTag}] No Vertex environment variables found (VERTEX_MODEL)`);
          }
      }
+     else if (providerName.toLowerCase().includes("azure")) {
+         const envModel = process.env.AZURE_OPENAI_MODEL ||
+             process.env.AZURE_OPENAI_DEPLOYMENT ||
+             process.env.AZURE_OPENAI_DEPLOYMENT_ID;
+         if (envModel) {
+             resolvedModelName = envModel;
+             logger.debug(`[${functionTag}] Environment variable found for Azure`, {
+                 envVariable: process.env.AZURE_OPENAI_MODEL
+                     ? "AZURE_OPENAI_MODEL"
+                     : process.env.AZURE_OPENAI_DEPLOYMENT
+                         ? "AZURE_OPENAI_DEPLOYMENT"
+                         : "AZURE_OPENAI_DEPLOYMENT_ID",
+                 resolvedModel: envModel,
+             });
+         }
+         else {
+             logger.debug(`[${functionTag}] No Azure environment variables found (AZURE_OPENAI_MODEL, AZURE_OPENAI_DEPLOYMENT, AZURE_OPENAI_DEPLOYMENT_ID)`);
+         }
+     }
      else {
          logger.debug(`[${functionTag}] Provider ${providerName} - no environment variable check implemented`);
      }
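The new factory branch resolves an Azure model name from environment variables in a fixed precedence order and logs which variable matched. Below is a minimal sketch of that precedence, assuming standard Node.js `process.env` access; the helper name is hypothetical, since in the package this logic lives inline in `AIProviderFactory`.

```ts
// Hypothetical standalone helper mirroring the precedence added above:
// AZURE_OPENAI_MODEL, then AZURE_OPENAI_DEPLOYMENT, then AZURE_OPENAI_DEPLOYMENT_ID.
function resolveAzureModelFromEnv(
  env: NodeJS.ProcessEnv = process.env,
): string | undefined {
  return (
    env.AZURE_OPENAI_MODEL ||
    env.AZURE_OPENAI_DEPLOYMENT ||
    env.AZURE_OPENAI_DEPLOYMENT_ID ||
    undefined
  );
}

// Example: with only AZURE_OPENAI_DEPLOYMENT="my-gpt4o-deployment" set,
// the factory would resolve the model name to "my-gpt4o-deployment".
```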
@@ -0,0 +1,268 @@
+ import type { Tool, Schema } from "ai";
+ import type { ZodUnknownSchema, ValidationSchema } from "../types/typeAliases.js";
+ import type { GenerateResult } from "../types/generateTypes.js";
+ import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
+ import type { JsonValue } from "../types/common.js";
+ import type { ChatMessage, ConversationMemoryConfig } from "../types/conversationTypes.js";
+ import type { TokenUsage, AnalyticsData } from "../types/analytics.js";
+ import type { EvaluationData } from "../index.js";
+ export type { EvaluationData };
+ import type { MiddlewareFactoryOptions } from "../types/middlewareTypes.js";
+ export interface TextGenerationResult {
+     content: string;
+     provider?: string;
+     model?: string;
+     usage?: TokenUsage;
+     responseTime?: number;
+     toolsUsed?: string[];
+     toolExecutions?: Array<{
+         toolName: string;
+         executionTime: number;
+         success: boolean;
+         serverId?: string;
+     }>;
+     enhancedWithTools?: boolean;
+     availableTools?: Array<{
+         name: string;
+         description: string;
+         server: string;
+         category?: string;
+     }>;
+     analytics?: AnalyticsData;
+     evaluation?: EvaluationData;
+ }
+ /**
+  * Supported AI Provider Names
+  */
+ export declare enum AIProviderName {
+     BEDROCK = "bedrock",
+     OPENAI = "openai",
+     OPENAI_COMPATIBLE = "openai-compatible",
+     VERTEX = "vertex",
+     ANTHROPIC = "anthropic",
+     AZURE = "azure",
+     GOOGLE_AI = "google-ai",
+     HUGGINGFACE = "huggingface",
+     OLLAMA = "ollama",
+     MISTRAL = "mistral",
+     LITELLM = "litellm",
+     SAGEMAKER = "sagemaker",
+     AUTO = "auto"
+ }
+ /**
+  * Supported Models for Amazon Bedrock
+  */
+ export declare enum BedrockModels {
+     CLAUDE_3_SONNET = "anthropic.claude-3-sonnet-20240229-v1:0",
+     CLAUDE_3_HAIKU = "anthropic.claude-3-haiku-20240307-v1:0",
+     CLAUDE_3_5_SONNET = "anthropic.claude-3-5-sonnet-20240620-v1:0",
+     CLAUDE_3_7_SONNET = "arn:aws:bedrock:us-east-2:225681119357:inference-profile/us.anthropic.claude-3-7-sonnet-20250219-v1:0"
+ }
+ /**
+  * Supported Models for OpenAI
+  */
+ export declare enum OpenAIModels {
+     GPT_4 = "gpt-4",
+     GPT_4_TURBO = "gpt-4-turbo",
+     GPT_4O = "gpt-4o",
+     GPT_4O_MINI = "gpt-4o-mini",
+     GPT_3_5_TURBO = "gpt-3.5-turbo",
+     O1_PREVIEW = "o1-preview",
+     O1_MINI = "o1-mini"
+ }
+ /**
+  * Supported Models for Google Vertex AI
+  */
+ export declare enum VertexModels {
+     CLAUDE_4_0_SONNET = "claude-sonnet-4@20250514",
+     CLAUDE_4_0_OPUS = "claude-opus-4@20250514",
+     CLAUDE_3_5_SONNET = "claude-3-5-sonnet-20241022",
+     CLAUDE_3_5_HAIKU = "claude-3-5-haiku-20241022",
+     CLAUDE_3_SONNET = "claude-3-sonnet-20240229",
+     CLAUDE_3_OPUS = "claude-3-opus-20240229",
+     CLAUDE_3_HAIKU = "claude-3-haiku-20240307",
+     GEMINI_2_5_PRO = "gemini-2.5-pro",
+     GEMINI_2_5_FLASH = "gemini-2.5-flash",
+     GEMINI_2_5_FLASH_LITE = "gemini-2.5-flash-lite",
+     GEMINI_2_0_FLASH_001 = "gemini-2.0-flash-001",
+     GEMINI_1_5_PRO = "gemini-1.5-pro",
+     GEMINI_1_5_FLASH = "gemini-1.5-flash"
+ }
+ /**
+  * Supported Models for Google AI Studio
+  */
+ export declare enum GoogleAIModels {
+     GEMINI_2_5_PRO = "gemini-2.5-pro",
+     GEMINI_2_5_FLASH = "gemini-2.5-flash",
+     GEMINI_2_5_FLASH_LITE = "gemini-2.5-flash-lite",
+     GEMINI_2_0_FLASH_001 = "gemini-2.0-flash-001",
+     GEMINI_1_5_PRO = "gemini-1.5-pro",
+     GEMINI_1_5_FLASH = "gemini-1.5-flash",
+     GEMINI_1_5_FLASH_LITE = "gemini-1.5-flash-lite"
+ }
+ /**
+  * Supported Models for Anthropic (Direct API)
+  */
+ export declare enum AnthropicModels {
+     CLAUDE_3_5_SONNET = "claude-3-5-sonnet-20241022",
+     CLAUDE_3_5_HAIKU = "claude-3-5-haiku-20241022",
+     CLAUDE_3_SONNET = "claude-3-sonnet-20240229",
+     CLAUDE_3_OPUS = "claude-3-opus-20240229",
+     CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
+ }
+ /**
+  * API Versions for various providers
+  */
+ export declare enum APIVersions {
+     AZURE_LATEST = "2025-04-01-preview",
+     AZURE_STABLE = "2024-10-21",
+     AZURE_LEGACY = "2023-12-01-preview",
+     OPENAI_CURRENT = "v1",
+     OPENAI_BETA = "v1-beta",
+     GOOGLE_AI_CURRENT = "v1",
+     GOOGLE_AI_BETA = "v1beta",
+     ANTHROPIC_CURRENT = "2023-06-01"
+ }
+ /**
+  * Default model aliases for easy reference
+  */
+ export declare const DEFAULT_MODEL_ALIASES: {
+     readonly LATEST_OPENAI: OpenAIModels.GPT_4O;
+     readonly FASTEST_OPENAI: OpenAIModels.GPT_4O_MINI;
+     readonly LATEST_ANTHROPIC: AnthropicModels.CLAUDE_3_5_SONNET;
+     readonly FASTEST_ANTHROPIC: AnthropicModels.CLAUDE_3_5_HAIKU;
+     readonly LATEST_GOOGLE: GoogleAIModels.GEMINI_2_5_PRO;
+     readonly FASTEST_GOOGLE: GoogleAIModels.GEMINI_2_5_FLASH;
+     readonly BEST_CODING: AnthropicModels.CLAUDE_3_5_SONNET;
+     readonly BEST_ANALYSIS: GoogleAIModels.GEMINI_2_5_PRO;
+     readonly BEST_CREATIVE: AnthropicModels.CLAUDE_3_5_SONNET;
+     readonly BEST_VALUE: GoogleAIModels.GEMINI_2_5_FLASH;
+ };
+ /**
+  * @deprecated Use DEFAULT_MODEL_ALIASES instead. Will be removed in future version.
+  */
+ export declare const ModelAliases: {
+     readonly LATEST_OPENAI: OpenAIModels.GPT_4O;
+     readonly FASTEST_OPENAI: OpenAIModels.GPT_4O_MINI;
+     readonly LATEST_ANTHROPIC: AnthropicModels.CLAUDE_3_5_SONNET;
+     readonly FASTEST_ANTHROPIC: AnthropicModels.CLAUDE_3_5_HAIKU;
+     readonly LATEST_GOOGLE: GoogleAIModels.GEMINI_2_5_PRO;
+     readonly FASTEST_GOOGLE: GoogleAIModels.GEMINI_2_5_FLASH;
+     readonly BEST_CODING: AnthropicModels.CLAUDE_3_5_SONNET;
+     readonly BEST_ANALYSIS: GoogleAIModels.GEMINI_2_5_PRO;
+     readonly BEST_CREATIVE: AnthropicModels.CLAUDE_3_5_SONNET;
+     readonly BEST_VALUE: GoogleAIModels.GEMINI_2_5_FLASH;
+ };
+ /**
+  * Union type of all supported model names
+  */
+ export type SupportedModelName = BedrockModels | OpenAIModels | VertexModels | GoogleAIModels | AnthropicModels;
+ /**
+  * Provider configuration specifying provider and its available models
+  */
+ export interface ProviderConfig {
+     provider: AIProviderName;
+     models: SupportedModelName[];
+ }
+ /**
+  * Options for AI requests with unified provider configuration
+  */
+ export interface StreamingOptions {
+     providers: ProviderConfig[];
+     temperature?: number;
+     maxTokens?: number;
+     systemPrompt?: string;
+ }
+ /**
+  * Text generation options interface
+  */
+ export interface TextGenerationOptions {
+     prompt?: string;
+     input?: {
+         text: string;
+     };
+     provider?: AIProviderName;
+     model?: string;
+     temperature?: number;
+     maxTokens?: number;
+     systemPrompt?: string;
+     schema?: ZodUnknownSchema | Schema<unknown>;
+     tools?: Record<string, Tool>;
+     timeout?: number | string;
+     disableTools?: boolean;
+     maxSteps?: number;
+     enableEvaluation?: boolean;
+     enableAnalytics?: boolean;
+     context?: Record<string, JsonValue>;
+     evaluationDomain?: string;
+     toolUsageContext?: string;
+     conversationHistory?: Array<{
+         role: string;
+         content: string;
+     }>;
+     conversationMessages?: ChatMessage[];
+     conversationMemoryConfig?: Partial<ConversationMemoryConfig>;
+     originalPrompt?: string;
+     middleware?: MiddlewareFactoryOptions;
+     expectedOutcome?: string;
+     evaluationCriteria?: string[];
+ }
+ export type { AnalyticsData } from "../types/analytics.js";
+ /**
+  * Enhanced result interfaces with optional analytics/evaluation
+  */
+ export interface EnhancedGenerateResult extends GenerateResult {
+     analytics?: AnalyticsData;
+     evaluation?: EvaluationData;
+ }
+ /**
+  * Phase 2: Enhanced Streaming Infrastructure
+  * Progress tracking and metadata for streaming operations
+  */
+ export interface StreamingProgressData {
+     chunkCount: number;
+     totalBytes: number;
+     chunkSize: number;
+     elapsedTime: number;
+     estimatedRemaining?: number;
+     streamId?: string;
+     phase: "initializing" | "streaming" | "processing" | "complete" | "error";
+ }
+ export interface StreamingMetadata {
+     startTime: number;
+     endTime?: number;
+     totalDuration?: number;
+     averageChunkSize: number;
+     maxChunkSize: number;
+     minChunkSize: number;
+     throughputBytesPerSecond?: number;
+     streamingProvider: string;
+     modelUsed: string;
+ }
+ export type ProgressCallback = (progress: StreamingProgressData) => void;
+ /**
+  * AI Provider interface with flexible parameter support
+  */
+ export interface AIProvider {
+     stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ValidationSchema): Promise<StreamResult>;
+     generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ValidationSchema): Promise<EnhancedGenerateResult | null>;
+     gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ValidationSchema): Promise<EnhancedGenerateResult | null>;
+     setupToolExecutor(sdk: {
+         customTools: Map<string, unknown>;
+         executeTool: (toolName: string, params: unknown) => Promise<unknown>;
+     }, functionTag: string): void;
+ }
+ /**
+  * Provider attempt result for iteration tracking
+  */
+ export interface ProviderAttempt {
+     provider: AIProviderName;
+     model: SupportedModelName;
+     success: boolean;
+     error?: string;
+     stack?: string;
+ }
+ /**
+  * Default provider configurations
+  */
+ export declare const DEFAULT_PROVIDER_CONFIGS: ProviderConfig[];
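For orientation, here is a hedged usage sketch of the exports introduced by this new declaration file. The import specifier and option values are illustrative assumptions, not taken from the package's documentation.

```ts
import {
  AIProviderName,
  DEFAULT_MODEL_ALIASES,
  type TextGenerationOptions,
} from "@juspay/neurolink/dist/core/types.js"; // illustrative path; check the package's export map

// Assumed example: build request options from the new enums and alias table.
const options: TextGenerationOptions = {
  prompt: "Summarize the release notes",
  provider: AIProviderName.GOOGLE_AI,
  model: DEFAULT_MODEL_ALIASES.BEST_VALUE, // resolves to "gemini-2.5-flash"
  temperature: 0.3,
  maxTokens: 1024,
};
```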
@@ -0,0 +1,153 @@
+ /**
+  * Supported AI Provider Names
+  */
+ export var AIProviderName;
+ (function (AIProviderName) {
+     AIProviderName["BEDROCK"] = "bedrock";
+     AIProviderName["OPENAI"] = "openai";
+     AIProviderName["OPENAI_COMPATIBLE"] = "openai-compatible";
+     AIProviderName["VERTEX"] = "vertex";
+     AIProviderName["ANTHROPIC"] = "anthropic";
+     AIProviderName["AZURE"] = "azure";
+     AIProviderName["GOOGLE_AI"] = "google-ai";
+     AIProviderName["HUGGINGFACE"] = "huggingface";
+     AIProviderName["OLLAMA"] = "ollama";
+     AIProviderName["MISTRAL"] = "mistral";
+     AIProviderName["LITELLM"] = "litellm";
+     AIProviderName["SAGEMAKER"] = "sagemaker";
+     AIProviderName["AUTO"] = "auto";
+ })(AIProviderName || (AIProviderName = {}));
+ /**
+  * Supported Models for Amazon Bedrock
+  */
+ export var BedrockModels;
+ (function (BedrockModels) {
+     BedrockModels["CLAUDE_3_SONNET"] = "anthropic.claude-3-sonnet-20240229-v1:0";
+     BedrockModels["CLAUDE_3_HAIKU"] = "anthropic.claude-3-haiku-20240307-v1:0";
+     BedrockModels["CLAUDE_3_5_SONNET"] = "anthropic.claude-3-5-sonnet-20240620-v1:0";
+     BedrockModels["CLAUDE_3_7_SONNET"] = "arn:aws:bedrock:us-east-2:225681119357:inference-profile/us.anthropic.claude-3-7-sonnet-20250219-v1:0";
+ })(BedrockModels || (BedrockModels = {}));
+ /**
+  * Supported Models for OpenAI
+  */
+ export var OpenAIModels;
+ (function (OpenAIModels) {
+     OpenAIModels["GPT_4"] = "gpt-4";
+     OpenAIModels["GPT_4_TURBO"] = "gpt-4-turbo";
+     OpenAIModels["GPT_4O"] = "gpt-4o";
+     OpenAIModels["GPT_4O_MINI"] = "gpt-4o-mini";
+     OpenAIModels["GPT_3_5_TURBO"] = "gpt-3.5-turbo";
+     OpenAIModels["O1_PREVIEW"] = "o1-preview";
+     OpenAIModels["O1_MINI"] = "o1-mini";
+ })(OpenAIModels || (OpenAIModels = {}));
+ /**
+  * Supported Models for Google Vertex AI
+  */
+ export var VertexModels;
+ (function (VertexModels) {
+     // Claude 4 Series (Latest - May 2025)
+     VertexModels["CLAUDE_4_0_SONNET"] = "claude-sonnet-4@20250514";
+     VertexModels["CLAUDE_4_0_OPUS"] = "claude-opus-4@20250514";
+     // Claude 3.5 Series (Still supported)
+     VertexModels["CLAUDE_3_5_SONNET"] = "claude-3-5-sonnet-20241022";
+     VertexModels["CLAUDE_3_5_HAIKU"] = "claude-3-5-haiku-20241022";
+     // Claude 3 Series (Legacy support)
+     VertexModels["CLAUDE_3_SONNET"] = "claude-3-sonnet-20240229";
+     VertexModels["CLAUDE_3_OPUS"] = "claude-3-opus-20240229";
+     VertexModels["CLAUDE_3_HAIKU"] = "claude-3-haiku-20240307";
+     // Gemini 2.5 Series (Latest - 2025)
+     VertexModels["GEMINI_2_5_PRO"] = "gemini-2.5-pro";
+     VertexModels["GEMINI_2_5_FLASH"] = "gemini-2.5-flash";
+     VertexModels["GEMINI_2_5_FLASH_LITE"] = "gemini-2.5-flash-lite";
+     // Gemini 2.0 Series
+     VertexModels["GEMINI_2_0_FLASH_001"] = "gemini-2.0-flash-001";
+     // Gemini 1.5 Series (Legacy support)
+     VertexModels["GEMINI_1_5_PRO"] = "gemini-1.5-pro";
+     VertexModels["GEMINI_1_5_FLASH"] = "gemini-1.5-flash";
+ })(VertexModels || (VertexModels = {}));
+ /**
+  * Supported Models for Google AI Studio
+  */
+ export var GoogleAIModels;
+ (function (GoogleAIModels) {
+     // Gemini 2.5 Series (Latest - 2025)
+     GoogleAIModels["GEMINI_2_5_PRO"] = "gemini-2.5-pro";
+     GoogleAIModels["GEMINI_2_5_FLASH"] = "gemini-2.5-flash";
+     GoogleAIModels["GEMINI_2_5_FLASH_LITE"] = "gemini-2.5-flash-lite";
+     // Gemini 2.0 Series
+     GoogleAIModels["GEMINI_2_0_FLASH_001"] = "gemini-2.0-flash-001";
+     // Gemini 1.5 Series (Legacy support)
+     GoogleAIModels["GEMINI_1_5_PRO"] = "gemini-1.5-pro";
+     GoogleAIModels["GEMINI_1_5_FLASH"] = "gemini-1.5-flash";
+     GoogleAIModels["GEMINI_1_5_FLASH_LITE"] = "gemini-1.5-flash-lite";
+ })(GoogleAIModels || (GoogleAIModels = {}));
+ /**
+  * Supported Models for Anthropic (Direct API)
+  */
+ export var AnthropicModels;
+ (function (AnthropicModels) {
+     // Claude 3.5 Series (Latest)
+     AnthropicModels["CLAUDE_3_5_SONNET"] = "claude-3-5-sonnet-20241022";
+     AnthropicModels["CLAUDE_3_5_HAIKU"] = "claude-3-5-haiku-20241022";
+     // Claude 3 Series (Legacy support)
+     AnthropicModels["CLAUDE_3_SONNET"] = "claude-3-sonnet-20240229";
+     AnthropicModels["CLAUDE_3_OPUS"] = "claude-3-opus-20240229";
+     AnthropicModels["CLAUDE_3_HAIKU"] = "claude-3-haiku-20240307";
+ })(AnthropicModels || (AnthropicModels = {}));
+ /**
+  * API Versions for various providers
+  */
+ export var APIVersions;
+ (function (APIVersions) {
+     // Azure OpenAI API versions
+     APIVersions["AZURE_LATEST"] = "2025-04-01-preview";
+     APIVersions["AZURE_STABLE"] = "2024-10-21";
+     APIVersions["AZURE_LEGACY"] = "2023-12-01-preview";
+     // OpenAI API versions
+     APIVersions["OPENAI_CURRENT"] = "v1";
+     APIVersions["OPENAI_BETA"] = "v1-beta";
+     // Google AI API versions
+     APIVersions["GOOGLE_AI_CURRENT"] = "v1";
+     APIVersions["GOOGLE_AI_BETA"] = "v1beta";
+     // Anthropic API versions
+     APIVersions["ANTHROPIC_CURRENT"] = "2023-06-01";
+     // Other provider versions can be added here
+ })(APIVersions || (APIVersions = {}));
+ /**
+  * Default model aliases for easy reference
+  */
+ export const DEFAULT_MODEL_ALIASES = {
+     // Latest recommended models per provider
+     LATEST_OPENAI: OpenAIModels.GPT_4O,
+     FASTEST_OPENAI: OpenAIModels.GPT_4O_MINI,
+     LATEST_ANTHROPIC: AnthropicModels.CLAUDE_3_5_SONNET,
+     FASTEST_ANTHROPIC: AnthropicModels.CLAUDE_3_5_HAIKU,
+     LATEST_GOOGLE: GoogleAIModels.GEMINI_2_5_PRO,
+     FASTEST_GOOGLE: GoogleAIModels.GEMINI_2_5_FLASH,
+     // Best models by use case
+     BEST_CODING: AnthropicModels.CLAUDE_3_5_SONNET,
+     BEST_ANALYSIS: GoogleAIModels.GEMINI_2_5_PRO,
+     BEST_CREATIVE: AnthropicModels.CLAUDE_3_5_SONNET,
+     BEST_VALUE: GoogleAIModels.GEMINI_2_5_FLASH,
+ };
+ /**
+  * @deprecated Use DEFAULT_MODEL_ALIASES instead. Will be removed in future version.
+  */
+ export const ModelAliases = DEFAULT_MODEL_ALIASES;
+ /**
+  * Default provider configurations
+  */
+ export const DEFAULT_PROVIDER_CONFIGS = [
+     {
+         provider: AIProviderName.BEDROCK,
+         models: [BedrockModels.CLAUDE_3_7_SONNET, BedrockModels.CLAUDE_3_5_SONNET],
+     },
+     {
+         provider: AIProviderName.VERTEX,
+         models: [VertexModels.CLAUDE_4_0_SONNET, VertexModels.GEMINI_2_5_FLASH],
+     },
+     {
+         provider: AIProviderName.OPENAI,
+         models: [OpenAIModels.GPT_4O, OpenAIModels.GPT_4O_MINI],
+     },
+ ];
@@ -49,6 +49,8 @@ export class ProviderRegistry {
      const { AzureOpenAIProvider } = await import("../providers/azureOpenai.js");
      return new AzureOpenAIProvider(modelName);
  }, process.env.AZURE_MODEL ||
+     process.env.AZURE_OPENAI_MODEL ||
+     process.env.AZURE_OPENAI_DEPLOYMENT ||
      process.env.AZURE_OPENAI_DEPLOYMENT_ID ||
      "gpt-4o-mini", ["azure", "azureOpenai"]);
  // Register Google Vertex AI provider
@@ -146,7 +146,7 @@ Return ONLY a valid JSON object with this exact structure:
  Generate 3-5 comprehensive test cases covering the requested types.`;
      const result = await provider.generate({
          prompt: prompt,
-         maxTokens: Math.floor(DEFAULT_MAX_TOKENS * 1.2),
+         maxTokens: 10000, // High limit for complex analysis
          temperature: 0.3, // Lower temperature for more consistent structured output
      });
      if (!result || !result.content) {
@@ -360,7 +360,7 @@ Return ONLY a valid JSON object with this exact structure:
  Focus on creating accurate, useful documentation that explains the code's purpose, parameters, return values, and usage patterns.`;
      const result = await provider.generate({
          prompt: prompt,
-         maxTokens: Math.floor(DEFAULT_MAX_TOKENS * 1.2),
+         maxTokens: 10000, // High limit for complex analysis
          temperature: 0.3, // Moderate temperature for creative but structured documentation
      });
      if (!result || !result.content) {
@@ -3,7 +3,7 @@
   * Provides centralized model data for models command system
   * Part of Phase 4.1 - Models Command System
   */
- import { AIProviderName } from "../types/index.js";
+ import { AIProviderName } from "../core/types.js";
  import type { JsonValue } from "../types/common.js";
  /**
   * Model capabilities interface
@@ -3,14 +3,14 @@
   * Provides centralized model data for models command system
   * Part of Phase 4.1 - Models Command System
   */
- import { AIProviderName } from "../types/index.js";
+ import { AIProviderName, OpenAIModels, GoogleAIModels, AnthropicModels, DEFAULT_MODEL_ALIASES, } from "../core/types.js";
  /**
   * Comprehensive model registry
   */
  export const MODEL_REGISTRY = {
      // OpenAI Models
-     "gpt-4o": {
-         id: "gpt-4o",
+     [OpenAIModels.GPT_4O]: {
+         id: OpenAIModels.GPT_4O,
          name: "GPT-4 Omni",
          provider: AIProviderName.OPENAI,
          description: "Most capable OpenAI model with vision and advanced reasoning",
@@ -53,8 +53,8 @@ export const MODEL_REGISTRY = {
          releaseDate: "2024-05-13",
          category: "general",
      },
-     "gpt-4o-mini": {
-         id: "gpt-4o-mini",
+     [OpenAIModels.GPT_4O_MINI]: {
+         id: OpenAIModels.GPT_4O_MINI,
          name: "GPT-4 Omni Mini",
          provider: AIProviderName.OPENAI,
          description: "Fast and cost-effective model with strong performance",
@@ -98,8 +98,8 @@ export const MODEL_REGISTRY = {
          category: "general",
      },
      // Google AI Studio Models
-     "gemini-2.5-pro": {
-         id: "gemini-2.5-pro",
+     [GoogleAIModels.GEMINI_2_5_PRO]: {
+         id: GoogleAIModels.GEMINI_2_5_PRO,
          name: "Gemini 2.5 Pro",
          provider: AIProviderName.GOOGLE_AI,
          description: "Google's most capable multimodal model with large context window",
@@ -142,8 +142,8 @@ export const MODEL_REGISTRY = {
          releaseDate: "2024-12-11",
          category: "reasoning",
      },
-     "gemini-2.5-flash": {
-         id: "gemini-2.5-flash",
+     [GoogleAIModels.GEMINI_2_5_FLASH]: {
+         id: GoogleAIModels.GEMINI_2_5_FLASH,
          name: "Gemini 2.5 Flash",
          provider: AIProviderName.GOOGLE_AI,
          description: "Fast and efficient multimodal model with large context",
@@ -187,8 +187,8 @@ export const MODEL_REGISTRY = {
          category: "general",
      },
      // Anthropic Models
-     "claude-3-5-sonnet-20241022": {
-         id: "claude-3-5-sonnet-20241022",
+     [AnthropicModels.CLAUDE_3_5_SONNET]: {
+         id: AnthropicModels.CLAUDE_3_5_SONNET,
          name: "Claude 3.5 Sonnet",
          provider: AIProviderName.ANTHROPIC,
          description: "Anthropic's most capable model with excellent reasoning and coding",
@@ -236,8 +236,8 @@ export const MODEL_REGISTRY = {
          releaseDate: "2024-10-22",
          category: "coding",
      },
-     "claude-3-5-haiku-20241022": {
-         id: "claude-3-5-haiku-20241022",
+     [AnthropicModels.CLAUDE_3_5_HAIKU]: {
+         id: AnthropicModels.CLAUDE_3_5_HAIKU,
          name: "Claude 3.5 Haiku",
          provider: AIProviderName.ANTHROPIC,
          description: "Fast and efficient Claude model for quick tasks",
@@ -380,39 +380,65 @@ Object.values(MODEL_REGISTRY).forEach((model) => {
          MODEL_ALIASES[alias.toLowerCase()] = model.id;
      });
  });
- // Add common aliases
- Object.assign(MODEL_ALIASES, {
-     latest: "gpt-4o", // Default latest model
-     fastest: "gpt-4o-mini",
-     cheapest: "gemini-2.5-flash",
-     "best-coding": "claude-3-5-sonnet-20241022",
-     "best-analysis": "gemini-2.5-pro",
-     "best-creative": "claude-3-5-sonnet-20241022",
-     "best-value": "gemini-2.5-flash",
-     local: "llama3.2:latest",
+ // Pull canonical alias recommendations from core/types
+ Object.entries(DEFAULT_MODEL_ALIASES).forEach(([k, v]) => {
+     MODEL_ALIASES[k.toLowerCase().replace(/_/g, "-")] = v;
  });
+ MODEL_ALIASES.local = "llama3.2:latest";
  /**
   * Use case to model mappings
   */
  export const USE_CASE_RECOMMENDATIONS = {
-     coding: ["claude-3-5-sonnet-20241022", "gpt-4o", "gemini-2.5-pro"],
-     creative: ["claude-3-5-sonnet-20241022", "gpt-4o", "gemini-2.5-pro"],
-     analysis: ["gemini-2.5-pro", "claude-3-5-sonnet-20241022", "gpt-4o"],
+     coding: [
+         AnthropicModels.CLAUDE_3_5_SONNET,
+         OpenAIModels.GPT_4O,
+         GoogleAIModels.GEMINI_2_5_PRO,
+     ],
+     creative: [
+         AnthropicModels.CLAUDE_3_5_SONNET,
+         OpenAIModels.GPT_4O,
+         GoogleAIModels.GEMINI_2_5_PRO,
+     ],
+     analysis: [
+         GoogleAIModels.GEMINI_2_5_PRO,
+         AnthropicModels.CLAUDE_3_5_SONNET,
+         OpenAIModels.GPT_4O,
+     ],
      conversation: [
-         "gpt-4o",
-         "claude-3-5-sonnet-20241022",
-         "claude-3-5-haiku-20241022",
+         OpenAIModels.GPT_4O,
+         AnthropicModels.CLAUDE_3_5_SONNET,
+         AnthropicModels.CLAUDE_3_5_HAIKU,
+     ],
+     reasoning: [
+         AnthropicModels.CLAUDE_3_5_SONNET,
+         GoogleAIModels.GEMINI_2_5_PRO,
+         OpenAIModels.GPT_4O,
+     ],
+     translation: [
+         GoogleAIModels.GEMINI_2_5_PRO,
+         OpenAIModels.GPT_4O,
+         AnthropicModels.CLAUDE_3_5_HAIKU,
      ],
-     reasoning: ["claude-3-5-sonnet-20241022", "gemini-2.5-pro", "gpt-4o"],
-     translation: ["gemini-2.5-pro", "gpt-4o", "claude-3-5-haiku-20241022"],
      summarization: [
-         "gemini-2.5-flash",
-         "gpt-4o-mini",
-         "claude-3-5-haiku-20241022",
+         GoogleAIModels.GEMINI_2_5_FLASH,
+         OpenAIModels.GPT_4O_MINI,
+         AnthropicModels.CLAUDE_3_5_HAIKU,
+     ],
+     "cost-effective": [
+         GoogleAIModels.GEMINI_2_5_FLASH,
+         OpenAIModels.GPT_4O_MINI,
+         "mistral-small-latest",
+     ],
+     "high-quality": [
+         AnthropicModels.CLAUDE_3_5_SONNET,
+         OpenAIModels.GPT_4O,
+         GoogleAIModels.GEMINI_2_5_PRO,
+     ],
+     fast: [
+         OpenAIModels.GPT_4O_MINI,
+         GoogleAIModels.GEMINI_2_5_FLASH,
+         AnthropicModels.CLAUDE_3_5_HAIKU,
      ],
-     "cost-effective": ["gemini-2.5-flash", "gpt-4o-mini", "mistral-small-latest"],
-     "high-quality": ["claude-3-5-sonnet-20241022", "gpt-4o", "gemini-2.5-pro"],
-     fast: ["gpt-4o-mini", "gemini-2.5-flash", "claude-3-5-haiku-20241022"],
  };
  /**
   * Get all models
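The rewritten alias block above derives kebab-case alias keys from the `DEFAULT_MODEL_ALIASES` constant names instead of hard-coding the strings. A small sketch of that key transformation, using two values taken from the table added in `core/types.js`:

```ts
// Each alias key is lower-cased and underscores become hyphens,
// so BEST_CODING -> "best-coding" and BEST_VALUE -> "best-value".
const sample = {
  BEST_CODING: "claude-3-5-sonnet-20241022",
  BEST_VALUE: "gemini-2.5-flash",
};
const aliases: Record<string, string> = {};
Object.entries(sample).forEach(([k, v]) => {
  aliases[k.toLowerCase().replace(/_/g, "-")] = v;
});
// aliases => { "best-coding": "claude-3-5-sonnet-20241022", "best-value": "gemini-2.5-flash" }
```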