@juspay/neurolink 8.26.0 → 8.27.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/README.md +47 -25
- package/dist/adapters/providerImageAdapter.js +11 -0
- package/dist/cli/commands/config.js +16 -23
- package/dist/cli/commands/setup-anthropic.js +3 -26
- package/dist/cli/commands/setup-azure.js +3 -22
- package/dist/cli/commands/setup-bedrock.js +3 -26
- package/dist/cli/commands/setup-google-ai.js +3 -22
- package/dist/cli/commands/setup-mistral.js +3 -31
- package/dist/cli/commands/setup-openai.js +3 -22
- package/dist/cli/factories/commandFactory.js +32 -0
- package/dist/cli/factories/ollamaCommandFactory.js +5 -17
- package/dist/cli/loop/optionsSchema.d.ts +1 -1
- package/dist/cli/loop/optionsSchema.js +13 -0
- package/dist/config/modelSpecificPrompts.d.ts +9 -0
- package/dist/config/modelSpecificPrompts.js +38 -0
- package/dist/constants/enums.d.ts +8 -0
- package/dist/constants/enums.js +8 -0
- package/dist/constants/tokens.d.ts +25 -0
- package/dist/constants/tokens.js +18 -0
- package/dist/core/analytics.js +7 -28
- package/dist/core/baseProvider.js +1 -0
- package/dist/core/constants.d.ts +1 -0
- package/dist/core/constants.js +1 -0
- package/dist/core/modules/GenerationHandler.js +43 -5
- package/dist/core/streamAnalytics.d.ts +1 -0
- package/dist/core/streamAnalytics.js +8 -16
- package/dist/lib/adapters/providerImageAdapter.js +11 -0
- package/dist/lib/config/modelSpecificPrompts.d.ts +9 -0
- package/dist/lib/config/modelSpecificPrompts.js +39 -0
- package/dist/lib/constants/enums.d.ts +8 -0
- package/dist/lib/constants/enums.js +8 -0
- package/dist/lib/constants/tokens.d.ts +25 -0
- package/dist/lib/constants/tokens.js +18 -0
- package/dist/lib/core/analytics.js +7 -28
- package/dist/lib/core/baseProvider.js +1 -0
- package/dist/lib/core/constants.d.ts +1 -0
- package/dist/lib/core/constants.js +1 -0
- package/dist/lib/core/modules/GenerationHandler.js +43 -5
- package/dist/lib/core/streamAnalytics.d.ts +1 -0
- package/dist/lib/core/streamAnalytics.js +8 -16
- package/dist/lib/providers/googleAiStudio.d.ts +15 -0
- package/dist/lib/providers/googleAiStudio.js +659 -3
- package/dist/lib/providers/googleVertex.d.ts +25 -0
- package/dist/lib/providers/googleVertex.js +978 -3
- package/dist/lib/providers/sagemaker/language-model.d.ts +2 -2
- package/dist/lib/types/analytics.d.ts +4 -0
- package/dist/lib/types/cli.d.ts +16 -0
- package/dist/lib/types/conversation.d.ts +72 -4
- package/dist/lib/types/conversation.js +30 -0
- package/dist/lib/types/generateTypes.d.ts +135 -0
- package/dist/lib/types/groundingTypes.d.ts +231 -0
- package/dist/lib/types/groundingTypes.js +12 -0
- package/dist/lib/types/providers.d.ts +29 -0
- package/dist/lib/types/streamTypes.d.ts +54 -0
- package/dist/lib/utils/analyticsUtils.js +22 -2
- package/dist/lib/utils/errorHandling.d.ts +65 -0
- package/dist/lib/utils/errorHandling.js +268 -0
- package/dist/lib/utils/modelChoices.d.ts +82 -0
- package/dist/lib/utils/modelChoices.js +402 -0
- package/dist/lib/utils/modelDetection.d.ts +9 -0
- package/dist/lib/utils/modelDetection.js +81 -0
- package/dist/lib/utils/parameterValidation.d.ts +59 -1
- package/dist/lib/utils/parameterValidation.js +196 -0
- package/dist/lib/utils/schemaConversion.d.ts +12 -0
- package/dist/lib/utils/schemaConversion.js +90 -0
- package/dist/lib/utils/thinkingConfig.d.ts +108 -0
- package/dist/lib/utils/thinkingConfig.js +105 -0
- package/dist/lib/utils/tokenUtils.d.ts +124 -0
- package/dist/lib/utils/tokenUtils.js +240 -0
- package/dist/lib/utils/transformationUtils.js +15 -26
- package/dist/providers/googleAiStudio.d.ts +15 -0
- package/dist/providers/googleAiStudio.js +659 -3
- package/dist/providers/googleVertex.d.ts +25 -0
- package/dist/providers/googleVertex.js +978 -3
- package/dist/types/analytics.d.ts +4 -0
- package/dist/types/cli.d.ts +16 -0
- package/dist/types/conversation.d.ts +72 -4
- package/dist/types/conversation.js +30 -0
- package/dist/types/generateTypes.d.ts +135 -0
- package/dist/types/groundingTypes.d.ts +231 -0
- package/dist/types/groundingTypes.js +11 -0
- package/dist/types/providers.d.ts +29 -0
- package/dist/types/streamTypes.d.ts +54 -0
- package/dist/utils/analyticsUtils.js +22 -2
- package/dist/utils/errorHandling.d.ts +65 -0
- package/dist/utils/errorHandling.js +268 -0
- package/dist/utils/modelChoices.d.ts +82 -0
- package/dist/utils/modelChoices.js +401 -0
- package/dist/utils/modelDetection.d.ts +9 -0
- package/dist/utils/modelDetection.js +80 -0
- package/dist/utils/parameterValidation.d.ts +59 -1
- package/dist/utils/parameterValidation.js +196 -0
- package/dist/utils/schemaConversion.d.ts +12 -0
- package/dist/utils/schemaConversion.js +90 -0
- package/dist/utils/thinkingConfig.d.ts +108 -0
- package/dist/utils/thinkingConfig.js +104 -0
- package/dist/utils/tokenUtils.d.ts +124 -0
- package/dist/utils/tokenUtils.js +239 -0
- package/dist/utils/transformationUtils.js +15 -26
- package/package.json +4 -3
|
@@ -0,0 +1,402 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Centralized model choices for CLI commands
|
|
3
|
+
* Derives choices from model enums to ensure consistency
|
|
4
|
+
*/
|
|
5
|
+
import { AIProviderName, OpenAIModels, AnthropicModels, GoogleAIModels, BedrockModels, VertexModels, MistralModels, OllamaModels, AzureOpenAIModels, LiteLLMModels, HuggingFaceModels, SageMakerModels, OpenRouterModels, } from "../constants/enums.js";
|
|
6
|
+
/**
 * Top models per provider with descriptions for CLI prompts
 * These are curated lists of the most commonly used/recommended models
 *
 * NOTE: entry order is significant — getTopModelChoices() takes the first
 * `limit` entries and getPopularModelsAcrossProviders() takes the first two,
 * so the recommended model is listed first for every provider.
 */
const TOP_MODELS_CONFIG = {
    // Keys are AIProviderName enum members; each value is an ordered list
    // of { model, description } pairs shown in CLI prompts.
    [AIProviderName.OPENAI]: [
        {
            model: OpenAIModels.GPT_4O,
            description: "Recommended - Latest multimodal model",
        },
        { model: OpenAIModels.GPT_4O_MINI, description: "Cost-effective, fast" },
        {
            model: OpenAIModels.GPT_5_2,
            description: "Latest flagship with deep reasoning",
        },
        { model: OpenAIModels.O3, description: "Advanced reasoning model" },
        { model: OpenAIModels.GPT_4_TURBO, description: "Previous generation" },
        {
            model: OpenAIModels.GPT_3_5_TURBO,
            description: "Legacy, most cost-effective",
        },
    ],
    [AIProviderName.ANTHROPIC]: [
        {
            model: AnthropicModels.CLAUDE_SONNET_4_5,
            description: "Recommended - Latest and most capable",
        },
        {
            model: AnthropicModels.CLAUDE_4_5_HAIKU,
            description: "Fast and cost-effective",
        },
        {
            model: AnthropicModels.CLAUDE_OPUS_4_5,
            description: "Most powerful for complex tasks",
        },
        {
            model: AnthropicModels.CLAUDE_3_5_SONNET,
            description: "Excellent reasoning and coding",
        },
        {
            model: AnthropicModels.CLAUDE_3_5_HAIKU,
            description: "Fast and economical",
        },
        {
            model: AnthropicModels.CLAUDE_3_OPUS,
            description: "Previous gen, powerful",
        },
    ],
    [AIProviderName.GOOGLE_AI]: [
        {
            model: GoogleAIModels.GEMINI_2_5_FLASH,
            description: "Recommended - Fast and efficient",
        },
        {
            model: GoogleAIModels.GEMINI_2_5_PRO,
            description: "Most capable, large context",
        },
        {
            model: GoogleAIModels.GEMINI_2_0_FLASH,
            description: "Stable production model",
        },
        {
            model: GoogleAIModels.GEMINI_3_PRO_PREVIEW,
            description: "Latest preview",
        },
        {
            model: GoogleAIModels.GEMINI_1_5_PRO,
            description: "Previous generation",
        },
        {
            model: GoogleAIModels.GEMINI_1_5_FLASH,
            description: "Legacy fast model",
        },
    ],
    [AIProviderName.VERTEX]: [
        {
            model: VertexModels.GEMINI_2_5_FLASH,
            description: "Recommended - Fast and efficient",
        },
        {
            model: VertexModels.GEMINI_2_5_PRO,
            description: "Most capable, large context",
        },
        { model: VertexModels.CLAUDE_4_5_SONNET, description: "Claude on Vertex" },
        {
            model: VertexModels.GEMINI_2_0_FLASH,
            description: "Stable production model",
        },
        { model: VertexModels.GEMINI_1_5_PRO, description: "Previous generation" },
        {
            model: VertexModels.CLAUDE_3_5_SONNET,
            description: "Claude 3.5 on Vertex",
        },
    ],
    [AIProviderName.BEDROCK]: [
        {
            model: BedrockModels.CLAUDE_4_5_SONNET,
            description: "Recommended - Latest Claude",
        },
        { model: BedrockModels.NOVA_PRO, description: "Amazon Nova balanced" },
        { model: BedrockModels.NOVA_LITE, description: "Fast and cost-effective" },
        { model: BedrockModels.CLAUDE_3_5_SONNET, description: "Excellent coding" },
        { model: BedrockModels.LLAMA_4_MAVERICK_17B, description: "Meta Llama 4" },
        { model: BedrockModels.MISTRAL_LARGE_3, description: "Mistral flagship" },
    ],
    [AIProviderName.AZURE]: [
        {
            model: AzureOpenAIModels.GPT_4O,
            description: "Recommended - Latest multimodal",
        },
        {
            model: AzureOpenAIModels.GPT_4O_MINI,
            description: "Cost-effective, fast",
        },
        { model: AzureOpenAIModels.GPT_5_1, description: "Latest flagship" },
        { model: AzureOpenAIModels.O3, description: "Advanced reasoning" },
        {
            model: AzureOpenAIModels.GPT_4_TURBO,
            description: "Previous generation",
        },
        { model: AzureOpenAIModels.GPT_3_5_TURBO, description: "Legacy model" },
    ],
    [AIProviderName.MISTRAL]: [
        {
            model: MistralModels.MISTRAL_LARGE_LATEST,
            description: "Recommended - Flagship model",
        },
        {
            model: MistralModels.MISTRAL_SMALL_LATEST,
            description: "Cost-effective",
        },
        {
            model: MistralModels.CODESTRAL_LATEST,
            description: "Specialized for code",
        },
        { model: MistralModels.PIXTRAL_LARGE, description: "Multimodal vision" },
        {
            model: MistralModels.MAGISTRAL_MEDIUM_LATEST,
            description: "Reasoning model",
        },
        { model: MistralModels.MISTRAL_NEMO, description: "Efficient base model" },
    ],
    [AIProviderName.OLLAMA]: [
        {
            model: OllamaModels.LLAMA4_LATEST,
            description: "Recommended - Latest Llama 4",
        },
        {
            model: OllamaModels.LLAMA3_3_LATEST,
            description: "High-performance local",
        },
        { model: OllamaModels.DEEPSEEK_R1_70B, description: "Advanced reasoning" },
        { model: OllamaModels.QWEN3_72B, description: "Multilingual reasoning" },
        { model: OllamaModels.MISTRAL_LARGE_LATEST, description: "Mistral local" },
        {
            model: OllamaModels.LLAMA3_2_LATEST,
            description: "Efficient local model",
        },
    ],
    [AIProviderName.LITELLM]: [
        { model: LiteLLMModels.OPENAI_GPT_4O, description: "OpenAI via LiteLLM" },
        {
            model: LiteLLMModels.ANTHROPIC_CLAUDE_SONNET_4_5,
            description: "Anthropic via LiteLLM",
        },
        { model: LiteLLMModels.GEMINI_2_5_PRO, description: "Google via LiteLLM" },
        {
            model: LiteLLMModels.GROQ_LLAMA_3_1_70B_VERSATILE,
            description: "Groq via LiteLLM",
        },
        { model: LiteLLMModels.MISTRAL_LARGE, description: "Mistral via LiteLLM" },
        {
            model: LiteLLMModels.VERTEX_GEMINI_2_5_PRO,
            description: "Vertex via LiteLLM",
        },
    ],
    [AIProviderName.HUGGINGFACE]: [
        {
            model: HuggingFaceModels.LLAMA_3_3_70B_INSTRUCT,
            description: "Recommended - Latest Llama",
        },
        {
            model: HuggingFaceModels.MISTRAL_LARGE_3_675B,
            description: "Mistral Large",
        },
        { model: HuggingFaceModels.DEEPSEEK_R1, description: "Advanced reasoning" },
        {
            model: HuggingFaceModels.QWEN_2_5_72B_INSTRUCT,
            description: "Qwen flagship",
        },
        { model: HuggingFaceModels.PHI_4, description: "Microsoft Phi-4" },
        { model: HuggingFaceModels.GEMMA_3_27B_IT, description: "Google Gemma 3" },
    ],
    [AIProviderName.SAGEMAKER]: [
        {
            model: SageMakerModels.LLAMA_4_MAVERICK_17B_128E,
            description: "Recommended - Llama 4",
        },
        { model: SageMakerModels.LLAMA_3_70B, description: "Meta Llama 3 70B" },
        { model: SageMakerModels.MISTRAL_SMALL_24B, description: "Mistral Small" },
        { model: SageMakerModels.MIXTRAL_8X7B, description: "Mixtral MoE" },
        { model: SageMakerModels.FALCON_3_10B, description: "Falcon 3" },
        { model: SageMakerModels.CODE_LLAMA_34B, description: "Code Llama" },
    ],
    [AIProviderName.OPENROUTER]: [
        {
            model: OpenRouterModels.CLAUDE_3_5_SONNET,
            description: "Anthropic via OpenRouter",
        },
        { model: OpenRouterModels.GPT_4O, description: "OpenAI via OpenRouter" },
        {
            model: OpenRouterModels.GEMINI_2_0_FLASH,
            description: "Google via OpenRouter",
        },
        { model: OpenRouterModels.LLAMA_3_1_70B, description: "Meta Llama" },
        {
            model: OpenRouterModels.MISTRAL_LARGE,
            description: "Mistral via OpenRouter",
        },
        { model: OpenRouterModels.MIXTRAL_8X7B, description: "Mixtral MoE" },
    ],
    // OpenAI-compatible endpoints have no model enum; plain string ids are used.
    [AIProviderName.OPENAI_COMPATIBLE]: [
        { model: "gpt-4o", description: "OpenAI-compatible model" },
        { model: "gpt-4o-mini", description: "Fast compatible model" },
        { model: "gpt-4-turbo", description: "Turbo compatible model" },
        { model: "gpt-3.5-turbo", description: "Legacy compatible model" },
    ],
    // AUTO has no curated list: getTopModelChoices() returns [] for it.
    [AIProviderName.AUTO]: [],
};
|
|
235
|
+
/**
 * Default models per provider (first choice/recommended)
 *
 * Each value mirrors the first (recommended) entry of the provider's
 * TOP_MODELS_CONFIG list. There is deliberately no key for
 * AIProviderName.AUTO, so getDefaultModel() yields undefined for it.
 */
export const DEFAULT_MODELS = {
    [AIProviderName.OPENAI]: OpenAIModels.GPT_4O,
    [AIProviderName.ANTHROPIC]: AnthropicModels.CLAUDE_SONNET_4_5,
    [AIProviderName.GOOGLE_AI]: GoogleAIModels.GEMINI_2_5_FLASH,
    [AIProviderName.VERTEX]: VertexModels.GEMINI_2_5_FLASH,
    [AIProviderName.BEDROCK]: BedrockModels.CLAUDE_4_5_SONNET,
    [AIProviderName.AZURE]: AzureOpenAIModels.GPT_4O,
    [AIProviderName.MISTRAL]: MistralModels.MISTRAL_LARGE_LATEST,
    [AIProviderName.OLLAMA]: OllamaModels.LLAMA4_LATEST,
    [AIProviderName.LITELLM]: LiteLLMModels.OPENAI_GPT_4O,
    [AIProviderName.HUGGINGFACE]: HuggingFaceModels.LLAMA_3_3_70B_INSTRUCT,
    [AIProviderName.SAGEMAKER]: SageMakerModels.LLAMA_4_MAVERICK_17B_128E,
    [AIProviderName.OPENROUTER]: OpenRouterModels.CLAUDE_3_5_SONNET,
    // No enum for openai-compatible targets; a plain string id is the default.
    [AIProviderName.OPENAI_COMPATIBLE]: "gpt-4o",
};
|
|
253
|
+
/**
 * Model enum mappings for getAllModels
 *
 * Maps each provider to its model enum. Providers mapped to null
 * (openai-compatible, auto) have no fixed model list: getAllModels()
 * returns [] for them, which in turn makes isValidModel() accept any
 * model name for those providers.
 */
const MODEL_ENUMS = {
    [AIProviderName.OPENAI]: OpenAIModels,
    [AIProviderName.ANTHROPIC]: AnthropicModels,
    [AIProviderName.GOOGLE_AI]: GoogleAIModels,
    [AIProviderName.VERTEX]: VertexModels,
    [AIProviderName.BEDROCK]: BedrockModels,
    [AIProviderName.AZURE]: AzureOpenAIModels,
    [AIProviderName.MISTRAL]: MistralModels,
    [AIProviderName.OLLAMA]: OllamaModels,
    [AIProviderName.LITELLM]: LiteLLMModels,
    [AIProviderName.HUGGINGFACE]: HuggingFaceModels,
    [AIProviderName.SAGEMAKER]: SageMakerModels,
    [AIProviderName.OPENROUTER]: OpenRouterModels,
    [AIProviderName.OPENAI_COMPATIBLE]: null,
    [AIProviderName.AUTO]: null,
};
|
|
272
|
+
/**
 * Get top model choices for a provider (for CLI prompts).
 * Returns choices formatted for inquirer list prompts.
 *
 * @param provider - The AI provider to get models for
 * @param limit - Maximum number of curated models to include (default: 5)
 * @returns Array of { name, value, description } choice objects with a
 *          trailing "custom" (manual entry) option appended; empty array
 *          when the provider has no curated list.
 */
export function getTopModelChoices(provider, limit = 5) {
    const curated = TOP_MODELS_CONFIG[provider];
    if (!curated || curated.length === 0) {
        return [];
    }
    const choices = [];
    for (const entry of curated.slice(0, limit)) {
        choices.push({
            name: `${entry.model} (${entry.description})`,
            value: entry.model,
            description: entry.description,
        });
    }
    // The manual-entry escape hatch is always offered last.
    choices.push({
        name: "Custom model (enter manually)",
        value: "custom",
        description: "Enter a custom model name",
    });
    return choices;
}
|
|
298
|
+
/**
 * Get all available models for a provider.
 *
 * @param provider - The AI provider to get models for
 * @returns Every value of the provider's model enum, or an empty array
 *          for providers without a fixed model list (openai-compatible, auto).
 */
export function getAllModels(provider) {
    const modelEnum = MODEL_ENUMS[provider];
    return modelEnum ? Object.values(modelEnum) : [];
}
|
|
312
|
+
/**
 * Get available provider choices for CLI prompts.
 * AUTO is excluded because it is a resolution mode, not a concrete provider.
 *
 * @returns Array of provider name strings (every AIProviderName except AUTO)
 */
export function getProviderChoices() {
    const providers = Object.values(AIProviderName);
    return providers.filter((name) => name !== AIProviderName.AUTO);
}
|
|
321
|
+
/**
 * Get every provider choice, including AUTO.
 *
 * @returns Array of all provider name strings
 */
export function getAllProviderChoices() {
    const providers = Object.values(AIProviderName);
    return providers;
}
|
|
329
|
+
/**
 * Get the default (recommended) model for a provider.
 *
 * @param provider - The AI provider
 * @returns Default model string from DEFAULT_MODELS; undefined for
 *          providers without an entry (e.g. auto)
 */
export function getDefaultModel(provider) {
    const { [provider]: defaultModel } = DEFAULT_MODELS;
    return defaultModel;
}
|
|
338
|
+
/**
 * Check whether a model is valid for a given provider.
 *
 * @param provider - The AI provider
 * @param model - The model identifier to check
 * @returns true when the model is in the provider's enum, or when the
 *          provider has no strict model list at all (e.g. openai-compatible),
 *          in which case any model name is accepted
 */
export function isValidModel(provider, model) {
    const knownModels = getAllModels(provider);
    // An empty list means "no strict catalog" — accept anything.
    return knownModels.length === 0 || knownModels.includes(model);
}
|
|
353
|
+
/**
 * Get model choices formatted for inquirer prompts, marking an existing model.
 *
 * @param provider - The AI provider
 * @param currentModel - Current/existing model to highlight, if any
 * @param limit - Maximum number of curated models to return (default: 5)
 * @returns Choice list in which the current model is either prepended
 *          (when not already listed) or annotated with ", current"
 */
export function getModelChoicesWithDefault(provider, currentModel, limit = 5) {
    const choices = getTopModelChoices(provider, limit);
    if (!currentModel) {
        return choices;
    }
    const position = choices.findIndex((choice) => choice.value === currentModel);
    if (position === -1) {
        // Not in the curated list: surface it at the top.
        choices.unshift({
            name: `${currentModel} (current)`,
            value: currentModel,
            description: "Currently configured model",
        });
    }
    else {
        // Already listed: tag the trailing ")" of its label with ", current)".
        choices[position].name = choices[position].name.replace(/\)$/, ", current)");
    }
    return choices;
}
|
|
380
|
+
/**
 * Get a flat list of popular models across all providers — the first two
 * curated entries per provider. Useful for suggestions and auto-complete.
 *
 * @returns Array of { provider, model, description } objects
 */
export function getPopularModelsAcrossProviders() {
    const popularModels = [];
    for (const [provider, config] of Object.entries(TOP_MODELS_CONFIG)) {
        if (!config || config.length === 0) {
            continue; // providers like "auto" contribute nothing
        }
        for (const { model, description } of config.slice(0, 2)) {
            popularModels.push({ provider, model, description });
        }
    }
    return popularModels;
}
|
|
402
|
+
//# sourceMappingURL=modelChoices.js.map
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
/**
 * Model detection utilities for capability checking
 *
 * All functions treat the model name case-insensitively and return a safe
 * fallback (false / default budget / "unknown") for empty or non-string input.
 */
/** True for "gemini-3" exactly or any "gemini-3-<variant>" id. */
export declare function isGemini3Model(modelName: string): boolean;
/** True for "gemini-2.5" exactly or any "gemini-2.5-<variant>" id. */
export declare function isGemini25Model(modelName: string): boolean;
/** True when the model accepts a thinking config (gemini-3, gemini-2.5-pro/flash families). */
export declare function supportsThinkingConfig(modelName: string): boolean;
/** True when the model supports prompt caching (gemini-3, gemini-2.5, gpt-4, claude-3 families). */
export declare function supportsPromptCaching(modelName: string): boolean;
/** Maximum thinking-budget tokens: 100000 (gemini-3-pro), 50000 (gemini-3-flash), 32000 (gemini-2.5), else 10000. */
export declare function getMaxThinkingBudgetTokens(modelName: string): number;
/** Coarse family label ("gemini-3", "gemini-2.5", "gemini-2", "gpt-4", "claude-3") or "unknown". */
export declare function getModelFamily(modelName: string): string;
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Model detection utilities for capability checking
|
|
3
|
+
*/
|
|
4
|
+
/**
 * Guard shared by every detector in this module: a usable model name must
 * be a non-empty string. Anything else (undefined, null, numbers, "") is
 * rejected so callers can fall back to a safe default.
 */
function isValidModelName(modelName) {
    if (typeof modelName !== "string") {
        return false;
    }
    return modelName.length > 0;
}
|
|
10
|
+
/**
 * Whether the id names the Gemini 3 family: exactly "gemini-3" or
 * "gemini-3-<variant>", matched case-insensitively. Invalid input → false.
 */
export function isGemini3Model(modelName) {
    return isValidModelName(modelName) && /^gemini-3(-.*)?$/i.test(modelName);
}
|
|
16
|
+
/**
 * Whether the id names the Gemini 2.5 family: exactly "gemini-2.5" or
 * "gemini-2.5-<variant>", matched case-insensitively. Invalid input → false.
 */
export function isGemini25Model(modelName) {
    return isValidModelName(modelName) && /^gemini-2\.5(-.*)?$/i.test(modelName);
}
|
|
22
|
+
/**
 * Whether the model accepts a thinking configuration: any gemini-3 model,
 * or the gemini-2.5 pro/flash lines (case-insensitive prefix match).
 * Invalid input → false.
 */
export function supportsThinkingConfig(modelName) {
    if (!isValidModelName(modelName)) {
        return false;
    }
    return (/^gemini-3/i.test(modelName) ||
        /^gemini-2\.5-pro/i.test(modelName) ||
        /^gemini-2\.5-flash/i.test(modelName));
}
|
|
33
|
+
/**
 * Whether the model supports prompt caching: gemini-3, gemini-2.5, gpt-4,
 * or claude-3 families (case-insensitive prefix match). Invalid input → false.
 */
export function supportsPromptCaching(modelName) {
    if (!isValidModelName(modelName)) {
        return false;
    }
    return (/^gemini-3/i.test(modelName) ||
        /^gemini-2\.5/i.test(modelName) ||
        /^gpt-4/i.test(modelName) ||
        /^claude-3/i.test(modelName));
}
|
|
45
|
+
/**
 * Maximum thinking-budget tokens for the model.
 * 100000 for gemini-3-pro, 50000 for gemini-3-flash, 32000 for gemini-2.5;
 * every other (or invalid) model name gets the conservative 10000 default.
 */
export function getMaxThinkingBudgetTokens(modelName) {
    if (!isValidModelName(modelName)) {
        return 10000;
    }
    // Most specific prefixes first: gemini-3-pro must win over gemini-3-flash.
    return /^gemini-3-pro/i.test(modelName) ? 100000
        : /^gemini-3-flash/i.test(modelName) ? 50000
            : /^gemini-2\.5/i.test(modelName) ? 32000
                : 10000;
}
|
|
60
|
+
/**
 * Coarse family label for a model name, matched by case-insensitive prefix.
 * Checks gemini-2.5 before gemini-2 so the more specific family wins.
 * Returns "unknown" for unrecognized or invalid names.
 */
export function getModelFamily(modelName) {
    if (!isValidModelName(modelName)) {
        return "unknown";
    }
    return /^gemini-3/i.test(modelName) ? "gemini-3"
        : /^gemini-2\.5/i.test(modelName) ? "gemini-2.5"
            : /^gemini-2/i.test(modelName) ? "gemini-2"
                : /^gpt-4/i.test(modelName) ? "gpt-4"
                    : /^claude-3/i.test(modelName) ? "claude-3"
                        : "unknown";
}
|
|
81
|
+
//# sourceMappingURL=modelDetection.js.map
|
|
@@ -2,8 +2,11 @@
|
|
|
2
2
|
* Parameter Validation Utilities
|
|
3
3
|
* Provides consistent parameter validation across all tool interfaces
|
|
4
4
|
*/
|
|
5
|
-
import type {
|
|
5
|
+
import type { GenerateOptions } from "../types/generateTypes.js";
|
|
6
|
+
import type { VideoOutputOptions } from "../types/multimodal.js";
|
|
6
7
|
import type { EnhancedValidationResult } from "../types/tools.js";
|
|
8
|
+
import type { StringArray, ValidationSchema } from "../types/typeAliases.js";
|
|
9
|
+
import { NeuroLinkError } from "./errorHandling.js";
|
|
7
10
|
/**
|
|
8
11
|
* Custom error class for parameter validation failures
|
|
9
12
|
* Provides detailed information about validation errors including field context and suggestions
|
|
@@ -74,6 +77,61 @@ export declare function validateToolBatch(tools: Record<string, unknown>): {
|
|
|
74
77
|
invalidTools: string[];
|
|
75
78
|
results: Record<string, EnhancedValidationResult>;
|
|
76
79
|
};
|
|
80
|
+
/**
|
|
81
|
+
* Validate video output options (resolution, length, aspect ratio, audio)
|
|
82
|
+
*
|
|
83
|
+
* @param options - VideoOutputOptions to validate
|
|
84
|
+
* @returns NeuroLinkError if invalid, null if valid
|
|
85
|
+
*
|
|
86
|
+
* @example
|
|
87
|
+
* ```typescript
|
|
88
|
+
* const error = validateVideoOutputOptions({ resolution: "4K", length: 10 });
|
|
89
|
+
* // error.code === "INVALID_VIDEO_RESOLUTION"
|
|
90
|
+
* ```
|
|
91
|
+
*/
|
|
92
|
+
export declare function validateVideoOutputOptions(options: VideoOutputOptions): NeuroLinkError | null;
|
|
93
|
+
/**
|
|
94
|
+
* Validate image input for video generation
|
|
95
|
+
*
|
|
96
|
+
* Checks image format (magic bytes) and size constraints.
|
|
97
|
+
* Supports JPEG, PNG, and WebP formats.
|
|
98
|
+
*
|
|
99
|
+
* @param image - Image buffer to validate
|
|
100
|
+
* @param maxSize - Maximum allowed size in bytes (default: 10MB)
|
|
101
|
+
* @returns NeuroLinkError if invalid, null if valid
|
|
102
|
+
*
|
|
103
|
+
* @example
|
|
104
|
+
* ```typescript
|
|
105
|
+
* const imageBuffer = readFileSync("product.jpg");
|
|
106
|
+
* const error = validateImageForVideo(imageBuffer);
|
|
107
|
+
* if (error) throw error;
|
|
108
|
+
* ```
|
|
109
|
+
*/
|
|
110
|
+
export declare function validateImageForVideo(image: Buffer | string, maxSize?: number): NeuroLinkError | null;
|
|
111
|
+
/**
|
|
112
|
+
* Validate complete video generation input
|
|
113
|
+
*
|
|
114
|
+
* Validates all requirements for video generation:
|
|
115
|
+
* - output.mode must be "video"
|
|
116
|
+
* - Must have exactly one input image
|
|
117
|
+
* - Prompt must be within length limits
|
|
118
|
+
* - Video output options must be valid
|
|
119
|
+
*
|
|
120
|
+
* @param options - GenerateOptions to validate for video generation
|
|
121
|
+
* @returns EnhancedValidationResult with errors, warnings, and suggestions
|
|
122
|
+
*
|
|
123
|
+
* @example
|
|
124
|
+
* ```typescript
|
|
125
|
+
* const validation = validateVideoGenerationInput({
|
|
126
|
+
* input: { text: "Product showcase video", images: [imageBuffer] },
|
|
127
|
+
* output: { mode: "video", video: { resolution: "1080p" } }
|
|
128
|
+
* });
|
|
129
|
+
* if (!validation.isValid) {
|
|
130
|
+
* console.error(validation.errors);
|
|
131
|
+
* }
|
|
132
|
+
* ```
|
|
133
|
+
*/
|
|
134
|
+
export declare function validateVideoGenerationInput(options: GenerateOptions): EnhancedValidationResult;
|
|
77
135
|
/**
|
|
78
136
|
* Create a validation error summary for logging
|
|
79
137
|
*/
|