@juspay/neurolink 8.26.0 → 8.26.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. package/CHANGELOG.md +6 -0
  2. package/README.md +47 -25
  3. package/dist/adapters/providerImageAdapter.js +11 -0
  4. package/dist/cli/commands/config.js +16 -23
  5. package/dist/cli/commands/setup-anthropic.js +3 -26
  6. package/dist/cli/commands/setup-azure.js +3 -22
  7. package/dist/cli/commands/setup-bedrock.js +3 -26
  8. package/dist/cli/commands/setup-google-ai.js +3 -22
  9. package/dist/cli/commands/setup-mistral.js +3 -31
  10. package/dist/cli/commands/setup-openai.js +3 -22
  11. package/dist/cli/factories/commandFactory.js +32 -0
  12. package/dist/cli/factories/ollamaCommandFactory.js +5 -17
  13. package/dist/cli/loop/optionsSchema.d.ts +1 -1
  14. package/dist/cli/loop/optionsSchema.js +13 -0
  15. package/dist/config/modelSpecificPrompts.d.ts +9 -0
  16. package/dist/config/modelSpecificPrompts.js +38 -0
  17. package/dist/constants/enums.d.ts +8 -0
  18. package/dist/constants/enums.js +8 -0
  19. package/dist/constants/tokens.d.ts +25 -0
  20. package/dist/constants/tokens.js +18 -0
  21. package/dist/core/analytics.js +7 -28
  22. package/dist/core/baseProvider.js +1 -0
  23. package/dist/core/constants.d.ts +1 -0
  24. package/dist/core/constants.js +1 -0
  25. package/dist/core/modules/GenerationHandler.js +43 -5
  26. package/dist/core/streamAnalytics.d.ts +1 -0
  27. package/dist/core/streamAnalytics.js +8 -16
  28. package/dist/lib/adapters/providerImageAdapter.js +11 -0
  29. package/dist/lib/config/modelSpecificPrompts.d.ts +9 -0
  30. package/dist/lib/config/modelSpecificPrompts.js +39 -0
  31. package/dist/lib/constants/enums.d.ts +8 -0
  32. package/dist/lib/constants/enums.js +8 -0
  33. package/dist/lib/constants/tokens.d.ts +25 -0
  34. package/dist/lib/constants/tokens.js +18 -0
  35. package/dist/lib/core/analytics.js +7 -28
  36. package/dist/lib/core/baseProvider.js +1 -0
  37. package/dist/lib/core/constants.d.ts +1 -0
  38. package/dist/lib/core/constants.js +1 -0
  39. package/dist/lib/core/modules/GenerationHandler.js +43 -5
  40. package/dist/lib/core/streamAnalytics.d.ts +1 -0
  41. package/dist/lib/core/streamAnalytics.js +8 -16
  42. package/dist/lib/providers/googleAiStudio.d.ts +15 -0
  43. package/dist/lib/providers/googleAiStudio.js +659 -3
  44. package/dist/lib/providers/googleVertex.d.ts +25 -0
  45. package/dist/lib/providers/googleVertex.js +978 -3
  46. package/dist/lib/types/analytics.d.ts +4 -0
  47. package/dist/lib/types/cli.d.ts +16 -0
  48. package/dist/lib/types/conversation.d.ts +72 -4
  49. package/dist/lib/types/conversation.js +30 -0
  50. package/dist/lib/types/generateTypes.d.ts +135 -0
  51. package/dist/lib/types/groundingTypes.d.ts +231 -0
  52. package/dist/lib/types/groundingTypes.js +12 -0
  53. package/dist/lib/types/providers.d.ts +29 -0
  54. package/dist/lib/types/streamTypes.d.ts +54 -0
  55. package/dist/lib/utils/analyticsUtils.js +22 -2
  56. package/dist/lib/utils/modelChoices.d.ts +82 -0
  57. package/dist/lib/utils/modelChoices.js +402 -0
  58. package/dist/lib/utils/modelDetection.d.ts +9 -0
  59. package/dist/lib/utils/modelDetection.js +81 -0
  60. package/dist/lib/utils/schemaConversion.d.ts +12 -0
  61. package/dist/lib/utils/schemaConversion.js +90 -0
  62. package/dist/lib/utils/thinkingConfig.d.ts +108 -0
  63. package/dist/lib/utils/thinkingConfig.js +105 -0
  64. package/dist/lib/utils/tokenUtils.d.ts +124 -0
  65. package/dist/lib/utils/tokenUtils.js +240 -0
  66. package/dist/lib/utils/transformationUtils.js +15 -26
  67. package/dist/providers/googleAiStudio.d.ts +15 -0
  68. package/dist/providers/googleAiStudio.js +659 -3
  69. package/dist/providers/googleVertex.d.ts +25 -0
  70. package/dist/providers/googleVertex.js +978 -3
  71. package/dist/types/analytics.d.ts +4 -0
  72. package/dist/types/cli.d.ts +16 -0
  73. package/dist/types/conversation.d.ts +72 -4
  74. package/dist/types/conversation.js +30 -0
  75. package/dist/types/generateTypes.d.ts +135 -0
  76. package/dist/types/groundingTypes.d.ts +231 -0
  77. package/dist/types/groundingTypes.js +11 -0
  78. package/dist/types/providers.d.ts +29 -0
  79. package/dist/types/streamTypes.d.ts +54 -0
  80. package/dist/utils/analyticsUtils.js +22 -2
  81. package/dist/utils/modelChoices.d.ts +82 -0
  82. package/dist/utils/modelChoices.js +401 -0
  83. package/dist/utils/modelDetection.d.ts +9 -0
  84. package/dist/utils/modelDetection.js +80 -0
  85. package/dist/utils/schemaConversion.d.ts +12 -0
  86. package/dist/utils/schemaConversion.js +90 -0
  87. package/dist/utils/thinkingConfig.d.ts +108 -0
  88. package/dist/utils/thinkingConfig.js +104 -0
  89. package/dist/utils/tokenUtils.d.ts +124 -0
  90. package/dist/utils/tokenUtils.js +239 -0
  91. package/dist/utils/transformationUtils.js +15 -26
  92. package/package.json +4 -3
package/CHANGELOG.md CHANGED
@@ -1,3 +1,9 @@
1
+ ## [8.26.1](https://github.com/juspay/neurolink/compare/v8.26.0...v8.26.1) (2025-12-31)
2
+
3
+ ### Bug Fixes
4
+
5
+ - **(providers):** resolve Gemini 3 issues, add utilities, improve tests ([270ef6f](https://github.com/juspay/neurolink/commit/270ef6f225e7861846cf359f2d81edae38592053))
6
+
1
7
  ## [8.26.0](https://github.com/juspay/neurolink/compare/v8.25.0...v8.26.0) (2025-12-30)
2
8
 
3
9
  ### Features
package/README.md CHANGED
@@ -25,6 +25,7 @@ Extracted from production systems at Juspay and battle-tested at enterprise scal
25
25
 
26
26
  ## What's New (Q4 2025)
27
27
 
28
+ - 🧠 **Gemini 3 Preview Support** - Full support for gemini-3-flash-preview and gemini-3-pro-preview with extended thinking capabilities
28
29
  - **Structured Output with Zod Schemas** – Type-safe JSON generation with automatic validation using `schema` + `output.format: "json"` in `generate()`. → [Structured Output Guide](docs/features/structured-output.md)
29
30
  - **CSV File Support** – Attach CSV files to prompts for AI-powered data analysis with auto-detection. → [CSV Guide](docs/features/multimodal-chat.md#csv-file-support)
30
31
  - **PDF File Support** – Process PDF documents with native visual analysis for Vertex AI, Anthropic, Bedrock, AI Studio. → [PDF Guide](docs/features/pdf-support.md)
@@ -58,21 +59,21 @@ NeuroLink is a comprehensive AI development platform. Every feature below is pro
58
59
 
59
60
  **13 providers unified under one API** - Switch providers with a single parameter change.
60
61
 
61
- | Provider | Models | Free Tier | Tool Support | Status | Documentation |
62
- | --------------------- | ------------------------------ | --------------- | ------------ | ------------- | ----------------------------------------------------------------------- |
63
- | **OpenAI** | GPT-4o, GPT-4o-mini, o1 | ❌ | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#openai) |
64
- | **Anthropic** | Claude 3.5/3.7 Sonnet, Opus | ❌ | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#anthropic) |
65
- | **Google AI Studio** | Gemini 2.5 Flash/Pro | ✅ Free Tier | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#google-ai) |
66
- | **AWS Bedrock** | Claude, Titan, Llama, Nova | ❌ | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#bedrock) |
67
- | **Google Vertex** | Gemini via GCP | ❌ | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#vertex) |
68
- | **Azure OpenAI** | GPT-4, GPT-4o, o1 | ❌ | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#azure) |
69
- | **LiteLLM** | 100+ models unified | Varies | ✅ Full | ✅ Production | [Setup Guide](docs/LITELLM-INTEGRATION.md) |
70
- | **OpenRouter** | 300+ models unified | Varies | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#openrouter) |
71
- | **AWS SageMaker** | Custom deployed models | ❌ | ✅ Full | ✅ Production | [Setup Guide](docs/SAGEMAKER-INTEGRATION.md) |
72
- | **Mistral AI** | Mistral Large, Small | ✅ Free Tier | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#mistral) |
73
- | **Hugging Face** | 100,000+ models | ✅ Free | ⚠️ Partial | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#huggingface) |
74
- | **Ollama** | Local models (Llama, Mistral) | ✅ Free (Local) | ⚠️ Partial | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#ollama) |
75
- | **OpenAI Compatible** | Any OpenAI-compatible endpoint | Varies | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#openai-compatible) |
62
+ | Provider | Models | Free Tier | Tool Support | Status | Documentation |
63
+ | --------------------- | ---------------------------------- | --------------- | ------------ | ------------- | ----------------------------------------------------------------------- |
64
+ | **OpenAI** | GPT-4o, GPT-4o-mini, o1 | ❌ | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#openai) |
65
+ | **Anthropic** | Claude 3.5/3.7 Sonnet, Opus | ❌ | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#anthropic) |
66
+ | **Google AI Studio** | Gemini 2.5 Flash/Pro | ✅ Free Tier | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#google-ai) |
67
+ | **AWS Bedrock** | Claude, Titan, Llama, Nova | ❌ | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#bedrock) |
68
+ | **Google Vertex** | Gemini 3/2.5 (gemini-3-\*-preview) | ❌ | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#vertex) |
69
+ | **Azure OpenAI** | GPT-4, GPT-4o, o1 | ❌ | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#azure) |
70
+ | **LiteLLM** | 100+ models unified | Varies | ✅ Full | ✅ Production | [Setup Guide](docs/LITELLM-INTEGRATION.md) |
71
+ | **OpenRouter** | 300+ models unified | Varies | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#openrouter) |
72
+ | **AWS SageMaker** | Custom deployed models | ❌ | ✅ Full | ✅ Production | [Setup Guide](docs/SAGEMAKER-INTEGRATION.md) |
73
+ | **Mistral AI** | Mistral Large, Small | ✅ Free Tier | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#mistral) |
74
+ | **Hugging Face** | 100,000+ models | ✅ Free | ⚠️ Partial | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#huggingface) |
75
+ | **Ollama** | Local models (Llama, Mistral) | ✅ Free (Local) | ⚠️ Partial | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#ollama) |
76
+ | **OpenAI Compatible** | Any OpenAI-compatible endpoint | Varies | ✅ Full | ✅ Production | [Setup Guide](docs/getting-started/provider-setup.md#openai-compatible) |
76
77
 
77
78
  **[📖 Provider Comparison Guide](docs/reference/provider-comparison.md)** - Detailed feature matrix and selection criteria
78
79
  **[🔬 Provider Feature Compatibility](docs/reference/provider-feature-compatibility.md)** - Test-based compatibility reference for all 19 features across 11 providers
@@ -117,16 +118,17 @@ const result = await neurolink.generate({
117
118
 
118
119
  **SDK-First Design** with TypeScript, IntelliSense, and type safety:
119
120
 
120
- | Feature | Description | Documentation |
121
- | --------------------------- | ------------------------------ | ----------------------------------------------------- |
122
- | **Auto Provider Selection** | Intelligent provider fallback | [SDK Guide](docs/sdk/index.md#auto-selection) |
123
- | **Streaming Responses** | Real-time token streaming | [Streaming Guide](docs/advanced/streaming.md) |
124
- | **Conversation Memory** | Automatic context management | [Memory Guide](docs/sdk/index.md#memory) |
125
- | **Full Type Safety** | Complete TypeScript types | [Type Reference](docs/sdk/api-reference.md) |
126
- | **Error Handling** | Graceful provider fallback | [Error Guide](docs/reference/troubleshooting.md) |
127
- | **Analytics & Evaluation** | Usage tracking, quality scores | [Analytics Guide](docs/advanced/analytics.md) |
128
- | **Middleware System** | Request/response hooks | [Middleware Guide](docs/CUSTOM-MIDDLEWARE-GUIDE.md) |
129
- | **Framework Integration** | Next.js, SvelteKit, Express | [Framework Guides](docs/sdk/framework-integration.md) |
121
+ | Feature | Description | Documentation |
122
+ | --------------------------- | ------------------------------------------------------------- | --------------------------------------------------------- |
123
+ | **Auto Provider Selection** | Intelligent provider fallback | [SDK Guide](docs/sdk/index.md#auto-selection) |
124
+ | **Streaming Responses** | Real-time token streaming | [Streaming Guide](docs/advanced/streaming.md) |
125
+ | **Conversation Memory** | Automatic context management | [Memory Guide](docs/sdk/index.md#memory) |
126
+ | **Full Type Safety** | Complete TypeScript types | [Type Reference](docs/sdk/api-reference.md) |
127
+ | **Error Handling** | Graceful provider fallback | [Error Guide](docs/reference/troubleshooting.md) |
128
+ | **Analytics & Evaluation** | Usage tracking, quality scores | [Analytics Guide](docs/advanced/analytics.md) |
129
+ | **Middleware System** | Request/response hooks | [Middleware Guide](docs/CUSTOM-MIDDLEWARE-GUIDE.md) |
130
+ | **Framework Integration** | Next.js, SvelteKit, Express | [Framework Guides](docs/sdk/framework-integration.md) |
131
+ | **Extended Thinking** | Native thinking/reasoning mode for Gemini 3 and Claude models | [Thinking Guide](docs/features/thinking-configuration.md) |
130
132
 
131
133
  ---
132
134
 
@@ -293,6 +295,26 @@ console.log(result.content);
293
295
  console.log(result.evaluation?.overallScore);
294
296
  ```
295
297
 
298
+ ### Gemini 3 with Extended Thinking
299
+
300
+ ```typescript
301
+ import { NeuroLink } from "@juspay/neurolink";
302
+
303
+ const neurolink = new NeuroLink();
304
+
305
+ // Use Gemini 3 with extended thinking for complex reasoning
306
+ const result = await neurolink.generate({
307
+ input: {
308
+ text: "Solve this step by step: What is the optimal strategy for...",
309
+ },
310
+ provider: "vertex",
311
+ model: "gemini-3-flash-preview",
312
+ thinkingLevel: "medium", // Options: "minimal", "low", "medium", "high"
313
+ });
314
+
315
+ console.log(result.content);
316
+ ```
317
+
296
318
  Full command and API breakdown lives in [`docs/cli/commands.md`](docs/cli/commands.md) and [`docs/sdk/api-reference.md`](docs/sdk/api-reference.md).
297
319
 
298
320
  ## Platform Capabilities at a Glance
@@ -77,6 +77,10 @@ const VISION_CAPABILITIES = {
77
77
  "gemini-3-pro-preview-11-2025",
78
78
  "gemini-3-pro-latest",
79
79
  "gemini-3-pro-image-preview",
80
+ // Gemini 3 Flash Series
81
+ "gemini-3-flash",
82
+ "gemini-3-flash-preview",
83
+ "gemini-3-flash-latest",
80
84
  // Gemini 2.5 Series
81
85
  "gemini-2.5-pro",
82
86
  "gemini-2.5-flash",
@@ -151,6 +155,10 @@ const VISION_CAPABILITIES = {
151
155
  "gemini-3-pro-latest",
152
156
  "gemini-3-pro-preview",
153
157
  "gemini-3-pro",
158
+ // Gemini 3 Flash Series on Vertex AI
159
+ "gemini-3-flash",
160
+ "gemini-3-flash-preview",
161
+ "gemini-3-flash-latest",
154
162
  // Gemini 2.5 models on Vertex AI
155
163
  "gemini-2.5-pro",
156
164
  "gemini-2.5-flash",
@@ -226,6 +234,9 @@ const VISION_CAPABILITIES = {
226
234
  "gemini/gemini-2.0-flash",
227
235
  "gemini-3-pro-preview",
228
236
  "gemini-3-pro-latest",
237
+ "gemini-3-flash",
238
+ "gemini-3-flash-preview",
239
+ "gemini-3-flash-latest",
229
240
  "gemini-2.5-pro",
230
241
  "gemini-2.5-flash",
231
242
  "gemini-2.0-flash-lite",
@@ -13,6 +13,8 @@ import chalk from "chalk";
13
13
  import { z } from "zod";
14
14
  import { CLI_LIMITS } from "../../lib/core/constants.js";
15
15
  import { logger } from "../../lib/utils/logger.js";
16
+ import { getTopModelChoices } from "../../lib/utils/modelChoices.js";
17
+ import { AIProviderName } from "../../lib/types/index.js";
16
18
  // Configuration schema for validation
17
19
  const ConfigSchema = z.object({
18
20
  defaultProvider: z
@@ -52,7 +54,7 @@ const ConfigSchema = z.object({
52
54
  vertex: z
53
55
  .object({
54
56
  projectId: z.string().optional(),
55
- location: z.string().default("us-east5"),
57
+ location: z.string().default("us-central1"),
56
58
  credentials: z.string().optional(),
57
59
  serviceAccountKey: z.string().optional(),
58
60
  clientEmail: z.string().optional(),
@@ -406,6 +408,7 @@ export class ConfigManager {
406
408
  * OpenAI provider setup
407
409
  */
408
410
  async setupOpenAI() {
411
+ const modelChoices = getTopModelChoices(AIProviderName.OPENAI, 5).filter((c) => c.value !== "custom");
409
412
  const answers = await inquirer.prompt([
410
413
  {
411
414
  type: "password",
@@ -417,8 +420,7 @@ export class ConfigManager {
417
420
  type: "list",
418
421
  name: "model",
419
422
  message: "Default model:",
420
- choices: ["gpt-4", "gpt-4-turbo", "gpt-3.5-turbo"],
421
- default: "gpt-4",
423
+ choices: modelChoices,
422
424
  },
423
425
  {
424
426
  type: "input",
@@ -492,6 +494,7 @@ export class ConfigManager {
492
494
  ],
493
495
  },
494
496
  ]);
497
+ const vertexModelChoices = getTopModelChoices(AIProviderName.VERTEX, 5).filter((c) => c.value !== "custom");
495
498
  const commonAnswers = await inquirer.prompt([
496
499
  {
497
500
  type: "input",
@@ -503,14 +506,13 @@ export class ConfigManager {
503
506
  type: "input",
504
507
  name: "location",
505
508
  message: "Vertex AI Location:",
506
- default: "us-east5",
509
+ default: "us-central1",
507
510
  },
508
511
  {
509
512
  type: "list",
510
513
  name: "model",
511
514
  message: "Default model:",
512
- choices: ["gemini-2.5-pro", "gemini-2.5-flash", "gemini-pro"],
513
- default: "gemini-2.5-pro",
515
+ choices: vertexModelChoices,
514
516
  },
515
517
  ]);
516
518
  let authConfig = {};
@@ -579,6 +581,7 @@ export class ConfigManager {
579
581
  * Anthropic provider setup
580
582
  */
581
583
  async setupAnthropic() {
584
+ const anthropicModelChoices = getTopModelChoices(AIProviderName.ANTHROPIC, 5).filter((c) => c.value !== "custom");
582
585
  const answers = await inquirer.prompt([
583
586
  {
584
587
  type: "password",
@@ -590,12 +593,7 @@ export class ConfigManager {
590
593
  type: "list",
591
594
  name: "model",
592
595
  message: "Default model:",
593
- choices: [
594
- "claude-3-5-sonnet-20241022",
595
- "claude-3-5-haiku-20241022",
596
- "claude-3-opus-20240229",
597
- ],
598
- default: "claude-3-5-sonnet-20241022",
596
+ choices: anthropicModelChoices,
599
597
  },
600
598
  ]);
601
599
  this.config.providers.anthropic = answers;
@@ -604,6 +602,7 @@ export class ConfigManager {
604
602
  * Azure OpenAI provider setup
605
603
  */
606
604
  async setupAzure() {
605
+ const azureModelChoices = getTopModelChoices(AIProviderName.AZURE, 5).filter((c) => c.value !== "custom");
607
606
  const answers = await inquirer.prompt([
608
607
  {
609
608
  type: "password",
@@ -625,8 +624,7 @@ export class ConfigManager {
625
624
  type: "list",
626
625
  name: "model",
627
626
  message: "Model:",
628
- choices: ["gpt-4", "gpt-4-turbo", "gpt-35-turbo"],
629
- default: "gpt-4",
627
+ choices: azureModelChoices,
630
628
  },
631
629
  ]);
632
630
  this.config.providers.azure = answers;
@@ -635,6 +633,7 @@ export class ConfigManager {
635
633
  * Google AI Studio provider setup
636
634
  */
637
635
  async setupGoogleAI() {
636
+ const googleAIModelChoices = getTopModelChoices(AIProviderName.GOOGLE_AI, 5).filter((c) => c.value !== "custom");
638
637
  const answers = await inquirer.prompt([
639
638
  {
640
639
  type: "password",
@@ -646,8 +645,7 @@ export class ConfigManager {
646
645
  type: "list",
647
646
  name: "model",
648
647
  message: "Default model:",
649
- choices: ["gemini-2.5-pro", "gemini-2.5-flash"],
650
- default: "gemini-2.5-pro",
648
+ choices: googleAIModelChoices,
651
649
  },
652
650
  ]);
653
651
  this.config.providers["google-ai"] = answers;
@@ -704,6 +702,7 @@ export class ConfigManager {
704
702
  * Mistral AI provider setup
705
703
  */
706
704
  async setupMistral() {
705
+ const mistralModelChoices = getTopModelChoices(AIProviderName.MISTRAL, 5).filter((c) => c.value !== "custom");
707
706
  const answers = await inquirer.prompt([
708
707
  {
709
708
  type: "password",
@@ -715,13 +714,7 @@ export class ConfigManager {
715
714
  type: "list",
716
715
  name: "model",
717
716
  message: "Default model:",
718
- choices: [
719
- "mistral-small",
720
- "mistral-medium",
721
- "mistral-large",
722
- "mistral-tiny",
723
- ],
724
- default: "mistral-small",
717
+ choices: mistralModelChoices,
725
718
  },
726
719
  ]);
727
720
  this.config.providers.mistral = answers;
@@ -14,6 +14,8 @@ import inquirer from "inquirer";
14
14
  import chalk from "chalk";
15
15
  import ora from "ora";
16
16
  import { logger } from "../../lib/utils/logger.js";
17
+ import { getTopModelChoices } from "../../lib/utils/modelChoices.js";
18
+ import { AIProviderName } from "../../lib/types/index.js";
17
19
  export async function handleAnthropicSetup(argv) {
18
20
  try {
19
21
  const options = {
@@ -266,32 +268,7 @@ async function promptForModel() {
266
268
  type: "list",
267
269
  name: "modelChoice",
268
270
  message: "Select an Anthropic Claude model:",
269
- choices: [
270
- {
271
- name: "claude-3-5-sonnet-20241022 (Recommended - Latest and most capable)",
272
- value: "claude-3-5-sonnet-20241022",
273
- },
274
- {
275
- name: "claude-3-5-haiku-20241022 (Fast and cost-effective)",
276
- value: "claude-3-5-haiku-20241022",
277
- },
278
- {
279
- name: "claude-3-opus-20240229 (Most powerful, slower)",
280
- value: "claude-3-opus-20240229",
281
- },
282
- {
283
- name: "claude-3-sonnet-20240229 (Balanced performance)",
284
- value: "claude-3-sonnet-20240229",
285
- },
286
- {
287
- name: "claude-3-haiku-20240307 (Fast and economical)",
288
- value: "claude-3-haiku-20240307",
289
- },
290
- {
291
- name: "Custom model (enter manually)",
292
- value: "custom",
293
- },
294
- ],
271
+ choices: getTopModelChoices(AIProviderName.ANTHROPIC, 5),
295
272
  },
296
273
  ]);
297
274
  if (modelChoice === "custom") {
@@ -14,6 +14,8 @@ import chalk from "chalk";
14
14
  import ora from "ora";
15
15
  import { logger } from "../../lib/utils/logger.js";
16
16
  import { updateEnvFile as updateEnvFileShared, displayEnvUpdateSummary, } from "../utils/envManager.js";
17
+ import { getTopModelChoices } from "../../lib/utils/modelChoices.js";
18
+ import { AIProviderName } from "../../lib/types/index.js";
17
19
  export async function handleAzureSetup(argv) {
18
20
  try {
19
21
  const options = {
@@ -315,28 +317,7 @@ async function promptForModel() {
315
317
  type: "list",
316
318
  name: "modelChoice",
317
319
  message: "Select an Azure OpenAI model:",
318
- choices: [
319
- {
320
- name: "gpt-4o (Latest multimodal model)",
321
- value: "gpt-4o",
322
- },
323
- {
324
- name: "gpt-4o-mini (Cost-effective)",
325
- value: "gpt-4o-mini",
326
- },
327
- {
328
- name: "gpt-4-turbo (Previous generation)",
329
- value: "gpt-4-turbo",
330
- },
331
- {
332
- name: "gpt-35-turbo (Legacy, most cost-effective)",
333
- value: "gpt-35-turbo",
334
- },
335
- {
336
- name: "Custom deployment name (enter manually)",
337
- value: "custom",
338
- },
339
- ],
320
+ choices: getTopModelChoices(AIProviderName.AZURE, 5),
340
321
  },
341
322
  ]);
342
323
  if (modelChoice === "custom") {
@@ -13,6 +13,8 @@ import chalk from "chalk";
13
13
  import ora from "ora";
14
14
  import { logger } from "../../lib/utils/logger.js";
15
15
  import { updateEnvFile as envUpdate } from "../utils/envManager.js";
16
+ import { getTopModelChoices } from "../../lib/utils/modelChoices.js";
17
+ import { AIProviderName } from "../../lib/types/index.js";
16
18
  export async function handleBedrockSetup(argv) {
17
19
  try {
18
20
  const options = {
@@ -375,32 +377,7 @@ async function handleModelSelection(config) {
375
377
  type: "list",
376
378
  name: "model",
377
379
  message: "Select a Bedrock model:",
378
- choices: [
379
- {
380
- name: "Claude 3.7 Sonnet (Recommended - Latest)",
381
- value: "arn:aws:bedrock:us-east-2:225681119357:inference-profile/us.anthropic.claude-3-7-sonnet-20250219-v1:0",
382
- },
383
- {
384
- name: "Claude 3.5 Sonnet v2",
385
- value: "anthropic.claude-3-5-sonnet-20241022-v2:0",
386
- },
387
- {
388
- name: "Claude 3 Sonnet",
389
- value: "anthropic.claude-3-sonnet-20240229-v1:0",
390
- },
391
- {
392
- name: "Claude 3 Haiku",
393
- value: "anthropic.claude-3-haiku-20240307-v1:0",
394
- },
395
- {
396
- name: "Claude 3 Opus",
397
- value: "anthropic.claude-3-opus-20240229-v1:0",
398
- },
399
- {
400
- name: "Custom model (enter manually)",
401
- value: "custom",
402
- },
403
- ],
380
+ choices: getTopModelChoices(AIProviderName.BEDROCK, 5),
404
381
  },
405
382
  ]);
406
383
  if (model === "custom") {
@@ -15,6 +15,8 @@ import ora from "ora";
15
15
  import { logger } from "../../lib/utils/logger.js";
16
16
  import { GoogleAIModels } from "../../lib/constants/enums.js";
17
17
  import { updateEnvFile as updateEnvFileManager, displayEnvUpdateSummary, } from "../utils/envManager.js";
18
+ import { getTopModelChoices } from "../../lib/utils/modelChoices.js";
19
+ import { AIProviderName } from "../../lib/types/index.js";
18
20
  /**
19
21
  * Get the runtime default model that matches the provider implementation
20
22
  */
@@ -267,28 +269,7 @@ async function promptForModel() {
267
269
  type: "list",
268
270
  name: "modelChoice",
269
271
  message: "Select a Google AI model:",
270
- choices: [
271
- {
272
- name: "gemini-2.5-pro (Recommended - Latest high-capability model)",
273
- value: "gemini-2.5-pro",
274
- },
275
- {
276
- name: "gemini-2.5-flash (Fast and efficient)",
277
- value: "gemini-2.5-flash",
278
- },
279
- {
280
- name: "gemini-pro-vision (Multimodal - text and images)",
281
- value: "gemini-pro-vision",
282
- },
283
- {
284
- name: "gemini-pro (Previous generation)",
285
- value: "gemini-pro",
286
- },
287
- {
288
- name: "Custom model (enter manually)",
289
- value: "custom",
290
- },
291
- ],
272
+ choices: getTopModelChoices(AIProviderName.GOOGLE_AI, 5),
292
273
  },
293
274
  ]);
294
275
  if (modelChoice === "custom") {
@@ -4,6 +4,8 @@ import inquirer from "inquirer";
4
4
  import fs from "fs";
5
5
  import path from "path";
6
6
  import { logger } from "../../lib/utils/logger.js";
7
+ import { getTopModelChoices } from "../../lib/utils/modelChoices.js";
8
+ import { AIProviderName } from "../../lib/types/index.js";
7
9
  /**
8
10
  * Validates Mistral API key format
9
11
  * Mistral keys typically start with "sk-" and contain base62 plus "-" or "_".
@@ -141,37 +143,7 @@ export const handleMistralSetup = async (argv) => {
141
143
  type: "list",
142
144
  name: "modelChoice",
143
145
  message: "Select a Mistral model:",
144
- choices: [
145
- {
146
- name: "mistral-small (Balanced performance - Recommended)",
147
- value: "mistral-small",
148
- },
149
- {
150
- name: "mistral-tiny (Fastest, most cost-effective)",
151
- value: "mistral-tiny",
152
- },
153
- {
154
- name: "mistral-medium (Enhanced capabilities)",
155
- value: "mistral-medium",
156
- },
157
- {
158
- name: "mistral-large (Most capable model)",
159
- value: "mistral-large",
160
- },
161
- {
162
- name: "open-mistral-7b (Open source model)",
163
- value: "open-mistral-7b",
164
- },
165
- {
166
- name: "open-mixtral-8x7b (Open source mixture of experts)",
167
- value: "open-mixtral-8x7b",
168
- },
169
- {
170
- name: "Custom model name",
171
- value: "custom",
172
- },
173
- ],
174
- default: "mistral-small",
146
+ choices: getTopModelChoices(AIProviderName.MISTRAL, 5),
175
147
  },
176
148
  ]);
177
149
  let selectedModel = modelChoice;
@@ -14,6 +14,8 @@ import inquirer from "inquirer";
14
14
  import chalk from "chalk";
15
15
  import ora from "ora";
16
16
  import { logger } from "../../lib/utils/logger.js";
17
+ import { getTopModelChoices } from "../../lib/utils/modelChoices.js";
18
+ import { AIProviderName } from "../../lib/types/index.js";
17
19
  export async function handleOpenAISetup(argv) {
18
20
  try {
19
21
  const options = {
@@ -259,28 +261,7 @@ async function promptForModel() {
259
261
  type: "list",
260
262
  name: "modelChoice",
261
263
  message: "Select an OpenAI model:",
262
- choices: [
263
- {
264
- name: "gpt-4o (Recommended - Latest multimodal model)",
265
- value: "gpt-4o",
266
- },
267
- {
268
- name: "gpt-4o-mini (Cost-effective, fast)",
269
- value: "gpt-4o-mini",
270
- },
271
- {
272
- name: "gpt-4-turbo (Previous generation)",
273
- value: "gpt-4-turbo",
274
- },
275
- {
276
- name: "gpt-3.5-turbo (Legacy, most cost-effective)",
277
- value: "gpt-3.5-turbo",
278
- },
279
- {
280
- name: "Custom model (enter manually)",
281
- value: "custom",
282
- },
283
- ],
264
+ choices: getTopModelChoices(AIProviderName.OPENAI, 5),
284
265
  },
285
266
  ]);
286
267
  if (modelChoice === "custom") {
@@ -2,6 +2,7 @@ import { globalSession } from "../../lib/session/globalSessionState.js";
2
2
  import { configManager } from "../commands/config.js";
3
3
  import { handleError } from "../errorHandler.js";
4
4
  import { normalizeEvaluationData } from "../../lib/utils/evaluationUtils.js";
5
+ import { createThinkingConfigFromRecord } from "../../lib/utils/thinkingConfig.js";
5
6
  import { LoopSession } from "../loop/session.js";
6
7
  import { initializeCliParser } from "../parser.js";
7
8
  // Use TokenUsage from standard types - no local interface needed
@@ -258,6 +259,27 @@ export class CLICommandFactory {
258
259
  default: false,
259
260
  description: "Auto-play generated audio",
260
261
  },
262
+ thinking: {
263
+ alias: "think",
264
+ type: "boolean",
265
+ description: "Enable extended thinking/reasoning capability",
266
+ default: false,
267
+ },
268
+ thinkingBudget: {
269
+ type: "number",
270
+ description: "Token budget for extended thinking - Anthropic Claude and Gemini 2.5+ models (5000-100000)",
271
+ default: 10000,
272
+ },
273
+ thinkingLevel: {
274
+ type: "string",
275
+ description: "Thinking level for Gemini 3 models: minimal, low, medium, high",
276
+ choices: ["minimal", "low", "medium", "high"],
277
+ },
278
+ region: {
279
+ type: "string",
280
+ description: "Vertex AI region (e.g., us-central1, europe-west1, asia-northeast1)",
281
+ alias: "r",
282
+ },
261
283
  };
262
284
  // Helper method to build options for commands
263
285
  static buildOptions(yargs, additionalOptions = {}) {
@@ -381,6 +403,12 @@ export class CLICommandFactory {
381
403
  ttsQuality: argv.ttsQuality,
382
404
  ttsOutput: argv.ttsOutput,
383
405
  ttsPlay: argv.ttsPlay,
406
+ // Extended thinking options for Claude and Gemini models
407
+ thinking: argv.thinking,
408
+ thinkingBudget: argv.thinkingBudget,
409
+ thinkingLevel: argv.thinkingLevel,
410
+ // Region option for cloud providers (Vertex AI, Bedrock, etc.)
411
+ region: argv.region,
384
412
  };
385
413
  }
386
414
  // Helper method to handle output
@@ -1233,6 +1261,8 @@ export class CLICommandFactory {
1233
1261
  evaluationDomain: enhancedOptions.evaluationDomain,
1234
1262
  toolUsageContext: enhancedOptions.toolUsageContext,
1235
1263
  context: context,
1264
+ region: options.region,
1265
+ thinkingConfig: createThinkingConfigFromRecord(options),
1236
1266
  factoryConfig: enhancedOptions.domain
1237
1267
  ? {
1238
1268
  domainType: enhancedOptions.domain,
@@ -1429,6 +1459,8 @@ export class CLICommandFactory {
1429
1459
  evaluationDomain: enhancedOptions.evaluationDomain,
1430
1460
  toolUsageContext: enhancedOptions.toolUsageContext,
1431
1461
  context: context,
1462
+ region: options.region,
1463
+ thinkingConfig: createThinkingConfigFromRecord(options),
1432
1464
  factoryConfig: enhancedOptions.domain
1433
1465
  ? {
1434
1466
  domainType: enhancedOptions.domain,
@@ -4,6 +4,8 @@ import ora from "ora";
4
4
  import inquirer from "inquirer";
5
5
  import { logger } from "../../lib/utils/logger.js";
6
6
  import { OllamaUtils } from "../utils/ollamaUtils.js";
7
+ import { getTopModelChoices } from "../../lib/utils/modelChoices.js";
8
+ import { AIProviderName } from "../../lib/types/index.js";
7
9
  /**
8
10
  * Factory for creating Ollama CLI commands using the Factory Pattern
9
11
  */
@@ -330,23 +332,9 @@ export class OllamaCommandFactory {
330
332
  type: "list",
331
333
  name: "selectedModel",
332
334
  message: "Select a model to download:",
333
- choices: [
334
- {
335
- name: "llama2 (7B) - Recommended for general use",
336
- value: "llama2",
337
- },
338
- {
339
- name: "codellama (7B) - Best for code generation",
340
- value: "codellama",
341
- },
342
- { name: "mistral (7B) - Fast and efficient", value: "mistral" },
343
- {
344
- name: "tinyllama (1B) - Lightweight, fast",
345
- value: "tinyllama",
346
- },
347
- { name: "phi (2.7B) - Microsoft's compact model", value: "phi" },
348
- { name: "Other (enter manually)", value: "other" },
349
- ],
335
+ choices: getTopModelChoices(AIProviderName.OLLAMA, 5).map((choice) => choice.value === "custom"
336
+ ? { name: "Other (enter manually)", value: "other" }
337
+ : choice),
350
338
  },
351
339
  ]);
352
340
  let modelToDownload = selectedModel;
@@ -5,4 +5,4 @@ import type { OptionSchema } from "../../lib/types/cli.js";
5
5
  * This object provides metadata for validation and help text in the CLI loop.
6
6
  * It is derived from the main TextGenerationOptions interface to ensure consistency.
7
7
  */
8
- export declare const textGenerationOptionsSchema: Record<keyof Omit<TextGenerationOptions, "prompt" | "input" | "schema" | "tools" | "context" | "conversationHistory" | "conversationMessages" | "conversationMemoryConfig" | "originalPrompt" | "middleware" | "expectedOutcome" | "evaluationCriteria" | "region" | "csvOptions" | "tts">, OptionSchema>;
8
+ export declare const textGenerationOptionsSchema: Record<keyof Omit<TextGenerationOptions, "prompt" | "input" | "schema" | "tools" | "context" | "conversationHistory" | "conversationMessages" | "conversationMemoryConfig" | "originalPrompt" | "middleware" | "expectedOutcome" | "evaluationCriteria" | "region" | "csvOptions" | "tts" | "thinkingConfig">, OptionSchema>;