@juspay/neurolink 8.13.0 → 8.13.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,9 @@
+## [8.13.1](https://github.com/juspay/neurolink/compare/v8.13.0...v8.13.1) (2025-12-13)
+
+### Bug Fixes
+
+- **(provider):** Implement image count limits with validation and warnings ([ff3e27a](https://github.com/juspay/neurolink/commit/ff3e27a5ab3aafffc8312f645e0ebc566600cc63))
+
 ## [8.13.0](https://github.com/juspay/neurolink/compare/v8.12.0...v8.13.0) (2025-12-13)
 
 ### Features
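In practice, the fix makes over-limit requests fail fast: a payload carrying more images than the target provider accepts now throws inside the adapter instead of being rejected later by the provider API. A minimal sketch of handling that failure (the `sendWithImages` wrapper is hypothetical, not a package API; only the error message shape comes from this diff):

    try {
        // any call that routes image content through ProviderImageAdapter
        await sendWithImages("anthropic", text, images); // hypothetical wrapper
    }
    catch (err) {
        // e.g. "Image count (25) exceeds the maximum limit for anthropic. Maximum allowed: 20. ..."
        if (err instanceof Error && err.message.includes("exceeds the maximum limit")) {
            // split the batch into chunks within the limit and retry
        }
        else {
            throw err;
        }
    }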
@@ -33,6 +33,11 @@ export declare class ProviderImageAdapter {
      * Format content for Vertex AI (model-specific routing)
      */
     private static formatForVertex;
+    /**
+     * Validate image count against provider limits
+     * Warns at 80% threshold, throws error if limit exceeded
+     */
+    private static validateImageCount;
     /**
      * Validate that provider and model support vision
      */
@@ -16,6 +16,28 @@ export class MultimodalLogger {
         }
     }
 }
+/**
+ * Image count limits per provider
+ * These limits prevent API rejections when too many images are sent
+ */
+const IMAGE_LIMITS = {
+    openai: 10,
+    azure: 10, // Same as OpenAI
+    "google-ai": 16,
+    google: 16,
+    anthropic: 20,
+    vertex: {
+        // Vertex has model-specific limits
+        claude: 20, // Claude models on Vertex
+        gemini: 16, // Gemini models on Vertex
+        default: 16,
+    },
+    ollama: 10, // Conservative limit for Ollama
+    litellm: 10, // Conservative limit, as it proxies to various providers
+    mistral: 10, // Conservative limit for Mistral
+    // Note: Bedrock limit defined for future use when vision support is added
+    bedrock: 20, // Same as Anthropic for Claude models on Bedrock
+};
 /**
  * Vision capability definitions for each provider
  */
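Note that most `IMAGE_LIMITS` entries are plain numbers while `vertex` is a nested object keyed by model family, so any lookup has to branch on the entry's shape. A standalone sketch of that resolution logic under the same constants (`resolveLimit` is illustrative, not part of the package; the real lookup lives in `validateImageCount` further down):

    // Illustrative helper mirroring how validateImageCount resolves a limit.
    function resolveLimit(provider, model) {
        const entry = IMAGE_LIMITS[provider.toLowerCase()];
        if (typeof entry === "number") {
            return entry; // flat providers: openai, anthropic, mistral, ...
        }
        if (entry && typeof model === "string") {
            // nested vertex entry: pick the model family, falling back to default
            if (model.includes("claude")) {
                return entry.claude;
            }
            if (model.includes("gemini")) {
                return entry.gemini;
            }
            return entry.default;
        }
        return undefined; // unknown provider: caller applies a conservative default
    }

    resolveLimit("vertex", "gemini-1.5-pro"); // 16
    resolveLimit("anthropic");                // 20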
@@ -368,7 +390,9 @@ export class ProviderImageAdapter {
                break;
            case "azure":
            case "azure-openai":
-               adaptedPayload = this.formatForOpenAI(text, images);
+               // Azure uses same format as OpenAI but validate with azure provider name
+               this.validateImageCount(images.length, "azure");
+               adaptedPayload = this.formatForOpenAI(text, images, true);
                break;
            case "google-ai":
            case "google":
@@ -381,7 +405,9 @@ export class ProviderImageAdapter {
                adaptedPayload = this.formatForVertex(text, images, model);
                break;
            case "ollama":
-               adaptedPayload = this.formatForOpenAI(text, images);
+               // Ollama uses same format as OpenAI but validate with ollama provider name
+               this.validateImageCount(images.length, "ollama");
+               adaptedPayload = this.formatForOpenAI(text, images, true);
                break;
            case "huggingface":
                adaptedPayload = this.formatForOpenAI(text, images);
@@ -415,7 +441,11 @@ export class ProviderImageAdapter {
     /**
      * Format content for OpenAI (GPT-4o format)
      */
-    static formatForOpenAI(text, images) {
+    static formatForOpenAI(text, images, skipValidation = false) {
+        // Validate image count before processing (unless called from another formatter)
+        if (!skipValidation) {
+            this.validateImageCount(images.length, "openai");
+        }
         const content = [{ type: "text", text }];
         images.forEach((image, index) => {
             try {
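The new `skipValidation` flag exists because this formatter is reused by other switch arms: azure and ollama validate under their own provider names first (see the hunks above), then delegate here with `true` so the count is not re-checked against the openai entry and reported under the wrong provider name. Condensed from this diff (the `private` modifiers in the .d.ts are ignored for illustration):

    // Direct use: the formatter itself validates against the openai limit.
    ProviderImageAdapter.formatForOpenAI(text, images);

    // Delegated use (azure/ollama arms): validate under the real provider first,
    // then pass skipValidation = true to avoid a second, wrongly-attributed check.
    ProviderImageAdapter.validateImageCount(images.length, "azure");
    ProviderImageAdapter.formatForOpenAI(text, images, true);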
@@ -438,7 +468,11 @@ export class ProviderImageAdapter {
     /**
      * Format content for Google AI (Gemini format)
      */
-    static formatForGoogleAI(text, images) {
+    static formatForGoogleAI(text, images, skipValidation = false) {
+        // Validate image count before processing (unless called from another formatter)
+        if (!skipValidation) {
+            this.validateImageCount(images.length, "google-ai");
+        }
         const parts = [{ text }];
         images.forEach((image, index) => {
             try {
@@ -460,7 +494,11 @@ export class ProviderImageAdapter {
     /**
      * Format content for Anthropic (Claude format)
      */
-    static formatForAnthropic(text, images) {
+    static formatForAnthropic(text, images, skipValidation = false) {
+        // Validate image count before processing (unless called from another formatter)
+        if (!skipValidation) {
+            this.validateImageCount(images.length, "anthropic");
+        }
         const content = [{ type: "text", text }];
         images.forEach((image, index) => {
             try {
@@ -488,15 +526,65 @@ export class ProviderImageAdapter {
      * Format content for Vertex AI (model-specific routing)
      */
     static formatForVertex(text, images, model) {
-        // Route based on model type
+        // Validate image count with model-specific limits before processing
+        this.validateImageCount(images.length, "vertex", model);
+        // Route based on model type, skip validation in delegated methods
         if (model.includes("gemini")) {
-            return this.formatForGoogleAI(text, images);
+            return this.formatForGoogleAI(text, images, true);
         }
         else if (model.includes("claude")) {
-            return this.formatForAnthropic(text, images);
+            return this.formatForAnthropic(text, images, true);
+        }
+        else {
+            return this.formatForGoogleAI(text, images, true);
+        }
+    }
+    /**
+     * Validate image count against provider limits
+     * Warns at 80% threshold, throws error if limit exceeded
+     */
+    static validateImageCount(imageCount, provider, model) {
+        const normalizedProvider = provider.toLowerCase();
+        let limit;
+        // Determine the limit based on provider
+        if (normalizedProvider === "vertex" && model) {
+            // Vertex has model-specific limits
+            if (model.includes("claude")) {
+                limit = IMAGE_LIMITS.vertex.claude;
+            }
+            else if (model.includes("gemini")) {
+                limit = IMAGE_LIMITS.vertex.gemini;
+            }
+            else {
+                limit = IMAGE_LIMITS.vertex.default;
+            }
         }
         else {
-            return this.formatForGoogleAI(text, images);
+            // Use provider-specific limit
+            const providerLimit = normalizedProvider in IMAGE_LIMITS
+                ? IMAGE_LIMITS[normalizedProvider]
+                : undefined;
+            // If provider not found in limits map, use a conservative default
+            if (providerLimit === undefined) {
+                // Conservative default for unknown providers
+                limit = 10;
+                logger.warn(`Image count limit not defined for provider ${provider}. Using conservative default of 10 images.`);
+            }
+            else {
+                // providerLimit is always a number when defined (except vertex which is handled separately)
+                limit = providerLimit;
+            }
+        }
+        // Warn only once at 80% threshold to avoid noise in batch processing
+        const warningThreshold = Math.floor(limit * 0.8);
+        if (imageCount === warningThreshold) {
+            logger.warn(`Image count (${imageCount}) is approaching the limit for ${provider}. ` +
+                `Maximum allowed: ${limit}. Please reduce the number of images.`);
+        }
+        // Throw error if limit exceeded
+        if (imageCount > limit) {
+            throw new Error(`Image count (${imageCount}) exceeds the maximum limit for ${provider}. ` +
+                `Maximum allowed: ${limit}. Please reduce the number of images.`);
         }
     }
     /**
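One subtlety: the warning fires only when the count equals the 80% threshold exactly, not for every count above it, which keeps batch runs from logging repeatedly. With anthropic's limit of 20, `Math.floor(20 * 0.8)` is 16, so 16 images warn, 17 through 20 pass silently, and 21 throws. (Also worth noting: if `validateImageCount` were ever called with provider `"vertex"` but no model, the else branch would assign the nested vertex object to `limit`; as of this diff the only vertex call site, `formatForVertex`, always passes a model.) A compact restatement of the decision logic under the same constants (`classifyCount` is illustrative, not part of the package):

    // Illustrative restatement of validateImageCount's thresholding.
    function classifyCount(imageCount, limit) {
        if (imageCount > limit) {
            return "error"; // the adapter throws here
        }
        if (imageCount === Math.floor(limit * 0.8)) {
            return "warn"; // exact-threshold check: warns once, not on every count above it
        }
        return "ok";
    }

    classifyCount(16, 20); // "warn"  (16 === Math.floor(20 * 0.8))
    classifyCount(19, 20); // "ok"    (over threshold but within limit: silent)
    classifyCount(21, 20); // "error" (limit exceeded)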
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@juspay/neurolink",
-  "version": "8.13.0",
+  "version": "8.13.1",
   "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
   "author": {
     "name": "Juspay Technologies",