@juspay/neurolink 7.45.0 → 7.47.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/adapters/providerImageAdapter.js +12 -0
  3. package/dist/cli/commands/config.d.ts +2 -2
  4. package/dist/core/constants.js +1 -1
  5. package/dist/evaluation/contextBuilder.d.ts +48 -0
  6. package/dist/evaluation/contextBuilder.js +134 -0
  7. package/dist/evaluation/index.d.ts +36 -0
  8. package/dist/evaluation/index.js +61 -0
  9. package/dist/evaluation/prompts.d.ts +22 -0
  10. package/dist/evaluation/prompts.js +73 -0
  11. package/dist/evaluation/ragasEvaluator.d.ts +28 -0
  12. package/dist/evaluation/ragasEvaluator.js +90 -0
  13. package/dist/evaluation/retryManager.d.ts +40 -0
  14. package/dist/evaluation/retryManager.js +78 -0
  15. package/dist/evaluation/scoring.d.ts +16 -0
  16. package/dist/evaluation/scoring.js +35 -0
  17. package/dist/lib/adapters/providerImageAdapter.js +12 -0
  18. package/dist/lib/core/constants.js +1 -1
  19. package/dist/lib/evaluation/contextBuilder.d.ts +48 -0
  20. package/dist/lib/evaluation/contextBuilder.js +134 -0
  21. package/dist/lib/evaluation/index.d.ts +36 -0
  22. package/dist/lib/evaluation/index.js +61 -0
  23. package/dist/lib/evaluation/prompts.d.ts +22 -0
  24. package/dist/lib/evaluation/prompts.js +73 -0
  25. package/dist/lib/evaluation/ragasEvaluator.d.ts +28 -0
  26. package/dist/lib/evaluation/ragasEvaluator.js +90 -0
  27. package/dist/lib/evaluation/retryManager.d.ts +40 -0
  28. package/dist/lib/evaluation/retryManager.js +78 -0
  29. package/dist/lib/evaluation/scoring.d.ts +16 -0
  30. package/dist/lib/evaluation/scoring.js +35 -0
  31. package/dist/lib/middleware/builtin/autoEvaluation.d.ts +14 -0
  32. package/dist/lib/middleware/builtin/autoEvaluation.js +181 -0
  33. package/dist/lib/middleware/factory.js +6 -0
  34. package/dist/lib/providers/azureOpenai.js +36 -3
  35. package/dist/lib/providers/googleAiStudio.js +37 -3
  36. package/dist/lib/providers/googleVertex.js +37 -3
  37. package/dist/lib/types/evaluation.d.ts +2 -0
  38. package/dist/lib/types/evaluationTypes.d.ts +142 -0
  39. package/dist/lib/types/evaluationTypes.js +1 -0
  40. package/dist/lib/types/middlewareTypes.d.ts +28 -2
  41. package/dist/lib/utils/imageProcessor.d.ts +44 -0
  42. package/dist/lib/utils/imageProcessor.js +159 -8
  43. package/dist/lib/utils/messageBuilder.d.ts +4 -6
  44. package/dist/lib/utils/messageBuilder.js +145 -1
  45. package/dist/middleware/builtin/autoEvaluation.d.ts +14 -0
  46. package/dist/middleware/builtin/autoEvaluation.js +181 -0
  47. package/dist/middleware/factory.js +6 -0
  48. package/dist/providers/azureOpenai.js +36 -3
  49. package/dist/providers/googleAiStudio.js +37 -3
  50. package/dist/providers/googleVertex.js +37 -3
  51. package/dist/types/evaluation.d.ts +2 -0
  52. package/dist/types/evaluationTypes.d.ts +142 -0
  53. package/dist/types/evaluationTypes.js +1 -0
  54. package/dist/types/middlewareTypes.d.ts +28 -2
  55. package/dist/utils/imageProcessor.d.ts +44 -0
  56. package/dist/utils/imageProcessor.js +159 -8
  57. package/dist/utils/messageBuilder.d.ts +4 -6
  58. package/dist/utils/messageBuilder.js +145 -1
  59. package/package.json +1 -1
@@ -0,0 +1,142 @@
1
+ import type { LanguageModelV1CallOptions } from "ai";
2
+ import type { TokenUsage } from "./analytics.js";
3
+ import type { GenerateResult } from "./generateTypes.js";
4
+ import type { ToolExecution } from "./tools.js";
5
+ /**
6
+ * Represents the analysis of the user's query intent.
7
+ * This provides a basic understanding of what the user is trying to achieve.
8
+ */
9
+ export interface QueryIntentAnalysis {
10
+ /** The type of query, e.g., asking a question or giving a command. */
11
+ type: "question" | "command" | "greeting" | "unknown";
12
+ /** The estimated complexity of the query. */
13
+ complexity: "low" | "medium" | "high";
14
+ /** Whether the query likely required the use of tools to be answered correctly. */
15
+ shouldHaveUsedTools: boolean;
16
+ }
17
+ /**
18
+ * Represents a single turn in an enhanced conversation history,
19
+ * including tool executions and evaluations for richer context.
20
+ */
21
+ export interface EnhancedConversationTurn {
22
+ /** The role of the speaker, either 'user' or 'assistant'. */
23
+ role: "user" | "assistant";
24
+ /** The content of the message. */
25
+ content: string;
26
+ /** The timestamp of the message. */
27
+ timestamp: string;
28
+ /** Any tools that were executed as part of this turn. */
29
+ toolExecutions?: ToolExecution[];
30
+ /** The evaluation result for this turn, if applicable. */
31
+ evaluation?: EvaluationResult;
32
+ }
33
+ /**
34
+ * Contains all the rich context needed for a thorough, RAGAS-style evaluation.
35
+ * This object is constructed by the `ContextBuilder` and used by the `RAGASEvaluator`.
36
+ */
37
+ export interface EnhancedEvaluationContext {
38
+ /** The original user query. */
39
+ userQuery: string;
40
+ /** An analysis of the user's query intent. */
41
+ queryAnalysis: QueryIntentAnalysis;
42
+ /** The AI's response that is being evaluated. */
43
+ aiResponse: string;
44
+ /** The AI provider that generated the response. */
45
+ provider: string;
46
+ /** The specific model that generated the response. */
47
+ model: string;
48
+ /** The parameters used for the generation call. */
49
+ generationParams: {
50
+ temperature?: number;
51
+ maxTokens?: number;
52
+ systemPrompt?: string;
53
+ };
54
+ /** A list of tools that were executed. */
55
+ toolExecutions: ToolExecution[];
56
+ /** The history of the conversation leading up to this turn. */
57
+ conversationHistory: EnhancedConversationTurn[];
58
+ /** The response time of the AI in milliseconds. */
59
+ responseTime: number;
60
+ /** The token usage for the generation. */
61
+ tokenUsage: TokenUsage;
62
+ /** The results of any previous evaluation attempts for this response. */
63
+ previousEvaluations?: EvaluationResult[];
64
+ /** The current attempt number for this evaluation (1-based). */
65
+ attemptNumber: number;
66
+ }
67
+ /**
68
+ * Represents the result of a single evaluation attempt, based on RAGAS principles.
69
+ */
70
+ export interface EvaluationResult {
71
+ /** The final, overall score for the response, typically from 1 to 10. */
72
+ finalScore: number;
73
+ /** How well the response addresses the user's query. */
74
+ relevanceScore: number;
75
+ /** The factual accuracy of the information in the response. */
76
+ accuracyScore: number;
77
+ /** How completely the response answers the user's query. */
78
+ completenessScore: number;
79
+ /** Whether the final score meets the passing threshold. */
80
+ isPassing: boolean;
81
+ /** Constructive feedback from the judge LLM on how to improve the response. */
82
+ reasoning: string;
83
+ /** Specific suggestions for improving the response. */
84
+ suggestedImprovements: string;
85
+ /** The raw, unparsed response from the judge LLM. */
86
+ rawEvaluationResponse: string;
87
+ /** The model used to perform the evaluation. */
88
+ evaluationModel: string;
89
+ /** The time taken for the evaluation in milliseconds. */
90
+ evaluationTime: number;
91
+ /** The attempt number for this evaluation. */
92
+ attemptNumber: number;
93
+ }
94
+ /**
95
+ * Provides detailed information when a response fails quality assurance checks.
96
+ */
97
+ export interface QualityErrorDetails {
98
+ /** The history of all evaluation attempts for this response. */
99
+ evaluationHistory: EvaluationResult[];
100
+ /** The final score of the last attempt. */
101
+ finalScore: number;
102
+ /** The total number of evaluation attempts made. */
103
+ attempts: number;
104
+ /** A summary message of the failure. */
105
+ message: string;
106
+ }
107
+ /**
108
+ * Configuration for the main `Evaluator` class.
109
+ */
110
+ export interface EvaluationConfig {
111
+ /** The minimum score (1-10) for a response to be considered passing. */
112
+ threshold?: number;
113
+ /** The evaluation strategy to use. Currently only 'ragas' is supported. */
114
+ evaluationStrategy?: "ragas" | "custom";
115
+ /** The model to use for the LLM-as-judge evaluation. */
116
+ evaluationModel?: string;
117
+ /** The maximum number of evaluation attempts before failing. */
118
+ maxAttempts?: number;
119
+ /** The provider to use for the evaluation model. */
120
+ provider?: string;
121
+ /** A custom evaluator function to override the default behavior. */
122
+ customEvaluator?: (options: LanguageModelV1CallOptions, result: GenerateResult) => Promise<{
123
+ evaluationResult: EvaluationResult;
124
+ evalContext: EnhancedEvaluationContext;
125
+ }>;
126
+ /** The score below which a response is considered off-topic. */
127
+ offTopicThreshold?: number;
128
+ /** The score below which a failing response is considered a high severity alert. */
129
+ highSeverityThreshold?: number;
130
+ /** An optional function to generate custom evaluation prompts. */
131
+ promptGenerator?: GetPromptFunction;
132
+ }
133
+ /**
134
+ * A function that generates the main body of an evaluation prompt.
135
+ */
136
+ export type GetPromptFunction = (context: {
137
+ userQuery: string;
138
+ history: string;
139
+ tools: string;
140
+ retryInfo: string;
141
+ aiResponse: string;
142
+ }) => string;
@@ -0,0 +1 @@
1
+ export {};
@@ -1,5 +1,7 @@
1
1
  import type { LanguageModelV1Middleware } from "ai";
2
2
  import type { JsonValue } from "../types/common.js";
3
+ import type { EvaluationData } from "./evaluation.js";
4
+ import type { GetPromptFunction } from "./evaluationTypes.js";
3
5
  /**
4
6
  * Metadata interface for NeuroLink middleware
5
7
  * Provides additional information about middleware without affecting execution
@@ -33,7 +35,7 @@ export interface MiddlewareConfig {
33
35
  /** Whether the middleware is enabled */
34
36
  enabled?: boolean;
35
37
  /** Middleware-specific configuration */
36
- config?: Record<string, JsonValue>;
38
+ config?: Record<string, unknown>;
37
39
  /** Conditions under which to apply this middleware */
38
40
  conditions?: MiddlewareConditions;
39
41
  }
@@ -108,7 +110,7 @@ export interface MiddlewareChainStats {
108
110
  /**
109
111
  * Built-in middleware types
110
112
  */
111
- export type BuiltInMiddlewareType = "analytics" | "guardrails" | "logging" | "caching" | "rateLimit" | "retry" | "timeout";
113
+ export type BuiltInMiddlewareType = "analytics" | "guardrails" | "logging" | "caching" | "rateLimit" | "retry" | "timeout" | "autoEvaluation";
112
114
  /**
113
115
  * Middleware preset configurations
114
116
  */
@@ -144,3 +146,27 @@ export interface MiddlewareFactoryOptions {
144
146
  collectStats?: boolean;
145
147
  };
146
148
  }
149
+ /**
150
+ * Configuration for the Auto-Evaluation Middleware.
151
+ */
152
+ export interface AutoEvaluationConfig {
153
+ /** The minimum score (1-10) for a response to be considered passing. */
154
+ threshold?: number;
155
+ /** The maximum number of retry attempts before failing. */
156
+ maxRetries?: number;
157
+ /** The model to use for the LLM-as-judge evaluation. */
158
+ evaluationModel?: string;
159
+ /**
160
+ * If true, the middleware will wait for the evaluation to complete before returning.
161
+ * If the evaluation fails, it will throw an error. Defaults to true.
162
+ */
163
+ blocking?: boolean;
164
+ /** A callback function to be invoked with the evaluation result. */
165
+ onEvaluationComplete?: (evaluation: EvaluationData) => void | Promise<void>;
166
+ /** The score below which a response is considered off-topic. */
167
+ offTopicThreshold?: number;
168
+ /** The score below which a failing response is considered a high severity alert. */
169
+ highSeverityThreshold?: number;
170
+ promptGenerator?: GetPromptFunction;
171
+ provider?: string;
172
+ }
@@ -81,4 +81,48 @@ export declare const imageUtils: {
81
81
  * Convert file size to human readable format
82
82
  */
83
83
  formatFileSize: (bytes: number) => string;
84
+ /**
85
+ * Convert Buffer to base64 string
86
+ */
87
+ bufferToBase64: (buffer: Buffer) => string;
88
+ /**
89
+ * Convert base64 string to Buffer
90
+ */
91
+ base64ToBuffer: (base64: string) => Buffer;
92
+ /**
93
+ * Convert file path to base64 data URI
94
+ */
95
+ fileToBase64DataUri: (filePath: string, maxBytes?: number) => Promise<string>;
96
+ /**
97
+ * Convert URL to base64 data URI by downloading the image
98
+ */
99
+ urlToBase64DataUri: (url: string, { timeoutMs, maxBytes }?: {
100
+ timeoutMs?: number | undefined;
101
+ maxBytes?: number | undefined;
102
+ }) => Promise<string>;
103
+ /**
104
+ * Extract base64 data from data URI
105
+ */
106
+ extractBase64FromDataUri: (dataUri: string) => string;
107
+ /**
108
+ * Extract MIME type from data URI
109
+ */
110
+ extractMimeTypeFromDataUri: (dataUri: string) => string;
111
+ /**
112
+ * Create data URI from base64 and MIME type
113
+ */
114
+ createDataUri: (base64: string, mimeType?: string) => string;
115
+ /**
116
+ * Validate base64 string format
117
+ */
118
+ isValidBase64: (str: string) => boolean;
119
+ /**
120
+ * Get base64 string size in bytes
121
+ */
122
+ getBase64Size: (base64: string) => number;
123
+ /**
124
+ * Compress base64 image by reducing quality (basic implementation)
125
+ * Note: This is a placeholder - for production use, consider using sharp or similar
126
+ */
127
+ compressBase64: (base64: string, _quality?: number) => string;
84
128
  };
@@ -151,6 +151,8 @@ export class ImageProcessor {
151
151
  bmp: "image/bmp",
152
152
  tiff: "image/tiff",
153
153
  tif: "image/tiff",
154
+ svg: "image/svg+xml",
155
+ avif: "image/avif",
154
156
  };
155
157
  return imageTypes[extension || ""] || "image/jpeg";
156
158
  }
@@ -183,6 +185,21 @@ export class ImageProcessor {
183
185
  return "image/webp";
184
186
  }
185
187
  }
188
+ // SVG: check for "<svg" or "<?xml" at start (text-based)
189
+ if (input.length >= 4) {
190
+ const start = input.subarray(0, 4).toString();
191
+ if (start === "<svg" || start === "<?xm") {
192
+ return "image/svg+xml";
193
+ }
194
+ }
195
+ // AVIF: check for "ftypavif" signature at bytes 4-11
196
+ if (input.length >= 12) {
197
+ const ftyp = input.subarray(4, 8).toString();
198
+ const brand = input.subarray(8, 12).toString();
199
+ if (ftyp === "ftyp" && brand === "avif") {
200
+ return "image/avif";
201
+ }
202
+ }
186
203
  }
187
204
  return "image/jpeg"; // Default fallback
188
205
  }
@@ -217,6 +234,8 @@ export class ImageProcessor {
217
234
  "image/webp",
218
235
  "image/bmp",
219
236
  "image/tiff",
237
+ "image/svg+xml",
238
+ "image/avif",
220
239
  ];
221
240
  return supportedFormats.includes(mediaType.toLowerCase());
222
241
  }
@@ -332,14 +351,7 @@ export const imageUtils = {
332
351
  /**
333
352
  * Check if a string is base64 encoded
334
353
  */
335
- isBase64: (str) => {
336
- try {
337
- return btoa(atob(str)) === str;
338
- }
339
- catch {
340
- return false;
341
- }
342
- },
354
+ isBase64: (str) => imageUtils.isValidBase64(str),
343
355
  /**
344
356
  * Extract file extension from filename or URL
345
357
  */
@@ -359,4 +371,143 @@ export const imageUtils = {
359
371
  const i = Math.floor(Math.log(bytes) / Math.log(k));
360
372
  return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + " " + sizes[i];
361
373
  },
374
+ /**
375
+ * Convert Buffer to base64 string
376
+ */
377
+ bufferToBase64: (buffer) => {
378
+ return buffer.toString("base64");
379
+ },
380
+ /**
381
+ * Convert base64 string to Buffer
382
+ */
383
+ base64ToBuffer: (base64) => {
384
+ // Remove data URI prefix if present
385
+ const cleanBase64 = base64.includes(",") ? base64.split(",")[1] : base64;
386
+ return Buffer.from(cleanBase64, "base64");
387
+ },
388
+ /**
389
+ * Convert file path to base64 data URI
390
+ */
391
+ fileToBase64DataUri: async (filePath, maxBytes = 10 * 1024 * 1024) => {
392
+ try {
393
+ const fs = await import("fs/promises");
394
+ // File existence and type validation
395
+ const stat = await fs.stat(filePath);
396
+ if (!stat.isFile()) {
397
+ throw new Error("Not a file");
398
+ }
399
+ // Size check before reading - prevent memory exhaustion
400
+ if (stat.size > maxBytes) {
401
+ throw new Error(`File too large: ${stat.size} bytes (max: ${maxBytes} bytes)`);
402
+ }
403
+ const buffer = await fs.readFile(filePath);
404
+ // Enhanced MIME detection: try buffer content first, fallback to filename
405
+ const mimeType = ImageProcessor.detectImageType(buffer) ||
406
+ ImageProcessor.detectImageType(filePath);
407
+ const base64 = buffer.toString("base64");
408
+ return `data:${mimeType};base64,${base64}`;
409
+ }
410
+ catch (error) {
411
+ throw new Error(`Failed to convert file to base64: ${error instanceof Error ? error.message : "Unknown error"}`);
412
+ }
413
+ },
414
+ /**
415
+ * Convert URL to base64 data URI by downloading the image
416
+ */
417
+ urlToBase64DataUri: async (url, { timeoutMs = 15000, maxBytes = 10 * 1024 * 1024 } = {}) => {
418
+ try {
419
+ // Basic protocol whitelist
420
+ if (!/^https?:\/\//i.test(url)) {
421
+ throw new Error("Unsupported protocol");
422
+ }
423
+ const controller = new AbortController();
424
+ const t = setTimeout(() => controller.abort(), timeoutMs);
425
+ try {
426
+ const response = await fetch(url, { signal: controller.signal });
427
+ if (!response.ok) {
428
+ throw new Error(`HTTP ${response.status}: ${response.statusText}`);
429
+ }
430
+ const contentType = response.headers.get("content-type") || "";
431
+ if (!/^image\//i.test(contentType)) {
432
+ throw new Error(`Unsupported content-type: ${contentType || "unknown"}`);
433
+ }
434
+ const len = Number(response.headers.get("content-length") || 0);
435
+ if (len && len > maxBytes) {
436
+ throw new Error(`Content too large: ${len} bytes`);
437
+ }
438
+ const buffer = await response.arrayBuffer();
439
+ if (buffer.byteLength > maxBytes) {
440
+ throw new Error(`Downloaded content too large: ${buffer.byteLength} bytes`);
441
+ }
442
+ const base64 = Buffer.from(buffer).toString("base64");
443
+ return `data:${contentType || "image/jpeg"};base64,${base64}`;
444
+ }
445
+ finally {
446
+ clearTimeout(t);
447
+ }
448
+ }
449
+ catch (error) {
450
+ throw new Error(`Failed to download and convert URL to base64: ${error instanceof Error ? error.message : "Unknown error"}`);
451
+ }
452
+ },
453
+ /**
454
+ * Extract base64 data from data URI
455
+ */
456
+ extractBase64FromDataUri: (dataUri) => {
457
+ if (!dataUri.includes(",")) {
458
+ return dataUri; // Already just base64
459
+ }
460
+ return dataUri.split(",")[1];
461
+ },
462
+ /**
463
+ * Extract MIME type from data URI
464
+ */
465
+ extractMimeTypeFromDataUri: (dataUri) => {
466
+ const match = dataUri.match(/^data:([^;]+);base64,/);
467
+ return match ? match[1] : "image/jpeg";
468
+ },
469
+ /**
470
+ * Create data URI from base64 and MIME type
471
+ */
472
+ createDataUri: (base64, mimeType = "image/jpeg") => {
473
+ // Remove data URI prefix if already present
474
+ const cleanBase64 = base64.includes(",") ? base64.split(",")[1] : base64;
475
+ return `data:${mimeType};base64,${cleanBase64}`;
476
+ },
477
+ /**
478
+ * Validate base64 string format
479
+ */
480
+ isValidBase64: (str) => {
481
+ try {
482
+ // Remove data URI prefix if present
483
+ const cleanBase64 = str.includes(",") ? str.split(",")[1] : str;
484
+ // Check if it's valid base64
485
+ const decoded = Buffer.from(cleanBase64, "base64");
486
+ const reencoded = decoded.toString("base64");
487
+ // Remove padding for comparison (base64 can have different padding)
488
+ const normalizeBase64 = (b64) => b64.replace(/=+$/, "");
489
+ return normalizeBase64(cleanBase64) === normalizeBase64(reencoded);
490
+ }
491
+ catch {
492
+ return false;
493
+ }
494
+ },
495
+ /**
496
+ * Get base64 string size in bytes
497
+ */
498
+ getBase64Size: (base64) => {
499
+ // Remove data URI prefix if present
500
+ const cleanBase64 = base64.includes(",") ? base64.split(",")[1] : base64;
501
+ return Buffer.byteLength(cleanBase64, "base64");
502
+ },
503
+ /**
504
+ * Compress base64 image by reducing quality (basic implementation)
505
+ * Note: This is a placeholder - for production use, consider using sharp or similar
506
+ */
507
+ compressBase64: (base64, _quality = 0.8) => {
508
+ // This is a basic implementation that just returns the original
509
+ // In a real implementation, you'd use an image processing library
510
+ logger.warn("Base64 compression not implemented - returning original");
511
+ return base64;
512
+ },
362
513
  };
@@ -7,13 +7,12 @@ import type { MultimodalChatMessage } from "../types/conversation.js";
7
7
  import type { TextGenerationOptions } from "../types/index.js";
8
8
  import type { StreamOptions } from "../types/streamTypes.js";
9
9
  import type { GenerateOptions } from "../types/generateTypes.js";
10
+ import type { CoreMessage } from "ai";
10
11
  /**
11
- * Core message type compatible with AI SDK
12
+ * Type-safe conversion from MultimodalChatMessage[] to CoreMessage[]
13
+ * Filters out invalid content and ensures strict CoreMessage contract compliance
12
14
  */
13
- type CoreMessage = {
14
- role: "user" | "assistant" | "system";
15
- content: string;
16
- };
15
+ export declare function convertToCoreMessages(messages: MultimodalChatMessage[]): CoreMessage[];
17
16
  /**
18
17
  * Build a properly formatted message array for AI providers
19
18
  * Combines system prompt, conversation history, and current user prompt
@@ -25,4 +24,3 @@ export declare function buildMessagesArray(options: TextGenerationOptions | Stre
25
24
  * Detects when images are present and routes through provider adapter
26
25
  */
27
26
  export declare function buildMultimodalMessagesArray(options: GenerateOptions, provider: string, model: string): Promise<MultimodalChatMessage[]>;
28
- export {};
@@ -8,6 +8,147 @@ import { ProviderImageAdapter, MultimodalLogger, } from "../adapters/providerIma
8
8
  import { logger } from "./logger.js";
9
9
  import { request } from "undici";
10
10
  import { readFileSync, existsSync } from "fs";
11
+ /**
12
+ * Type guard for validating message roles
13
+ */
14
+ function isValidRole(role) {
15
+ return (typeof role === "string" &&
16
+ (role === "user" || role === "assistant" || role === "system"));
17
+ }
18
+ /**
19
+ * Type guard for validating content items
20
+ */
21
+ function isValidContentItem(item) {
22
+ if (!item || typeof item !== "object") {
23
+ return false;
24
+ }
25
+ const contentItem = item;
26
+ if (contentItem.type === "text") {
27
+ return typeof contentItem.text === "string";
28
+ }
29
+ if (contentItem.type === "image") {
30
+ return (typeof contentItem.image === "string" &&
31
+ (contentItem.mimeType === undefined ||
32
+ typeof contentItem.mimeType === "string"));
33
+ }
34
+ return false;
35
+ }
36
+ /**
37
+ * Safely convert content item to AI SDK content format
38
+ */
39
+ function convertContentItem(item) {
40
+ if (!isValidContentItem(item)) {
41
+ return null;
42
+ }
43
+ const contentItem = item;
44
+ if (contentItem.type === "text" && typeof contentItem.text === "string") {
45
+ return { type: "text", text: contentItem.text };
46
+ }
47
+ if (contentItem.type === "image" && typeof contentItem.image === "string") {
48
+ return {
49
+ type: "image",
50
+ image: contentItem.image,
51
+ ...(contentItem.mimeType && { mimeType: contentItem.mimeType }),
52
+ };
53
+ }
54
+ return null;
55
+ }
56
+ /**
57
+ * Type-safe conversion from MultimodalChatMessage[] to CoreMessage[]
58
+ * Filters out invalid content and ensures strict CoreMessage contract compliance
59
+ */
60
+ export function convertToCoreMessages(messages) {
61
+ return messages
62
+ .map((msg) => {
63
+ // Validate role
64
+ if (!isValidRole(msg.role)) {
65
+ logger.warn("Invalid message role found, skipping", { role: msg.role });
66
+ return null;
67
+ }
68
+ // Handle string content
69
+ if (typeof msg.content === "string") {
70
+ // Create properly typed discriminated union messages
71
+ if (msg.role === "system") {
72
+ return {
73
+ role: "system",
74
+ content: msg.content,
75
+ };
76
+ }
77
+ else if (msg.role === "user") {
78
+ return {
79
+ role: "user",
80
+ content: msg.content,
81
+ };
82
+ }
83
+ else if (msg.role === "assistant") {
84
+ return {
85
+ role: "assistant",
86
+ content: msg.content,
87
+ };
88
+ }
89
+ }
90
+ // Handle array content (multimodal) - only user messages support full multimodal content
91
+ if (Array.isArray(msg.content)) {
92
+ const validContent = msg.content
93
+ .map(convertContentItem)
94
+ .filter((item) => item !== null);
95
+ // If no valid content items, skip the message
96
+ if (validContent.length === 0) {
97
+ logger.warn("No valid content items found in multimodal message, skipping");
98
+ return null;
99
+ }
100
+ if (msg.role === "user") {
101
+ // User messages support both text and image content
102
+ return {
103
+ role: "user",
104
+ content: validContent,
105
+ };
106
+ }
107
+ else if (msg.role === "assistant") {
108
+ // Assistant messages only support text content, filter out images
109
+ const textOnlyContent = validContent.filter((item) => item.type === "text");
110
+ if (textOnlyContent.length === 0) {
111
+ // If no text content, convert to empty string
112
+ return {
113
+ role: "assistant",
114
+ content: "",
115
+ };
116
+ }
117
+ else if (textOnlyContent.length === 1) {
118
+ // Single text item, use string content
119
+ return {
120
+ role: "assistant",
121
+ content: textOnlyContent[0].text,
122
+ };
123
+ }
124
+ else {
125
+ // Multiple text items, concatenate them
126
+ const combinedText = textOnlyContent
127
+ .map((item) => item.text)
128
+ .join(" ");
129
+ return {
130
+ role: "assistant",
131
+ content: combinedText,
132
+ };
133
+ }
134
+ }
135
+ else {
136
+ // System messages cannot have multimodal content, convert to text
137
+ const textContent = validContent.find((item) => item.type === "text")?.text || "";
138
+ return {
139
+ role: "system",
140
+ content: textContent,
141
+ };
142
+ }
143
+ }
144
+ // Invalid content type
145
+ logger.warn("Invalid message content type found, skipping", {
146
+ contentType: typeof msg.content,
147
+ });
148
+ return null;
149
+ })
150
+ .filter((msg) => msg !== null);
151
+ }
11
152
  /**
12
153
  * Convert ChatMessage to CoreMessage for AI SDK compatibility
13
154
  */
@@ -84,7 +225,10 @@ export async function buildMultimodalMessagesArray(options, provider, model) {
84
225
  // If no images, use standard message building and convert to MultimodalChatMessage[]
85
226
  if (!hasImages) {
86
227
  const standardMessages = buildMessagesArray(options);
87
- return standardMessages.map((msg) => ({ ...msg, content: msg.content }));
228
+ return standardMessages.map((msg) => ({
229
+ role: msg.role,
230
+ content: typeof msg.content === "string" ? msg.content : msg.content,
231
+ }));
88
232
  }
89
233
  // Validate provider supports vision
90
234
  if (!ProviderImageAdapter.supportsVision(provider, model)) {
@@ -0,0 +1,14 @@
1
+ /**
2
+ * @file Implements the Auto-Evaluation Middleware for ensuring response quality.
3
+ */
4
+ import type { NeuroLinkMiddleware, AutoEvaluationConfig } from "../../types/middlewareTypes.js";
5
+ /**
6
+ * Creates the Auto-Evaluation middleware, which intercepts generation requests
7
+ * to evaluate the quality of the response. If the response quality is below a
8
+ * configured threshold, it can trigger retries with feedback.
9
+ *
10
+ * @param config - Configuration for the auto-evaluation middleware.
11
+ * @returns A `NeuroLinkMiddleware` object.
12
+ */
13
+ export declare function createAutoEvaluationMiddleware(config?: AutoEvaluationConfig): NeuroLinkMiddleware;
14
+ export default createAutoEvaluationMiddleware;