@juspay/neurolink 8.5.1 → 8.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/CHANGELOG.md +30 -0
  2. package/dist/adapters/providerImageAdapter.d.ts +4 -2
  3. package/dist/adapters/providerImageAdapter.js +16 -2
  4. package/dist/cli/factories/commandFactory.d.ts +5 -0
  5. package/dist/cli/factories/commandFactory.js +96 -0
  6. package/dist/cli/utils/audioFileUtils.d.ts +70 -0
  7. package/dist/cli/utils/audioFileUtils.js +174 -0
  8. package/dist/core/baseProvider.js +6 -2
  9. package/dist/core/modules/TelemetryHandler.js +6 -1
  10. package/dist/lib/adapters/providerImageAdapter.d.ts +4 -2
  11. package/dist/lib/adapters/providerImageAdapter.js +16 -2
  12. package/dist/lib/core/baseProvider.js +6 -2
  13. package/dist/lib/core/modules/TelemetryHandler.js +6 -1
  14. package/dist/lib/middleware/builtin/guardrails.js +7 -0
  15. package/dist/lib/neurolink.js +75 -5
  16. package/dist/lib/telemetry/telemetryService.d.ts +1 -1
  17. package/dist/lib/telemetry/telemetryService.js +4 -4
  18. package/dist/lib/types/cli.d.ts +2 -0
  19. package/dist/lib/types/common.d.ts +5 -0
  20. package/dist/lib/types/content.d.ts +1 -1
  21. package/dist/lib/types/fileTypes.d.ts +13 -12
  22. package/dist/lib/types/generateTypes.d.ts +19 -2
  23. package/dist/lib/types/index.d.ts +1 -0
  24. package/dist/lib/types/index.js +2 -0
  25. package/dist/lib/types/multimodal.d.ts +38 -1
  26. package/dist/lib/types/streamTypes.d.ts +21 -2
  27. package/dist/lib/types/ttsTypes.d.ts +91 -0
  28. package/dist/lib/types/ttsTypes.js +58 -0
  29. package/dist/lib/utils/imageProcessor.d.ts +38 -5
  30. package/dist/lib/utils/imageProcessor.js +131 -7
  31. package/dist/lib/utils/messageBuilder.js +52 -7
  32. package/dist/lib/utils/multimodalOptionsBuilder.d.ts +1 -1
  33. package/dist/lib/utils/pdfProcessor.js +24 -2
  34. package/dist/middleware/builtin/guardrails.js +7 -0
  35. package/dist/neurolink.js +75 -5
  36. package/dist/telemetry/telemetryService.d.ts +1 -1
  37. package/dist/telemetry/telemetryService.js +4 -4
  38. package/dist/types/cli.d.ts +2 -0
  39. package/dist/types/common.d.ts +5 -0
  40. package/dist/types/content.d.ts +1 -1
  41. package/dist/types/fileTypes.d.ts +13 -12
  42. package/dist/types/generateTypes.d.ts +19 -2
  43. package/dist/types/index.d.ts +1 -0
  44. package/dist/types/index.js +2 -0
  45. package/dist/types/multimodal.d.ts +38 -1
  46. package/dist/types/streamTypes.d.ts +21 -2
  47. package/dist/types/ttsTypes.d.ts +91 -0
  48. package/dist/types/ttsTypes.js +57 -0
  49. package/dist/utils/imageProcessor.d.ts +38 -5
  50. package/dist/utils/imageProcessor.js +131 -7
  51. package/dist/utils/messageBuilder.js +52 -7
  52. package/dist/utils/multimodalOptionsBuilder.d.ts +1 -1
  53. package/dist/utils/pdfProcessor.js +24 -2
  54. package/package.json +7 -4
@@ -1998,19 +1998,85 @@ Current user's request: ${currentInput}`;
1998
1998
  }
1999
1999
  }
2000
2000
  const { stream: mcpStream, provider: providerName } = await this.createMCPStream(enhancedOptions);
2001
- // Create a wrapper around the stream that accumulates content
2002
2001
  let accumulatedContent = "";
2002
+ let chunkCount = 0;
2003
+ const metadata = {
2004
+ fallbackAttempted: false,
2005
+ guardrailsBlocked: false,
2006
+ error: undefined,
2007
+ };
2003
2008
  const processedStream = (async function* (self) {
2004
2009
  try {
2005
2010
  for await (const chunk of mcpStream) {
2011
+ chunkCount++;
2006
2012
  if (chunk &&
2007
2013
  "content" in chunk &&
2008
2014
  typeof chunk.content === "string") {
2009
2015
  accumulatedContent += chunk.content;
2010
- // Emit chunk event for compatibility
2011
2016
  self.emitter.emit("response:chunk", chunk.content);
2012
2017
  }
2013
- yield chunk; // Preserve original streaming behavior
2018
+ yield chunk;
2019
+ }
2020
+ if (chunkCount === 0 && !metadata.fallbackAttempted) {
2021
+ metadata.fallbackAttempted = true;
2022
+ const errorMsg = "Stream completed with 0 chunks (possible guardrails block)";
2023
+ metadata.error = errorMsg;
2024
+ const fallbackRoute = ModelRouter.getFallbackRoute(originalPrompt || enhancedOptions.input.text || "", {
2025
+ provider: providerName,
2026
+ model: enhancedOptions.model || "gpt-4o",
2027
+ reasoning: "primary failed",
2028
+ confidence: 0.5,
2029
+ }, { fallbackStrategy: "auto" });
2030
+ logger.warn("Retrying with fallback provider", {
2031
+ originalProvider: providerName,
2032
+ fallbackProvider: fallbackRoute.provider,
2033
+ reason: errorMsg,
2034
+ });
2035
+ try {
2036
+ const fallbackProvider = await AIProviderFactory.createProvider(fallbackRoute.provider, fallbackRoute.model);
2037
+ // Ensure fallback provider can execute tools
2038
+ fallbackProvider.setupToolExecutor({
2039
+ customTools: self.getCustomTools(),
2040
+ executeTool: self.executeTool.bind(self),
2041
+ }, "NeuroLink.fallbackStream");
2042
+ // Get conversation messages for context (same as primary stream)
2043
+ const conversationMessages = await getConversationMessages(self.conversationMemory, {
2044
+ prompt: enhancedOptions.input.text,
2045
+ context: enhancedOptions.context,
2046
+ });
2047
+ const fallbackResult = await fallbackProvider.stream({
2048
+ ...enhancedOptions,
2049
+ model: fallbackRoute.model,
2050
+ conversationMessages,
2051
+ });
2052
+ let fallbackChunkCount = 0;
2053
+ for await (const fallbackChunk of fallbackResult.stream) {
2054
+ fallbackChunkCount++;
2055
+ if (fallbackChunk &&
2056
+ "content" in fallbackChunk &&
2057
+ typeof fallbackChunk.content === "string") {
2058
+ accumulatedContent += fallbackChunk.content;
2059
+ self.emitter.emit("response:chunk", fallbackChunk.content);
2060
+ }
2061
+ yield fallbackChunk;
2062
+ }
2063
+ if (fallbackChunkCount === 0) {
2064
+ throw new Error(`Fallback provider ${fallbackRoute.provider} also returned 0 chunks`);
2065
+ }
2066
+ // Fallback succeeded - likely guardrails blocked primary
2067
+ metadata.guardrailsBlocked = true;
2068
+ }
2069
+ catch (fallbackError) {
2070
+ const fallbackErrorMsg = fallbackError instanceof Error
2071
+ ? fallbackError.message
2072
+ : String(fallbackError);
2073
+ metadata.error = `${errorMsg}; Fallback failed: ${fallbackErrorMsg}`;
2074
+ logger.error("Fallback provider failed", {
2075
+ fallbackProvider: fallbackRoute.provider,
2076
+ error: fallbackErrorMsg,
2077
+ });
2078
+ throw fallbackError;
2079
+ }
2014
2080
  }
2015
2081
  }
2016
2082
  finally {
@@ -2053,7 +2119,7 @@ Current user's request: ${currentInput}`;
2053
2119
  }
2054
2120
  }
2055
2121
  })(this);
2056
- const streamResult = await this.processStreamResult(mcpStream, enhancedOptions, factoryResult);
2122
+ const streamResult = await this.processStreamResult(processedStream, enhancedOptions, factoryResult);
2057
2123
  const responseTime = Date.now() - startTime;
2058
2124
  this.emitStreamEndEvents(streamResult);
2059
2125
  return this.createStreamResponse(streamResult, processedStream, {
@@ -2062,7 +2128,9 @@ Current user's request: ${currentInput}`;
2062
2128
  startTime,
2063
2129
  responseTime,
2064
2130
  streamId,
2065
- fallback: false,
2131
+ fallback: metadata.fallbackAttempted,
2132
+ guardrailsBlocked: metadata.guardrailsBlocked,
2133
+ error: metadata.error,
2066
2134
  });
2067
2135
  }
2068
2136
  catch (error) {
@@ -2181,6 +2249,8 @@ Current user's request: ${currentInput}`;
2181
2249
  startTime: config.startTime,
2182
2250
  responseTime: config.responseTime,
2183
2251
  fallback: config.fallback || false,
2252
+ guardrailsBlocked: config.guardrailsBlocked,
2253
+ error: config.error,
2184
2254
  },
2185
2255
  };
2186
2256
  }
@@ -31,7 +31,7 @@ export declare class TelemetryService {
31
31
  private initializeTelemetry;
32
32
  private initializeMetrics;
33
33
  initialize(): Promise<void>;
34
- traceAIRequest<T>(provider: string, operation: () => Promise<T>): Promise<T>;
34
+ traceAIRequest<T>(provider: string, operation: () => Promise<T>, operationType?: string): Promise<T>;
35
35
  recordAIRequest(provider: string, model: string, tokens: number, duration: number): void;
36
36
  recordAIError(provider: string, error: Error): void;
37
37
  recordMCPToolCall(toolName: string, duration: number, success: boolean): void;
@@ -108,14 +108,14 @@ export class TelemetryService {
108
108
  }
109
109
  }
110
110
  // AI Operation Tracing (NO-OP when disabled)
111
- async traceAIRequest(provider, operation) {
111
+ async traceAIRequest(provider, operation, operationType = "generate_text") {
112
112
  if (!this.enabled || !this.tracer) {
113
- return await operation(); // Direct execution when disabled
113
+ return await operation();
114
114
  }
115
- const span = this.tracer.startSpan(`ai.${provider}.generate_text`, {
115
+ const span = this.tracer.startSpan(`ai.${provider}.${operationType}`, {
116
116
  attributes: {
117
117
  "ai.provider": provider,
118
- "ai.operation": "generate_text",
118
+ "ai.operation": operationType,
119
119
  },
120
120
  });
121
121
  try {
@@ -337,6 +337,8 @@ export type GenerateResult = CommandResult & {
337
337
  name: string;
338
338
  description: string;
339
339
  }>;
340
+ /** TTS audio result when TTS is enabled */
341
+ audio?: import("./index.js").TTSResult;
340
342
  };
341
343
  /**
342
344
  * Stream result chunk
@@ -129,3 +129,8 @@ export type TypedEventEmitter<TEvents extends Record<string, unknown>> = {
129
129
  listenerCount<K extends keyof TEvents>(event: K): number;
130
130
  listeners<K extends keyof TEvents>(event: K): Array<(...args: unknown[]) => void>;
131
131
  };
132
+ export type Context = {
133
+ traceName?: string;
134
+ userId?: string;
135
+ sessionId?: string;
136
+ };
@@ -14,5 +14,5 @@
14
14
  * import type { MultimodalInput } from './types/multimodal.js';
15
15
  * ```
16
16
  */
17
- export type { TextContent, ImageContent, CSVContent, PDFContent, AudioContent, VideoContent, Content, MultimodalInput, MultimodalMessage, VisionCapability, ProviderImageFormat, ProcessedImage, ProviderMultimodalPayload, } from "./multimodal.js";
17
+ export type { TextContent, ImageContent, CSVContent, PDFContent, AudioContent, VideoContent, Content, ImageWithAltText, MultimodalInput, MultimodalMessage, VisionCapability, ProviderImageFormat, ProcessedImage, ProviderMultimodalPayload, } from "./multimodal.js";
18
18
  export { isTextContent, isImageContent, isCSVContent, isPDFContent, isAudioContent, isVideoContent, isMultimodalInput, } from "./multimodal.js";
@@ -81,18 +81,7 @@ export type PDFProcessorOptions = {
81
81
  bedrockApiMode?: "converse" | "invokeModel";
82
82
  };
83
83
  /**
84
- * File detector options
85
- */
86
- export type FileDetectorOptions = {
87
- maxSize?: number;
88
- timeout?: number;
89
- allowedTypes?: FileType[];
90
- csvOptions?: CSVProcessorOptions;
91
- confidenceThreshold?: number;
92
- provider?: string;
93
- };
94
- /**
95
- * Audio processor options for transcription configuration
84
+ * Audio processor options
96
85
  */
97
86
  export type AudioProcessorOptions = {
98
87
  /** AI provider to use for transcription (e.g., 'openai', 'google', 'azure') */
@@ -108,6 +97,18 @@ export type AudioProcessorOptions = {
108
97
  /** Maximum file size in megabytes */
109
98
  maxSizeMB?: number;
110
99
  };
100
+ /**
101
+ * File detector options
102
+ */
103
+ export type FileDetectorOptions = {
104
+ maxSize?: number;
105
+ timeout?: number;
106
+ allowedTypes?: FileType[];
107
+ audioOptions?: AudioProcessorOptions;
108
+ csvOptions?: CSVProcessorOptions;
109
+ confidenceThreshold?: number;
110
+ provider?: string;
111
+ };
111
112
  /**
112
113
  * Google AI Studio Files API types
113
114
  */
@@ -6,7 +6,7 @@ import type { EvaluationData } from "./evaluation.js";
6
6
  import type { ChatMessage, ConversationMemoryConfig } from "./conversation.js";
7
7
  import type { MiddlewareFactoryOptions } from "./middlewareTypes.js";
8
8
  import type { JsonValue } from "./common.js";
9
- import type { Content } from "./content.js";
9
+ import type { Content, ImageWithAltText } from "./content.js";
10
10
  /**
11
11
  * Generate function options type - Primary method for content generation
12
12
  * Supports multimodal content while maintaining backward compatibility
@@ -14,7 +14,24 @@ import type { Content } from "./content.js";
14
14
  export type GenerateOptions = {
15
15
  input: {
16
16
  text: string;
17
- images?: Array<Buffer | string>;
17
+ /**
18
+ * Images to include in the request.
19
+ * Supports simple image data (Buffer, string) or objects with alt text for accessibility.
20
+ *
21
+ * @example Simple usage
22
+ * ```typescript
23
+ * images: [imageBuffer, "https://example.com/image.jpg"]
24
+ * ```
25
+ *
26
+ * @example With alt text for accessibility
27
+ * ```typescript
28
+ * images: [
29
+ * { data: imageBuffer, altText: "Product screenshot showing main dashboard" },
30
+ * { data: "https://example.com/chart.png", altText: "Sales chart for Q3 2024" }
31
+ * ]
32
+ * ```
33
+ */
34
+ images?: Array<Buffer | string | ImageWithAltText>;
18
35
  csvFiles?: Array<Buffer | string>;
19
36
  pdfFiles?: Array<Buffer | string>;
20
37
  files?: Array<Buffer | string>;
@@ -32,3 +32,4 @@ export * from "./utilities.js";
32
32
  export * from "./middlewareTypes.js";
33
33
  export * from "./fileTypes.js";
34
34
  export * from "./content.js";
35
+ export * from "./ttsTypes.js";
@@ -35,4 +35,6 @@ export * from "./middlewareTypes.js";
35
35
  export * from "./fileTypes.js";
36
36
  // Content types for multimodal support
37
37
  export * from "./content.js";
38
+ // TTS (Text-to-Speech) types
39
+ export * from "./ttsTypes.js";
38
40
  //# sourceMappingURL=index.js.map
@@ -52,6 +52,8 @@ export type TextContent = {
52
52
  export type ImageContent = {
53
53
  type: "image";
54
54
  data: Buffer | string;
55
+ /** Alternative text for accessibility (screen readers, SEO) */
56
+ altText?: string;
55
57
  mediaType?: "image/jpeg" | "image/png" | "image/gif" | "image/webp" | "image/bmp" | "image/tiff";
56
58
  metadata?: {
57
59
  description?: string;
@@ -164,13 +166,48 @@ export type VideoContent = {
164
166
  * Covers text, images, documents, and multimedia
165
167
  */
166
168
  export type Content = TextContent | ImageContent | CSVContent | PDFContent | AudioContent | VideoContent;
169
+ /**
170
+ * Image data with optional alt text for accessibility
171
+ * Use this when you need to provide alt text for screen readers and SEO
172
+ *
173
+ * @example
174
+ * ```typescript
175
+ * const imageWithAlt: ImageWithAltText = {
176
+ * data: imageBuffer,
177
+ * altText: "A dashboard showing quarterly sales trends"
178
+ * };
179
+ * ```
180
+ */
181
+ export type ImageWithAltText = {
182
+ /** Image data as Buffer, base64 string, URL, or data URI */
183
+ data: Buffer | string;
184
+ /** Alternative text for accessibility (screen readers, SEO) */
185
+ altText?: string;
186
+ };
167
187
  /**
168
188
  * Multimodal input type for options that may contain images or content arrays
169
189
  * This is the primary interface for users to provide multimodal content
170
190
  */
171
191
  export type MultimodalInput = {
172
192
  text: string;
173
- images?: Array<Buffer | string>;
193
+ /**
194
+ * Images to include in the request.
195
+ * Can be simple image data (Buffer, string) or objects with alt text for accessibility.
196
+ *
197
+ * @example Simple usage
198
+ * ```typescript
199
+ * images: [imageBuffer, "https://example.com/image.jpg"]
200
+ * ```
201
+ *
202
+ * @example With alt text for accessibility
203
+ * ```typescript
204
+ * images: [
205
+ * { data: imageBuffer, altText: "Product screenshot showing main dashboard" },
206
+ * { data: "https://example.com/chart.png", altText: "Sales chart for Q3 2024" }
207
+ * ]
208
+ * ```
209
+ */
210
+ images?: Array<Buffer | string | ImageWithAltText>;
174
211
  content?: Content[];
175
212
  csvFiles?: Array<Buffer | string>;
176
213
  pdfFiles?: Array<Buffer | string>;
@@ -1,7 +1,7 @@
1
1
  import type { Tool } from "ai";
2
2
  import type { ValidationSchema, StandardRecord } from "./typeAliases.js";
3
3
  import type { AIModelProviderConfig } from "./providers.js";
4
- import type { Content } from "./content.js";
4
+ import type { Content, ImageWithAltText } from "./content.js";
5
5
  import type { AnalyticsData, ToolExecutionEvent, ToolExecutionSummary } from "../types/index.js";
6
6
  import { AIProviderName } from "../constants/enums.js";
7
7
  import type { TokenUsage } from "./analytics.js";
@@ -125,7 +125,24 @@ export type StreamOptions = {
125
125
  input: {
126
126
  text: string;
127
127
  audio?: AudioInputSpec;
128
- images?: Array<Buffer | string>;
128
+ /**
129
+ * Images to include in the request.
130
+ * Supports simple image data (Buffer, string) or objects with alt text for accessibility.
131
+ *
132
+ * @example Simple usage
133
+ * ```typescript
134
+ * images: [imageBuffer, "https://example.com/image.jpg"]
135
+ * ```
136
+ *
137
+ * @example With alt text for accessibility
138
+ * ```typescript
139
+ * images: [
140
+ * { data: imageBuffer, altText: "Product screenshot showing main dashboard" },
141
+ * { data: "https://example.com/chart.png", altText: "Sales chart for Q3 2024" }
142
+ * ]
143
+ * ```
144
+ */
145
+ images?: Array<Buffer | string | ImageWithAltText>;
129
146
  csvFiles?: Array<Buffer | string>;
130
147
  pdfFiles?: Array<Buffer | string>;
131
148
  files?: Array<Buffer | string>;
@@ -211,6 +228,8 @@ export type StreamResult = {
211
228
  totalToolExecutions?: number;
212
229
  toolExecutionTime?: number;
213
230
  hasToolErrors?: boolean;
231
+ guardrailsBlocked?: boolean;
232
+ error?: string;
214
233
  };
215
234
  analytics?: AnalyticsData | Promise<AnalyticsData>;
216
235
  evaluation?: EvaluationData | Promise<EvaluationData>;
@@ -0,0 +1,91 @@
1
+ /**
2
+ * Text-to-Speech (TTS) Type Definitions for NeuroLink
3
+ *
4
+ * This module defines types for TTS audio generation and output.
5
+ *
6
+ * @module types/ttsTypes
7
+ */
8
+ /**
9
+ * Supported audio formats for TTS output
10
+ */
11
+ export type AudioFormat = "mp3" | "wav" | "ogg" | "opus";
12
+ /**
13
+ * TTS quality settings
14
+ */
15
+ export type TTSQuality = "standard" | "hd";
16
+ /**
17
+ * TTS configuration options
18
+ */
19
+ export type TTSOptions = {
20
+ /** Enable TTS output */
21
+ enabled?: boolean;
22
+ /** Voice identifier (e.g., "en-US-Neural2-C") */
23
+ voice?: string;
24
+ /** Audio format (default: mp3) */
25
+ format?: AudioFormat;
26
+ /** Speaking rate 0.25-4.0 (default: 1.0) */
27
+ speed?: number;
28
+ /** Audio quality (default: standard) */
29
+ quality?: TTSQuality;
30
+ /** Output file path (optional) */
31
+ output?: string;
32
+ /** Auto-play audio after generation (default: false) */
33
+ play?: boolean;
34
+ };
35
+ /**
36
+ * TTS audio result returned from generation
37
+ */
38
+ export type TTSResult = {
39
+ /** Audio data as Buffer */
40
+ buffer: Buffer;
41
+ /** Audio format */
42
+ format: AudioFormat;
43
+ /** Audio file size in bytes */
44
+ size: number;
45
+ /** Duration in seconds (if available) */
46
+ duration?: number;
47
+ /** Voice used for generation */
48
+ voice?: string;
49
+ /** Sample rate in Hz */
50
+ sampleRate?: number;
51
+ };
52
+ /**
53
+ * Result of saving audio to file
54
+ */
55
+ export type AudioSaveResult = {
56
+ /** Whether the save was successful */
57
+ success: boolean;
58
+ /** Full path to the saved file */
59
+ path: string;
60
+ /** File size in bytes */
61
+ size: number;
62
+ /** Error message if failed */
63
+ error?: string;
64
+ };
65
+ /**
66
+ * TTS voice information
67
+ */
68
+ export type TTSVoice = {
69
+ /** Voice identifier */
70
+ id: string;
71
+ /** Display name */
72
+ name: string;
73
+ /** Language code (e.g., "en-US") */
74
+ languageCode: string;
75
+ /** Gender */
76
+ gender: "male" | "female" | "neutral";
77
+ /** Voice type */
78
+ type: "neural" | "wavenet" | "standard";
79
+ };
80
+ /** Valid audio formats as an array for runtime validation */
81
+ export declare const VALID_AUDIO_FORMATS: readonly AudioFormat[];
82
+ /** Valid TTS quality levels as an array for runtime validation */
83
+ export declare const VALID_TTS_QUALITIES: readonly TTSQuality[];
84
+ /**
85
+ * Type guard to check if an object is a TTSResult
86
+ */
87
+ export declare function isTTSResult(value: unknown): value is TTSResult;
88
+ /**
89
+ * Type guard to check if TTSOptions are valid
90
+ */
91
+ export declare function isValidTTSOptions(options: unknown): options is TTSOptions;
@@ -0,0 +1,58 @@
1
/**
 * Text-to-Speech (TTS) Type Definitions for NeuroLink
 *
 * This module defines types for TTS audio generation and output.
 *
 * @module types/ttsTypes
 */
/** Valid audio formats as an array for runtime validation */
export const VALID_AUDIO_FORMATS = [
    "mp3",
    "wav",
    "ogg",
    "opus",
];
/** Valid TTS quality levels as an array for runtime validation */
export const VALID_TTS_QUALITIES = ["standard", "hd"];
/**
 * Type guard to check if an object is a TTSResult.
 * Requires a Buffer `buffer`, a `format` from VALID_AUDIO_FORMATS,
 * and a non-negative numeric `size`.
 */
export function isTTSResult(value) {
    if (!value || typeof value !== "object") {
        return false;
    }
    const obj = value;
    return (Buffer.isBuffer(obj.buffer) &&
        typeof obj.format === "string" &&
        VALID_AUDIO_FORMATS.includes(obj.format) &&
        typeof obj.size === "number" &&
        obj.size >= 0);
}
/**
 * Type guard to check if TTSOptions are valid.
 * Validates `speed` (finite number within [0.25, 4.0]), `format`, and
 * `quality` when present; every field is optional, so `{}` is valid.
 */
export function isValidTTSOptions(options) {
    if (!options || typeof options !== "object") {
        return false;
    }
    const opts = options;
    if (opts.speed !== undefined) {
        // Number.isFinite rejects NaN (and Infinity); a bare typeof check
        // lets NaN slip through because NaN fails both range comparisons.
        if (!Number.isFinite(opts.speed) ||
            opts.speed < 0.25 ||
            opts.speed > 4.0) {
            return false;
        }
    }
    if (opts.format !== undefined) {
        if (!VALID_AUDIO_FORMATS.includes(opts.format)) {
            return false;
        }
    }
    if (opts.quality !== undefined) {
        if (!VALID_TTS_QUALITIES.includes(opts.quality)) {
            return false;
        }
    }
    return true;
}
//# sourceMappingURL=ttsTypes.js.map
@@ -17,6 +17,18 @@ export declare class ImageProcessor {
17
17
  * @returns Processed image as data URI
18
18
  */
19
19
  static process(content: Buffer, _options?: unknown): Promise<FileProcessingResult>;
20
+ /**
21
+ * Validate processed output meets required format
22
+ * Checks:
23
+ * - Base64 content is non-empty
24
+ * - Data URI format is valid (data:{mimeType};base64,{content})
25
+ * - MIME type is in the allowed list
26
+ * @param dataUri - The complete data URI string
27
+ * @param base64 - The base64-encoded content
28
+ * @param mediaType - The MIME type of the image
29
+ * @throws Error if any validation fails
30
+ */
31
+ private static validateProcessOutput;
20
32
  /**
21
33
  * Process image for OpenAI (requires data URI format)
22
34
  */
@@ -104,11 +116,32 @@ export declare const imageUtils: {
104
116
  */
105
117
  fileToBase64DataUri: (filePath: string, maxBytes?: number) => Promise<string>;
106
118
  /**
107
- * Convert URL to base64 data URI by downloading the image
108
- */
109
- urlToBase64DataUri: (url: string, { timeoutMs, maxBytes }?: {
110
- timeoutMs?: number | undefined;
111
- maxBytes?: number | undefined;
119
+ * Convert URL to base64 data URI by downloading the image.
120
+ * Implements retry logic with exponential backoff for network errors.
121
+ *
122
+ * Retries are performed for:
123
+ * - Network errors (ECONNRESET, ENOTFOUND, ECONNREFUSED, ETIMEDOUT, ERR_NETWORK, AbortError)
124
+ * - Server errors (5xx status codes)
125
+ * - Rate limiting (429 Too Many Requests)
126
+ * - Request timeouts (408 Request Timeout)
127
+ *
128
+ * Retries are NOT performed for:
129
+ * - Client errors (4xx status codes except 408, 429)
130
+ * - Invalid content type
131
+ * - Content size limit exceeded
132
+ * - Unsupported protocol
133
+ *
134
+ * @param url - The URL of the image to download
135
+ * @param options - Configuration options
136
+ * @param options.timeoutMs - Timeout for each download attempt (default: 15000ms)
137
+ * @param options.maxBytes - Maximum allowed file size (default: 10MB)
138
+ * @param options.maxAttempts - Maximum number of total attempts including initial attempt (default: 3)
139
+ * @returns Promise<string> - Base64 data URI of the downloaded image
140
+ */
141
+ urlToBase64DataUri: (url: string, { timeoutMs, maxBytes, maxAttempts, }?: {
142
+ timeoutMs?: number;
143
+ maxBytes?: number;
144
+ maxAttempts?: number;
112
145
  }) => Promise<string>;
113
146
  /**
114
147
  * Extract base64 data from data URI