@juspay/neurolink 8.5.0 → 8.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/CHANGELOG.md +17 -0
  2. package/dist/adapters/providerImageAdapter.d.ts +4 -2
  3. package/dist/adapters/providerImageAdapter.js +72 -11
  4. package/dist/config/conversationMemory.d.ts +6 -0
  5. package/dist/config/conversationMemory.js +14 -0
  6. package/dist/constants/enums.d.ts +23 -3
  7. package/dist/constants/enums.js +30 -4
  8. package/dist/constants/tokens.d.ts +27 -12
  9. package/dist/constants/tokens.js +46 -12
  10. package/dist/core/baseProvider.js +6 -2
  11. package/dist/core/modules/GenerationHandler.js +20 -5
  12. package/dist/core/modules/MessageBuilder.js +4 -0
  13. package/dist/core/modules/TelemetryHandler.js +6 -1
  14. package/dist/lib/adapters/providerImageAdapter.d.ts +4 -2
  15. package/dist/lib/adapters/providerImageAdapter.js +72 -11
  16. package/dist/lib/config/conversationMemory.d.ts +6 -0
  17. package/dist/lib/config/conversationMemory.js +14 -0
  18. package/dist/lib/constants/enums.d.ts +23 -3
  19. package/dist/lib/constants/enums.js +30 -4
  20. package/dist/lib/constants/tokens.d.ts +27 -12
  21. package/dist/lib/constants/tokens.js +46 -12
  22. package/dist/lib/core/baseProvider.js +6 -2
  23. package/dist/lib/core/modules/GenerationHandler.js +20 -5
  24. package/dist/lib/core/modules/MessageBuilder.js +4 -0
  25. package/dist/lib/core/modules/TelemetryHandler.js +6 -1
  26. package/dist/lib/middleware/builtin/guardrails.js +7 -0
  27. package/dist/lib/models/modelRegistry.js +93 -0
  28. package/dist/lib/neurolink.js +75 -5
  29. package/dist/lib/providers/googleAiStudio.d.ts +27 -0
  30. package/dist/lib/providers/googleAiStudio.js +27 -0
  31. package/dist/lib/providers/googleVertex.d.ts +35 -0
  32. package/dist/lib/providers/googleVertex.js +38 -0
  33. package/dist/lib/telemetry/telemetryService.d.ts +1 -1
  34. package/dist/lib/telemetry/telemetryService.js +4 -4
  35. package/dist/lib/types/common.d.ts +5 -0
  36. package/dist/lib/types/content.d.ts +1 -1
  37. package/dist/lib/types/generateTypes.d.ts +68 -2
  38. package/dist/lib/types/multimodal.d.ts +38 -1
  39. package/dist/lib/types/streamTypes.d.ts +21 -2
  40. package/dist/lib/utils/messageBuilder.js +70 -8
  41. package/dist/lib/utils/multimodalOptionsBuilder.d.ts +1 -1
  42. package/dist/middleware/builtin/guardrails.js +7 -0
  43. package/dist/models/modelRegistry.js +93 -0
  44. package/dist/neurolink.js +75 -5
  45. package/dist/providers/googleAiStudio.d.ts +27 -0
  46. package/dist/providers/googleAiStudio.js +27 -0
  47. package/dist/providers/googleVertex.d.ts +35 -0
  48. package/dist/providers/googleVertex.js +38 -0
  49. package/dist/telemetry/telemetryService.d.ts +1 -1
  50. package/dist/telemetry/telemetryService.js +4 -4
  51. package/dist/types/common.d.ts +5 -0
  52. package/dist/types/content.d.ts +1 -1
  53. package/dist/types/generateTypes.d.ts +68 -2
  54. package/dist/types/multimodal.d.ts +38 -1
  55. package/dist/types/streamTypes.d.ts +21 -2
  56. package/dist/utils/messageBuilder.js +70 -8
  57. package/dist/utils/multimodalOptionsBuilder.d.ts +1 -1
  58. package/package.json +1 -1
@@ -63,6 +63,8 @@ export class MessageBuilder {
        enableEvaluation: options.enableEvaluation,
        context: options.context,
        conversationHistory: options.conversationMessages,
+       schema: options.schema,
+       output: options.output,
    };
    messages = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
  }
@@ -143,6 +145,8 @@ export class MessageBuilder {
        context: options.context,
        conversationHistory: options
            .conversationMessages,
+       schema: options.schema,
+       output: options.output,
    };
    messages = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
  }
@@ -120,12 +120,17 @@ export class TelemetryHandler {
    if (!this.neurolink?.isTelemetryEnabled()) {
        return undefined;
    }
-   const functionId = `${this.providerName}-${operationType}-${nanoid()}`;
+   const context = options.context;
+   const traceName = context?.traceName;
+   const userId = context?.userId;
+   const functionId = traceName ? traceName : userId ? userId : "guest";
    const metadata = {
        provider: this.providerName,
        model: this.modelName,
        toolsEnabled: !options.disableTools,
        neurolink: true,
+       operationType,
+       originalProvider: this.providerName,
    };
    // Add sessionId if available
    if ("sessionId" in options && options.sessionId) {
@@ -69,8 +69,10 @@ export function createGuardrailsMiddleware(config = {}) {
        };
    }
    const { stream, ...rest } = await doStream();
+   let hasYieldedChunks = false;
    const transformStream = new TransformStream({
        transform(chunk, controller) {
+           hasYieldedChunks = true;
            let filteredChunk = chunk;
            if (typeof filteredChunk === "object" &&
                "textDelta" in filteredChunk) {
@@ -84,6 +86,11 @@ export function createGuardrailsMiddleware(config = {}) {
            }
            controller.enqueue(filteredChunk);
        },
+       flush() {
+           if (!hasYieldedChunks) {
+               logger.warn(`[GuardrailsMiddleware] Stream ended without yielding any chunks`);
+           }
+       },
    });
    return {
        stream: stream.pipeThrough(transformStream),
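The new `flush()` callback is the standard place to run logic once the upstream closes. A standalone sketch of the same empty-stream detection pattern, independent of the guardrails middleware:

```typescript
// Detect a stream that closes without producing any chunks,
// using TransformStream.flush() exactly as the middleware above does.
function detectEmptyStream<T>(onEmpty: () => void): TransformStream<T, T> {
  let sawChunk = false;
  return new TransformStream<T, T>({
    transform(chunk, controller) {
      sawChunk = true;
      controller.enqueue(chunk); // pass chunks through unchanged
    },
    flush() {
      if (!sawChunk) {
        onEmpty(); // upstream ended with zero chunks
      }
    },
  });
}
```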
@@ -188,6 +188,99 @@ export const MODEL_REGISTRY = {
        category: "general",
    },
    // Anthropic Models
+   [AnthropicModels.CLAUDE_OPUS_4_5]: {
+       id: AnthropicModels.CLAUDE_OPUS_4_5,
+       name: "Claude Opus 4.5",
+       provider: AIProviderName.ANTHROPIC,
+       description: "Anthropic's most capable model with exceptional reasoning, coding, and multimodal capabilities",
+       capabilities: {
+           vision: true,
+           functionCalling: true,
+           codeGeneration: true,
+           reasoning: true,
+           multimodal: true,
+           streaming: true,
+           jsonMode: false,
+       },
+       pricing: {
+           inputCostPer1K: 0.015,
+           outputCostPer1K: 0.075,
+           currency: "USD",
+       },
+       performance: {
+           speed: "medium",
+           quality: "high",
+           accuracy: "high",
+       },
+       limits: {
+           maxContextTokens: 200000,
+           maxOutputTokens: 64000,
+           maxRequestsPerMinute: 50,
+       },
+       useCases: {
+           coding: 10,
+           creative: 10,
+           analysis: 10,
+           conversation: 9,
+           reasoning: 10,
+           translation: 9,
+           summarization: 9,
+       },
+       aliases: [
+           "claude-4.5-opus",
+           "claude-opus-latest",
+           "opus-4.5",
+           "anthropic-flagship",
+       ],
+       deprecated: false,
+       isLocal: false,
+       releaseDate: "2025-11-24",
+       category: "reasoning",
+   },
+   [AnthropicModels.CLAUDE_SONNET_4_5]: {
+       id: AnthropicModels.CLAUDE_SONNET_4_5,
+       name: "Claude Sonnet 4.5",
+       provider: AIProviderName.ANTHROPIC,
+       description: "Balanced Claude model with excellent performance across all tasks including vision and reasoning",
+       capabilities: {
+           vision: true,
+           functionCalling: true,
+           codeGeneration: true,
+           reasoning: true,
+           multimodal: true,
+           streaming: true,
+           jsonMode: false,
+       },
+       pricing: {
+           inputCostPer1K: 0.003,
+           outputCostPer1K: 0.015,
+           currency: "USD",
+       },
+       performance: {
+           speed: "medium",
+           quality: "high",
+           accuracy: "high",
+       },
+       limits: {
+           maxContextTokens: 200000,
+           maxOutputTokens: 64000,
+           maxRequestsPerMinute: 100,
+       },
+       useCases: {
+           coding: 10,
+           creative: 9,
+           analysis: 9,
+           conversation: 9,
+           reasoning: 10,
+           translation: 8,
+           summarization: 8,
+       },
+       aliases: ["claude-4.5-sonnet", "claude-sonnet-latest", "sonnet-4.5"],
+       deprecated: false,
+       isLocal: false,
+       releaseDate: "2025-09-29",
+       category: "coding",
+   },
    [AnthropicModels.CLAUDE_4_5_HAIKU]: {
        id: AnthropicModels.CLAUDE_4_5_HAIKU,
        name: "Claude 4.5 Haiku",
@@ -1998,19 +1998,85 @@ Current user's request: ${currentInput}`;
        }
    }
    const { stream: mcpStream, provider: providerName } = await this.createMCPStream(enhancedOptions);
-   // Create a wrapper around the stream that accumulates content
    let accumulatedContent = "";
+   let chunkCount = 0;
+   const metadata = {
+       fallbackAttempted: false,
+       guardrailsBlocked: false,
+       error: undefined,
+   };
    const processedStream = (async function* (self) {
        try {
            for await (const chunk of mcpStream) {
+               chunkCount++;
                if (chunk &&
                    "content" in chunk &&
                    typeof chunk.content === "string") {
                    accumulatedContent += chunk.content;
-                   // Emit chunk event for compatibility
                    self.emitter.emit("response:chunk", chunk.content);
                }
-               yield chunk; // Preserve original streaming behavior
+               yield chunk;
+           }
+           if (chunkCount === 0 && !metadata.fallbackAttempted) {
+               metadata.fallbackAttempted = true;
+               const errorMsg = "Stream completed with 0 chunks (possible guardrails block)";
+               metadata.error = errorMsg;
+               const fallbackRoute = ModelRouter.getFallbackRoute(originalPrompt || enhancedOptions.input.text || "", {
+                   provider: providerName,
+                   model: enhancedOptions.model || "gpt-4o",
+                   reasoning: "primary failed",
+                   confidence: 0.5,
+               }, { fallbackStrategy: "auto" });
+               logger.warn("Retrying with fallback provider", {
+                   originalProvider: providerName,
+                   fallbackProvider: fallbackRoute.provider,
+                   reason: errorMsg,
+               });
+               try {
+                   const fallbackProvider = await AIProviderFactory.createProvider(fallbackRoute.provider, fallbackRoute.model);
+                   // Ensure fallback provider can execute tools
+                   fallbackProvider.setupToolExecutor({
+                       customTools: self.getCustomTools(),
+                       executeTool: self.executeTool.bind(self),
+                   }, "NeuroLink.fallbackStream");
+                   // Get conversation messages for context (same as primary stream)
+                   const conversationMessages = await getConversationMessages(self.conversationMemory, {
+                       prompt: enhancedOptions.input.text,
+                       context: enhancedOptions.context,
+                   });
+                   const fallbackResult = await fallbackProvider.stream({
+                       ...enhancedOptions,
+                       model: fallbackRoute.model,
+                       conversationMessages,
+                   });
+                   let fallbackChunkCount = 0;
+                   for await (const fallbackChunk of fallbackResult.stream) {
+                       fallbackChunkCount++;
+                       if (fallbackChunk &&
+                           "content" in fallbackChunk &&
+                           typeof fallbackChunk.content === "string") {
+                           accumulatedContent += fallbackChunk.content;
+                           self.emitter.emit("response:chunk", fallbackChunk.content);
+                       }
+                       yield fallbackChunk;
+                   }
+                   if (fallbackChunkCount === 0) {
+                       throw new Error(`Fallback provider ${fallbackRoute.provider} also returned 0 chunks`);
+                   }
+                   // Fallback succeeded - likely guardrails blocked primary
+                   metadata.guardrailsBlocked = true;
+               }
+               catch (fallbackError) {
+                   const fallbackErrorMsg = fallbackError instanceof Error
+                       ? fallbackError.message
+                       : String(fallbackError);
+                   metadata.error = `${errorMsg}; Fallback failed: ${fallbackErrorMsg}`;
+                   logger.error("Fallback provider failed", {
+                       fallbackProvider: fallbackRoute.provider,
+                       error: fallbackErrorMsg,
+                   });
+                   throw fallbackError;
+               }
            }
        }
        finally {
@@ -2053,7 +2119,7 @@ Current user's request: ${currentInput}`;
        }
    }
    })(this);
-   const streamResult = await this.processStreamResult(mcpStream, enhancedOptions, factoryResult);
+   const streamResult = await this.processStreamResult(processedStream, enhancedOptions, factoryResult);
    const responseTime = Date.now() - startTime;
    this.emitStreamEndEvents(streamResult);
    return this.createStreamResponse(streamResult, processedStream, {
@@ -2062,7 +2128,9 @@ Current user's request: ${currentInput}`;
        startTime,
        responseTime,
        streamId,
-       fallback: false,
+       fallback: metadata.fallbackAttempted,
+       guardrailsBlocked: metadata.guardrailsBlocked,
+       error: metadata.error,
    });
    }
    catch (error) {
@@ -2181,6 +2249,8 @@ Current user's request: ${currentInput}`;
        startTime: config.startTime,
        responseTime: config.responseTime,
        fallback: config.fallback || false,
+       guardrailsBlocked: config.guardrailsBlocked,
+       error: config.error,
        },
    };
  }
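Downstream, these fields let callers distinguish a guardrails block from an ordinary failure. A hedged consumption sketch; the hunk does not show which response property carries `{ fallback, guardrailsBlocked, error }`, so `metadata` below is an assumption:

```typescript
// Hedged sketch: inspect the new fallback fields after draining a stream.
const response = await neurolink.stream({ input: { text: "Hello" } });
for await (const chunk of response.stream) {
  process.stdout.write(chunk.content ?? "");
}
console.log(response.metadata?.fallback);          // primary yielded 0 chunks
console.log(response.metadata?.guardrailsBlocked); // fallback succeeded after a block
console.log(response.metadata?.error);             // diagnostic string, if any
```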
@@ -6,6 +6,33 @@ import { BaseProvider } from "../core/baseProvider.js";
    /**
     * Google AI Studio provider implementation using BaseProvider
     * Migrated from original GoogleAIStudio class to new factory pattern
+    *
+    * @important Structured Output Limitation
+    * Google Gemini models cannot combine function calling (tools) with structured
+    * output (JSON schema). When using schemas with output.format: "json", you MUST
+    * set disableTools: true.
+    *
+    * Error without disableTools:
+    * "Function calling with a response mime type: 'application/json' is unsupported"
+    *
+    * This is a Google API limitation documented at:
+    * https://ai.google.dev/gemini-api/docs/function-calling
+    *
+    * @example
+    * ```typescript
+    * // ✅ Correct usage with schemas
+    * const provider = new GoogleAIStudioProvider("gemini-2.5-flash");
+    * const result = await provider.generate({
+    *   input: { text: "Analyze data" },
+    *   schema: MySchema,
+    *   output: { format: "json" },
+    *   disableTools: true // Required
+    * });
+    * ```
+    *
+    * @note Gemini 3 Pro Preview (November 2025) will support combining tools + schemas
+    * @note "Too many states for serving" errors can occur with complex schemas + tools.
+    * Solution: Simplify schema or use disableTools: true
     */
    export declare class GoogleAIStudioProvider extends BaseProvider {
        constructor(modelName?: string, sdk?: unknown);
@@ -27,6 +27,33 @@ if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY &&
    /**
     * Google AI Studio provider implementation using BaseProvider
     * Migrated from original GoogleAIStudio class to new factory pattern
+    *
+    * @important Structured Output Limitation
+    * Google Gemini models cannot combine function calling (tools) with structured
+    * output (JSON schema). When using schemas with output.format: "json", you MUST
+    * set disableTools: true.
+    *
+    * Error without disableTools:
+    * "Function calling with a response mime type: 'application/json' is unsupported"
+    *
+    * This is a Google API limitation documented at:
+    * https://ai.google.dev/gemini-api/docs/function-calling
+    *
+    * @example
+    * ```typescript
+    * // ✅ Correct usage with schemas
+    * const provider = new GoogleAIStudioProvider("gemini-2.5-flash");
+    * const result = await provider.generate({
+    *   input: { text: "Analyze data" },
+    *   schema: MySchema,
+    *   output: { format: "json" },
+    *   disableTools: true // Required
+    * });
+    * ```
+    *
+    * @note Gemini 3 Pro Preview (November 2025) will support combining tools + schemas
+    * @note "Too many states for serving" errors can occur with complex schemas + tools.
+    * Solution: Simplify schema or use disableTools: true
     */
    export class GoogleAIStudioProvider extends BaseProvider {
        constructor(modelName, sdk) {
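For callers that cannot statically know whether tools are active, a defensive retry is one option. A hedged sketch, assuming the Google error surfaces as a thrown `Error` carrying the message quoted in the JSDoc above; the helper name is hypothetical:

```typescript
// Hypothetical helper: retry without tools when Google rejects tools + JSON schema.
async function generateStructured(provider: GoogleAIStudioProvider, options: GenerateOptions) {
  try {
    return await provider.generate(options);
  } catch (err) {
    const msg = err instanceof Error ? err.message : String(err);
    if (msg.includes("'application/json' is unsupported")) {
      // Google limitation: function calling + structured output cannot combine.
      return await provider.generate({ ...options, disableTools: true });
    }
    throw err;
  }
}
```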
@@ -13,6 +13,41 @@ import { BaseProvider } from "../core/baseProvider.js";
     * - Fresh model creation for each request
     * - Enhanced error handling with setup guidance
     * - Tool registration and context management
+    *
+    * @important Structured Output Limitation (Gemini Models Only)
+    * Google Gemini models on Vertex AI cannot combine function calling (tools) with
+    * structured output (JSON schema). When using schemas, you MUST set disableTools: true.
+    *
+    * Error without disableTools:
+    * "Function calling with a response mime type: 'application/json' is unsupported"
+    *
+    * This limitation ONLY affects Gemini models. Anthropic Claude models via Vertex
+    * AI do NOT have this limitation and support both tools + schemas simultaneously.
+    *
+    * @example Gemini models with schemas
+    * ```typescript
+    * const provider = new GoogleVertexProvider("gemini-2.5-flash");
+    * const result = await provider.generate({
+    *   input: { text: "Analyze data" },
+    *   schema: MySchema,
+    *   output: { format: "json" },
+    *   disableTools: true // Required for Gemini models
+    * });
+    * ```
+    *
+    * @example Claude models (no limitation)
+    * ```typescript
+    * const provider = new GoogleVertexProvider("claude-3-5-sonnet-20241022");
+    * const result = await provider.generate({
+    *   input: { text: "Analyze data" },
+    *   schema: MySchema,
+    *   output: { format: "json" }
+    *   // No disableTools needed - Claude supports both
+    * });
+    * ```
+    *
+    * @note Gemini 3 Pro Preview (November 2025) will support combining tools + schemas
+    * @see https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models
     */
    export declare class GoogleVertexProvider extends BaseProvider {
        private projectId;
@@ -234,6 +234,41 @@ const isAnthropicModel = (modelName) => {
     * - Fresh model creation for each request
     * - Enhanced error handling with setup guidance
     * - Tool registration and context management
+    *
+    * @important Structured Output Limitation (Gemini Models Only)
+    * Google Gemini models on Vertex AI cannot combine function calling (tools) with
+    * structured output (JSON schema). When using schemas, you MUST set disableTools: true.
+    *
+    * Error without disableTools:
+    * "Function calling with a response mime type: 'application/json' is unsupported"
+    *
+    * This limitation ONLY affects Gemini models. Anthropic Claude models via Vertex
+    * AI do NOT have this limitation and support both tools + schemas simultaneously.
+    *
+    * @example Gemini models with schemas
+    * ```typescript
+    * const provider = new GoogleVertexProvider("gemini-2.5-flash");
+    * const result = await provider.generate({
+    *   input: { text: "Analyze data" },
+    *   schema: MySchema,
+    *   output: { format: "json" },
+    *   disableTools: true // Required for Gemini models
+    * });
+    * ```
+    *
+    * @example Claude models (no limitation)
+    * ```typescript
+    * const provider = new GoogleVertexProvider("claude-3-5-sonnet-20241022");
+    * const result = await provider.generate({
+    *   input: { text: "Analyze data" },
+    *   schema: MySchema,
+    *   output: { format: "json" }
+    *   // No disableTools needed - Claude supports both
+    * });
+    * ```
+    *
+    * @note Gemini 3 Pro Preview (November 2025) will support combining tools + schemas
+    * @see https://cloud.google.com/vertex-ai/docs/generative-ai/learn/models
     */
    export class GoogleVertexProvider extends BaseProvider {
        projectId;
@@ -1363,11 +1398,14 @@ export class GoogleVertexProvider extends BaseProvider {
    getModelSuggestions(requestedModel) {
        const availableModels = {
            google: [
+               "gemini-3-pro-preview-11-2025",
+               "gemini-3-pro-latest",
                "gemini-3-pro-preview",
                "gemini-2.5-pro",
                "gemini-2.5-flash",
                "gemini-2.5-flash-lite",
                "gemini-2.0-flash-001",
+               "gemini-2.0-flash-lite",
                "gemini-1.5-pro",
                "gemini-1.5-flash",
            ],
@@ -31,7 +31,7 @@ export declare class TelemetryService {
    private initializeTelemetry;
    private initializeMetrics;
    initialize(): Promise<void>;
-   traceAIRequest<T>(provider: string, operation: () => Promise<T>): Promise<T>;
+   traceAIRequest<T>(provider: string, operation: () => Promise<T>, operationType?: string): Promise<T>;
    recordAIRequest(provider: string, model: string, tokens: number, duration: number): void;
    recordAIError(provider: string, error: Error): void;
    recordMCPToolCall(toolName: string, duration: number, success: boolean): void;
@@ -108,14 +108,14 @@ export class TelemetryService {
        }
    }
    // AI Operation Tracing (NO-OP when disabled)
-   async traceAIRequest(provider, operation) {
+   async traceAIRequest(provider, operation, operationType = "generate_text") {
        if (!this.enabled || !this.tracer) {
-           return await operation(); // Direct execution when disabled
+           return await operation();
        }
-       const span = this.tracer.startSpan(`ai.${provider}.generate_text`, {
+       const span = this.tracer.startSpan(`ai.${provider}.${operationType}`, {
            attributes: {
                "ai.provider": provider,
-               "ai.operation": "generate_text",
+               "ai.operation": operationType,
            },
        });
        try {
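Callers can now label spans per operation. A usage sketch; the `telemetryService` instance and the `"stream_text"` label are assumptions, since any string works as `operationType`:

```typescript
// Span becomes "ai.vertex.stream_text" with "ai.operation": "stream_text".
const result = await telemetryService.traceAIRequest(
  "vertex",
  () => provider.stream(options), // the traced operation
  "stream_text",
);
// Omitting the third argument preserves the old "generate_text" behavior.
```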
@@ -129,3 +129,8 @@ export type TypedEventEmitter<TEvents extends Record<string, unknown>> = {
    listenerCount<K extends keyof TEvents>(event: K): number;
    listeners<K extends keyof TEvents>(event: K): Array<(...args: unknown[]) => void>;
  };
+ export type Context = {
+     traceName?: string;
+     userId?: string;
+     sessionId?: string;
+ };
@@ -14,5 +14,5 @@
   * import type { MultimodalInput } from './types/multimodal.js';
   * ```
   */
- export type { TextContent, ImageContent, CSVContent, PDFContent, AudioContent, VideoContent, Content, MultimodalInput, MultimodalMessage, VisionCapability, ProviderImageFormat, ProcessedImage, ProviderMultimodalPayload, } from "./multimodal.js";
+ export type { TextContent, ImageContent, CSVContent, PDFContent, AudioContent, VideoContent, Content, ImageWithAltText, MultimodalInput, MultimodalMessage, VisionCapability, ProviderImageFormat, ProcessedImage, ProviderMultimodalPayload, } from "./multimodal.js";
  export { isTextContent, isImageContent, isCSVContent, isPDFContent, isAudioContent, isVideoContent, isMultimodalInput, } from "./multimodal.js";
@@ -6,7 +6,7 @@ import type { EvaluationData } from "./evaluation.js";
  import type { ChatMessage, ConversationMemoryConfig } from "./conversation.js";
  import type { MiddlewareFactoryOptions } from "./middlewareTypes.js";
  import type { JsonValue } from "./common.js";
- import type { Content } from "./content.js";
+ import type { Content, ImageWithAltText } from "./content.js";
  /**
   * Generate function options type - Primary method for content generation
   * Supports multimodal content while maintaining backward compatibility
@@ -14,7 +14,24 @@ import type { Content } from "./content.js";
  export type GenerateOptions = {
      input: {
          text: string;
-         images?: Array<Buffer | string>;
+         /**
+          * Images to include in the request.
+          * Supports simple image data (Buffer, string) or objects with alt text for accessibility.
+          *
+          * @example Simple usage
+          * ```typescript
+          * images: [imageBuffer, "https://example.com/image.jpg"]
+          * ```
+          *
+          * @example With alt text for accessibility
+          * ```typescript
+          * images: [
+          *   { data: imageBuffer, altText: "Product screenshot showing main dashboard" },
+          *   { data: "https://example.com/chart.png", altText: "Sales chart for Q3 2024" }
+          * ]
+          * ```
+          */
+         images?: Array<Buffer | string | ImageWithAltText>;
          csvFiles?: Array<Buffer | string>;
          pdfFiles?: Array<Buffer | string>;
          files?: Array<Buffer | string>;
@@ -34,9 +51,58 @@ export type GenerateOptions = {
      temperature?: number;
      maxTokens?: number;
      systemPrompt?: string;
+     /**
+      * Zod schema for structured output validation
+      *
+      * @important Google Gemini Limitation
+      * Google Vertex AI and Google AI Studio cannot combine function calling with
+      * structured output. You MUST use `disableTools: true` when using schemas with
+      * Google providers.
+      *
+      * Error without disableTools: "Function calling with a response mime type:
+      * 'application/json' is unsupported"
+      *
+      * This is a documented Google API limitation, not a NeuroLink bug.
+      * All frameworks (LangChain, Vercel AI SDK, Agno, Instructor) use this approach.
+      *
+      * @example
+      * ```typescript
+      * // ✅ Correct for Google providers
+      * const result = await neurolink.generate({
+      *   schema: MySchema,
+      *   provider: "vertex",
+      *   disableTools: true // Required for Google
+      * });
+      *
+      * // ✅ No restriction for other providers
+      * const result = await neurolink.generate({
+      *   schema: MySchema,
+      *   provider: "openai" // Works without disableTools
+      * });
+      * ```
+      *
+      * @see https://ai.google.dev/gemini-api/docs/function-calling
+      */
      schema?: ValidationSchema;
      tools?: Record<string, Tool>;
      timeout?: number | string;
+     /**
+      * Disable tool execution (including built-in tools)
+      *
+      * @required For Google Gemini providers when using schemas
+      * Google Vertex AI and Google AI Studio require this flag when using
+      * structured output (schemas) due to Google API limitations.
+      *
+      * @example
+      * ```typescript
+      * // Required for Google providers with schemas
+      * await neurolink.generate({
+      *   schema: MySchema,
+      *   provider: "vertex",
+      *   disableTools: true
+      * });
+      * ```
+      */
      disableTools?: boolean;
      enableEvaluation?: boolean;
      enableAnalytics?: boolean;
@@ -52,6 +52,8 @@ export type TextContent = {
  export type ImageContent = {
      type: "image";
      data: Buffer | string;
+     /** Alternative text for accessibility (screen readers, SEO) */
+     altText?: string;
      mediaType?: "image/jpeg" | "image/png" | "image/gif" | "image/webp" | "image/bmp" | "image/tiff";
      metadata?: {
          description?: string;
@@ -164,13 +166,48 @@ export type VideoContent = {
   * Covers text, images, documents, and multimedia
   */
  export type Content = TextContent | ImageContent | CSVContent | PDFContent | AudioContent | VideoContent;
+ /**
+  * Image data with optional alt text for accessibility
+  * Use this when you need to provide alt text for screen readers and SEO
+  *
+  * @example
+  * ```typescript
+  * const imageWithAlt: ImageWithAltText = {
+  *   data: imageBuffer,
+  *   altText: "A dashboard showing quarterly sales trends"
+  * };
+  * ```
+  */
+ export type ImageWithAltText = {
+     /** Image data as Buffer, base64 string, URL, or data URI */
+     data: Buffer | string;
+     /** Alternative text for accessibility (screen readers, SEO) */
+     altText?: string;
+ };
  /**
   * Multimodal input type for options that may contain images or content arrays
   * This is the primary interface for users to provide multimodal content
   */
  export type MultimodalInput = {
      text: string;
-     images?: Array<Buffer | string>;
+     /**
+      * Images to include in the request.
+      * Can be simple image data (Buffer, string) or objects with alt text for accessibility.
+      *
+      * @example Simple usage
+      * ```typescript
+      * images: [imageBuffer, "https://example.com/image.jpg"]
+      * ```
+      *
+      * @example With alt text for accessibility
+      * ```typescript
+      * images: [
+      *   { data: imageBuffer, altText: "Product screenshot showing main dashboard" },
+      *   { data: "https://example.com/chart.png", altText: "Sales chart for Q3 2024" }
+      * ]
+      * ```
+      */
+     images?: Array<Buffer | string | ImageWithAltText>;
      content?: Content[];
      csvFiles?: Array<Buffer | string>;
      pdfFiles?: Array<Buffer | string>;
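Since `images` now accepts a union, plain entries and alt-text entries can be mixed in one request. A brief sketch, assuming a `NeuroLink` instance named `neurolink`:

```typescript
import { readFileSync } from "node:fs";

const screenshot = readFileSync("./dashboard.png");
const result = await neurolink.generate({
  input: {
    text: "Describe these images",
    images: [
      screenshot,                       // Buffer, as before
      "https://example.com/chart.png",  // URL string, as before
      { data: screenshot, altText: "Main dashboard with KPI tiles" }, // new form
    ],
  },
});
```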