@juspay/neurolink 8.5.1 → 8.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +11 -0
- package/dist/adapters/providerImageAdapter.d.ts +4 -2
- package/dist/adapters/providerImageAdapter.js +16 -2
- package/dist/core/baseProvider.js +6 -2
- package/dist/core/modules/TelemetryHandler.js +6 -1
- package/dist/lib/adapters/providerImageAdapter.d.ts +4 -2
- package/dist/lib/adapters/providerImageAdapter.js +16 -2
- package/dist/lib/core/baseProvider.js +6 -2
- package/dist/lib/core/modules/TelemetryHandler.js +6 -1
- package/dist/lib/middleware/builtin/guardrails.js +7 -0
- package/dist/lib/neurolink.js +75 -5
- package/dist/lib/telemetry/telemetryService.d.ts +1 -1
- package/dist/lib/telemetry/telemetryService.js +4 -4
- package/dist/lib/types/common.d.ts +5 -0
- package/dist/lib/types/content.d.ts +1 -1
- package/dist/lib/types/generateTypes.d.ts +19 -2
- package/dist/lib/types/multimodal.d.ts +38 -1
- package/dist/lib/types/streamTypes.d.ts +21 -2
- package/dist/lib/utils/messageBuilder.js +52 -7
- package/dist/lib/utils/multimodalOptionsBuilder.d.ts +1 -1
- package/dist/middleware/builtin/guardrails.js +7 -0
- package/dist/neurolink.js +75 -5
- package/dist/telemetry/telemetryService.d.ts +1 -1
- package/dist/telemetry/telemetryService.js +4 -4
- package/dist/types/common.d.ts +5 -0
- package/dist/types/content.d.ts +1 -1
- package/dist/types/generateTypes.d.ts +19 -2
- package/dist/types/multimodal.d.ts +38 -1
- package/dist/types/streamTypes.d.ts +21 -2
- package/dist/utils/messageBuilder.js +52 -7
- package/dist/utils/multimodalOptionsBuilder.d.ts +1 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,3 +1,14 @@
+## [8.6.0](https://github.com/juspay/neurolink/compare/v8.5.1...v8.6.0) (2025-12-06)
+
+### Features
+
+- **(multimodal):** add altText support to ImageContent for accessibility ([27118c8](https://github.com/juspay/neurolink/commit/27118c87c73bc1eb6389bbc49dd2e59f1cc4c523)), closes [#565](https://github.com/juspay/neurolink/issues/565)
+
+### Bug Fixes
+
+- **(guardrails):** added fallback for guardrail errors on azure's jailbreak errors ([ae42552](https://github.com/juspay/neurolink/commit/ae4255255657c00ea164730dbd61fbad9f65f339))
+- **(observability):** add support to let applications customize traces ([608d991](https://github.com/juspay/neurolink/commit/608d991114c5df2335be73f44a24a187f424373a))
+
 ## [8.5.1](https://github.com/juspay/neurolink/compare/v8.5.0...v8.5.1) (2025-12-04)
 
 ### Bug Fixes
package/dist/adapters/providerImageAdapter.d.ts
CHANGED

@@ -2,7 +2,7 @@
  * Provider Image Adapter - Smart routing for multimodal content
  * Handles provider-specific image formatting and vision capability validation
  */
-import type { Content } from "../types/multimodal.js";
+import type { Content, ImageWithAltText } from "../types/multimodal.js";
 /**
  * Simplified logger for essential error reporting only
  */
@@ -39,8 +39,10 @@ export declare class ProviderImageAdapter {
     private static validateVisionSupport;
     /**
      * Convert simple images array to advanced content format
+     * @param text - Text content to include
+     * @param images - Array of images (Buffer, string, or ImageWithAltText)
      */
-    static convertToContent(text: string, images?: Array<Buffer | string>): Content[];
+    static convertToContent(text: string, images?: Array<Buffer | string | ImageWithAltText>): Content[];
     /**
      * Check if provider supports multimodal content
      */
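To make the widened `convertToContent` signature concrete, here is a hedged sketch of a call site mixing all three accepted image forms. The deep import path is an assumption for illustration; consumers would normally import through the package's public entry point.

```typescript
// Illustrative only: the import path below is assumed, not documented API.
import { ProviderImageAdapter } from "@juspay/neurolink/dist/adapters/providerImageAdapter.js";
import { readFileSync } from "fs";

const screenshot = readFileSync("./dashboard.png"); // Buffer input

// Mixed array: raw Buffer, URL string, and an ImageWithAltText object.
const content = ProviderImageAdapter.convertToContent(
  "Summarize what these images show.",
  [
    screenshot,
    "https://example.com/chart.png",
    { data: screenshot, altText: "Dashboard with quarterly sales widgets" },
  ],
);
// content[0] is the text part; each image part now carries an optional altText.
```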
package/dist/adapters/providerImageAdapter.js
CHANGED

@@ -400,15 +400,29 @@ export class ProviderImageAdapter {
     }
     /**
      * Convert simple images array to advanced content format
+     * @param text - Text content to include
+     * @param images - Array of images (Buffer, string, or ImageWithAltText)
      */
     static convertToContent(text, images) {
         const content = [{ type: "text", text }];
         if (images && images.length > 0) {
             images.forEach((image) => {
+                // Handle both simple images and images with alt text
+                const imageData = typeof image === "object" &&
+                    "data" in image &&
+                    !Buffer.isBuffer(image)
+                    ? image.data
+                    : image;
+                const altText = typeof image === "object" &&
+                    "data" in image &&
+                    !Buffer.isBuffer(image)
+                    ? image.altText
+                    : undefined;
                 content.push({
                     type: "image",
-                    data:
-
+                    data: imageData,
+                    altText,
+                    mediaType: ImageProcessor.detectImageType(imageData),
                 });
             });
         }
@@ -4,7 +4,6 @@ import { MiddlewareFactory } from "../middleware/factory.js";
|
|
|
4
4
|
import { logger } from "../utils/logger.js";
|
|
5
5
|
import { directAgentTools } from "../agent/directTools.js";
|
|
6
6
|
import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
|
|
7
|
-
import { nanoid } from "nanoid";
|
|
8
7
|
import { shouldDisableBuiltinTools } from "../utils/toolUtils.js";
|
|
9
8
|
import { getKeysAsString, getKeyCount } from "../utils/transformationUtils.js";
|
|
10
9
|
// Import modules for composition
|
|
@@ -658,12 +657,17 @@ export class BaseProvider {
|
|
|
658
657
|
if (!this.neurolink?.isTelemetryEnabled()) {
|
|
659
658
|
return undefined;
|
|
660
659
|
}
|
|
661
|
-
const
|
|
660
|
+
const context = options.context;
|
|
661
|
+
const traceName = context?.traceName;
|
|
662
|
+
const userId = context?.userId;
|
|
663
|
+
const functionId = traceName ? traceName : userId ? userId : "guest";
|
|
662
664
|
const metadata = {
|
|
663
665
|
provider: this.providerName,
|
|
664
666
|
model: this.modelName,
|
|
665
667
|
toolsEnabled: !options.disableTools,
|
|
666
668
|
neurolink: true,
|
|
669
|
+
operationType,
|
|
670
|
+
originalProvider: this.providerName,
|
|
667
671
|
};
|
|
668
672
|
// Add sessionId if available
|
|
669
673
|
if ("sessionId" in options && options.sessionId) {
|
|
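The `functionId` resolution above prefers an explicit trace name, then the user id, then a `"guest"` placeholder. A minimal standalone sketch of that precedence (names taken from the diff; nullish coalescing approximates the compiled nested ternary and differs only for empty strings):

```typescript
type TraceContext = { traceName?: string; userId?: string };

// Precedence mirrors the compiled output: traceName > userId > "guest".
function resolveFunctionId(context?: TraceContext): string {
  return context?.traceName ?? context?.userId ?? "guest";
}

resolveFunctionId({ traceName: "checkout-flow" }); // "checkout-flow"
resolveFunctionId({ userId: "user-42" });          // "user-42"
resolveFunctionId(undefined);                      // "guest"
```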
package/dist/core/modules/TelemetryHandler.js
CHANGED

@@ -120,12 +120,17 @@ export class TelemetryHandler {
         if (!this.neurolink?.isTelemetryEnabled()) {
             return undefined;
         }
-        const
+        const context = options.context;
+        const traceName = context?.traceName;
+        const userId = context?.userId;
+        const functionId = traceName ? traceName : userId ? userId : "guest";
         const metadata = {
             provider: this.providerName,
             model: this.modelName,
             toolsEnabled: !options.disableTools,
             neurolink: true,
+            operationType,
+            originalProvider: this.providerName,
         };
         // Add sessionId if available
         if ("sessionId" in options && options.sessionId) {
package/dist/lib/adapters/providerImageAdapter.d.ts
CHANGED

@@ -2,7 +2,7 @@
  * Provider Image Adapter - Smart routing for multimodal content
  * Handles provider-specific image formatting and vision capability validation
  */
-import type { Content } from "../types/multimodal.js";
+import type { Content, ImageWithAltText } from "../types/multimodal.js";
 /**
  * Simplified logger for essential error reporting only
  */
@@ -39,8 +39,10 @@ export declare class ProviderImageAdapter {
     private static validateVisionSupport;
     /**
      * Convert simple images array to advanced content format
+     * @param text - Text content to include
+     * @param images - Array of images (Buffer, string, or ImageWithAltText)
      */
-    static convertToContent(text: string, images?: Array<Buffer | string>): Content[];
+    static convertToContent(text: string, images?: Array<Buffer | string | ImageWithAltText>): Content[];
     /**
      * Check if provider supports multimodal content
      */
package/dist/lib/adapters/providerImageAdapter.js
CHANGED

@@ -400,15 +400,29 @@ export class ProviderImageAdapter {
     }
     /**
      * Convert simple images array to advanced content format
+     * @param text - Text content to include
+     * @param images - Array of images (Buffer, string, or ImageWithAltText)
      */
     static convertToContent(text, images) {
         const content = [{ type: "text", text }];
         if (images && images.length > 0) {
             images.forEach((image) => {
+                // Handle both simple images and images with alt text
+                const imageData = typeof image === "object" &&
+                    "data" in image &&
+                    !Buffer.isBuffer(image)
+                    ? image.data
+                    : image;
+                const altText = typeof image === "object" &&
+                    "data" in image &&
+                    !Buffer.isBuffer(image)
+                    ? image.altText
+                    : undefined;
                 content.push({
                     type: "image",
-                    data:
-
+                    data: imageData,
+                    altText,
+                    mediaType: ImageProcessor.detectImageType(imageData),
                 });
             });
         }
@@ -4,7 +4,6 @@ import { MiddlewareFactory } from "../middleware/factory.js";
|
|
|
4
4
|
import { logger } from "../utils/logger.js";
|
|
5
5
|
import { directAgentTools } from "../agent/directTools.js";
|
|
6
6
|
import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
|
|
7
|
-
import { nanoid } from "nanoid";
|
|
8
7
|
import { shouldDisableBuiltinTools } from "../utils/toolUtils.js";
|
|
9
8
|
import { getKeysAsString, getKeyCount } from "../utils/transformationUtils.js";
|
|
10
9
|
// Import modules for composition
|
|
@@ -658,12 +657,17 @@ export class BaseProvider {
|
|
|
658
657
|
if (!this.neurolink?.isTelemetryEnabled()) {
|
|
659
658
|
return undefined;
|
|
660
659
|
}
|
|
661
|
-
const
|
|
660
|
+
const context = options.context;
|
|
661
|
+
const traceName = context?.traceName;
|
|
662
|
+
const userId = context?.userId;
|
|
663
|
+
const functionId = traceName ? traceName : userId ? userId : "guest";
|
|
662
664
|
const metadata = {
|
|
663
665
|
provider: this.providerName,
|
|
664
666
|
model: this.modelName,
|
|
665
667
|
toolsEnabled: !options.disableTools,
|
|
666
668
|
neurolink: true,
|
|
669
|
+
operationType,
|
|
670
|
+
originalProvider: this.providerName,
|
|
667
671
|
};
|
|
668
672
|
// Add sessionId if available
|
|
669
673
|
if ("sessionId" in options && options.sessionId) {
|
|
package/dist/lib/core/modules/TelemetryHandler.js
CHANGED

@@ -120,12 +120,17 @@ export class TelemetryHandler {
         if (!this.neurolink?.isTelemetryEnabled()) {
             return undefined;
         }
-        const
+        const context = options.context;
+        const traceName = context?.traceName;
+        const userId = context?.userId;
+        const functionId = traceName ? traceName : userId ? userId : "guest";
         const metadata = {
             provider: this.providerName,
             model: this.modelName,
             toolsEnabled: !options.disableTools,
             neurolink: true,
+            operationType,
+            originalProvider: this.providerName,
         };
         // Add sessionId if available
         if ("sessionId" in options && options.sessionId) {
package/dist/lib/middleware/builtin/guardrails.js
CHANGED

@@ -69,8 +69,10 @@ export function createGuardrailsMiddleware(config = {}) {
         };
     }
     const { stream, ...rest } = await doStream();
+    let hasYieldedChunks = false;
     const transformStream = new TransformStream({
         transform(chunk, controller) {
+            hasYieldedChunks = true;
             let filteredChunk = chunk;
             if (typeof filteredChunk === "object" &&
                 "textDelta" in filteredChunk) {
@@ -84,6 +86,11 @@ export function createGuardrailsMiddleware(config = {}) {
             }
             controller.enqueue(filteredChunk);
         },
+        flush() {
+            if (!hasYieldedChunks) {
+                logger.warn(`[GuardrailsMiddleware] Stream ended without yielding any chunks`);
+            }
+        },
     });
     return {
         stream: stream.pipeThrough(transformStream),
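The empty-stream detection above relies on `TransformStream`'s `flush` callback, which runs exactly once after the upstream closes, so the flag's final value is known there. A self-contained sketch of the same pattern, independent of the middleware:

```typescript
// Standalone illustration of flush-based empty-stream detection
// using the standard Web Streams API (Node 18+ / browsers).
function withEmptyStreamWarning<T>(source: ReadableStream<T>): ReadableStream<T> {
  let sawChunk = false;
  const detector = new TransformStream<T, T>({
    transform(chunk, controller) {
      sawChunk = true;               // flips on the first chunk
      controller.enqueue(chunk);     // pass everything through unchanged
    },
    // flush() fires after the last chunk, so sawChunk is final here.
    flush() {
      if (!sawChunk) {
        console.warn("Stream ended without yielding any chunks");
      }
    },
  });
  return source.pipeThrough(detector);
}
```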
package/dist/lib/neurolink.js
CHANGED
@@ -1998,19 +1998,85 @@ Current user's request: ${currentInput}`;
         }
     }
     const { stream: mcpStream, provider: providerName } = await this.createMCPStream(enhancedOptions);
-    // Create a wrapper around the stream that accumulates content
     let accumulatedContent = "";
+    let chunkCount = 0;
+    const metadata = {
+        fallbackAttempted: false,
+        guardrailsBlocked: false,
+        error: undefined,
+    };
     const processedStream = (async function* (self) {
         try {
             for await (const chunk of mcpStream) {
+                chunkCount++;
                 if (chunk &&
                     "content" in chunk &&
                     typeof chunk.content === "string") {
                     accumulatedContent += chunk.content;
-                    // Emit chunk event for compatibility
                     self.emitter.emit("response:chunk", chunk.content);
                 }
-            yield chunk;
+                yield chunk;
+            }
+            if (chunkCount === 0 && !metadata.fallbackAttempted) {
+                metadata.fallbackAttempted = true;
+                const errorMsg = "Stream completed with 0 chunks (possible guardrails block)";
+                metadata.error = errorMsg;
+                const fallbackRoute = ModelRouter.getFallbackRoute(originalPrompt || enhancedOptions.input.text || "", {
+                    provider: providerName,
+                    model: enhancedOptions.model || "gpt-4o",
+                    reasoning: "primary failed",
+                    confidence: 0.5,
+                }, { fallbackStrategy: "auto" });
+                logger.warn("Retrying with fallback provider", {
+                    originalProvider: providerName,
+                    fallbackProvider: fallbackRoute.provider,
+                    reason: errorMsg,
+                });
+                try {
+                    const fallbackProvider = await AIProviderFactory.createProvider(fallbackRoute.provider, fallbackRoute.model);
+                    // Ensure fallback provider can execute tools
+                    fallbackProvider.setupToolExecutor({
+                        customTools: self.getCustomTools(),
+                        executeTool: self.executeTool.bind(self),
+                    }, "NeuroLink.fallbackStream");
+                    // Get conversation messages for context (same as primary stream)
+                    const conversationMessages = await getConversationMessages(self.conversationMemory, {
+                        prompt: enhancedOptions.input.text,
+                        context: enhancedOptions.context,
+                    });
+                    const fallbackResult = await fallbackProvider.stream({
+                        ...enhancedOptions,
+                        model: fallbackRoute.model,
+                        conversationMessages,
+                    });
+                    let fallbackChunkCount = 0;
+                    for await (const fallbackChunk of fallbackResult.stream) {
+                        fallbackChunkCount++;
+                        if (fallbackChunk &&
+                            "content" in fallbackChunk &&
+                            typeof fallbackChunk.content === "string") {
+                            accumulatedContent += fallbackChunk.content;
+                            self.emitter.emit("response:chunk", fallbackChunk.content);
+                        }
+                        yield fallbackChunk;
+                    }
+                    if (fallbackChunkCount === 0) {
+                        throw new Error(`Fallback provider ${fallbackRoute.provider} also returned 0 chunks`);
+                    }
+                    // Fallback succeeded - likely guardrails blocked primary
+                    metadata.guardrailsBlocked = true;
+                }
+                catch (fallbackError) {
+                    const fallbackErrorMsg = fallbackError instanceof Error
+                        ? fallbackError.message
+                        : String(fallbackError);
+                    metadata.error = `${errorMsg}; Fallback failed: ${fallbackErrorMsg}`;
+                    logger.error("Fallback provider failed", {
+                        fallbackProvider: fallbackRoute.provider,
+                        error: fallbackErrorMsg,
+                    });
+                    throw fallbackError;
+                }
             }
         }
         finally {
@@ -2053,7 +2119,7 @@ Current user's request: ${currentInput}`;
         }
     }
     })(this);
-    const streamResult = await this.processStreamResult(
+    const streamResult = await this.processStreamResult(processedStream, enhancedOptions, factoryResult);
     const responseTime = Date.now() - startTime;
     this.emitStreamEndEvents(streamResult);
     return this.createStreamResponse(streamResult, processedStream, {
@@ -2062,7 +2128,9 @@ Current user's request: ${currentInput}`;
         startTime,
         responseTime,
         streamId,
-        fallback:
+        fallback: metadata.fallbackAttempted,
+        guardrailsBlocked: metadata.guardrailsBlocked,
+        error: metadata.error,
     });
 }
 catch (error) {
@@ -2181,6 +2249,8 @@ Current user's request: ${currentInput}`;
             startTime: config.startTime,
             responseTime: config.responseTime,
             fallback: config.fallback || false,
+            guardrailsBlocked: config.guardrailsBlocked,
+            error: config.error,
         },
     };
 }
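The core of the change above is a zero-chunk fallback around an async generator: if the primary stream completes without producing anything (the signature of a guardrails block), retry once against a secondary source and record what happened. A hedged, generic sketch of that control flow, with NeuroLink-specific wiring (routing, tool setup, conversation memory) stripped out:

```typescript
// Generic shape of the zero-chunk fallback; all names here are illustrative.
async function* withFallback<T>(
  primary: AsyncIterable<T>,
  makeFallback: () => Promise<AsyncIterable<T>>,
  metadata: { fallbackAttempted: boolean; error?: string },
): AsyncGenerator<T> {
  let chunkCount = 0;
  for await (const chunk of primary) {
    chunkCount++;
    yield chunk; // pass primary output through untouched
  }
  if (chunkCount === 0) {
    // Primary finished silently: flag it and try the fallback once.
    metadata.fallbackAttempted = true;
    metadata.error = "Stream completed with 0 chunks (possible guardrails block)";
    let fallbackCount = 0;
    for await (const chunk of await makeFallback()) {
      fallbackCount++;
      yield chunk;
    }
    if (fallbackCount === 0) {
      throw new Error("Fallback source also returned 0 chunks");
    }
  }
}
```

Because the detection happens after the `for await` loop ends, no content is buffered or delayed; the fallback only engages when the primary produced literally nothing.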
package/dist/lib/telemetry/telemetryService.d.ts
CHANGED

@@ -31,7 +31,7 @@ export declare class TelemetryService {
     private initializeTelemetry;
     private initializeMetrics;
     initialize(): Promise<void>;
-    traceAIRequest<T>(provider: string, operation: () => Promise<T
+    traceAIRequest<T>(provider: string, operation: () => Promise<T>, operationType?: string): Promise<T>;
     recordAIRequest(provider: string, model: string, tokens: number, duration: number): void;
     recordAIError(provider: string, error: Error): void;
     recordMCPToolCall(toolName: string, duration: number, success: boolean): void;
package/dist/lib/telemetry/telemetryService.js
CHANGED

@@ -108,14 +108,14 @@ export class TelemetryService {
         }
     }
     // AI Operation Tracing (NO-OP when disabled)
-    async traceAIRequest(provider, operation) {
+    async traceAIRequest(provider, operation, operationType = "generate_text") {
         if (!this.enabled || !this.tracer) {
-            return await operation();
+            return await operation();
         }
-        const span = this.tracer.startSpan(`ai.${provider}
+        const span = this.tracer.startSpan(`ai.${provider}.${operationType}`, {
             attributes: {
                 "ai.provider": provider,
-                "ai.operation":
+                "ai.operation": operationType,
             },
         });
         try {
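With `operationType` threaded through, span names take the form `ai.<provider>.<operationType>`. A hedged sketch of just the naming scheme, with a stub tracer interface standing in for the real OpenTelemetry one:

```typescript
// Illustrates the span-naming scheme only; SpanLike/TracerLike are stubs.
interface SpanLike { end(): void }
interface TracerLike {
  startSpan(name: string, options?: { attributes?: Record<string, string> }): SpanLike;
}

async function traceAIRequest<T>(
  tracer: TracerLike,
  provider: string,
  operation: () => Promise<T>,
  operationType = "generate_text", // default matches the diff
): Promise<T> {
  // e.g. "ai.openai.generate_text" or "ai.vertex.stream"
  const span = tracer.startSpan(`ai.${provider}.${operationType}`, {
    attributes: { "ai.provider": provider, "ai.operation": operationType },
  });
  try {
    return await operation();
  } finally {
    span.end(); // always close the span, even on failure
  }
}
```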
@@ -129,3 +129,8 @@ export type TypedEventEmitter<TEvents extends Record<string, unknown>> = {
|
|
|
129
129
|
listenerCount<K extends keyof TEvents>(event: K): number;
|
|
130
130
|
listeners<K extends keyof TEvents>(event: K): Array<(...args: unknown[]) => void>;
|
|
131
131
|
};
|
|
132
|
+
export type Context = {
|
|
133
|
+
traceName?: string;
|
|
134
|
+
userId?: string;
|
|
135
|
+
sessionId?: string;
|
|
136
|
+
};
|
|
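This `Context` type is what lets applications customize traces (the "observability" fix in the changelog). A hedged usage sketch — the deep import path and the commented call shape are assumptions based on the options types elsewhere in this diff:

```typescript
// Path assumed for illustration; import via the package's public API in practice.
import type { Context } from "@juspay/neurolink/dist/types/common.js";

const context: Context = {
  traceName: "support-bot/answer", // preferred trace label
  userId: "user-42",               // used as functionId if traceName is absent
  sessionId: "session-9000",       // attached to telemetry metadata when present
};

// Passed via options.context; telemetry resolves functionId as
// traceName, then userId, then "guest".
// await neurolink.generate({ input: { text: "Hello" }, context });
```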
package/dist/lib/types/content.d.ts
CHANGED

@@ -14,5 +14,5 @@
  * import type { MultimodalInput } from './types/multimodal.js';
  * ```
  */
-export type { TextContent, ImageContent, CSVContent, PDFContent, AudioContent, VideoContent, Content, MultimodalInput, MultimodalMessage, VisionCapability, ProviderImageFormat, ProcessedImage, ProviderMultimodalPayload, } from "./multimodal.js";
+export type { TextContent, ImageContent, CSVContent, PDFContent, AudioContent, VideoContent, Content, ImageWithAltText, MultimodalInput, MultimodalMessage, VisionCapability, ProviderImageFormat, ProcessedImage, ProviderMultimodalPayload, } from "./multimodal.js";
 export { isTextContent, isImageContent, isCSVContent, isPDFContent, isAudioContent, isVideoContent, isMultimodalInput, } from "./multimodal.js";
@@ -6,7 +6,7 @@ import type { EvaluationData } from "./evaluation.js";
|
|
|
6
6
|
import type { ChatMessage, ConversationMemoryConfig } from "./conversation.js";
|
|
7
7
|
import type { MiddlewareFactoryOptions } from "./middlewareTypes.js";
|
|
8
8
|
import type { JsonValue } from "./common.js";
|
|
9
|
-
import type { Content } from "./content.js";
|
|
9
|
+
import type { Content, ImageWithAltText } from "./content.js";
|
|
10
10
|
/**
|
|
11
11
|
* Generate function options type - Primary method for content generation
|
|
12
12
|
* Supports multimodal content while maintaining backward compatibility
|
|
@@ -14,7 +14,24 @@ import type { Content } from "./content.js";
|
|
|
14
14
|
export type GenerateOptions = {
|
|
15
15
|
input: {
|
|
16
16
|
text: string;
|
|
17
|
-
|
|
17
|
+
/**
|
|
18
|
+
* Images to include in the request.
|
|
19
|
+
* Supports simple image data (Buffer, string) or objects with alt text for accessibility.
|
|
20
|
+
*
|
|
21
|
+
* @example Simple usage
|
|
22
|
+
* ```typescript
|
|
23
|
+
* images: [imageBuffer, "https://example.com/image.jpg"]
|
|
24
|
+
* ```
|
|
25
|
+
*
|
|
26
|
+
* @example With alt text for accessibility
|
|
27
|
+
* ```typescript
|
|
28
|
+
* images: [
|
|
29
|
+
* { data: imageBuffer, altText: "Product screenshot showing main dashboard" },
|
|
30
|
+
* { data: "https://example.com/chart.png", altText: "Sales chart for Q3 2024" }
|
|
31
|
+
* ]
|
|
32
|
+
* ```
|
|
33
|
+
*/
|
|
34
|
+
images?: Array<Buffer | string | ImageWithAltText>;
|
|
18
35
|
csvFiles?: Array<Buffer | string>;
|
|
19
36
|
pdfFiles?: Array<Buffer | string>;
|
|
20
37
|
files?: Array<Buffer | string>;
|
|
package/dist/lib/types/multimodal.d.ts
CHANGED

@@ -52,6 +52,8 @@ export type TextContent = {
 export type ImageContent = {
     type: "image";
     data: Buffer | string;
+    /** Alternative text for accessibility (screen readers, SEO) */
+    altText?: string;
     mediaType?: "image/jpeg" | "image/png" | "image/gif" | "image/webp" | "image/bmp" | "image/tiff";
     metadata?: {
         description?: string;
@@ -164,13 +166,48 @@ export type VideoContent = {
  * Covers text, images, documents, and multimedia
  */
 export type Content = TextContent | ImageContent | CSVContent | PDFContent | AudioContent | VideoContent;
+/**
+ * Image data with optional alt text for accessibility
+ * Use this when you need to provide alt text for screen readers and SEO
+ *
+ * @example
+ * ```typescript
+ * const imageWithAlt: ImageWithAltText = {
+ *   data: imageBuffer,
+ *   altText: "A dashboard showing quarterly sales trends"
+ * };
+ * ```
+ */
+export type ImageWithAltText = {
+    /** Image data as Buffer, base64 string, URL, or data URI */
+    data: Buffer | string;
+    /** Alternative text for accessibility (screen readers, SEO) */
+    altText?: string;
+};
 /**
  * Multimodal input type for options that may contain images or content arrays
  * This is the primary interface for users to provide multimodal content
  */
 export type MultimodalInput = {
     text: string;
-
+    /**
+     * Images to include in the request.
+     * Can be simple image data (Buffer, string) or objects with alt text for accessibility.
+     *
+     * @example Simple usage
+     * ```typescript
+     * images: [imageBuffer, "https://example.com/image.jpg"]
+     * ```
+     *
+     * @example With alt text for accessibility
+     * ```typescript
+     * images: [
+     *   { data: imageBuffer, altText: "Product screenshot showing main dashboard" },
+     *   { data: "https://example.com/chart.png", altText: "Sales chart for Q3 2024" }
+     * ]
+     * ```
+     */
+    images?: Array<Buffer | string | ImageWithAltText>;
     content?: Content[];
     csvFiles?: Array<Buffer | string>;
     pdfFiles?: Array<Buffer | string>;
package/dist/lib/types/streamTypes.d.ts
CHANGED

@@ -1,7 +1,7 @@
 import type { Tool } from "ai";
 import type { ValidationSchema, StandardRecord } from "./typeAliases.js";
 import type { AIModelProviderConfig } from "./providers.js";
-import type { Content } from "./content.js";
+import type { Content, ImageWithAltText } from "./content.js";
 import type { AnalyticsData, ToolExecutionEvent, ToolExecutionSummary } from "../types/index.js";
 import { AIProviderName } from "../constants/enums.js";
 import type { TokenUsage } from "./analytics.js";
@@ -125,7 +125,24 @@ export type StreamOptions = {
     input: {
         text: string;
         audio?: AudioInputSpec;
-
+        /**
+         * Images to include in the request.
+         * Supports simple image data (Buffer, string) or objects with alt text for accessibility.
+         *
+         * @example Simple usage
+         * ```typescript
+         * images: [imageBuffer, "https://example.com/image.jpg"]
+         * ```
+         *
+         * @example With alt text for accessibility
+         * ```typescript
+         * images: [
+         *   { data: imageBuffer, altText: "Product screenshot showing main dashboard" },
+         *   { data: "https://example.com/chart.png", altText: "Sales chart for Q3 2024" }
+         * ]
+         * ```
+         */
+        images?: Array<Buffer | string | ImageWithAltText>;
         csvFiles?: Array<Buffer | string>;
         pdfFiles?: Array<Buffer | string>;
         files?: Array<Buffer | string>;
@@ -211,6 +228,8 @@ export type StreamResult = {
         totalToolExecutions?: number;
         toolExecutionTime?: number;
         hasToolErrors?: boolean;
+        guardrailsBlocked?: boolean;
+        error?: string;
     };
     analytics?: AnalyticsData | Promise<AnalyticsData>;
     evaluation?: EvaluationData | Promise<EvaluationData>;
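Callers can inspect the new `guardrailsBlocked` and `error` fields once the stream drains. A hedged sketch of that consumption pattern — the exact placement of the fields inside `StreamResult` is assumed from the hunk above, and the provider shape is a minimal stand-in:

```typescript
// Hypothetical consumption of a StreamResult carrying the new fields;
// the nested "metadata" name is an assumption for illustration.
async function runStream(provider: {
  stream(o: { input: { text: string } }): Promise<{
    stream: AsyncIterable<{ content?: string }>;
    metadata?: { guardrailsBlocked?: boolean; error?: string };
  }>;
}) {
  const result = await provider.stream({ input: { text: "Hi" } });
  for await (const chunk of result.stream) {
    if (chunk.content) process.stdout.write(chunk.content);
  }
  // Only meaningful after the stream has fully drained.
  if (result.metadata?.guardrailsBlocked) {
    console.warn("Primary provider was blocked; output came from the fallback.",
      result.metadata.error);
  }
}
```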
@@ -10,6 +10,30 @@ import { FileDetector } from "./fileDetector.js";
|
|
|
10
10
|
import { PDFProcessor } from "./pdfProcessor.js";
|
|
11
11
|
import { request, getGlobalDispatcher, interceptors } from "undici";
|
|
12
12
|
import { readFileSync, existsSync } from "fs";
|
|
13
|
+
/**
|
|
14
|
+
* Type guard to check if an image input has alt text
|
|
15
|
+
*/
|
|
16
|
+
function isImageWithAltText(image) {
|
|
17
|
+
return (typeof image === "object" && !Buffer.isBuffer(image) && "data" in image);
|
|
18
|
+
}
|
|
19
|
+
/**
|
|
20
|
+
* Extract image data from an image input (handles both simple and alt text formats)
|
|
21
|
+
*/
|
|
22
|
+
function extractImageData(image) {
|
|
23
|
+
if (isImageWithAltText(image)) {
|
|
24
|
+
return image.data;
|
|
25
|
+
}
|
|
26
|
+
return image;
|
|
27
|
+
}
|
|
28
|
+
/**
|
|
29
|
+
* Extract alt text from an image input if available
|
|
30
|
+
*/
|
|
31
|
+
function extractAltText(image) {
|
|
32
|
+
if (isImageWithAltText(image)) {
|
|
33
|
+
return image.altText;
|
|
34
|
+
}
|
|
35
|
+
return undefined;
|
|
36
|
+
}
|
|
13
37
|
/**
|
|
14
38
|
* Type guard for validating message roles
|
|
15
39
|
*/
|
|
@@ -639,28 +663,47 @@ async function downloadImageFromUrl(url) {
|
|
|
639
663
|
* - URLs: Downloaded and converted to base64 for Vercel AI SDK compatibility
|
|
640
664
|
* - Local files: Converted to base64 for Vercel AI SDK compatibility
|
|
641
665
|
* - Buffers/Data URIs: Processed normally
|
|
666
|
+
* - Supports alt text for accessibility (included as context in text parts)
|
|
642
667
|
*/
|
|
643
668
|
async function convertSimpleImagesToProviderFormat(text, images, provider, _model) {
|
|
644
669
|
// For Vercel AI SDK, we need to return the content in the standard format
|
|
645
670
|
// The Vercel AI SDK will handle provider-specific formatting internally
|
|
671
|
+
// IMPORTANT: Generate alt text descriptions BEFORE URL downloading to maintain correct image numbering
|
|
672
|
+
// This ensures image numbers match the original order provided by users, even if some URLs fail to download
|
|
673
|
+
const altTextDescriptions = images
|
|
674
|
+
.map((image, idx) => {
|
|
675
|
+
const altText = extractAltText(image);
|
|
676
|
+
return altText ? `[Image ${idx + 1}: ${altText}]` : null;
|
|
677
|
+
})
|
|
678
|
+
.filter(Boolean);
|
|
679
|
+
// Build enhanced text with alt text context for accessibility
|
|
680
|
+
// NOTE: Alt text is appended to the user's prompt as contextual information because most AI providers
|
|
681
|
+
// don't have native alt text fields in their APIs. This approach ensures accessibility metadata
|
|
682
|
+
// is preserved and helps AI models better understand image content.
|
|
683
|
+
const enhancedText = altTextDescriptions.length > 0
|
|
684
|
+
? `${text}\n\nImage descriptions for context: ${altTextDescriptions.join(" ")}`
|
|
685
|
+
: text;
|
|
646
686
|
// Smart auto-detection: separate URLs from actual image data
|
|
687
|
+
// Also track alt text for each image
|
|
647
688
|
const urlImages = [];
|
|
648
689
|
const actualImages = [];
|
|
649
690
|
images.forEach((image, _index) => {
|
|
650
|
-
|
|
691
|
+
const imageData = extractImageData(image);
|
|
692
|
+
const altText = extractAltText(image);
|
|
693
|
+
if (typeof imageData === "string" && isInternetUrl(imageData)) {
|
|
651
694
|
// Internet URL - will be downloaded and converted to base64
|
|
652
|
-
urlImages.push(
|
|
695
|
+
urlImages.push({ url: imageData, altText });
|
|
653
696
|
}
|
|
654
697
|
else {
|
|
655
698
|
// Actual image data (file path, Buffer, data URI) - process for Vercel AI SDK
|
|
656
|
-
actualImages.push(
|
|
699
|
+
actualImages.push({ data: imageData, altText });
|
|
657
700
|
}
|
|
658
701
|
});
|
|
659
702
|
// Download URL images and add to actual images
|
|
660
|
-
for (const url of urlImages) {
|
|
703
|
+
for (const { url, altText } of urlImages) {
|
|
661
704
|
try {
|
|
662
705
|
const downloadedDataUri = await downloadImageFromUrl(url);
|
|
663
|
-
actualImages.push(downloadedDataUri);
|
|
706
|
+
actualImages.push({ data: downloadedDataUri, altText });
|
|
664
707
|
}
|
|
665
708
|
catch (error) {
|
|
666
709
|
MultimodalLogger.logError("URL_DOWNLOAD_FAILED_SKIPPING", error, { url });
|
|
@@ -668,9 +711,11 @@ async function convertSimpleImagesToProviderFormat(text, images, provider, _mode
|
|
|
668
711
|
logger.warn(`Failed to download image from ${url}, skipping: ${error instanceof Error ? error.message : String(error)}`);
|
|
669
712
|
}
|
|
670
713
|
}
|
|
671
|
-
const content = [
|
|
714
|
+
const content = [
|
|
715
|
+
{ type: "text", text: enhancedText },
|
|
716
|
+
];
|
|
672
717
|
// Process all images (including downloaded URLs) for Vercel AI SDK
|
|
673
|
-
actualImages.forEach((image, index) => {
|
|
718
|
+
actualImages.forEach(({ data: image }, index) => {
|
|
674
719
|
try {
|
|
675
720
|
// Vercel AI SDK expects { type: 'image', image: Buffer | string, mimeType?: string }
|
|
676
721
|
// For Vertex AI, we need to include mimeType
|
|
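Because the `[Image N: …]` labels are built over the original array before any URL download can fail, the indices always match the caller's ordering. A compact typed sketch of just that labeling step, mirroring the compiled helpers above:

```typescript
type ImageInput = Buffer | string | { data: Buffer | string; altText?: string };

// Numbering runs over the original array, so a later download failure
// cannot shift the indices users see in the prompt.
function buildAltTextContext(text: string, images: ImageInput[]): string {
  const labels = images
    .map((img, idx) => {
      const alt =
        typeof img === "object" && !Buffer.isBuffer(img) && "data" in img
          ? img.altText
          : undefined;
      return alt ? `[Image ${idx + 1}: ${alt}]` : null;
    })
    .filter((l): l is string => l !== null);
  return labels.length > 0
    ? `${text}\n\nImage descriptions for context: ${labels.join(" ")}`
    : text;
}
```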
@@ -44,7 +44,7 @@ import type { StreamOptions } from "../types/streamTypes.js";
|
|
|
44
44
|
export declare function buildMultimodalOptions(options: StreamOptions, providerName: string, modelName: string): {
|
|
45
45
|
input: {
|
|
46
46
|
text: string;
|
|
47
|
-
images: (string | Buffer<ArrayBufferLike>)[] | undefined;
|
|
47
|
+
images: (string | Buffer<ArrayBufferLike> | import("../types/multimodal.js").ImageWithAltText)[] | undefined;
|
|
48
48
|
content: import("../types/multimodal.js").Content[] | undefined;
|
|
49
49
|
files: (string | Buffer<ArrayBufferLike>)[] | undefined;
|
|
50
50
|
csvFiles: (string | Buffer<ArrayBufferLike>)[] | undefined;
|
|
package/dist/middleware/builtin/guardrails.js
CHANGED

@@ -69,8 +69,10 @@ export function createGuardrailsMiddleware(config = {}) {
         };
     }
     const { stream, ...rest } = await doStream();
+    let hasYieldedChunks = false;
     const transformStream = new TransformStream({
         transform(chunk, controller) {
+            hasYieldedChunks = true;
             let filteredChunk = chunk;
             if (typeof filteredChunk === "object" &&
                 "textDelta" in filteredChunk) {
@@ -84,6 +86,11 @@ export function createGuardrailsMiddleware(config = {}) {
             }
             controller.enqueue(filteredChunk);
         },
+        flush() {
+            if (!hasYieldedChunks) {
+                logger.warn(`[GuardrailsMiddleware] Stream ended without yielding any chunks`);
+            }
+        },
     });
     return {
         stream: stream.pipeThrough(transformStream),
package/dist/neurolink.js
CHANGED
@@ -1998,19 +1998,85 @@ Current user's request: ${currentInput}`;
         }
     }
     const { stream: mcpStream, provider: providerName } = await this.createMCPStream(enhancedOptions);
-    // Create a wrapper around the stream that accumulates content
     let accumulatedContent = "";
+    let chunkCount = 0;
+    const metadata = {
+        fallbackAttempted: false,
+        guardrailsBlocked: false,
+        error: undefined,
+    };
     const processedStream = (async function* (self) {
         try {
             for await (const chunk of mcpStream) {
+                chunkCount++;
                 if (chunk &&
                     "content" in chunk &&
                     typeof chunk.content === "string") {
                     accumulatedContent += chunk.content;
-                    // Emit chunk event for compatibility
                     self.emitter.emit("response:chunk", chunk.content);
                 }
-            yield chunk;
+                yield chunk;
+            }
+            if (chunkCount === 0 && !metadata.fallbackAttempted) {
+                metadata.fallbackAttempted = true;
+                const errorMsg = "Stream completed with 0 chunks (possible guardrails block)";
+                metadata.error = errorMsg;
+                const fallbackRoute = ModelRouter.getFallbackRoute(originalPrompt || enhancedOptions.input.text || "", {
+                    provider: providerName,
+                    model: enhancedOptions.model || "gpt-4o",
+                    reasoning: "primary failed",
+                    confidence: 0.5,
+                }, { fallbackStrategy: "auto" });
+                logger.warn("Retrying with fallback provider", {
+                    originalProvider: providerName,
+                    fallbackProvider: fallbackRoute.provider,
+                    reason: errorMsg,
+                });
+                try {
+                    const fallbackProvider = await AIProviderFactory.createProvider(fallbackRoute.provider, fallbackRoute.model);
+                    // Ensure fallback provider can execute tools
+                    fallbackProvider.setupToolExecutor({
+                        customTools: self.getCustomTools(),
+                        executeTool: self.executeTool.bind(self),
+                    }, "NeuroLink.fallbackStream");
+                    // Get conversation messages for context (same as primary stream)
+                    const conversationMessages = await getConversationMessages(self.conversationMemory, {
+                        prompt: enhancedOptions.input.text,
+                        context: enhancedOptions.context,
+                    });
+                    const fallbackResult = await fallbackProvider.stream({
+                        ...enhancedOptions,
+                        model: fallbackRoute.model,
+                        conversationMessages,
+                    });
+                    let fallbackChunkCount = 0;
+                    for await (const fallbackChunk of fallbackResult.stream) {
+                        fallbackChunkCount++;
+                        if (fallbackChunk &&
+                            "content" in fallbackChunk &&
+                            typeof fallbackChunk.content === "string") {
+                            accumulatedContent += fallbackChunk.content;
+                            self.emitter.emit("response:chunk", fallbackChunk.content);
+                        }
+                        yield fallbackChunk;
+                    }
+                    if (fallbackChunkCount === 0) {
+                        throw new Error(`Fallback provider ${fallbackRoute.provider} also returned 0 chunks`);
+                    }
+                    // Fallback succeeded - likely guardrails blocked primary
+                    metadata.guardrailsBlocked = true;
+                }
+                catch (fallbackError) {
+                    const fallbackErrorMsg = fallbackError instanceof Error
+                        ? fallbackError.message
+                        : String(fallbackError);
+                    metadata.error = `${errorMsg}; Fallback failed: ${fallbackErrorMsg}`;
+                    logger.error("Fallback provider failed", {
+                        fallbackProvider: fallbackRoute.provider,
+                        error: fallbackErrorMsg,
+                    });
+                    throw fallbackError;
+                }
             }
         }
         finally {
@@ -2053,7 +2119,7 @@ Current user's request: ${currentInput}`;
         }
     }
     })(this);
-    const streamResult = await this.processStreamResult(
+    const streamResult = await this.processStreamResult(processedStream, enhancedOptions, factoryResult);
     const responseTime = Date.now() - startTime;
     this.emitStreamEndEvents(streamResult);
     return this.createStreamResponse(streamResult, processedStream, {
@@ -2062,7 +2128,9 @@ Current user's request: ${currentInput}`;
         startTime,
         responseTime,
         streamId,
-        fallback:
+        fallback: metadata.fallbackAttempted,
+        guardrailsBlocked: metadata.guardrailsBlocked,
+        error: metadata.error,
     });
 }
 catch (error) {
@@ -2181,6 +2249,8 @@ Current user's request: ${currentInput}`;
             startTime: config.startTime,
             responseTime: config.responseTime,
             fallback: config.fallback || false,
+            guardrailsBlocked: config.guardrailsBlocked,
+            error: config.error,
         },
     };
 }
package/dist/telemetry/telemetryService.d.ts
CHANGED

@@ -31,7 +31,7 @@ export declare class TelemetryService {
     private initializeTelemetry;
     private initializeMetrics;
     initialize(): Promise<void>;
-    traceAIRequest<T>(provider: string, operation: () => Promise<T
+    traceAIRequest<T>(provider: string, operation: () => Promise<T>, operationType?: string): Promise<T>;
     recordAIRequest(provider: string, model: string, tokens: number, duration: number): void;
     recordAIError(provider: string, error: Error): void;
     recordMCPToolCall(toolName: string, duration: number, success: boolean): void;
package/dist/telemetry/telemetryService.js
CHANGED

@@ -108,14 +108,14 @@ export class TelemetryService {
         }
     }
     // AI Operation Tracing (NO-OP when disabled)
-    async traceAIRequest(provider, operation) {
+    async traceAIRequest(provider, operation, operationType = "generate_text") {
         if (!this.enabled || !this.tracer) {
-            return await operation();
+            return await operation();
         }
-        const span = this.tracer.startSpan(`ai.${provider}
+        const span = this.tracer.startSpan(`ai.${provider}.${operationType}`, {
             attributes: {
                 "ai.provider": provider,
-                "ai.operation":
+                "ai.operation": operationType,
             },
         });
         try {
package/dist/types/common.d.ts
CHANGED
@@ -129,3 +129,8 @@ export type TypedEventEmitter<TEvents extends Record<string, unknown>> = {
     listenerCount<K extends keyof TEvents>(event: K): number;
     listeners<K extends keyof TEvents>(event: K): Array<(...args: unknown[]) => void>;
 };
+export type Context = {
+    traceName?: string;
+    userId?: string;
+    sessionId?: string;
+};
package/dist/types/content.d.ts
CHANGED
@@ -14,5 +14,5 @@
  * import type { MultimodalInput } from './types/multimodal.js';
  * ```
  */
-export type { TextContent, ImageContent, CSVContent, PDFContent, AudioContent, VideoContent, Content, MultimodalInput, MultimodalMessage, VisionCapability, ProviderImageFormat, ProcessedImage, ProviderMultimodalPayload, } from "./multimodal.js";
+export type { TextContent, ImageContent, CSVContent, PDFContent, AudioContent, VideoContent, Content, ImageWithAltText, MultimodalInput, MultimodalMessage, VisionCapability, ProviderImageFormat, ProcessedImage, ProviderMultimodalPayload, } from "./multimodal.js";
 export { isTextContent, isImageContent, isCSVContent, isPDFContent, isAudioContent, isVideoContent, isMultimodalInput, } from "./multimodal.js";
@@ -6,7 +6,7 @@ import type { EvaluationData } from "./evaluation.js";
|
|
|
6
6
|
import type { ChatMessage, ConversationMemoryConfig } from "./conversation.js";
|
|
7
7
|
import type { MiddlewareFactoryOptions } from "./middlewareTypes.js";
|
|
8
8
|
import type { JsonValue } from "./common.js";
|
|
9
|
-
import type { Content } from "./content.js";
|
|
9
|
+
import type { Content, ImageWithAltText } from "./content.js";
|
|
10
10
|
/**
|
|
11
11
|
* Generate function options type - Primary method for content generation
|
|
12
12
|
* Supports multimodal content while maintaining backward compatibility
|
|
@@ -14,7 +14,24 @@ import type { Content } from "./content.js";
|
|
|
14
14
|
export type GenerateOptions = {
|
|
15
15
|
input: {
|
|
16
16
|
text: string;
|
|
17
|
-
|
|
17
|
+
/**
|
|
18
|
+
* Images to include in the request.
|
|
19
|
+
* Supports simple image data (Buffer, string) or objects with alt text for accessibility.
|
|
20
|
+
*
|
|
21
|
+
* @example Simple usage
|
|
22
|
+
* ```typescript
|
|
23
|
+
* images: [imageBuffer, "https://example.com/image.jpg"]
|
|
24
|
+
* ```
|
|
25
|
+
*
|
|
26
|
+
* @example With alt text for accessibility
|
|
27
|
+
* ```typescript
|
|
28
|
+
* images: [
|
|
29
|
+
* { data: imageBuffer, altText: "Product screenshot showing main dashboard" },
|
|
30
|
+
* { data: "https://example.com/chart.png", altText: "Sales chart for Q3 2024" }
|
|
31
|
+
* ]
|
|
32
|
+
* ```
|
|
33
|
+
*/
|
|
34
|
+
images?: Array<Buffer | string | ImageWithAltText>;
|
|
18
35
|
csvFiles?: Array<Buffer | string>;
|
|
19
36
|
pdfFiles?: Array<Buffer | string>;
|
|
20
37
|
files?: Array<Buffer | string>;
|
|
package/dist/types/multimodal.d.ts
CHANGED

@@ -52,6 +52,8 @@ export type TextContent = {
 export type ImageContent = {
     type: "image";
     data: Buffer | string;
+    /** Alternative text for accessibility (screen readers, SEO) */
+    altText?: string;
     mediaType?: "image/jpeg" | "image/png" | "image/gif" | "image/webp" | "image/bmp" | "image/tiff";
     metadata?: {
         description?: string;
@@ -164,13 +166,48 @@ export type VideoContent = {
  * Covers text, images, documents, and multimedia
  */
 export type Content = TextContent | ImageContent | CSVContent | PDFContent | AudioContent | VideoContent;
+/**
+ * Image data with optional alt text for accessibility
+ * Use this when you need to provide alt text for screen readers and SEO
+ *
+ * @example
+ * ```typescript
+ * const imageWithAlt: ImageWithAltText = {
+ *   data: imageBuffer,
+ *   altText: "A dashboard showing quarterly sales trends"
+ * };
+ * ```
+ */
+export type ImageWithAltText = {
+    /** Image data as Buffer, base64 string, URL, or data URI */
+    data: Buffer | string;
+    /** Alternative text for accessibility (screen readers, SEO) */
+    altText?: string;
+};
 /**
  * Multimodal input type for options that may contain images or content arrays
  * This is the primary interface for users to provide multimodal content
  */
 export type MultimodalInput = {
     text: string;
-
+    /**
+     * Images to include in the request.
+     * Can be simple image data (Buffer, string) or objects with alt text for accessibility.
+     *
+     * @example Simple usage
+     * ```typescript
+     * images: [imageBuffer, "https://example.com/image.jpg"]
+     * ```
+     *
+     * @example With alt text for accessibility
+     * ```typescript
+     * images: [
+     *   { data: imageBuffer, altText: "Product screenshot showing main dashboard" },
+     *   { data: "https://example.com/chart.png", altText: "Sales chart for Q3 2024" }
+     * ]
+     * ```
+     */
+    images?: Array<Buffer | string | ImageWithAltText>;
     content?: Content[];
     csvFiles?: Array<Buffer | string>;
     pdfFiles?: Array<Buffer | string>;
package/dist/types/streamTypes.d.ts
CHANGED

@@ -1,7 +1,7 @@
 import type { Tool } from "ai";
 import type { ValidationSchema, StandardRecord } from "./typeAliases.js";
 import type { AIModelProviderConfig } from "./providers.js";
-import type { Content } from "./content.js";
+import type { Content, ImageWithAltText } from "./content.js";
 import type { AnalyticsData, ToolExecutionEvent, ToolExecutionSummary } from "../types/index.js";
 import { AIProviderName } from "../constants/enums.js";
 import type { TokenUsage } from "./analytics.js";
@@ -125,7 +125,24 @@ export type StreamOptions = {
     input: {
         text: string;
         audio?: AudioInputSpec;
-
+        /**
+         * Images to include in the request.
+         * Supports simple image data (Buffer, string) or objects with alt text for accessibility.
+         *
+         * @example Simple usage
+         * ```typescript
+         * images: [imageBuffer, "https://example.com/image.jpg"]
+         * ```
+         *
+         * @example With alt text for accessibility
+         * ```typescript
+         * images: [
+         *   { data: imageBuffer, altText: "Product screenshot showing main dashboard" },
+         *   { data: "https://example.com/chart.png", altText: "Sales chart for Q3 2024" }
+         * ]
+         * ```
+         */
+        images?: Array<Buffer | string | ImageWithAltText>;
         csvFiles?: Array<Buffer | string>;
         pdfFiles?: Array<Buffer | string>;
         files?: Array<Buffer | string>;
@@ -211,6 +228,8 @@ export type StreamResult = {
         totalToolExecutions?: number;
         toolExecutionTime?: number;
         hasToolErrors?: boolean;
+        guardrailsBlocked?: boolean;
+        error?: string;
     };
     analytics?: AnalyticsData | Promise<AnalyticsData>;
     evaluation?: EvaluationData | Promise<EvaluationData>;
@@ -10,6 +10,30 @@ import { FileDetector } from "./fileDetector.js";
|
|
|
10
10
|
import { PDFProcessor } from "./pdfProcessor.js";
|
|
11
11
|
import { request, getGlobalDispatcher, interceptors } from "undici";
|
|
12
12
|
import { readFileSync, existsSync } from "fs";
|
|
13
|
+
/**
|
|
14
|
+
* Type guard to check if an image input has alt text
|
|
15
|
+
*/
|
|
16
|
+
function isImageWithAltText(image) {
|
|
17
|
+
return (typeof image === "object" && !Buffer.isBuffer(image) && "data" in image);
|
|
18
|
+
}
|
|
19
|
+
/**
|
|
20
|
+
* Extract image data from an image input (handles both simple and alt text formats)
|
|
21
|
+
*/
|
|
22
|
+
function extractImageData(image) {
|
|
23
|
+
if (isImageWithAltText(image)) {
|
|
24
|
+
return image.data;
|
|
25
|
+
}
|
|
26
|
+
return image;
|
|
27
|
+
}
|
|
28
|
+
/**
|
|
29
|
+
* Extract alt text from an image input if available
|
|
30
|
+
*/
|
|
31
|
+
function extractAltText(image) {
|
|
32
|
+
if (isImageWithAltText(image)) {
|
|
33
|
+
return image.altText;
|
|
34
|
+
}
|
|
35
|
+
return undefined;
|
|
36
|
+
}
|
|
13
37
|
/**
|
|
14
38
|
* Type guard for validating message roles
|
|
15
39
|
*/
|
|
@@ -639,28 +663,47 @@ async function downloadImageFromUrl(url) {
|
|
|
639
663
|
* - URLs: Downloaded and converted to base64 for Vercel AI SDK compatibility
|
|
640
664
|
* - Local files: Converted to base64 for Vercel AI SDK compatibility
|
|
641
665
|
* - Buffers/Data URIs: Processed normally
|
|
666
|
+
* - Supports alt text for accessibility (included as context in text parts)
|
|
642
667
|
*/
|
|
643
668
|
async function convertSimpleImagesToProviderFormat(text, images, provider, _model) {
|
|
644
669
|
// For Vercel AI SDK, we need to return the content in the standard format
|
|
645
670
|
// The Vercel AI SDK will handle provider-specific formatting internally
|
|
671
|
+
// IMPORTANT: Generate alt text descriptions BEFORE URL downloading to maintain correct image numbering
|
|
672
|
+
// This ensures image numbers match the original order provided by users, even if some URLs fail to download
|
|
673
|
+
const altTextDescriptions = images
|
|
674
|
+
.map((image, idx) => {
|
|
675
|
+
const altText = extractAltText(image);
|
|
676
|
+
return altText ? `[Image ${idx + 1}: ${altText}]` : null;
|
|
677
|
+
})
|
|
678
|
+
.filter(Boolean);
|
|
679
|
+
// Build enhanced text with alt text context for accessibility
|
|
680
|
+
// NOTE: Alt text is appended to the user's prompt as contextual information because most AI providers
|
|
681
|
+
// don't have native alt text fields in their APIs. This approach ensures accessibility metadata
|
|
682
|
+
// is preserved and helps AI models better understand image content.
|
|
683
|
+
const enhancedText = altTextDescriptions.length > 0
|
|
684
|
+
? `${text}\n\nImage descriptions for context: ${altTextDescriptions.join(" ")}`
|
|
685
|
+
: text;
|
|
646
686
|
// Smart auto-detection: separate URLs from actual image data
|
|
687
|
+
// Also track alt text for each image
|
|
647
688
|
const urlImages = [];
|
|
648
689
|
const actualImages = [];
|
|
649
690
|
images.forEach((image, _index) => {
|
|
650
|
-
|
|
691
|
+
const imageData = extractImageData(image);
|
|
692
|
+
const altText = extractAltText(image);
|
|
693
|
+
if (typeof imageData === "string" && isInternetUrl(imageData)) {
|
|
651
694
|
// Internet URL - will be downloaded and converted to base64
|
|
652
|
-
urlImages.push(
|
|
695
|
+
urlImages.push({ url: imageData, altText });
|
|
653
696
|
}
|
|
654
697
|
else {
|
|
655
698
|
// Actual image data (file path, Buffer, data URI) - process for Vercel AI SDK
|
|
656
|
-
actualImages.push(
|
|
699
|
+
actualImages.push({ data: imageData, altText });
|
|
657
700
|
}
|
|
658
701
|
});
|
|
659
702
|
// Download URL images and add to actual images
|
|
660
|
-
for (const url of urlImages) {
|
|
703
|
+
for (const { url, altText } of urlImages) {
|
|
661
704
|
try {
|
|
662
705
|
const downloadedDataUri = await downloadImageFromUrl(url);
|
|
663
|
-
actualImages.push(downloadedDataUri);
|
|
706
|
+
actualImages.push({ data: downloadedDataUri, altText });
|
|
664
707
|
}
|
|
665
708
|
catch (error) {
|
|
666
709
|
MultimodalLogger.logError("URL_DOWNLOAD_FAILED_SKIPPING", error, { url });
|
|
@@ -668,9 +711,11 @@ async function convertSimpleImagesToProviderFormat(text, images, provider, _mode
|
|
|
668
711
|
logger.warn(`Failed to download image from ${url}, skipping: ${error instanceof Error ? error.message : String(error)}`);
|
|
669
712
|
}
|
|
670
713
|
}
|
|
671
|
-
const content = [
|
|
714
|
+
const content = [
|
|
715
|
+
{ type: "text", text: enhancedText },
|
|
716
|
+
];
|
|
672
717
|
// Process all images (including downloaded URLs) for Vercel AI SDK
|
|
673
|
-
actualImages.forEach((image, index) => {
|
|
718
|
+
actualImages.forEach(({ data: image }, index) => {
|
|
674
719
|
try {
|
|
675
720
|
// Vercel AI SDK expects { type: 'image', image: Buffer | string, mimeType?: string }
|
|
676
721
|
// For Vertex AI, we need to include mimeType
|
|
@@ -44,7 +44,7 @@ import type { StreamOptions } from "../types/streamTypes.js";
|
|
|
44
44
|
export declare function buildMultimodalOptions(options: StreamOptions, providerName: string, modelName: string): {
|
|
45
45
|
input: {
|
|
46
46
|
text: string;
|
|
47
|
-
images: (string | Buffer<ArrayBufferLike>)[] | undefined;
|
|
47
|
+
images: (string | Buffer<ArrayBufferLike> | import("../types/multimodal.js").ImageWithAltText)[] | undefined;
|
|
48
48
|
content: import("../types/multimodal.js").Content[] | undefined;
|
|
49
49
|
files: (string | Buffer<ArrayBufferLike>)[] | undefined;
|
|
50
50
|
csvFiles: (string | Buffer<ArrayBufferLike>)[] | undefined;
|
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@juspay/neurolink",
-    "version": "8.
+    "version": "8.6.0",
     "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
     "author": {
         "name": "Juspay Technologies",