@juspay/neurolink 7.48.1 → 7.49.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -0
- package/README.md +215 -16
- package/dist/agent/directTools.d.ts +55 -0
- package/dist/agent/directTools.js +266 -0
- package/dist/cli/factories/commandFactory.d.ts +2 -0
- package/dist/cli/factories/commandFactory.js +130 -16
- package/dist/cli/index.js +0 -0
- package/dist/cli/loop/conversationSelector.d.ts +45 -0
- package/dist/cli/loop/conversationSelector.js +222 -0
- package/dist/cli/loop/optionsSchema.d.ts +1 -1
- package/dist/cli/loop/session.d.ts +36 -8
- package/dist/cli/loop/session.js +257 -61
- package/dist/core/baseProvider.js +9 -2
- package/dist/core/evaluation.js +5 -2
- package/dist/factories/providerRegistry.js +2 -2
- package/dist/lib/agent/directTools.d.ts +55 -0
- package/dist/lib/agent/directTools.js +266 -0
- package/dist/lib/core/baseProvider.js +9 -2
- package/dist/lib/core/evaluation.js +5 -2
- package/dist/lib/factories/providerRegistry.js +2 -2
- package/dist/lib/mcp/factory.d.ts +2 -157
- package/dist/lib/mcp/flexibleToolValidator.d.ts +1 -5
- package/dist/lib/mcp/index.d.ts +3 -2
- package/dist/lib/mcp/mcpCircuitBreaker.d.ts +1 -75
- package/dist/lib/mcp/mcpClientFactory.d.ts +1 -20
- package/dist/lib/mcp/mcpClientFactory.js +1 -0
- package/dist/lib/mcp/registry.d.ts +3 -10
- package/dist/lib/mcp/servers/agent/directToolsServer.d.ts +1 -1
- package/dist/lib/mcp/servers/aiProviders/aiCoreServer.d.ts +1 -1
- package/dist/lib/mcp/servers/utilities/utilityServer.d.ts +1 -1
- package/dist/lib/mcp/toolDiscoveryService.d.ts +3 -84
- package/dist/lib/mcp/toolRegistry.d.ts +2 -24
- package/dist/lib/middleware/builtin/guardrails.d.ts +5 -16
- package/dist/lib/middleware/builtin/guardrails.js +44 -39
- package/dist/lib/middleware/utils/guardrailsUtils.d.ts +64 -0
- package/dist/lib/middleware/utils/guardrailsUtils.js +387 -0
- package/dist/lib/neurolink.d.ts +1 -1
- package/dist/lib/providers/anthropic.js +46 -3
- package/dist/lib/providers/azureOpenai.js +8 -2
- package/dist/lib/providers/googleAiStudio.js +8 -2
- package/dist/lib/providers/googleVertex.js +11 -2
- package/dist/lib/providers/huggingFace.js +1 -1
- package/dist/lib/providers/litellm.js +1 -1
- package/dist/lib/providers/mistral.js +1 -1
- package/dist/lib/providers/openAI.js +46 -3
- package/dist/lib/session/globalSessionState.d.ts +26 -0
- package/dist/lib/session/globalSessionState.js +49 -0
- package/dist/lib/types/cli.d.ts +28 -0
- package/dist/lib/types/content.d.ts +18 -5
- package/dist/lib/types/contextTypes.d.ts +1 -1
- package/dist/lib/types/conversation.d.ts +55 -4
- package/dist/lib/types/fileTypes.d.ts +65 -0
- package/dist/lib/types/fileTypes.js +4 -0
- package/dist/lib/types/generateTypes.d.ts +12 -0
- package/dist/lib/types/guardrails.d.ts +103 -0
- package/dist/lib/types/guardrails.js +1 -0
- package/dist/lib/types/index.d.ts +4 -2
- package/dist/lib/types/index.js +4 -0
- package/dist/lib/types/mcpTypes.d.ts +407 -14
- package/dist/lib/types/streamTypes.d.ts +7 -0
- package/dist/lib/types/tools.d.ts +132 -35
- package/dist/lib/utils/csvProcessor.d.ts +68 -0
- package/dist/lib/utils/csvProcessor.js +277 -0
- package/dist/lib/utils/fileDetector.d.ts +57 -0
- package/dist/lib/utils/fileDetector.js +457 -0
- package/dist/lib/utils/imageProcessor.d.ts +10 -0
- package/dist/lib/utils/imageProcessor.js +22 -0
- package/dist/lib/utils/loopUtils.d.ts +71 -0
- package/dist/lib/utils/loopUtils.js +262 -0
- package/dist/lib/utils/messageBuilder.d.ts +2 -1
- package/dist/lib/utils/messageBuilder.js +197 -2
- package/dist/lib/utils/optionsUtils.d.ts +1 -1
- package/dist/mcp/factory.d.ts +2 -157
- package/dist/mcp/flexibleToolValidator.d.ts +1 -5
- package/dist/mcp/index.d.ts +3 -2
- package/dist/mcp/mcpCircuitBreaker.d.ts +1 -75
- package/dist/mcp/mcpClientFactory.d.ts +1 -20
- package/dist/mcp/mcpClientFactory.js +1 -0
- package/dist/mcp/registry.d.ts +3 -10
- package/dist/mcp/servers/agent/directToolsServer.d.ts +1 -1
- package/dist/mcp/servers/aiProviders/aiCoreServer.d.ts +1 -1
- package/dist/mcp/servers/utilities/utilityServer.d.ts +1 -1
- package/dist/mcp/toolDiscoveryService.d.ts +3 -84
- package/dist/mcp/toolRegistry.d.ts +2 -24
- package/dist/middleware/builtin/guardrails.d.ts +5 -16
- package/dist/middleware/builtin/guardrails.js +44 -39
- package/dist/middleware/utils/guardrailsUtils.d.ts +64 -0
- package/dist/middleware/utils/guardrailsUtils.js +387 -0
- package/dist/neurolink.d.ts +1 -1
- package/dist/providers/anthropic.js +46 -3
- package/dist/providers/azureOpenai.js +8 -2
- package/dist/providers/googleAiStudio.js +8 -2
- package/dist/providers/googleVertex.js +11 -2
- package/dist/providers/huggingFace.js +1 -1
- package/dist/providers/litellm.js +1 -1
- package/dist/providers/mistral.js +1 -1
- package/dist/providers/openAI.js +46 -3
- package/dist/session/globalSessionState.d.ts +26 -0
- package/dist/session/globalSessionState.js +49 -0
- package/dist/types/cli.d.ts +28 -0
- package/dist/types/content.d.ts +18 -5
- package/dist/types/contextTypes.d.ts +1 -1
- package/dist/types/conversation.d.ts +55 -4
- package/dist/types/fileTypes.d.ts +65 -0
- package/dist/types/fileTypes.js +4 -0
- package/dist/types/generateTypes.d.ts +12 -0
- package/dist/types/guardrails.d.ts +103 -0
- package/dist/types/guardrails.js +1 -0
- package/dist/types/index.d.ts +4 -2
- package/dist/types/index.js +4 -0
- package/dist/types/mcpTypes.d.ts +407 -14
- package/dist/types/modelTypes.d.ts +6 -6
- package/dist/types/streamTypes.d.ts +7 -0
- package/dist/types/tools.d.ts +132 -35
- package/dist/utils/csvProcessor.d.ts +68 -0
- package/dist/utils/csvProcessor.js +277 -0
- package/dist/utils/fileDetector.d.ts +57 -0
- package/dist/utils/fileDetector.js +457 -0
- package/dist/utils/imageProcessor.d.ts +10 -0
- package/dist/utils/imageProcessor.js +22 -0
- package/dist/utils/loopUtils.d.ts +71 -0
- package/dist/utils/loopUtils.js +262 -0
- package/dist/utils/messageBuilder.d.ts +2 -1
- package/dist/utils/messageBuilder.js +197 -2
- package/dist/utils/optionsUtils.d.ts +1 -1
- package/package.json +9 -3
- package/dist/lib/mcp/contracts/mcpContract.d.ts +0 -106
- package/dist/lib/mcp/contracts/mcpContract.js +0 -5
- package/dist/mcp/contracts/mcpContract.d.ts +0 -106
- package/dist/mcp/contracts/mcpContract.js +0 -5
package/dist/middleware/utils/guardrailsUtils.d.ts
ADDED
@@ -0,0 +1,64 @@
+import type { PrecallEvaluationResult, PrecallEvaluationConfig, EvaluationActionResult, BadWordsConfig } from "../../types/guardrails.js";
+import type { LanguageModelV1CallOptions } from "ai";
+export declare const DEFAULT_EVALUATION_PROMPT = "\nEvaluate the following user input for safety and appropriateness. Respond ONLY with valid JSON:\n\n{\n \"overall\": \"safe|unsafe|suspicious|inappropriate\",\n \"safetyScore\": <1-10>,\n \"appropriatenessScore\": <1-10>,\n \"confidenceLevel\": <1-10>,\n \"issues\": [\n {\n \"category\": \"explicit_content|hate_speech|violence|personal_info|spam|other\",\n \"severity\": \"low|medium|high|critical\",\n \"description\": \"Brief description\"\n }\n ],\n \"suggestedAction\": \"allow|block|sanitize|warn\",\n \"reasoning\": \"Brief explanation\"\n}\n\nUser Input: \"{USER_INPUT}\"\n";
+/**
+ * Extract user input from LanguageModelV1CallOptions
+ */
+export declare function extractUserInput(params: LanguageModelV1CallOptions): string;
+export declare function parseEvaluationResponse(rawResponse: string): PrecallEvaluationResult;
+/**
+ * Handles the precall guardrails logic, including evaluation and sanitization.
+ * @param params - The language model call options.
+ * @param config - The precall evaluation configuration.
+ * @returns An object indicating if the request should be blocked and the (potentially transformed) params.
+ */
+export declare function handlePrecallGuardrails(params: LanguageModelV1CallOptions, config: PrecallEvaluationConfig): Promise<{
+    shouldBlock: boolean;
+    transformedParams: LanguageModelV1CallOptions;
+}>;
+/**
+ * Perform precall evaluation of user input using AI models
+ */
+export declare function performPrecallEvaluation(config: PrecallEvaluationConfig, userInput: string): Promise<PrecallEvaluationResult>;
+export declare function applyEvaluationActions(evaluation: PrecallEvaluationResult, config: PrecallEvaluationConfig, userInput: string): EvaluationActionResult;
+/**
+ * Apply parameter sanitization to request parameters
+ */
+export declare function applySanitization(params: LanguageModelV1CallOptions, sanitizedInput: string): LanguageModelV1CallOptions;
+export declare function escapeRegExp(string: string): string;
+export declare function createBlockedResponse(): {
+    text: string;
+    usage: {
+        promptTokens: number;
+        completionTokens: number;
+    };
+    finishReason: "stop";
+    warnings: never[];
+    rawCall: {
+        rawPrompt: null;
+        rawSettings: {};
+    };
+};
+export declare function createBlockedStream(): ReadableStream<any>;
+/**
+ * Result from content filtering operation
+ */
+export interface ContentFilteringResult {
+    filteredText: string;
+    hasChanges: boolean;
+    appliedFilters: string[];
+    filteringStats: {
+        regexPatternsApplied: number;
+        stringFiltersApplied: number;
+        totalMatches: number;
+    };
+}
+/**
+ * Apply content filtering using bad words configuration
+ * Handles both regex patterns and string lists with proper priority
+ * @param text The text to filter
+ * @param badWordsConfig Bad words configuration
+ * @param context Optional context for logging (e.g., "generate", "stream")
+ * @returns Filtering result with filtered text and metadata
+ */
+export declare function applyContentFiltering(text: string, badWordsConfig?: BadWordsConfig, context?: string): ContentFilteringResult;
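For orientation, a rough usage sketch of the postcall filter declared above. The BadWordsConfig field names (enabled, list, regexPatterns, replacementText) are taken from the implementation in guardrailsUtils.js shown next; the deep import path is an assumption, and the helper may also be re-exported elsewhere in the package.

// Hypothetical usage sketch (import path assumed):
import { applyContentFiltering } from "@juspay/neurolink/dist/middleware/utils/guardrailsUtils.js";

const result = applyContentFiltering(
    "my password is hunter2",
    { enabled: true, list: ["hunter2"], replacementText: "[REDACTED]" },
    "generate",
);
// result.filteredText                  -> "my password is [REDACTED]"
// result.appliedFilters                -> ["string:hunter2"]
// result.filteringStats.totalMatches   -> 1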
package/dist/middleware/utils/guardrailsUtils.js
ADDED
@@ -0,0 +1,387 @@
+import { AIProviderFactory } from "../../core/factory.js";
+import { logger } from "../../utils/logger.js";
+export const DEFAULT_EVALUATION_PROMPT = `
+Evaluate the following user input for safety and appropriateness. Respond ONLY with valid JSON:
+
+{
+  "overall": "safe|unsafe|suspicious|inappropriate",
+  "safetyScore": <1-10>,
+  "appropriatenessScore": <1-10>,
+  "confidenceLevel": <1-10>,
+  "issues": [
+    {
+      "category": "explicit_content|hate_speech|violence|personal_info|spam|other",
+      "severity": "low|medium|high|critical",
+      "description": "Brief description"
+    }
+  ],
+  "suggestedAction": "allow|block|sanitize|warn",
+  "reasoning": "Brief explanation"
+}
+
+User Input: "{USER_INPUT}"
+`;
+/**
+ * Extract user input from LanguageModelV1CallOptions
+ */
+export function extractUserInput(params) {
+    if (typeof params.prompt === "string") {
+        return params.prompt;
+    }
+    return params.prompt
+        .map((msg) => {
+        if (msg.role === "user") {
+            if (typeof msg.content === "string") {
+                return msg.content;
+            }
+            else if (Array.isArray(msg.content)) {
+                return msg.content
+                    .filter((part) => part.type === "text")
+                    .map((part) => part.text)
+                    .join(" ");
+            }
+        }
+        return "";
+    })
+        .filter(Boolean)
+        .join("\n");
+}
+export function parseEvaluationResponse(rawResponse) {
+    try {
+        const cleanedResponse = rawResponse.replace(/```json\n|```/g, "").trim();
+        const parsed = JSON.parse(cleanedResponse);
+        return {
+            overall: parsed.overall || "safe",
+            safetyScore: Number(parsed.safetyScore) || 10,
+            appropriatenessScore: Number(parsed.appropriatenessScore) || 10,
+            confidenceLevel: Number(parsed.confidenceLevel) || 10,
+            issues: parsed.issues || [],
+            suggestedAction: parsed.suggestedAction || "allow",
+            reasoning: parsed.reasoning || "No reasoning provided.",
+        };
+    }
+    catch (error) {
+        logger.error("[GuardrailsUtils] Failed to parse evaluation response:", error);
+        return {
+            overall: "safe",
+            safetyScore: 10,
+            appropriatenessScore: 10,
+            confidenceLevel: 1,
+            suggestedAction: "allow",
+            reasoning: "Error parsing evaluation response - allowing by default.",
+        };
+    }
+}
+/**
+ * Handles the precall guardrails logic, including evaluation and sanitization.
+ * @param params - The language model call options.
+ * @param config - The precall evaluation configuration.
+ * @returns An object indicating if the request should be blocked and the (potentially transformed) params.
+ */
+export async function handlePrecallGuardrails(params, config) {
+    const userInput = extractUserInput(params);
+    let transformedParams = params;
+    if (userInput.trim()) {
+        logger.debug(`[GuardrailsUtils] Performing precall evaluation on user input.`);
+        const evaluation = await performPrecallEvaluation(config, userInput);
+        const actionResult = applyEvaluationActions(evaluation, config, userInput);
+        if (actionResult.shouldBlock) {
+            logger.warn(`[GuardrailsUtils] Blocking request due to precall evaluation.`, {
+                evaluation,
+                userInput: userInput.substring(0, 100),
+            });
+            return { shouldBlock: true, transformedParams };
+        }
+        if (actionResult.sanitizedInput) {
+            logger.info(`[GuardrailsUtils] Applying input sanitization.`);
+            transformedParams = applySanitization(params, actionResult.sanitizedInput);
+        }
+    }
+    else {
+        logger.debug(`[GuardrailsUtils] Skipping precall evaluation - no user content to evaluate.`);
+    }
+    return { shouldBlock: false, transformedParams };
+}
+/**
+ * Perform precall evaluation of user input using AI models
+ */
+export async function performPrecallEvaluation(config, userInput) {
+    try {
+        const provider = await AIProviderFactory.createProvider(config.provider || "google-ai", config.evaluationModel || "gemini-1.5-flash");
+        const evaluationPrompt = config.evaluationPrompt || DEFAULT_EVALUATION_PROMPT;
+        const prompt = evaluationPrompt.replace("{USER_INPUT}", userInput);
+        const result = await provider.generate({
+            input: { text: prompt },
+        });
+        if (!result || !result.content) {
+            throw new Error("Evaluation generation failed to return a result.");
+        }
+        return parseEvaluationResponse(result.content);
+    }
+    catch (error) {
+        logger.error("[GuardrailsUtils] Precall evaluation failed:", error);
+        return {
+            overall: "safe",
+            safetyScore: 10,
+            appropriatenessScore: 10,
+            confidenceLevel: 1,
+            suggestedAction: "allow",
+            reasoning: "Evaluation failed - allowing by default.",
+        };
+    }
+}
+export function applyEvaluationActions(evaluation, config, userInput) {
+    const actions = config.actions || {};
+    const thresholds = config.thresholds || {};
+    const safetyThreshold = thresholds.safetyScore || 7;
+    const appropriatenessThreshold = thresholds.appropriatenessScore || 6;
+    let actionToTake;
+    if (evaluation.overall === "unsafe" ||
+        evaluation.safetyScore < safetyThreshold) {
+        actionToTake = actions.onUnsafe || "block";
+    }
+    else if (evaluation.overall === "inappropriate" ||
+        evaluation.appropriatenessScore < appropriatenessThreshold) {
+        actionToTake = actions.onInappropriate || "warn";
+    }
+    else if (evaluation.overall === "suspicious") {
+        actionToTake = actions.onSuspicious || "log";
+    }
+    else {
+        actionToTake = "allow";
+    }
+    logger.info("[GuardrailsUtils] Precall evaluation result:", {
+        overall: evaluation.overall,
+        safetyScore: evaluation.safetyScore,
+        appropriatenessScore: evaluation.appropriatenessScore,
+        confidenceLevel: evaluation.confidenceLevel,
+        suggestedAction: evaluation.suggestedAction,
+        actionTaken: actionToTake,
+        reasoning: evaluation.reasoning,
+        issues: evaluation.issues,
+    });
+    switch (actionToTake) {
+        case "block":
+            return { shouldBlock: true };
+        case "sanitize": {
+            let sanitized = userInput;
+            const patterns = config.sanitizationPatterns || [];
+            const replacementText = config.replacementText || "[REDACTED]";
+            if (patterns.length > 0) {
+                logger.debug(`[GuardrailsUtils] Applying ${patterns.length} sanitization patterns with replacement: "${replacementText}".`);
+                patterns.forEach((pattern, index) => {
+                    try {
+                        const regex = new RegExp(pattern, "gi");
+                        const before = sanitized;
+                        let matchCount = 0;
+                        sanitized = sanitized.replace(regex, () => {
+                            matchCount++;
+                            return replacementText;
+                        });
+                        if (before !== sanitized) {
+                            logger.debug(`[GuardrailsUtils] Pattern ${index + 1} matched ${matchCount} times.`);
+                        }
+                    }
+                    catch (error) {
+                        logger.error(`[GuardrailsUtils] Invalid sanitization pattern "${pattern}":`, error);
+                    }
+                });
+                if (sanitized !== userInput) {
+                    logger.info(`[GuardrailsUtils] Input sanitized using ${patterns.length} patterns.`);
+                }
+            }
+            else {
+                logger.warn("[GuardrailsUtils] Sanitize action triggered but no sanitizationPatterns provided in config. Input will not be modified.");
+            }
+            return { shouldBlock: false, sanitizedInput: sanitized };
+        }
+        case "warn": {
+            logger.warn("[GuardrailsUtils] Potentially inappropriate content detected but allowing:", {
+                userInput: userInput.substring(0, 100),
+                evaluation,
+            });
+            return { shouldBlock: false };
+        }
+        case "log": {
+            logger.info("[GuardrailsUtils] Suspicious content detected:", {
+                userInput: userInput.substring(0, 100),
+                evaluation,
+            });
+            return { shouldBlock: false };
+        }
+        default:
+            return { shouldBlock: false };
+    }
+}
+/**
+ * Apply parameter sanitization to request parameters
+ */
+export function applySanitization(params, sanitizedInput) {
+    const sanitizedParams = { ...params };
+    if (Array.isArray(params.prompt)) {
+        const sanitizedPrompt = params.prompt.map((msg) => {
+            if (msg.role === "user") {
+                if (typeof msg.content === "string") {
+                    return {
+                        ...msg,
+                        content: [{ type: "text", text: sanitizedInput }],
+                    };
+                }
+                else if (Array.isArray(msg.content)) {
+                    const sanitizedContent = msg.content.map((part) => {
+                        if (part.type === "text") {
+                            return { ...part, text: sanitizedInput };
+                        }
+                        return part;
+                    });
+                    return { ...msg, content: sanitizedContent };
+                }
+            }
+            return msg;
+        });
+        sanitizedParams.prompt =
+            sanitizedPrompt;
+    }
+    return sanitizedParams;
+}
+export function escapeRegExp(string) {
+    return string.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
+}
+export function createBlockedResponse() {
+    return {
+        text: "Request contains inappropriate content and has been blocked.",
+        usage: { promptTokens: 0, completionTokens: 0 },
+        finishReason: "stop",
+        warnings: [],
+        rawCall: { rawPrompt: null, rawSettings: {} },
+    };
+}
+export function createBlockedStream() {
+    return new ReadableStream({
+        start(controller) {
+            controller.enqueue({
+                type: "text-delta",
+                textDelta: "Request contains inappropriate content and has been blocked.",
+            });
+            controller.enqueue({
+                type: "finish",
+                finishReason: "stop",
+                usage: { promptTokens: 0, completionTokens: 0 },
+            });
+            controller.close();
+        },
+    });
+}
+/**
+ * Apply content filtering using bad words configuration
+ * Handles both regex patterns and string lists with proper priority
+ * @param text The text to filter
+ * @param badWordsConfig Bad words configuration
+ * @param context Optional context for logging (e.g., "generate", "stream")
+ * @returns Filtering result with filtered text and metadata
+ */
+export function applyContentFiltering(text, badWordsConfig, context = "unknown") {
+    // Early return if filtering is disabled or no config
+    try {
+        if (!badWordsConfig?.enabled || !text) {
+            return {
+                filteredText: text,
+                hasChanges: false,
+                appliedFilters: [],
+                filteringStats: {
+                    regexPatternsApplied: 0,
+                    stringFiltersApplied: 0,
+                    totalMatches: 0,
+                },
+            };
+        }
+        let filteredText = text;
+        let hasChanges = false;
+        const appliedFilters = [];
+        let totalMatches = 0;
+        const replacementText = badWordsConfig.replacementText || "[REDACTED]";
+        // Priority 1: Use regex patterns if provided
+        if (badWordsConfig.regexPatterns &&
+            badWordsConfig.regexPatterns.length > 0) {
+            if (badWordsConfig.list && badWordsConfig.list.length > 0) {
+                logger.warn(`[ContentFiltering:${context}] Both regexPatterns and list provided. Using regexPatterns and ignoring list.`);
+            }
+            logger.debug(`[ContentFiltering:${context}] Applying regex pattern filtering with ${badWordsConfig.regexPatterns.length} patterns using replacement: "${replacementText}".`);
+            for (const pattern of badWordsConfig.regexPatterns) {
+                try {
+                    // TODO: Add blocking for overly complex or long patterns
+                    if (pattern.length > 1000) {
+                        logger.warn(`[ContentFiltering:${context}] Regex pattern exceeds max length (1000 chars): "${pattern.substring(0, 50)}..."`);
+                    }
+                    const regex = new RegExp(pattern, "gi");
+                    const testStart = Date.now();
+                    regex.test("test");
+                    if (Date.now() - testStart > 100) {
+                        logger.warn(`[ContentFiltering:${context}] Regex pattern "${pattern}" appears to be too complex (slow test execution).`);
+                    }
+                    const before = filteredText;
+                    let matchCount = 0;
+                    filteredText = filteredText?.replace(regex, () => {
+                        matchCount++;
+                        return replacementText;
+                    });
+                    if (before !== filteredText) {
+                        hasChanges = true;
+                        totalMatches += matchCount;
+                        appliedFilters.push(`regex:${pattern}`);
+                        logger.debug(`[ContentFiltering:${context}] Regex pattern "${pattern}" matched ${matchCount} times and filtered content.`);
+                    }
+                }
+                catch (error) {
+                    logger.error(`[ContentFiltering:${context}] Invalid regex pattern "${pattern}":`, error);
+                }
+            }
+        }
+        // Priority 2: Use simple string list if no regex patterns
+        else if (badWordsConfig.list && badWordsConfig.list.length > 0) {
+            logger.debug(`[ContentFiltering:${context}] Applying string list filtering with ${badWordsConfig.list.length} terms using replacement: "${replacementText}".`);
+            for (const term of badWordsConfig.list) {
+                const regex = new RegExp(escapeRegExp(term), "gi");
+                const before = filteredText;
+                let matchCount = 0;
+                filteredText = filteredText?.replace(regex, () => {
+                    matchCount++;
+                    return replacementText;
+                });
+                if (before !== filteredText) {
+                    hasChanges = true;
+                    totalMatches += matchCount;
+                    appliedFilters.push(`string:${term}`);
+                    logger.debug(`[ContentFiltering:${context}] String filter "${term}" matched ${matchCount} times.`);
+                }
+            }
+        }
+        const result = {
+            filteredText,
+            hasChanges,
+            appliedFilters,
+            filteringStats: {
+                regexPatternsApplied: badWordsConfig.regexPatterns?.length || 0,
+                stringFiltersApplied: badWordsConfig.list?.length || 0,
+                totalMatches,
+            },
+        };
+        if (hasChanges) {
+            logger.debug(`[ContentFiltering:${context}] Filtering completed. Applied ${appliedFilters.length} filters with ${totalMatches} total matches.`);
+        }
+        return result;
+    }
+    catch (error) {
+        logger.error(`[ContentFiltering:${context}] Error during content filtering:`, error);
+        return {
+            filteredText: text,
+            hasChanges: false,
+            appliedFilters: [],
+            filteringStats: {
+                regexPatternsApplied: 0,
+                stringFiltersApplied: 0,
+                totalMatches: 0,
+            },
+        };
+    }
+}
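To make the action-resolution path above concrete, here is a small illustrative walk-through of applyEvaluationActions (a pure function, so no provider call is needed). The evaluation and config objects below are example values chosen to hit the sanitize branch, not output from the package:

// Illustrative only: shapes mirror the fields read by applyEvaluationActions above.
const evaluation = {
    overall: "suspicious",
    safetyScore: 8,
    appropriatenessScore: 7,
    confidenceLevel: 6,
    issues: [],
    suggestedAction: "warn",
    reasoning: "Borderline phrasing",
};
const config = {
    actions: { onSuspicious: "sanitize" },      // route suspicious input to sanitization
    sanitizationPatterns: ["\\b\\d{16}\\b"],    // e.g. redact 16-digit card-like numbers
    replacementText: "[REDACTED]",
};
const action = applyEvaluationActions(evaluation, config, "pay with 4111111111111111");
// -> { shouldBlock: false, sanitizedInput: "pay with [REDACTED]" }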
package/dist/neurolink.d.ts
CHANGED
@@ -10,7 +10,7 @@ import { MCPToolRegistry } from "./mcp/toolRegistry.js";
 import type { GenerateOptions, GenerateResult } from "./types/generateTypes.js";
 import type { StreamOptions, StreamResult } from "./types/streamTypes.js";
 import type { MCPServerInfo, MCPExecutableTool } from "./types/mcpTypes.js";
-import type { ToolInfo } from "./
+import type { ToolInfo } from "./types/tools.js";
 import type { NeuroLinkEvents, TypedEventEmitter, ToolExecutionContext, ToolExecutionSummary } from "./types/common.js";
 import type { JsonObject } from "./types/common.js";
 import type { BatchOperationResult } from "./types/typeAliases.js";
package/dist/providers/anthropic.js
CHANGED
@@ -7,7 +7,7 @@ import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
 import { AuthenticationError, NetworkError, ProviderError, RateLimitError, } from "../types/errors.js";
 import { DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createAnthropicConfig, getProviderModel, } from "../utils/providerConfig.js";
-import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { buildMessagesArray, buildMultimodalMessagesArray, convertToCoreMessages, } from "../utils/messageBuilder.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers - now using consolidated utility
 const getAnthropicApiKey = () => {
@@ -92,8 +92,51 @@ export class AnthropicProvider extends BaseProvider {
         // ✅ Get tools for streaming (same as generate method)
         const shouldUseTools = !options.disableTools && this.supportsTools();
         const tools = shouldUseTools ? await this.getAllTools() : {};
-        // Build message array from options
-        const
+        // Build message array from options with multimodal support
+        const hasMultimodalInput = !!(options.input?.images?.length ||
+            options.input?.content?.length ||
+            options.input?.files?.length ||
+            options.input?.csvFiles?.length);
+        let messages;
+        if (hasMultimodalInput) {
+            logger.debug(`Anthropic: Detected multimodal input, using multimodal message builder`, {
+                hasImages: !!options.input?.images?.length,
+                imageCount: options.input?.images?.length || 0,
+                hasContent: !!options.input?.content?.length,
+                contentCount: options.input?.content?.length || 0,
+                hasFiles: !!options.input?.files?.length,
+                fileCount: options.input?.files?.length || 0,
+                hasCSVFiles: !!options.input?.csvFiles?.length,
+                csvFileCount: options.input?.csvFiles?.length || 0,
+            });
+            // Create multimodal options for buildMultimodalMessagesArray
+            const multimodalOptions = {
+                input: {
+                    text: options.input?.text || "",
+                    images: options.input?.images,
+                    content: options.input?.content,
+                    files: options.input?.files,
+                    csvFiles: options.input?.csvFiles,
+                },
+                csvOptions: options.csvOptions,
+                systemPrompt: options.systemPrompt,
+                conversationHistory: options.conversationMessages,
+                provider: this.providerName,
+                model: this.modelName,
+                temperature: options.temperature,
+                maxTokens: options.maxTokens,
+                enableAnalytics: options.enableAnalytics,
+                enableEvaluation: options.enableEvaluation,
+                context: options.context,
+            };
+            const mm = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
+            // Convert multimodal messages to Vercel AI SDK format (CoreMessage[])
+            messages = convertToCoreMessages(mm);
+        }
+        else {
+            logger.debug(`Anthropic: Text-only input, using standard message builder`);
+            messages = await buildMessagesArray(options);
+        }
         const model = await this.getAISDKModelWithMiddleware(options);
         const result = await streamText({
             model: model,
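The same detection predicate is added to each provider below. As a quick standalone illustration of the boolean logic (a sketch, not provider code), any non-empty images, content, files, or csvFiles array flips the call onto the multimodal builder path:

// Sketch of the detection logic added above; placeholder array entries stand in for real attachments.
const hasMultimodalInput = (input) => !!(input?.images?.length ||
    input?.content?.length ||
    input?.files?.length ||
    input?.csvFiles?.length);

hasMultimodalInput({ text: "hello" });                   // false -> buildMessagesArray(options)
hasMultimodalInput({ text: "describe", images: [{}] });  // true  -> buildMultimodalMessagesArray + convertToCoreMessages
hasMultimodalInput({ text: "sum", csvFiles: [{}] });     // true  -> CSV input also takes the multimodal path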
package/dist/providers/azureOpenai.js
CHANGED
@@ -110,7 +110,10 @@ export class AzureOpenAIProvider extends BaseProvider {
             });
         }
         // Build message array from options with multimodal support
-        const hasMultimodalInput = !!(options.input?.images?.length ||
+        const hasMultimodalInput = !!(options.input?.images?.length ||
+            options.input?.content?.length ||
+            options.input?.files?.length ||
+            options.input?.csvFiles?.length);
         let messages;
         if (hasMultimodalInput) {
             logger.debug(`Azure OpenAI: Detected multimodal input, using multimodal message builder`, {
@@ -125,7 +128,10 @@ export class AzureOpenAIProvider extends BaseProvider {
                     text: options.input?.text || "",
                     images: options.input?.images,
                     content: options.input?.content,
+                    files: options.input?.files,
+                    csvFiles: options.input?.csvFiles,
                 },
+                csvOptions: options.csvOptions,
                 systemPrompt: options.systemPrompt,
                 conversationHistory: options.conversationMessages,
                 provider: this.providerName,
@@ -142,7 +148,7 @@ export class AzureOpenAIProvider extends BaseProvider {
         }
         else {
             logger.debug(`Azure OpenAI: Text-only input, using standard message builder`);
-            messages = buildMessagesArray(options);
+            messages = await buildMessagesArray(options);
         }
         const model = await this.getAISDKModelWithMiddleware(options);
         const stream = await streamText({
package/dist/providers/googleAiStudio.js
CHANGED
@@ -92,7 +92,10 @@ export class GoogleAIStudioProvider extends BaseProvider {
         const shouldUseTools = !options.disableTools && this.supportsTools();
         const tools = shouldUseTools ? await this.getAllTools() : {};
         // Build message array from options with multimodal support
-        const hasMultimodalInput = !!(options.input?.images?.length ||
+        const hasMultimodalInput = !!(options.input?.images?.length ||
+            options.input?.content?.length ||
+            options.input?.files?.length ||
+            options.input?.csvFiles?.length);
         let messages;
         if (hasMultimodalInput) {
             logger.debug(`Google AI Studio: Detected multimodal input, using multimodal message builder`, {
@@ -107,7 +110,10 @@ export class GoogleAIStudioProvider extends BaseProvider {
                     text: options.input?.text || "",
                     images: options.input?.images,
                     content: options.input?.content,
+                    files: options.input?.files,
+                    csvFiles: options.input?.csvFiles,
                 },
+                csvOptions: options.csvOptions,
                 systemPrompt: options.systemPrompt,
                 conversationHistory: options.conversationMessages,
                 provider: this.providerName,
@@ -124,7 +130,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
         }
         else {
             logger.debug(`Google AI Studio: Text-only input, using standard message builder`);
-            messages = buildMessagesArray(options);
+            messages = await buildMessagesArray(options);
         }
         const result = await streamText({
             model,
package/dist/providers/googleVertex.js
CHANGED
@@ -596,7 +596,10 @@ export class GoogleVertexProvider extends BaseProvider {
         // Validate stream options
         this.validateStreamOptionsOnly(options);
         // Build message array from options with multimodal support
-        const hasMultimodalInput = !!(options.input?.images?.length ||
+        const hasMultimodalInput = !!(options.input?.images?.length ||
+            options.input?.content?.length ||
+            options.input?.files?.length ||
+            options.input?.csvFiles?.length);
         let messages;
         if (hasMultimodalInput) {
             logger.debug(`${functionTag}: Detected multimodal input, using multimodal message builder`, {
@@ -611,7 +614,10 @@ export class GoogleVertexProvider extends BaseProvider {
                     text: options.input?.text || "",
                     images: options.input?.images,
                     content: options.input?.content,
+                    files: options.input?.files,
+                    csvFiles: options.input?.csvFiles,
                 },
+                csvOptions: options.csvOptions,
                 systemPrompt: options.systemPrompt,
                 conversationHistory: options.conversationMessages,
                 provider: this.providerName,
@@ -628,7 +634,7 @@ export class GoogleVertexProvider extends BaseProvider {
         }
         else {
             logger.debug(`${functionTag}: Text-only input, using standard message builder`);
-            messages = buildMessagesArray(options);
+            messages = await buildMessagesArray(options);
         }
         const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
         // Get all available tools (direct + MCP + external) for streaming
@@ -937,6 +943,7 @@ export class GoogleVertexProvider extends BaseProvider {
             modelName,
             issue: modelValidation.issue,
             recommendedModels: [
+                "claude-sonnet-4-5@20250929",
                 "claude-sonnet-4@20250514",
                 "claude-opus-4@20250514",
                 "claude-3-5-sonnet-20241022",
@@ -1169,6 +1176,7 @@ export class GoogleVertexProvider extends BaseProvider {
         // Validate against known Claude model patterns
         const validPatterns = [
             /^claude-sonnet-4@\d{8}$/,
+            /^claude-sonnet-4-5@\d{8}$/,
             /^claude-opus-4@\d{8}$/,
             /^claude-3-5-sonnet-\d{8}$/,
             /^claude-3-5-haiku-\d{8}$/,
@@ -1390,6 +1398,7 @@ export class GoogleVertexProvider extends BaseProvider {
                 "gemini-1.5-flash",
             ],
             claude: [
+                "claude-sonnet-4-5@20250929",
                 "claude-sonnet-4@20250514",
                 "claude-opus-4@20250514",
                 "claude-3-5-sonnet-20241022",
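The Vertex changes above add the claude-sonnet-4-5 id to the recommended model lists and to the validation patterns. Note that the new pattern only accepts the Vertex-style versioned form:

/^claude-sonnet-4-5@\d{8}$/.test("claude-sonnet-4-5@20250929"); // true
/^claude-sonnet-4-5@\d{8}$/.test("claude-sonnet-4.5");          // false - the @YYYYMMDD suffix is required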
package/dist/providers/huggingFace.js
CHANGED
@@ -114,7 +114,7 @@ export class HuggingFaceProvider extends BaseProvider {
         // Enhanced tool handling for HuggingFace models
         const streamOptions = this.prepareStreamOptions(options, analysisSchema);
         // Build message array from options
-        const messages = buildMessagesArray(options);
+        const messages = await buildMessagesArray(options);
         const result = await streamText({
             model: this.model,
             messages: messages,
package/dist/providers/litellm.js
CHANGED
@@ -121,7 +121,7 @@ export class LiteLLMProvider extends BaseProvider {
         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {
             // Build message array from options
-            const messages = buildMessagesArray(options);
+            const messages = await buildMessagesArray(options);
             const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
             const result = streamText({
                 model: model,
package/dist/providers/mistral.js
CHANGED
@@ -49,7 +49,7 @@ export class MistralProvider extends BaseProvider {
         // Get tools consistently with generate method
         const shouldUseTools = !options.disableTools && this.supportsTools();
         const tools = shouldUseTools ? await this.getAllTools() : {};
-        const messages = buildMessagesArray(options);
+        const messages = await buildMessagesArray(options);
         const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
         const result = await streamText({
             model,