@juspay/neurolink 7.41.3 → 7.42.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/core/baseProvider.js +16 -0
- package/dist/lib/core/baseProvider.js +16 -0
- package/dist/lib/middleware/builtin/guardrails.js +11 -5
- package/dist/lib/middleware/factory.js +2 -0
- package/dist/lib/middleware/registry.js +3 -0
- package/dist/lib/providers/anthropic.js +2 -1
- package/dist/lib/providers/anthropicBaseProvider.js +1 -3
- package/dist/lib/providers/azureOpenai.js +2 -1
- package/dist/lib/providers/googleAiStudio.js +1 -2
- package/dist/lib/providers/googleVertex.js +1 -1
- package/dist/lib/providers/litellm.js +2 -1
- package/dist/lib/providers/mistral.js +2 -1
- package/dist/lib/providers/openAI.js +2 -1
- package/dist/lib/providers/openaiCompatible.js +1 -1
- package/dist/lib/types/index.d.ts +1 -0
- package/dist/lib/types/index.js +2 -0
- package/dist/lib/types/sdkTypes.d.ts +25 -0
- package/dist/lib/types/sdkTypes.js +8 -0
- package/dist/middleware/builtin/guardrails.js +11 -5
- package/dist/middleware/factory.js +2 -0
- package/dist/middleware/registry.js +3 -0
- package/dist/providers/anthropic.js +2 -1
- package/dist/providers/anthropicBaseProvider.js +1 -3
- package/dist/providers/azureOpenai.js +2 -1
- package/dist/providers/googleAiStudio.js +1 -2
- package/dist/providers/googleVertex.js +1 -1
- package/dist/providers/litellm.js +2 -1
- package/dist/providers/mistral.js +2 -1
- package/dist/providers/openAI.js +2 -1
- package/dist/providers/openaiCompatible.js +1 -1
- package/dist/types/index.d.ts +1 -0
- package/dist/types/index.js +2 -0
- package/dist/types/sdkTypes.d.ts +25 -0
- package/dist/types/sdkTypes.js +8 -0
- package/package.json +6 -1
package/CHANGELOG.md
CHANGED
@@ -1,3 +1,15 @@
+## [7.42.0](https://github.com/juspay/neurolink/compare/v7.41.4...v7.42.0) (2025-09-23)
+
+### Features
+
+- **(middleware):** robust bad word filtering in guardrails and correct stream usage ([d396797](https://github.com/juspay/neurolink/commit/d396797640832a373b386a7c550ec406e129d2d2))
+
+## [7.41.4](https://github.com/juspay/neurolink/compare/v7.41.3...v7.41.4) (2025-09-21)
+
+### Bug Fixes
+
+- **(types):** expose core SDK types for external developer integration ([66199c9](https://github.com/juspay/neurolink/commit/66199c9fb579f1aaab929ca987ac028eafa61a46))
+
 ## [7.41.3](https://github.com/juspay/neurolink/compare/v7.41.2...v7.41.3) (2025-09-20)
 
 ## [7.41.2](https://github.com/juspay/neurolink/compare/v7.41.1...v7.41.2) (2025-09-20)
@@ -623,12 +623,28 @@ export class BaseProvider {
     async getAISDKModelWithMiddleware(options = {}) {
         // Get the base model
         const baseModel = await this.getAISDKModel();
+        logger.debug(`Retrieved base model for ${this.providerName}`, {
+            provider: this.providerName,
+            model: this.modelName,
+            hasMiddlewareConfig: !!this.middlewareOptions,
+            timestamp: Date.now(),
+        });
         // Check if middleware should be applied
         const middlewareOptions = this.extractMiddlewareOptions(options);
+        logger.debug(`Middleware extraction result`, {
+            provider: this.providerName,
+            model: this.modelName,
+            middlewareOptions,
+        });
         if (!middlewareOptions) {
             return baseModel;
         }
         try {
+            logger.debug(`Applying middleware to ${this.providerName} model`, {
+                provider: this.providerName,
+                model: this.modelName,
+                middlewareOptions,
+            });
             // Create a new factory instance with the specified options
             const factory = new MiddlewareFactory(middlewareOptions);
             // Create middleware context

@@ -623,12 +623,28 @@ export class BaseProvider {
     async getAISDKModelWithMiddleware(options = {}) {
         // Get the base model
         const baseModel = await this.getAISDKModel();
+        logger.debug(`Retrieved base model for ${this.providerName}`, {
+            provider: this.providerName,
+            model: this.modelName,
+            hasMiddlewareConfig: !!this.middlewareOptions,
+            timestamp: Date.now(),
+        });
         // Check if middleware should be applied
         const middlewareOptions = this.extractMiddlewareOptions(options);
+        logger.debug(`Middleware extraction result`, {
+            provider: this.providerName,
+            model: this.modelName,
+            middlewareOptions,
+        });
         if (!middlewareOptions) {
             return baseModel;
         }
         try {
+            logger.debug(`Applying middleware to ${this.providerName} model`, {
+                provider: this.providerName,
+                model: this.modelName,
+                middlewareOptions,
+            });
             // Create a new factory instance with the specified options
             const factory = new MiddlewareFactory(middlewareOptions);
             // Create middleware context
@@ -58,16 +58,22 @@ export function createGuardrailsMiddleware(config = {}) {
             badWordsEnabled: !!config.badWords?.enabled,
         });
         const { stream, ...rest } = await doStream();
-        //
-
+        // Helper to escape regex special characters
+        function escapeRegExp(string) {
+            return string.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
+        }
         const transformStream = new TransformStream({
             transform(chunk, controller) {
                 let filteredChunk = chunk;
                 if (config.badWords?.enabled && config.badWords.list) {
                     for (const term of config.badWords.list) {
-                        const regex = new RegExp(term, "gi");
-                        if (typeof filteredChunk === "
-
+                        const regex = new RegExp(escapeRegExp(term), "gi");
+                        if (typeof filteredChunk === "object" &&
+                            "textDelta" in filteredChunk) {
+                            filteredChunk = {
+                                ...filteredChunk,
+                                textDelta: filteredChunk.textDelta.replace(regex, "*".repeat(term.length)),
+                            };
                         }
                     }
                 }
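Note on the guardrails change above: bad-word terms are now passed through escapeRegExp before a RegExp is built, and masking is only applied to object chunks that carry a textDelta, so stream parts keep their original shape. A minimal sketch of that logic follows; the chunk and config shapes are taken from the hunk, while maskBadWords and the sample values are illustrative only.

// Sketch of the masking behavior introduced above (not the library source).
function escapeRegExp(string) {
    return string.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
}

function maskBadWords(chunk, badWordList) {
    let filtered = chunk;
    for (const term of badWordList) {
        // Escape the term so characters like $, ., or ( are matched literally.
        const regex = new RegExp(escapeRegExp(term), "gi");
        if (typeof filtered === "object" && "textDelta" in filtered) {
            filtered = {
                ...filtered,
                textDelta: filtered.textDelta.replace(regex, "*".repeat(term.length)),
            };
        }
    }
    return filtered;
}

// maskBadWords({ type: "text-delta", textDelta: "cost is $5 today" }, ["$5"])
//   -> { type: "text-delta", textDelta: "cost is ** today" }
// Without escapeRegExp, "$5" would compile to an end-of-input anchor plus "5" and never match.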
@@ -42,7 +42,9 @@ export class MiddlewareFactory {
             config: { guardrails: { enabled: true } },
         });
         // Register custom middleware if provided
+        logger.debug("Initializing MiddlewareFactory", { options });
         if (options.middleware) {
+            logger.debug(`Registering custom middleware`);
             for (const customMiddleware of options.middleware) {
                 this.register(customMiddleware);
             }

@@ -11,6 +11,7 @@ export class MiddlewareRegistry {
      */
     register(middleware, options = {}) {
         const { replace = false, defaultEnabled = false, globalConfig } = options;
+        logger.debug(`Registering middleware: ${middleware.metadata.id}`);
         // Check if middleware already exists
         if (this.middleware.has(middleware.metadata.id) && !replace) {
             throw new Error(`Middleware with ID '${middleware.metadata.id}' already exists. Use replace: true to override.`);

@@ -69,8 +70,10 @@ export class MiddlewareRegistry {
     buildChain(context, config = {}) {
         const chain = [];
         const sortedIds = this.getSortedIds();
+        logger.debug("Building middleware chain", { config, sortedIds });
         for (const middlewareId of sortedIds) {
             const middleware = this.middleware.get(middlewareId);
+            logger.debug(`Evaluating middleware: ${middlewareId}`, { middleware });
             if (!middleware) {
                 continue;
             }
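The factory and registry hunks above only add debug logging; registration behavior is unchanged. Custom middleware passed via options.middleware is registered one entry at a time, keyed on middleware.metadata.id, and re-registering an existing id without replace: true still throws. A hedged sketch of that flow follows; the "redaction" middleware object is hypothetical, and only metadata.id, the constructor option, and the duplicate-id error are taken from the hunks.

// Hypothetical custom middleware; the registry code shown only relies on metadata.id.
const redactionMiddleware = {
    metadata: { id: "redaction" },
    // ...middleware hooks omitted in this sketch...
};

// Entries in options.middleware are registered when the factory is constructed.
const factory = new MiddlewareFactory({ middleware: [redactionMiddleware] });

// Registering the same id again without { replace: true } throws:
// "Middleware with ID 'redaction' already exists. Use replace: true to override."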
@@ -94,8 +94,9 @@ export class AnthropicProvider extends BaseProvider {
         const tools = shouldUseTools ? await this.getAllTools() : {};
         // Build message array from options
         const messages = buildMessagesArray(options);
+        const model = await this.getAISDKModelWithMiddleware(options);
         const result = await streamText({
-            model:
+            model: model,
             messages: messages,
             temperature: options.temperature,
             maxTokens: options.maxTokens, // No default limit - unlimited unless specified

@@ -58,9 +58,7 @@ export class AnthropicProviderV2 extends BaseProvider {
     // executeGenerate removed - BaseProvider handles all generation with tools
     async executeStream(options, _analysisSchema) {
         // Note: StreamOptions validation handled differently than TextGenerationOptions
-        const
-        const anthropicClient = createAnthropic({ apiKey });
-        const model = anthropicClient(this.modelName);
+        const model = await this.getAISDKModelWithMiddleware(options);
         const timeout = this.getTimeout(options);
         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {
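Both Anthropic hunks above, and every provider hunk that follows, apply the same change: instead of building the AI SDK model directly inside executeStream (for example createAnthropic({ apiKey }) followed by anthropicClient(this.modelName)), the provider now asks BaseProvider for getAISDKModelWithMiddleware(options), so guardrails and any custom middleware wrap the model before streamText runs. A condensed sketch of the shared pattern follows; it is not any single provider's full implementation, and only the names used in the diff (buildMessagesArray, getAISDKModelWithMiddleware, streamText) are assumed.

// Condensed form of the pattern repeated across the provider diffs in this release.
async function executeStreamPattern(provider, options) {
    const messages = buildMessagesArray(options);
    // The middleware-wrapped model replaces the provider-specific client call.
    const model = await provider.getAISDKModelWithMiddleware(options);
    return streamText({
        model,
        messages,
        temperature: options.temperature,
        maxTokens: options.maxTokens, // no default limit unless specified
    });
}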
@@ -111,8 +111,9 @@ export class AzureOpenAIProvider extends BaseProvider {
         }
         // Build message array from options
         const messages = buildMessagesArray(options);
+        const model = await this.getAISDKModelWithMiddleware(options);
         const stream = await streamText({
-            model
+            model,
             messages: messages,
             ...(options.maxTokens !== null && options.maxTokens !== undefined
                 ? { maxTokens: options.maxTokens }

@@ -83,8 +83,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
         if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY) {
             process.env.GOOGLE_GENERATIVE_AI_API_KEY = apiKey;
         }
-        const
-        const model = google(this.modelName);
+        const model = await this.getAISDKModelWithMiddleware(options);
         const timeout = this.getTimeout(options);
         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {

@@ -597,7 +597,7 @@ export class GoogleVertexProvider extends BaseProvider {
         this.validateStreamOptionsOnly(options);
         // Build message array from options
         const messages = buildMessagesArray(options);
-        const model = await this.
+        const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
         // Get all available tools (direct + MCP + external) for streaming
         const shouldUseTools = !options.disableTools && this.supportsTools();
         const tools = shouldUseTools ? await this.getAllTools() : {};

@@ -122,8 +122,9 @@ export class LiteLLMProvider extends BaseProvider {
         try {
             // Build message array from options
             const messages = buildMessagesArray(options);
+            const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
             const result = streamText({
-                model:
+                model: model,
                 messages: messages,
                 temperature: options.temperature,
                 maxTokens: options.maxTokens, // No default limit - unlimited unless specified

@@ -50,8 +50,9 @@ export class MistralProvider extends BaseProvider {
         const shouldUseTools = !options.disableTools && this.supportsTools();
         const tools = shouldUseTools ? await this.getAllTools() : {};
         const messages = buildMessagesArray(options);
+        const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
         const result = await streamText({
-            model
+            model,
             messages: messages,
             temperature: options.temperature,
             maxTokens: options.maxTokens, // No default limit - unlimited unless specified

@@ -263,8 +263,9 @@ export class OpenAIProvider extends BaseProvider {
             }
             : "no-tools",
         });
+        const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
         const result = await streamText({
-            model
+            model,
             messages: messages,
             temperature: options.temperature,
             maxTokens: options.maxTokens, // No default limit - unlimited unless specified

@@ -157,7 +157,7 @@ export class OpenAICompatibleProvider extends BaseProvider {
         const timeout = this.getTimeout(options);
         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {
-            const model = await this.
+            const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
             const result = streamText({
                 model,
                 prompt: options.input.text,

package/dist/lib/types/index.js
CHANGED
@@ -0,0 +1,25 @@
+/**
+ * NeuroLink SDK Core Types
+ *
+ * This file exposes ALL essential types that external developers need
+ * when integrating with the NeuroLink SDK. Maximum type exposure for
+ * comprehensive TypeScript support across the NeuroLink ecosystem.
+ */
+export type { StreamResult, StreamingProgressData, StreamingMetadata, ProgressCallback, ToolCall as StreamToolCall, // Note: Renamed in main index to avoid conflict with tools.js ToolCall
+ToolResult as StreamToolResult, // Note: Renamed in main index to avoid conflict with tools.js ToolResult
+ToolCallResults, ToolCalls, StreamOptions, StreamingOptions, EnhancedStreamProvider, StreamTextResult, AISDKUsage, StreamAnalyticsCollector, ResponseMetadata, AudioInputSpec, AudioChunk, PCMEncoding, } from "./streamTypes.js";
+export type { ToolExecutionEvent, ToolExecutionSummary, TypedEventEmitter, NeuroLinkEvents, StreamEvent, ToolExecutionContext, AsyncFunction, SyncFunction, AnyFunction, } from "./common.js";
+export type { NeuroLinkConfig, ProviderConfig, PerformanceConfig, CacheConfig, FallbackConfig, RetryConfig, AnalyticsConfig, ToolConfig, BackupInfo, BackupMetadata, ConfigValidationResult, ConfigUpdateOptions, } from "./configTypes.js";
+export type { ToolArgs, ToolContext, ToolResult, ToolDefinition, SimpleTool, AvailableTool, ToolExecution, BaseToolArgs, ToolExecutionMetadata, ToolParameterSchema, ZodUnknownSchema, ZodAnySchema, ZodObjectSchema, ZodStringSchema, } from "./tools.js";
+export type { AISDKModel, ProviderError, AIModelProviderConfig, AIProviderName, ProviderName, ModelCapability, ModelUseCase, ModelFilter, ModelResolutionContext, ModelStats, ModelPricing, ProviderCapabilities, } from "./providers.js";
+export type { GenerateOptions, GenerateResult, UnifiedGenerationOptions, EnhancedProvider, FactoryEnhancedProvider, TextGenerationOptions, TextGenerationResult, EnhancedGenerateResult, } from "./generateTypes.js";
+export type { TokenUsage, AnalyticsData } from "./analytics.js";
+export type { TextContent, ImageContent, Content, VisionCapability, ProviderImageFormat, ProcessedImage, MultimodalMessage, ProviderMultimodalPayload, } from "./content.js";
+export type { MCPTransportType, MCPServerConnectionStatus, MCPServerCategory, MCPServerStatus, MCPDiscoveredServer, MCPConnectedServer, MCPToolInfo, MCPExecutableTool, MCPServerMetadata, MCPToolMetadata, MCPServerRegistryEntry, } from "./mcpTypes.js";
+export type { ExternalMCPServerInstance, ExternalMCPServerStatus, ExternalMCPToolInfo, ExternalMCPServerHealth, ExternalMCPConfigValidation, ExternalMCPOperationResult, ExternalMCPToolContext, ExternalMCPToolResult, ExternalMCPServerEvents, ExternalMCPManagerConfig, } from "./externalMcp.js";
+export type { BaseCommandArgs, GenerateCommandArgs, MCPCommandArgs, ModelsCommandArgs, CommandResult, GenerateResult as CLIGenerateResult, StreamChunk, } from "./cli.js";
+export type { Unknown, UnknownRecord, UnknownArray, JsonValue, JsonObject, JsonArray, ErrorInfo, Result, FunctionParameters, } from "./common.js";
+export type { EvaluationData, EvaluationContext, EnhancedEvaluationResult, EvaluationRequest, EvaluationCriteria, } from "./evaluation.js";
+export type { TaskType, TaskClassification, ClassificationScores, ClassificationStats, ClassificationValidation, } from "./taskClassificationTypes.js";
+export type { DomainType, DomainConfig, DomainTemplate, DomainConfigOptions, DomainEvaluationCriteria, DomainValidationRule, } from "./domainTypes.js";
+export type { ConversationMemoryConfig, SessionMemory, ConversationMemoryStats, ChatMessage, MessageContent, MultimodalChatMessage, ConversationMemoryEvents, ConversationMemoryError, SessionIdentifier, SessionMetadata, RedisConversationObject, RedisStorageConfig, } from "./conversation.js";

@@ -0,0 +1,8 @@
+/**
+ * NeuroLink SDK Core Types
+ *
+ * This file exposes ALL essential types that external developers need
+ * when integrating with the NeuroLink SDK. Maximum type exposure for
+ * comprehensive TypeScript support across the NeuroLink ecosystem.
+ */
+export {};
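The 25-line block above appears to be the new type-only surface (it matches the +25 entries listed for sdkTypes.d.ts in the file list), and the 8-line block is its runtime counterpart. Because every re-export uses export type, TypeScript erases them on emit, which is why the compiled module is just export {}. An illustrative before/after, not the literal file contents:

// TypeScript source: a type-only re-export, erased at compile time
export type { GenerateOptions } from "./generateTypes.js";

// Emitted JavaScript: only the module marker remains
export {};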
@@ -58,16 +58,22 @@ export function createGuardrailsMiddleware(config = {}) {
             badWordsEnabled: !!config.badWords?.enabled,
         });
         const { stream, ...rest } = await doStream();
-        //
-
+        // Helper to escape regex special characters
+        function escapeRegExp(string) {
+            return string.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
+        }
         const transformStream = new TransformStream({
             transform(chunk, controller) {
                 let filteredChunk = chunk;
                 if (config.badWords?.enabled && config.badWords.list) {
                     for (const term of config.badWords.list) {
-                        const regex = new RegExp(term, "gi");
-                        if (typeof filteredChunk === "
-
+                        const regex = new RegExp(escapeRegExp(term), "gi");
+                        if (typeof filteredChunk === "object" &&
+                            "textDelta" in filteredChunk) {
+                            filteredChunk = {
+                                ...filteredChunk,
+                                textDelta: filteredChunk.textDelta.replace(regex, "*".repeat(term.length)),
+                            };
                         }
                     }
                 }

@@ -42,7 +42,9 @@ export class MiddlewareFactory {
             config: { guardrails: { enabled: true } },
         });
         // Register custom middleware if provided
+        logger.debug("Initializing MiddlewareFactory", { options });
         if (options.middleware) {
+            logger.debug(`Registering custom middleware`);
             for (const customMiddleware of options.middleware) {
                 this.register(customMiddleware);
             }

@@ -11,6 +11,7 @@ export class MiddlewareRegistry {
      */
     register(middleware, options = {}) {
         const { replace = false, defaultEnabled = false, globalConfig } = options;
+        logger.debug(`Registering middleware: ${middleware.metadata.id}`);
         // Check if middleware already exists
         if (this.middleware.has(middleware.metadata.id) && !replace) {
             throw new Error(`Middleware with ID '${middleware.metadata.id}' already exists. Use replace: true to override.`);

@@ -69,8 +70,10 @@ export class MiddlewareRegistry {
     buildChain(context, config = {}) {
         const chain = [];
         const sortedIds = this.getSortedIds();
+        logger.debug("Building middleware chain", { config, sortedIds });
         for (const middlewareId of sortedIds) {
             const middleware = this.middleware.get(middlewareId);
+            logger.debug(`Evaluating middleware: ${middlewareId}`, { middleware });
             if (!middleware) {
                 continue;
             }

@@ -94,8 +94,9 @@ export class AnthropicProvider extends BaseProvider {
         const tools = shouldUseTools ? await this.getAllTools() : {};
         // Build message array from options
         const messages = buildMessagesArray(options);
+        const model = await this.getAISDKModelWithMiddleware(options);
         const result = await streamText({
-            model:
+            model: model,
             messages: messages,
             temperature: options.temperature,
             maxTokens: options.maxTokens, // No default limit - unlimited unless specified

@@ -58,9 +58,7 @@ export class AnthropicProviderV2 extends BaseProvider {
     // executeGenerate removed - BaseProvider handles all generation with tools
     async executeStream(options, _analysisSchema) {
         // Note: StreamOptions validation handled differently than TextGenerationOptions
-        const
-        const anthropicClient = createAnthropic({ apiKey });
-        const model = anthropicClient(this.modelName);
+        const model = await this.getAISDKModelWithMiddleware(options);
         const timeout = this.getTimeout(options);
         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {

@@ -111,8 +111,9 @@ export class AzureOpenAIProvider extends BaseProvider {
         }
         // Build message array from options
         const messages = buildMessagesArray(options);
+        const model = await this.getAISDKModelWithMiddleware(options);
         const stream = await streamText({
-            model
+            model,
             messages: messages,
             ...(options.maxTokens !== null && options.maxTokens !== undefined
                 ? { maxTokens: options.maxTokens }

@@ -83,8 +83,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
         if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY) {
             process.env.GOOGLE_GENERATIVE_AI_API_KEY = apiKey;
         }
-        const
-        const model = google(this.modelName);
+        const model = await this.getAISDKModelWithMiddleware(options);
         const timeout = this.getTimeout(options);
         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {

@@ -597,7 +597,7 @@ export class GoogleVertexProvider extends BaseProvider {
         this.validateStreamOptionsOnly(options);
         // Build message array from options
         const messages = buildMessagesArray(options);
-        const model = await this.
+        const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
         // Get all available tools (direct + MCP + external) for streaming
         const shouldUseTools = !options.disableTools && this.supportsTools();
         const tools = shouldUseTools ? await this.getAllTools() : {};

@@ -122,8 +122,9 @@ export class LiteLLMProvider extends BaseProvider {
         try {
             // Build message array from options
             const messages = buildMessagesArray(options);
+            const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
             const result = streamText({
-                model:
+                model: model,
                 messages: messages,
                 temperature: options.temperature,
                 maxTokens: options.maxTokens, // No default limit - unlimited unless specified

@@ -50,8 +50,9 @@ export class MistralProvider extends BaseProvider {
         const shouldUseTools = !options.disableTools && this.supportsTools();
         const tools = shouldUseTools ? await this.getAllTools() : {};
         const messages = buildMessagesArray(options);
+        const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
         const result = await streamText({
-            model
+            model,
             messages: messages,
             temperature: options.temperature,
             maxTokens: options.maxTokens, // No default limit - unlimited unless specified

package/dist/providers/openAI.js
CHANGED
@@ -263,8 +263,9 @@ export class OpenAIProvider extends BaseProvider {
             }
             : "no-tools",
         });
+        const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
         const result = await streamText({
-            model
+            model,
             messages: messages,
             temperature: options.temperature,
             maxTokens: options.maxTokens, // No default limit - unlimited unless specified

@@ -157,7 +157,7 @@ export class OpenAICompatibleProvider extends BaseProvider {
         const timeout = this.getTimeout(options);
         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {
-            const model = await this.
+            const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
             const result = streamText({
                 model,
                 prompt: options.input.text,

package/dist/types/index.d.ts
CHANGED
package/dist/types/index.js
CHANGED
@@ -0,0 +1,25 @@
+/**
+ * NeuroLink SDK Core Types
+ *
+ * This file exposes ALL essential types that external developers need
+ * when integrating with the NeuroLink SDK. Maximum type exposure for
+ * comprehensive TypeScript support across the NeuroLink ecosystem.
+ */
+export type { StreamResult, StreamingProgressData, StreamingMetadata, ProgressCallback, ToolCall as StreamToolCall, // Note: Renamed in main index to avoid conflict with tools.js ToolCall
+ToolResult as StreamToolResult, // Note: Renamed in main index to avoid conflict with tools.js ToolResult
+ToolCallResults, ToolCalls, StreamOptions, StreamingOptions, EnhancedStreamProvider, StreamTextResult, AISDKUsage, StreamAnalyticsCollector, ResponseMetadata, AudioInputSpec, AudioChunk, PCMEncoding, } from "./streamTypes.js";
+export type { ToolExecutionEvent, ToolExecutionSummary, TypedEventEmitter, NeuroLinkEvents, StreamEvent, ToolExecutionContext, AsyncFunction, SyncFunction, AnyFunction, } from "./common.js";
+export type { NeuroLinkConfig, ProviderConfig, PerformanceConfig, CacheConfig, FallbackConfig, RetryConfig, AnalyticsConfig, ToolConfig, BackupInfo, BackupMetadata, ConfigValidationResult, ConfigUpdateOptions, } from "./configTypes.js";
+export type { ToolArgs, ToolContext, ToolResult, ToolDefinition, SimpleTool, AvailableTool, ToolExecution, BaseToolArgs, ToolExecutionMetadata, ToolParameterSchema, ZodUnknownSchema, ZodAnySchema, ZodObjectSchema, ZodStringSchema, } from "./tools.js";
+export type { AISDKModel, ProviderError, AIModelProviderConfig, AIProviderName, ProviderName, ModelCapability, ModelUseCase, ModelFilter, ModelResolutionContext, ModelStats, ModelPricing, ProviderCapabilities, } from "./providers.js";
+export type { GenerateOptions, GenerateResult, UnifiedGenerationOptions, EnhancedProvider, FactoryEnhancedProvider, TextGenerationOptions, TextGenerationResult, EnhancedGenerateResult, } from "./generateTypes.js";
+export type { TokenUsage, AnalyticsData } from "./analytics.js";
+export type { TextContent, ImageContent, Content, VisionCapability, ProviderImageFormat, ProcessedImage, MultimodalMessage, ProviderMultimodalPayload, } from "./content.js";
+export type { MCPTransportType, MCPServerConnectionStatus, MCPServerCategory, MCPServerStatus, MCPDiscoveredServer, MCPConnectedServer, MCPToolInfo, MCPExecutableTool, MCPServerMetadata, MCPToolMetadata, MCPServerRegistryEntry, } from "./mcpTypes.js";
+export type { ExternalMCPServerInstance, ExternalMCPServerStatus, ExternalMCPToolInfo, ExternalMCPServerHealth, ExternalMCPConfigValidation, ExternalMCPOperationResult, ExternalMCPToolContext, ExternalMCPToolResult, ExternalMCPServerEvents, ExternalMCPManagerConfig, } from "./externalMcp.js";
+export type { BaseCommandArgs, GenerateCommandArgs, MCPCommandArgs, ModelsCommandArgs, CommandResult, GenerateResult as CLIGenerateResult, StreamChunk, } from "./cli.js";
+export type { Unknown, UnknownRecord, UnknownArray, JsonValue, JsonObject, JsonArray, ErrorInfo, Result, FunctionParameters, } from "./common.js";
+export type { EvaluationData, EvaluationContext, EnhancedEvaluationResult, EvaluationRequest, EvaluationCriteria, } from "./evaluation.js";
+export type { TaskType, TaskClassification, ClassificationScores, ClassificationStats, ClassificationValidation, } from "./taskClassificationTypes.js";
+export type { DomainType, DomainConfig, DomainTemplate, DomainConfigOptions, DomainEvaluationCriteria, DomainValidationRule, } from "./domainTypes.js";
+export type { ConversationMemoryConfig, SessionMemory, ConversationMemoryStats, ChatMessage, MessageContent, MultimodalChatMessage, ConversationMemoryEvents, ConversationMemoryError, SessionIdentifier, SessionMetadata, RedisConversationObject, RedisStorageConfig, } from "./conversation.js";

@@ -0,0 +1,8 @@
+/**
+ * NeuroLink SDK Core Types
+ *
+ * This file exposes ALL essential types that external developers need
+ * when integrating with the NeuroLink SDK. Maximum type exposure for
+ * comprehensive TypeScript support across the NeuroLink ecosystem.
+ */
+export {};

package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@juspay/neurolink",
-  "version": "7.
+  "version": "7.42.0",
   "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
   "author": {
     "name": "Juspay Technologies",

@@ -129,6 +129,11 @@
       "import": "./dist/index.js",
       "default": "./dist/index.js"
     },
+    "./types": {
+      "types": "./dist/types/sdkTypes.d.ts",
+      "import": "./dist/types/sdkTypes.js",
+      "default": "./dist/types/sdkTypes.js"
+    },
     "./package.json": "./package.json",
     "./cli": {
       "types": "./dist/cli/index.d.ts",