@juspay/neurolink 7.41.4 → 7.43.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/cli/factories/commandFactory.js +42 -17
  3. package/dist/core/baseProvider.js +16 -0
  4. package/dist/lib/core/baseProvider.js +16 -0
  5. package/dist/lib/middleware/builtin/guardrails.js +11 -5
  6. package/dist/lib/middleware/factory.js +2 -0
  7. package/dist/lib/middleware/registry.js +3 -0
  8. package/dist/lib/providers/anthropic.js +2 -1
  9. package/dist/lib/providers/anthropicBaseProvider.js +1 -3
  10. package/dist/lib/providers/azureOpenai.js +2 -1
  11. package/dist/lib/providers/googleAiStudio.js +1 -2
  12. package/dist/lib/providers/googleVertex.js +1 -1
  13. package/dist/lib/providers/litellm.js +2 -1
  14. package/dist/lib/providers/mistral.js +2 -1
  15. package/dist/lib/providers/openAI.js +2 -1
  16. package/dist/lib/providers/openaiCompatible.js +1 -1
  17. package/dist/lib/utils/conversationMemoryUtils.d.ts +4 -0
  18. package/dist/lib/utils/conversationMemoryUtils.js +46 -0
  19. package/dist/middleware/builtin/guardrails.js +11 -5
  20. package/dist/middleware/factory.js +2 -0
  21. package/dist/middleware/registry.js +3 -0
  22. package/dist/providers/anthropic.js +2 -1
  23. package/dist/providers/anthropicBaseProvider.js +1 -3
  24. package/dist/providers/azureOpenai.js +2 -1
  25. package/dist/providers/googleAiStudio.js +1 -2
  26. package/dist/providers/googleVertex.js +1 -1
  27. package/dist/providers/litellm.js +2 -1
  28. package/dist/providers/mistral.js +2 -1
  29. package/dist/providers/openAI.js +2 -1
  30. package/dist/providers/openaiCompatible.js +1 -1
  31. package/dist/utils/conversationMemoryUtils.d.ts +4 -0
  32. package/dist/utils/conversationMemoryUtils.js +46 -0
  33. package/package.json +1 -1
package/CHANGELOG.md CHANGED
@@ -1,3 +1,15 @@
+ ## [7.43.0](https://github.com/juspay/neurolink/compare/v7.42.0...v7.43.0) (2025-09-23)
+
+ ### Features
+
+ - **(cli):** auto-detect and enable redis support in loop conversation memory ([b7b5514](https://github.com/juspay/neurolink/commit/b7b55149eb49a9f0ffa2a257c96d869b0da59eeb))
+
+ ## [7.42.0](https://github.com/juspay/neurolink/compare/v7.41.4...v7.42.0) (2025-09-23)
+
+ ### Features
+
+ - **(middleware):** robust bad word filtering in guardrails and correct stream usage ([d396797](https://github.com/juspay/neurolink/commit/d396797640832a373b386a7c550ec406e129d2d2))
+
  ## [7.41.4](https://github.com/juspay/neurolink/compare/v7.41.3...v7.41.4) (2025-09-21)

  ### Bug Fixes
package/dist/cli/factories/commandFactory.js CHANGED
@@ -15,6 +15,7 @@ import chalk from "chalk";
  import { logger } from "../../lib/utils/logger.js";
  import fs from "fs";
  import { handleSetup } from "../commands/setup.js";
+ import { checkRedisAvailability } from "../../lib/utils/conversationMemoryUtils.js";
  /**
  * CLI Command Factory for generate commands
  */
@@ -151,7 +152,7 @@ export class CLICommandFactory {
  quiet: {
  type: "boolean",
  alias: "q",
- default: false,
+ default: true,
  description: "Suppress non-essential output",
  },
  noColor: {
@@ -674,22 +675,29 @@ export class CLICommandFactory {
  return {
  command: "loop",
  describe: "Start an interactive loop session",
- builder: (yargs) => yargs
- .option("enable-conversation-memory", {
- type: "boolean",
- description: "Enable conversation memory for the loop session",
- default: false,
- })
- .option("max-sessions", {
- type: "number",
- description: "Maximum number of conversation sessions to keep",
- default: 50,
- })
- .option("max-turns-per-session", {
- type: "number",
- description: "Maximum turns per conversation session",
- default: 20,
+ builder: (yargs) => this.buildOptions(yargs, {
+ "enable-conversation-memory": {
+ type: "boolean",
+ description: "Enable conversation memory for the loop session",
+ default: true,
+ },
+ "max-sessions": {
+ type: "number",
+ description: "Maximum number of conversation sessions to keep",
+ default: 50,
+ },
+ "max-turns-per-session": {
+ type: "number",
+ description: "Maximum turns per conversation session",
+ default: 20,
+ },
+ "auto-redis": {
+ type: "boolean",
+ description: "Automatically use Redis if available",
+ default: true,
+ },
  })
+ .example("$0 loop --no-auto-redis", "Start loop with memory storage only")
  .example("$0 loop", "Start interactive session")
  .example("$0 loop --enable-conversation-memory", "Start loop with memory"),
  handler: async (argv) => {
@@ -698,8 +706,25 @@ export class CLICommandFactory {
  return;
  }
  let conversationMemoryConfig;
- const { enableConversationMemory, maxSessions, maxTurnsPerSession } = argv;
+ const { enableConversationMemory, maxSessions, maxTurnsPerSession, autoRedis, } = argv;
  if (enableConversationMemory) {
+ let storageType = "memory";
+ if (autoRedis) {
+ const isRedisAvailable = await checkRedisAvailability();
+ if (isRedisAvailable) {
+ storageType = "redis";
+ if (!argv.quiet) {
+ logger.always(chalk.green("✅ Using Redis for persistent conversation memory"));
+ }
+ }
+ else if (argv.debug) {
+ logger.debug("Redis not available, using in-memory storage");
+ }
+ }
+ else if (argv.debug) {
+ logger.debug("Auto-Redis disabled, using in-memory storage");
+ }
+ process.env.STORAGE_TYPE = storageType;
  conversationMemoryConfig = {
  enabled: true,
  maxSessions: maxSessions,
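
Note on the builder above: it now funnels option definitions through this.buildOptions instead of chaining .option() calls directly. That helper's implementation is not part of this diff, so the following is only a minimal sketch, assuming it folds an option map into standard yargs .option(key, definition) calls:

// Hypothetical sketch; the real CLICommandFactory.buildOptions is not shown in this diff.
function buildOptions(yargs, optionDefs) {
  // Fold each { name: definition } pair into a chained yargs .option() call.
  return Object.entries(optionDefs).reduce(
    (cmd, [name, definition]) => cmd.option(name, definition),
    yargs,
  );
}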
package/dist/core/baseProvider.js CHANGED
@@ -623,12 +623,28 @@ export class BaseProvider {
  async getAISDKModelWithMiddleware(options = {}) {
  // Get the base model
  const baseModel = await this.getAISDKModel();
+ logger.debug(`Retrieved base model for ${this.providerName}`, {
+ provider: this.providerName,
+ model: this.modelName,
+ hasMiddlewareConfig: !!this.middlewareOptions,
+ timestamp: Date.now(),
+ });
  // Check if middleware should be applied
  const middlewareOptions = this.extractMiddlewareOptions(options);
+ logger.debug(`Middleware extraction result`, {
+ provider: this.providerName,
+ model: this.modelName,
+ middlewareOptions,
+ });
  if (!middlewareOptions) {
  return baseModel;
  }
  try {
+ logger.debug(`Applying middleware to ${this.providerName} model`, {
+ provider: this.providerName,
+ model: this.modelName,
+ middlewareOptions,
+ });
  // Create a new factory instance with the specified options
  const factory = new MiddlewareFactory(middlewareOptions);
  // Create middleware context
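
getAISDKModelWithMiddleware is the hook every provider diff below routes through: it returns the raw model when no middleware options are present, otherwise a wrapped model. The wrapping machinery itself is not shown in this diff; a sketch of the general technique, assuming the factory ultimately builds on the Vercel AI SDK's wrapLanguageModel (named experimental_wrapLanguageModel in AI SDK 3.x):

import { wrapLanguageModel } from "ai"; // assumption: AI SDK 4.x naming

// Sketch, not NeuroLink's actual factory: wrap a base model in a chain of
// middlewares so every generate/stream call passes through each of them.
function applyMiddlewareChain(baseModel, middlewares) {
  return middlewares.reduce(
    (model, middleware) => wrapLanguageModel({ model, middleware }),
    baseModel,
  );
}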
package/dist/lib/core/baseProvider.js CHANGED
@@ -623,12 +623,28 @@ export class BaseProvider {
  async getAISDKModelWithMiddleware(options = {}) {
  // Get the base model
  const baseModel = await this.getAISDKModel();
+ logger.debug(`Retrieved base model for ${this.providerName}`, {
+ provider: this.providerName,
+ model: this.modelName,
+ hasMiddlewareConfig: !!this.middlewareOptions,
+ timestamp: Date.now(),
+ });
  // Check if middleware should be applied
  const middlewareOptions = this.extractMiddlewareOptions(options);
+ logger.debug(`Middleware extraction result`, {
+ provider: this.providerName,
+ model: this.modelName,
+ middlewareOptions,
+ });
  if (!middlewareOptions) {
  return baseModel;
  }
  try {
+ logger.debug(`Applying middleware to ${this.providerName} model`, {
+ provider: this.providerName,
+ model: this.modelName,
+ middlewareOptions,
+ });
  // Create a new factory instance with the specified options
  const factory = new MiddlewareFactory(middlewareOptions);
  // Create middleware context
package/dist/lib/middleware/builtin/guardrails.js CHANGED
@@ -58,16 +58,22 @@ export function createGuardrailsMiddleware(config = {}) {
  badWordsEnabled: !!config.badWords?.enabled,
  });
  const { stream, ...rest } = await doStream();
- // Note: Model-based filtering is not applied to streams in this version
- // as it requires the full text for analysis.
+ // Helper to escape regex special characters
+ function escapeRegExp(string) {
+ return string.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
+ }
  const transformStream = new TransformStream({
  transform(chunk, controller) {
  let filteredChunk = chunk;
  if (config.badWords?.enabled && config.badWords.list) {
  for (const term of config.badWords.list) {
- const regex = new RegExp(term, "gi");
- if (typeof filteredChunk === "string") {
- filteredChunk = filteredChunk.replace(regex, "*".repeat(term.length));
+ const regex = new RegExp(escapeRegExp(term), "gi");
+ if (typeof filteredChunk === "object" &&
+ "textDelta" in filteredChunk) {
+ filteredChunk = {
+ ...filteredChunk,
+ textDelta: filteredChunk.textDelta.replace(regex, "*".repeat(term.length)),
+ };
  }
  }
  }
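
Two fixes land in this hunk: list terms are regex-escaped before compilation (previously an entry like "a.b" was treated as a pattern and would also match "aXb"), and filtering now targets the textDelta field of AI SDK stream parts, which are objects rather than plain strings, so the old typeof filteredChunk === "string" branch never fired. A standalone, runnable illustration of the corrected masking:

// Standalone illustration (not package code) of the new stream filtering.
function escapeRegExp(string) {
  return string.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
}

const chunk = { type: "text-delta", textDelta: "that darn cat (darn!)" };
const term = "darn";
const regex = new RegExp(escapeRegExp(term), "gi");
const filtered = {
  ...chunk,
  textDelta: chunk.textDelta.replace(regex, "*".repeat(term.length)),
};
console.log(filtered.textDelta); // "that **** cat (****!)"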
package/dist/lib/middleware/factory.js CHANGED
@@ -42,7 +42,9 @@ export class MiddlewareFactory {
  config: { guardrails: { enabled: true } },
  });
  // Register custom middleware if provided
+ logger.debug("Initializing MiddlewareFactory", { options });
  if (options.middleware) {
+ logger.debug(`Registering custom middleware`);
  for (const customMiddleware of options.middleware) {
  this.register(customMiddleware);
  }
package/dist/lib/middleware/registry.js CHANGED
@@ -11,6 +11,7 @@ export class MiddlewareRegistry {
  */
  register(middleware, options = {}) {
  const { replace = false, defaultEnabled = false, globalConfig } = options;
+ logger.debug(`Registering middleware: ${middleware.metadata.id}`);
  // Check if middleware already exists
  if (this.middleware.has(middleware.metadata.id) && !replace) {
  throw new Error(`Middleware with ID '${middleware.metadata.id}' already exists. Use replace: true to override.`);
@@ -69,8 +70,10 @@ export class MiddlewareRegistry {
  buildChain(context, config = {}) {
  const chain = [];
  const sortedIds = this.getSortedIds();
+ logger.debug("Building middleware chain", { config, sortedIds });
  for (const middlewareId of sortedIds) {
  const middleware = this.middleware.get(middlewareId);
+ logger.debug(`Evaluating middleware: ${middlewareId}`, { middleware });
  if (!middleware) {
  continue;
  }
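
The registry hunks only add debug logging, but they expose the registry's shape: a Map keyed by middleware.metadata.id, with buildChain walking getSortedIds() in order. A minimal standalone sketch of that pattern, reconstructed from the lines visible here (the sorting logic is an assumption):

// Mini-sketch of the registry pattern visible in this hunk; not package code.
class MiniMiddlewareRegistry {
  constructor() {
    this.middleware = new Map(); // id -> middleware, as in the hunk above
  }
  register(mw, { replace = false } = {}) {
    if (this.middleware.has(mw.metadata.id) && !replace) {
      throw new Error(`Middleware with ID '${mw.metadata.id}' already exists. Use replace: true to override.`);
    }
    this.middleware.set(mw.metadata.id, mw);
  }
  buildChain() {
    // Assumption: getSortedIds() yields ids in priority order; insertion order stands in here.
    return [...this.middleware.keys()]
      .map((id) => this.middleware.get(id))
      .filter(Boolean);
  }
}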
package/dist/lib/providers/anthropic.js CHANGED
@@ -94,8 +94,9 @@ export class AnthropicProvider extends BaseProvider {
  const tools = shouldUseTools ? await this.getAllTools() : {};
  // Build message array from options
  const messages = buildMessagesArray(options);
+ const model = await this.getAISDKModelWithMiddleware(options);
  const result = await streamText({
- model: this.model,
+ model: model,
  messages: messages,
  temperature: options.temperature,
  maxTokens: options.maxTokens, // No default limit - unlimited unless specified
package/dist/lib/providers/anthropicBaseProvider.js CHANGED
@@ -58,9 +58,7 @@ export class AnthropicProviderV2 extends BaseProvider {
  // executeGenerate removed - BaseProvider handles all generation with tools
  async executeStream(options, _analysisSchema) {
  // Note: StreamOptions validation handled differently than TextGenerationOptions
- const apiKey = this.getApiKey();
- const anthropicClient = createAnthropic({ apiKey });
- const model = anthropicClient(this.modelName);
+ const model = await this.getAISDKModelWithMiddleware(options);
  const timeout = this.getTimeout(options);
  const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
  try {
package/dist/lib/providers/azureOpenai.js CHANGED
@@ -111,8 +111,9 @@ export class AzureOpenAIProvider extends BaseProvider {
  }
  // Build message array from options
  const messages = buildMessagesArray(options);
+ const model = await this.getAISDKModelWithMiddleware(options);
  const stream = await streamText({
- model: this.azureProvider(this.deployment),
+ model,
  messages: messages,
  ...(options.maxTokens !== null && options.maxTokens !== undefined
  ? { maxTokens: options.maxTokens }
package/dist/lib/providers/googleAiStudio.js CHANGED
@@ -83,8 +83,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
  if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY) {
  process.env.GOOGLE_GENERATIVE_AI_API_KEY = apiKey;
  }
- const google = createGoogleGenerativeAI({ apiKey });
- const model = google(this.modelName);
+ const model = await this.getAISDKModelWithMiddleware(options);
  const timeout = this.getTimeout(options);
  const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
  try {
package/dist/lib/providers/googleVertex.js CHANGED
@@ -597,7 +597,7 @@ export class GoogleVertexProvider extends BaseProvider {
  this.validateStreamOptionsOnly(options);
  // Build message array from options
  const messages = buildMessagesArray(options);
- const model = await this.getModel(); // This is where network connection happens!
+ const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
  // Get all available tools (direct + MCP + external) for streaming
  const shouldUseTools = !options.disableTools && this.supportsTools();
  const tools = shouldUseTools ? await this.getAllTools() : {};
package/dist/lib/providers/litellm.js CHANGED
@@ -122,8 +122,9 @@ export class LiteLLMProvider extends BaseProvider {
  try {
  // Build message array from options
  const messages = buildMessagesArray(options);
+ const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
  const result = streamText({
- model: this.model,
+ model: model,
  messages: messages,
  temperature: options.temperature,
  maxTokens: options.maxTokens, // No default limit - unlimited unless specified
package/dist/lib/providers/mistral.js CHANGED
@@ -50,8 +50,9 @@ export class MistralProvider extends BaseProvider {
  const shouldUseTools = !options.disableTools && this.supportsTools();
  const tools = shouldUseTools ? await this.getAllTools() : {};
  const messages = buildMessagesArray(options);
+ const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
  const result = await streamText({
- model: this.model,
+ model,
  messages: messages,
  temperature: options.temperature,
  maxTokens: options.maxTokens, // No default limit - unlimited unless specified
package/dist/lib/providers/openAI.js CHANGED
@@ -263,8 +263,9 @@ export class OpenAIProvider extends BaseProvider {
  }
  : "no-tools",
  });
+ const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
  const result = await streamText({
- model: this.model,
+ model,
  messages: messages,
  temperature: options.temperature,
  maxTokens: options.maxTokens, // No default limit - unlimited unless specified
package/dist/lib/providers/openaiCompatible.js CHANGED
@@ -157,7 +157,7 @@ export class OpenAICompatibleProvider extends BaseProvider {
  const timeout = this.getTimeout(options);
  const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
  try {
- const model = await this.getAISDKModel();
+ const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
  const result = streamText({
  model,
  prompt: options.input.text,
package/dist/lib/utils/conversationMemoryUtils.d.ts CHANGED
@@ -19,3 +19,7 @@ export declare function getConversationMessages(conversationMemory: Conversation
  * Saves user messages and AI responses for conversation memory
  */
  export declare function storeConversationTurn(conversationMemory: ConversationMemoryManager | undefined, originalOptions: TextGenerationOptions, result: TextGenerationResult): Promise<void>;
+ /**
+ * Check if Redis is available for conversation memory
+ */
+ export declare function checkRedisAvailability(): Promise<boolean>;
package/dist/lib/utils/conversationMemoryUtils.js CHANGED
@@ -4,6 +4,7 @@
  */
  import { getConversationMemoryDefaults } from "../config/conversationMemory.js";
  import { logger } from "./logger.js";
+ import { createRedisClient, getNormalizedConfig } from "./redis.js";
  /**
  * Apply conversation memory defaults to user configuration
  * Merges user config with environment variables and default values
@@ -74,3 +75,48 @@ export async function storeConversationTurn(conversationMemory, originalOptions,
  });
  }
  }
+ /**
+ * Check if Redis is available for conversation memory
+ */
+ export async function checkRedisAvailability() {
+ let testClient = null;
+ try {
+ const testConfig = getNormalizedConfig({
+ host: process.env.REDIS_HOST,
+ port: process.env.REDIS_PORT ? Number(process.env.REDIS_PORT) : undefined,
+ password: process.env.REDIS_PASSWORD,
+ db: process.env.REDIS_DB ? Number(process.env.REDIS_DB) : undefined,
+ keyPrefix: process.env.REDIS_KEY_PREFIX,
+ ttl: process.env.REDIS_TTL ? Number(process.env.REDIS_TTL) : undefined,
+ connectionOptions: {
+ connectTimeout: 5000,
+ maxRetriesPerRequest: 1,
+ retryDelayOnFailover: 100,
+ },
+ });
+ // Test Redis connection
+ testClient = await createRedisClient(testConfig);
+ await testClient.ping();
+ logger.debug("Redis connection test successful");
+ return true;
+ }
+ catch (error) {
+ logger.debug("Redis connection test failed", {
+ error: error instanceof Error ? error.message : String(error),
+ });
+ return false;
+ }
+ finally {
+ if (testClient) {
+ try {
+ await testClient.quit();
+ logger.debug("Redis test client disconnected successfully");
+ }
+ catch (quitError) {
+ logger.debug("Error during Redis test client disconnect", {
+ error: quitError instanceof Error ? quitError.message : String(quitError),
+ });
+ }
+ }
+ }
+ }
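
checkRedisAvailability opens a short-lived client (5-second connect timeout, one retry per request), issues a PING, and always quits the client in the finally block, so a failed probe cannot leak a connection. A hedged usage sketch; the deep-import path is an assumption, so check the package's exports map before relying on it:

// Usage sketch only; the import path below is assumed, not confirmed by this diff.
import { checkRedisAvailability } from "@juspay/neurolink/dist/lib/utils/conversationMemoryUtils.js";

// Connection settings come from the environment, per the hunk above:
// REDIS_HOST, REDIS_PORT, REDIS_PASSWORD, REDIS_DB, REDIS_KEY_PREFIX, REDIS_TTL.
process.env.REDIS_HOST ??= "127.0.0.1";
process.env.REDIS_PORT ??= "6379";

const available = await checkRedisAvailability(); // true only if PING succeeds in time
process.env.STORAGE_TYPE = available ? "redis" : "memory"; // same switch the loop handler flips
console.log(`conversation memory backend: ${process.env.STORAGE_TYPE}`);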
package/dist/middleware/builtin/guardrails.js CHANGED
@@ -58,16 +58,22 @@ export function createGuardrailsMiddleware(config = {}) {
  badWordsEnabled: !!config.badWords?.enabled,
  });
  const { stream, ...rest } = await doStream();
- // Note: Model-based filtering is not applied to streams in this version
- // as it requires the full text for analysis.
+ // Helper to escape regex special characters
+ function escapeRegExp(string) {
+ return string.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
+ }
  const transformStream = new TransformStream({
  transform(chunk, controller) {
  let filteredChunk = chunk;
  if (config.badWords?.enabled && config.badWords.list) {
  for (const term of config.badWords.list) {
- const regex = new RegExp(term, "gi");
- if (typeof filteredChunk === "string") {
- filteredChunk = filteredChunk.replace(regex, "*".repeat(term.length));
+ const regex = new RegExp(escapeRegExp(term), "gi");
+ if (typeof filteredChunk === "object" &&
+ "textDelta" in filteredChunk) {
+ filteredChunk = {
+ ...filteredChunk,
+ textDelta: filteredChunk.textDelta.replace(regex, "*".repeat(term.length)),
+ };
  }
  }
  }
package/dist/middleware/factory.js CHANGED
@@ -42,7 +42,9 @@ export class MiddlewareFactory {
  config: { guardrails: { enabled: true } },
  });
  // Register custom middleware if provided
+ logger.debug("Initializing MiddlewareFactory", { options });
  if (options.middleware) {
+ logger.debug(`Registering custom middleware`);
  for (const customMiddleware of options.middleware) {
  this.register(customMiddleware);
  }
package/dist/middleware/registry.js CHANGED
@@ -11,6 +11,7 @@ export class MiddlewareRegistry {
  */
  register(middleware, options = {}) {
  const { replace = false, defaultEnabled = false, globalConfig } = options;
+ logger.debug(`Registering middleware: ${middleware.metadata.id}`);
  // Check if middleware already exists
  if (this.middleware.has(middleware.metadata.id) && !replace) {
  throw new Error(`Middleware with ID '${middleware.metadata.id}' already exists. Use replace: true to override.`);
@@ -69,8 +70,10 @@ export class MiddlewareRegistry {
  buildChain(context, config = {}) {
  const chain = [];
  const sortedIds = this.getSortedIds();
+ logger.debug("Building middleware chain", { config, sortedIds });
  for (const middlewareId of sortedIds) {
  const middleware = this.middleware.get(middlewareId);
+ logger.debug(`Evaluating middleware: ${middlewareId}`, { middleware });
  if (!middleware) {
  continue;
  }
package/dist/providers/anthropic.js CHANGED
@@ -94,8 +94,9 @@ export class AnthropicProvider extends BaseProvider {
  const tools = shouldUseTools ? await this.getAllTools() : {};
  // Build message array from options
  const messages = buildMessagesArray(options);
+ const model = await this.getAISDKModelWithMiddleware(options);
  const result = await streamText({
- model: this.model,
+ model: model,
  messages: messages,
  temperature: options.temperature,
  maxTokens: options.maxTokens, // No default limit - unlimited unless specified
package/dist/providers/anthropicBaseProvider.js CHANGED
@@ -58,9 +58,7 @@ export class AnthropicProviderV2 extends BaseProvider {
  // executeGenerate removed - BaseProvider handles all generation with tools
  async executeStream(options, _analysisSchema) {
  // Note: StreamOptions validation handled differently than TextGenerationOptions
- const apiKey = this.getApiKey();
- const anthropicClient = createAnthropic({ apiKey });
- const model = anthropicClient(this.modelName);
+ const model = await this.getAISDKModelWithMiddleware(options);
  const timeout = this.getTimeout(options);
  const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
  try {
package/dist/providers/azureOpenai.js CHANGED
@@ -111,8 +111,9 @@ export class AzureOpenAIProvider extends BaseProvider {
  }
  // Build message array from options
  const messages = buildMessagesArray(options);
+ const model = await this.getAISDKModelWithMiddleware(options);
  const stream = await streamText({
- model: this.azureProvider(this.deployment),
+ model,
  messages: messages,
  ...(options.maxTokens !== null && options.maxTokens !== undefined
  ? { maxTokens: options.maxTokens }
package/dist/providers/googleAiStudio.js CHANGED
@@ -83,8 +83,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
  if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY) {
  process.env.GOOGLE_GENERATIVE_AI_API_KEY = apiKey;
  }
- const google = createGoogleGenerativeAI({ apiKey });
- const model = google(this.modelName);
+ const model = await this.getAISDKModelWithMiddleware(options);
  const timeout = this.getTimeout(options);
  const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
  try {
package/dist/providers/googleVertex.js CHANGED
@@ -597,7 +597,7 @@ export class GoogleVertexProvider extends BaseProvider {
  this.validateStreamOptionsOnly(options);
  // Build message array from options
  const messages = buildMessagesArray(options);
- const model = await this.getModel(); // This is where network connection happens!
+ const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
  // Get all available tools (direct + MCP + external) for streaming
  const shouldUseTools = !options.disableTools && this.supportsTools();
  const tools = shouldUseTools ? await this.getAllTools() : {};
package/dist/providers/litellm.js CHANGED
@@ -122,8 +122,9 @@ export class LiteLLMProvider extends BaseProvider {
  try {
  // Build message array from options
  const messages = buildMessagesArray(options);
+ const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
  const result = streamText({
- model: this.model,
+ model: model,
  messages: messages,
  temperature: options.temperature,
  maxTokens: options.maxTokens, // No default limit - unlimited unless specified
package/dist/providers/mistral.js CHANGED
@@ -50,8 +50,9 @@ export class MistralProvider extends BaseProvider {
  const shouldUseTools = !options.disableTools && this.supportsTools();
  const tools = shouldUseTools ? await this.getAllTools() : {};
  const messages = buildMessagesArray(options);
+ const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
  const result = await streamText({
- model: this.model,
+ model,
  messages: messages,
  temperature: options.temperature,
  maxTokens: options.maxTokens, // No default limit - unlimited unless specified
package/dist/providers/openAI.js CHANGED
@@ -263,8 +263,9 @@ export class OpenAIProvider extends BaseProvider {
  }
  : "no-tools",
  });
+ const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
  const result = await streamText({
- model: this.model,
+ model,
  messages: messages,
  temperature: options.temperature,
  maxTokens: options.maxTokens, // No default limit - unlimited unless specified
package/dist/providers/openaiCompatible.js CHANGED
@@ -157,7 +157,7 @@ export class OpenAICompatibleProvider extends BaseProvider {
  const timeout = this.getTimeout(options);
  const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
  try {
- const model = await this.getAISDKModel();
+ const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
  const result = streamText({
  model,
  prompt: options.input.text,
package/dist/utils/conversationMemoryUtils.d.ts CHANGED
@@ -19,3 +19,7 @@ export declare function getConversationMessages(conversationMemory: Conversation
  * Saves user messages and AI responses for conversation memory
  */
  export declare function storeConversationTurn(conversationMemory: ConversationMemoryManager | undefined, originalOptions: TextGenerationOptions, result: TextGenerationResult): Promise<void>;
+ /**
+ * Check if Redis is available for conversation memory
+ */
+ export declare function checkRedisAvailability(): Promise<boolean>;
package/dist/utils/conversationMemoryUtils.js CHANGED
@@ -4,6 +4,7 @@
  */
  import { getConversationMemoryDefaults } from "../config/conversationMemory.js";
  import { logger } from "./logger.js";
+ import { createRedisClient, getNormalizedConfig } from "./redis.js";
  /**
  * Apply conversation memory defaults to user configuration
  * Merges user config with environment variables and default values
@@ -74,3 +75,48 @@ export async function storeConversationTurn(conversationMemory, originalOptions,
  });
  }
  }
+ /**
+ * Check if Redis is available for conversation memory
+ */
+ export async function checkRedisAvailability() {
+ let testClient = null;
+ try {
+ const testConfig = getNormalizedConfig({
+ host: process.env.REDIS_HOST,
+ port: process.env.REDIS_PORT ? Number(process.env.REDIS_PORT) : undefined,
+ password: process.env.REDIS_PASSWORD,
+ db: process.env.REDIS_DB ? Number(process.env.REDIS_DB) : undefined,
+ keyPrefix: process.env.REDIS_KEY_PREFIX,
+ ttl: process.env.REDIS_TTL ? Number(process.env.REDIS_TTL) : undefined,
+ connectionOptions: {
+ connectTimeout: 5000,
+ maxRetriesPerRequest: 1,
+ retryDelayOnFailover: 100,
+ },
+ });
+ // Test Redis connection
+ testClient = await createRedisClient(testConfig);
+ await testClient.ping();
+ logger.debug("Redis connection test successful");
+ return true;
+ }
+ catch (error) {
+ logger.debug("Redis connection test failed", {
+ error: error instanceof Error ? error.message : String(error),
+ });
+ return false;
+ }
+ finally {
+ if (testClient) {
+ try {
+ await testClient.quit();
+ logger.debug("Redis test client disconnected successfully");
+ }
+ catch (quitError) {
+ logger.debug("Error during Redis test client disconnect", {
+ error: quitError instanceof Error ? quitError.message : String(quitError),
+ });
+ }
+ }
+ }
+ }
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@juspay/neurolink",
- "version": "7.41.4",
+ "version": "7.43.0",
  "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
  "author": {
  "name": "Juspay Technologies",