@juspay/neurolink 7.21.0 → 7.23.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,15 @@
+## [7.23.0](https://github.com/juspay/neurolink/compare/v7.22.0...v7.23.0) (2025-08-19)
+
+### Features
+
+- **(docs):** modernize api examples ([c77706b](https://github.com/juspay/neurolink/commit/c77706b427c2ea781269c6d0c2dc7ca2511128cb))
+
+## [7.22.0](https://github.com/juspay/neurolink/compare/v7.21.0...v7.22.0) (2025-08-19)
+
+### Features
+
+- **(memory):** Add conversation memory test suite for NeuroLink stream functionality ([b896bef](https://github.com/juspay/neurolink/commit/b896bef43fb0d743f5d9a7196ecf5ca4e39aa8a0))
+
 ## [7.21.0](https://github.com/juspay/neurolink/compare/v7.20.0...v7.21.0) (2025-08-19)
 
 ### Features
package/README.md CHANGED
@@ -146,32 +146,7 @@ const { NeuroLink } = require('@juspay/neurolink');
 ### Basic Usage
 
 ```typescript
-import { NeuroLink, AIProviderFactory } from "@juspay/neurolink";
-
-// LiteLLM - Access 100+ models through unified interface
-const litellmProvider = await AIProviderFactory.createProvider(
-  "litellm",
-  "openai/gpt-4o",
-);
-const result = await litellmProvider.generate({
-  input: { text: "Write a haiku about programming" },
-});
-
-// Compare multiple models simultaneously
-const models = [
-  "openai/gpt-4o",
-  "anthropic/claude-3-5-sonnet",
-  "google/gemini-2.0-flash",
-];
-const comparisons = await Promise.all(
-  models.map(async (model) => {
-    const provider = await AIProviderFactory.createProvider("litellm", model);
-    const result = await provider.generate({
-      input: { text: "Explain quantum computing" },
-    });
-    return { model, response: result.content, provider: result.provider };
-  }),
-);
+import { NeuroLink } from "@juspay/neurolink";
 
 // Auto-select best available provider
 const neurolink = new NeuroLink();
@@ -181,8 +156,8 @@ const autoResult = await neurolink.generate({
   timeout: "30s",
 });
 
-console.log(result.content);
-console.log(`Used: ${result.provider}`);
+console.log(autoResult.content);
+console.log(`Used: ${autoResult.provider}`);
 ```
 
 ### Conversation Memory
@@ -241,27 +216,10 @@ npx @juspay/neurolink generate "Write a proposal" --enable-analytics --enable-ev
 npx @juspay/neurolink stream "What time is it and write a file with the current date"
 ```
 
-#### SDK with LiteLLM and Enhancement Features
+#### SDK and Enhancement Features
 
 ```typescript
-import { NeuroLink, AIProviderFactory } from "@juspay/neurolink";
-
-// LiteLLM multi-model comparison
-const models = [
-  "openai/gpt-4o",
-  "anthropic/claude-3-5-sonnet",
-  "google/gemini-2.0-flash",
-];
-const comparisons = await Promise.all(
-  models.map(async (model) => {
-    const provider = await AIProviderFactory.createProvider("litellm", model);
-    return await provider.generate({
-      input: { text: "Explain the benefits of renewable energy" },
-      enableAnalytics: true,
-      enableEvaluation: true,
-    });
-  }),
-);
+import { NeuroLink } from "@juspay/neurolink";
 
 // Enhanced generation with analytics
 const neurolink = new NeuroLink();
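Taken together, the README hunks drop the `AIProviderFactory`/LiteLLM examples in favor of the single `NeuroLink` entry point. Reassembled from the fragments above, the modernized usage reads roughly as follows; the `enableAnalytics`/`enableEvaluation` option names are carried over from the removed snippet and the `--enable-analytics --enable-evaluation` CLI flags, so treat them as assumptions rather than confirmed API:

```typescript
import { NeuroLink } from "@juspay/neurolink";

// Auto-select best available provider - no factory indirection needed
const neurolink = new NeuroLink();

const autoResult = await neurolink.generate({
  input: { text: "Write a haiku about programming" },
  timeout: "30s",
});
console.log(autoResult.content);
console.log(`Used: ${autoResult.provider}`);

// Enhanced generation with analytics and evaluation
// (option names assumed from the removed example, not confirmed here)
const enhanced = await neurolink.generate({
  input: { text: "Explain the benefits of renewable energy" },
  enableAnalytics: true,
  enableEvaluation: true,
});
```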
package/dist/neurolink.js CHANGED
@@ -749,6 +749,15 @@ export class NeuroLink {
     provider: providerName,
     prompt: (options.input.text?.substring(0, 100) || "No text") + "...",
 });
+// Initialize conversation memory if enabled (same as generate function)
+if (this.conversationMemory) {
+    await this.conversationMemory.initialize();
+}
+// Get conversation messages for context injection (same as generate function)
+const conversationMessages = await getConversationMessages(this.conversationMemory, {
+    prompt: options.input.text,
+    context: enhancedOptions.context,
+});
 // Create provider using the same factory pattern as generate
 const provider = await AIProviderFactory.createBestProvider(providerName, options.model, true, this);
 // Enable tool execution for streaming using BaseProvider method
@@ -756,12 +765,69 @@ export class NeuroLink {
     customTools: this.getCustomTools(),
     executeTool: this.executeTool.bind(this),
 }, functionTag);
-// Create clean options for provider (remove factoryConfig)
+// Create clean options for provider (remove factoryConfig) and inject conversation history
 const cleanOptions = createCleanStreamOptions(enhancedOptions);
-// Call the provider's stream method with clean options
-const streamResult = await provider.stream(cleanOptions);
+const optionsWithHistory = {
+    ...cleanOptions,
+    conversationMessages, // Inject conversation history like in generate function
+};
+// Call the provider's stream method with conversation history
+const streamResult = await provider.stream(optionsWithHistory);
 // Extract the stream from the result
-const stream = streamResult.stream;
+const originalStream = streamResult.stream;
+// Create a proper tee pattern that accumulates content and stores memory after consumption
+let accumulatedContent = "";
+const processedStream = (async function* (self) {
+    try {
+        for await (const chunk of originalStream) {
+            // Enhanced chunk validation and content handling
+            let processedChunk = chunk;
+            if (chunk && typeof chunk === "object") {
+                // Ensure chunk has content property and it's a string
+                if (typeof chunk.content === "string") {
+                    accumulatedContent += chunk.content;
+                }
+                else if (chunk.content === undefined ||
+                    chunk.content === null) {
+                    // Handle undefined/null content gracefully - create a new chunk object
+                    processedChunk = { ...chunk, content: "" };
+                }
+                else if (typeof chunk.content !== "string") {
+                    // Convert non-string content to string - create a new chunk object
+                    const stringContent = String(chunk.content || "");
+                    processedChunk = { ...chunk, content: stringContent };
+                    accumulatedContent += stringContent;
+                }
+            }
+            else if (chunk === null || chunk === undefined) {
+                // Create a safe empty chunk if chunk is null/undefined
+                processedChunk = { content: "" };
+            }
+            yield processedChunk; // Preserve original streaming behavior with safe content
+        }
+    }
+    finally {
+        // Store memory after stream consumption
+        if (self.conversationMemory) {
+            try {
+                await self.conversationMemory.storeConversationTurn(enhancedOptions.context
+                    ?.sessionId, enhancedOptions.context
+                    ?.userId, options.input.text, accumulatedContent);
+                logger.debug("Stream conversation turn stored", {
+                    sessionId: enhancedOptions.context
+                        ?.sessionId,
+                    userInputLength: options.input.text.length,
+                    responseLength: accumulatedContent.length,
+                });
+            }
+            catch (error) {
+                logger.warn("Failed to store stream conversation turn", {
+                    error: error instanceof Error ? error.message : String(error),
+                });
+            }
+        }
+    }
+})(this);
 const responseTime = Date.now() - startTime;
 mcpLogger.debug(`[${functionTag}] MCP-enabled streaming completed`, {
     responseTime,
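The heart of this hunk is the "tee" async generator: it re-yields every chunk (normalizing `content` to a string) while accumulating the full response, then persists the conversation turn in a `finally` block, which runs whether the consumer drains the stream, breaks early, or hits an error. A minimal self-contained sketch of the same pattern, with a hypothetical `storeTurn` standing in for `conversationMemory.storeConversationTurn`:

```typescript
type Chunk = { content?: unknown };

// Illustrative stand-in for conversationMemory.storeConversationTurn(...).
async function storeTurn(prompt: string, response: string): Promise<void> {
  console.log(`stored turn: ${prompt.length} -> ${response.length} chars`);
}

// Tee pattern: yield chunks onward while accumulating their content,
// then store the finished turn exactly once in `finally`.
async function* teeStream(
  source: AsyncIterable<Chunk | null | undefined>,
  prompt: string,
): AsyncGenerator<{ content: string }> {
  let accumulated = "";
  try {
    for await (const chunk of source) {
      // Normalize: null/undefined content becomes "", non-strings are coerced.
      const content =
        typeof chunk?.content === "string"
          ? chunk.content
          : String(chunk?.content ?? "");
      accumulated += content;
      yield { ...chunk, content }; // preserve original streaming behavior
    }
  } finally {
    // Runs on normal completion, consumer break, and thrown errors alike.
    try {
      await storeTurn(prompt, accumulated);
    } catch (err) {
      console.warn("failed to store turn:", err);
    }
  }
}
```

The published code wraps its generator in an IIFE that receives `this` as a `self` parameter; a bare `function*` expression would otherwise lose the class-instance binding needed to reach `conversationMemory`.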
@@ -774,7 +840,7 @@ export class NeuroLink {
 });
 // Convert to StreamResult format - Include analytics and evaluation from provider
 return {
-    stream,
+    stream: processedStream,
     provider: providerName,
     model: options.model,
     usage: streamResult.usage,
@@ -804,6 +870,15 @@ export class NeuroLink {
 mcpLogger.warn(`[${functionTag}] MCP streaming failed, falling back to regular`, {
     error: error instanceof Error ? error.message : String(error),
 });
+// Initialize conversation memory for fallback path (same as success path)
+if (this.conversationMemory) {
+    await this.conversationMemory.initialize();
+}
+// Get conversation messages for fallback context injection
+const fallbackConversationMessages = await getConversationMessages(this.conversationMemory, {
+    prompt: options.input.text,
+    context: enhancedOptions.context,
+});
 // Use factory to create provider without MCP
 const provider = await AIProviderFactory.createBestProvider(providerName, options.model, false, // Disable MCP for fallback
 this);
@@ -812,9 +887,46 @@ export class NeuroLink {
     customTools: this.getCustomTools(),
     executeTool: this.executeTool.bind(this),
 }, functionTag);
-// Create clean options for fallback provider (remove factoryConfig)
+// Create clean options for fallback provider and inject conversation history
 const cleanOptions = createCleanStreamOptions(enhancedOptions);
-const streamResult = await provider.stream(cleanOptions);
+const fallbackOptionsWithHistory = {
+    ...cleanOptions,
+    conversationMessages: fallbackConversationMessages, // Inject conversation history in fallback
+};
+const streamResult = await provider.stream(fallbackOptionsWithHistory);
+// Create a proper tee pattern for fallback that accumulates content and stores memory after consumption
+let fallbackAccumulatedContent = "";
+const fallbackProcessedStream = (async function* (self) {
+    try {
+        for await (const chunk of streamResult.stream) {
+            if (chunk && typeof chunk.content === "string") {
+                fallbackAccumulatedContent += chunk.content;
+            }
+            yield chunk; // Preserve original streaming behavior
+        }
+    }
+    finally {
+        // Store memory after fallback stream consumption
+        if (self.conversationMemory) {
+            try {
+                await self.conversationMemory.storeConversationTurn(enhancedOptions.context
+                    ?.sessionId, enhancedOptions.context
+                    ?.userId, options.input.text, fallbackAccumulatedContent);
+                logger.debug("Fallback stream conversation turn stored", {
+                    sessionId: enhancedOptions.context
+                        ?.sessionId,
+                    userInputLength: options.input.text.length,
+                    responseLength: fallbackAccumulatedContent.length,
+                });
+            }
+            catch (error) {
+                logger.warn("Failed to store fallback stream conversation turn", {
+                    error: error instanceof Error ? error.message : String(error),
+                });
+            }
+        }
+    }
+})(this);
 const responseTime = Date.now() - startTime;
 // Emit stream completion event for fallback
 this.emitter.emit("stream:end", {
@@ -823,7 +935,7 @@ export class NeuroLink {
     fallback: true,
 });
 return {
-    stream: streamResult.stream,
+    stream: fallbackProcessedStream,
     provider: providerName,
     model: options.model,
     usage: streamResult.usage,
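From the caller's side, stored turns are keyed by `context.sessionId` and `context.userId`, which the new code reads off `enhancedOptions.context`. Assuming conversation memory is enabled on the instance (its constructor configuration is not part of this diff), a stream call exercising the new path might look like this sketch:

```typescript
import { NeuroLink } from "@juspay/neurolink";

// Memory configuration is not shown in this diff; assume it is enabled.
const neurolink = new NeuroLink();

// sessionId/userId travel through enhancedOptions.context into
// conversationMemory.storeConversationTurn once the stream is consumed.
const result = await neurolink.stream({
  input: { text: "Summarize our discussion so far" },
  context: { sessionId: "session-123", userId: "user-456" },
});

for await (const chunk of result.stream) {
  process.stdout.write(chunk.content);
}
// The tee generator's finally block has now stored the conversation turn.
```

The fallback path mirrors this behavior, so the turn is stored even when MCP streaming fails and the provider is recreated without MCP.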
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@juspay/neurolink",
-  "version": "7.21.0",
+  "version": "7.23.0",
   "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
   "author": {
     "name": "Juspay Technologies",