@juspay/neurolink 7.11.1 → 7.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. package/CHANGELOG.md +12 -0
  2. package/README.md +16 -0
  3. package/dist/config/conversationMemoryConfig.d.ts +27 -0
  4. package/dist/config/conversationMemoryConfig.js +39 -0
  5. package/dist/context/ContextManager.d.ts +28 -0
  6. package/dist/context/ContextManager.js +102 -0
  7. package/dist/context/config.d.ts +5 -0
  8. package/dist/context/config.js +38 -0
  9. package/dist/context/types.d.ts +20 -0
  10. package/dist/context/types.js +1 -0
  11. package/dist/context/utils.d.ts +7 -0
  12. package/dist/context/utils.js +8 -0
  13. package/dist/core/baseProvider.js +4 -2
  14. package/dist/core/conversationMemoryManager.d.ts +41 -0
  15. package/dist/core/conversationMemoryManager.js +152 -0
  16. package/dist/core/types.d.ts +2 -0
  17. package/dist/lib/config/conversationMemoryConfig.d.ts +27 -0
  18. package/dist/lib/config/conversationMemoryConfig.js +39 -0
  19. package/dist/lib/context/ContextManager.d.ts +28 -0
  20. package/dist/lib/context/ContextManager.js +102 -0
  21. package/dist/lib/context/config.d.ts +5 -0
  22. package/dist/lib/context/config.js +38 -0
  23. package/dist/lib/context/types.d.ts +20 -0
  24. package/dist/lib/context/types.js +1 -0
  25. package/dist/lib/context/utils.d.ts +7 -0
  26. package/dist/lib/context/utils.js +8 -0
  27. package/dist/lib/core/baseProvider.js +4 -2
  28. package/dist/lib/core/conversationMemoryManager.d.ts +41 -0
  29. package/dist/lib/core/conversationMemoryManager.js +152 -0
  30. package/dist/lib/core/types.d.ts +2 -0
  31. package/dist/lib/neurolink.d.ts +39 -4
  32. package/dist/lib/neurolink.js +106 -5
  33. package/dist/lib/providers/amazonBedrock.js +4 -2
  34. package/dist/lib/providers/anthropic.js +4 -2
  35. package/dist/lib/providers/azureOpenai.js +4 -2
  36. package/dist/lib/providers/googleAiStudio.js +4 -2
  37. package/dist/lib/providers/googleVertex.js +4 -2
  38. package/dist/lib/providers/huggingFace.js +4 -2
  39. package/dist/lib/providers/litellm.js +4 -2
  40. package/dist/lib/providers/mistral.js +3 -2
  41. package/dist/lib/providers/openAI.js +4 -2
  42. package/dist/lib/types/conversationTypes.d.ts +95 -0
  43. package/dist/lib/types/conversationTypes.js +17 -0
  44. package/dist/lib/types/streamTypes.d.ts +2 -0
  45. package/dist/lib/utils/conversationMemoryUtils.d.ts +22 -0
  46. package/dist/lib/utils/conversationMemoryUtils.js +77 -0
  47. package/dist/lib/utils/messageBuilder.d.ts +13 -0
  48. package/dist/lib/utils/messageBuilder.js +48 -0
  49. package/dist/neurolink.d.ts +39 -4
  50. package/dist/neurolink.js +106 -5
  51. package/dist/providers/amazonBedrock.js +4 -2
  52. package/dist/providers/anthropic.js +4 -2
  53. package/dist/providers/azureOpenai.js +4 -2
  54. package/dist/providers/googleAiStudio.js +4 -2
  55. package/dist/providers/googleVertex.js +4 -2
  56. package/dist/providers/huggingFace.js +4 -2
  57. package/dist/providers/litellm.js +4 -2
  58. package/dist/providers/mistral.js +3 -2
  59. package/dist/providers/openAI.js +4 -2
  60. package/dist/types/conversationTypes.d.ts +95 -0
  61. package/dist/types/conversationTypes.js +17 -0
  62. package/dist/types/streamTypes.d.ts +2 -0
  63. package/dist/utils/conversationMemoryUtils.d.ts +22 -0
  64. package/dist/utils/conversationMemoryUtils.js +77 -0
  65. package/dist/utils/messageBuilder.d.ts +13 -0
  66. package/dist/utils/messageBuilder.js +48 -0
  67. package/package.json +1 -1
@@ -27,10 +27,15 @@ import { processFactoryOptions, enhanceTextGenerationOptions, validateFactoryCon
27
27
  // Enhanced error handling imports
28
28
  import { ErrorFactory, NeuroLinkError, withTimeout, withRetry, isRetriableError, logStructuredError, CircuitBreaker, } from "./utils/errorHandling.js";
29
29
  import { EventEmitter } from "events";
30
+ import { ConversationMemoryManager } from "./core/conversationMemoryManager.js";
31
+ import { applyConversationMemoryDefaults, getConversationMessages, storeConversationTurn, } from "./utils/conversationMemoryUtils.js";
32
+ import { ContextManager } from "./context/ContextManager.js";
33
+ import { defaultContextConfig } from "./context/config.js";
30
34
  // Core types imported from core/types.js
31
35
  export class NeuroLink {
32
36
  mcpInitialized = false;
33
37
  emitter = new EventEmitter();
38
+ contextManager = null;
34
39
  // Tool registration support
35
40
  customTools = new Map();
36
41
  inMemoryServers = new Map();
@@ -52,11 +57,22 @@ export class NeuroLink {
52
57
  timestamp: Date.now(),
53
58
  });
54
59
  }
55
- constructor() {
60
+ // Conversation memory support
61
+ conversationMemory;
62
+ constructor(config) {
56
63
  // SDK always disables manual MCP config for security
57
64
  ProviderRegistry.setOptions({
58
65
  enableManualMCP: false,
59
66
  });
67
+ // Initialize conversation memory if enabled
68
+ if (config?.conversationMemory?.enabled) {
69
+ const memoryConfig = applyConversationMemoryDefaults(config.conversationMemory);
70
+ this.conversationMemory = new ConversationMemoryManager(memoryConfig);
71
+ logger.info("NeuroLink initialized with conversation memory", {
72
+ maxSessions: memoryConfig.maxSessions,
73
+ maxTurnsPerSession: memoryConfig.maxTurnsPerSession,
74
+ });
75
+ }
60
76
  }
61
77
  /**
62
78
  * Initialize MCP registry with enhanced error handling and resource cleanup
@@ -107,7 +123,33 @@ export class NeuroLink {
107
123
  * MAIN ENTRY POINT: Enhanced generate method with new function signature
108
124
  * Replaces both generateText and legacy methods
109
125
  */
126
+ /**
127
+ * Extracts the original prompt text from the provided input.
128
+ * If a string is provided, it returns the string directly.
129
+ * If a GenerateOptions object is provided, it returns the input text from the object.
130
+ * @param optionsOrPrompt The prompt input, either as a string or a GenerateOptions object.
131
+ * @returns The original prompt text as a string.
132
+ */
133
+ _extractOriginalPrompt(optionsOrPrompt) {
134
+ return typeof optionsOrPrompt === 'string' ? optionsOrPrompt : optionsOrPrompt.input.text;
135
+ }
136
+ /**
137
+ * Enables automatic context summarization for the NeuroLink instance.
138
+ * Once enabled, the instance will maintain conversation history and
139
+ * automatically summarize it when it exceeds token limits.
140
+ * @param config Optional configuration to override default summarization settings.
141
+ */
142
+ enableContextSummarization(config) {
143
+ const contextConfig = {
144
+ ...defaultContextConfig,
145
+ ...config,
146
+ };
147
+ // Pass the internal generator function directly, bound to the correct `this` context.
148
+ this.contextManager = new ContextManager(this.generateTextInternal.bind(this), contextConfig);
149
+ logger.info("[NeuroLink] Automatic context summarization enabled.");
150
+ }
110
151
  async generate(optionsOrPrompt) {
152
+ const originalPrompt = this._extractOriginalPrompt(optionsOrPrompt);
111
153
  // Convert string prompt to full options
112
154
  const options = typeof optionsOrPrompt === "string"
113
155
  ? { input: { text: optionsOrPrompt } }
@@ -116,6 +158,11 @@ export class NeuroLink {
116
158
  if (!options.input?.text || typeof options.input.text !== "string") {
117
159
  throw new Error("Input text is required and must be a non-empty string");
118
160
  }
161
+ // Handle Context Management if enabled
162
+ if (this.contextManager) {
163
+ // Get the full context for the prompt without permanently adding the user's turn yet
164
+ options.input.text = this.contextManager.getContextForPrompt("user", options.input.text);
165
+ }
119
166
  const startTime = Date.now();
120
167
  // Emit generation start event
121
168
  this.emitter.emit("generation:start", {
@@ -227,6 +274,11 @@ export class NeuroLink {
227
274
  }
228
275
  : undefined,
229
276
  };
277
+ // Add both the user's turn and the AI's response to the permanent history
278
+ if (this.contextManager) {
279
+ await this.contextManager.addTurn("user", originalPrompt);
280
+ await this.contextManager.addTurn("assistant", generateResult.content);
281
+ }
230
282
  return generateResult;
231
283
  }
232
284
  /**
@@ -247,9 +299,11 @@ export class NeuroLink {
247
299
  * REDESIGNED INTERNAL GENERATION - NO CIRCULAR DEPENDENCIES
248
300
  *
249
301
  * This method implements a clean fallback chain:
250
- * 1. Try MCP-enhanced generation if available
251
- * 2. Fall back to direct provider generation
252
- * 3. No recursive calls - each method has a specific purpose
302
+ * 1. Initialize conversation memory if enabled
303
+ * 2. Inject conversation history into prompt
304
+ * 3. Try MCP-enhanced generation if available
305
+ * 4. Fall back to direct provider generation
306
+ * 5. Store conversation turn for future context
253
307
  */
254
308
  async generateTextInternal(options) {
255
309
  const startTime = Date.now();
@@ -257,14 +311,21 @@ export class NeuroLink {
257
311
  logger.debug(`[${functionTag}] Starting generation`, {
258
312
  provider: options.provider || "auto",
259
313
  promptLength: options.prompt?.length || 0,
314
+ hasConversationMemory: !!this.conversationMemory,
260
315
  });
261
316
  try {
317
+ // Initialize conversation memory if enabled
318
+ if (this.conversationMemory) {
319
+ await this.conversationMemory.initialize();
320
+ }
262
321
  // Try MCP-enhanced generation first (if not explicitly disabled)
263
322
  if (!options.disableTools) {
264
323
  try {
265
324
  const mcpResult = await this.tryMCPGeneration(options);
266
325
  if (mcpResult && mcpResult.content) {
267
326
  logger.debug(`[${functionTag}] MCP generation successful`);
327
+ // Store conversation turn
328
+ await storeConversationTurn(this.conversationMemory, options, mcpResult);
268
329
  return mcpResult;
269
330
  }
270
331
  }
@@ -277,6 +338,8 @@ export class NeuroLink {
277
338
  // Fall back to direct provider generation
278
339
  const directResult = await this.directProviderGeneration(options);
279
340
  logger.debug(`[${functionTag}] Direct generation successful`);
341
+ // Store conversation turn
342
+ await storeConversationTurn(this.conversationMemory, options, directResult);
280
343
  return directResult;
281
344
  }
282
345
  catch (error) {
@@ -334,6 +397,8 @@ export class NeuroLink {
334
397
  }
335
398
  // Create tool-aware system prompt
336
399
  const enhancedSystemPrompt = this.createToolAwareSystemPrompt(options.systemPrompt, availableTools);
400
+ // Get conversation messages for context
401
+ const conversationMessages = await getConversationMessages(this.conversationMemory, options);
337
402
  // Create provider and generate
338
403
  const provider = await AIProviderFactory.createProvider(providerName, options.model, !options.disableTools, // Pass disableTools as inverse of enableMCP
339
404
  this);
@@ -345,6 +410,7 @@ export class NeuroLink {
345
410
  const result = await provider.generate({
346
411
  ...options,
347
412
  systemPrompt: enhancedSystemPrompt,
413
+ conversationMessages, // Inject conversation history
348
414
  });
349
415
  const responseTime = Date.now() - startTime;
350
416
  // Check if result is meaningful
@@ -413,6 +479,8 @@ export class NeuroLink {
413
479
  for (const providerName of tryProviders) {
414
480
  try {
415
481
  logger.debug(`[${functionTag}] Attempting provider: ${providerName}`);
482
+ // Get conversation messages for context
483
+ const conversationMessages = await getConversationMessages(this.conversationMemory, options);
416
484
  const provider = await AIProviderFactory.createProvider(providerName, options.model, !options.disableTools, // Pass disableTools as inverse of enableMCP
417
485
  this);
418
486
  // Enable tool execution for direct provider generation using BaseProvider method
@@ -420,7 +488,10 @@ export class NeuroLink {
420
488
  customTools: this.customTools,
421
489
  executeTool: this.executeTool.bind(this),
422
490
  }, functionTag);
423
- const result = await provider.generate(options);
491
+ const result = await provider.generate({
492
+ ...options,
493
+ conversationMessages, // Inject conversation history
494
+ });
424
495
  const responseTime = Date.now() - startTime;
425
496
  if (!result) {
426
497
  throw new Error(`Provider ${providerName} returned null result`);
@@ -1577,6 +1648,36 @@ export class NeuroLink {
1577
1648
  tools,
1578
1649
  };
1579
1650
  }
1651
+ // ============================================================================
1652
+ // CONVERSATION MEMORY PUBLIC API
1653
+ // ============================================================================
1654
+ /**
1655
+ * Get conversation memory statistics (public API)
1656
+ */
1657
+ async getConversationStats() {
1658
+ if (!this.conversationMemory) {
1659
+ throw new Error("Conversation memory is not enabled");
1660
+ }
1661
+ return await this.conversationMemory.getStats();
1662
+ }
1663
+ /**
1664
+ * Clear conversation history for a specific session (public API)
1665
+ */
1666
+ async clearConversationSession(sessionId) {
1667
+ if (!this.conversationMemory) {
1668
+ throw new Error("Conversation memory is not enabled");
1669
+ }
1670
+ return await this.conversationMemory.clearSession(sessionId);
1671
+ }
1672
+ /**
1673
+ * Clear all conversation history (public API)
1674
+ */
1675
+ async clearAllConversations() {
1676
+ if (!this.conversationMemory) {
1677
+ throw new Error("Conversation memory is not enabled");
1678
+ }
1679
+ await this.conversationMemory.clearAllSessions();
1680
+ }
1580
1681
  }
1581
1682
  // Create default instance
1582
1683
  export const neurolink = new NeuroLink();
@@ -5,6 +5,7 @@ import { logger } from "../utils/logger.js";
5
5
  import { TimeoutError, } from "../utils/timeout.js";
6
6
  import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
7
7
  import { validateApiKey, createAWSAccessKeyConfig, createAWSSecretConfig, getAWSRegion, getAWSSessionToken, } from "../utils/providerConfig.js";
8
+ import { buildMessagesArray } from "../utils/messageBuilder.js";
8
9
  // Configuration helpers
9
10
  const getBedrockModelId = () => {
10
11
  return (process.env.BEDROCK_MODEL ||
@@ -79,10 +80,11 @@ export class AmazonBedrockProvider extends BaseProvider {
79
80
  async executeStream(options, analysisSchema) {
80
81
  try {
81
82
  this.validateStreamOptions(options);
83
+ // Build message array from options
84
+ const messages = buildMessagesArray(options);
82
85
  const result = await streamText({
83
86
  model: this.model,
84
- prompt: options.input.text,
85
- system: options.systemPrompt,
87
+ messages: messages,
86
88
  maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
87
89
  temperature: options.temperature,
88
90
  });
@@ -5,6 +5,7 @@ import { logger } from "../utils/logger.js";
5
5
  import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
6
6
  import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
7
7
  import { validateApiKey, createAnthropicConfig, getProviderModel, } from "../utils/providerConfig.js";
8
+ import { buildMessagesArray } from "../utils/messageBuilder.js";
8
9
  // Configuration helpers - now using consolidated utility
9
10
  const getAnthropicApiKey = () => {
10
11
  return validateApiKey(createAnthropicConfig());
@@ -94,10 +95,11 @@ export class AnthropicProvider extends BaseProvider {
94
95
  // ✅ Get tools for streaming (same as generate method)
95
96
  const shouldUseTools = !options.disableTools && this.supportsTools();
96
97
  const tools = shouldUseTools ? await this.getAllTools() : {};
98
+ // Build message array from options
99
+ const messages = buildMessagesArray(options);
97
100
  const result = await streamText({
98
101
  model: this.model,
99
- prompt: options.input.text,
100
- system: options.systemPrompt || undefined,
102
+ messages: messages,
101
103
  temperature: options.temperature,
102
104
  maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
103
105
  tools,
@@ -3,6 +3,7 @@ import { streamText } from "ai";
3
3
  import { BaseProvider } from "../core/baseProvider.js";
4
4
  import { validateApiKey, createAzureAPIKeyConfig, createAzureEndpointConfig, } from "../utils/providerConfig.js";
5
5
  import { logger } from "../utils/logger.js";
6
+ import { buildMessagesArray } from "../utils/messageBuilder.js";
6
7
  export class AzureOpenAIProvider extends BaseProvider {
7
8
  apiKey;
8
9
  resourceName;
@@ -69,12 +70,13 @@ export class AzureOpenAIProvider extends BaseProvider {
69
70
  // executeGenerate removed - BaseProvider handles all generation with tools
70
71
  async executeStream(options, analysisSchema) {
71
72
  try {
73
+ // Build message array from options
74
+ const messages = buildMessagesArray(options);
72
75
  const stream = await streamText({
73
76
  model: this.azureProvider(this.deployment),
74
- prompt: options.input?.text || "",
77
+ messages: messages,
75
78
  maxTokens: options.maxTokens || 1000,
76
79
  temperature: options.temperature || 0.7,
77
- system: options.systemPrompt,
78
80
  });
79
81
  return {
80
82
  stream: (async function* () {
@@ -6,6 +6,7 @@ import { logger } from "../utils/logger.js";
6
6
  import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
7
7
  import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
8
8
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
9
+ import { buildMessagesArray } from "../utils/messageBuilder.js";
9
10
  // Environment variable setup
10
11
  if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY &&
11
12
  process.env.GOOGLE_AI_API_KEY) {
@@ -76,10 +77,11 @@ export class GoogleAIStudioProvider extends BaseProvider {
76
77
  // Get tools consistently with generate method
77
78
  const shouldUseTools = !options.disableTools && this.supportsTools();
78
79
  const tools = shouldUseTools ? await this.getAllTools() : {};
80
+ // Build message array from options
81
+ const messages = buildMessagesArray(options);
79
82
  const result = await streamText({
80
83
  model,
81
- prompt: options.input.text,
82
- system: options.systemPrompt,
84
+ messages: messages,
83
85
  temperature: options.temperature,
84
86
  maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
85
87
  tools,
@@ -6,6 +6,7 @@ import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
6
6
  import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
7
7
  import { ModelConfigurationManager } from "../core/modelConfiguration.js";
8
8
  import { validateApiKey, createVertexProjectConfig, createGoogleAuthConfig, } from "../utils/providerConfig.js";
9
+ import { buildMessagesArray } from "../utils/messageBuilder.js";
9
10
  // Cache for anthropic module to avoid repeated imports
10
11
  let _createVertexAnthropic = null;
11
12
  let _anthropicImportAttempted = false;
@@ -189,6 +190,8 @@ export class GoogleVertexProvider extends BaseProvider {
189
190
  const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
190
191
  try {
191
192
  this.validateStreamOptions(options);
193
+ // Build message array from options
194
+ const messages = buildMessagesArray(options);
192
195
  logger.debug(`${functionTag}: Starting stream request`, {
193
196
  modelName: this.modelName,
194
197
  promptLength: options.input.text.length,
@@ -209,8 +212,7 @@ export class GoogleVertexProvider extends BaseProvider {
209
212
  // Build complete stream options with proper typing
210
213
  let streamOptions = {
211
214
  model: model,
212
- prompt: options.input.text,
213
- system: options.systemPrompt,
215
+ messages: messages,
214
216
  temperature: options.temperature,
215
217
  ...(maxTokens && { maxTokens }),
216
218
  tools,
@@ -5,6 +5,7 @@ import { logger } from "../utils/logger.js";
5
5
  import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
6
6
  import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
7
7
  import { validateApiKey, createHuggingFaceConfig, getProviderModel, } from "../utils/providerConfig.js";
8
+ import { buildMessagesArray } from "../utils/messageBuilder.js";
8
9
  // Configuration helpers - now using consolidated utility
9
10
  const getHuggingFaceApiKey = () => {
10
11
  return validateApiKey(createHuggingFaceConfig());
@@ -111,10 +112,11 @@ export class HuggingFaceProvider extends BaseProvider {
111
112
  try {
112
113
  // Enhanced tool handling for HuggingFace models
113
114
  const streamOptions = this.prepareStreamOptions(options, analysisSchema);
115
+ // Build message array from options
116
+ const messages = buildMessagesArray(options);
114
117
  const result = await streamText({
115
118
  model: this.model,
116
- prompt: streamOptions.prompt,
117
- system: streamOptions.system,
119
+ messages: messages,
118
120
  temperature: options.temperature,
119
121
  maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
120
122
  tools: streamOptions.tools, // Tools format conversion handled by prepareStreamOptions
@@ -6,6 +6,7 @@ import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
6
6
  import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
7
7
  import { getProviderModel } from "../utils/providerConfig.js";
8
8
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
9
+ import { buildMessagesArray } from "../utils/messageBuilder.js";
9
10
  // Configuration helpers
10
11
  const getLiteLLMConfig = () => {
11
12
  return {
@@ -118,10 +119,11 @@ export class LiteLLMProvider extends BaseProvider {
118
119
  const timeout = this.getTimeout(options);
119
120
  const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
120
121
  try {
122
+ // Build message array from options
123
+ const messages = buildMessagesArray(options);
121
124
  const result = await streamText({
122
125
  model: this.model,
123
- prompt: options.input.text,
124
- system: options.systemPrompt,
126
+ messages: messages,
125
127
  temperature: options.temperature,
126
128
  maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
127
129
  tools: options.tools,
@@ -6,6 +6,7 @@ import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
6
6
  import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
7
7
  import { validateApiKey, createMistralConfig, getProviderModel, } from "../utils/providerConfig.js";
8
8
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
9
+ import { buildMessagesArray } from "../utils/messageBuilder.js";
9
10
  // Configuration helpers - now using consolidated utility
10
11
  const getMistralApiKey = () => {
11
12
  return validateApiKey(createMistralConfig());
@@ -46,10 +47,10 @@ export class MistralProvider extends BaseProvider {
46
47
  // Get tools consistently with generate method
47
48
  const shouldUseTools = !options.disableTools && this.supportsTools();
48
49
  const tools = shouldUseTools ? await this.getAllTools() : {};
50
+ const messages = buildMessagesArray(options);
49
51
  const result = await streamText({
50
52
  model: this.model,
51
- prompt: options.input.text,
52
- system: options.systemPrompt,
53
+ messages: messages,
53
54
  temperature: options.temperature,
54
55
  maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
55
56
  tools,
@@ -7,6 +7,7 @@ import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
7
7
  import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
8
8
  import { validateApiKey, createOpenAIConfig, getProviderModel, } from "../utils/providerConfig.js";
9
9
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
10
+ import { buildMessagesArray } from "../utils/messageBuilder.js";
10
11
  // Configuration helpers - now using consolidated utility
11
12
  const getOpenAIApiKey = () => {
12
13
  return validateApiKey(createOpenAIConfig());
@@ -77,10 +78,11 @@ export class OpenAIProvider extends BaseProvider {
77
78
  // Get tools consistently with generate method
78
79
  const shouldUseTools = !options.disableTools && this.supportsTools();
79
80
  const tools = shouldUseTools ? await this.getAllTools() : {};
81
+ // Build message array from options
82
+ const messages = buildMessagesArray(options);
80
83
  const result = await streamText({
81
84
  model: this.model,
82
- prompt: options.input.text,
83
- system: options.systemPrompt,
85
+ messages: messages,
84
86
  temperature: options.temperature,
85
87
  maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
86
88
  tools,
@@ -0,0 +1,95 @@
1
+ /**
2
+ * Conversation Memory Types for NeuroLink
3
+ * Provides type-safe conversation storage and context management
4
+ */
5
+ /**
6
+ * Configuration for conversation memory feature
7
+ */
8
+ export interface ConversationMemoryConfig {
9
+ /** Enable conversation memory feature */
10
+ enabled: boolean;
11
+ /** Maximum number of sessions to keep in memory (default: 50) */
12
+ maxSessions?: number;
13
+ /** Maximum number of conversation turns to keep per session (default: 20) */
14
+ maxTurnsPerSession?: number;
15
+ }
16
+ /**
17
+ * Complete memory for a conversation session
18
+ * ULTRA-OPTIMIZED: Direct ChatMessage[] storage - zero conversion overhead
19
+ */
20
+ export interface SessionMemory {
21
+ /** Unique session identifier */
22
+ sessionId: string;
23
+ /** User identifier (optional) */
24
+ userId?: string;
25
+ /** Direct message storage - ready for immediate AI consumption */
26
+ messages: ChatMessage[];
27
+ /** When this session was created */
28
+ createdAt: number;
29
+ /** When this session was last active */
30
+ lastActivity: number;
31
+ /** Optional session metadata */
32
+ metadata?: {
33
+ /** User role or permissions */
34
+ userRole?: string;
35
+ /** Tags for categorizing this session */
36
+ tags?: string[];
37
+ /** Custom data specific to the organization */
38
+ customData?: Record<string, unknown>;
39
+ };
40
+ }
41
+ /**
42
+ * Statistics about conversation memory usage (simplified for pure in-memory storage)
43
+ */
44
+ export interface ConversationMemoryStats {
45
+ /** Total number of active sessions */
46
+ totalSessions: number;
47
+ /** Total number of conversation turns across all sessions */
48
+ totalTurns: number;
49
+ }
50
+ /**
51
+ * Chat message format for conversation history
52
+ */
53
+ export interface ChatMessage {
54
+ /** Role of the message sender */
55
+ role: "user" | "assistant" | "system";
56
+ /** Content of the message */
57
+ content: string;
58
+ }
59
+ /**
60
+ * Events emitted by conversation memory system
61
+ */
62
+ export interface ConversationMemoryEvents {
63
+ /** Emitted when a new session is created */
64
+ "session:created": {
65
+ sessionId: string;
66
+ userId?: string;
67
+ timestamp: number;
68
+ };
69
+ /** Emitted when a conversation turn is stored */
70
+ "turn:stored": {
71
+ sessionId: string;
72
+ turnIndex: number;
73
+ timestamp: number;
74
+ };
75
+ /** Emitted when a session is cleaned up */
76
+ "session:cleanup": {
77
+ sessionId: string;
78
+ reason: "expired" | "limit_exceeded";
79
+ timestamp: number;
80
+ };
81
+ /** Emitted when context is injected */
82
+ "context:injected": {
83
+ sessionId: string;
84
+ turnsIncluded: number;
85
+ timestamp: number;
86
+ };
87
+ }
88
+ /**
89
+ * Error types specific to conversation memory
90
+ */
91
+ export declare class ConversationMemoryError extends Error {
92
+ code: "STORAGE_ERROR" | "CONFIG_ERROR" | "SESSION_NOT_FOUND" | "CLEANUP_ERROR";
93
+ details?: Record<string, unknown> | undefined;
94
+ constructor(message: string, code: "STORAGE_ERROR" | "CONFIG_ERROR" | "SESSION_NOT_FOUND" | "CLEANUP_ERROR", details?: Record<string, unknown> | undefined);
95
+ }
@@ -0,0 +1,17 @@
1
+ /**
2
+ * Conversation Memory Types for NeuroLink
3
+ * Provides type-safe conversation storage and context management
4
+ */
5
+ /**
6
+ * Error types specific to conversation memory
7
+ */
8
/**
 * Error raised by the conversation memory subsystem.
 * Carries a machine-readable `code` plus optional structured `details`
 * so callers can branch on the failure category programmatically.
 */
export class ConversationMemoryError extends Error {
    /** Machine-readable failure category (e.g. "STORAGE_ERROR"). */
    code;
    /** Optional structured context about the failure. */
    details;
    /**
     * @param message Human-readable description of the failure.
     * @param code One of "STORAGE_ERROR" | "CONFIG_ERROR" | "SESSION_NOT_FOUND" | "CLEANUP_ERROR".
     * @param details Optional structured context for diagnostics.
     */
    constructor(message, code, details) {
        super(message);
        this.name = ConversationMemoryError.name;
        Object.assign(this, { code, details });
    }
}
@@ -2,6 +2,7 @@ import type { ZodType, ZodTypeDef } from "zod";
2
2
  import type { Tool, Schema } from "ai";
3
3
  import type { AIProviderName, AnalyticsData, EvaluationData } from "../core/types.js";
4
4
  import type { UnknownRecord, JsonValue } from "./common.js";
5
+ import type { ChatMessage } from "./conversationTypes.js";
5
6
  /**
6
7
  * Interface for tool execution calls (AI SDK compatible)
7
8
  */
@@ -107,6 +108,7 @@ export interface StreamOptions {
107
108
  enableProgress?: boolean;
108
109
  fallbackToGenerate?: boolean;
109
110
  };
111
+ conversationMessages?: ChatMessage[];
110
112
  }
111
113
  /**
112
114
  * Stream function result interface - Primary output format for streaming
@@ -0,0 +1,22 @@
1
+ /**
2
+ * Conversation Memory Utilities
3
+ * Handles configuration merging and conversation memory operations
4
+ */
5
+ import type { ConversationMemoryConfig, ChatMessage } from "../types/conversationTypes.js";
6
+ import type { ConversationMemoryManager } from "../core/conversationMemoryManager.js";
7
+ import type { TextGenerationOptions, TextGenerationResult } from "../core/types.js";
8
+ /**
9
+ * Apply conversation memory defaults to user configuration
10
+ * Merges user config with environment variables and default values
11
+ */
12
+ export declare function applyConversationMemoryDefaults(userConfig?: Partial<ConversationMemoryConfig>): ConversationMemoryConfig;
13
+ /**
14
+ * Get conversation history as message array (PREFERRED METHOD)
15
+ * Returns proper message array format for AI providers
16
+ */
17
+ export declare function getConversationMessages(conversationMemory: ConversationMemoryManager | undefined, options: TextGenerationOptions): Promise<ChatMessage[]>;
18
+ /**
19
+ * Store conversation turn for future context
20
+ * Saves user messages and AI responses for conversation memory
21
+ */
22
+ export declare function storeConversationTurn(conversationMemory: ConversationMemoryManager | undefined, originalOptions: TextGenerationOptions, result: TextGenerationResult): Promise<void>;
@@ -0,0 +1,77 @@
1
+ /**
2
+ * Conversation Memory Utilities
3
+ * Handles configuration merging and conversation memory operations
4
+ */
5
+ import { getConversationMemoryDefaults } from "../config/conversationMemoryConfig.js";
6
+ import { logger } from "./logger.js";
7
/**
 * Apply conversation memory defaults to user configuration
 * Merges user config with environment variables and default values
 * @param userConfig Optional partial configuration supplied by the caller.
 * @returns A fully-populated ConversationMemoryConfig.
 */
export function applyConversationMemoryDefaults(userConfig) {
    const defaults = getConversationMemoryDefaults();
    // Pick only the known keys; nullish values fall back to the defaults.
    const { enabled, maxSessions, maxTurnsPerSession } = userConfig ?? {};
    return {
        enabled: enabled ?? defaults.enabled,
        maxSessions: maxSessions ?? defaults.maxSessions,
        maxTurnsPerSession: maxTurnsPerSession ?? defaults.maxTurnsPerSession,
    };
}
19
/**
 * Get conversation history as message array (PREFERRED METHOD)
 * Returns proper message array format for AI providers
 * @param conversationMemory Memory manager instance, or undefined when disabled.
 * @param options Generation options; history is keyed by options.context.sessionId.
 * @returns ChatMessage[] for the session, or [] when unavailable.
 */
export async function getConversationMessages(conversationMemory, options) {
    const sessionId = options.context?.sessionId;
    const hasValidSession = typeof sessionId === "string" && sessionId.length > 0;
    // No memory, no context object, or no usable session id -> no history.
    if (!conversationMemory || !options.context || !hasValidSession) {
        return [];
    }
    try {
        const history = conversationMemory.buildContextMessages(sessionId);
        logger.debug("Conversation messages retrieved", {
            sessionId,
            messageCount: history.length,
        });
        return history;
    }
    catch (error) {
        // Best-effort: a memory failure must never break generation.
        logger.warn("Failed to get conversation messages", {
            sessionId,
            error: error instanceof Error ? error.message : String(error),
        });
        return [];
    }
}
47
/**
 * Store conversation turn for future context
 * Saves user messages and AI responses for conversation memory
 * @param conversationMemory Memory manager instance, or undefined when disabled.
 * @param originalOptions The original generation options (prompt + context).
 * @param result The generation result whose content is recorded as the reply.
 */
export async function storeConversationTurn(conversationMemory, originalOptions, result) {
    const context = originalOptions.context;
    if (!conversationMemory || !context) {
        return;
    }
    const sessionId = context.sessionId;
    if (typeof sessionId !== "string" || sessionId === "") {
        return;
    }
    const userId = typeof context.userId === "string" ? context.userId : undefined;
    const prompt = originalOptions.prompt || "";
    try {
        await conversationMemory.storeConversationTurn(sessionId, userId, prompt, result.content);
        logger.debug("Conversation turn stored", {
            sessionId,
            userId,
            promptLength: originalOptions.prompt?.length || 0,
            responseLength: result.content.length,
        });
    }
    catch (error) {
        // Swallow storage failures: memory is best-effort and must not fail generation.
        logger.warn("Failed to store conversation turn", {
            sessionId,
            userId,
            error: error instanceof Error ? error.message : String(error),
        });
    }
}
+ }