@juspay/neurolink 7.54.0 → 8.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +34 -0
- package/dist/index.d.ts +2 -2
- package/dist/index.js +2 -2
- package/dist/lib/index.d.ts +2 -2
- package/dist/lib/index.js +2 -2
- package/dist/lib/mcp/mcpClientFactory.js +0 -3
- package/dist/lib/memory/mem0Initializer.d.ts +10 -5
- package/dist/lib/memory/mem0Initializer.js +17 -28
- package/dist/lib/neurolink.d.ts +8 -0
- package/dist/lib/neurolink.js +351 -316
- package/dist/lib/services/server/ai/observability/instrumentation.d.ts +17 -0
- package/dist/lib/services/server/ai/observability/instrumentation.js +54 -5
- package/dist/lib/types/conversation.d.ts +3 -6
- package/dist/lib/types/observability.d.ts +4 -0
- package/dist/lib/types/utilities.d.ts +0 -35
- package/dist/lib/utils/fileDetector.js +3 -3
- package/dist/lib/utils/messageBuilder.js +3 -3
- package/dist/mcp/mcpClientFactory.js +0 -3
- package/dist/memory/mem0Initializer.d.ts +10 -5
- package/dist/memory/mem0Initializer.js +17 -28
- package/dist/neurolink.d.ts +8 -0
- package/dist/neurolink.js +351 -316
- package/dist/services/server/ai/observability/instrumentation.d.ts +17 -0
- package/dist/services/server/ai/observability/instrumentation.js +54 -5
- package/dist/types/conversation.d.ts +3 -6
- package/dist/types/observability.d.ts +4 -0
- package/dist/types/utilities.d.ts +0 -35
- package/dist/utils/fileDetector.js +3 -3
- package/dist/utils/messageBuilder.js +3 -3
- package/package.json +7 -6
package/dist/neurolink.js
CHANGED
@@ -44,7 +44,8 @@ import { directToolsServer } from "./mcp/servers/agent/directToolsServer.js";
 // Import orchestration components
 import { ModelRouter } from "./utils/modelRouter.js";
 import { BinaryTaskClassifier } from "./utils/taskClassifier.js";
-import { initializeOpenTelemetry, shutdownOpenTelemetry, flushOpenTelemetry, getLangfuseHealthStatus, } from "./services/server/ai/observability/instrumentation.js";
+import { initializeOpenTelemetry, shutdownOpenTelemetry, flushOpenTelemetry, getLangfuseHealthStatus, setLangfuseContext, } from "./services/server/ai/observability/instrumentation.js";
+import { initializeMem0 } from "./memory/mem0Initializer.js";
 export class NeuroLink {
 mcpInitialized = false;
 emitter = new EventEmitter();
@@ -94,6 +95,40 @@ export class NeuroLink {
 // Mem0 memory instance and config for conversation context
 mem0Instance;
 mem0Config;
+/**
+* Extract and set Langfuse context from options with proper async scoping
+*/
+async setLangfuseContextFromOptions(options, callback) {
+if (options.context &&
+typeof options.context === "object" &&
+options.context !== null) {
+try {
+const ctx = options.context;
+if (ctx.userId || ctx.sessionId) {
+return await new Promise((resolve, reject) => {
+setLangfuseContext({
+userId: typeof ctx.userId === "string" ? ctx.userId : null,
+sessionId: typeof ctx.sessionId === "string" ? ctx.sessionId : null,
+}, async () => {
+try {
+const result = await callback();
+resolve(result);
+}
+catch (error) {
+reject(error);
+}
+});
+});
+}
+}
+catch (error) {
+logger.warn("Failed to set Langfuse context from options", {
+error: error instanceof Error ? error.message : String(error),
+});
+}
+}
+return await callback();
+}
 /**
 * Simple sync config setup for mem0
 */
@@ -116,8 +151,6 @@ export class NeuroLink {
 this.mem0Instance = null;
 return null;
 }
-// Import and initialize from separate file
-const { initializeMem0 } = await import("./memory/mem0Initializer.js");
 if (!this.mem0Config) {
 this.mem0Instance = null;
 return null;
@@ -356,9 +389,28 @@ export class NeuroLink {
 /** Format memory context for prompt inclusion */
 formatMemoryContext(memoryContext, currentInput) {
 return `Context from previous conversations:
-${memoryContext}
 
-
+${memoryContext}
+
+Current user's request: ${currentInput}`;
+}
+/** Extract memory context from search results */
+extractMemoryContext(memories) {
+return memories
+.map((m) => m.memory || "")
+.filter(Boolean)
+.join("\n");
+}
+/** Store conversation turn in mem0 */
+async storeConversationTurn(mem0, userContent, userId, metadata) {
+// Store user message only, reducing latency in mem0
+const conversationTurn = [{ role: "user", content: userContent }];
+await mem0.add(conversationTurn, {
+user_id: userId,
+metadata,
+infer: true,
+async_mode: true,
+});
 }
 /**
 * Set up HITL event forwarding to main emitter
@@ -518,7 +570,7 @@ export class NeuroLink {
 langfuseInitStartTimeNs: langfuseInitStartTime.toString(),
 message: "Starting Langfuse observability initialization",
 });
-// Initialize OpenTelemetry
+// Initialize OpenTelemetry (sets defaults from config)
 initializeOpenTelemetry(langfuseConfig);
 const healthStatus = getLangfuseHealthStatus();
 const langfuseInitDurationNs = process.hrtime.bigint() - langfuseInitStartTime;
@@ -1146,198 +1198,190 @@ export class NeuroLink {
 if (!options.input?.text || typeof options.input.text !== "string") {
 throw new Error("Input text is required and must be a non-empty string");
 }
-
-
-
-
-
-
-
-
-const memories = await mem0.search(options.input.text, {
-userId: options.context.userId,
-limit: 5,
-});
-if (memories?.results?.length > 0) {
-// Enhance the input with memory context
-const memoryContext = memories.results
-.map((m) => m.memory)
-.join("\n");
-options.input.text = this.formatMemoryContext(memoryContext, options.input.text);
+// Set session and user IDs from context for Langfuse spans and execute with proper async scoping
+return await this.setLangfuseContextFromOptions(options, async () => {
+if (this.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
+options.context?.userId) {
+try {
+const mem0 = await this.ensureMem0Ready();
+if (!mem0) {
+logger.debug("Mem0 not available, continuing without memory retrieval");
 }
+else {
+const memories = await mem0.search(options.input.text, {
+user_id: options.context.userId,
+limit: 5,
+});
+if (memories && memories.length > 0) {
+// Enhance the input with memory context
+const memoryContext = this.extractMemoryContext(memories);
+options.input.text = this.formatMemoryContext(memoryContext, options.input.text);
+}
+}
+}
+catch (error) {
+logger.warn("Mem0 memory retrieval failed:", error);
 }
 }
-
-
+const startTime = Date.now();
+// Apply orchestration if enabled and no specific provider/model requested
+if (this.enableOrchestration && !options.provider && !options.model) {
+try {
+const orchestratedOptions = await this.applyOrchestration(options);
+logger.debug("Orchestration applied", {
+originalProvider: options.provider || "auto",
+orchestratedProvider: orchestratedOptions.provider,
+orchestratedModel: orchestratedOptions.model,
+prompt: options.input.text.substring(0, 100),
+});
+// Use orchestrated options
+Object.assign(options, orchestratedOptions);
+}
+catch (error) {
+logger.warn("Orchestration failed, continuing with original options", {
+error: error instanceof Error ? error.message : String(error),
+originalProvider: options.provider || "auto",
+});
+// Continue with original options if orchestration fails
+}
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+// Emit generation start event (NeuroLink format - keep existing)
+this.emitter.emit("generation:start", {
+provider: options.provider || "auto",
+timestamp: startTime,
+});
+// ADD: Bedrock-compatible response:start event
+this.emitter.emit("response:start");
+// ADD: Bedrock-compatible message event
+this.emitter.emit("message", `Starting ${options.provider || "auto"} text generation...`);
+// Process factory configuration
+const factoryResult = processFactoryOptions(options);
+// Validate factory configuration if present
+if (factoryResult.hasFactoryConfig && options.factoryConfig) {
+const validation = validateFactoryConfig(options.factoryConfig);
+if (!validation.isValid) {
+logger.warn("Invalid factory configuration detected", {
+errors: validation.errors,
+});
+// Continue with warning rather than throwing - graceful degradation
+}
 }
-
-
-
-
-
-
+// 🔧 CRITICAL FIX: Convert to TextGenerationOptions while preserving the input object for multimodal support
+const baseOptions = {
+prompt: options.input.text,
+provider: options.provider,
+model: options.model,
+temperature: options.temperature,
+maxTokens: options.maxTokens,
+systemPrompt: options.systemPrompt,
+schema: options.schema,
+output: options.output,
+disableTools: options.disableTools,
+enableAnalytics: options.enableAnalytics,
+enableEvaluation: options.enableEvaluation,
+context: options.context,
+evaluationDomain: options.evaluationDomain,
+toolUsageContext: options.toolUsageContext,
+input: options.input, // This includes text, images, and content arrays
+region: options.region,
+};
+// Apply factory enhancement using centralized utilities
+const textOptions = enhanceTextGenerationOptions(baseOptions, factoryResult);
+// Pass conversation memory config if available
+if (this.conversationMemory) {
+textOptions.conversationMemoryConfig = this.conversationMemory.config;
+// Include original prompt for context summarization
+textOptions.originalPrompt = originalPrompt;
 }
-
-
-
-
-
-
-
-
-
-this.emitter.emit("message", `Starting ${options.provider || "auto"} text generation...`);
-// Process factory configuration
-const factoryResult = processFactoryOptions(options);
-// Validate factory configuration if present
-if (factoryResult.hasFactoryConfig && options.factoryConfig) {
-const validation = validateFactoryConfig(options.factoryConfig);
-if (!validation.isValid) {
-logger.warn("Invalid factory configuration detected", {
-errors: validation.errors,
+// Detect and execute domain-specific tools
+const { toolResults, enhancedPrompt } = await this.detectAndExecuteTools(textOptions.prompt || options.input.text, factoryResult.domainType);
+// Update prompt with tool results if available
+if (enhancedPrompt !== textOptions.prompt) {
+textOptions.prompt = enhancedPrompt;
+logger.debug("Enhanced prompt with tool results", {
+originalLength: options.input.text.length,
+enhancedLength: enhancedPrompt.length,
+toolResults: toolResults.length,
 });
-// Continue with warning rather than throwing - graceful degradation
 }
-
-
-
-
-
-
-
-
-
-schema: options.schema,
-output: options.output,
-disableTools: options.disableTools,
-enableAnalytics: options.enableAnalytics,
-enableEvaluation: options.enableEvaluation,
-context: options.context,
-evaluationDomain: options.evaluationDomain,
-toolUsageContext: options.toolUsageContext,
-input: options.input, // This includes text, images, and content arrays
-region: options.region,
-};
-// Apply factory enhancement using centralized utilities
-const textOptions = enhanceTextGenerationOptions(baseOptions, factoryResult);
-// Pass conversation memory config if available
-if (this.conversationMemory) {
-textOptions.conversationMemoryConfig = this.conversationMemory.config;
-// Include original prompt for context summarization
-textOptions.originalPrompt = originalPrompt;
-}
-// Detect and execute domain-specific tools
-const { toolResults, enhancedPrompt } = await this.detectAndExecuteTools(textOptions.prompt || options.input.text, factoryResult.domainType);
-// Update prompt with tool results if available
-if (enhancedPrompt !== textOptions.prompt) {
-textOptions.prompt = enhancedPrompt;
-logger.debug("Enhanced prompt with tool results", {
-originalLength: options.input.text.length,
-enhancedLength: enhancedPrompt.length,
-toolResults: toolResults.length,
+// Use redesigned generation logic
+const textResult = await this.generateTextInternal(textOptions);
+// Emit generation completion event (NeuroLink format - enhanced with content)
+this.emitter.emit("generation:end", {
+provider: textResult.provider,
+responseTime: Date.now() - startTime,
+toolsUsed: textResult.toolsUsed,
+timestamp: Date.now(),
+result: textResult, // Enhanced: include full result
 });
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-: undefined,
-};
-if (this.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
-options.context?.userId &&
-generateResult.content) {
-// Non-blocking memory storage - run in background
-setImmediate(async () => {
-try {
-const mem0 = await this.ensureMem0Ready();
-if (mem0) {
-// Store complete conversation turn (user + AI messages)
-const conversationTurn = [
-{ role: "user", content: options.input.text },
-{ role: "system", content: generateResult.content },
-];
-await mem0.add(JSON.stringify(conversationTurn), {
-userId: options.context?.userId,
-metadata: {
+// ADD: Bedrock-compatible response:end event with content
+this.emitter.emit("response:end", textResult.content || "");
+// ADD: Bedrock-compatible message event
+this.emitter.emit("message", `Generation completed in ${Date.now() - startTime}ms`);
+// Convert back to GenerateResult
+const generateResult = {
+content: textResult.content,
+provider: textResult.provider,
+model: textResult.model,
+usage: textResult.usage
+? {
+input: textResult.usage.input || 0,
+output: textResult.usage.output || 0,
+total: textResult.usage.total || 0,
+}
+: undefined,
+responseTime: textResult.responseTime,
+toolsUsed: textResult.toolsUsed,
+toolExecutions: transformToolExecutions(textResult.toolExecutions),
+enhancedWithTools: textResult.enhancedWithTools,
+availableTools: transformAvailableTools(textResult.availableTools),
+analytics: textResult.analytics,
+evaluation: textResult.evaluation
+? {
+...textResult.evaluation,
+isOffTopic: textResult.evaluation
+.isOffTopic ?? false,
+alertSeverity: textResult.evaluation
+.alertSeverity ??
+"none",
+reasoning: textResult.evaluation
+.reasoning ?? "No evaluation provided",
+evaluationModel: textResult.evaluation
+.evaluationModel ?? "unknown",
+evaluationTime: textResult.evaluation
+.evaluationTime ?? Date.now(),
+// Include evaluationDomain from original options
+evaluationDomain: textResult.evaluation
+.evaluationDomain ??
+textOptions.evaluationDomain ??
+factoryResult.domainType,
+}
+: undefined,
+};
+if (this.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
+options.context?.userId &&
+generateResult.content) {
+// Non-blocking memory storage - run in background
+setImmediate(async () => {
+try {
+const mem0 = await this.ensureMem0Ready();
+if (mem0) {
+await this.storeConversationTurn(mem0, originalPrompt, options.context?.userId, {
 timestamp: new Date().toISOString(),
 provider: generateResult.provider,
 model: generateResult.model,
 type: "conversation_turn",
-
-
-});
+});
+}
 }
-
-
-
-
-}
-}
-
-
+catch (error) {
+// Non-blocking: Log error but don't fail the generation
+logger.warn("Mem0 memory storage failed:", error);
+}
+});
+}
+return generateResult;
+});
 }
 /**
 * BACKWARD COMPATIBILITY: Legacy generateText method
@@ -1863,153 +1907,144 @@ export class NeuroLink {
 const originalPrompt = options.input.text; // Store the original prompt for memory storage
 await this.validateStreamInput(options);
 this.emitStreamStartEvents(options, startTime);
-
-
-
-
-
-
-
-
-
-options.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-.
-
+// Set session and user IDs from context for Langfuse spans and execute with proper async scoping
+return await this.setLangfuseContextFromOptions(options, async () => {
+let enhancedOptions;
+let factoryResult;
+try {
+// Initialize conversation memory if needed (for lazy loading)
+await this.initializeConversationMemoryForGeneration(streamId, startTime, hrTimeStart);
+// Initialize MCP
+await this.initializeMCP();
+const _originalPrompt = options.input.text;
+if (this.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
+options.context?.userId) {
+try {
+const mem0 = await this.ensureMem0Ready();
+if (!mem0) {
+// Continue without memories if mem0 is not available
+logger.debug("Mem0 not available, continuing without memory retrieval");
+}
+else {
+const memories = await mem0.search(options.input.text, {
+user_id: options.context.userId,
+limit: 5,
+});
+if (memories && memories.length > 0) {
+// Enhance the input with memory context
+const memoryContext = this.extractMemoryContext(memories);
+options.input.text = this.formatMemoryContext(memoryContext, options.input.text);
+}
 }
 }
+catch (error) {
+// Non-blocking: Log error but continue with streaming
+logger.warn("Mem0 memory retrieval failed:", error);
+}
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-error: error instanceof Error ? error.message : String(error),
-originalProvider: options.provider || "auto",
-});
-// Continue with original options if orchestration fails
-}
-}
-factoryResult = processStreamingFactoryOptions(options);
-enhancedOptions = createCleanStreamOptions(options);
-if (options.input?.text) {
-const { toolResults: _toolResults, enhancedPrompt } = await this.detectAndExecuteTools(options.input.text, undefined);
-if (enhancedPrompt !== options.input.text) {
-enhancedOptions.input.text = enhancedPrompt;
+// Apply orchestration if enabled and no specific provider/model requested
+if (this.enableOrchestration && !options.provider && !options.model) {
+try {
+const orchestratedOptions = await this.applyStreamOrchestration(options);
+logger.debug("Stream orchestration applied", {
+originalProvider: options.provider || "auto",
+orchestratedProvider: orchestratedOptions.provider,
+orchestratedModel: orchestratedOptions.model,
+prompt: options.input.text?.substring(0, 100),
+});
+// Use orchestrated options
+Object.assign(options, orchestratedOptions);
+}
+catch (error) {
+logger.warn("Stream orchestration failed, continuing with original options", {
+error: error instanceof Error ? error.message : String(error),
+originalProvider: options.provider || "auto",
+});
+// Continue with original options if orchestration fails
+}
 }
-
-
-
-
-
-
-for await (const chunk of mcpStream) {
-if (chunk &&
-"content" in chunk &&
-typeof chunk.content === "string") {
-accumulatedContent += chunk.content;
-// Emit chunk event for compatibility
-self.emitter.emit("response:chunk", chunk.content);
-}
-yield chunk; // Preserve original streaming behavior
+factoryResult = processStreamingFactoryOptions(options);
+enhancedOptions = createCleanStreamOptions(options);
+if (options.input?.text) {
+const { toolResults: _toolResults, enhancedPrompt } = await this.detectAndExecuteTools(options.input.text, undefined);
+if (enhancedPrompt !== options.input.text) {
+enhancedOptions.input.text = enhancedPrompt;
 }
 }
-
-
-
-
-
-
-
-
-
-
-
-
-}
-
-catch (error) {
-logger.warn("Failed to store stream conversation turn", {
-error: error instanceof Error ? error.message : String(error),
-});
+const { stream: mcpStream, provider: providerName } = await this.createMCPStream(enhancedOptions);
+// Create a wrapper around the stream that accumulates content
+let accumulatedContent = "";
+const processedStream = (async function* (self) {
+try {
+for await (const chunk of mcpStream) {
+if (chunk &&
+"content" in chunk &&
+typeof chunk.content === "string") {
+accumulatedContent += chunk.content;
+// Emit chunk event for compatibility
+self.emitter.emit("response:chunk", chunk.content);
+}
+yield chunk; // Preserve original streaming behavior
 }
 }
-
-
-
-
-
+finally {
+// Store memory after stream consumption is complete
+if (self.conversationMemory && enhancedOptions.context?.sessionId) {
+const sessionId = enhancedOptions.context?.sessionId;
+const userId = enhancedOptions.context?.userId;
 try {
-
-
-
-
-
-
-
-
-
-
+await self.conversationMemory.storeConversationTurn(sessionId, userId, originalPrompt ?? "", accumulatedContent, new Date(startTime));
+logger.debug("Stream conversation turn stored", {
+sessionId,
+userInputLength: originalPrompt?.length ?? 0,
+responseLength: accumulatedContent.length,
+});
+}
+catch (error) {
+logger.warn("Failed to store stream conversation turn", {
+error: error instanceof Error ? error.message : String(error),
+});
+}
+}
+if (self.conversationMemoryConfig?.conversationMemory?.mem0Enabled &&
+enhancedOptions.context?.userId &&
+accumulatedContent.trim()) {
+// Non-blocking memory storage - run in background
+setImmediate(async () => {
+try {
+const mem0 = await self.ensureMem0Ready();
+if (mem0) {
+await self.storeConversationTurn(mem0, originalPrompt, enhancedOptions.context?.userId, {
 timestamp: new Date().toISOString(),
 type: "conversation_turn_stream",
 userMessage: originalPrompt,
-async_mode: true,
 aiResponse: accumulatedContent.trim(),
-}
-}
+});
+}
 }
-
-
-
-}
-}
+catch (error) {
+logger.warn("Mem0 memory storage failed:", error);
+}
+});
+}
 }
-}
-
-
-
-
-
-
-
-
-
-
-
-}
-
-
-
-}
+})(this);
+const streamResult = await this.processStreamResult(mcpStream, enhancedOptions, factoryResult);
+const responseTime = Date.now() - startTime;
+this.emitStreamEndEvents(streamResult);
+return this.createStreamResponse(streamResult, processedStream, {
+providerName,
+options,
+startTime,
+responseTime,
+streamId,
+fallback: false,
+});
+}
+catch (error) {
+return this.handleStreamError(error, options, startTime, streamId, undefined, undefined);
+}
+});
 }
 /**
 * Validate stream input with comprehensive error reporting
|