@juspay/neurolink 4.2.0 → 5.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +47 -2
- package/README.md +51 -60
- package/dist/chat/sse-handler.js +5 -4
- package/dist/chat/websocket-chat-handler.js +9 -9
- package/dist/cli/commands/mcp.js +1 -1
- package/dist/cli/commands/ollama.js +3 -3
- package/dist/cli/factories/command-factory.d.ts +14 -0
- package/dist/cli/factories/command-factory.js +129 -0
- package/dist/cli/index.js +27 -29
- package/dist/cli/utils/interactive-setup.js +2 -2
- package/dist/core/evaluation.d.ts +9 -9
- package/dist/core/evaluation.js +14 -14
- package/dist/core/types.d.ts +41 -48
- package/dist/core/types.js +1 -0
- package/dist/factories/compatibility-factory.d.ts +20 -0
- package/dist/factories/compatibility-factory.js +69 -0
- package/dist/factories/provider-generate-factory.d.ts +20 -0
- package/dist/factories/provider-generate-factory.js +87 -0
- package/dist/index.d.ts +4 -2
- package/dist/index.js +3 -1
- package/dist/lib/chat/sse-handler.js +5 -4
- package/dist/lib/chat/websocket-chat-handler.js +9 -9
- package/dist/lib/core/evaluation.d.ts +9 -9
- package/dist/lib/core/evaluation.js +14 -14
- package/dist/lib/core/types.d.ts +41 -48
- package/dist/lib/core/types.js +1 -0
- package/dist/lib/factories/compatibility-factory.d.ts +20 -0
- package/dist/lib/factories/compatibility-factory.js +69 -0
- package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
- package/dist/lib/factories/provider-generate-factory.js +87 -0
- package/dist/lib/index.d.ts +4 -2
- package/dist/lib/index.js +3 -1
- package/dist/lib/mcp/client.js +5 -5
- package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
- package/dist/lib/mcp/external-client.js +2 -2
- package/dist/lib/mcp/factory.d.ts +1 -1
- package/dist/lib/mcp/factory.js +1 -1
- package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
- package/dist/lib/mcp/orchestrator.js +4 -4
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +5 -5
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
- package/dist/lib/neurolink.d.ts +21 -73
- package/dist/lib/neurolink.js +230 -119
- package/dist/lib/providers/agent-enhanced-provider.d.ts +12 -8
- package/dist/lib/providers/agent-enhanced-provider.js +87 -96
- package/dist/lib/providers/amazonBedrock.d.ts +17 -8
- package/dist/lib/providers/amazonBedrock.js +60 -30
- package/dist/lib/providers/anthropic.d.ts +14 -10
- package/dist/lib/providers/anthropic.js +84 -154
- package/dist/lib/providers/azureOpenAI.d.ts +9 -6
- package/dist/lib/providers/azureOpenAI.js +70 -159
- package/dist/lib/providers/function-calling-provider.d.ts +14 -12
- package/dist/lib/providers/function-calling-provider.js +114 -64
- package/dist/lib/providers/googleAIStudio.d.ts +12 -19
- package/dist/lib/providers/googleAIStudio.js +65 -34
- package/dist/lib/providers/googleVertexAI.d.ts +11 -15
- package/dist/lib/providers/googleVertexAI.js +146 -118
- package/dist/lib/providers/huggingFace.d.ts +10 -11
- package/dist/lib/providers/huggingFace.js +61 -24
- package/dist/lib/providers/mcp-provider.d.ts +13 -8
- package/dist/lib/providers/mcp-provider.js +59 -18
- package/dist/lib/providers/mistralAI.d.ts +14 -11
- package/dist/lib/providers/mistralAI.js +60 -29
- package/dist/lib/providers/ollama.d.ts +9 -8
- package/dist/lib/providers/ollama.js +134 -91
- package/dist/lib/providers/openAI.d.ts +11 -12
- package/dist/lib/providers/openAI.js +132 -97
- package/dist/lib/types/generate-types.d.ts +79 -0
- package/dist/lib/types/generate-types.js +1 -0
- package/dist/lib/types/stream-types.d.ts +83 -0
- package/dist/lib/types/stream-types.js +1 -0
- package/dist/lib/utils/providerUtils-fixed.js +1 -1
- package/dist/lib/utils/streaming-utils.d.ts +14 -2
- package/dist/lib/utils/streaming-utils.js +0 -3
- package/dist/mcp/client.js +5 -5
- package/dist/mcp/dynamic-orchestrator.js +8 -8
- package/dist/mcp/external-client.js +2 -2
- package/dist/mcp/factory.d.ts +1 -1
- package/dist/mcp/factory.js +1 -1
- package/dist/mcp/neurolink-mcp-client.js +10 -10
- package/dist/mcp/orchestrator.js +4 -4
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
- package/dist/mcp/servers/ai-providers/ai-core-server.js +5 -5
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
- package/dist/neurolink.d.ts +21 -73
- package/dist/neurolink.js +230 -119
- package/dist/providers/agent-enhanced-provider.d.ts +12 -8
- package/dist/providers/agent-enhanced-provider.js +87 -95
- package/dist/providers/amazonBedrock.d.ts +17 -8
- package/dist/providers/amazonBedrock.js +60 -30
- package/dist/providers/anthropic.d.ts +14 -10
- package/dist/providers/anthropic.js +84 -154
- package/dist/providers/azureOpenAI.d.ts +9 -6
- package/dist/providers/azureOpenAI.js +70 -159
- package/dist/providers/function-calling-provider.d.ts +14 -12
- package/dist/providers/function-calling-provider.js +114 -64
- package/dist/providers/googleAIStudio.d.ts +12 -19
- package/dist/providers/googleAIStudio.js +65 -34
- package/dist/providers/googleVertexAI.d.ts +11 -15
- package/dist/providers/googleVertexAI.js +146 -118
- package/dist/providers/huggingFace.d.ts +10 -11
- package/dist/providers/huggingFace.js +61 -24
- package/dist/providers/mcp-provider.d.ts +13 -8
- package/dist/providers/mcp-provider.js +59 -18
- package/dist/providers/mistralAI.d.ts +14 -11
- package/dist/providers/mistralAI.js +60 -29
- package/dist/providers/ollama.d.ts +9 -8
- package/dist/providers/ollama.js +133 -90
- package/dist/providers/openAI.d.ts +11 -12
- package/dist/providers/openAI.js +132 -97
- package/dist/types/generate-types.d.ts +79 -0
- package/dist/types/generate-types.js +1 -0
- package/dist/types/stream-types.d.ts +83 -0
- package/dist/types/stream-types.js +1 -0
- package/dist/utils/providerUtils-fixed.js +1 -1
- package/dist/utils/streaming-utils.d.ts +14 -2
- package/dist/utils/streaming-utils.js +0 -3
- package/package.json +2 -3
- package/dist/cli/commands/agent-generate.d.ts +0 -1
- package/dist/cli/commands/agent-generate.js +0 -67
package/dist/lib/neurolink.js
CHANGED
```diff
@@ -12,6 +12,8 @@ import { unifiedRegistry } from "./mcp/unified-registry.js";
 import { logger } from "./utils/logger.js";
 import { getBestProvider } from "./utils/providerUtils-fixed.js";
 import { TimeoutError } from "./utils/timeout.js";
+import { CompatibilityConversionFactory } from "./factories/compatibility-factory.js";
+// Core types imported from core/types.js
 export class NeuroLink {
     mcpInitialized = false;
     contextManager;
```
```diff
@@ -69,29 +71,120 @@ export class NeuroLink {
         }
     }
     /**
-     * Generate
-     *
+     * PRIMARY METHOD: Generate content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
+     */
+    async generate(options) {
+        // Validate input
+        if (!options?.input?.text ||
+            typeof options.input.text !== "string" ||
+            options.input.text.trim() === "") {
+            throw new Error("Generate options must include input.text as a non-empty string");
+        }
+        // Convert input format and extract text
+        const prompt = options.input.text;
+        const convertedOptions = {
+            prompt,
+            provider: options.provider,
+            model: options.model,
+            temperature: options.temperature,
+            maxTokens: options.maxTokens,
+            systemPrompt: options.systemPrompt,
+            timeout: options.timeout,
+            enableAnalytics: options.enableAnalytics,
+            enableEvaluation: options.enableEvaluation,
+            context: options.context,
+        };
+        // Use existing generation infrastructure directly
+        // For now, always use generateWithTools for full functionality
+        const textResult = await this.generateWithTools(convertedOptions);
+        // Convert to GenerateResult format
+        const generateResult = {
+            content: textResult.content,
+            provider: textResult.provider,
+            model: textResult.model,
+            usage: textResult.usage
+                ? {
+                    inputTokens: textResult.usage.promptTokens || 0,
+                    outputTokens: textResult.usage.completionTokens || 0,
+                    totalTokens: textResult.usage.totalTokens || 0,
+                }
+                : undefined,
+            responseTime: textResult.responseTime,
+            toolsUsed: textResult.toolsUsed,
+            toolExecutions: textResult.toolExecutions?.map((te) => ({
+                name: te.toolName || te.name || "",
+                input: te.input || {},
+                output: te.output || te.result,
+                duration: te.executionTime || te.duration || 0,
+            })),
+            enhancedWithTools: textResult.enhancedWithTools,
+            availableTools: textResult.availableTools?.map((tool) => ({
+                name: tool.name || "",
+                description: tool.description || "",
+                parameters: tool.parameters || {},
+            })),
+            analytics: textResult.analytics,
+            evaluation: textResult.evaluation,
+        };
+        return generateResult;
+    }
+    /**
+     * BACKWARD COMPATIBILITY: Legacy generateText method
+     * Internally calls generate() and converts result format
      */
     async generateText(options) {
-        //
-        if (!options ||
+        // Validate required parameters for backward compatibility
+        if (!options.prompt ||
             typeof options.prompt !== "string" ||
             options.prompt.trim() === "") {
-            throw new Error("options
+            throw new Error("GenerateText options must include prompt as a non-empty string");
+        }
+        // Convert TextGenerationOptions to GenerateOptions
+        const generateOptions = CompatibilityConversionFactory.convertTextToGenerate(options);
+        try {
+            // Use internal generate method for identical performance and behavior
+            const generateResult = await this.generate(generateOptions);
+            // Convert GenerateResult back to TextGenerationResult format for backward compatibility
+            const textResult = {
+                content: generateResult.content,
+                provider: generateResult.provider,
+                model: generateResult.model,
+                usage: generateResult.usage
+                    ? {
+                        promptTokens: generateResult.usage?.inputTokens || 0,
+                        completionTokens: generateResult.usage?.outputTokens || 0,
+                        totalTokens: generateResult.usage?.totalTokens || 0,
+                    }
+                    : undefined,
+                responseTime: generateResult.responseTime,
+                toolsUsed: generateResult.toolsUsed,
+                toolExecutions: generateResult.toolExecutions?.map((te) => ({
+                    toolName: te.name || te.toolName || "",
+                    executionTime: te.duration || te.executionTime || 0,
+                    success: true, // Assume success if execution completed
+                    serverId: te.serverId,
+                })),
+                enhancedWithTools: generateResult.enhancedWithTools,
+                availableTools: generateResult.availableTools?.map((tool) => ({
+                    name: tool.name || "",
+                    description: tool.description || "",
+                    server: tool.server || "unknown",
+                    category: tool.category,
+                })),
+            };
+            return textResult;
         }
-
-
-        return this.generateTextRegular(options);
+        catch (error) {
+            throw new Error(`GenerateText compatibility method failed: ${error}`);
         }
-        // Default: Generate with tools (natural AI behavior)
-        return this.generateTextWithTools(options);
     }
     /**
      * Generate text with real MCP tool integration using automatic detection
      */
-    async
+    async generateWithTools(options) {
         const startTime = Date.now();
-        const functionTag = "NeuroLink.
+        const functionTag = "NeuroLink.generateWithTools";
         // Initialize MCP if needed
         await this.initializeMCP();
         // Create execution context for tool operations
```
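The hunk above makes `generate()` the primary entry point, taking an `input.text` payload and returning a `GenerateResult`, while `generateText()` becomes a thin backward-compatibility wrapper that converts options and results between the old and new shapes. A minimal usage sketch based only on the fields visible in this diff; the root export and any option names beyond those shown are assumptions:

```ts
import { NeuroLink } from "@juspay/neurolink"; // assumed root export

async function compareGenerateApis(): Promise<void> {
  const neurolink = new NeuroLink();

  // 5.x primary API: options carry an input object; usage reports inputTokens/outputTokens.
  const result = await neurolink.generate({
    input: { text: "Summarize this week's deploys." },
    provider: "auto",
  });
  console.log(result.content, result.usage?.inputTokens);

  // 4.x-style call keeps working via the compatibility wrapper;
  // usage comes back as promptTokens/completionTokens.
  const legacy = await neurolink.generateText({
    prompt: "Summarize this week's deploys.",
  });
  console.log(legacy.content, legacy.usage?.promptTokens);
}
```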
```diff
@@ -135,7 +228,7 @@ export class NeuroLink {
             // Create provider with MCP enabled using best provider function
             const provider = await AIProviderFactory.createBestProvider(providerName, options.model, true);
             // Generate text with automatic tool detection
-            const result = await provider.
+            const result = await provider.generate({
                 prompt: options.prompt,
                 temperature: options.temperature,
                 maxTokens: options.maxTokens,
```
```diff
@@ -163,17 +256,17 @@ export class NeuroLink {
                 availableToolsCount: availableTools.length,
             });
             // Check if we actually got content
-            if (!result.
+            if (!result.content || result.content.trim() === "") {
                 mcpLogger.warn(`[${functionTag}] Empty response from provider, attempting fallback`, {
                     provider: providerName,
-                    hasText: !!result.
-                    textLength: result.
+                    hasText: !!result.content,
+                    textLength: result.content?.length || 0,
                 });
                 // Fall back to regular generation if MCP generation returns empty
-                return this.
+                return this.generateRegular(options);
             }
             return {
-                content: result.
+                content: result.content,
                 provider: providerName,
                 usage: result.usage,
                 responseTime,
```
```diff
@@ -190,15 +283,15 @@ export class NeuroLink {
             mcpLogger.warn(`[${functionTag}] MCP generation failed, falling back to regular`, {
                 error: error instanceof Error ? error.message : String(error),
             });
-            return this.
+            return this.generateRegular(options);
         }
     }
     /**
      * Regular text generation (existing logic)
      */
-    async
+    async generateRegular(options) {
         const startTime = Date.now();
-        const functionTag = "NeuroLink.
+        const functionTag = "NeuroLink.generateRegular";
         // Define fallback provider priority order
         const providerPriority = [
             "openai",
```
```diff
@@ -228,7 +321,7 @@ export class NeuroLink {
                     provider: providerName,
                 });
                 const provider = await AIProviderFactory.createProvider(providerName, options.model, false);
-                const result = await provider.
+                const result = await provider.generate({
                     prompt: options.prompt,
                     model: options.model,
                     temperature: options.temperature,
```
```diff
@@ -248,11 +341,11 @@ export class NeuroLink {
                     throw new Error("No response received from AI provider");
                 }
                 // Check if we actually got content
-                if (!result.
+                if (!result.content || result.content.trim() === "") {
                     logger.warn(`[${functionTag}] Empty response from provider`, {
                         provider: providerName,
-                        hasText: !!result.
-                        textLength: result.
+                        hasText: !!result.content,
+                        textLength: result.content?.length || 0,
                     });
                     // Continue to next provider if available
                     throw new Error(`Empty response from ${providerName}`);
```
```diff
@@ -264,9 +357,15 @@
                     usage: result.usage,
                 });
                 return {
-                    content: result.
+                    content: result.content,
                     provider: providerName,
-                    usage: result.usage
+                    usage: result.usage
+                        ? {
+                            promptTokens: result.usage.inputTokens || 0,
+                            completionTokens: result.usage.outputTokens || 0,
+                            totalTokens: result.usage.totalTokens || 0,
+                        }
+                        : undefined,
                     responseTime,
                     // NEW: Preserve enhancement data from provider
                     ...(result.analytics && { analytics: result.analytics }),
```
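Both `generate()` and `generateRegular()` translate between the provider-level usage shape (`inputTokens`/`outputTokens`) and the legacy shape (`promptTokens`/`completionTokens`), defaulting missing counts to 0. A small sketch of that mapping with illustrative type names (the published declarations in `dist/types/generate-types.d.ts` may differ):

```ts
// Illustrative type names only; not copied from the package's declaration files.
interface GenerateUsage { inputTokens: number; outputTokens: number; totalTokens: number; }
interface LegacyUsage { promptTokens: number; completionTokens: number; totalTokens: number; }

// Mirrors the conversion in the hunk above: absent usage stays undefined,
// missing counts default to 0.
function toLegacyUsage(usage?: GenerateUsage): LegacyUsage | undefined {
  if (!usage) {
    return undefined;
  }
  return {
    promptTokens: usage.inputTokens || 0,
    completionTokens: usage.outputTokens || 0,
    totalTokens: usage.totalTokens || 0,
  };
}
```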
```diff
@@ -326,90 +425,110 @@ You can mention these capabilities when they're relevant to user questions. For
 Note: Tool integration is currently in development. Please provide helpful responses based on your knowledge while mentioning tool capabilities when relevant.`;
     }
     /**
-     *
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
      */
-    async
-        const
-
-
-
-            "
-            "
-            "
-            "azure",
-            "google-ai",
-            "huggingface",
-            "ollama",
-        ];
-        const requestedProvider = options.provider === "auto" ? undefined : options.provider;
-        // If specific provider requested, only use that provider (no fallback)
-        const tryProviders = requestedProvider
-            ? [requestedProvider] // Only use the requested provider, no fallback
-            : providerPriority;
-        logger.debug(`[${functionTag}] Starting stream generation`, {
-            requestedProvider: requestedProvider || "auto",
-            tryProviders,
-            allowFallback: !requestedProvider,
-            promptLength: options.prompt.length,
-        });
-        let lastError = null;
-        for (const providerName of tryProviders) {
-            try {
-                logger.debug(`[${functionTag}] Attempting provider`, {
-                    provider: providerName,
-                });
-                const provider = await AIProviderFactory.createProvider(providerName, options.model, false);
-                const result = await provider.streamText({
-                    prompt: options.prompt,
-                    model: options.model,
-                    temperature: options.temperature,
-                    maxTokens: options.maxTokens,
-                    systemPrompt: options.systemPrompt,
-                    timeout: options.timeout,
-                });
-                if (!result) {
-                    throw new Error("No stream response received from AI provider");
-                }
-                logger.debug(`[${functionTag}] Provider succeeded`, {
-                    provider: providerName,
-                });
-                // Convert the AI SDK stream to our expected format
-                async function* convertStream() {
-                    if (result && result.textStream) {
-                        for await (const chunk of result.textStream) {
-                            yield { content: chunk };
-                        }
-                    }
-                }
-                return convertStream();
-            }
-            catch (error) {
-                const errorMessage = error instanceof Error ? error.message : String(error);
-                lastError = error instanceof Error ? error : new Error(errorMessage);
-                // Special handling for timeout errors
-                if (error instanceof TimeoutError) {
-                    logger.warn(`[${functionTag}] Provider timed out`, {
-                        provider: providerName,
-                        timeout: error.timeout,
-                        operation: error.operation,
-                    });
-                }
-                logger.debug(`[${functionTag}] Provider failed, trying next`, {
-                    provider: providerName,
-                    error: errorMessage,
-                    isTimeout: error instanceof TimeoutError,
-                    remainingProviders: tryProviders.slice(tryProviders.indexOf(providerName) + 1),
-                });
-                // Continue to next provider
-                continue;
-            }
+    async stream(options) {
+        const startTime = Date.now();
+        const functionTag = "NeuroLink.stream";
+        // Validate input
+        if (!options?.input?.text ||
+            typeof options.input.text !== "string" ||
+            options.input.text.trim() === "") {
+            throw new Error("Stream options must include input.text as a non-empty string");
         }
-        //
-
-
-
+        // Initialize MCP if needed
+        await this.initializeMCP();
+        // Create execution context for tool operations
+        const context = this.contextManager.createContext({
+            sessionId: `neurolink-stream-${Date.now()}`,
+            userId: "neurolink-user",
+            aiProvider: options.provider || "auto",
         });
-
+        // Determine provider to use
+        const providerName = options.provider === "auto" || !options.provider
+            ? await getBestProvider()
+            : options.provider;
+        try {
+            mcpLogger.debug(`[${functionTag}] Starting MCP-enabled streaming`, {
+                provider: providerName,
+                prompt: (options.input.text?.substring(0, 100) || "No text") + "...",
+                contextId: context.sessionId,
+            });
+            // Create provider using the same factory pattern as generate
+            const provider = await AIProviderFactory.createBestProvider(providerName, options.model, true);
+            // Call the provider's stream method directly
+            const streamResult = await provider.stream(options);
+            // Extract the stream from the result
+            const stream = streamResult.stream;
+            const responseTime = Date.now() - startTime;
+            mcpLogger.debug(`[${functionTag}] MCP-enabled streaming completed`, {
+                responseTime,
+                provider: providerName,
+            });
+            // Convert to StreamResult format
+            return {
+                stream,
+                provider: providerName,
+                model: options.model,
+                metadata: {
+                    streamId: `neurolink-${Date.now()}`,
+                    startTime,
+                },
+            };
+        }
+        catch (error) {
+            // Fall back to regular streaming if MCP fails
+            mcpLogger.warn(`[${functionTag}] MCP streaming failed, falling back to regular`, {
+                error: error instanceof Error ? error.message : String(error),
+            });
+            // Create provider and use regular streaming
+            const provider = await AIProviderFactory.createBestProvider(providerName, options.model, true);
+            // Call the provider's stream method directly
+            const streamResult = await provider.stream(options);
+            // Extract the stream from the result
+            const stream = streamResult.stream;
+            return {
+                stream,
+                provider: providerName,
+                model: options.model,
+                metadata: {
+                    streamId: `neurolink-fallback-${Date.now()}`,
+                    startTime,
+                },
+            };
+        }
+    }
+    /**
+     * BACKWARD COMPATIBILITY: Legacy streamText method
+     * Internally calls stream() and converts result format
+     */
+    async streamText(options) {
+        // Validate required parameters for backward compatibility
+        if (!options.prompt ||
+            typeof options.prompt !== "string" ||
+            options.prompt.trim() === "") {
+            throw new Error("StreamText options must include prompt as a non-empty string");
+        }
+        // Convert legacy options to StreamOptions
+        const streamOptions = {
+            input: { text: options.prompt },
+            provider: options.provider,
+            model: options.model,
+            temperature: options.temperature,
+            maxTokens: options.maxTokens,
+            systemPrompt: options.systemPrompt,
+            timeout: options.timeout,
+        };
+        try {
+            // Use new stream method for identical performance and behavior
+            const streamResult = await this.stream(streamOptions);
+            // Return just the stream for backward compatibility
+            return streamResult.stream;
+        }
+        catch (error) {
+            throw new Error(`StreamText compatibility method failed: ${error}`);
+        }
     }
     /**
      * Get the best available AI provider
```
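The hunk above replaces the old provider-fallback `streamText()` loop with a primary `stream()` method that resolves to a `StreamResult` (`{ stream, provider, model, metadata }`), keeping `streamText()` as a wrapper that returns only the stream. A consumption sketch; the chunk shape `{ content }` is assumed from the removed 4.x converter, and the root export is an assumption:

```ts
import { NeuroLink } from "@juspay/neurolink"; // assumed root export

async function demoStreaming(): Promise<void> {
  const neurolink = new NeuroLink();

  // 5.x primary API: StreamResult carries the stream plus provider metadata.
  const { stream, provider } = await neurolink.stream({
    input: { text: "Write a haiku about type safety." },
  });
  console.log("streaming from", provider);
  for await (const chunk of stream) {
    process.stdout.write(chunk.content ?? ""); // chunk shape assumed: { content: string }
  }

  // 4.x-style streamText() still works and returns just the stream.
  const legacyStream = await neurolink.streamText({
    prompt: "Write a haiku about type safety.",
  });
  for await (const chunk of legacyStream) {
    process.stdout.write(chunk.content ?? "");
  }
}
```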
```diff
@@ -423,14 +542,14 @@ Note: Tool integration is currently in development. Please provide helpful respo
     async testProvider(providerName, testPrompt = "test") {
         try {
             const provider = await AIProviderFactory.createProvider(providerName, null, false); // Disable MCP for simple testing
-            await provider.
+            await provider.generate({
                 prompt: testPrompt,
                 enableAnalytics: false,
                 enableEvaluation: false,
             });
             return true;
         }
-        catch
+        catch {
             return false;
         }
     }
```
```diff
@@ -553,15 +672,7 @@ Note: Tool integration is currently in development. Please provide helpful respo
         }
     }
     /**
-     *
-     * @param options - Text generation options
-     * @returns Promise resolving to text generation result
-     */
-    async generate(options) {
-        return this.generateText(options);
-    }
-    /**
-     * Short alias for generateText() - CLI-SDK consistency
+     * Short alias for generate() - CLI-SDK consistency
      * @param options - Text generation options
      * @returns Promise resolving to text generation result
      */
```
```diff
@@ -2,8 +2,9 @@
  * Agent-Enhanced Provider for NeuroLink CLI
  * Integrates direct tools with AI providers for true agent functionality
  */
-import
-import type { AIProvider, TextGenerationOptions,
+import type { GenerateResult } from "../types/generate-types.js";
+import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
+import type { StreamOptions, StreamResult } from "../types/stream-types.js";
 /**
  * Agent configuration options
  */
```
```diff
@@ -45,8 +46,12 @@ export declare class AgentEnhancedProvider implements AIProvider {
      * Get combined tools: direct tools + MCP tools
      */
     private getCombinedTools;
-
-
+    /**
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
+     */
+    stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: any): Promise<StreamResult>;
+    generate(optionsOrPrompt: TextGenerationOptions | string): Promise<GenerateResult>;
     /**
      * Determine if we should force tool usage based on prompt patterns
      */
```
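The declarations above accept either a full options object or a bare prompt string. A call-shape sketch; how an `AgentEnhancedProvider` instance is obtained, and whether the class is re-exported from the package root, are assumptions not shown in this diff:

```ts
import type { AgentEnhancedProvider } from "@juspay/neurolink"; // assumed type re-export

async function runAgent(agent: AgentEnhancedProvider): Promise<void> {
  // The union parameter type accepts a bare prompt string...
  const fromString = await agent.generate("Check disk usage and summarize it.");
  // ...or a TextGenerationOptions object.
  const fromOptions = await agent.generate({ prompt: "Check disk usage and summarize it." });
  console.log(fromString.content, fromOptions.content);

  // stream() resolves to a StreamResult, mirroring the NeuroLink class above.
  const streamed = await agent.stream("Stream the same summary.");
  console.log(streamed.provider);
}
```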
```diff
@@ -70,13 +75,12 @@ export declare class AgentEnhancedProvider implements AIProvider {
      */
     static createMultiProviderAgents(): Record<string, AgentEnhancedProvider>;
     /**
-     * Alias for
+     * Alias for generate() - CLI-SDK consistency
      */
-    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
     /**
-     * Short alias for
+     * Short alias for generate() - CLI-SDK consistency
      */
-    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateResult | null>;
 }
 /**
  * Helper function to create agent provider
```