@juspay/neurolink 5.0.0 → 5.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/README.md +51 -60
- package/dist/chat/sse-handler.js +5 -4
- package/dist/chat/websocket-chat-handler.js +9 -9
- package/dist/cli/commands/mcp.js +1 -1
- package/dist/cli/commands/ollama.js +3 -3
- package/dist/cli/factories/command-factory.d.ts +14 -0
- package/dist/cli/factories/command-factory.js +129 -0
- package/dist/cli/index.js +27 -26
- package/dist/cli/utils/interactive-setup.js +2 -2
- package/dist/core/evaluation.d.ts +9 -9
- package/dist/core/evaluation.js +14 -14
- package/dist/core/types.d.ts +41 -48
- package/dist/core/types.js +1 -0
- package/dist/factories/compatibility-factory.d.ts +20 -0
- package/dist/factories/compatibility-factory.js +69 -0
- package/dist/factories/provider-generate-factory.d.ts +20 -0
- package/dist/factories/provider-generate-factory.js +87 -0
- package/dist/index.d.ts +4 -2
- package/dist/index.js +3 -1
- package/dist/lib/chat/sse-handler.js +5 -4
- package/dist/lib/chat/websocket-chat-handler.js +9 -9
- package/dist/lib/core/evaluation.d.ts +9 -9
- package/dist/lib/core/evaluation.js +14 -14
- package/dist/lib/core/types.d.ts +41 -48
- package/dist/lib/core/types.js +1 -0
- package/dist/lib/factories/compatibility-factory.d.ts +20 -0
- package/dist/lib/factories/compatibility-factory.js +69 -0
- package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
- package/dist/lib/factories/provider-generate-factory.js +87 -0
- package/dist/lib/index.d.ts +4 -2
- package/dist/lib/index.js +3 -1
- package/dist/lib/mcp/client.js +5 -5
- package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
- package/dist/lib/mcp/external-client.js +2 -2
- package/dist/lib/mcp/factory.d.ts +1 -1
- package/dist/lib/mcp/factory.js +1 -1
- package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
- package/dist/lib/mcp/orchestrator.js +4 -4
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +5 -5
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
- package/dist/lib/neurolink.d.ts +21 -73
- package/dist/lib/neurolink.js +230 -119
- package/dist/lib/providers/agent-enhanced-provider.d.ts +12 -8
- package/dist/lib/providers/agent-enhanced-provider.js +87 -96
- package/dist/lib/providers/amazonBedrock.d.ts +17 -8
- package/dist/lib/providers/amazonBedrock.js +60 -30
- package/dist/lib/providers/anthropic.d.ts +14 -10
- package/dist/lib/providers/anthropic.js +84 -154
- package/dist/lib/providers/azureOpenAI.d.ts +9 -6
- package/dist/lib/providers/azureOpenAI.js +70 -159
- package/dist/lib/providers/function-calling-provider.d.ts +14 -12
- package/dist/lib/providers/function-calling-provider.js +114 -64
- package/dist/lib/providers/googleAIStudio.d.ts +12 -19
- package/dist/lib/providers/googleAIStudio.js +65 -34
- package/dist/lib/providers/googleVertexAI.d.ts +11 -15
- package/dist/lib/providers/googleVertexAI.js +146 -118
- package/dist/lib/providers/huggingFace.d.ts +10 -11
- package/dist/lib/providers/huggingFace.js +61 -24
- package/dist/lib/providers/mcp-provider.d.ts +13 -8
- package/dist/lib/providers/mcp-provider.js +59 -18
- package/dist/lib/providers/mistralAI.d.ts +14 -11
- package/dist/lib/providers/mistralAI.js +60 -29
- package/dist/lib/providers/ollama.d.ts +9 -8
- package/dist/lib/providers/ollama.js +134 -91
- package/dist/lib/providers/openAI.d.ts +11 -12
- package/dist/lib/providers/openAI.js +132 -97
- package/dist/lib/types/generate-types.d.ts +79 -0
- package/dist/lib/types/generate-types.js +1 -0
- package/dist/lib/types/stream-types.d.ts +83 -0
- package/dist/lib/types/stream-types.js +1 -0
- package/dist/lib/utils/providerUtils-fixed.js +1 -1
- package/dist/lib/utils/streaming-utils.d.ts +14 -2
- package/dist/lib/utils/streaming-utils.js +0 -3
- package/dist/mcp/client.js +5 -5
- package/dist/mcp/dynamic-orchestrator.js +8 -8
- package/dist/mcp/external-client.js +2 -2
- package/dist/mcp/factory.d.ts +1 -1
- package/dist/mcp/factory.js +1 -1
- package/dist/mcp/neurolink-mcp-client.js +10 -10
- package/dist/mcp/orchestrator.js +4 -4
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
- package/dist/mcp/servers/ai-providers/ai-core-server.js +5 -5
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
- package/dist/neurolink.d.ts +21 -73
- package/dist/neurolink.js +230 -119
- package/dist/providers/agent-enhanced-provider.d.ts +12 -8
- package/dist/providers/agent-enhanced-provider.js +87 -95
- package/dist/providers/amazonBedrock.d.ts +17 -8
- package/dist/providers/amazonBedrock.js +60 -30
- package/dist/providers/anthropic.d.ts +14 -10
- package/dist/providers/anthropic.js +84 -154
- package/dist/providers/azureOpenAI.d.ts +9 -6
- package/dist/providers/azureOpenAI.js +70 -159
- package/dist/providers/function-calling-provider.d.ts +14 -12
- package/dist/providers/function-calling-provider.js +114 -64
- package/dist/providers/googleAIStudio.d.ts +12 -19
- package/dist/providers/googleAIStudio.js +65 -34
- package/dist/providers/googleVertexAI.d.ts +11 -15
- package/dist/providers/googleVertexAI.js +146 -118
- package/dist/providers/huggingFace.d.ts +10 -11
- package/dist/providers/huggingFace.js +61 -24
- package/dist/providers/mcp-provider.d.ts +13 -8
- package/dist/providers/mcp-provider.js +59 -18
- package/dist/providers/mistralAI.d.ts +14 -11
- package/dist/providers/mistralAI.js +60 -29
- package/dist/providers/ollama.d.ts +9 -8
- package/dist/providers/ollama.js +133 -90
- package/dist/providers/openAI.d.ts +11 -12
- package/dist/providers/openAI.js +132 -97
- package/dist/types/generate-types.d.ts +79 -0
- package/dist/types/generate-types.js +1 -0
- package/dist/types/stream-types.d.ts +83 -0
- package/dist/types/stream-types.js +1 -0
- package/dist/utils/providerUtils-fixed.js +1 -1
- package/dist/utils/streaming-utils.d.ts +14 -2
- package/dist/utils/streaming-utils.js +0 -3
- package/package.json +1 -1
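
The bulk of this release renames the provider surface: `generateText()` becomes `generate()` (returning a `GenerateResult`) and `streamText()` becomes `stream()` (returning a `StreamResult` whose chunks arrive as `{ content }` objects), with `gen()` kept as a short alias. A minimal sketch of the new shapes, reconstructed from the hunks below — the structural types here are assumptions, since the new `generate-types.d.ts` and `stream-types.d.ts` files are listed but not displayed in this diff:

```ts
// Structural types mirroring the result shapes used in the hunks below.
// ASSUMPTION: the real exports live in dist/types/generate-types.d.ts and
// dist/types/stream-types.d.ts, which this diff lists but does not show.
interface GenerateResult {
  content: string;
  provider: string;
  model: string;
  usage?: { inputTokens: number; outputTokens: number; totalTokens: number };
  responseTime?: number;
}

interface StreamResult {
  stream: AsyncIterable<{ content: string }>;
  provider: string;
  model: string;
  metadata?: { streamId: string; startTime: number };
}

// Any 5.1.0 provider instance (GoogleVertexAI, HuggingFace, MCPAwareProvider, ...)
// now exposes this pair of primary methods.
interface ProviderLike {
  generate(optionsOrPrompt: string | object): Promise<GenerateResult>;
  stream(optionsOrPrompt: string | object): Promise<StreamResult>;
}

async function demo(provider: ProviderLike): Promise<void> {
  // generate() replaces generateText() and resolves to a structured result
  const result = await provider.generate("Summarize this release");
  console.log(result.content, result.usage?.totalTokens, result.responseTime);

  // stream() replaces streamText(); note the new { input: { text } } options shape
  const { stream } = await provider.stream({ input: { text: "Tell me a story" } });
  for await (const chunk of stream) {
    process.stdout.write(chunk.content);
  }
}
```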
@@ -294,123 +294,14 @@ export class GoogleVertexAI {
         const vertex = await getVertexInstance();
         return vertex(this.modelName);
     }
-    /**
-     * Processes text using streaming approach with enhanced error handling callbacks
-     * @param prompt - The input text prompt to analyze
-     * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to StreamTextResult or null if operation fails
-     */
-    async streamText(optionsOrPrompt, analysisSchema) {
-        const functionTag = "GoogleVertexAI.streamText";
-        const provider = "vertex";
-        let chunkCount = 0;
-        try {
-            // Parse parameters - support both string and options object
-            const options = typeof optionsOrPrompt === "string"
-                ? { prompt: optionsOrPrompt }
-                : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
-            // Use schema from options or fallback parameter
-            const finalSchema = schema || analysisSchema;
-            logger.debug(`[${functionTag}] Stream request started`, {
-                provider,
-                modelName: this.modelName,
-                isAnthropic: isAnthropicModel(this.modelName),
-                promptLength: prompt.length,
-                temperature,
-                maxTokens,
-                hasSchema: !!finalSchema,
-                timeout,
-            });
-            const model = await this.getModel();
-            // Create timeout controller if timeout is specified
-            const timeoutController = createTimeoutController(timeout, provider, "stream");
-            const streamOptions = {
-                model: model,
-                prompt: prompt,
-                system: systemPrompt,
-                temperature,
-                maxTokens,
-                // Add abort signal if available
-                ...(timeoutController && {
-                    abortSignal: timeoutController.controller.signal,
-                }),
-                onError: (event) => {
-                    const error = event.error;
-                    const errorMessage = error instanceof Error ? error.message : String(error);
-                    const errorStack = error instanceof Error ? error.stack : undefined;
-                    logger.error(`[${functionTag}] Stream text error`, {
-                        provider,
-                        modelName: this.modelName,
-                        error: errorMessage,
-                        stack: errorStack,
-                        promptLength: prompt.length,
-                        chunkCount,
-                    });
-                },
-                onFinish: (event) => {
-                    logger.debug(`[${functionTag}] Stream text finished`, {
-                        provider,
-                        modelName: this.modelName,
-                        finishReason: event.finishReason,
-                        usage: event.usage,
-                        totalChunks: chunkCount,
-                        promptLength: prompt.length,
-                        responseLength: event.text?.length || 0,
-                    });
-                },
-                onChunk: (event) => {
-                    chunkCount++;
-                    logger.debug(`[${functionTag}] Stream text chunk`, {
-                        provider,
-                        modelName: this.modelName,
-                        chunkNumber: chunkCount,
-                        chunkLength: event.chunk.text?.length || 0,
-                        chunkType: event.chunk.type,
-                    });
-                },
-            };
-            if (analysisSchema) {
-                streamOptions.experimental_output = Output.object({
-                    schema: analysisSchema,
-                });
-            }
-            const result = streamText(streamOptions);
-            // For streaming, we can't clean up immediately, but the timeout will auto-clean
-            // The user should handle the stream and any timeout errors
-            return result;
-        }
-        catch (err) {
-            // Log timeout errors specifically
-            if (err instanceof TimeoutError) {
-                logger.error(`[${functionTag}] Timeout error`, {
-                    provider,
-                    modelName: this.modelName,
-                    isAnthropic: isAnthropicModel(this.modelName),
-                    timeout: err.timeout,
-                    message: err.message,
-                });
-            }
-            else {
-                logger.error(`[${functionTag}] Exception`, {
-                    provider,
-                    modelName: this.modelName,
-                    message: "Error in streaming text",
-                    err: String(err),
-                    promptLength: prompt.length,
-                });
-            }
-            throw err; // Re-throw error to trigger fallback
-        }
-    }
     /**
      * Processes text using non-streaming approach with optional schema validation
      * @param prompt - The input text prompt to analyze
      * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to
+     * @returns Promise resolving to GenerateResult or null if operation fails
      */
-    async
-        const functionTag = "GoogleVertexAI.
+    async generate(optionsOrPrompt, analysisSchema) {
+        const functionTag = "GoogleVertexAI.generate";
         const provider = "vertex";
         const startTime = Date.now();
         try {
@@ -475,7 +366,19 @@ export class GoogleVertexAI {
             if (options.enableEvaluation) {
                 result.evaluation = await evaluateResponse(prompt, result.text, options.context);
             }
-            return
+            return {
+                content: result.text,
+                provider: "vertex",
+                model: this.modelName,
+                usage: result.usage
+                    ? {
+                        inputTokens: result.usage.promptTokens,
+                        outputTokens: result.usage.completionTokens,
+                        totalTokens: result.usage.totalTokens,
+                    }
+                    : undefined,
+                responseTime: Date.now() - startTime,
+            };
         }
         finally {
             // Always cleanup timeout
@@ -505,15 +408,140 @@ export class GoogleVertexAI {
         }
     }
     /**
-     *
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
      */
-    async
-
+    async stream(optionsOrPrompt, analysisSchema) {
+        const functionTag = "GoogleVertexAI.stream";
+        const startTime = Date.now();
+        try {
+            // Parse parameters - support both string and options object
+            const options = typeof optionsOrPrompt === "string"
+                ? { input: { text: optionsOrPrompt } }
+                : optionsOrPrompt;
+            // Validate input
+            if (!options?.input?.text ||
+                typeof options.input.text !== "string" ||
+                options.input.text.trim() === "") {
+                throw new Error("Stream options must include input.text as a non-empty string");
+            }
+            logger.debug(`[${functionTag}] Stream request started`, {
+                provider: "vertex",
+                modelName: this.modelName,
+                isAnthropic: isAnthropicModel(this.modelName),
+                promptLength: options.input.text.length,
+                hasSchema: !!analysisSchema,
+            });
+            // Convert StreamOptions for internal use
+            const convertedOptions = {
+                prompt: options.input.text,
+                provider: options.provider,
+                model: options.model,
+                temperature: options.temperature,
+                maxTokens: options.maxTokens,
+                systemPrompt: options.systemPrompt,
+                timeout: options.timeout,
+                schema: options.schema,
+                tools: options.tools,
+            };
+            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout("vertex", "stream"), } = convertedOptions;
+            // Use schema from options or fallback parameter
+            const finalSchema = schema || analysisSchema;
+            logger.debug(`[${functionTag}] Stream request details`, {
+                provider: "vertex",
+                modelName: this.modelName,
+                isAnthropic: isAnthropicModel(this.modelName),
+                promptLength: prompt.length,
+                temperature,
+                maxTokens,
+                hasSchema: !!finalSchema,
+                timeout,
+            });
+            const model = await this.getModel();
+            // Create timeout controller if timeout is specified
+            const timeoutController = createTimeoutController(timeout, "vertex", "stream");
+            let chunkCount = 0;
+            const streamOptions = {
+                model: model,
+                prompt: prompt,
+                system: systemPrompt,
+                temperature,
+                maxTokens,
+                // Add abort signal if available
+                ...(timeoutController && {
+                    abortSignal: timeoutController.controller.signal,
+                }),
+                onError: (event) => {
+                    const error = event.error;
+                    const errorMessage = error instanceof Error ? error.message : String(error);
+                    const errorStack = error instanceof Error ? error.stack : undefined;
+                    logger.error(`[${functionTag}] Stream error`, {
+                        provider: "vertex",
+                        modelName: this.modelName,
+                        error: errorMessage,
+                        stack: errorStack,
+                        promptLength: prompt.length,
+                        chunkCount,
+                    });
+                },
+                onFinish: (event) => {
+                    logger.debug(`[${functionTag}] Stream finished`, {
+                        provider: "vertex",
+                        modelName: this.modelName,
+                        finishReason: event.finishReason,
+                        usage: event.usage,
+                        totalChunks: chunkCount,
+                        promptLength: prompt.length,
+                        responseLength: event.text?.length || 0,
+                    });
+                },
+                onChunk: (event) => {
+                    chunkCount++;
+                    logger.debug(`[${functionTag}] Stream chunk`, {
+                        provider: "vertex",
+                        modelName: this.modelName,
+                        chunkNumber: chunkCount,
+                        chunkLength: event.chunk.text?.length || 0,
+                        chunkType: event.chunk.type,
+                    });
+                },
+            };
+            if (finalSchema) {
+                streamOptions.experimental_output = Output.object({
+                    schema: finalSchema,
+                });
+            }
+            const result = streamText(streamOptions);
+            // Convert to StreamResult format
+            return {
+                stream: (async function* () {
+                    for await (const chunk of result.textStream) {
+                        yield { content: chunk };
+                    }
+                })(),
+                provider: "vertex",
+                model: this.modelName,
+                metadata: {
+                    streamId: `vertex-${Date.now()}`,
+                    startTime,
+                },
+            };
+        }
+        catch (err) {
+            logger.error(`[${functionTag}] Exception`, {
+                provider: "vertex",
+                modelName: this.modelName,
+                isAnthropic: isAnthropicModel(this.modelName),
+                message: "Error in streaming text",
+                err: String(err),
+            });
+            throw err; // Re-throw error to trigger fallback
+        }
     }
     /**
-     * Short alias for
+     * Short alias for generate() - CLI-SDK consistency
      */
     async gen(optionsOrPrompt, analysisSchema) {
-        return this.
+        return this.generate(optionsOrPrompt, analysisSchema);
     }
 }
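
The new `stream()` above no longer returns the raw AI SDK result: it wraps `result.textStream` in an inline async generator so every chunk reaches the caller as `{ content }`. The same adapter in isolation (a sketch; the chunk shape is taken directly from the hunk above):

```ts
// Wrap a plain async iterable of strings (like the AI SDK's textStream)
// into the { content } chunk objects that StreamResult.stream yields.
async function* adaptTextStream(
  textStream: AsyncIterable<string>,
): AsyncGenerator<{ content: string }> {
  for await (const chunk of textStream) {
    yield { content: chunk };
  }
}
```

One behavioral change visible in the hunk: the old `streamText()` caught `TimeoutError` separately, while the new `stream()` logs a single generic exception before re-throwing to trigger provider fallback.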
@@ -1,6 +1,8 @@
 import type { ZodType, ZodTypeDef } from "zod";
-import { type
-import type {
+import { type Schema } from "ai";
+import type { GenerateResult } from "../types/generate-types.js";
+import type { StreamOptions, StreamResult } from "../types/stream-types.js";
+import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
 export declare class HuggingFace implements AIProvider {
     private modelName;
     private client;
@@ -15,19 +17,16 @@ export declare class HuggingFace implements AIProvider {
      */
     private getModel;
     /**
-     *
-     *
-     * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to StreamTextResult or null if operation fails
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
      */
-
+    stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
     /**
      * Processes text using non-streaming approach with optional schema validation
      * @param prompt - The input text prompt to analyze
      * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to
+     * @returns Promise resolving to GenerateResult or null if operation fails
      */
-
-
-    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateResult>;
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateResult | null>;
 }
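
The declaration now types `analysisSchema` as `ZodType | Schema` instead of `any` on the primary methods. A hedged sketch of a typed call against that signature (the `HuggingFace` constructor is not part of this diff, so the instance is declared rather than constructed):

```ts
import { z } from "zod";

// Declared stand-in for a HuggingFace instance; construction is out of scope here.
declare const hf: {
  generate(
    optionsOrPrompt: string | { prompt: string },
    analysisSchema?: z.ZodType<unknown>,
  ): Promise<{ content: string; provider: string; model: string }>;
};

async function classify(): Promise<void> {
  // A Zod schema is now accepted (and type-checked) as the second argument.
  const sentiment = z.object({ label: z.string(), score: z.number() });
  const res = await hf.generate("Classify: great package!", sentiment);
  console.log(res.provider, res.content);
}
```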
@@ -217,21 +217,38 @@ export class HuggingFace {
         return new HuggingFaceLanguageModel(this.modelName, this.client);
     }
     /**
-     *
-     *
-     * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to StreamTextResult or null if operation fails
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
      */
-    async
-        const functionTag = "HuggingFace.
+    async stream(optionsOrPrompt, analysisSchema) {
+        const functionTag = "HuggingFace.stream";
         const provider = "huggingface";
         let chunkCount = 0;
+        const startTime = Date.now();
         try {
             // Parse parameters - support both string and options object
             const options = typeof optionsOrPrompt === "string"
-                ? {
+                ? { input: { text: optionsOrPrompt } }
                 : optionsOrPrompt;
-
+            // Validate input
+            if (!options?.input?.text ||
+                typeof options.input.text !== "string" ||
+                options.input.text.trim() === "") {
+                throw new Error("Stream options must include input.text as a non-empty string");
+            }
+            // Convert StreamOptions for internal use
+            const convertedOptions = {
+                prompt: options.input.text,
+                provider: options.provider,
+                model: options.model,
+                temperature: options.temperature,
+                maxTokens: options.maxTokens,
+                systemPrompt: options.systemPrompt,
+                timeout: options.timeout,
+                schema: options.schema,
+                tools: options.tools,
+            };
+            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = convertedOptions;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
             logger.debug(`[${functionTag}] Stream request started`, {
@@ -260,7 +277,7 @@ export class HuggingFace {
                     const error = event.error;
                     const errorMessage = error instanceof Error ? error.message : String(error);
                     const errorStack = error instanceof Error ? error.stack : undefined;
-                    logger.error(`[${functionTag}] Stream
+                    logger.error(`[${functionTag}] Stream error`, {
                         provider,
                         modelName: this.modelName,
                         error: errorMessage,
@@ -270,7 +287,7 @@ export class HuggingFace {
                     });
                 },
                 onFinish: (event) => {
-                    logger.debug(`[${functionTag}] Stream
+                    logger.debug(`[${functionTag}] Stream finished`, {
                         provider,
                         modelName: this.modelName,
                         finishReason: event.finishReason,
@@ -282,7 +299,7 @@ export class HuggingFace {
                 },
                 onChunk: (event) => {
                     chunkCount++;
-                    logger.debug(`[${functionTag}] Stream
+                    logger.debug(`[${functionTag}] Stream chunk`, {
                         provider,
                         modelName: this.modelName,
                         chunkNumber: chunkCount,
@@ -297,9 +314,20 @@ export class HuggingFace {
                 });
             }
             const result = streamText(streamOptions);
-            //
-
-
+            // Convert to StreamResult format
+            return {
+                stream: (async function* () {
+                    for await (const chunk of result.textStream) {
+                        yield { content: chunk };
+                    }
+                })(),
+                provider: "huggingface",
+                model: this.modelName,
+                metadata: {
+                    streamId: `huggingface-${Date.now()}`,
+                    startTime,
+                },
+            };
         }
         catch (err) {
             // Log timeout errors specifically
@@ -315,11 +343,11 @@ export class HuggingFace {
                 logger.error(`[${functionTag}] Exception`, {
                     provider,
                     modelName: this.modelName,
-                    message: "Error in streaming
+                    message: "Error in streaming content",
                     err: String(err),
                     promptLength: typeof optionsOrPrompt === "string"
                         ? optionsOrPrompt.length
-                        : optionsOrPrompt.
+                        : optionsOrPrompt.input?.text?.length || 0,
                 });
             }
             throw err; // Re-throw error to trigger fallback
@@ -329,10 +357,10 @@ export class HuggingFace {
      * Processes text using non-streaming approach with optional schema validation
      * @param prompt - The input text prompt to analyze
      * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to
+     * @returns Promise resolving to GenerateResult or null if operation fails
      */
-    async
-        const functionTag = "HuggingFace.
+    async generate(optionsOrPrompt, analysisSchema) {
+        const functionTag = "HuggingFace.generate";
         const provider = "huggingface";
         const startTime = Date.now();
         try {
@@ -396,7 +424,19 @@ export class HuggingFace {
             if (options.enableEvaluation) {
                 result.evaluation = await evaluateResponse(prompt, result.text, options.context);
             }
-            return
+            return {
+                content: result.text,
+                provider: "huggingface",
+                model: this.modelName,
+                usage: result.usage
+                    ? {
+                        inputTokens: result.usage.promptTokens,
+                        outputTokens: result.usage.completionTokens,
+                        totalTokens: result.usage.totalTokens,
+                    }
+                    : undefined,
+                responseTime: Date.now() - startTime,
+            };
         }
         finally {
             // Always cleanup timeout
@@ -424,10 +464,7 @@ export class HuggingFace {
             throw err; // Re-throw error to trigger fallback
         }
     }
-    async generate(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
-    }
     async gen(optionsOrPrompt, analysisSchema) {
-        return this.
+        return this.generate(optionsOrPrompt, analysisSchema);
     }
 }
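
Both `generate()` implementations above translate the Vercel AI SDK usage fields (`promptTokens`/`completionTokens`) into the `inputTokens`/`outputTokens` naming of `GenerateResult`. The mapping as a standalone helper (illustrative only; the package inlines it as a ternary):

```ts
interface AiSdkUsage {
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
}

interface NeurolinkUsage {
  inputTokens: number;
  outputTokens: number;
  totalTokens: number;
}

// Mirror of the inline ternary in the generate() hunks above.
function toNeurolinkUsage(usage?: AiSdkUsage): NeurolinkUsage | undefined {
  return usage
    ? {
        inputTokens: usage.promptTokens, // tokens consumed by the prompt
        outputTokens: usage.completionTokens, // tokens produced by the model
        totalTokens: usage.totalTokens,
      }
    : undefined;
}
```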
@@ -2,9 +2,11 @@
  * NeuroLink MCP-Aware AI Provider
  * Integrates MCP tools with AI providers following Lighthouse's pattern
  */
-import type { AIProvider, TextGenerationOptions,
-import type {
+import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
+import type { Schema } from "ai";
+import type { GenerateResult } from "../types/generate-types.js";
 import type { ZodType, ZodTypeDef } from "zod";
+import type { StreamOptions, StreamResult } from "../types/stream-types.js";
 /**
  * MCP-Aware Provider Configuration
  */
@@ -31,8 +33,12 @@ export declare class MCPAwareProvider implements AIProvider {
      * Initialize MCP tools for this session
      */
     private initializeMCP;
-
-
+    /**
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
+     */
+    stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: any): Promise<StreamResult>;
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateResult>;
     /**
      * Detect if the prompt is requesting tool usage
      */
@@ -52,13 +58,12 @@ export declare class MCPAwareProvider implements AIProvider {
      */
     cleanup(): Promise<void>;
     /**
-     * Alias for
+     * Alias for generate() - CLI-SDK consistency
      */
-    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
     /**
-     * Short alias for
+     * Short alias for generate() - CLI-SDK consistency
      */
-    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateResult | null>;
 }
 /**
  * Create an MCP-aware provider
@@ -83,7 +83,40 @@ export class MCPAwareProvider {
             // Continue without MCP tools if initialization fails
         }
     }
-
+    /**
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
+     */
+    async stream(optionsOrPrompt, analysisSchema) {
+        const functionTag = "MCPAwareProvider.stream";
+        const startTime = Date.now();
+        // Parse parameters - support both string and options object
+        const options = typeof optionsOrPrompt === "string"
+            ? { input: { text: optionsOrPrompt } }
+            : optionsOrPrompt;
+        // Validate input
+        if (!options?.input?.text ||
+            typeof options.input.text !== "string" ||
+            options.input.text.trim() === "") {
+            throw new Error("Stream options must include input.text as a non-empty string");
+        }
+        // Use base provider's stream implementation
+        const baseResult = await this.baseProvider.stream(options);
+        if (!baseResult) {
+            throw new Error("No stream response received from provider");
+        }
+        // Return the result with MCP metadata
+        return {
+            ...baseResult,
+            provider: "mcp",
+            model: options.model || "unknown",
+            metadata: {
+                streamId: `mcp-${Date.now()}`,
+                startTime,
+            },
+        };
+    }
+    async generate(optionsOrPrompt, analysisSchema) {
         // Ensure MCP is initialized
         await this.initializeMCP();
         // Parse options
@@ -114,15 +147,19 @@ PARAMS: <json_params>
 
 Otherwise, provide a direct response.`;
         // Generate response with enhanced prompt
-        const response = await this.baseProvider.
+        const response = await this.baseProvider.generate({
             ...options,
             prompt: enhancedPrompt,
         }, analysisSchema);
         if (!response) {
-            return
+            return {
+                content: "No response generated",
+                provider: "mcp",
+                model: "unknown",
+            };
         }
         // Check if response includes tool invocation
-        const toolMatch = response.
+        const toolMatch = response.content.match(/TOOL:\s*(\S+)\s*\nPARAMS:\s*({.*})/s);
         if (toolMatch) {
             const toolName = toolMatch[1];
             const toolParams = JSON.parse(toolMatch[2]);
@@ -135,12 +172,16 @@ Tool ${toolName} was executed with result:
 ${JSON.stringify(toolResult, null, 2)}
 
 Please provide a response based on this information.`;
-            const finalResponse = await this.baseProvider.
+            const finalResponse = await this.baseProvider.generate({
                 ...options,
                 prompt: finalPrompt,
             }, analysisSchema);
             if (!finalResponse) {
-                return
+                return {
+                    content: "Tool execution failed",
+                    provider: "mcp",
+                    model: "unknown",
+                };
             }
             // Return response (tool usage is tracked internally)
             return finalResponse;
@@ -148,12 +189,15 @@ Please provide a response based on this information.`;
             return response;
         }
         // Regular generation without tools
-
-
-
-
-
-
+        const result = await this.baseProvider.generate(options);
+        if (!result) {
+            return {
+                content: "Base provider returned no result",
+                provider: "mcp",
+                model: "unknown",
+            };
+        }
+        return result;
     }
     /**
      * Detect if the prompt is requesting tool usage
@@ -193,16 +237,13 @@ Please provide a response based on this information.`;
         }
     }
     /**
-     * Alias for
+     * Alias for generate() - CLI-SDK consistency
      */
-    async generate(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
-    }
     /**
-     * Short alias for
+     * Short alias for generate() - CLI-SDK consistency
      */
     async gen(optionsOrPrompt, analysisSchema) {
-        return this.
+        return this.generate(optionsOrPrompt, analysisSchema);
     }
 }
 /**
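
`MCPAwareProvider.generate()` prompts the model to reply in a `TOOL: <name>` / `PARAMS: <json>` format and detects tool requests with the regex shown in the hunk above. The detection step in isolation (a sketch; the try/catch around `JSON.parse` is an addition of this example, not something the diff shows):

```ts
interface ToolInvocation {
  tool: string;
  params: unknown;
}

// Same pattern as the diff: tool name, newline, then a JSON params object.
const TOOL_PATTERN = /TOOL:\s*(\S+)\s*\nPARAMS:\s*({.*})/s;

function parseToolInvocation(content: string): ToolInvocation | null {
  const match = content.match(TOOL_PATTERN);
  if (!match) {
    return null; // no tool requested; treat as a direct response
  }
  try {
    return { tool: match[1], params: JSON.parse(match[2]) };
  } catch {
    return null; // malformed params JSON (the package would throw here instead)
  }
}
```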