@juspay/neurolink 4.2.0 → 5.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. package/CHANGELOG.md +47 -2
  2. package/README.md +51 -60
  3. package/dist/chat/sse-handler.js +5 -4
  4. package/dist/chat/websocket-chat-handler.js +9 -9
  5. package/dist/cli/commands/mcp.js +1 -1
  6. package/dist/cli/commands/ollama.js +3 -3
  7. package/dist/cli/factories/command-factory.d.ts +14 -0
  8. package/dist/cli/factories/command-factory.js +129 -0
  9. package/dist/cli/index.js +27 -29
  10. package/dist/cli/utils/interactive-setup.js +2 -2
  11. package/dist/core/evaluation.d.ts +9 -9
  12. package/dist/core/evaluation.js +14 -14
  13. package/dist/core/types.d.ts +41 -48
  14. package/dist/core/types.js +1 -0
  15. package/dist/factories/compatibility-factory.d.ts +20 -0
  16. package/dist/factories/compatibility-factory.js +69 -0
  17. package/dist/factories/provider-generate-factory.d.ts +20 -0
  18. package/dist/factories/provider-generate-factory.js +87 -0
  19. package/dist/index.d.ts +4 -2
  20. package/dist/index.js +3 -1
  21. package/dist/lib/chat/sse-handler.js +5 -4
  22. package/dist/lib/chat/websocket-chat-handler.js +9 -9
  23. package/dist/lib/core/evaluation.d.ts +9 -9
  24. package/dist/lib/core/evaluation.js +14 -14
  25. package/dist/lib/core/types.d.ts +41 -48
  26. package/dist/lib/core/types.js +1 -0
  27. package/dist/lib/factories/compatibility-factory.d.ts +20 -0
  28. package/dist/lib/factories/compatibility-factory.js +69 -0
  29. package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
  30. package/dist/lib/factories/provider-generate-factory.js +87 -0
  31. package/dist/lib/index.d.ts +4 -2
  32. package/dist/lib/index.js +3 -1
  33. package/dist/lib/mcp/client.js +5 -5
  34. package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
  35. package/dist/lib/mcp/external-client.js +2 -2
  36. package/dist/lib/mcp/factory.d.ts +1 -1
  37. package/dist/lib/mcp/factory.js +1 -1
  38. package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
  39. package/dist/lib/mcp/orchestrator.js +4 -4
  40. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  41. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +5 -5
  42. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  43. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  44. package/dist/lib/neurolink.d.ts +21 -73
  45. package/dist/lib/neurolink.js +230 -119
  46. package/dist/lib/providers/agent-enhanced-provider.d.ts +12 -8
  47. package/dist/lib/providers/agent-enhanced-provider.js +87 -96
  48. package/dist/lib/providers/amazonBedrock.d.ts +17 -8
  49. package/dist/lib/providers/amazonBedrock.js +60 -30
  50. package/dist/lib/providers/anthropic.d.ts +14 -10
  51. package/dist/lib/providers/anthropic.js +84 -154
  52. package/dist/lib/providers/azureOpenAI.d.ts +9 -6
  53. package/dist/lib/providers/azureOpenAI.js +70 -159
  54. package/dist/lib/providers/function-calling-provider.d.ts +14 -12
  55. package/dist/lib/providers/function-calling-provider.js +114 -64
  56. package/dist/lib/providers/googleAIStudio.d.ts +12 -19
  57. package/dist/lib/providers/googleAIStudio.js +65 -34
  58. package/dist/lib/providers/googleVertexAI.d.ts +11 -15
  59. package/dist/lib/providers/googleVertexAI.js +146 -118
  60. package/dist/lib/providers/huggingFace.d.ts +10 -11
  61. package/dist/lib/providers/huggingFace.js +61 -24
  62. package/dist/lib/providers/mcp-provider.d.ts +13 -8
  63. package/dist/lib/providers/mcp-provider.js +59 -18
  64. package/dist/lib/providers/mistralAI.d.ts +14 -11
  65. package/dist/lib/providers/mistralAI.js +60 -29
  66. package/dist/lib/providers/ollama.d.ts +9 -8
  67. package/dist/lib/providers/ollama.js +134 -91
  68. package/dist/lib/providers/openAI.d.ts +11 -12
  69. package/dist/lib/providers/openAI.js +132 -97
  70. package/dist/lib/types/generate-types.d.ts +79 -0
  71. package/dist/lib/types/generate-types.js +1 -0
  72. package/dist/lib/types/stream-types.d.ts +83 -0
  73. package/dist/lib/types/stream-types.js +1 -0
  74. package/dist/lib/utils/providerUtils-fixed.js +1 -1
  75. package/dist/lib/utils/streaming-utils.d.ts +14 -2
  76. package/dist/lib/utils/streaming-utils.js +0 -3
  77. package/dist/mcp/client.js +5 -5
  78. package/dist/mcp/dynamic-orchestrator.js +8 -8
  79. package/dist/mcp/external-client.js +2 -2
  80. package/dist/mcp/factory.d.ts +1 -1
  81. package/dist/mcp/factory.js +1 -1
  82. package/dist/mcp/neurolink-mcp-client.js +10 -10
  83. package/dist/mcp/orchestrator.js +4 -4
  84. package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  85. package/dist/mcp/servers/ai-providers/ai-core-server.js +5 -5
  86. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  87. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  88. package/dist/neurolink.d.ts +21 -73
  89. package/dist/neurolink.js +230 -119
  90. package/dist/providers/agent-enhanced-provider.d.ts +12 -8
  91. package/dist/providers/agent-enhanced-provider.js +87 -95
  92. package/dist/providers/amazonBedrock.d.ts +17 -8
  93. package/dist/providers/amazonBedrock.js +60 -30
  94. package/dist/providers/anthropic.d.ts +14 -10
  95. package/dist/providers/anthropic.js +84 -154
  96. package/dist/providers/azureOpenAI.d.ts +9 -6
  97. package/dist/providers/azureOpenAI.js +70 -159
  98. package/dist/providers/function-calling-provider.d.ts +14 -12
  99. package/dist/providers/function-calling-provider.js +114 -64
  100. package/dist/providers/googleAIStudio.d.ts +12 -19
  101. package/dist/providers/googleAIStudio.js +65 -34
  102. package/dist/providers/googleVertexAI.d.ts +11 -15
  103. package/dist/providers/googleVertexAI.js +146 -118
  104. package/dist/providers/huggingFace.d.ts +10 -11
  105. package/dist/providers/huggingFace.js +61 -24
  106. package/dist/providers/mcp-provider.d.ts +13 -8
  107. package/dist/providers/mcp-provider.js +59 -18
  108. package/dist/providers/mistralAI.d.ts +14 -11
  109. package/dist/providers/mistralAI.js +60 -29
  110. package/dist/providers/ollama.d.ts +9 -8
  111. package/dist/providers/ollama.js +133 -90
  112. package/dist/providers/openAI.d.ts +11 -12
  113. package/dist/providers/openAI.js +132 -97
  114. package/dist/types/generate-types.d.ts +79 -0
  115. package/dist/types/generate-types.js +1 -0
  116. package/dist/types/stream-types.d.ts +83 -0
  117. package/dist/types/stream-types.js +1 -0
  118. package/dist/utils/providerUtils-fixed.js +1 -1
  119. package/dist/utils/streaming-utils.d.ts +14 -2
  120. package/dist/utils/streaming-utils.js +0 -3
  121. package/package.json +2 -3
  122. package/dist/cli/commands/agent-generate.d.ts +0 -1
  123. package/dist/cli/commands/agent-generate.js +0 -67
package/dist/cli/index.js CHANGED
@@ -21,7 +21,7 @@ import chalk from "chalk";
21
21
  import fs from "fs";
22
22
  import { addMCPCommands } from "./commands/mcp.js";
23
23
  import { addOllamaCommands } from "./commands/ollama.js";
24
- import { agentGenerateCommand } from "./commands/agent-generate.js";
24
+ import { CLICommandFactory } from "./factories/command-factory.js";
25
25
  import { AgentEnhancedProvider } from "../lib/providers/agent-enhanced-provider.js";
26
26
  import { logger } from "../lib/utils/logger.js";
27
27
  /**
@@ -290,9 +290,9 @@ const cli = yargs(args)
290
290
  }
291
291
  exitProcess(); // Default exit
292
292
  })
293
- // Generate Text Command
294
- .command(["generate-text [prompt]", "generate [prompt]", "gen [prompt]"], "Generate text using AI providers", (yargsInstance) => yargsInstance
295
- .usage("Usage: $0 generate-text [prompt] [options]")
293
+ // Generate Command (Primary)
294
+ .command(["generate [prompt]", "gen [prompt]"], "Generate content using AI providers", (yargsInstance) => yargsInstance
295
+ .usage("Usage: $0 generate [prompt] [options]")
296
296
  .positional("prompt", {
297
297
  type: "string",
298
298
  description: "Text prompt for AI generation (or read from stdin)",
@@ -379,10 +379,10 @@ const cli = yargs(args)
379
379
  type: "string",
380
380
  description: "JSON context object for custom data",
381
381
  })
382
- .example('$0 generate-text "Hello world"', "Basic text generation")
383
- .example('$0 generate-text "Write a story" --provider openai', "Use specific provider")
384
- .example('$0 generate-text "What time is it?"', "Use with natural tool integration (default)")
385
- .example('$0 generate-text "Hello world" --disable-tools', "Use without tool integration"), async (argv) => {
382
+ .example('$0 generate "Hello world"', "Basic content generation")
383
+ .example('$0 generate "Write a story" --provider openai', "Use specific provider")
384
+ .example('$0 generate "What time is it?"', "Use with natural tool integration (default)")
385
+ .example('$0 generate "Hello world" --disable-tools', "Use without tool integration"), async (argv) => {
386
386
  // SOLUTION 1: Handle stdin input if no prompt provided
387
387
  if (!argv.prompt && !process.stdin.isTTY) {
388
388
  // Read from stdin
@@ -430,11 +430,7 @@ const cli = yargs(args)
430
430
  if (errors.length > 0) {
431
431
  throw new Error(`Parameter validation failed:\n${errors.map((e) => ` • ${e}`).join("\n")}\n\nUse --help for valid parameter ranges.`);
432
432
  }
433
- // Check if generate-text was used specifically (for deprecation warning)
434
- const usedCommand = argv._[0];
435
- if (usedCommand === "generate-text" && !argv.quiet) {
436
- console.warn(chalk.yellow('⚠️ Warning: "generate-text" is deprecated. Use "generate" or "gen" instead for multimodal support.'));
437
- }
433
+ // Command is now the primary generate method
438
434
  let originalConsole = {};
439
435
  if (argv.format === "json" && !argv.quiet) {
440
436
  // Suppress only if not quiet, as quiet implies no spinners anyway
@@ -470,8 +466,8 @@ const cli = yargs(args)
470
466
  }
471
467
  if (argv.disableTools === true) {
472
468
  // Tools disabled - use standard SDK
473
- generatePromise = sdk.generateText({
474
- prompt: argv.prompt,
469
+ generatePromise = sdk.generate({
470
+ input: { text: argv.prompt },
475
471
  provider: argv.provider === "auto"
476
472
  ? undefined
477
473
  : argv.provider,
@@ -508,7 +504,7 @@ const cli = yargs(args)
508
504
  model: argv.model, // Use specified model or default
509
505
  toolCategory: "all", // Enable all tool categories
510
506
  });
511
- generatePromise = agentProvider.generateText({
507
+ generatePromise = agentProvider.generate({
512
508
  prompt: argv.prompt,
513
509
  temperature: argv.temperature,
514
510
  maxTokens: argv.maxTokens, // Respect user's token limit - no artificial caps
@@ -637,7 +633,7 @@ const cli = yargs(args)
637
633
  }
638
634
  })
639
635
  // Stream Text Command
640
- .command("stream [prompt]", "Stream text generation in real-time", (yargsInstance) => yargsInstance
636
+ .command("stream [prompt]", "Stream generation in real-time", (yargsInstance) => yargsInstance
641
637
  .usage("Usage: $0 stream [prompt] [options]")
642
638
  .positional("prompt", {
643
639
  type: "string",
@@ -751,8 +747,8 @@ const cli = yargs(args)
751
747
  let stream;
752
748
  if (argv.disableTools === true) {
753
749
  // Tools disabled - use standard SDK
754
- stream = await sdk.generateTextStream({
755
- prompt: argv.prompt,
750
+ stream = await sdk.stream({
751
+ input: { text: argv.prompt },
756
752
  provider: argv.provider === "auto"
757
753
  ? undefined
758
754
  : argv.provider,
@@ -785,8 +781,8 @@ const cli = yargs(args)
785
781
  toolCategory: "all", // Enable all tool categories
786
782
  });
787
783
  // Note: AgentEnhancedProvider doesn't support streaming with tools yet
788
- // Fall back to generateText for now
789
- const result = await agentProvider.generateText({
784
+ // Fall back to generate for now
785
+ const result = await agentProvider.generate({
790
786
  prompt: argv.prompt,
791
787
  temperature: argv.temperature,
792
788
  // NEW: Analytics and evaluation support
@@ -795,7 +791,7 @@ const cli = yargs(args)
795
791
  context: contextObj,
796
792
  });
797
793
  // Simulate streaming by outputting the result
798
- const text = result?.text || "";
794
+ const text = result?.content || "";
799
795
  const CHUNK_SIZE = 10;
800
796
  const DELAY_MS = 50;
801
797
  for (let i = 0; i < text.length; i += CHUNK_SIZE) {
@@ -810,7 +806,7 @@ const cli = yargs(args)
810
806
  displayAnalyticsAndEvaluation(result, argv);
811
807
  return; // Exit early for agent mode
812
808
  }
813
- for await (const chunk of stream) {
809
+ for await (const chunk of stream.stream) {
814
810
  process.stdout.write(chunk.content);
815
811
  // In debug mode, interleaved logging would appear here
816
812
  // (SDK logs are controlled by NEUROLINK_DEBUG set in middleware)
@@ -818,6 +814,8 @@ const cli = yargs(args)
818
814
  if (!argv.quiet) {
819
815
  process.stdout.write("\n");
820
816
  } // Ensure newline after stream
817
+ // Exit successfully
818
+ process.exit(0);
821
819
  }
822
820
  catch (error) {
823
821
  handleError(error, "Text streaming");
@@ -912,8 +910,8 @@ const cli = yargs(args)
912
910
  spinner.text = `Processing ${i + 1}/${prompts.length}: ${prompts[i].substring(0, 30)}...`;
913
911
  }
914
912
  try {
915
- const result = await sdk.generateText({
916
- prompt: prompts[i],
913
+ const result = await sdk.generate({
914
+ input: { text: prompts[i] },
917
915
  provider: argv.provider === "auto"
918
916
  ? undefined
919
917
  : argv.provider,
@@ -1075,8 +1073,8 @@ const cli = yargs(args)
1075
1073
  try {
1076
1074
  const start = Date.now();
1077
1075
  // Add timeout to prevent hanging
1078
- const testPromise = sdk.generateText({
1079
- prompt: "test",
1076
+ const testPromise = sdk.generate({
1077
+ input: { text: "test" },
1080
1078
  provider: p,
1081
1079
  maxTokens: 1,
1082
1080
  disableTools: true, // Disable tools for faster status check
@@ -1238,12 +1236,12 @@ const cli = yargs(args)
1238
1236
  .example("$0 completion >> ~/.zshrc", "Add to zsh"), async (argv) => {
1239
1237
  cli.showCompletionScript();
1240
1238
  });
1239
+ // Add NEW Generate Command (Primary)
1240
+ cli.command(CLICommandFactory.createGenerateCommand());
1241
1241
  // Add MCP Commands
1242
1242
  addMCPCommands(cli);
1243
1243
  // Add Ollama Commands
1244
1244
  addOllamaCommands(cli);
1245
- // Add Agent Generate Command
1246
- agentGenerateCommand(cli);
1247
1245
  // Execute CLI
1248
1246
  (async () => {
1249
1247
  try {
@@ -244,7 +244,7 @@ export async function testProviderConnectivity(providers, quiet = false) {
244
244
  }
245
245
  try {
246
246
  const start = Date.now();
247
- await sdk.generateText({ prompt: "test", provider, maxTokens: 1 });
247
+ await sdk.generate({ input: { text: "test" }, provider, maxTokens: 1 });
248
248
  const duration = Date.now() - start;
249
249
  results.push({ provider, status: "working", responseTime: duration });
250
250
  if (spinner) {
@@ -291,7 +291,7 @@ export function displaySetupSummary(result, quiet = false) {
291
291
  if (working > 0) {
292
292
  console.log(chalk.green("\n✅ Setup completed successfully!"));
293
293
  console.log(chalk.yellow("💡 You can now use NeuroLink with your configured providers."));
294
- console.log(chalk.gray(' Try: neurolink generate-text "Hello, AI!"'));
294
+ console.log(chalk.gray(' Try: neurolink generate "Hello, AI!"'));
295
295
  }
296
296
  else {
297
297
  console.log(chalk.red("\n❌ No providers are working."));
@@ -47,9 +47,9 @@ export interface UnifiedEvaluationContext {
47
47
  * Unified Evaluation Schema (Lighthouse-compatible with extensions)
48
48
  */
49
49
  export declare const unifiedEvaluationSchema: z.ZodObject<{
50
- relevanceScore: z.ZodNumber;
51
- accuracyScore: z.ZodNumber;
52
- completenessScore: z.ZodNumber;
50
+ relevance: z.ZodNumber;
51
+ accuracy: z.ZodNumber;
52
+ completeness: z.ZodNumber;
53
53
  domainAlignment: z.ZodOptional<z.ZodNumber>;
54
54
  terminologyAccuracy: z.ZodOptional<z.ZodNumber>;
55
55
  toolEffectiveness: z.ZodOptional<z.ZodNumber>;
@@ -58,9 +58,9 @@ export declare const unifiedEvaluationSchema: z.ZodObject<{
58
58
  suggestedImprovements: z.ZodOptional<z.ZodString>;
59
59
  alertSeverity: z.ZodEnum<["low", "medium", "high", "none"]>;
60
60
  }, "strip", z.ZodTypeAny, {
61
- relevanceScore: number;
62
- accuracyScore: number;
63
- completenessScore: number;
61
+ relevance: number;
62
+ accuracy: number;
63
+ completeness: number;
64
64
  isOffTopic: boolean;
65
65
  reasoning: string;
66
66
  alertSeverity: "low" | "medium" | "high" | "none";
@@ -69,9 +69,9 @@ export declare const unifiedEvaluationSchema: z.ZodObject<{
69
69
  toolEffectiveness?: number | undefined;
70
70
  suggestedImprovements?: string | undefined;
71
71
  }, {
72
- relevanceScore: number;
73
- accuracyScore: number;
74
- completenessScore: number;
72
+ relevance: number;
73
+ accuracy: number;
74
+ completeness: number;
75
75
  isOffTopic: boolean;
76
76
  reasoning: string;
77
77
  alertSeverity: "low" | "medium" | "high" | "none";
@@ -16,17 +16,17 @@ import { z } from "zod";
16
16
  */
17
17
  export const unifiedEvaluationSchema = z.object({
18
18
  // Core evaluation scores
19
- relevanceScore: z
19
+ relevance: z
20
20
  .number()
21
21
  .min(0)
22
22
  .max(10)
23
23
  .describe("Score (0-10) for how well the response addresses query intent and aligns with domain/role. 10 is most relevant."),
24
- accuracyScore: z
24
+ accuracy: z
25
25
  .number()
26
26
  .min(0)
27
27
  .max(10)
28
28
  .describe("Score (0-10) for factual correctness against data, tool outputs, and domain knowledge. 10 is most accurate."),
29
- completenessScore: z
29
+ completeness: z
30
30
  .number()
31
31
  .min(0)
32
32
  .max(10)
@@ -113,8 +113,8 @@ export async function performUnifiedEvaluation(context) {
113
113
  }
114
114
  catch (structuredError) {
115
115
  logger.warn(`[${functionTag}] Structured evaluation failed, using fallback`, { structuredError });
116
- // Fallback to legacy generateText
117
- const result = await evaluationModel.generateText({
116
+ // Fallback to legacy generate
117
+ const result = await evaluationModel.generate({
118
118
  prompt: evaluationPrompt + "\n\nRespond with valid JSON only.",
119
119
  temperature: 0.1,
120
120
  maxTokens: 1000,
@@ -302,9 +302,9 @@ function processStructuredEvaluationResult(result, modelConfig, evaluationTime,
302
302
  const overall = Math.round(allScores.reduce((sum, score) => sum + score, 0) / allScores.length);
303
303
  return {
304
304
  // Core scores
305
- relevanceScore: Math.max(0, Math.min(10, Math.round(result.relevanceScore || 0))),
306
- accuracyScore: Math.max(0, Math.min(10, Math.round(result.accuracyScore || 0))),
307
- completenessScore: Math.max(0, Math.min(10, Math.round(result.completenessScore || 0))),
305
+ relevance: Math.max(0, Math.min(10, Math.round(result.relevanceScore || 0))),
306
+ accuracy: Math.max(0, Math.min(10, Math.round(result.accuracyScore || 0))),
307
+ completeness: Math.max(0, Math.min(10, Math.round(result.completenessScore || 0))),
308
308
  overall: Math.max(0, Math.min(10, overall)),
309
309
  // Enhanced insights
310
310
  isOffTopic: result.isOffTopic || false,
@@ -372,9 +372,9 @@ function parseUnifiedEvaluationResult(evaluationText, modelConfig, evaluationTim
372
372
  ? parseInt(completenessMatch[1] || completenessMatch[2] || completenessMatch[3], 10)
373
373
  : 8; // Default fallback score
374
374
  return {
375
- relevanceScore: Math.max(0, Math.min(10, relevance)),
376
- accuracyScore: Math.max(0, Math.min(10, accuracy)),
377
- completenessScore: Math.max(0, Math.min(10, completeness)),
375
+ relevance: Math.max(0, Math.min(10, relevance)),
376
+ accuracy: Math.max(0, Math.min(10, accuracy)),
377
+ completeness: Math.max(0, Math.min(10, completeness)),
378
378
  overall: Math.round((relevance + accuracy + completeness) / 3),
379
379
  isOffTopic: false,
380
380
  alertSeverity: "none",
@@ -400,9 +400,9 @@ function parseUnifiedEvaluationResult(evaluationText, modelConfig, evaluationTim
400
400
  */
401
401
  function getDefaultUnifiedEvaluation(reason, evaluationTime, context) {
402
402
  return {
403
- relevanceScore: 0,
404
- accuracyScore: 0,
405
- completenessScore: 0,
403
+ relevance: 0,
404
+ accuracy: 0,
405
+ completeness: 0,
406
406
  overall: 0,
407
407
  isOffTopic: false,
408
408
  alertSeverity: "high",
@@ -1,5 +1,32 @@
1
1
  import type { ZodType, ZodTypeDef } from "zod";
2
- import type { StreamTextResult, ToolSet, Schema, GenerateTextResult, Tool } from "ai";
2
+ import type { Schema, Tool } from "ai";
3
+ import type { GenerateResult } from "../types/generate-types.js";
4
+ import type { StreamOptions, StreamResult } from "../types/stream-types.js";
5
+ export interface TextGenerationResult {
6
+ content: string;
7
+ provider?: string;
8
+ model?: string;
9
+ usage?: {
10
+ promptTokens?: number;
11
+ completionTokens?: number;
12
+ totalTokens?: number;
13
+ };
14
+ responseTime?: number;
15
+ toolsUsed?: string[];
16
+ toolExecutions?: Array<{
17
+ toolName: string;
18
+ executionTime: number;
19
+ success: boolean;
20
+ serverId?: string;
21
+ }>;
22
+ enhancedWithTools?: boolean;
23
+ availableTools?: Array<{
24
+ name: string;
25
+ description: string;
26
+ server: string;
27
+ category?: string;
28
+ }>;
29
+ }
3
30
  /**
4
31
  * Supported AI Provider Names
5
32
  */
@@ -12,7 +39,8 @@ export declare enum AIProviderName {
12
39
  GOOGLE_AI = "google-ai",
13
40
  HUGGINGFACE = "huggingface",
14
41
  OLLAMA = "ollama",
15
- MISTRAL = "mistral"
42
+ MISTRAL = "mistral",
43
+ AUTO = "auto"
16
44
  }
17
45
  /**
18
46
  * Supported Models for Amazon Bedrock
@@ -73,28 +101,7 @@ export interface StreamingOptions {
73
101
  */
74
102
  export interface TextGenerationOptions {
75
103
  prompt: string;
76
- model?: string;
77
- temperature?: number;
78
- maxTokens?: number;
79
- systemPrompt?: string;
80
- schema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>;
81
- tools?: Record<string, Tool>;
82
- timeout?: number | string;
83
- enableEvaluation?: boolean;
84
- enableAnalytics?: boolean;
85
- context?: Record<string, any>;
86
- evaluationDomain?: string;
87
- toolUsageContext?: string;
88
- conversationHistory?: Array<{
89
- role: string;
90
- content: string;
91
- }>;
92
- }
93
- /**
94
- * Stream text options interface
95
- */
96
- export interface StreamTextOptions {
97
- prompt: string;
104
+ provider?: AIProviderName;
98
105
  model?: string;
99
106
  temperature?: number;
100
107
  maxTokens?: number;
@@ -133,10 +140,13 @@ export interface AnalyticsData {
133
140
  * Updated to match Lighthouse's exact evaluation interface for consistency
134
141
  */
135
142
  export interface EvaluationData {
136
- relevanceScore: number;
137
- accuracyScore: number;
138
- completenessScore: number;
143
+ relevance: number;
144
+ accuracy: number;
145
+ completeness: number;
139
146
  overall: number;
147
+ domainAlignment?: number;
148
+ terminologyAccuracy?: number;
149
+ toolEffectiveness?: number;
140
150
  isOffTopic: boolean;
141
151
  alertSeverity: "low" | "medium" | "high" | "none";
142
152
  reasoning: string;
@@ -207,11 +217,7 @@ export interface ProviderModelConfig {
207
217
  /**
208
218
  * Enhanced result interfaces with optional analytics/evaluation
209
219
  */
210
- export interface EnhancedGenerateTextResult extends GenerateTextResult<ToolSet, unknown> {
211
- analytics?: AnalyticsData;
212
- evaluation?: EvaluationData;
213
- }
214
- export interface EnhancedStreamTextResult extends StreamTextResult<ToolSet, unknown> {
220
+ export interface EnhancedGenerateResult extends GenerateResult {
215
221
  analytics?: AnalyticsData;
216
222
  evaluation?: EvaluationData;
217
223
  }
@@ -240,26 +246,13 @@ export interface StreamingMetadata {
240
246
  modelUsed: string;
241
247
  }
242
248
  export type ProgressCallback = (progress: StreamingProgressData) => void;
243
- export interface EnhancedStreamTextOptions extends StreamTextOptions {
244
- enableProgressTracking?: boolean;
245
- progressCallback?: ProgressCallback;
246
- includeStreamingMetadata?: boolean;
247
- streamingBufferSize?: number;
248
- enableStreamingHeaders?: boolean;
249
- customStreamingConfig?: {
250
- chunkDelayMs?: number;
251
- maxConcurrentChunks?: number;
252
- compressionEnabled?: boolean;
253
- };
254
- }
255
249
  /**
256
250
  * AI Provider interface with flexible parameter support
257
251
  */
258
252
  export interface AIProvider {
259
- streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedStreamTextResult | null>;
260
- generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
261
- generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
262
- gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
253
+ stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
254
+ generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateResult | null>;
255
+ gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateResult | null>;
263
256
  }
264
257
  /**
265
258
  * Provider attempt result for iteration tracking
@@ -12,6 +12,7 @@ export var AIProviderName;
12
12
  AIProviderName["HUGGINGFACE"] = "huggingface";
13
13
  AIProviderName["OLLAMA"] = "ollama";
14
14
  AIProviderName["MISTRAL"] = "mistral";
15
+ AIProviderName["AUTO"] = "auto";
15
16
  })(AIProviderName || (AIProviderName = {}));
16
17
  /**
17
18
  * Supported Models for Amazon Bedrock
@@ -0,0 +1,20 @@
1
+ import type { GenerateOptions, GenerateResult } from "../types/generate-types.js";
2
+ import type { TextGenerationOptions } from "../core/types.js";
3
+ /**
4
+ * Compatibility conversion factory for seamless migration
5
+ * between generateText and generate functions
6
+ */
7
+ export declare class CompatibilityConversionFactory {
8
+ /**
9
+ * Convert TextGenerationOptions to GenerateOptions
10
+ */
11
+ static convertTextToGenerate(options: TextGenerationOptions): GenerateOptions;
12
+ /**
13
+ * Convert GenerateResult to legacy TextGenerationResult format
14
+ */
15
+ static convertGenerateToText(result: GenerateResult): any;
16
+ /**
17
+ * Convert GenerateOptions to TextGenerationOptions
18
+ */
19
+ static convertGenerateToText_Options(options: GenerateOptions): TextGenerationOptions;
20
+ }
@@ -0,0 +1,69 @@
1
+ /**
2
+ * Compatibility conversion factory for seamless migration
3
+ * between generateText and generate functions
4
+ */
5
+ export class CompatibilityConversionFactory {
6
+ /**
7
+ * Convert TextGenerationOptions to GenerateOptions
8
+ */
9
+ static convertTextToGenerate(options) {
10
+ const { prompt, ...rest } = options;
11
+ return {
12
+ input: { text: prompt },
13
+ output: { format: "text" },
14
+ provider: rest.provider,
15
+ model: rest.model,
16
+ temperature: rest.temperature,
17
+ maxTokens: rest.maxTokens,
18
+ systemPrompt: rest.systemPrompt,
19
+ schema: rest.schema,
20
+ tools: rest.tools,
21
+ timeout: rest.timeout,
22
+ enableEvaluation: rest.enableEvaluation,
23
+ enableAnalytics: rest.enableAnalytics,
24
+ context: rest.context,
25
+ evaluationDomain: rest.evaluationDomain,
26
+ toolUsageContext: rest.toolUsageContext,
27
+ conversationHistory: rest.conversationHistory,
28
+ };
29
+ }
30
+ /**
31
+ * Convert GenerateResult to legacy TextGenerationResult format
32
+ */
33
+ static convertGenerateToText(result) {
34
+ return {
35
+ content: result.content,
36
+ provider: result.provider,
37
+ model: result.model,
38
+ usage: result.usage,
39
+ responseTime: result.responseTime,
40
+ toolsUsed: result.toolsUsed,
41
+ toolExecutions: result.toolExecutions,
42
+ enhancedWithTools: result.enhancedWithTools,
43
+ availableTools: result.availableTools,
44
+ analytics: result.analytics,
45
+ evaluation: result.evaluation,
46
+ };
47
+ }
48
+ /**
49
+ * Convert GenerateOptions to TextGenerationOptions
50
+ */
51
+ static convertGenerateToText_Options(options) {
52
+ return {
53
+ prompt: options.input.text,
54
+ model: options.model,
55
+ temperature: options.temperature,
56
+ maxTokens: options.maxTokens,
57
+ systemPrompt: options.systemPrompt,
58
+ schema: options.schema,
59
+ tools: options.tools,
60
+ timeout: options.timeout,
61
+ enableEvaluation: options.enableEvaluation,
62
+ enableAnalytics: options.enableAnalytics,
63
+ context: options.context,
64
+ evaluationDomain: options.evaluationDomain,
65
+ toolUsageContext: options.toolUsageContext,
66
+ conversationHistory: options.conversationHistory,
67
+ };
68
+ }
69
+ }
@@ -0,0 +1,20 @@
1
+ import type { EnhancedProvider } from "../types/generate-types.js";
2
+ import type { AIProvider } from "../core/types.js";
3
+ /**
4
+ * Factory for enhancing providers with generate() capability using Proxy pattern
5
+ * Maintains 100% backward compatibility while adding new generate method
6
+ */
7
+ export declare class ProviderGenerateFactory {
8
+ /**
9
+ * Enhance any provider with generate() method using TypeScript Proxy
10
+ */
11
+ static enhanceProvider<T extends AIProvider>(provider: T): T & EnhancedProvider;
12
+ /**
13
+ * Create the generate() method that internally uses generateText for performance parity
14
+ */
15
+ private static createGenerateMethod;
16
+ /**
17
+ * Enhance all providers from a registry
18
+ */
19
+ static enhanceAllProviders(providers: Map<string, AIProvider>): Map<string, AIProvider & EnhancedProvider>;
20
+ }
@@ -0,0 +1,87 @@
1
+ import { CompatibilityConversionFactory } from "./compatibility-factory.js";
2
+ /**
3
+ * Factory for enhancing providers with generate() capability using Proxy pattern
4
+ * Maintains 100% backward compatibility while adding new generate method
5
+ */
6
+ export class ProviderGenerateFactory {
7
+ /**
8
+ * Enhance any provider with generate() method using TypeScript Proxy
9
+ */
10
+ static enhanceProvider(provider) {
11
+ return new Proxy(provider, {
12
+ get(target, prop, receiver) {
13
+ if (prop === "generate") {
14
+ return ProviderGenerateFactory.createGenerateMethod(target);
15
+ }
16
+ return Reflect.get(target, prop, receiver);
17
+ },
18
+ has(target, prop) {
19
+ if (prop === "generate") {
20
+ return true;
21
+ }
22
+ return Reflect.has(target, prop);
23
+ },
24
+ });
25
+ }
26
+ /**
27
+ * Create the generate() method that internally uses generateText for performance parity
28
+ */
29
+ static createGenerateMethod(provider) {
30
+ return async (options) => {
31
+ // Validate input
32
+ if (!options.input?.text) {
33
+ throw new Error("Generate options must include input.text");
34
+ }
35
+ // Convert GenerateOptions to TextGenerationOptions
36
+ const textOptions = CompatibilityConversionFactory.convertGenerateToText_Options(options);
37
+ try {
38
+ // Use existing generate method for identical performance
39
+ const textResult = await provider.generate(textOptions);
40
+ // Convert back to GenerateResult format with type safety
41
+ const generateResult = {
42
+ content: textResult?.content || "",
43
+ outputs: { text: textResult?.content || "" },
44
+ provider: textResult?.provider,
45
+ model: textResult?.model,
46
+ usage: textResult?.usage
47
+ ? {
48
+ inputTokens: textResult.usage?.promptTokens || 0,
49
+ outputTokens: textResult.usage?.completionTokens || 0,
50
+ totalTokens: textResult.usage?.totalTokens || 0,
51
+ }
52
+ : undefined,
53
+ responseTime: textResult?.responseTime,
54
+ toolsUsed: textResult?.toolsUsed,
55
+ toolExecutions: textResult?.toolExecutions?.map((te) => ({
56
+ name: te.toolName || te.name || "",
57
+ input: te.input || {},
58
+ output: te.output || te.result,
59
+ duration: te.executionTime || te.duration || 0,
60
+ })),
61
+ enhancedWithTools: textResult?.enhancedWithTools,
62
+ availableTools: textResult?.availableTools?.map((at) => ({
63
+ name: at.name || "",
64
+ description: at.description || "",
65
+ parameters: at.parameters || {},
66
+ })),
67
+ analytics: textResult?.analytics,
68
+ evaluation: textResult?.evaluation,
69
+ };
70
+ return generateResult;
71
+ }
72
+ catch (error) {
73
+ throw new Error(`Generate method failed: ${error}`);
74
+ }
75
+ };
76
+ }
77
+ /**
78
+ * Enhance all providers from a registry
79
+ */
80
+ static enhanceAllProviders(providers) {
81
+ const enhancedProviders = new Map();
82
+ for (const [name, provider] of providers) {
83
+ enhancedProviders.set(name, this.enhanceProvider(provider));
84
+ }
85
+ return enhancedProviders;
86
+ }
87
+ }
package/dist/index.d.ts CHANGED
@@ -9,13 +9,15 @@
9
9
  import { AIProviderFactory } from "./core/factory.js";
10
10
  export { AIProviderFactory };
11
11
  export type { AIProvider, AIProviderName, ProviderConfig, StreamingOptions, ProviderAttempt, SupportedModelName, } from "./core/types.js";
12
+ export type { GenerateOptions, GenerateResult, EnhancedProvider, } from "./types/generate-types.js";
13
+ export { CompatibilityConversionFactory } from "./factories/compatibility-factory.js";
14
+ export { ProviderGenerateFactory } from "./factories/provider-generate-factory.js";
12
15
  export { BedrockModels, OpenAIModels, VertexModels, DEFAULT_PROVIDER_CONFIGS, } from "./core/types.js";
13
16
  export { GoogleVertexAI, AmazonBedrock, OpenAI, AnthropicProvider, AzureOpenAIProvider, } from "./providers/index.js";
14
17
  export type { ProviderName } from "./providers/index.js";
15
18
  export { PROVIDERS, AVAILABLE_PROVIDERS } from "./providers/index.js";
16
19
  export { getBestProvider, getAvailableProviders, isValidProvider, } from "./utils/providerUtils.js";
17
20
  export { NeuroLink } from "./neurolink.js";
18
- export type { TextGenerationOptions, StreamTextOptions, TextGenerationResult, } from "./neurolink.js";
19
21
  export declare const VERSION = "1.0.0";
20
22
  /**
21
23
  * Quick start factory function
@@ -25,7 +27,7 @@ export declare const VERSION = "1.0.0";
25
27
  * import { createAIProvider } from '@juspay/neurolink';
26
28
  *
27
29
  * const provider = await createAIProvider('bedrock');
28
- * const result = await provider.streamText('Hello, AI!');
30
+ * const result = await provider.stream({ input: { text: 'Hello, AI!' } });
29
31
  * ```
30
32
  */
31
33
  export declare function createAIProvider(providerName?: string, modelName?: string): Promise<import("./core/types.js").AIProvider>;