@juspay/neurolink 4.2.0 → 5.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. package/CHANGELOG.md +47 -2
  2. package/README.md +51 -60
  3. package/dist/chat/sse-handler.js +5 -4
  4. package/dist/chat/websocket-chat-handler.js +9 -9
  5. package/dist/cli/commands/mcp.js +1 -1
  6. package/dist/cli/commands/ollama.js +3 -3
  7. package/dist/cli/factories/command-factory.d.ts +14 -0
  8. package/dist/cli/factories/command-factory.js +129 -0
  9. package/dist/cli/index.js +27 -29
  10. package/dist/cli/utils/interactive-setup.js +2 -2
  11. package/dist/core/evaluation.d.ts +9 -9
  12. package/dist/core/evaluation.js +14 -14
  13. package/dist/core/types.d.ts +41 -48
  14. package/dist/core/types.js +1 -0
  15. package/dist/factories/compatibility-factory.d.ts +20 -0
  16. package/dist/factories/compatibility-factory.js +69 -0
  17. package/dist/factories/provider-generate-factory.d.ts +20 -0
  18. package/dist/factories/provider-generate-factory.js +87 -0
  19. package/dist/index.d.ts +4 -2
  20. package/dist/index.js +3 -1
  21. package/dist/lib/chat/sse-handler.js +5 -4
  22. package/dist/lib/chat/websocket-chat-handler.js +9 -9
  23. package/dist/lib/core/evaluation.d.ts +9 -9
  24. package/dist/lib/core/evaluation.js +14 -14
  25. package/dist/lib/core/types.d.ts +41 -48
  26. package/dist/lib/core/types.js +1 -0
  27. package/dist/lib/factories/compatibility-factory.d.ts +20 -0
  28. package/dist/lib/factories/compatibility-factory.js +69 -0
  29. package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
  30. package/dist/lib/factories/provider-generate-factory.js +87 -0
  31. package/dist/lib/index.d.ts +4 -2
  32. package/dist/lib/index.js +3 -1
  33. package/dist/lib/mcp/client.js +5 -5
  34. package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
  35. package/dist/lib/mcp/external-client.js +2 -2
  36. package/dist/lib/mcp/factory.d.ts +1 -1
  37. package/dist/lib/mcp/factory.js +1 -1
  38. package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
  39. package/dist/lib/mcp/orchestrator.js +4 -4
  40. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  41. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +5 -5
  42. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  43. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  44. package/dist/lib/neurolink.d.ts +21 -73
  45. package/dist/lib/neurolink.js +230 -119
  46. package/dist/lib/providers/agent-enhanced-provider.d.ts +12 -8
  47. package/dist/lib/providers/agent-enhanced-provider.js +87 -96
  48. package/dist/lib/providers/amazonBedrock.d.ts +17 -8
  49. package/dist/lib/providers/amazonBedrock.js +60 -30
  50. package/dist/lib/providers/anthropic.d.ts +14 -10
  51. package/dist/lib/providers/anthropic.js +84 -154
  52. package/dist/lib/providers/azureOpenAI.d.ts +9 -6
  53. package/dist/lib/providers/azureOpenAI.js +70 -159
  54. package/dist/lib/providers/function-calling-provider.d.ts +14 -12
  55. package/dist/lib/providers/function-calling-provider.js +114 -64
  56. package/dist/lib/providers/googleAIStudio.d.ts +12 -19
  57. package/dist/lib/providers/googleAIStudio.js +65 -34
  58. package/dist/lib/providers/googleVertexAI.d.ts +11 -15
  59. package/dist/lib/providers/googleVertexAI.js +146 -118
  60. package/dist/lib/providers/huggingFace.d.ts +10 -11
  61. package/dist/lib/providers/huggingFace.js +61 -24
  62. package/dist/lib/providers/mcp-provider.d.ts +13 -8
  63. package/dist/lib/providers/mcp-provider.js +59 -18
  64. package/dist/lib/providers/mistralAI.d.ts +14 -11
  65. package/dist/lib/providers/mistralAI.js +60 -29
  66. package/dist/lib/providers/ollama.d.ts +9 -8
  67. package/dist/lib/providers/ollama.js +134 -91
  68. package/dist/lib/providers/openAI.d.ts +11 -12
  69. package/dist/lib/providers/openAI.js +132 -97
  70. package/dist/lib/types/generate-types.d.ts +79 -0
  71. package/dist/lib/types/generate-types.js +1 -0
  72. package/dist/lib/types/stream-types.d.ts +83 -0
  73. package/dist/lib/types/stream-types.js +1 -0
  74. package/dist/lib/utils/providerUtils-fixed.js +1 -1
  75. package/dist/lib/utils/streaming-utils.d.ts +14 -2
  76. package/dist/lib/utils/streaming-utils.js +0 -3
  77. package/dist/mcp/client.js +5 -5
  78. package/dist/mcp/dynamic-orchestrator.js +8 -8
  79. package/dist/mcp/external-client.js +2 -2
  80. package/dist/mcp/factory.d.ts +1 -1
  81. package/dist/mcp/factory.js +1 -1
  82. package/dist/mcp/neurolink-mcp-client.js +10 -10
  83. package/dist/mcp/orchestrator.js +4 -4
  84. package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  85. package/dist/mcp/servers/ai-providers/ai-core-server.js +5 -5
  86. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  87. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  88. package/dist/neurolink.d.ts +21 -73
  89. package/dist/neurolink.js +230 -119
  90. package/dist/providers/agent-enhanced-provider.d.ts +12 -8
  91. package/dist/providers/agent-enhanced-provider.js +87 -95
  92. package/dist/providers/amazonBedrock.d.ts +17 -8
  93. package/dist/providers/amazonBedrock.js +60 -30
  94. package/dist/providers/anthropic.d.ts +14 -10
  95. package/dist/providers/anthropic.js +84 -154
  96. package/dist/providers/azureOpenAI.d.ts +9 -6
  97. package/dist/providers/azureOpenAI.js +70 -159
  98. package/dist/providers/function-calling-provider.d.ts +14 -12
  99. package/dist/providers/function-calling-provider.js +114 -64
  100. package/dist/providers/googleAIStudio.d.ts +12 -19
  101. package/dist/providers/googleAIStudio.js +65 -34
  102. package/dist/providers/googleVertexAI.d.ts +11 -15
  103. package/dist/providers/googleVertexAI.js +146 -118
  104. package/dist/providers/huggingFace.d.ts +10 -11
  105. package/dist/providers/huggingFace.js +61 -24
  106. package/dist/providers/mcp-provider.d.ts +13 -8
  107. package/dist/providers/mcp-provider.js +59 -18
  108. package/dist/providers/mistralAI.d.ts +14 -11
  109. package/dist/providers/mistralAI.js +60 -29
  110. package/dist/providers/ollama.d.ts +9 -8
  111. package/dist/providers/ollama.js +133 -90
  112. package/dist/providers/openAI.d.ts +11 -12
  113. package/dist/providers/openAI.js +132 -97
  114. package/dist/types/generate-types.d.ts +79 -0
  115. package/dist/types/generate-types.js +1 -0
  116. package/dist/types/stream-types.d.ts +83 -0
  117. package/dist/types/stream-types.js +1 -0
  118. package/dist/utils/providerUtils-fixed.js +1 -1
  119. package/dist/utils/streaming-utils.d.ts +14 -2
  120. package/dist/utils/streaming-utils.js +0 -3
  121. package/package.json +2 -3
  122. package/dist/cli/commands/agent-generate.d.ts +0 -1
  123. package/dist/cli/commands/agent-generate.js +0 -67
package/dist/index.js CHANGED
@@ -9,6 +9,8 @@
9
9
  // Core exports
10
10
  import { AIProviderFactory } from "./core/factory.js";
11
11
  export { AIProviderFactory };
12
+ export { CompatibilityConversionFactory } from "./factories/compatibility-factory.js";
13
+ export { ProviderGenerateFactory } from "./factories/provider-generate-factory.js";
12
14
  // Model enums
13
15
  export { BedrockModels, OpenAIModels, VertexModels, DEFAULT_PROVIDER_CONFIGS, } from "./core/types.js";
14
16
  // Provider exports
@@ -28,7 +30,7 @@ export const VERSION = "1.0.0";
28
30
  * import { createAIProvider } from '@juspay/neurolink';
29
31
  *
30
32
  * const provider = await createAIProvider('bedrock');
31
- * const result = await provider.streamText('Hello, AI!');
33
+ * const result = await provider.stream({ input: { text: 'Hello, AI!' } });
32
34
  * ```
33
35
  */
34
36
  export async function createAIProvider(providerName, modelName) {
@@ -42,14 +42,15 @@ export class SSEChatHandler {
42
42
  data: { type: "start", sessionId, messageId: userMessage.id },
43
43
  });
44
44
  // Generate AI response with streaming
45
- const aiResponse = await this.provider.streamText({
46
- prompt: message,
45
+ const aiResponse = await this.provider.stream({
46
+ input: { text: message },
47
47
  temperature: options.temperature,
48
48
  maxTokens: options.maxTokens,
49
49
  systemPrompt: options.systemPrompt,
50
50
  });
51
- if (aiResponse?.textStream) {
52
- const reader = aiResponse.textStream.getReader();
51
+ if (aiResponse?.stream) {
52
+ // Convert async iterable to readable stream
53
+ const reader = aiResponse.stream;
53
54
  let fullResponse = "";
54
55
  try {
55
56
  while (true) {
@@ -60,12 +60,12 @@ export class WebSocketChatHandler extends SSEChatHandler {
60
60
  latencyTarget: 200,
61
61
  });
62
62
  // Generate AI response
63
- const result = await this.provider.generateText({
63
+ const result = await this.provider.generate({
64
64
  prompt: request.prompt,
65
65
  temperature: request.options?.temperature,
66
66
  maxTokens: request.options?.maxTokens,
67
67
  });
68
- if (!result || !result.text) {
68
+ if (!result || !result.content) {
69
69
  throw new Error("Invalid AI response");
70
70
  }
71
71
  // Send response via WebSocket
@@ -75,7 +75,7 @@ export class WebSocketChatHandler extends SSEChatHandler {
75
75
  connectionId,
76
76
  timestamp: Date.now(),
77
77
  data: {
78
- text: result.text,
78
+ text: result.content,
79
79
  sessionId: request.sessionId,
80
80
  metadata: {
81
81
  provider: this.provider.constructor.name,
@@ -98,12 +98,12 @@ export class WebSocketChatHandler extends SSEChatHandler {
98
98
  async handleGroupChat(roomId, request) {
99
99
  try {
100
100
  // Process AI request
101
- const result = await this.provider.generateText({
101
+ const result = await this.provider.generate({
102
102
  prompt: request.prompt,
103
103
  temperature: request.options?.temperature,
104
104
  maxTokens: request.options?.maxTokens,
105
105
  });
106
- if (!result || !result.text) {
106
+ if (!result || !result.content) {
107
107
  throw new Error("Invalid AI response");
108
108
  }
109
109
  // Broadcast to room
@@ -113,7 +113,7 @@ export class WebSocketChatHandler extends SSEChatHandler {
113
113
  connectionId: "system",
114
114
  timestamp: Date.now(),
115
115
  data: {
116
- text: result.text,
116
+ text: result.content,
117
117
  sessionId: request.sessionId,
118
118
  userId: request.userId,
119
119
  isGroupMessage: true,
@@ -136,11 +136,11 @@ export class WebSocketChatHandler extends SSEChatHandler {
136
136
  const channelId = randomUUID();
137
137
  const channel = this.wsServer.createStreamingChannel(connectionId, channelId);
138
138
  // Generate response
139
- const result = await this.provider.generateText({
139
+ const result = await this.provider.generate({
140
140
  prompt: request.prompt,
141
141
  ...request.options,
142
142
  });
143
- if (!result || !result.text) {
143
+ if (!result || !result.content) {
144
144
  throw new Error("Invalid AI response");
145
145
  }
146
146
  // Send complete response
@@ -150,7 +150,7 @@ export class WebSocketChatHandler extends SSEChatHandler {
150
150
  connectionId,
151
151
  timestamp: Date.now(),
152
152
  data: {
153
- text: result.text,
153
+ text: result.content,
154
154
  isStreamingComplete: true,
155
155
  channelId,
156
156
  },
@@ -47,9 +47,9 @@ export interface UnifiedEvaluationContext {
47
47
  * Unified Evaluation Schema (Lighthouse-compatible with extensions)
48
48
  */
49
49
  export declare const unifiedEvaluationSchema: z.ZodObject<{
50
- relevanceScore: z.ZodNumber;
51
- accuracyScore: z.ZodNumber;
52
- completenessScore: z.ZodNumber;
50
+ relevance: z.ZodNumber;
51
+ accuracy: z.ZodNumber;
52
+ completeness: z.ZodNumber;
53
53
  domainAlignment: z.ZodOptional<z.ZodNumber>;
54
54
  terminologyAccuracy: z.ZodOptional<z.ZodNumber>;
55
55
  toolEffectiveness: z.ZodOptional<z.ZodNumber>;
@@ -58,9 +58,9 @@ export declare const unifiedEvaluationSchema: z.ZodObject<{
58
58
  suggestedImprovements: z.ZodOptional<z.ZodString>;
59
59
  alertSeverity: z.ZodEnum<["low", "medium", "high", "none"]>;
60
60
  }, "strip", z.ZodTypeAny, {
61
- relevanceScore: number;
62
- accuracyScore: number;
63
- completenessScore: number;
61
+ relevance: number;
62
+ accuracy: number;
63
+ completeness: number;
64
64
  isOffTopic: boolean;
65
65
  reasoning: string;
66
66
  alertSeverity: "low" | "medium" | "high" | "none";
@@ -69,9 +69,9 @@ export declare const unifiedEvaluationSchema: z.ZodObject<{
69
69
  toolEffectiveness?: number | undefined;
70
70
  suggestedImprovements?: string | undefined;
71
71
  }, {
72
- relevanceScore: number;
73
- accuracyScore: number;
74
- completenessScore: number;
72
+ relevance: number;
73
+ accuracy: number;
74
+ completeness: number;
75
75
  isOffTopic: boolean;
76
76
  reasoning: string;
77
77
  alertSeverity: "low" | "medium" | "high" | "none";
@@ -16,17 +16,17 @@ import { z } from "zod";
16
16
  */
17
17
  export const unifiedEvaluationSchema = z.object({
18
18
  // Core evaluation scores
19
- relevanceScore: z
19
+ relevance: z
20
20
  .number()
21
21
  .min(0)
22
22
  .max(10)
23
23
  .describe("Score (0-10) for how well the response addresses query intent and aligns with domain/role. 10 is most relevant."),
24
- accuracyScore: z
24
+ accuracy: z
25
25
  .number()
26
26
  .min(0)
27
27
  .max(10)
28
28
  .describe("Score (0-10) for factual correctness against data, tool outputs, and domain knowledge. 10 is most accurate."),
29
- completenessScore: z
29
+ completeness: z
30
30
  .number()
31
31
  .min(0)
32
32
  .max(10)
@@ -113,8 +113,8 @@ export async function performUnifiedEvaluation(context) {
113
113
  }
114
114
  catch (structuredError) {
115
115
  logger.warn(`[${functionTag}] Structured evaluation failed, using fallback`, { structuredError });
116
- // Fallback to legacy generateText
117
- const result = await evaluationModel.generateText({
116
+ // Fallback to legacy generate
117
+ const result = await evaluationModel.generate({
118
118
  prompt: evaluationPrompt + "\n\nRespond with valid JSON only.",
119
119
  temperature: 0.1,
120
120
  maxTokens: 1000,
@@ -302,9 +302,9 @@ function processStructuredEvaluationResult(result, modelConfig, evaluationTime,
302
302
  const overall = Math.round(allScores.reduce((sum, score) => sum + score, 0) / allScores.length);
303
303
  return {
304
304
  // Core scores
305
- relevanceScore: Math.max(0, Math.min(10, Math.round(result.relevanceScore || 0))),
306
- accuracyScore: Math.max(0, Math.min(10, Math.round(result.accuracyScore || 0))),
307
- completenessScore: Math.max(0, Math.min(10, Math.round(result.completenessScore || 0))),
305
+ relevance: Math.max(0, Math.min(10, Math.round(result.relevanceScore || 0))),
306
+ accuracy: Math.max(0, Math.min(10, Math.round(result.accuracyScore || 0))),
307
+ completeness: Math.max(0, Math.min(10, Math.round(result.completenessScore || 0))),
308
308
  overall: Math.max(0, Math.min(10, overall)),
309
309
  // Enhanced insights
310
310
  isOffTopic: result.isOffTopic || false,
@@ -372,9 +372,9 @@ function parseUnifiedEvaluationResult(evaluationText, modelConfig, evaluationTim
372
372
  ? parseInt(completenessMatch[1] || completenessMatch[2] || completenessMatch[3], 10)
373
373
  : 8; // Default fallback score
374
374
  return {
375
- relevanceScore: Math.max(0, Math.min(10, relevance)),
376
- accuracyScore: Math.max(0, Math.min(10, accuracy)),
377
- completenessScore: Math.max(0, Math.min(10, completeness)),
375
+ relevance: Math.max(0, Math.min(10, relevance)),
376
+ accuracy: Math.max(0, Math.min(10, accuracy)),
377
+ completeness: Math.max(0, Math.min(10, completeness)),
378
378
  overall: Math.round((relevance + accuracy + completeness) / 3),
379
379
  isOffTopic: false,
380
380
  alertSeverity: "none",
@@ -400,9 +400,9 @@ function parseUnifiedEvaluationResult(evaluationText, modelConfig, evaluationTim
400
400
  */
401
401
  function getDefaultUnifiedEvaluation(reason, evaluationTime, context) {
402
402
  return {
403
- relevanceScore: 0,
404
- accuracyScore: 0,
405
- completenessScore: 0,
403
+ relevance: 0,
404
+ accuracy: 0,
405
+ completeness: 0,
406
406
  overall: 0,
407
407
  isOffTopic: false,
408
408
  alertSeverity: "high",
@@ -1,5 +1,32 @@
1
1
  import type { ZodType, ZodTypeDef } from "zod";
2
- import type { StreamTextResult, ToolSet, Schema, GenerateTextResult, Tool } from "ai";
2
+ import type { Schema, Tool } from "ai";
3
+ import type { GenerateResult } from "../types/generate-types.js";
4
+ import type { StreamOptions, StreamResult } from "../types/stream-types.js";
5
+ export interface TextGenerationResult {
6
+ content: string;
7
+ provider?: string;
8
+ model?: string;
9
+ usage?: {
10
+ promptTokens?: number;
11
+ completionTokens?: number;
12
+ totalTokens?: number;
13
+ };
14
+ responseTime?: number;
15
+ toolsUsed?: string[];
16
+ toolExecutions?: Array<{
17
+ toolName: string;
18
+ executionTime: number;
19
+ success: boolean;
20
+ serverId?: string;
21
+ }>;
22
+ enhancedWithTools?: boolean;
23
+ availableTools?: Array<{
24
+ name: string;
25
+ description: string;
26
+ server: string;
27
+ category?: string;
28
+ }>;
29
+ }
3
30
  /**
4
31
  * Supported AI Provider Names
5
32
  */
@@ -12,7 +39,8 @@ export declare enum AIProviderName {
12
39
  GOOGLE_AI = "google-ai",
13
40
  HUGGINGFACE = "huggingface",
14
41
  OLLAMA = "ollama",
15
- MISTRAL = "mistral"
42
+ MISTRAL = "mistral",
43
+ AUTO = "auto"
16
44
  }
17
45
  /**
18
46
  * Supported Models for Amazon Bedrock
@@ -73,28 +101,7 @@ export interface StreamingOptions {
73
101
  */
74
102
  export interface TextGenerationOptions {
75
103
  prompt: string;
76
- model?: string;
77
- temperature?: number;
78
- maxTokens?: number;
79
- systemPrompt?: string;
80
- schema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>;
81
- tools?: Record<string, Tool>;
82
- timeout?: number | string;
83
- enableEvaluation?: boolean;
84
- enableAnalytics?: boolean;
85
- context?: Record<string, any>;
86
- evaluationDomain?: string;
87
- toolUsageContext?: string;
88
- conversationHistory?: Array<{
89
- role: string;
90
- content: string;
91
- }>;
92
- }
93
- /**
94
- * Stream text options interface
95
- */
96
- export interface StreamTextOptions {
97
- prompt: string;
104
+ provider?: AIProviderName;
98
105
  model?: string;
99
106
  temperature?: number;
100
107
  maxTokens?: number;
@@ -133,10 +140,13 @@ export interface AnalyticsData {
133
140
  * Updated to match Lighthouse's exact evaluation interface for consistency
134
141
  */
135
142
  export interface EvaluationData {
136
- relevanceScore: number;
137
- accuracyScore: number;
138
- completenessScore: number;
143
+ relevance: number;
144
+ accuracy: number;
145
+ completeness: number;
139
146
  overall: number;
147
+ domainAlignment?: number;
148
+ terminologyAccuracy?: number;
149
+ toolEffectiveness?: number;
140
150
  isOffTopic: boolean;
141
151
  alertSeverity: "low" | "medium" | "high" | "none";
142
152
  reasoning: string;
@@ -207,11 +217,7 @@ export interface ProviderModelConfig {
207
217
  /**
208
218
  * Enhanced result interfaces with optional analytics/evaluation
209
219
  */
210
- export interface EnhancedGenerateTextResult extends GenerateTextResult<ToolSet, unknown> {
211
- analytics?: AnalyticsData;
212
- evaluation?: EvaluationData;
213
- }
214
- export interface EnhancedStreamTextResult extends StreamTextResult<ToolSet, unknown> {
220
+ export interface EnhancedGenerateResult extends GenerateResult {
215
221
  analytics?: AnalyticsData;
216
222
  evaluation?: EvaluationData;
217
223
  }
@@ -240,26 +246,13 @@ export interface StreamingMetadata {
240
246
  modelUsed: string;
241
247
  }
242
248
  export type ProgressCallback = (progress: StreamingProgressData) => void;
243
- export interface EnhancedStreamTextOptions extends StreamTextOptions {
244
- enableProgressTracking?: boolean;
245
- progressCallback?: ProgressCallback;
246
- includeStreamingMetadata?: boolean;
247
- streamingBufferSize?: number;
248
- enableStreamingHeaders?: boolean;
249
- customStreamingConfig?: {
250
- chunkDelayMs?: number;
251
- maxConcurrentChunks?: number;
252
- compressionEnabled?: boolean;
253
- };
254
- }
255
249
  /**
256
250
  * AI Provider interface with flexible parameter support
257
251
  */
258
252
  export interface AIProvider {
259
- streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedStreamTextResult | null>;
260
- generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
261
- generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
262
- gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
253
+ stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
254
+ generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateResult | null>;
255
+ gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateResult | null>;
263
256
  }
264
257
  /**
265
258
  * Provider attempt result for iteration tracking
@@ -12,6 +12,7 @@ export var AIProviderName;
12
12
  AIProviderName["HUGGINGFACE"] = "huggingface";
13
13
  AIProviderName["OLLAMA"] = "ollama";
14
14
  AIProviderName["MISTRAL"] = "mistral";
15
+ AIProviderName["AUTO"] = "auto";
15
16
  })(AIProviderName || (AIProviderName = {}));
16
17
  /**
17
18
  * Supported Models for Amazon Bedrock
@@ -0,0 +1,20 @@
1
+ import type { GenerateOptions, GenerateResult } from "../types/generate-types.js";
2
+ import type { TextGenerationOptions } from "../core/types.js";
3
+ /**
4
+ * Compatibility conversion factory for seamless migration
5
+ * between generateText and generate functions
6
+ */
7
+ export declare class CompatibilityConversionFactory {
8
+ /**
9
+ * Convert TextGenerationOptions to GenerateOptions
10
+ */
11
+ static convertTextToGenerate(options: TextGenerationOptions): GenerateOptions;
12
+ /**
13
+ * Convert GenerateResult to legacy TextGenerationResult format
14
+ */
15
+ static convertGenerateToText(result: GenerateResult): any;
16
+ /**
17
+ * Convert GenerateOptions to TextGenerationOptions
18
+ */
19
+ static convertGenerateToText_Options(options: GenerateOptions): TextGenerationOptions;
20
+ }
@@ -0,0 +1,69 @@
1
+ /**
2
+ * Compatibility conversion factory for seamless migration
3
+ * between generateText and generate functions
4
+ */
5
+ export class CompatibilityConversionFactory {
6
+ /**
7
+ * Convert TextGenerationOptions to GenerateOptions
8
+ */
9
+ static convertTextToGenerate(options) {
10
+ const { prompt, ...rest } = options;
11
+ return {
12
+ input: { text: prompt },
13
+ output: { format: "text" },
14
+ provider: rest.provider,
15
+ model: rest.model,
16
+ temperature: rest.temperature,
17
+ maxTokens: rest.maxTokens,
18
+ systemPrompt: rest.systemPrompt,
19
+ schema: rest.schema,
20
+ tools: rest.tools,
21
+ timeout: rest.timeout,
22
+ enableEvaluation: rest.enableEvaluation,
23
+ enableAnalytics: rest.enableAnalytics,
24
+ context: rest.context,
25
+ evaluationDomain: rest.evaluationDomain,
26
+ toolUsageContext: rest.toolUsageContext,
27
+ conversationHistory: rest.conversationHistory,
28
+ };
29
+ }
30
+ /**
31
+ * Convert GenerateResult to legacy TextGenerationResult format
32
+ */
33
+ static convertGenerateToText(result) {
34
+ return {
35
+ content: result.content,
36
+ provider: result.provider,
37
+ model: result.model,
38
+ usage: result.usage,
39
+ responseTime: result.responseTime,
40
+ toolsUsed: result.toolsUsed,
41
+ toolExecutions: result.toolExecutions,
42
+ enhancedWithTools: result.enhancedWithTools,
43
+ availableTools: result.availableTools,
44
+ analytics: result.analytics,
45
+ evaluation: result.evaluation,
46
+ };
47
+ }
48
+ /**
49
+ * Convert GenerateOptions to TextGenerationOptions
50
+ */
51
+ static convertGenerateToText_Options(options) {
52
+ return {
53
+ prompt: options.input.text,
54
+ model: options.model,
55
+ temperature: options.temperature,
56
+ maxTokens: options.maxTokens,
57
+ systemPrompt: options.systemPrompt,
58
+ schema: options.schema,
59
+ tools: options.tools,
60
+ timeout: options.timeout,
61
+ enableEvaluation: options.enableEvaluation,
62
+ enableAnalytics: options.enableAnalytics,
63
+ context: options.context,
64
+ evaluationDomain: options.evaluationDomain,
65
+ toolUsageContext: options.toolUsageContext,
66
+ conversationHistory: options.conversationHistory,
67
+ };
68
+ }
69
+ }
@@ -0,0 +1,20 @@
1
+ import type { EnhancedProvider } from "../types/generate-types.js";
2
+ import type { AIProvider } from "../core/types.js";
3
+ /**
4
+ * Factory for enhancing providers with generate() capability using Proxy pattern
5
+ * Maintains 100% backward compatibility while adding new generate method
6
+ */
7
+ export declare class ProviderGenerateFactory {
8
+ /**
9
+ * Enhance any provider with generate() method using TypeScript Proxy
10
+ */
11
+ static enhanceProvider<T extends AIProvider>(provider: T): T & EnhancedProvider;
12
+ /**
13
+ * Create the generate() method that internally uses generateText for performance parity
14
+ */
15
+ private static createGenerateMethod;
16
+ /**
17
+ * Enhance all providers from a registry
18
+ */
19
+ static enhanceAllProviders(providers: Map<string, AIProvider>): Map<string, AIProvider & EnhancedProvider>;
20
+ }
@@ -0,0 +1,87 @@
1
+ import { CompatibilityConversionFactory } from "./compatibility-factory.js";
2
+ /**
3
+ * Factory for enhancing providers with generate() capability using Proxy pattern
4
+ * Maintains 100% backward compatibility while adding new generate method
5
+ */
6
+ export class ProviderGenerateFactory {
7
+ /**
8
+ * Enhance any provider with generate() method using TypeScript Proxy
9
+ */
10
+ static enhanceProvider(provider) {
11
+ return new Proxy(provider, {
12
+ get(target, prop, receiver) {
13
+ if (prop === "generate") {
14
+ return ProviderGenerateFactory.createGenerateMethod(target);
15
+ }
16
+ return Reflect.get(target, prop, receiver);
17
+ },
18
+ has(target, prop) {
19
+ if (prop === "generate") {
20
+ return true;
21
+ }
22
+ return Reflect.has(target, prop);
23
+ },
24
+ });
25
+ }
26
+ /**
27
+ * Create the generate() method that internally uses generateText for performance parity
28
+ */
29
+ static createGenerateMethod(provider) {
30
+ return async (options) => {
31
+ // Validate input
32
+ if (!options.input?.text) {
33
+ throw new Error("Generate options must include input.text");
34
+ }
35
+ // Convert GenerateOptions to TextGenerationOptions
36
+ const textOptions = CompatibilityConversionFactory.convertGenerateToText_Options(options);
37
+ try {
38
+ // Use existing generate method for identical performance
39
+ const textResult = await provider.generate(textOptions);
40
+ // Convert back to GenerateResult format with type safety
41
+ const generateResult = {
42
+ content: textResult?.content || "",
43
+ outputs: { text: textResult?.content || "" },
44
+ provider: textResult?.provider,
45
+ model: textResult?.model,
46
+ usage: textResult?.usage
47
+ ? {
48
+ inputTokens: textResult.usage?.promptTokens || 0,
49
+ outputTokens: textResult.usage?.completionTokens || 0,
50
+ totalTokens: textResult.usage?.totalTokens || 0,
51
+ }
52
+ : undefined,
53
+ responseTime: textResult?.responseTime,
54
+ toolsUsed: textResult?.toolsUsed,
55
+ toolExecutions: textResult?.toolExecutions?.map((te) => ({
56
+ name: te.toolName || te.name || "",
57
+ input: te.input || {},
58
+ output: te.output || te.result,
59
+ duration: te.executionTime || te.duration || 0,
60
+ })),
61
+ enhancedWithTools: textResult?.enhancedWithTools,
62
+ availableTools: textResult?.availableTools?.map((at) => ({
63
+ name: at.name || "",
64
+ description: at.description || "",
65
+ parameters: at.parameters || {},
66
+ })),
67
+ analytics: textResult?.analytics,
68
+ evaluation: textResult?.evaluation,
69
+ };
70
+ return generateResult;
71
+ }
72
+ catch (error) {
73
+ throw new Error(`Generate method failed: ${error}`);
74
+ }
75
+ };
76
+ }
77
+ /**
78
+ * Enhance all providers from a registry
79
+ */
80
+ static enhanceAllProviders(providers) {
81
+ const enhancedProviders = new Map();
82
+ for (const [name, provider] of providers) {
83
+ enhancedProviders.set(name, this.enhanceProvider(provider));
84
+ }
85
+ return enhancedProviders;
86
+ }
87
+ }
@@ -9,13 +9,15 @@
9
9
  import { AIProviderFactory } from "./core/factory.js";
10
10
  export { AIProviderFactory };
11
11
  export type { AIProvider, AIProviderName, ProviderConfig, StreamingOptions, ProviderAttempt, SupportedModelName, } from "./core/types.js";
12
+ export type { GenerateOptions, GenerateResult, EnhancedProvider, } from "./types/generate-types.js";
13
+ export { CompatibilityConversionFactory } from "./factories/compatibility-factory.js";
14
+ export { ProviderGenerateFactory } from "./factories/provider-generate-factory.js";
12
15
  export { BedrockModels, OpenAIModels, VertexModels, DEFAULT_PROVIDER_CONFIGS, } from "./core/types.js";
13
16
  export { GoogleVertexAI, AmazonBedrock, OpenAI, AnthropicProvider, AzureOpenAIProvider, } from "./providers/index.js";
14
17
  export type { ProviderName } from "./providers/index.js";
15
18
  export { PROVIDERS, AVAILABLE_PROVIDERS } from "./providers/index.js";
16
19
  export { getBestProvider, getAvailableProviders, isValidProvider, } from "./utils/providerUtils.js";
17
20
  export { NeuroLink } from "./neurolink.js";
18
- export type { TextGenerationOptions, StreamTextOptions, TextGenerationResult, } from "./neurolink.js";
19
21
  export declare const VERSION = "1.0.0";
20
22
  /**
21
23
  * Quick start factory function
@@ -25,7 +27,7 @@ export declare const VERSION = "1.0.0";
25
27
  * import { createAIProvider } from '@juspay/neurolink';
26
28
  *
27
29
  * const provider = await createAIProvider('bedrock');
28
- * const result = await provider.streamText('Hello, AI!');
30
+ * const result = await provider.stream({ input: { text: 'Hello, AI!' } });
29
31
  * ```
30
32
  */
31
33
  export declare function createAIProvider(providerName?: string, modelName?: string): Promise<import("./index.js").AIProvider>;
package/dist/lib/index.js CHANGED
@@ -9,6 +9,8 @@
9
9
  // Core exports
10
10
  import { AIProviderFactory } from "./core/factory.js";
11
11
  export { AIProviderFactory };
12
+ export { CompatibilityConversionFactory } from "./factories/compatibility-factory.js";
13
+ export { ProviderGenerateFactory } from "./factories/provider-generate-factory.js";
12
14
  // Model enums
13
15
  export { BedrockModels, OpenAIModels, VertexModels, DEFAULT_PROVIDER_CONFIGS, } from "./core/types.js";
14
16
  // Provider exports
@@ -28,7 +30,7 @@ export const VERSION = "1.0.0";
28
30
  * import { createAIProvider } from '@juspay/neurolink';
29
31
  *
30
32
  * const provider = await createAIProvider('bedrock');
31
- * const result = await provider.streamText('Hello, AI!');
33
+ * const result = await provider.stream({ input: { text: 'Hello, AI!' } });
32
34
  * ```
33
35
  */
34
36
  export async function createAIProvider(providerName, modelName) {
@@ -49,11 +49,11 @@ export class NeuroLinkMCPClient extends EventEmitter {
49
49
  return result;
50
50
  }
51
51
  // If it's in Lighthouse format with content array
52
- if (result.content &&
53
- Array.isArray(result.content) &&
54
- result.content[0]?.text) {
52
+ if (result.text &&
53
+ Array.isArray(result.text) &&
54
+ result.text[0]?.text) {
55
55
  try {
56
- const data = JSON.parse(result.content[0].text);
56
+ const data = JSON.parse(result.text[0].text);
57
57
  return {
58
58
  success: !result.isError,
59
59
  data,
@@ -69,7 +69,7 @@ export class NeuroLinkMCPClient extends EventEmitter {
69
69
  // If JSON parsing fails, return the text as-is
70
70
  return {
71
71
  success: !result.isError,
72
- data: { text: result.content[0].text },
72
+ data: { text: result.text[0].text },
73
73
  metadata: {
74
74
  toolName,
75
75
  serverId,
@@ -199,11 +199,11 @@ export class DynamicOrchestrator extends MCPOrchestrator {
199
199
  .replace("{previousResults}", previousResults || "None yet");
200
200
  try {
201
201
  // Use AI Core Server to get tool decision
202
- const generateTextTool = aiCoreServer.tools["generate-text"];
203
- if (!generateTextTool) {
204
- throw new Error("generate-text tool not found");
202
+ const generateTool = aiCoreServer.tools["generate"];
203
+ if (!generateTool) {
204
+ throw new Error("generate tool not found");
205
205
  }
206
- const aiResponse = await generateTextTool.execute({
206
+ const aiResponse = await generateTool.execute({
207
207
  prompt: "Select the next tool to execute based on the context provided.",
208
208
  systemPrompt,
209
209
  provider: "google-ai", // Use fast model for decisions
@@ -279,11 +279,11 @@ ${executionSummary}
279
279
  Provide a clear, concise answer that addresses the user's request based on the tool results.`;
280
280
  try {
281
281
  // Use AI to generate final summary
282
- const generateTextTool = aiCoreServer.tools["generate-text"];
283
- if (!generateTextTool) {
284
- throw new Error("generate-text tool not found");
282
+ const generateTool = aiCoreServer.tools["generate"];
283
+ if (!generateTool) {
284
+ throw new Error("generate tool not found");
285
285
  }
286
- const aiResponse = await generateTextTool.execute({
286
+ const aiResponse = await generateTool.execute({
287
287
  prompt: summaryPrompt,
288
288
  provider: "google-ai",
289
289
  model: "gemini-2.5-pro",