@juspay/neurolink 5.0.0 → 5.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121)
  1. package/CHANGELOG.md +7 -0
  2. package/README.md +51 -60
  3. package/dist/chat/sse-handler.js +5 -4
  4. package/dist/chat/websocket-chat-handler.js +9 -9
  5. package/dist/cli/commands/mcp.js +1 -1
  6. package/dist/cli/commands/ollama.js +3 -3
  7. package/dist/cli/factories/command-factory.d.ts +14 -0
  8. package/dist/cli/factories/command-factory.js +129 -0
  9. package/dist/cli/index.js +27 -26
  10. package/dist/cli/utils/interactive-setup.js +2 -2
  11. package/dist/core/evaluation.d.ts +9 -9
  12. package/dist/core/evaluation.js +14 -14
  13. package/dist/core/types.d.ts +41 -48
  14. package/dist/core/types.js +1 -0
  15. package/dist/factories/compatibility-factory.d.ts +20 -0
  16. package/dist/factories/compatibility-factory.js +69 -0
  17. package/dist/factories/provider-generate-factory.d.ts +20 -0
  18. package/dist/factories/provider-generate-factory.js +87 -0
  19. package/dist/index.d.ts +4 -2
  20. package/dist/index.js +3 -1
  21. package/dist/lib/chat/sse-handler.js +5 -4
  22. package/dist/lib/chat/websocket-chat-handler.js +9 -9
  23. package/dist/lib/core/evaluation.d.ts +9 -9
  24. package/dist/lib/core/evaluation.js +14 -14
  25. package/dist/lib/core/types.d.ts +41 -48
  26. package/dist/lib/core/types.js +1 -0
  27. package/dist/lib/factories/compatibility-factory.d.ts +20 -0
  28. package/dist/lib/factories/compatibility-factory.js +69 -0
  29. package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
  30. package/dist/lib/factories/provider-generate-factory.js +87 -0
  31. package/dist/lib/index.d.ts +4 -2
  32. package/dist/lib/index.js +3 -1
  33. package/dist/lib/mcp/client.js +5 -5
  34. package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
  35. package/dist/lib/mcp/external-client.js +2 -2
  36. package/dist/lib/mcp/factory.d.ts +1 -1
  37. package/dist/lib/mcp/factory.js +1 -1
  38. package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
  39. package/dist/lib/mcp/orchestrator.js +4 -4
  40. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  41. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +5 -5
  42. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  43. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  44. package/dist/lib/neurolink.d.ts +21 -73
  45. package/dist/lib/neurolink.js +230 -119
  46. package/dist/lib/providers/agent-enhanced-provider.d.ts +12 -8
  47. package/dist/lib/providers/agent-enhanced-provider.js +87 -96
  48. package/dist/lib/providers/amazonBedrock.d.ts +17 -8
  49. package/dist/lib/providers/amazonBedrock.js +60 -30
  50. package/dist/lib/providers/anthropic.d.ts +14 -10
  51. package/dist/lib/providers/anthropic.js +84 -154
  52. package/dist/lib/providers/azureOpenAI.d.ts +9 -6
  53. package/dist/lib/providers/azureOpenAI.js +70 -159
  54. package/dist/lib/providers/function-calling-provider.d.ts +14 -12
  55. package/dist/lib/providers/function-calling-provider.js +114 -64
  56. package/dist/lib/providers/googleAIStudio.d.ts +12 -19
  57. package/dist/lib/providers/googleAIStudio.js +65 -34
  58. package/dist/lib/providers/googleVertexAI.d.ts +11 -15
  59. package/dist/lib/providers/googleVertexAI.js +146 -118
  60. package/dist/lib/providers/huggingFace.d.ts +10 -11
  61. package/dist/lib/providers/huggingFace.js +61 -24
  62. package/dist/lib/providers/mcp-provider.d.ts +13 -8
  63. package/dist/lib/providers/mcp-provider.js +59 -18
  64. package/dist/lib/providers/mistralAI.d.ts +14 -11
  65. package/dist/lib/providers/mistralAI.js +60 -29
  66. package/dist/lib/providers/ollama.d.ts +9 -8
  67. package/dist/lib/providers/ollama.js +134 -91
  68. package/dist/lib/providers/openAI.d.ts +11 -12
  69. package/dist/lib/providers/openAI.js +132 -97
  70. package/dist/lib/types/generate-types.d.ts +79 -0
  71. package/dist/lib/types/generate-types.js +1 -0
  72. package/dist/lib/types/stream-types.d.ts +83 -0
  73. package/dist/lib/types/stream-types.js +1 -0
  74. package/dist/lib/utils/providerUtils-fixed.js +1 -1
  75. package/dist/lib/utils/streaming-utils.d.ts +14 -2
  76. package/dist/lib/utils/streaming-utils.js +0 -3
  77. package/dist/mcp/client.js +5 -5
  78. package/dist/mcp/dynamic-orchestrator.js +8 -8
  79. package/dist/mcp/external-client.js +2 -2
  80. package/dist/mcp/factory.d.ts +1 -1
  81. package/dist/mcp/factory.js +1 -1
  82. package/dist/mcp/neurolink-mcp-client.js +10 -10
  83. package/dist/mcp/orchestrator.js +4 -4
  84. package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  85. package/dist/mcp/servers/ai-providers/ai-core-server.js +5 -5
  86. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  87. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  88. package/dist/neurolink.d.ts +21 -73
  89. package/dist/neurolink.js +230 -119
  90. package/dist/providers/agent-enhanced-provider.d.ts +12 -8
  91. package/dist/providers/agent-enhanced-provider.js +87 -95
  92. package/dist/providers/amazonBedrock.d.ts +17 -8
  93. package/dist/providers/amazonBedrock.js +60 -30
  94. package/dist/providers/anthropic.d.ts +14 -10
  95. package/dist/providers/anthropic.js +84 -154
  96. package/dist/providers/azureOpenAI.d.ts +9 -6
  97. package/dist/providers/azureOpenAI.js +70 -159
  98. package/dist/providers/function-calling-provider.d.ts +14 -12
  99. package/dist/providers/function-calling-provider.js +114 -64
  100. package/dist/providers/googleAIStudio.d.ts +12 -19
  101. package/dist/providers/googleAIStudio.js +65 -34
  102. package/dist/providers/googleVertexAI.d.ts +11 -15
  103. package/dist/providers/googleVertexAI.js +146 -118
  104. package/dist/providers/huggingFace.d.ts +10 -11
  105. package/dist/providers/huggingFace.js +61 -24
  106. package/dist/providers/mcp-provider.d.ts +13 -8
  107. package/dist/providers/mcp-provider.js +59 -18
  108. package/dist/providers/mistralAI.d.ts +14 -11
  109. package/dist/providers/mistralAI.js +60 -29
  110. package/dist/providers/ollama.d.ts +9 -8
  111. package/dist/providers/ollama.js +133 -90
  112. package/dist/providers/openAI.d.ts +11 -12
  113. package/dist/providers/openAI.js +132 -97
  114. package/dist/types/generate-types.d.ts +79 -0
  115. package/dist/types/generate-types.js +1 -0
  116. package/dist/types/stream-types.d.ts +83 -0
  117. package/dist/types/stream-types.js +1 -0
  118. package/dist/utils/providerUtils-fixed.js +1 -1
  119. package/dist/utils/streaming-utils.d.ts +14 -2
  120. package/dist/utils/streaming-utils.js +0 -3
  121. package/package.json +1 -1
@@ -70,29 +70,29 @@ export class OpenAI {
     getModel() {
         return this.model;
     }
-    async streamText(optionsOrPrompt, analysisSchema) {
-        const functionTag = "OpenAI.streamText";
+    async generate(optionsOrPrompt, analysisSchema) {
+        const functionTag = "OpenAI.generate";
         const provider = "openai";
-        let chunkCount = 0;
+        const startTime = Date.now();
         try {
             // Parse parameters - support both string and options object
             const options = typeof optionsOrPrompt === "string"
                 ? { prompt: optionsOrPrompt }
                 : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
+            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
-            logger.debug(`[${functionTag}] Stream text started`, {
+            logger.debug(`[${functionTag}] Generate text started`, {
                 provider,
                 modelName: this.modelName,
-                promptLength: prompt.length,
+                promptLength: prompt?.length || 0,
                 temperature,
                 maxTokens,
                 timeout,
             });
             // Create timeout controller if timeout is specified
-            const timeoutController = createTimeoutController(timeout, provider, "stream");
-            const streamOptions = {
+            const timeoutController = createTimeoutController(timeout, provider, "generate");
+            const generateOptions = {
                 model: this.model,
                 prompt: prompt,
                 system: systemPrompt,
@@ -102,50 +102,51 @@ export class OpenAI {
                 ...(timeoutController && {
                     abortSignal: timeoutController.controller.signal,
                 }),
-                onError: (event) => {
-                    const error = event.error;
-                    const errorMessage = error instanceof Error ? error.message : String(error);
-                    const errorStack = error instanceof Error ? error.stack : undefined;
-                    logger.debug(`[${functionTag}] Stream text error`, {
-                        provider,
-                        modelName: this.modelName,
-                        error: errorMessage,
-                        stack: errorStack,
-                        promptLength: prompt.length,
-                        chunkCount,
-                    });
-                },
-                onFinish: (event) => {
-                    logger.debug(`[${functionTag}] Stream text finished`, {
-                        provider,
-                        modelName: this.modelName,
-                        finishReason: event.finishReason,
-                        usage: event.usage,
-                        totalChunks: chunkCount,
-                        promptLength: prompt.length,
-                        responseLength: event.text?.length || 0,
-                    });
-                },
-                onChunk: (event) => {
-                    chunkCount++;
-                    logger.debug(`[${functionTag}] Stream text chunk`, {
-                        provider,
-                        modelName: this.modelName,
-                        chunkNumber: chunkCount,
-                        chunkLength: event.chunk.text?.length || 0,
-                        chunkType: event.chunk.type,
-                    });
-                },
             };
             if (finalSchema) {
-                streamOptions.experimental_output = Output.object({
+                generateOptions.experimental_output = Output.object({
                     schema: finalSchema,
                 });
             }
-            const result = streamText(streamOptions);
-            // For streaming, we can't clean up immediately, but the timeout will auto-clean
-            // The user should handle the stream and any timeout errors
-            return result;
+            try {
+                const result = await generateText(generateOptions);
+                // Clean up timeout if successful
+                timeoutController?.cleanup();
+                logger.debug(`[${functionTag}] Generate text completed`, {
+                    provider,
+                    modelName: this.modelName,
+                    usage: result.usage,
+                    finishReason: result.finishReason,
+                    responseLength: result.text?.length || 0,
+                    timeout,
+                });
+                // Add analytics if enabled
+                if (options.enableAnalytics) {
+                    const { createAnalytics } = await import("./analytics-helper.js");
+                    result.analytics = createAnalytics(provider, this.modelName, result, Date.now() - startTime, options.context);
+                }
+                // Add evaluation if enabled
+                if (options.enableEvaluation) {
+                    result.evaluation = await evaluateResponse(prompt, result.text, options.context, options.evaluationDomain, options.toolUsageContext, options.conversationHistory);
+                }
+                return {
+                    content: result.text,
+                    provider: "openai",
+                    model: this.modelName,
+                    usage: result.usage
+                        ? {
+                            inputTokens: result.usage.promptTokens,
+                            outputTokens: result.usage.completionTokens,
+                            totalTokens: result.usage.totalTokens,
+                        }
+                        : undefined,
+                    responseTime: Date.now() - startTime,
+                };
+            }
+            finally {
+                // Always cleanup timeout
+                timeoutController?.cleanup();
+            }
         }
         catch (err) {
             // Log timeout errors specifically
@@ -161,36 +162,48 @@ export class OpenAI {
                 logger.debug(`[${functionTag}] Exception`, {
                     provider,
                     modelName: this.modelName,
-                    message: "Error in streaming text",
+                    message: "Error in generating text",
                     err: String(err),
                 });
             }
             throw err; // Re-throw error to trigger fallback
         }
     }
-    async generateText(optionsOrPrompt, analysisSchema) {
-        const functionTag = "OpenAI.generateText";
+    /**
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
+     */
+    async stream(optionsOrPrompt, analysisSchema) {
+        const functionTag = "OpenAI.stream";
         const provider = "openai";
+        let chunkCount = 0;
         const startTime = Date.now();
         try {
             // Parse parameters - support both string and options object
             const options = typeof optionsOrPrompt === "string"
-                ? { prompt: optionsOrPrompt }
+                ? { input: { text: optionsOrPrompt } }
                 : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
+            // Validate input
+            if (!options?.input?.text ||
+                typeof options.input.text !== "string" ||
+                options.input.text.trim() === "") {
+                throw new Error("Stream options must include input.text as a non-empty string");
+            }
+            // Convert to internal parameters
+            const { prompt = options.input.text, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
-            logger.debug(`[${functionTag}] Generate text started`, {
+            logger.debug(`[${functionTag}] Stream request started`, {
                 provider,
                 modelName: this.modelName,
-                promptLength: prompt.length,
+                promptLength: prompt?.length || 0,
                 temperature,
                 maxTokens,
                 timeout,
             });
             // Create timeout controller if timeout is specified
-            const timeoutController = createTimeoutController(timeout, provider, "generate");
-            const generateOptions = {
+            const timeoutController = createTimeoutController(timeout, provider, "stream");
+            const streamOptions = {
                 model: this.model,
                 prompt: prompt,
                 system: systemPrompt,
@@ -200,39 +213,70 @@ export class OpenAI {
                 ...(timeoutController && {
                     abortSignal: timeoutController.controller.signal,
                 }),
+                onError: (event) => {
+                    const error = event.error;
+                    const errorMessage = error instanceof Error ? error.message : String(error);
+                    const errorStack = error instanceof Error ? error.stack : undefined;
+                    logger.debug(`[${functionTag}] Stream error`, {
+                        provider,
+                        modelName: this.modelName,
+                        error: errorMessage,
+                        stack: errorStack,
+                        promptLength: prompt.length,
+                        chunkCount,
+                    });
+                },
+                onFinish: (event) => {
+                    logger.debug(`[${functionTag}] Stream finished`, {
+                        provider,
+                        modelName: this.modelName,
+                        finishReason: event.finishReason,
+                        usage: event.usage,
+                        totalChunks: chunkCount,
+                        promptLength: prompt.length,
+                        responseLength: event.text?.length || 0,
+                    });
+                },
+                onChunk: (event) => {
+                    chunkCount++;
+                    logger.debug(`[${functionTag}] Stream chunk`, {
+                        provider,
+                        modelName: this.modelName,
+                        chunkNumber: chunkCount,
+                        chunkLength: event.chunk.text?.length || 0,
+                        chunkType: event.chunk.type,
+                    });
+                },
             };
             if (finalSchema) {
-                generateOptions.experimental_output = Output.object({
+                streamOptions.experimental_output = Output.object({
                     schema: finalSchema,
                 });
             }
-            try {
-                const result = await generateText(generateOptions);
-                // Clean up timeout if successful
-                timeoutController?.cleanup();
-                logger.debug(`[${functionTag}] Generate text completed`, {
-                    provider,
-                    modelName: this.modelName,
-                    usage: result.usage,
-                    finishReason: result.finishReason,
-                    responseLength: result.text?.length || 0,
-                    timeout,
-                });
-                // Add analytics if enabled
-                if (options.enableAnalytics) {
-                    const { createAnalytics } = await import("./analytics-helper.js");
-                    result.analytics = createAnalytics(provider, this.modelName, result, Date.now() - startTime, options.context);
-                }
-                // Add evaluation if enabled
-                if (options.enableEvaluation) {
-                    result.evaluation = await evaluateResponse(prompt, result.text, options.context, options.evaluationDomain, options.toolUsageContext, options.conversationHistory);
-                }
-                return result;
-            }
-            finally {
-                // Always cleanup timeout
-                timeoutController?.cleanup();
-            }
+            const result = streamText(streamOptions);
+            logger.debug(`[${functionTag}] Stream request completed`, {
+                provider,
+                modelName: this.modelName,
+            });
+            // Convert to StreamResult format
+            return {
+                stream: result.textStream
+                    ? (async function* () {
+                        for await (const chunk of result.textStream) {
+                            yield { content: chunk };
+                        }
+                    })()
+                    : (async function* () {
+                        yield { content: "" };
+                        throw new Error("No textStream available from AI SDK");
+                    })(),
+                provider: "openai",
+                model: this.modelName,
+                metadata: {
+                    streamId: `openai-${Date.now()}`,
+                    startTime,
+                },
+            };
         }
         catch (err) {
             // Log timeout errors specifically
@@ -248,7 +292,7 @@ export class OpenAI {
                 logger.debug(`[${functionTag}] Exception`, {
                     provider,
                     modelName: this.modelName,
-                    message: "Error in generating text",
+                    message: "Error in streaming content",
                     err: String(err),
                 });
             }
@@ -256,21 +300,12 @@ export class OpenAI {
         }
     }
     /**
-     * Alias for generateText() - CLI-SDK consistency
-     * @param optionsOrPrompt - TextGenerationOptions object or prompt string
-     * @param analysisSchema - Optional schema for output validation
-     * @returns Promise resolving to GenerateTextResult or null
-     */
-    async generate(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
-    }
-    /**
-     * Short alias for generateText() - CLI-SDK consistency
+     * Short alias for generate() - CLI-SDK consistency
      * @param optionsOrPrompt - TextGenerationOptions object or prompt string
      * @param analysisSchema - Optional schema for output validation
-     * @returns Promise resolving to GenerateTextResult or null
+     * @returns Promise resolving to GenerateResult or null
      */
     async gen(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
+        return this.generate(optionsOrPrompt, analysisSchema);
     }
 }
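Net effect of the six openAI.js hunks above: streamText() is renamed to stream() with a new input.text options shape, generateText() is folded into generate(), and both now return normalized result objects instead of raw AI SDK results. A minimal usage sketch of the new surface — the import path and zero-argument constructor are assumptions for illustration, not confirmed by this diff:

// Sketch only: assumes the provider class is importable and constructible as in 5.0.0.
import { OpenAI } from "@juspay/neurolink";

async function demo(): Promise<void> {
    const provider = new OpenAI();
    // generate() keeps the old string-or-options calling convention:
    const result = await provider.generate({ prompt: "Say hi", maxTokens: 50 });
    console.log(result.content, result.usage?.totalTokens, result.responseTime);
    // stream() now requires the input.text shape and throws on an empty string:
    const { stream } = await provider.stream({ input: { text: "Say hi" } });
    for await (const chunk of stream) {
        process.stdout.write(chunk.content);
    }
}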
@@ -0,0 +1,79 @@
+import type { ZodType, ZodTypeDef } from "zod";
+import type { Tool, Schema } from "ai";
+import type { AIProviderName, AnalyticsData, EvaluationData } from "../core/types.js";
+/**
+ * Generate function options interface - Primary method for content generation
+ * Future-ready for multi-modal capabilities while maintaining text focus
+ */
+export interface GenerateOptions {
+    input: {
+        text: string;
+    };
+    output?: {
+        format?: "text" | "structured" | "json";
+    };
+    provider?: AIProviderName | string;
+    model?: string;
+    temperature?: number;
+    maxTokens?: number;
+    systemPrompt?: string;
+    schema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>;
+    tools?: Record<string, Tool>;
+    timeout?: number | string;
+    disableTools?: boolean;
+    enableEvaluation?: boolean;
+    enableAnalytics?: boolean;
+    context?: Record<string, any>;
+    evaluationDomain?: string;
+    toolUsageContext?: string;
+    conversationHistory?: Array<{
+        role: string;
+        content: string;
+    }>;
+}
+/**
+ * Generate function result interface - Primary output format
+ * Future-ready for multi-modal outputs while maintaining text focus
+ */
+export interface GenerateResult {
+    content: string;
+    outputs?: {
+        text: string;
+    };
+    provider?: string;
+    model?: string;
+    usage?: {
+        inputTokens: number;
+        outputTokens: number;
+        totalTokens: number;
+    };
+    responseTime?: number;
+    toolCalls?: Array<{
+        toolCallId: string;
+        toolName: string;
+        args: Record<string, any>;
+    }>;
+    toolsUsed?: string[];
+    toolExecutions?: Array<{
+        name: string;
+        input: Record<string, any>;
+        output: any;
+        duration: number;
+    }>;
+    enhancedWithTools?: boolean;
+    availableTools?: Array<{
+        name: string;
+        description: string;
+        parameters: Record<string, any>;
+    }>;
+    analytics?: AnalyticsData;
+    evaluation?: EvaluationData;
+}
+/**
+ * Enhanced provider interface with generate method
+ */
+export interface EnhancedProvider {
+    generate(options: GenerateOptions): Promise<GenerateResult>;
+    getName(): string;
+    isAvailable(): Promise<boolean>;
+}
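generate-types.d.ts is new in 5.1.0 and defines the provider-agnostic contract behind the renamed methods. A hedged caller sketch against EnhancedProvider, assuming these types are re-exported from the package root (index.d.ts also changed in this release):

import type { EnhancedProvider, GenerateOptions, GenerateResult } from "@juspay/neurolink";

async function summarize(provider: EnhancedProvider, text: string): Promise<string> {
    const options: GenerateOptions = {
        input: { text: `Summarize:\n${text}` },
        temperature: 0.3,
        maxTokens: 256,
        enableAnalytics: true,
    };
    const result: GenerateResult = await provider.generate(options);
    // usage is optional; not every provider reports token counts
    if (result.usage) {
        console.log(`tokens: ${result.usage.totalTokens}`);
    }
    return result.content;
}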
@@ -0,0 +1 @@
+export {};
@@ -0,0 +1,83 @@
+import type { ZodType, ZodTypeDef } from "zod";
+import type { Tool, Schema } from "ai";
+import type { AIProviderName, AnalyticsData, EvaluationData } from "../core/types.js";
+/**
+ * Stream function options interface - Primary method for streaming content
+ * Future-ready for multi-modal capabilities while maintaining text focus
+ */
+export interface StreamOptions {
+    input: {
+        text: string;
+    };
+    output?: {
+        format?: "text" | "structured" | "json";
+        streaming?: {
+            chunkSize?: number;
+            bufferSize?: number;
+            enableProgress?: boolean;
+        };
+    };
+    provider?: AIProviderName | string;
+    model?: string;
+    temperature?: number;
+    maxTokens?: number;
+    systemPrompt?: string;
+    schema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>;
+    tools?: Record<string, Tool>;
+    timeout?: number | string;
+    disableTools?: boolean;
+    enableEvaluation?: boolean;
+    enableAnalytics?: boolean;
+    context?: Record<string, any>;
+    evaluationDomain?: string;
+    toolUsageContext?: string;
+    conversationHistory?: Array<{
+        role: string;
+        content: string;
+    }>;
+}
+/**
+ * Stream function result interface - Primary output format for streaming
+ * Future-ready for multi-modal outputs while maintaining text focus
+ */
+export interface StreamResult {
+    stream: AsyncIterable<{
+        content: string;
+    }>;
+    provider?: string;
+    model?: string;
+    metadata?: {
+        streamId?: string;
+        startTime?: number;
+        totalChunks?: number;
+        estimatedDuration?: number;
+    };
+    toolCalls?: Array<{
+        toolCallId: string;
+        toolName: string;
+        args: Record<string, any>;
+    }>;
+    toolsUsed?: string[];
+    toolExecutions?: Array<{
+        name: string;
+        input: Record<string, any>;
+        output: any;
+        duration: number;
+    }>;
+    enhancedWithTools?: boolean;
+    availableTools?: Array<{
+        name: string;
+        description: string;
+        parameters: Record<string, any>;
+    }>;
+    analytics?: AnalyticsData;
+    evaluation?: EvaluationData;
+}
+/**
+ * Enhanced provider interface with stream method
+ */
+export interface EnhancedStreamProvider {
+    stream(options: StreamOptions): Promise<StreamResult>;
+    getName(): string;
+    isAvailable(): Promise<boolean>;
+}
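StreamResult wraps the output in a plain AsyncIterable of { content } chunks, so consumers no longer touch the AI SDK stream object directly. A consumption sketch under the same root re-export assumption as above:

import type { EnhancedStreamProvider, StreamResult } from "@juspay/neurolink";

async function printStream(provider: EnhancedStreamProvider): Promise<void> {
    const result: StreamResult = await provider.stream({
        input: { text: "Write a haiku about release diffs." },
    });
    for await (const chunk of result.stream) {
        process.stdout.write(chunk.content);
    }
    // metadata fields are all optional; the OpenAI provider above sets streamId and startTime
    console.log(`\nstream ${result.metadata?.streamId ?? "?"} done`);
}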
@@ -0,0 +1 @@
+export {};
@@ -85,7 +85,7 @@ async function isProviderAvailable(providerName) {
     }
     try {
         const provider = await AIProviderFactory.createProvider(providerName);
-        await provider.generateText({ prompt: "test", maxTokens: 1 });
+        await provider.generate({ prompt: "test", maxTokens: 1 });
         return true;
     }
     catch (error) {
@@ -2,7 +2,7 @@
  * Phase 2: Enhanced Streaming Infrastructure
  * Streaming utilities for progress tracking and metadata enhancement
  */
-import type { StreamingProgressData, StreamingMetadata, ProgressCallback, EnhancedStreamTextOptions } from "../core/types.js";
+import type { StreamingProgressData, StreamingMetadata, ProgressCallback } from "../core/types.js";
 export interface UIProgressHandler {
     onProgress: (progress: StreamingProgressData) => void;
     onComplete: (metadata: StreamingMetadata) => void;
@@ -19,6 +19,18 @@ export interface StreamingStats {
 /**
  * Enhanced streaming utilities for progress tracking and metadata
  */
+export interface StreamingConfigOptions {
+    enableProgressTracking?: boolean;
+    progressCallback?: ProgressCallback;
+    includeStreamingMetadata?: boolean;
+    streamingBufferSize?: number;
+    enableStreamingHeaders?: boolean;
+}
+/**
+ * Legacy interface for backward compatibility
+ */
+export interface EnhancedStreamTextOptions extends StreamingConfigOptions {
+}
 export declare class StreamingEnhancer {
     /**
      * Add progress tracking to a readable stream
@@ -42,7 +54,7 @@ export declare class StreamingEnhancer {
     /**
      * Create enhanced streaming configuration
      */
-    static createStreamingConfig(options: EnhancedStreamTextOptions): {
+    static createStreamingConfig(options: StreamingConfigOptions | EnhancedStreamTextOptions): {
         progressTracking: boolean;
         callback?: ProgressCallback;
         metadata: boolean;
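Because EnhancedStreamTextOptions now extends the new StreamingConfigOptions, existing call sites keep compiling while new code can use the new name. A sketch — the export path and the single-argument ProgressCallback shape are assumptions:

import { StreamingEnhancer } from "@juspay/neurolink"; // export path assumed

// Both the new name and the legacy alias satisfy the widened parameter type.
const config = StreamingEnhancer.createStreamingConfig({
    enableProgressTracking: true,
    progressCallback: (progress) => console.log(progress),
    includeStreamingMetadata: true,
});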
@@ -2,9 +2,6 @@
  * Phase 2: Enhanced Streaming Infrastructure
  * Streaming utilities for progress tracking and metadata enhancement
  */
-/**
- * Enhanced streaming utilities for progress tracking and metadata
- */
 export class StreamingEnhancer {
     /**
      * Add progress tracking to a readable stream
@@ -49,11 +49,11 @@ export class NeuroLinkMCPClient extends EventEmitter {
             return result;
         }
         // If it's in Lighthouse format with content array
-        if (result.content &&
-            Array.isArray(result.content) &&
-            result.content[0]?.text) {
+        if (result.text &&
+            Array.isArray(result.text) &&
+            result.text[0]?.text) {
             try {
-                const data = JSON.parse(result.content[0].text);
+                const data = JSON.parse(result.text[0].text);
                 return {
                     success: !result.isError,
                     data,
@@ -69,7 +69,7 @@ export class NeuroLinkMCPClient extends EventEmitter {
             // If JSON parsing fails, return the text as-is
             return {
                 success: !result.isError,
-                data: { text: result.content[0].text },
+                data: { text: result.text[0].text },
                 metadata: {
                     toolName,
                     serverId,
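Both NeuroLinkMCPClient hunks switch the "Lighthouse format" check from result.content to result.text, so the 5.1.0 parser now unwraps payloads shaped like the illustrative literal below. Note that standard MCP tool responses carry this array under content, so callers producing the old key will fall through to the raw-result path:

// Illustrative payload the 5.1.0 parser unwraps (field names from the diff):
const result = {
    isError: false,
    text: [{ text: '{"ok":true,"items":[1,2,3]}' }],
};
const data = JSON.parse(result.text[0].text); // → { ok: true, items: [1, 2, 3] }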
@@ -199,11 +199,11 @@ export class DynamicOrchestrator extends MCPOrchestrator {
             .replace("{previousResults}", previousResults || "None yet");
         try {
             // Use AI Core Server to get tool decision
-            const generateTextTool = aiCoreServer.tools["generate-text"];
-            if (!generateTextTool) {
-                throw new Error("generate-text tool not found");
+            const generateTool = aiCoreServer.tools["generate"];
+            if (!generateTool) {
+                throw new Error("generate tool not found");
             }
-            const aiResponse = await generateTextTool.execute({
+            const aiResponse = await generateTool.execute({
                 prompt: "Select the next tool to execute based on the context provided.",
                 systemPrompt,
                 provider: "google-ai", // Use fast model for decisions
@@ -279,11 +279,11 @@ ${executionSummary}
 Provide a clear, concise answer that addresses the user's request based on the tool results.`;
         try {
             // Use AI to generate final summary
-            const generateTextTool = aiCoreServer.tools["generate-text"];
-            if (!generateTextTool) {
-                throw new Error("generate-text tool not found");
+            const generateTool = aiCoreServer.tools["generate"];
+            if (!generateTool) {
+                throw new Error("generate tool not found");
             }
-            const aiResponse = await generateTextTool.execute({
+            const aiResponse = await generateTool.execute({
                 prompt: summaryPrompt,
                 provider: "google-ai",
                 model: "gemini-2.5-pro",
@@ -139,7 +139,7 @@ export class ExternalMCPClient extends EventEmitter {
         // Transform MCP response to NeuroLink format
         const result = {
             success: !response.isError,
-            data: response.content || response.result || response,
+            data: response.text || response.result || response,
             metadata: {
                 toolName,
                 serverId: this.config.name,
@@ -150,7 +150,7 @@ export class ExternalMCPClient extends EventEmitter {
             },
         };
         if (response.isError) {
-            result.error = response.content?.[0]?.text || "Tool execution failed";
+            result.error = response.text?.[0]?.text || "Tool execution failed";
         }
         logger.debug(`[External MCP] Tool ${toolName} executed in ${executionTime}ms`);
         return result;
@@ -134,7 +134,7 @@ export interface MCPServerConfig {
  * });
  *
  * aiCoreServer.registerTool({
- *   name: 'generate-text',
+ *   name: 'generate',
  *   description: 'Generate text using AI providers',
  *   execute: async (params, context) => {
  *     // Tool implementation
@@ -50,7 +50,7 @@ const ServerConfigSchema = z.object({
  * });
  *
  * aiCoreServer.registerTool({
- *   name: 'generate-text',
+ *   name: 'generate',
  *   description: 'Generate text using AI providers',
  *   execute: async (params, context) => {
  *     // Tool implementation
  * // Tool implementation