@juspay/neurolink 4.2.0 → 5.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. package/CHANGELOG.md +47 -2
  2. package/README.md +51 -60
  3. package/dist/chat/sse-handler.js +5 -4
  4. package/dist/chat/websocket-chat-handler.js +9 -9
  5. package/dist/cli/commands/mcp.js +1 -1
  6. package/dist/cli/commands/ollama.js +3 -3
  7. package/dist/cli/factories/command-factory.d.ts +14 -0
  8. package/dist/cli/factories/command-factory.js +129 -0
  9. package/dist/cli/index.js +27 -29
  10. package/dist/cli/utils/interactive-setup.js +2 -2
  11. package/dist/core/evaluation.d.ts +9 -9
  12. package/dist/core/evaluation.js +14 -14
  13. package/dist/core/types.d.ts +41 -48
  14. package/dist/core/types.js +1 -0
  15. package/dist/factories/compatibility-factory.d.ts +20 -0
  16. package/dist/factories/compatibility-factory.js +69 -0
  17. package/dist/factories/provider-generate-factory.d.ts +20 -0
  18. package/dist/factories/provider-generate-factory.js +87 -0
  19. package/dist/index.d.ts +4 -2
  20. package/dist/index.js +3 -1
  21. package/dist/lib/chat/sse-handler.js +5 -4
  22. package/dist/lib/chat/websocket-chat-handler.js +9 -9
  23. package/dist/lib/core/evaluation.d.ts +9 -9
  24. package/dist/lib/core/evaluation.js +14 -14
  25. package/dist/lib/core/types.d.ts +41 -48
  26. package/dist/lib/core/types.js +1 -0
  27. package/dist/lib/factories/compatibility-factory.d.ts +20 -0
  28. package/dist/lib/factories/compatibility-factory.js +69 -0
  29. package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
  30. package/dist/lib/factories/provider-generate-factory.js +87 -0
  31. package/dist/lib/index.d.ts +4 -2
  32. package/dist/lib/index.js +3 -1
  33. package/dist/lib/mcp/client.js +5 -5
  34. package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
  35. package/dist/lib/mcp/external-client.js +2 -2
  36. package/dist/lib/mcp/factory.d.ts +1 -1
  37. package/dist/lib/mcp/factory.js +1 -1
  38. package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
  39. package/dist/lib/mcp/orchestrator.js +4 -4
  40. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  41. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +5 -5
  42. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  43. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  44. package/dist/lib/neurolink.d.ts +21 -73
  45. package/dist/lib/neurolink.js +230 -119
  46. package/dist/lib/providers/agent-enhanced-provider.d.ts +12 -8
  47. package/dist/lib/providers/agent-enhanced-provider.js +87 -96
  48. package/dist/lib/providers/amazonBedrock.d.ts +17 -8
  49. package/dist/lib/providers/amazonBedrock.js +60 -30
  50. package/dist/lib/providers/anthropic.d.ts +14 -10
  51. package/dist/lib/providers/anthropic.js +84 -154
  52. package/dist/lib/providers/azureOpenAI.d.ts +9 -6
  53. package/dist/lib/providers/azureOpenAI.js +70 -159
  54. package/dist/lib/providers/function-calling-provider.d.ts +14 -12
  55. package/dist/lib/providers/function-calling-provider.js +114 -64
  56. package/dist/lib/providers/googleAIStudio.d.ts +12 -19
  57. package/dist/lib/providers/googleAIStudio.js +65 -34
  58. package/dist/lib/providers/googleVertexAI.d.ts +11 -15
  59. package/dist/lib/providers/googleVertexAI.js +146 -118
  60. package/dist/lib/providers/huggingFace.d.ts +10 -11
  61. package/dist/lib/providers/huggingFace.js +61 -24
  62. package/dist/lib/providers/mcp-provider.d.ts +13 -8
  63. package/dist/lib/providers/mcp-provider.js +59 -18
  64. package/dist/lib/providers/mistralAI.d.ts +14 -11
  65. package/dist/lib/providers/mistralAI.js +60 -29
  66. package/dist/lib/providers/ollama.d.ts +9 -8
  67. package/dist/lib/providers/ollama.js +134 -91
  68. package/dist/lib/providers/openAI.d.ts +11 -12
  69. package/dist/lib/providers/openAI.js +132 -97
  70. package/dist/lib/types/generate-types.d.ts +79 -0
  71. package/dist/lib/types/generate-types.js +1 -0
  72. package/dist/lib/types/stream-types.d.ts +83 -0
  73. package/dist/lib/types/stream-types.js +1 -0
  74. package/dist/lib/utils/providerUtils-fixed.js +1 -1
  75. package/dist/lib/utils/streaming-utils.d.ts +14 -2
  76. package/dist/lib/utils/streaming-utils.js +0 -3
  77. package/dist/mcp/client.js +5 -5
  78. package/dist/mcp/dynamic-orchestrator.js +8 -8
  79. package/dist/mcp/external-client.js +2 -2
  80. package/dist/mcp/factory.d.ts +1 -1
  81. package/dist/mcp/factory.js +1 -1
  82. package/dist/mcp/neurolink-mcp-client.js +10 -10
  83. package/dist/mcp/orchestrator.js +4 -4
  84. package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  85. package/dist/mcp/servers/ai-providers/ai-core-server.js +5 -5
  86. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  87. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  88. package/dist/neurolink.d.ts +21 -73
  89. package/dist/neurolink.js +230 -119
  90. package/dist/providers/agent-enhanced-provider.d.ts +12 -8
  91. package/dist/providers/agent-enhanced-provider.js +87 -95
  92. package/dist/providers/amazonBedrock.d.ts +17 -8
  93. package/dist/providers/amazonBedrock.js +60 -30
  94. package/dist/providers/anthropic.d.ts +14 -10
  95. package/dist/providers/anthropic.js +84 -154
  96. package/dist/providers/azureOpenAI.d.ts +9 -6
  97. package/dist/providers/azureOpenAI.js +70 -159
  98. package/dist/providers/function-calling-provider.d.ts +14 -12
  99. package/dist/providers/function-calling-provider.js +114 -64
  100. package/dist/providers/googleAIStudio.d.ts +12 -19
  101. package/dist/providers/googleAIStudio.js +65 -34
  102. package/dist/providers/googleVertexAI.d.ts +11 -15
  103. package/dist/providers/googleVertexAI.js +146 -118
  104. package/dist/providers/huggingFace.d.ts +10 -11
  105. package/dist/providers/huggingFace.js +61 -24
  106. package/dist/providers/mcp-provider.d.ts +13 -8
  107. package/dist/providers/mcp-provider.js +59 -18
  108. package/dist/providers/mistralAI.d.ts +14 -11
  109. package/dist/providers/mistralAI.js +60 -29
  110. package/dist/providers/ollama.d.ts +9 -8
  111. package/dist/providers/ollama.js +133 -90
  112. package/dist/providers/openAI.d.ts +11 -12
  113. package/dist/providers/openAI.js +132 -97
  114. package/dist/types/generate-types.d.ts +79 -0
  115. package/dist/types/generate-types.js +1 -0
  116. package/dist/types/stream-types.d.ts +83 -0
  117. package/dist/types/stream-types.js +1 -0
  118. package/dist/utils/providerUtils-fixed.js +1 -1
  119. package/dist/utils/streaming-utils.d.ts +14 -2
  120. package/dist/utils/streaming-utils.js +0 -3
  121. package/package.json +2 -3
  122. package/dist/cli/commands/agent-generate.d.ts +0 -1
  123. package/dist/cli/commands/agent-generate.js +0 -67
@@ -71,29 +71,29 @@ export class OpenAI {
     getModel() {
         return this.model;
     }
-    async streamText(optionsOrPrompt, analysisSchema) {
-        const functionTag = "OpenAI.streamText";
+    async generate(optionsOrPrompt, analysisSchema) {
+        const functionTag = "OpenAI.generate";
         const provider = "openai";
-        let chunkCount = 0;
+        const startTime = Date.now();
         try {
             // Parse parameters - support both string and options object
             const options = typeof optionsOrPrompt === "string"
                 ? { prompt: optionsOrPrompt }
                 : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
+            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
-            logger.debug(`[${functionTag}] Stream text started`, {
+            logger.debug(`[${functionTag}] Generate text started`, {
                 provider,
                 modelName: this.modelName,
-                promptLength: prompt.length,
+                promptLength: prompt?.length || 0,
                 temperature,
                 maxTokens,
                 timeout,
             });
             // Create timeout controller if timeout is specified
-            const timeoutController = createTimeoutController(timeout, provider, "stream");
-            const streamOptions = {
+            const timeoutController = createTimeoutController(timeout, provider, "generate");
+            const generateOptions = {
                 model: this.model,
                 prompt: prompt,
                 system: systemPrompt,
@@ -103,50 +103,51 @@ export class OpenAI {
                 ...(timeoutController && {
                     abortSignal: timeoutController.controller.signal,
                 }),
-                onError: (event) => {
-                    const error = event.error;
-                    const errorMessage = error instanceof Error ? error.message : String(error);
-                    const errorStack = error instanceof Error ? error.stack : undefined;
-                    logger.debug(`[${functionTag}] Stream text error`, {
-                        provider,
-                        modelName: this.modelName,
-                        error: errorMessage,
-                        stack: errorStack,
-                        promptLength: prompt.length,
-                        chunkCount,
-                    });
-                },
-                onFinish: (event) => {
-                    logger.debug(`[${functionTag}] Stream text finished`, {
-                        provider,
-                        modelName: this.modelName,
-                        finishReason: event.finishReason,
-                        usage: event.usage,
-                        totalChunks: chunkCount,
-                        promptLength: prompt.length,
-                        responseLength: event.text?.length || 0,
-                    });
-                },
-                onChunk: (event) => {
-                    chunkCount++;
-                    logger.debug(`[${functionTag}] Stream text chunk`, {
-                        provider,
-                        modelName: this.modelName,
-                        chunkNumber: chunkCount,
-                        chunkLength: event.chunk.text?.length || 0,
-                        chunkType: event.chunk.type,
-                    });
-                },
             };
             if (finalSchema) {
-                streamOptions.experimental_output = Output.object({
+                generateOptions.experimental_output = Output.object({
                     schema: finalSchema,
                 });
             }
-            const result = streamText(streamOptions);
-            // For streaming, we can't clean up immediately, but the timeout will auto-clean
-            // The user should handle the stream and any timeout errors
-            return result;
+            try {
+                const result = await generateText(generateOptions);
+                // Clean up timeout if successful
+                timeoutController?.cleanup();
+                logger.debug(`[${functionTag}] Generate text completed`, {
+                    provider,
+                    modelName: this.modelName,
+                    usage: result.usage,
+                    finishReason: result.finishReason,
+                    responseLength: result.text?.length || 0,
+                    timeout,
+                });
+                // Add analytics if enabled
+                if (options.enableAnalytics) {
+                    const { createAnalytics } = await import("./analytics-helper.js");
+                    result.analytics = createAnalytics(provider, this.modelName, result, Date.now() - startTime, options.context);
+                }
+                // Add evaluation if enabled
+                if (options.enableEvaluation) {
+                    result.evaluation = await evaluateResponse(prompt, result.text, options.context, options.evaluationDomain, options.toolUsageContext, options.conversationHistory);
+                }
+                return {
+                    content: result.text,
+                    provider: "openai",
+                    model: this.modelName,
+                    usage: result.usage
+                        ? {
+                            inputTokens: result.usage.promptTokens,
+                            outputTokens: result.usage.completionTokens,
+                            totalTokens: result.usage.totalTokens,
+                        }
+                        : undefined,
+                    responseTime: Date.now() - startTime,
+                };
+            }
+            finally {
+                // Always cleanup timeout
+                timeoutController?.cleanup();
+            }
         }
         catch (err) {
             // Log timeout errors specifically
@@ -162,36 +163,48 @@ export class OpenAI {
                 logger.debug(`[${functionTag}] Exception`, {
                     provider,
                     modelName: this.modelName,
-                    message: "Error in streaming text",
+                    message: "Error in generating text",
                    err: String(err),
                 });
             }
             throw err; // Re-throw error to trigger fallback
         }
     }
-    async generateText(optionsOrPrompt, analysisSchema) {
-        const functionTag = "OpenAI.generateText";
+    /**
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
+     */
+    async stream(optionsOrPrompt, analysisSchema) {
+        const functionTag = "OpenAI.stream";
         const provider = "openai";
+        let chunkCount = 0;
         const startTime = Date.now();
         try {
             // Parse parameters - support both string and options object
             const options = typeof optionsOrPrompt === "string"
-                ? { prompt: optionsOrPrompt }
+                ? { input: { text: optionsOrPrompt } }
                 : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
+            // Validate input
+            if (!options?.input?.text ||
+                typeof options.input.text !== "string" ||
+                options.input.text.trim() === "") {
+                throw new Error("Stream options must include input.text as a non-empty string");
+            }
+            // Convert to internal parameters
+            const { prompt = options.input.text, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
-            logger.debug(`[${functionTag}] Generate text started`, {
+            logger.debug(`[${functionTag}] Stream request started`, {
                 provider,
                 modelName: this.modelName,
-                promptLength: prompt.length,
+                promptLength: prompt?.length || 0,
                 temperature,
                 maxTokens,
                 timeout,
             });
             // Create timeout controller if timeout is specified
-            const timeoutController = createTimeoutController(timeout, provider, "generate");
-            const generateOptions = {
+            const timeoutController = createTimeoutController(timeout, provider, "stream");
+            const streamOptions = {
                 model: this.model,
                 prompt: prompt,
                 system: systemPrompt,
@@ -201,39 +214,70 @@ export class OpenAI {
                 ...(timeoutController && {
                     abortSignal: timeoutController.controller.signal,
                 }),
+                onError: (event) => {
+                    const error = event.error;
+                    const errorMessage = error instanceof Error ? error.message : String(error);
+                    const errorStack = error instanceof Error ? error.stack : undefined;
+                    logger.debug(`[${functionTag}] Stream error`, {
+                        provider,
+                        modelName: this.modelName,
+                        error: errorMessage,
+                        stack: errorStack,
+                        promptLength: prompt.length,
+                        chunkCount,
+                    });
+                },
+                onFinish: (event) => {
+                    logger.debug(`[${functionTag}] Stream finished`, {
+                        provider,
+                        modelName: this.modelName,
+                        finishReason: event.finishReason,
+                        usage: event.usage,
+                        totalChunks: chunkCount,
+                        promptLength: prompt.length,
+                        responseLength: event.text?.length || 0,
+                    });
+                },
+                onChunk: (event) => {
+                    chunkCount++;
+                    logger.debug(`[${functionTag}] Stream chunk`, {
+                        provider,
+                        modelName: this.modelName,
+                        chunkNumber: chunkCount,
+                        chunkLength: event.chunk.text?.length || 0,
+                        chunkType: event.chunk.type,
+                    });
+                },
             };
             if (finalSchema) {
-                generateOptions.experimental_output = Output.object({
+                streamOptions.experimental_output = Output.object({
                     schema: finalSchema,
                 });
             }
-            try {
-                const result = await generateText(generateOptions);
-                // Clean up timeout if successful
-                timeoutController?.cleanup();
-                logger.debug(`[${functionTag}] Generate text completed`, {
-                    provider,
-                    modelName: this.modelName,
-                    usage: result.usage,
-                    finishReason: result.finishReason,
-                    responseLength: result.text?.length || 0,
-                    timeout,
-                });
-                // Add analytics if enabled
-                if (options.enableAnalytics) {
-                    const { createAnalytics } = await import("./analytics-helper.js");
-                    result.analytics = createAnalytics(provider, this.modelName, result, Date.now() - startTime, options.context);
-                }
-                // Add evaluation if enabled
-                if (options.enableEvaluation) {
-                    result.evaluation = await evaluateResponse(prompt, result.text, options.context, options.evaluationDomain, options.toolUsageContext, options.conversationHistory);
-                }
-                return result;
-            }
-            finally {
-                // Always cleanup timeout
-                timeoutController?.cleanup();
-            }
+            const result = streamText(streamOptions);
+            logger.debug(`[${functionTag}] Stream request completed`, {
+                provider,
+                modelName: this.modelName,
+            });
+            // Convert to StreamResult format
+            return {
+                stream: result.textStream
+                    ? (async function* () {
+                        for await (const chunk of result.textStream) {
+                            yield { content: chunk };
+                        }
+                    })()
+                    : (async function* () {
+                        yield { content: "" };
+                        throw new Error("No textStream available from AI SDK");
+                    })(),
+                provider: "openai",
+                model: this.modelName,
+                metadata: {
+                    streamId: `openai-${Date.now()}`,
+                    startTime,
+                },
+            };
        }
        catch (err) {
            // Log timeout errors specifically
@@ -249,7 +293,7 @@ export class OpenAI {
                 logger.debug(`[${functionTag}] Exception`, {
                     provider,
                     modelName: this.modelName,
-                    message: "Error in generating text",
+                    message: "Error in streaming content",
                     err: String(err),
                 });
             }
@@ -257,21 +301,12 @@ export class OpenAI {
         }
     }
     /**
-     * Alias for generateText() - CLI-SDK consistency
-     * @param optionsOrPrompt - TextGenerationOptions object or prompt string
-     * @param analysisSchema - Optional schema for output validation
-     * @returns Promise resolving to GenerateTextResult or null
-     */
-    async generate(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
-    }
-    /**
-     * Short alias for generateText() - CLI-SDK consistency
+     * Short alias for generate() - CLI-SDK consistency
      * @param optionsOrPrompt - TextGenerationOptions object or prompt string
      * @param analysisSchema - Optional schema for output validation
-     * @returns Promise resolving to GenerateTextResult or null
+     * @returns Promise resolving to GenerateResult or null
      */
     async gen(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
+        return this.generate(optionsOrPrompt, analysisSchema);
     }
 }
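
Taken together, these hunks replace the 4.x `streamText()`/`generateText()` pair with `generate()` as the primary method: it still accepts either a prompt string or an options object, but now normalizes the raw AI SDK result into a `GenerateResult` shape (`content`, renamed `usage` token fields, `responseTime`) and always releases its timeout controller in a `finally` block. A minimal migration sketch follows; the import path and constructor arguments are not shown in this diff and are assumptions:

```typescript
// Hypothetical usage sketch based on the hunks above; the import path and
// constructor signature are assumptions, not confirmed by this diff.
import { OpenAI } from "@juspay/neurolink";

async function main() {
  const provider = new OpenAI();

  // 4.x: const result = await provider.generateText("..."); result.text
  // 5.x: generate() returns a normalized result instead of the raw AI SDK object.
  const result = await provider.generate({
    prompt: "Summarize the 5.1.0 release in one sentence",
    temperature: 0.7,
    maxTokens: 128,
  });

  console.log(result.content);              // was result.text in 4.x
  console.log(result.usage?.totalTokens);   // promptTokens/completionTokens -> input/outputTokens
  console.log(`${result.responseTime} ms`); // new timing field
}

main().catch(console.error);
```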
@@ -0,0 +1,79 @@
+import type { ZodType, ZodTypeDef } from "zod";
+import type { Tool, Schema } from "ai";
+import type { AIProviderName, AnalyticsData, EvaluationData } from "../core/types.js";
+/**
+ * Generate function options interface - Primary method for content generation
+ * Future-ready for multi-modal capabilities while maintaining text focus
+ */
+export interface GenerateOptions {
+    input: {
+        text: string;
+    };
+    output?: {
+        format?: "text" | "structured" | "json";
+    };
+    provider?: AIProviderName | string;
+    model?: string;
+    temperature?: number;
+    maxTokens?: number;
+    systemPrompt?: string;
+    schema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>;
+    tools?: Record<string, Tool>;
+    timeout?: number | string;
+    disableTools?: boolean;
+    enableEvaluation?: boolean;
+    enableAnalytics?: boolean;
+    context?: Record<string, any>;
+    evaluationDomain?: string;
+    toolUsageContext?: string;
+    conversationHistory?: Array<{
+        role: string;
+        content: string;
+    }>;
+}
+/**
+ * Generate function result interface - Primary output format
+ * Future-ready for multi-modal outputs while maintaining text focus
+ */
+export interface GenerateResult {
+    content: string;
+    outputs?: {
+        text: string;
+    };
+    provider?: string;
+    model?: string;
+    usage?: {
+        inputTokens: number;
+        outputTokens: number;
+        totalTokens: number;
+    };
+    responseTime?: number;
+    toolCalls?: Array<{
+        toolCallId: string;
+        toolName: string;
+        args: Record<string, any>;
+    }>;
+    toolsUsed?: string[];
+    toolExecutions?: Array<{
+        name: string;
+        input: Record<string, any>;
+        output: any;
+        duration: number;
+    }>;
+    enhancedWithTools?: boolean;
+    availableTools?: Array<{
+        name: string;
+        description: string;
+        parameters: Record<string, any>;
+    }>;
+    analytics?: AnalyticsData;
+    evaluation?: EvaluationData;
+}
+/**
+ * Enhanced provider interface with generate method
+ */
+export interface EnhancedProvider {
+    generate(options: GenerateOptions): Promise<GenerateResult>;
+    getName(): string;
+    isAvailable(): Promise<boolean>;
+}
@@ -0,0 +1 @@
+export {};
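
These 79 added lines correspond to the new `generate-types.d.ts` entries in the file list above. For orientation, here is a minimal object satisfying `GenerateOptions`; the import path assumes the type is re-exported from the package root, which the diff (it touches `dist/index.d.ts` without showing the exports) does not confirm:

```typescript
// Sketch only; the root re-export of GenerateOptions is an assumption.
import type { GenerateOptions } from "@juspay/neurolink";

const options: GenerateOptions = {
  input: { text: "Explain MCP tool discovery in one paragraph" }, // only required field
  provider: "openai",     // optional; any AIProviderName or string
  temperature: 0.2,
  maxTokens: 512,
  enableAnalytics: true,  // populates GenerateResult.analytics
  enableEvaluation: false,
};
```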
@@ -0,0 +1,83 @@
+import type { ZodType, ZodTypeDef } from "zod";
+import type { Tool, Schema } from "ai";
+import type { AIProviderName, AnalyticsData, EvaluationData } from "../core/types.js";
+/**
+ * Stream function options interface - Primary method for streaming content
+ * Future-ready for multi-modal capabilities while maintaining text focus
+ */
+export interface StreamOptions {
+    input: {
+        text: string;
+    };
+    output?: {
+        format?: "text" | "structured" | "json";
+        streaming?: {
+            chunkSize?: number;
+            bufferSize?: number;
+            enableProgress?: boolean;
+        };
+    };
+    provider?: AIProviderName | string;
+    model?: string;
+    temperature?: number;
+    maxTokens?: number;
+    systemPrompt?: string;
+    schema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>;
+    tools?: Record<string, Tool>;
+    timeout?: number | string;
+    disableTools?: boolean;
+    enableEvaluation?: boolean;
+    enableAnalytics?: boolean;
+    context?: Record<string, any>;
+    evaluationDomain?: string;
+    toolUsageContext?: string;
+    conversationHistory?: Array<{
+        role: string;
+        content: string;
+    }>;
+}
+/**
+ * Stream function result interface - Primary output format for streaming
+ * Future-ready for multi-modal outputs while maintaining text focus
+ */
+export interface StreamResult {
+    stream: AsyncIterable<{
+        content: string;
+    }>;
+    provider?: string;
+    model?: string;
+    metadata?: {
+        streamId?: string;
+        startTime?: number;
+        totalChunks?: number;
+        estimatedDuration?: number;
+    };
+    toolCalls?: Array<{
+        toolCallId: string;
+        toolName: string;
+        args: Record<string, any>;
+    }>;
+    toolsUsed?: string[];
+    toolExecutions?: Array<{
+        name: string;
+        input: Record<string, any>;
+        output: any;
+        duration: number;
+    }>;
+    enhancedWithTools?: boolean;
+    availableTools?: Array<{
+        name: string;
+        description: string;
+        parameters: Record<string, any>;
+    }>;
+    analytics?: AnalyticsData;
+    evaluation?: EvaluationData;
+}
+/**
+ * Enhanced provider interface with stream method
+ */
+export interface EnhancedStreamProvider {
+    stream(options: StreamOptions): Promise<StreamResult>;
+    getName(): string;
+    isAvailable(): Promise<boolean>;
+}
@@ -0,0 +1 @@
+export {};
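
This is the new `stream-types.d.ts` from the file list. Unlike the old `streamText()` path, which returned the raw AI SDK result, `StreamResult.stream` is a plain `AsyncIterable` of `{ content: string }` chunks, so consumption is a `for await` loop. A short sketch, again assuming a root re-export that the diff does not confirm:

```typescript
// Sketch only; the root re-export of StreamResult is an assumption.
import type { StreamResult } from "@juspay/neurolink";

async function printStream(result: StreamResult): Promise<void> {
  // Each chunk carries its text in `content`, matching the adapter
  // around result.textStream in the openAI.js hunks above.
  for await (const chunk of result.stream) {
    process.stdout.write(chunk.content);
  }
  console.log(`\n[stream ${result.metadata?.streamId ?? "?"} done]`);
}
```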
@@ -85,7 +85,7 @@ async function isProviderAvailable(providerName) {
     }
     try {
         const provider = await AIProviderFactory.createProvider(providerName);
-        await provider.generateText({ prompt: "test", maxTokens: 1 });
+        await provider.generate({ prompt: "test", maxTokens: 1 });
         return true;
     }
     catch (error) {
@@ -2,7 +2,7 @@
  * Phase 2: Enhanced Streaming Infrastructure
  * Streaming utilities for progress tracking and metadata enhancement
  */
-import type { StreamingProgressData, StreamingMetadata, ProgressCallback, EnhancedStreamTextOptions } from "../core/types.js";
+import type { StreamingProgressData, StreamingMetadata, ProgressCallback } from "../core/types.js";
 export interface UIProgressHandler {
     onProgress: (progress: StreamingProgressData) => void;
     onComplete: (metadata: StreamingMetadata) => void;
@@ -19,6 +19,18 @@ export interface StreamingStats {
 /**
  * Enhanced streaming utilities for progress tracking and metadata
  */
+export interface StreamingConfigOptions {
+    enableProgressTracking?: boolean;
+    progressCallback?: ProgressCallback;
+    includeStreamingMetadata?: boolean;
+    streamingBufferSize?: number;
+    enableStreamingHeaders?: boolean;
+}
+/**
+ * Legacy interface for backward compatibility
+ */
+export interface EnhancedStreamTextOptions extends StreamingConfigOptions {
+}
 export declare class StreamingEnhancer {
     /**
      * Add progress tracking to a readable stream
@@ -42,7 +54,7 @@ export declare class StreamingEnhancer {
     /**
      * Create enhanced streaming configuration
      */
-    static createStreamingConfig(options: EnhancedStreamTextOptions): {
+    static createStreamingConfig(options: StreamingConfigOptions | EnhancedStreamTextOptions): {
         progressTracking: boolean;
         callback?: ProgressCallback;
         metadata: boolean;
@@ -2,9 +2,6 @@
  * Phase 2: Enhanced Streaming Infrastructure
  * Streaming utilities for progress tracking and metadata enhancement
  */
-/**
- * Enhanced streaming utilities for progress tracking and metadata
- */
 export class StreamingEnhancer {
     /**
      * Add progress tracking to a readable stream
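
These hunks move the streaming knobs into a standalone `StreamingConfigOptions` interface and keep `EnhancedStreamTextOptions` as an empty extension of it, so existing callers of `createStreamingConfig` keep compiling. A sketch of the new call style; whether `StreamingEnhancer` is importable from the package root is an assumption:

```typescript
// Sketch only; the import path is an assumption, not confirmed by this diff.
import { StreamingEnhancer, type StreamingConfigOptions } from "@juspay/neurolink";

const config: StreamingConfigOptions = {
  enableProgressTracking: true,
  includeStreamingMetadata: true,
  streamingBufferSize: 1024,
};

// createStreamingConfig accepts either the new options type or the legacy
// EnhancedStreamTextOptions alias, per the updated .d.ts signature above.
const resolved = StreamingEnhancer.createStreamingConfig(config);
console.log(resolved.progressTracking, resolved.metadata);
```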
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@juspay/neurolink",
-  "version": "4.2.0",
+  "version": "5.1.0",
   "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
   "author": {
     "name": "Juspay Technologies",
@@ -71,9 +71,8 @@
     "content:cleanup": "node tools/converted-scripts/cleanup-hash-named-videos.js",
     "content:all": "pnpm run content:screenshots && pnpm run content:videos",
     "// Documentation Automation": "",
-    "docs:sync": "node tools/content/documentation-sync.js",
     "docs:validate": "node tools/content/documentation-sync.js --validate",
-    "docs:generate": "pnpm run docs:sync && pnpm run content:screenshots",
+    "docs:generate": "pnpm run docs:validate && pnpm run content:screenshots",
     "// Development & Monitoring": "",
     "dev:full": "node tools/development/dev-server.js",
     "dev:health": "node tools/development/health-monitor.js",
@@ -1 +0,0 @@
-export declare function agentGenerateCommand(cli: any): void;
@@ -1,67 +0,0 @@
-import { AgentEnhancedProvider } from "../../lib/providers/agent-enhanced-provider.js";
-import ora from "ora";
-import chalk from "chalk";
-export function agentGenerateCommand(cli) {
-    cli.command("agent-generate <prompt>", "Generate text with agent capabilities (tool calling)", (yargs) => yargs
-        .positional("prompt", {
-            describe: "The prompt for the agent",
-            type: "string",
-        })
-        .option("provider", {
-            alias: "p",
-            describe: "The AI provider to use",
-            type: "string",
-            choices: ["google-ai", "openai", "anthropic"],
-            default: "google-ai",
-        })
-        .option("model", {
-            alias: "m",
-            describe: "The model to use",
-            type: "string",
-        })
-        .option("toolCategory", {
-            alias: "t",
-            describe: "The category of tools to use",
-            type: "string",
-            choices: ["basic", "filesystem", "utility", "all"],
-            default: "all",
-        }), async (argv) => {
-        const { prompt, provider, model, toolCategory } = argv;
-        const spinner = ora(`Generating response with ${provider} agent...`).start();
-        try {
-            const agentProvider = new AgentEnhancedProvider({
-                provider,
-                model,
-                toolCategory,
-            });
-            const result = await agentProvider.generateText(prompt);
-            if (result) {
-                spinner.succeed("Response generated successfully!");
-                console.log(chalk.green("\nAI Response:"));
-                console.log(result.text);
-                if (result.toolCalls && result.toolCalls.length > 0) {
-                    console.log(chalk.yellow("\nTools Called:"));
-                    for (const call of result.toolCalls) {
-                        console.log(`- ${call.toolName}`);
-                        console.log(`  Args: ${JSON.stringify(call.args)}`);
-                    }
-                }
-                if (result.toolResults && result.toolResults.length > 0) {
-                    console.log(chalk.blue("\nTool Results:"));
-                    for (const toolResult of result.toolResults) {
-                        console.log(`- ${toolResult.toolName}`);
-                        console.log(`  Result: ${JSON.stringify(toolResult.result)}`);
-                    }
-                }
-            }
-            else {
-                spinner.fail("Failed to generate response.");
-            }
-        }
-        catch (error) {
-            spinner.fail("An error occurred during generation.");
-            console.error(chalk.red(error));
-            process.exit(1);
-        }
-    });
-}