@juspay/neurolink 4.2.0 → 5.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +47 -2
- package/README.md +51 -60
- package/dist/chat/sse-handler.js +5 -4
- package/dist/chat/websocket-chat-handler.js +9 -9
- package/dist/cli/commands/mcp.js +1 -1
- package/dist/cli/commands/ollama.js +3 -3
- package/dist/cli/factories/command-factory.d.ts +14 -0
- package/dist/cli/factories/command-factory.js +129 -0
- package/dist/cli/index.js +27 -29
- package/dist/cli/utils/interactive-setup.js +2 -2
- package/dist/core/evaluation.d.ts +9 -9
- package/dist/core/evaluation.js +14 -14
- package/dist/core/types.d.ts +41 -48
- package/dist/core/types.js +1 -0
- package/dist/factories/compatibility-factory.d.ts +20 -0
- package/dist/factories/compatibility-factory.js +69 -0
- package/dist/factories/provider-generate-factory.d.ts +20 -0
- package/dist/factories/provider-generate-factory.js +87 -0
- package/dist/index.d.ts +4 -2
- package/dist/index.js +3 -1
- package/dist/lib/chat/sse-handler.js +5 -4
- package/dist/lib/chat/websocket-chat-handler.js +9 -9
- package/dist/lib/core/evaluation.d.ts +9 -9
- package/dist/lib/core/evaluation.js +14 -14
- package/dist/lib/core/types.d.ts +41 -48
- package/dist/lib/core/types.js +1 -0
- package/dist/lib/factories/compatibility-factory.d.ts +20 -0
- package/dist/lib/factories/compatibility-factory.js +69 -0
- package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
- package/dist/lib/factories/provider-generate-factory.js +87 -0
- package/dist/lib/index.d.ts +4 -2
- package/dist/lib/index.js +3 -1
- package/dist/lib/mcp/client.js +5 -5
- package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
- package/dist/lib/mcp/external-client.js +2 -2
- package/dist/lib/mcp/factory.d.ts +1 -1
- package/dist/lib/mcp/factory.js +1 -1
- package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
- package/dist/lib/mcp/orchestrator.js +4 -4
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +5 -5
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
- package/dist/lib/neurolink.d.ts +21 -73
- package/dist/lib/neurolink.js +230 -119
- package/dist/lib/providers/agent-enhanced-provider.d.ts +12 -8
- package/dist/lib/providers/agent-enhanced-provider.js +87 -96
- package/dist/lib/providers/amazonBedrock.d.ts +17 -8
- package/dist/lib/providers/amazonBedrock.js +60 -30
- package/dist/lib/providers/anthropic.d.ts +14 -10
- package/dist/lib/providers/anthropic.js +84 -154
- package/dist/lib/providers/azureOpenAI.d.ts +9 -6
- package/dist/lib/providers/azureOpenAI.js +70 -159
- package/dist/lib/providers/function-calling-provider.d.ts +14 -12
- package/dist/lib/providers/function-calling-provider.js +114 -64
- package/dist/lib/providers/googleAIStudio.d.ts +12 -19
- package/dist/lib/providers/googleAIStudio.js +65 -34
- package/dist/lib/providers/googleVertexAI.d.ts +11 -15
- package/dist/lib/providers/googleVertexAI.js +146 -118
- package/dist/lib/providers/huggingFace.d.ts +10 -11
- package/dist/lib/providers/huggingFace.js +61 -24
- package/dist/lib/providers/mcp-provider.d.ts +13 -8
- package/dist/lib/providers/mcp-provider.js +59 -18
- package/dist/lib/providers/mistralAI.d.ts +14 -11
- package/dist/lib/providers/mistralAI.js +60 -29
- package/dist/lib/providers/ollama.d.ts +9 -8
- package/dist/lib/providers/ollama.js +134 -91
- package/dist/lib/providers/openAI.d.ts +11 -12
- package/dist/lib/providers/openAI.js +132 -97
- package/dist/lib/types/generate-types.d.ts +79 -0
- package/dist/lib/types/generate-types.js +1 -0
- package/dist/lib/types/stream-types.d.ts +83 -0
- package/dist/lib/types/stream-types.js +1 -0
- package/dist/lib/utils/providerUtils-fixed.js +1 -1
- package/dist/lib/utils/streaming-utils.d.ts +14 -2
- package/dist/lib/utils/streaming-utils.js +0 -3
- package/dist/mcp/client.js +5 -5
- package/dist/mcp/dynamic-orchestrator.js +8 -8
- package/dist/mcp/external-client.js +2 -2
- package/dist/mcp/factory.d.ts +1 -1
- package/dist/mcp/factory.js +1 -1
- package/dist/mcp/neurolink-mcp-client.js +10 -10
- package/dist/mcp/orchestrator.js +4 -4
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
- package/dist/mcp/servers/ai-providers/ai-core-server.js +5 -5
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
- package/dist/neurolink.d.ts +21 -73
- package/dist/neurolink.js +230 -119
- package/dist/providers/agent-enhanced-provider.d.ts +12 -8
- package/dist/providers/agent-enhanced-provider.js +87 -95
- package/dist/providers/amazonBedrock.d.ts +17 -8
- package/dist/providers/amazonBedrock.js +60 -30
- package/dist/providers/anthropic.d.ts +14 -10
- package/dist/providers/anthropic.js +84 -154
- package/dist/providers/azureOpenAI.d.ts +9 -6
- package/dist/providers/azureOpenAI.js +70 -159
- package/dist/providers/function-calling-provider.d.ts +14 -12
- package/dist/providers/function-calling-provider.js +114 -64
- package/dist/providers/googleAIStudio.d.ts +12 -19
- package/dist/providers/googleAIStudio.js +65 -34
- package/dist/providers/googleVertexAI.d.ts +11 -15
- package/dist/providers/googleVertexAI.js +146 -118
- package/dist/providers/huggingFace.d.ts +10 -11
- package/dist/providers/huggingFace.js +61 -24
- package/dist/providers/mcp-provider.d.ts +13 -8
- package/dist/providers/mcp-provider.js +59 -18
- package/dist/providers/mistralAI.d.ts +14 -11
- package/dist/providers/mistralAI.js +60 -29
- package/dist/providers/ollama.d.ts +9 -8
- package/dist/providers/ollama.js +133 -90
- package/dist/providers/openAI.d.ts +11 -12
- package/dist/providers/openAI.js +132 -97
- package/dist/types/generate-types.d.ts +79 -0
- package/dist/types/generate-types.js +1 -0
- package/dist/types/stream-types.d.ts +83 -0
- package/dist/types/stream-types.js +1 -0
- package/dist/utils/providerUtils-fixed.js +1 -1
- package/dist/utils/streaming-utils.d.ts +14 -2
- package/dist/utils/streaming-utils.js +0 -3
- package/package.json +2 -3
- package/dist/cli/commands/agent-generate.d.ts +0 -1
- package/dist/cli/commands/agent-generate.js +0 -67
package/dist/providers/openAI.js
CHANGED
@@ -71,29 +71,29 @@ export class OpenAI {
     getModel() {
         return this.model;
     }
-    async generateText(optionsOrPrompt, analysisSchema) {
-        const functionTag = "OpenAI.generateText";
+    async generate(optionsOrPrompt, analysisSchema) {
+        const functionTag = "OpenAI.generate";
         const provider = "openai";
-        let chunkCount = 0;
+        const startTime = Date.now();
         try {
             // Parse parameters - support both string and options object
             const options = typeof optionsOrPrompt === "string"
                 ? { prompt: optionsOrPrompt }
                 : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generateText"), } = options;
+            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
-            logger.debug(`[${functionTag}] …`, {
+            logger.debug(`[${functionTag}] Generate text started`, {
                 provider,
                 modelName: this.modelName,
-                promptLength: prompt.length,
+                promptLength: prompt?.length || 0,
                 temperature,
                 maxTokens,
                 timeout,
             });
             // Create timeout controller if timeout is specified
-            const timeoutController = createTimeoutController(timeout, provider, "generateText");
-            const …
+            const timeoutController = createTimeoutController(timeout, provider, "generate");
+            const generateOptions = {
                 model: this.model,
                 prompt: prompt,
                 system: systemPrompt,
@@ -103,50 +103,51 @@ export class OpenAI {
                 ...(timeoutController && {
                     abortSignal: timeoutController.controller.signal,
                 }),
-                onError: (event) => {
-                    const error = event.error;
-                    const errorMessage = error instanceof Error ? error.message : String(error);
-                    const errorStack = error instanceof Error ? error.stack : undefined;
-                    logger.debug(`[${functionTag}] Stream text error`, {
-                        provider,
-                        modelName: this.modelName,
-                        error: errorMessage,
-                        stack: errorStack,
-                        promptLength: prompt.length,
-                        chunkCount,
-                    });
-                },
-                onFinish: (event) => {
-                    logger.debug(`[${functionTag}] Stream text finished`, {
-                        provider,
-                        modelName: this.modelName,
-                        finishReason: event.finishReason,
-                        usage: event.usage,
-                        totalChunks: chunkCount,
-                        promptLength: prompt.length,
-                        responseLength: event.text?.length || 0,
-                    });
-                },
-                onChunk: (event) => {
-                    chunkCount++;
-                    logger.debug(`[${functionTag}] Stream text chunk`, {
-                        provider,
-                        modelName: this.modelName,
-                        chunkNumber: chunkCount,
-                        chunkLength: event.chunk.text?.length || 0,
-                        chunkType: event.chunk.type,
-                    });
-                },
             };
             if (finalSchema) {
-                …
+                generateOptions.experimental_output = Output.object({
                     schema: finalSchema,
                 });
             }
-            …
+            try {
+                const result = await generateText(generateOptions);
+                // Clean up timeout if successful
+                timeoutController?.cleanup();
+                logger.debug(`[${functionTag}] Generate text completed`, {
+                    provider,
+                    modelName: this.modelName,
+                    usage: result.usage,
+                    finishReason: result.finishReason,
+                    responseLength: result.text?.length || 0,
+                    timeout,
+                });
+                // Add analytics if enabled
+                if (options.enableAnalytics) {
+                    const { createAnalytics } = await import("./analytics-helper.js");
+                    result.analytics = createAnalytics(provider, this.modelName, result, Date.now() - startTime, options.context);
+                }
+                // Add evaluation if enabled
+                if (options.enableEvaluation) {
+                    result.evaluation = await evaluateResponse(prompt, result.text, options.context, options.evaluationDomain, options.toolUsageContext, options.conversationHistory);
+                }
+                return {
+                    content: result.text,
+                    provider: "openai",
+                    model: this.modelName,
+                    usage: result.usage
+                        ? {
+                            inputTokens: result.usage.promptTokens,
+                            outputTokens: result.usage.completionTokens,
+                            totalTokens: result.usage.totalTokens,
+                        }
+                        : undefined,
+                    responseTime: Date.now() - startTime,
+                };
+            }
+            finally {
+                // Always cleanup timeout
+                timeoutController?.cleanup();
+            }
        }
        catch (err) {
            // Log timeout errors specifically
@@ -162,36 +163,48 @@ export class OpenAI {
                logger.debug(`[${functionTag}] Exception`, {
                    provider,
                    modelName: this.modelName,
-                    message: "Error in …
+                    message: "Error in generating text",
                    err: String(err),
                });
            }
            throw err; // Re-throw error to trigger fallback
        }
    }
-    …
+    /**
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
+     */
+    async stream(optionsOrPrompt, analysisSchema) {
+        const functionTag = "OpenAI.stream";
        const provider = "openai";
+        let chunkCount = 0;
        const startTime = Date.now();
        try {
            // Parse parameters - support both string and options object
            const options = typeof optionsOrPrompt === "string"
-                ? { …
+                ? { input: { text: optionsOrPrompt } }
                : optionsOrPrompt;
-            …
+            // Validate input
+            if (!options?.input?.text ||
+                typeof options.input.text !== "string" ||
+                options.input.text.trim() === "") {
+                throw new Error("Stream options must include input.text as a non-empty string");
+            }
+            // Convert to internal parameters
+            const { prompt = options.input.text, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
            // Use schema from options or fallback parameter
            const finalSchema = schema || analysisSchema;
-            logger.debug(`[${functionTag}] …`, {
+            logger.debug(`[${functionTag}] Stream request started`, {
                provider,
                modelName: this.modelName,
-                promptLength: prompt.length,
+                promptLength: prompt?.length || 0,
                temperature,
                maxTokens,
                timeout,
            });
            // Create timeout controller if timeout is specified
-            const timeoutController = createTimeoutController(timeout, provider, "…
-            const …
+            const timeoutController = createTimeoutController(timeout, provider, "stream");
+            const streamOptions = {
                model: this.model,
                prompt: prompt,
                system: systemPrompt,
@@ -201,39 +214,70 @@ export class OpenAI {
                ...(timeoutController && {
                    abortSignal: timeoutController.controller.signal,
                }),
+                onError: (event) => {
+                    const error = event.error;
+                    const errorMessage = error instanceof Error ? error.message : String(error);
+                    const errorStack = error instanceof Error ? error.stack : undefined;
+                    logger.debug(`[${functionTag}] Stream error`, {
+                        provider,
+                        modelName: this.modelName,
+                        error: errorMessage,
+                        stack: errorStack,
+                        promptLength: prompt.length,
+                        chunkCount,
+                    });
+                },
+                onFinish: (event) => {
+                    logger.debug(`[${functionTag}] Stream finished`, {
+                        provider,
+                        modelName: this.modelName,
+                        finishReason: event.finishReason,
+                        usage: event.usage,
+                        totalChunks: chunkCount,
+                        promptLength: prompt.length,
+                        responseLength: event.text?.length || 0,
+                    });
+                },
+                onChunk: (event) => {
+                    chunkCount++;
+                    logger.debug(`[${functionTag}] Stream chunk`, {
+                        provider,
+                        modelName: this.modelName,
+                        chunkNumber: chunkCount,
+                        chunkLength: event.chunk.text?.length || 0,
+                        chunkType: event.chunk.type,
+                    });
+                },
            };
            if (finalSchema) {
-                …
+                streamOptions.experimental_output = Output.object({
                    schema: finalSchema,
                });
            }
-            …
-                // Always cleanup timeout
-                timeoutController?.cleanup();
-            }
+            const result = streamText(streamOptions);
+            logger.debug(`[${functionTag}] Stream request completed`, {
+                provider,
+                modelName: this.modelName,
+            });
+            // Convert to StreamResult format
+            return {
+                stream: result.textStream
+                    ? (async function* () {
+                        for await (const chunk of result.textStream) {
+                            yield { content: chunk };
+                        }
+                    })()
+                    : (async function* () {
+                        yield { content: "" };
+                        throw new Error("No textStream available from AI SDK");
+                    })(),
+                provider: "openai",
+                model: this.modelName,
+                metadata: {
+                    streamId: `openai-${Date.now()}`,
+                    startTime,
+                },
+            };
        }
        catch (err) {
            // Log timeout errors specifically
@@ -249,7 +293,7 @@ export class OpenAI {
                logger.debug(`[${functionTag}] Exception`, {
                    provider,
                    modelName: this.modelName,
-                    message: "Error in …
+                    message: "Error in streaming content",
                    err: String(err),
                });
            }
@@ -257,21 +301,12 @@ export class OpenAI {
        }
    }
    /**
-     * …
-     * @param optionsOrPrompt - TextGenerationOptions object or prompt string
-     * @param analysisSchema - Optional schema for output validation
-     * @returns Promise resolving to GenerateTextResult or null
-     */
-    async generate(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
-    }
-    /**
-     * Short alias for generateText() - CLI-SDK consistency
+     * Short alias for generate() - CLI-SDK consistency
     * @param optionsOrPrompt - TextGenerationOptions object or prompt string
     * @param analysisSchema - Optional schema for output validation
-     * @returns Promise resolving to …
+     * @returns Promise resolving to GenerateResult or null
     */
    async gen(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
+        return this.generate(optionsOrPrompt, analysisSchema);
    }
}
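Net effect of the openAI.js changes: the 4.x `generateText()` entry point becomes `generate()` (the `gen()` shorthand remains), `generate()` now wraps the AI SDK call in a try/finally that always clears the timeout controller, and a new `stream()` method validates an `{ input: { text } }` payload before delegating to `streamText`. A minimal consumer sketch of the 5.x surface; the provider shape below is abbreviated from this diff, not taken from the package's exported typings:

// Provider shape abbreviated from the diff above (illustrative, not the exported typings).
type OpenAIProviderLike = {
  generate(o: { prompt: string; maxTokens?: number }): Promise<{ content: string }>;
  stream(o: { input: { text: string } }): Promise<{ stream: AsyncIterable<{ content: string }> }>;
};

async function demo(provider: OpenAIProviderLike): Promise<void> {
  // 4.x equivalent was: await provider.generateText({ prompt: "Hello" });
  const result = await provider.generate({ prompt: "Hello", maxTokens: 100 });
  console.log(result.content);

  // New in 5.x: stream() throws if input.text is missing, non-string, or empty
  const { stream } = await provider.stream({ input: { text: "Tell me a story" } });
  for await (const chunk of stream) {
    process.stdout.write(chunk.content);
  }
}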
package/dist/types/generate-types.d.ts
ADDED
@@ -0,0 +1,79 @@
+import type { ZodType, ZodTypeDef } from "zod";
+import type { Tool, Schema } from "ai";
+import type { AIProviderName, AnalyticsData, EvaluationData } from "../core/types.js";
+/**
+ * Generate function options interface - Primary method for content generation
+ * Future-ready for multi-modal capabilities while maintaining text focus
+ */
+export interface GenerateOptions {
+    input: {
+        text: string;
+    };
+    output?: {
+        format?: "text" | "structured" | "json";
+    };
+    provider?: AIProviderName | string;
+    model?: string;
+    temperature?: number;
+    maxTokens?: number;
+    systemPrompt?: string;
+    schema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>;
+    tools?: Record<string, Tool>;
+    timeout?: number | string;
+    disableTools?: boolean;
+    enableEvaluation?: boolean;
+    enableAnalytics?: boolean;
+    context?: Record<string, any>;
+    evaluationDomain?: string;
+    toolUsageContext?: string;
+    conversationHistory?: Array<{
+        role: string;
+        content: string;
+    }>;
+}
+/**
+ * Generate function result interface - Primary output format
+ * Future-ready for multi-modal outputs while maintaining text focus
+ */
+export interface GenerateResult {
+    content: string;
+    outputs?: {
+        text: string;
+    };
+    provider?: string;
+    model?: string;
+    usage?: {
+        inputTokens: number;
+        outputTokens: number;
+        totalTokens: number;
+    };
+    responseTime?: number;
+    toolCalls?: Array<{
+        toolCallId: string;
+        toolName: string;
+        args: Record<string, any>;
+    }>;
+    toolsUsed?: string[];
+    toolExecutions?: Array<{
+        name: string;
+        input: Record<string, any>;
+        output: any;
+        duration: number;
+    }>;
+    enhancedWithTools?: boolean;
+    availableTools?: Array<{
+        name: string;
+        description: string;
+        parameters: Record<string, any>;
+    }>;
+    analytics?: AnalyticsData;
+    evaluation?: EvaluationData;
+}
+/**
+ * Enhanced provider interface with generate method
+ */
+export interface EnhancedProvider {
+    generate(options: GenerateOptions): Promise<GenerateResult>;
+    getName(): string;
+    isAvailable(): Promise<boolean>;
+}
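The added generate-types.d.ts pins down the option and result contract that `generate()` implements. A sketch of a conforming call site; the root-level import is an assumption (the declarations ship in dist/types/, and index.d.ts also gained exports in this release):

import type { GenerateOptions, GenerateResult } from "@juspay/neurolink"; // assumed export path

const options: GenerateOptions = {
  input: { text: "Summarize this changelog" }, // input.text is the only required field
  provider: "openai",       // AIProviderName | string
  temperature: 0.3,
  maxTokens: 256,
  enableAnalytics: true,    // populates result.analytics (AnalyticsData)
  enableEvaluation: true,   // populates result.evaluation (EvaluationData)
};

function summarize(result: GenerateResult): string {
  const tokens = result.usage ? `${result.usage.totalTokens} tokens` : "no usage data";
  return `${result.provider}/${result.model}: ${result.content.length} chars, ${tokens}`;
}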
package/dist/types/generate-types.js
ADDED
@@ -0,0 +1 @@
+export {};
package/dist/types/stream-types.d.ts
ADDED
@@ -0,0 +1,83 @@
+import type { ZodType, ZodTypeDef } from "zod";
+import type { Tool, Schema } from "ai";
+import type { AIProviderName, AnalyticsData, EvaluationData } from "../core/types.js";
+/**
+ * Stream function options interface - Primary method for streaming content
+ * Future-ready for multi-modal capabilities while maintaining text focus
+ */
+export interface StreamOptions {
+    input: {
+        text: string;
+    };
+    output?: {
+        format?: "text" | "structured" | "json";
+        streaming?: {
+            chunkSize?: number;
+            bufferSize?: number;
+            enableProgress?: boolean;
+        };
+    };
+    provider?: AIProviderName | string;
+    model?: string;
+    temperature?: number;
+    maxTokens?: number;
+    systemPrompt?: string;
+    schema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>;
+    tools?: Record<string, Tool>;
+    timeout?: number | string;
+    disableTools?: boolean;
+    enableEvaluation?: boolean;
+    enableAnalytics?: boolean;
+    context?: Record<string, any>;
+    evaluationDomain?: string;
+    toolUsageContext?: string;
+    conversationHistory?: Array<{
+        role: string;
+        content: string;
+    }>;
+}
+/**
+ * Stream function result interface - Primary output format for streaming
+ * Future-ready for multi-modal outputs while maintaining text focus
+ */
+export interface StreamResult {
+    stream: AsyncIterable<{
+        content: string;
+    }>;
+    provider?: string;
+    model?: string;
+    metadata?: {
+        streamId?: string;
+        startTime?: number;
+        totalChunks?: number;
+        estimatedDuration?: number;
+    };
+    toolCalls?: Array<{
+        toolCallId: string;
+        toolName: string;
+        args: Record<string, any>;
+    }>;
+    toolsUsed?: string[];
+    toolExecutions?: Array<{
+        name: string;
+        input: Record<string, any>;
+        output: any;
+        duration: number;
+    }>;
+    enhancedWithTools?: boolean;
+    availableTools?: Array<{
+        name: string;
+        description: string;
+        parameters: Record<string, any>;
+    }>;
+    analytics?: AnalyticsData;
+    evaluation?: EvaluationData;
+}
+/**
+ * Enhanced provider interface with stream method
+ */
+export interface EnhancedStreamProvider {
+    stream(options: StreamOptions): Promise<StreamResult>;
+    getName(): string;
+    isAvailable(): Promise<boolean>;
+}
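stream-types.d.ts mirrors the generate contract for streaming: `StreamResult.stream` is an `AsyncIterable` of `{ content }` chunks, matching what the provider's `stream()` method yields. A sketch of draining such a result; the import path is again an assumption:

import type { StreamResult } from "@juspay/neurolink"; // assumed export path

// Drain a StreamResult per the declaration above: stream is an
// AsyncIterable of { content: string }; metadata fields are optional.
async function collect(result: StreamResult): Promise<string> {
  let text = "";
  for await (const chunk of result.stream) {
    text += chunk.content;
  }
  const elapsedMs = result.metadata?.startTime !== undefined
    ? Date.now() - result.metadata.startTime
    : undefined;
  console.log(`stream ${result.metadata?.streamId ?? "unknown"} finished`, { elapsedMs });
  return text;
}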
package/dist/types/stream-types.js
ADDED
@@ -0,0 +1 @@
+export {};
package/dist/utils/providerUtils-fixed.js
CHANGED
@@ -85,7 +85,7 @@ async function isProviderAvailable(providerName) {
    }
    try {
        const provider = await AIProviderFactory.createProvider(providerName);
-        await provider.generateText({ prompt: "test", maxTokens: 1 });
+        await provider.generate({ prompt: "test", maxTokens: 1 });
        return true;
    }
    catch (error) {
package/dist/utils/streaming-utils.d.ts
CHANGED
@@ -2,7 +2,7 @@
 * Phase 2: Enhanced Streaming Infrastructure
 * Streaming utilities for progress tracking and metadata enhancement
 */
-import type { StreamingProgressData, StreamingMetadata, ProgressCallback, …
+import type { StreamingProgressData, StreamingMetadata, ProgressCallback } from "../core/types.js";
export interface UIProgressHandler {
    onProgress: (progress: StreamingProgressData) => void;
    onComplete: (metadata: StreamingMetadata) => void;
@@ -19,6 +19,18 @@ export interface StreamingStats {
/**
 * Enhanced streaming utilities for progress tracking and metadata
 */
+export interface StreamingConfigOptions {
+    enableProgressTracking?: boolean;
+    progressCallback?: ProgressCallback;
+    includeStreamingMetadata?: boolean;
+    streamingBufferSize?: number;
+    enableStreamingHeaders?: boolean;
+}
+/**
+ * Legacy interface for backward compatibility
+ */
+export interface EnhancedStreamTextOptions extends StreamingConfigOptions {
+}
export declare class StreamingEnhancer {
    /**
     * Add progress tracking to a readable stream
@@ -42,7 +54,7 @@ export declare class StreamingEnhancer {
    /**
     * Create enhanced streaming configuration
     */
-    static createStreamingConfig(options: EnhancedStreamTextOptions): {
+    static createStreamingConfig(options: StreamingConfigOptions | EnhancedStreamTextOptions): {
        progressTracking: boolean;
        callback?: ProgressCallback;
        metadata: boolean;
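Since `EnhancedStreamTextOptions` is now an empty extension of the renamed `StreamingConfigOptions`, 4.x call sites keep type-checking while new code can use the new name. A sketch against the declared `createStreamingConfig` signature; the import paths and the `ProgressCallback` argument shape are assumptions:

import { StreamingEnhancer } from "@juspay/neurolink"; // assumed export path
import type { StreamingConfigOptions } from "@juspay/neurolink"; // assumed export path

const opts: StreamingConfigOptions = {
  enableProgressTracking: true,
  progressCallback: (progress) => console.log("progress", progress), // ProgressCallback shape assumed
  includeStreamingMetadata: true,
  streamingBufferSize: 4096,
};

// The new signature accepts StreamingConfigOptions | EnhancedStreamTextOptions
const config = StreamingEnhancer.createStreamingConfig(opts);
console.log(config.progressTracking, config.metadata);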
package/dist/utils/streaming-utils.js
CHANGED
@@ -2,9 +2,6 @@
 * Phase 2: Enhanced Streaming Infrastructure
 * Streaming utilities for progress tracking and metadata enhancement
 */
-/**
- * Enhanced streaming utilities for progress tracking and metadata
- */
export class StreamingEnhancer {
    /**
     * Add progress tracking to a readable stream
package/package.json
CHANGED
@@ -1,6 +1,6 @@
{
  "name": "@juspay/neurolink",
-  "version": "4.2.0",
+  "version": "5.1.0",
  "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
  "author": {
    "name": "Juspay Technologies",
@@ -71,9 +71,8 @@
    "content:cleanup": "node tools/converted-scripts/cleanup-hash-named-videos.js",
    "content:all": "pnpm run content:screenshots && pnpm run content:videos",
    "// Documentation Automation": "",
-    "docs:sync": "node tools/content/documentation-sync.js",
    "docs:validate": "node tools/content/documentation-sync.js --validate",
-    "docs:generate": "pnpm run docs:…
+    "docs:generate": "pnpm run docs:validate && pnpm run content:screenshots",
    "// Development & Monitoring": "",
    "dev:full": "node tools/development/dev-server.js",
    "dev:health": "node tools/development/health-monitor.js",
package/dist/cli/commands/agent-generate.d.ts
REMOVED
@@ -1 +0,0 @@
-export declare function agentGenerateCommand(cli: any): void;
package/dist/cli/commands/agent-generate.js
REMOVED
@@ -1,67 +0,0 @@
-import { AgentEnhancedProvider } from "../../lib/providers/agent-enhanced-provider.js";
-import ora from "ora";
-import chalk from "chalk";
-export function agentGenerateCommand(cli) {
-    cli.command("agent-generate <prompt>", "Generate text with agent capabilities (tool calling)", (yargs) => yargs
-        .positional("prompt", {
-        describe: "The prompt for the agent",
-        type: "string",
-    })
-        .option("provider", {
-        alias: "p",
-        describe: "The AI provider to use",
-        type: "string",
-        choices: ["google-ai", "openai", "anthropic"],
-        default: "google-ai",
-    })
-        .option("model", {
-        alias: "m",
-        describe: "The model to use",
-        type: "string",
-    })
-        .option("toolCategory", {
-        alias: "t",
-        describe: "The category of tools to use",
-        type: "string",
-        choices: ["basic", "filesystem", "utility", "all"],
-        default: "all",
-    }), async (argv) => {
-        const { prompt, provider, model, toolCategory } = argv;
-        const spinner = ora(`Generating response with ${provider} agent...`).start();
-        try {
-            const agentProvider = new AgentEnhancedProvider({
-                provider,
-                model,
-                toolCategory,
-            });
-            const result = await agentProvider.generateText(prompt);
-            if (result) {
-                spinner.succeed("Response generated successfully!");
-                console.log(chalk.green("\nAI Response:"));
-                console.log(result.text);
-                if (result.toolCalls && result.toolCalls.length > 0) {
-                    console.log(chalk.yellow("\nTools Called:"));
-                    for (const call of result.toolCalls) {
-                        console.log(`- ${call.toolName}`);
-                        console.log(`  Args: ${JSON.stringify(call.args)}`);
-                    }
-                }
-                if (result.toolResults && result.toolResults.length > 0) {
-                    console.log(chalk.blue("\nTool Results:"));
-                    for (const toolResult of result.toolResults) {
-                        console.log(`- ${toolResult.toolName}`);
-                        console.log(`  Result: ${JSON.stringify(toolResult.result)}`);
-                    }
-                }
-            }
-            else {
-                spinner.fail("Failed to generate response.");
-            }
-        }
-        catch (error) {
-            spinner.fail("An error occurred during generation.");
-            console.error(chalk.red(error));
-            process.exit(1);
-        }
-    });
-}