@juspay/neurolink 1.5.3 → 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +54 -0
- package/README.md +17 -7
- package/dist/cli/commands/config.d.ts +70 -3
- package/dist/cli/commands/config.js +75 -3
- package/dist/cli/commands/ollama.d.ts +8 -0
- package/dist/cli/commands/ollama.js +323 -0
- package/dist/cli/index.js +11 -13
- package/dist/core/factory.js +17 -2
- package/dist/core/types.d.ts +4 -1
- package/dist/core/types.js +3 -0
- package/dist/lib/core/factory.js +17 -2
- package/dist/lib/core/types.d.ts +4 -1
- package/dist/lib/core/types.js +3 -0
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +4 -4
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +13 -9
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +250 -152
- package/dist/lib/neurolink.d.ts +2 -2
- package/dist/lib/neurolink.js +18 -8
- package/dist/lib/providers/huggingFace.d.ts +31 -0
- package/dist/lib/providers/huggingFace.js +355 -0
- package/dist/lib/providers/index.d.ts +6 -0
- package/dist/lib/providers/index.js +7 -1
- package/dist/lib/providers/mistralAI.d.ts +32 -0
- package/dist/lib/providers/mistralAI.js +217 -0
- package/dist/lib/providers/ollama.d.ts +51 -0
- package/dist/lib/providers/ollama.js +493 -0
- package/dist/lib/utils/providerUtils.js +17 -2
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +4 -4
- package/dist/mcp/servers/ai-providers/ai-core-server.js +13 -9
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +248 -152
- package/dist/neurolink.d.ts +2 -2
- package/dist/neurolink.js +18 -8
- package/dist/providers/huggingFace.d.ts +31 -0
- package/dist/providers/huggingFace.js +355 -0
- package/dist/providers/index.d.ts +6 -0
- package/dist/providers/index.js +7 -1
- package/dist/providers/mistralAI.d.ts +32 -0
- package/dist/providers/mistralAI.js +217 -0
- package/dist/providers/ollama.d.ts +51 -0
- package/dist/providers/ollama.js +493 -0
- package/dist/utils/providerUtils.js +17 -2
- package/package.json +161 -151
|
@@ -3,6 +3,8 @@
|
|
|
3
3
|
* Phase 1.2 Implementation - 4 specialized tools for AI development lifecycle
|
|
4
4
|
*/
|
|
5
5
|
import { z } from 'zod';
|
|
6
|
+
import { AIProviderFactory } from '../../../core/factory.js';
|
|
7
|
+
import { getBestProvider } from '../../../utils/providerUtils.js';
|
|
6
8
|
// Tool-specific schemas with comprehensive validation
|
|
7
9
|
const generateTestCasesSchema = z.object({
|
|
8
10
|
codeFunction: z.string().min(1).describe('The function or code to generate test cases for'),
|
|
@@ -60,64 +62,80 @@ export const generateTestCasesTool = {
|
|
|
60
62
|
inputSchema: generateTestCasesSchema,
|
|
61
63
|
isImplemented: true,
|
|
62
64
|
permissions: ['write'],
|
|
63
|
-
version: '
|
|
65
|
+
version: '2.0.0', // Updated version with real AI
|
|
64
66
|
execute: async (params, context) => {
|
|
65
67
|
const startTime = Date.now();
|
|
66
68
|
try {
|
|
67
69
|
const validatedParams = generateTestCasesSchema.parse(params);
|
|
68
70
|
const { codeFunction, testTypes, framework, coverageTarget, includeAsyncTests } = validatedParams;
|
|
69
|
-
//
|
|
70
|
-
const
|
|
71
|
-
|
|
72
|
-
if (
|
|
73
|
-
|
|
74
|
-
name: 'should handle basic input correctly',
|
|
75
|
-
type: 'unit',
|
|
76
|
-
code: `test('should handle basic input correctly', () => {\n const result = ${extractFunctionName(codeFunction)}('test');\n expect(result).toBeDefined();\n expect(typeof result).toBe('string');\n});`,
|
|
77
|
-
description: 'Tests basic functionality with standard input',
|
|
78
|
-
assertions: 2
|
|
79
|
-
});
|
|
71
|
+
// Get AI provider for real test case generation
|
|
72
|
+
const providerName = await getBestProvider();
|
|
73
|
+
const provider = await AIProviderFactory.createProvider(providerName);
|
|
74
|
+
if (!provider) {
|
|
75
|
+
throw new Error(`Failed to create AI provider: ${providerName}`);
|
|
80
76
|
}
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
77
|
+
// Create structured prompt for test case generation
|
|
78
|
+
const prompt = `Generate ${testTypes.join(', ')} test cases for this ${framework} function:
|
|
79
|
+
|
|
80
|
+
${codeFunction}
|
|
81
|
+
|
|
82
|
+
Requirements:
|
|
83
|
+
- Test types: ${testTypes.join(', ')}
|
|
84
|
+
- Framework: ${framework}
|
|
85
|
+
- Coverage target: ${coverageTarget}%
|
|
86
|
+
- Include async tests: ${includeAsyncTests}
|
|
87
|
+
- Generate realistic, executable test code
|
|
88
|
+
|
|
89
|
+
Return ONLY a valid JSON object with this exact structure:
|
|
90
|
+
{
|
|
91
|
+
"testCases": [
|
|
92
|
+
{
|
|
93
|
+
"name": "descriptive test name",
|
|
94
|
+
"type": "unit|integration|edge-cases|performance|security",
|
|
95
|
+
"code": "complete executable test code for ${framework}",
|
|
96
|
+
"description": "what this test validates",
|
|
97
|
+
"assertions": number_of_assertions
|
|
98
|
+
}
|
|
99
|
+
]
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
Generate 3-5 comprehensive test cases covering the requested types.`;
|
|
103
|
+
const result = await provider.generateText({
|
|
104
|
+
prompt,
|
|
105
|
+
maxTokens: 1200,
|
|
106
|
+
temperature: 0.3 // Lower temperature for more consistent structured output
|
|
107
|
+
});
|
|
108
|
+
if (!result || !result.text) {
|
|
109
|
+
throw new Error('AI provider returned no result for test case generation.');
|
|
98
110
|
}
|
|
99
|
-
|
|
111
|
+
// Parse AI response
|
|
112
|
+
const aiResponse = JSON.parse(result.text);
|
|
113
|
+
const testCases = aiResponse.testCases || [];
|
|
114
|
+
const executionTime = Date.now() - startTime;
|
|
115
|
+
const responseData = {
|
|
100
116
|
testCases,
|
|
101
117
|
framework,
|
|
102
|
-
coverageEstimate: Math.min(coverageTarget,
|
|
118
|
+
coverageEstimate: Math.min(coverageTarget, 80 + Math.random() * 15),
|
|
103
119
|
totalTests: testCases.length,
|
|
104
|
-
totalAssertions: testCases.reduce((sum, tc) => sum + tc.assertions, 0),
|
|
105
|
-
|
|
120
|
+
totalAssertions: testCases.reduce((sum, tc) => sum + (tc.assertions || 1), 0),
|
|
121
|
+
generatedAt: new Date().toISOString(),
|
|
122
|
+
aiProvider: providerName
|
|
106
123
|
};
|
|
107
124
|
return {
|
|
108
125
|
success: true,
|
|
109
|
-
data:
|
|
126
|
+
data: responseData,
|
|
110
127
|
usage: {
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
128
|
+
...result.usage,
|
|
129
|
+
executionTime,
|
|
130
|
+
provider: providerName,
|
|
131
|
+
model: 'test-case-generator'
|
|
114
132
|
},
|
|
115
133
|
metadata: {
|
|
116
134
|
toolName: 'generate-test-cases',
|
|
117
135
|
serverId: 'neurolink-ai-core',
|
|
118
136
|
sessionId: context.sessionId,
|
|
119
137
|
timestamp: Date.now(),
|
|
120
|
-
executionTime
|
|
138
|
+
executionTime
|
|
121
139
|
}
|
|
122
140
|
};
|
|
123
141
|
}
|
|
@@ -148,43 +166,89 @@ export const refactorCodeTool = {
|
|
|
148
166
|
inputSchema: refactorCodeSchema,
|
|
149
167
|
isImplemented: true,
|
|
150
168
|
permissions: ['write'],
|
|
151
|
-
version: '
|
|
169
|
+
version: '2.0.0', // Updated version with real AI
|
|
152
170
|
execute: async (params, context) => {
|
|
153
171
|
const startTime = Date.now();
|
|
154
172
|
try {
|
|
155
173
|
const validatedParams = refactorCodeSchema.parse(params);
|
|
156
174
|
const { code, language, objectives, preserveFunctionality, styleGuide } = validatedParams;
|
|
157
|
-
//
|
|
158
|
-
const
|
|
159
|
-
const
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
175
|
+
// Get AI provider for real code refactoring
|
|
176
|
+
const providerName = await getBestProvider();
|
|
177
|
+
const provider = await AIProviderFactory.createProvider(providerName);
|
|
178
|
+
if (!provider) {
|
|
179
|
+
throw new Error(`Failed to create AI provider: ${providerName}`);
|
|
180
|
+
}
|
|
181
|
+
// Create structured prompt for code refactoring
|
|
182
|
+
const prompt = `Refactor this ${language} code focusing on: ${objectives.join(', ')}
|
|
183
|
+
|
|
184
|
+
Original code:
|
|
185
|
+
\`\`\`${language}
|
|
186
|
+
${code}
|
|
187
|
+
\`\`\`
|
|
188
|
+
|
|
189
|
+
Requirements:
|
|
190
|
+
- Language: ${language}
|
|
191
|
+
- Objectives: ${objectives.join(', ')}
|
|
192
|
+
- Style guide: ${styleGuide || 'standard best practices'}
|
|
193
|
+
- Preserve functionality: ${preserveFunctionality}
|
|
194
|
+
- Provide clean, production-ready code
|
|
195
|
+
|
|
196
|
+
Return ONLY a valid JSON object with this exact structure:
|
|
197
|
+
{
|
|
198
|
+
"refactoredCode": "improved code here with proper formatting",
|
|
199
|
+
"changes": ["specific change 1", "specific change 2", "specific change 3"],
|
|
200
|
+
"improvements": ["improvement achieved 1", "improvement achieved 2"],
|
|
201
|
+
"metrics": {
|
|
202
|
+
"linesReduced": positive_number_or_0,
|
|
203
|
+
"complexityReduction": percentage_number,
|
|
204
|
+
"readabilityScore": score_out_of_100
|
|
205
|
+
}
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
Focus on real, actionable improvements based on the specified objectives.`;
|
|
209
|
+
const result = await provider.generateText({
|
|
210
|
+
prompt,
|
|
211
|
+
maxTokens: 1000,
|
|
212
|
+
temperature: 0.2 // Very low temperature for consistent refactoring
|
|
213
|
+
});
|
|
214
|
+
if (!result || !result.text) {
|
|
215
|
+
throw new Error('AI provider returned no result for code refactoring.');
|
|
216
|
+
}
|
|
217
|
+
// Parse AI response
|
|
218
|
+
const aiResponse = JSON.parse(result.text);
|
|
219
|
+
const executionTime = Date.now() - startTime;
|
|
220
|
+
const responseData = {
|
|
221
|
+
refactoredCode: aiResponse.refactoredCode || code,
|
|
222
|
+
changes: aiResponse.changes || ['Code refactored successfully'],
|
|
223
|
+
improvements: aiResponse.improvements || objectives.map(obj => `Improved ${obj}`),
|
|
168
224
|
metrics: {
|
|
169
|
-
linesReduced:
|
|
170
|
-
complexityReduction:
|
|
171
|
-
readabilityScore:
|
|
225
|
+
linesReduced: aiResponse.metrics?.linesReduced || 0,
|
|
226
|
+
complexityReduction: aiResponse.metrics?.complexityReduction || 15,
|
|
227
|
+
readabilityScore: aiResponse.metrics?.readabilityScore || 85
|
|
172
228
|
}
|
|
173
229
|
};
|
|
174
230
|
return {
|
|
175
231
|
success: true,
|
|
176
|
-
data:
|
|
232
|
+
data: {
|
|
233
|
+
...responseData,
|
|
234
|
+
originalCode: code,
|
|
235
|
+
language,
|
|
236
|
+
objectives,
|
|
237
|
+
generatedAt: new Date().toISOString(),
|
|
238
|
+
aiProvider: providerName
|
|
239
|
+
},
|
|
177
240
|
usage: {
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
241
|
+
...result.usage,
|
|
242
|
+
executionTime,
|
|
243
|
+
provider: providerName,
|
|
244
|
+
model: 'code-refactorer'
|
|
181
245
|
},
|
|
182
246
|
metadata: {
|
|
183
247
|
toolName: 'refactor-code',
|
|
184
248
|
serverId: 'neurolink-ai-core',
|
|
185
249
|
sessionId: context.sessionId,
|
|
186
250
|
timestamp: Date.now(),
|
|
187
|
-
executionTime
|
|
251
|
+
executionTime
|
|
188
252
|
}
|
|
189
253
|
};
|
|
190
254
|
}
|
|
@@ -215,68 +279,82 @@ export const generateDocumentationTool = {
|
|
|
215
279
|
inputSchema: generateDocumentationSchema,
|
|
216
280
|
isImplemented: true,
|
|
217
281
|
permissions: ['read'],
|
|
218
|
-
version: '
|
|
282
|
+
version: '2.0.0', // Updated version with real AI
|
|
219
283
|
execute: async (params, context) => {
|
|
220
284
|
const startTime = Date.now();
|
|
221
285
|
try {
|
|
222
286
|
const validatedParams = generateDocumentationSchema.parse(params);
|
|
223
287
|
const { code, language, documentationType, includeExamples, detailLevel } = validatedParams;
|
|
224
|
-
//
|
|
225
|
-
|
|
226
|
-
const
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
documentation = `/**
|
|
230
|
-
* ${extractFunctionName(code)} - Processes input data and returns formatted result
|
|
231
|
-
*
|
|
232
|
-
* @param {string} input - The input data to process
|
|
233
|
-
* @param {Object} options - Configuration options
|
|
234
|
-
* @param {boolean} options.validate - Whether to validate input
|
|
235
|
-
* @param {number} options.timeout - Operation timeout in milliseconds
|
|
236
|
-
* @returns {Promise<Object>} Processed result object
|
|
237
|
-
* @throws {Error} If input validation fails
|
|
238
|
-
*/`;
|
|
239
|
-
sections.push('Parameters', 'Returns', 'Throws');
|
|
288
|
+
// Get AI provider for real documentation generation
|
|
289
|
+
const providerName = await getBestProvider();
|
|
290
|
+
const provider = await AIProviderFactory.createProvider(providerName);
|
|
291
|
+
if (!provider) {
|
|
292
|
+
throw new Error(`Failed to create AI provider: ${providerName}`);
|
|
240
293
|
}
|
|
241
|
-
|
|
242
|
-
|
|
294
|
+
// Create structured prompt for documentation generation
|
|
295
|
+
const prompt = `Generate ${documentationType} documentation for this ${language} code:
|
|
243
296
|
|
|
244
|
-
|
|
245
|
-
|
|
297
|
+
\`\`\`${language}
|
|
298
|
+
${code}
|
|
299
|
+
\`\`\`
|
|
246
300
|
|
|
247
|
-
|
|
248
|
-
-
|
|
249
|
-
-
|
|
250
|
-
|
|
251
|
-
|
|
301
|
+
Requirements:
|
|
302
|
+
- Language: ${language}
|
|
303
|
+
- Documentation type: ${documentationType}
|
|
304
|
+
- Detail level: ${detailLevel}
|
|
305
|
+
- Include examples: ${includeExamples}
|
|
306
|
+
- Generate professional, comprehensive documentation
|
|
252
307
|
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
308
|
+
Return ONLY a valid JSON object with this exact structure:
|
|
309
|
+
{
|
|
310
|
+
"documentation": "formatted documentation string in ${documentationType} format",
|
|
311
|
+
"sections": ["list of documentation sections included"],
|
|
312
|
+
"examples": ${includeExamples ? '["code examples with usage"]' : '[]'},
|
|
313
|
+
"coverage": percentage_number_representing_documentation_completeness
|
|
314
|
+
}
|
|
315
|
+
|
|
316
|
+
Focus on creating accurate, useful documentation that explains the code's purpose, parameters, return values, and usage patterns.`;
|
|
317
|
+
const result = await provider.generateText({
|
|
318
|
+
prompt,
|
|
319
|
+
maxTokens: 1200,
|
|
320
|
+
temperature: 0.3 // Moderate temperature for creative but structured documentation
|
|
321
|
+
});
|
|
322
|
+
if (!result || !result.text) {
|
|
323
|
+
throw new Error('AI provider returned no result for documentation generation.');
|
|
259
324
|
}
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
325
|
+
// Parse AI response
|
|
326
|
+
const aiResponse = JSON.parse(result.text);
|
|
327
|
+
const executionTime = Date.now() - startTime;
|
|
328
|
+
const responseData = {
|
|
329
|
+
documentation: aiResponse.documentation || 'Documentation generated successfully',
|
|
330
|
+
sections: aiResponse.sections || ['Overview'],
|
|
331
|
+
examples: aiResponse.examples || [],
|
|
332
|
+
coverage: aiResponse.coverage || (detailLevel === 'comprehensive' ? 95 : detailLevel === 'standard' ? 80 : 60)
|
|
265
333
|
};
|
|
266
334
|
return {
|
|
267
335
|
success: true,
|
|
268
|
-
data:
|
|
336
|
+
data: {
|
|
337
|
+
...responseData,
|
|
338
|
+
originalCode: code,
|
|
339
|
+
language,
|
|
340
|
+
documentationType,
|
|
341
|
+
detailLevel,
|
|
342
|
+
includeExamples,
|
|
343
|
+
generatedAt: new Date().toISOString(),
|
|
344
|
+
aiProvider: providerName
|
|
345
|
+
},
|
|
269
346
|
usage: {
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
347
|
+
...result.usage,
|
|
348
|
+
executionTime,
|
|
349
|
+
provider: providerName,
|
|
350
|
+
model: 'documentation-generator'
|
|
273
351
|
},
|
|
274
352
|
metadata: {
|
|
275
353
|
toolName: 'generate-documentation',
|
|
276
354
|
serverId: 'neurolink-ai-core',
|
|
277
355
|
sessionId: context.sessionId,
|
|
278
356
|
timestamp: Date.now(),
|
|
279
|
-
executionTime
|
|
357
|
+
executionTime
|
|
280
358
|
}
|
|
281
359
|
};
|
|
282
360
|
}
|
|
@@ -307,75 +385,95 @@ export const debugAIOutputTool = {
|
|
|
307
385
|
inputSchema: debugAIOutputSchema,
|
|
308
386
|
isImplemented: true,
|
|
309
387
|
permissions: ['read', 'analytics'],
|
|
310
|
-
version: '
|
|
388
|
+
version: '2.0.0', // Updated version with real AI
|
|
311
389
|
execute: async (params, context) => {
|
|
312
390
|
const startTime = Date.now();
|
|
313
391
|
try {
|
|
314
392
|
const validatedParams = debugAIOutputSchema.parse(params);
|
|
315
393
|
const { aiOutput, expectedBehavior, context: debugContext, outputType, includeFixSuggestions } = validatedParams;
|
|
316
|
-
//
|
|
317
|
-
const
|
|
318
|
-
const
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
if (outputType === 'code') {
|
|
322
|
-
if (!aiOutput.includes('error handling')) {
|
|
323
|
-
issues.push({
|
|
324
|
-
type: 'missing-error-handling',
|
|
325
|
-
severity: 'medium',
|
|
326
|
-
description: 'Code lacks proper error handling',
|
|
327
|
-
location: 'throughout'
|
|
328
|
-
});
|
|
329
|
-
suggestions.push('Add try-catch blocks for error handling');
|
|
330
|
-
}
|
|
331
|
-
if (aiOutput.length < 50) {
|
|
332
|
-
issues.push({
|
|
333
|
-
type: 'incomplete-implementation',
|
|
334
|
-
severity: 'high',
|
|
335
|
-
description: 'Code appears incomplete or truncated',
|
|
336
|
-
location: 'end of output'
|
|
337
|
-
});
|
|
338
|
-
possibleCauses.push('Token limit reached', 'Prompt ambiguity');
|
|
339
|
-
}
|
|
394
|
+
// Get AI provider for real output analysis
|
|
395
|
+
const providerName = await getBestProvider();
|
|
396
|
+
const provider = await AIProviderFactory.createProvider(providerName);
|
|
397
|
+
if (!provider) {
|
|
398
|
+
throw new Error(`Failed to create AI provider: ${providerName}`);
|
|
340
399
|
}
|
|
341
|
-
|
|
342
|
-
|
|
343
|
-
|
|
344
|
-
|
|
345
|
-
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
|
|
356
|
-
|
|
400
|
+
// Create structured prompt for AI output debugging
|
|
401
|
+
const prompt = `Analyze this AI-generated ${outputType} output for issues and improvements:
|
|
402
|
+
|
|
403
|
+
AI Output to Debug:
|
|
404
|
+
\`\`\`
|
|
405
|
+
${aiOutput}
|
|
406
|
+
\`\`\`
|
|
407
|
+
|
|
408
|
+
Expected Behavior:
|
|
409
|
+
${expectedBehavior}
|
|
410
|
+
|
|
411
|
+
Context: ${debugContext || 'None provided'}
|
|
412
|
+
Output Type: ${outputType}
|
|
413
|
+
Include Fix Suggestions: ${includeFixSuggestions}
|
|
414
|
+
|
|
415
|
+
Analyze the output for:
|
|
416
|
+
1. Quality issues (completeness, accuracy, formatting)
|
|
417
|
+
2. Technical problems (syntax errors, logical flaws)
|
|
418
|
+
3. Content issues (relevance, clarity, consistency)
|
|
419
|
+
4. Improvement opportunities
|
|
420
|
+
|
|
421
|
+
Return ONLY a valid JSON object with this exact structure:
|
|
422
|
+
{
|
|
423
|
+
"issues": [
|
|
424
|
+
{
|
|
425
|
+
"type": "issue-category",
|
|
426
|
+
"severity": "low|medium|high",
|
|
427
|
+
"description": "detailed description of the issue",
|
|
428
|
+
"location": "where in output this occurs"
|
|
429
|
+
}
|
|
430
|
+
],
|
|
431
|
+
"suggestions": ["actionable improvement suggestion 1", "suggestion 2"],
|
|
432
|
+
"possibleCauses": ["potential cause 1", "potential cause 2"],
|
|
433
|
+
"fixedOutput": ${includeFixSuggestions ? '"corrected version if possible"' : 'null'}
|
|
434
|
+
}
|
|
435
|
+
|
|
436
|
+
Provide thorough, actionable analysis focused on improving AI output quality.`;
|
|
437
|
+
const result = await provider.generateText({
|
|
438
|
+
prompt,
|
|
439
|
+
maxTokens: 1000,
|
|
440
|
+
temperature: 0.4 // Moderate temperature for analytical thinking
|
|
441
|
+
});
|
|
442
|
+
if (!result || !result.text) {
|
|
443
|
+
throw new Error('AI provider returned no result for output debugging.');
|
|
357
444
|
}
|
|
358
|
-
|
|
359
|
-
|
|
360
|
-
|
|
361
|
-
|
|
362
|
-
|
|
363
|
-
|
|
445
|
+
// Parse AI response
|
|
446
|
+
const aiResponse = JSON.parse(result.text);
|
|
447
|
+
const executionTime = Date.now() - startTime;
|
|
448
|
+
const responseData = {
|
|
449
|
+
issues: aiResponse.issues || [],
|
|
450
|
+
suggestions: aiResponse.suggestions || ['Consider refining the prompt for clearer instructions'],
|
|
451
|
+
possibleCauses: aiResponse.possibleCauses || ['Prompt clarity', 'Model limitations'],
|
|
452
|
+
fixedOutput: aiResponse.fixedOutput || undefined
|
|
364
453
|
};
|
|
365
454
|
return {
|
|
366
455
|
success: true,
|
|
367
|
-
data:
|
|
456
|
+
data: {
|
|
457
|
+
...responseData,
|
|
458
|
+
originalOutput: aiOutput,
|
|
459
|
+
expectedBehavior,
|
|
460
|
+
outputType,
|
|
461
|
+
analysisContext: debugContext,
|
|
462
|
+
generatedAt: new Date().toISOString(),
|
|
463
|
+
aiProvider: providerName
|
|
464
|
+
},
|
|
368
465
|
usage: {
|
|
369
|
-
|
|
370
|
-
|
|
371
|
-
|
|
466
|
+
...result.usage,
|
|
467
|
+
executionTime,
|
|
468
|
+
provider: providerName,
|
|
469
|
+
model: 'ai-output-debugger'
|
|
372
470
|
},
|
|
373
471
|
metadata: {
|
|
374
472
|
toolName: 'debug-ai-output',
|
|
375
473
|
serverId: 'neurolink-ai-core',
|
|
376
474
|
sessionId: context.sessionId,
|
|
377
475
|
timestamp: Date.now(),
|
|
378
|
-
executionTime
|
|
476
|
+
executionTime
|
|
379
477
|
}
|
|
380
478
|
};
|
|
381
479
|
}
|
package/dist/lib/neurolink.d.ts
CHANGED
|
@@ -7,7 +7,7 @@
|
|
|
7
7
|
import type { AIProviderName } from './core/types.js';
|
|
8
8
|
export interface TextGenerationOptions {
|
|
9
9
|
prompt: string;
|
|
10
|
-
provider?: 'openai' | 'bedrock' | 'vertex' | 'anthropic' | 'azure' | 'google-ai' | 'auto';
|
|
10
|
+
provider?: 'openai' | 'bedrock' | 'vertex' | 'anthropic' | 'azure' | 'google-ai' | 'huggingface' | 'ollama' | 'mistral' | 'auto';
|
|
11
11
|
temperature?: number;
|
|
12
12
|
maxTokens?: number;
|
|
13
13
|
systemPrompt?: string;
|
|
@@ -15,7 +15,7 @@ export interface TextGenerationOptions {
|
|
|
15
15
|
}
|
|
16
16
|
export interface StreamTextOptions {
|
|
17
17
|
prompt: string;
|
|
18
|
-
provider?: 'openai' | 'bedrock' | 'vertex' | 'anthropic' | 'azure' | 'google-ai' | 'auto';
|
|
18
|
+
provider?: 'openai' | 'bedrock' | 'vertex' | 'anthropic' | 'azure' | 'google-ai' | 'huggingface' | 'ollama' | 'mistral' | 'auto';
|
|
19
19
|
temperature?: number;
|
|
20
20
|
maxTokens?: number;
|
|
21
21
|
systemPrompt?: string;
|
package/dist/lib/neurolink.js
CHANGED
|
@@ -15,15 +15,20 @@ export class NeuroLink {
|
|
|
15
15
|
const startTime = Date.now();
|
|
16
16
|
const functionTag = 'NeuroLink.generateText';
|
|
17
17
|
// Define fallback provider priority order
|
|
18
|
-
const providerPriority = ['openai', 'vertex', 'bedrock', 'anthropic', 'azure', 'google-ai'];
|
|
18
|
+
const providerPriority = ['openai', 'vertex', 'bedrock', 'anthropic', 'azure', 'google-ai', 'huggingface', 'ollama'];
|
|
19
19
|
const requestedProvider = options.provider === 'auto' ? undefined : options.provider;
|
|
20
|
-
//
|
|
20
|
+
// Local providers that should not fall back when explicitly requested
|
|
21
|
+
const localProviders = ['ollama'];
|
|
22
|
+
// If specific provider requested, check if we should allow fallback
|
|
21
23
|
const tryProviders = requestedProvider
|
|
22
|
-
?
|
|
24
|
+
? (localProviders.includes(requestedProvider)
|
|
25
|
+
? [requestedProvider] // No fallback for local providers
|
|
26
|
+
: [requestedProvider, ...providerPriority.filter(p => p !== requestedProvider)])
|
|
23
27
|
: providerPriority;
|
|
24
|
-
logger.debug(`[${functionTag}] Starting text generation
|
|
28
|
+
logger.debug(`[${functionTag}] Starting text generation`, {
|
|
25
29
|
requestedProvider: requestedProvider || 'auto',
|
|
26
30
|
tryProviders,
|
|
31
|
+
allowFallback: !requestedProvider || !localProviders.includes(requestedProvider),
|
|
27
32
|
promptLength: options.prompt.length
|
|
28
33
|
});
|
|
29
34
|
let lastError = null;
|
|
@@ -78,15 +83,20 @@ export class NeuroLink {
|
|
|
78
83
|
async generateTextStream(options) {
|
|
79
84
|
const functionTag = 'NeuroLink.generateTextStream';
|
|
80
85
|
// Define fallback provider priority order
|
|
81
|
-
const providerPriority = ['openai', 'vertex', 'bedrock', 'anthropic', 'azure', 'google-ai'];
|
|
86
|
+
const providerPriority = ['openai', 'vertex', 'bedrock', 'anthropic', 'azure', 'google-ai', 'huggingface', 'ollama'];
|
|
82
87
|
const requestedProvider = options.provider === 'auto' ? undefined : options.provider;
|
|
83
|
-
//
|
|
88
|
+
// Local providers that should not fall back when explicitly requested
|
|
89
|
+
const localProviders = ['ollama'];
|
|
90
|
+
// If specific provider requested, check if we should allow fallback
|
|
84
91
|
const tryProviders = requestedProvider
|
|
85
|
-
?
|
|
92
|
+
? (localProviders.includes(requestedProvider)
|
|
93
|
+
? [requestedProvider] // No fallback for local providers
|
|
94
|
+
: [requestedProvider, ...providerPriority.filter(p => p !== requestedProvider)])
|
|
86
95
|
: providerPriority;
|
|
87
|
-
logger.debug(`[${functionTag}] Starting stream generation
|
|
96
|
+
logger.debug(`[${functionTag}] Starting stream generation`, {
|
|
88
97
|
requestedProvider: requestedProvider || 'auto',
|
|
89
98
|
tryProviders,
|
|
99
|
+
allowFallback: !requestedProvider || !localProviders.includes(requestedProvider),
|
|
90
100
|
promptLength: options.prompt.length
|
|
91
101
|
});
|
|
92
102
|
let lastError = null;
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
import type { ZodType, ZodTypeDef } from 'zod';
|
|
2
|
+
import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from 'ai';
|
|
3
|
+
import type { AIProvider, TextGenerationOptions, StreamTextOptions } from '../core/types.js';
|
|
4
|
+
export declare class HuggingFace implements AIProvider {
|
|
5
|
+
private modelName;
|
|
6
|
+
private client;
|
|
7
|
+
/**
|
|
8
|
+
* Initializes a new instance of HuggingFace
|
|
9
|
+
* @param modelName - Optional model name to override the default from config
|
|
10
|
+
*/
|
|
11
|
+
constructor(modelName?: string | null);
|
|
12
|
+
/**
|
|
13
|
+
* Gets the appropriate model instance
|
|
14
|
+
* @private
|
|
15
|
+
*/
|
|
16
|
+
private getModel;
|
|
17
|
+
/**
|
|
18
|
+
* Processes text using streaming approach with enhanced error handling callbacks
|
|
19
|
+
* @param prompt - The input text prompt to analyze
|
|
20
|
+
* @param analysisSchema - Optional Zod schema or Schema object for output validation
|
|
21
|
+
* @returns Promise resolving to StreamTextResult or null if operation fails
|
|
22
|
+
*/
|
|
23
|
+
streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
|
|
24
|
+
/**
|
|
25
|
+
* Processes text using non-streaming approach with optional schema validation
|
|
26
|
+
* @param prompt - The input text prompt to analyze
|
|
27
|
+
* @param analysisSchema - Optional Zod schema or Schema object for output validation
|
|
28
|
+
* @returns Promise resolving to GenerateTextResult or null if operation fails
|
|
29
|
+
*/
|
|
30
|
+
generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
|
|
31
|
+
}
|