@juspay/neurolink 1.5.3 → 1.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +241 -1
- package/README.md +113 -20
- package/dist/agent/direct-tools.d.ts +1203 -0
- package/dist/agent/direct-tools.js +387 -0
- package/dist/cli/commands/agent-generate.d.ts +2 -0
- package/dist/cli/commands/agent-generate.js +70 -0
- package/dist/cli/commands/config.d.ts +76 -9
- package/dist/cli/commands/config.js +358 -233
- package/dist/cli/commands/mcp.d.ts +2 -1
- package/dist/cli/commands/mcp.js +874 -146
- package/dist/cli/commands/ollama.d.ts +8 -0
- package/dist/cli/commands/ollama.js +333 -0
- package/dist/cli/index.js +591 -327
- package/dist/cli/utils/complete-setup.d.ts +19 -0
- package/dist/cli/utils/complete-setup.js +81 -0
- package/dist/cli/utils/env-manager.d.ts +44 -0
- package/dist/cli/utils/env-manager.js +226 -0
- package/dist/cli/utils/interactive-setup.d.ts +48 -0
- package/dist/cli/utils/interactive-setup.js +302 -0
- package/dist/core/dynamic-models.d.ts +208 -0
- package/dist/core/dynamic-models.js +250 -0
- package/dist/core/factory.d.ts +13 -6
- package/dist/core/factory.js +180 -50
- package/dist/core/types.d.ts +8 -3
- package/dist/core/types.js +7 -4
- package/dist/index.d.ts +16 -16
- package/dist/index.js +16 -16
- package/dist/lib/agent/direct-tools.d.ts +1203 -0
- package/dist/lib/agent/direct-tools.js +387 -0
- package/dist/lib/core/dynamic-models.d.ts +208 -0
- package/dist/lib/core/dynamic-models.js +250 -0
- package/dist/lib/core/factory.d.ts +13 -6
- package/dist/lib/core/factory.js +180 -50
- package/dist/lib/core/types.d.ts +8 -3
- package/dist/lib/core/types.js +7 -4
- package/dist/lib/index.d.ts +16 -16
- package/dist/lib/index.js +16 -16
- package/dist/lib/mcp/auto-discovery.d.ts +120 -0
- package/dist/lib/mcp/auto-discovery.js +793 -0
- package/dist/lib/mcp/client.d.ts +66 -0
- package/dist/lib/mcp/client.js +245 -0
- package/dist/lib/mcp/config.d.ts +31 -0
- package/dist/lib/mcp/config.js +74 -0
- package/dist/lib/mcp/context-manager.d.ts +4 -4
- package/dist/lib/mcp/context-manager.js +24 -18
- package/dist/lib/mcp/factory.d.ts +28 -11
- package/dist/lib/mcp/factory.js +36 -29
- package/dist/lib/mcp/function-calling.d.ts +51 -0
- package/dist/lib/mcp/function-calling.js +510 -0
- package/dist/lib/mcp/index.d.ts +190 -0
- package/dist/lib/mcp/index.js +156 -0
- package/dist/lib/mcp/initialize-tools.d.ts +28 -0
- package/dist/lib/mcp/initialize-tools.js +209 -0
- package/dist/lib/mcp/initialize.d.ts +17 -0
- package/dist/lib/mcp/initialize.js +51 -0
- package/dist/lib/mcp/logging.d.ts +71 -0
- package/dist/lib/mcp/logging.js +183 -0
- package/dist/lib/mcp/manager.d.ts +67 -0
- package/dist/lib/mcp/manager.js +176 -0
- package/dist/lib/mcp/neurolink-mcp-client.d.ts +96 -0
- package/dist/lib/mcp/neurolink-mcp-client.js +417 -0
- package/dist/lib/mcp/orchestrator.d.ts +3 -3
- package/dist/lib/mcp/orchestrator.js +46 -43
- package/dist/lib/mcp/registry.d.ts +2 -2
- package/dist/lib/mcp/registry.js +42 -33
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.d.ts +1 -1
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +205 -66
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +143 -99
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +6 -6
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +404 -251
- package/dist/lib/mcp/servers/utilities/utility-server.d.ts +8 -0
- package/dist/lib/mcp/servers/utilities/utility-server.js +326 -0
- package/dist/lib/mcp/tool-integration.d.ts +67 -0
- package/dist/lib/mcp/tool-integration.js +179 -0
- package/dist/lib/mcp/unified-registry.d.ts +269 -0
- package/dist/lib/mcp/unified-registry.js +1411 -0
- package/dist/lib/neurolink.d.ts +68 -6
- package/dist/lib/neurolink.js +314 -42
- package/dist/lib/providers/agent-enhanced-provider.d.ts +59 -0
- package/dist/lib/providers/agent-enhanced-provider.js +242 -0
- package/dist/lib/providers/amazonBedrock.d.ts +3 -3
- package/dist/lib/providers/amazonBedrock.js +54 -50
- package/dist/lib/providers/anthropic.d.ts +2 -2
- package/dist/lib/providers/anthropic.js +92 -84
- package/dist/lib/providers/azureOpenAI.d.ts +2 -2
- package/dist/lib/providers/azureOpenAI.js +97 -86
- package/dist/lib/providers/function-calling-provider.d.ts +70 -0
- package/dist/lib/providers/function-calling-provider.js +359 -0
- package/dist/lib/providers/googleAIStudio.d.ts +10 -5
- package/dist/lib/providers/googleAIStudio.js +60 -38
- package/dist/lib/providers/googleVertexAI.d.ts +3 -3
- package/dist/lib/providers/googleVertexAI.js +96 -86
- package/dist/lib/providers/huggingFace.d.ts +31 -0
- package/dist/lib/providers/huggingFace.js +362 -0
- package/dist/lib/providers/index.d.ts +14 -8
- package/dist/lib/providers/index.js +18 -12
- package/dist/lib/providers/mcp-provider.d.ts +62 -0
- package/dist/lib/providers/mcp-provider.js +183 -0
- package/dist/lib/providers/mistralAI.d.ts +32 -0
- package/dist/lib/providers/mistralAI.js +223 -0
- package/dist/lib/providers/ollama.d.ts +51 -0
- package/dist/lib/providers/ollama.js +508 -0
- package/dist/lib/providers/openAI.d.ts +7 -3
- package/dist/lib/providers/openAI.js +45 -33
- package/dist/lib/utils/logger.js +2 -2
- package/dist/lib/utils/providerUtils.js +59 -22
- package/dist/mcp/auto-discovery.d.ts +120 -0
- package/dist/mcp/auto-discovery.js +794 -0
- package/dist/mcp/client.d.ts +66 -0
- package/dist/mcp/client.js +245 -0
- package/dist/mcp/config.d.ts +31 -0
- package/dist/mcp/config.js +74 -0
- package/dist/mcp/context-manager.d.ts +4 -4
- package/dist/mcp/context-manager.js +24 -18
- package/dist/mcp/factory.d.ts +28 -11
- package/dist/mcp/factory.js +36 -29
- package/dist/mcp/function-calling.d.ts +51 -0
- package/dist/mcp/function-calling.js +510 -0
- package/dist/mcp/index.d.ts +190 -0
- package/dist/mcp/index.js +156 -0
- package/dist/mcp/initialize-tools.d.ts +28 -0
- package/dist/mcp/initialize-tools.js +210 -0
- package/dist/mcp/initialize.d.ts +17 -0
- package/dist/mcp/initialize.js +51 -0
- package/dist/mcp/logging.d.ts +71 -0
- package/dist/mcp/logging.js +183 -0
- package/dist/mcp/manager.d.ts +67 -0
- package/dist/mcp/manager.js +176 -0
- package/dist/mcp/neurolink-mcp-client.d.ts +96 -0
- package/dist/mcp/neurolink-mcp-client.js +417 -0
- package/dist/mcp/orchestrator.d.ts +3 -3
- package/dist/mcp/orchestrator.js +46 -43
- package/dist/mcp/registry.d.ts +2 -2
- package/dist/mcp/registry.js +42 -33
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.d.ts +1 -1
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +205 -66
- package/dist/mcp/servers/ai-providers/ai-core-server.js +143 -99
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +6 -6
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +404 -253
- package/dist/mcp/servers/utilities/utility-server.d.ts +8 -0
- package/dist/mcp/servers/utilities/utility-server.js +326 -0
- package/dist/mcp/tool-integration.d.ts +67 -0
- package/dist/mcp/tool-integration.js +179 -0
- package/dist/mcp/unified-registry.d.ts +269 -0
- package/dist/mcp/unified-registry.js +1411 -0
- package/dist/neurolink.d.ts +68 -6
- package/dist/neurolink.js +314 -42
- package/dist/providers/agent-enhanced-provider.d.ts +59 -0
- package/dist/providers/agent-enhanced-provider.js +242 -0
- package/dist/providers/amazonBedrock.d.ts +3 -3
- package/dist/providers/amazonBedrock.js +54 -50
- package/dist/providers/anthropic.d.ts +2 -2
- package/dist/providers/anthropic.js +92 -84
- package/dist/providers/azureOpenAI.d.ts +2 -2
- package/dist/providers/azureOpenAI.js +97 -86
- package/dist/providers/function-calling-provider.d.ts +70 -0
- package/dist/providers/function-calling-provider.js +359 -0
- package/dist/providers/googleAIStudio.d.ts +10 -5
- package/dist/providers/googleAIStudio.js +60 -38
- package/dist/providers/googleVertexAI.d.ts +3 -3
- package/dist/providers/googleVertexAI.js +96 -86
- package/dist/providers/huggingFace.d.ts +31 -0
- package/dist/providers/huggingFace.js +362 -0
- package/dist/providers/index.d.ts +14 -8
- package/dist/providers/index.js +18 -12
- package/dist/providers/mcp-provider.d.ts +62 -0
- package/dist/providers/mcp-provider.js +183 -0
- package/dist/providers/mistralAI.d.ts +32 -0
- package/dist/providers/mistralAI.js +223 -0
- package/dist/providers/ollama.d.ts +51 -0
- package/dist/providers/ollama.js +508 -0
- package/dist/providers/openAI.d.ts +7 -3
- package/dist/providers/openAI.js +45 -33
- package/dist/utils/logger.js +2 -2
- package/dist/utils/providerUtils.js +59 -22
- package/package.json +28 -4
|
@@ -0,0 +1,362 @@
|
|
|
1
|
+
import { HfInference } from "@huggingface/inference";
|
|
2
|
+
import { streamText, generateText, Output, } from "ai";
|
|
3
|
+
import { logger } from "../utils/logger.js";
|
|
4
|
+
// Default system context
// Applied as the fallback `systemPrompt` whenever a caller does not supply one.
const DEFAULT_SYSTEM_CONTEXT = {
    systemPrompt: "You are a helpful AI assistant.",
};
|
|
8
|
+
// Configuration helpers
/**
 * Resolves the Hugging Face API key from the environment.
 * Checks HUGGINGFACE_API_KEY first, then falls back to HF_TOKEN.
 * @returns {string} the API key
 * @throws {Error} when neither environment variable is set
 */
const getHuggingFaceApiKey = () => {
    const apiKey = process.env.HUGGINGFACE_API_KEY || process.env.HF_TOKEN;
    if (!apiKey) {
        // Fix: the old message mentioned only HUGGINGFACE_API_KEY even though
        // HF_TOKEN also satisfies the check above — name both.
        throw new Error("HUGGINGFACE_API_KEY (or HF_TOKEN) environment variable is not set");
    }
    return apiKey;
};
|
|
16
|
+
/**
 * Resolves the Hugging Face model id, preferring the HUGGINGFACE_MODEL
 * environment variable and falling back to a small conversational default.
 * @returns {string} the model id to use
 */
const getHuggingFaceModelId = () => process.env.HUGGINGFACE_MODEL || "microsoft/DialoGPT-medium";
|
|
19
|
+
/**
 * Reports whether Hugging Face credentials are present in the environment.
 * @returns {boolean} true when HUGGINGFACE_API_KEY or HF_TOKEN is set
 */
const hasValidAuth = () => {
    const { HUGGINGFACE_API_KEY, HF_TOKEN } = process.env;
    return Boolean(HUGGINGFACE_API_KEY || HF_TOKEN);
};
|
|
22
|
+
// Lazy initialization cache — the SDK client is only constructed on first
// use, so merely importing this module never requires credentials.
let _hfClient = null;
/**
 * Returns the shared HfInference client, creating it on first call.
 * @returns {HfInference} the cached inference client
 * @throws {Error} when no API key is configured (see getHuggingFaceApiKey)
 */
function getHuggingFaceClient() {
    if (_hfClient === null) {
        _hfClient = new HfInference(getHuggingFaceApiKey());
    }
    return _hfClient;
}
|
|
31
|
+
// Retry configuration for model loading
const RETRY_CONFIG = {
    maxRetries: 3,
    baseDelay: 2000, // 2 seconds
    maxDelay: 30000, // 30 seconds
    backoffMultiplier: 2,
};
// Helper function for exponential backoff retry
/**
 * Runs `operation`, retrying with exponential backoff when it fails with an
 * error whose message contains "503" (HuggingFace's "model loading" status).
 * Any other error — or a 503 on the final attempt — is rethrown immediately.
 * @param {() => Promise<*>} operation - async work to attempt
 * @param {object} [retryConfig] - overrides for maxRetries/baseDelay/maxDelay/backoffMultiplier
 * @returns {Promise<*>} the operation's result
 */
async function retryWithBackoff(operation, retryConfig = RETRY_CONFIG) {
    let lastError;
    let attempt = 0;
    while (attempt <= retryConfig.maxRetries) {
        try {
            return await operation();
        }
        catch (error) {
            lastError = error;
            const isModelLoading = error instanceof Error && error.message.includes("503");
            const canRetry = isModelLoading && attempt < retryConfig.maxRetries;
            if (!canRetry) {
                // Non-503 failures and the final 503 attempt fail fast.
                throw error;
            }
            const delay = Math.min(retryConfig.baseDelay * Math.pow(retryConfig.backoffMultiplier, attempt), retryConfig.maxDelay);
            logger.debug("HuggingFace model loading, retrying...", {
                attempt: attempt + 1,
                maxRetries: retryConfig.maxRetries,
                delayMs: delay,
                error: error.message,
            });
            await new Promise((resolve) => setTimeout(resolve, delay));
        }
        attempt += 1;
    }
    throw lastError;
}
|
|
68
|
+
// Custom LanguageModelV1 implementation for Hugging Face
/**
 * Minimal LanguageModelV1-shaped adapter over the HfInference text-generation
 * endpoint. Streaming is simulated client-side: the plain Inference API call
 * used here returns a single completed response, which is then re-emitted as
 * a sequence of chunks.
 */
class HuggingFaceLanguageModel {
    specificationVersion = "v1";
    provider = "huggingface";
    modelId;
    maxTokens;
    supportsStreaming = true;
    defaultObjectGenerationMode = "json";
    client;
    /**
     * @param {string} modelId - Hugging Face model identifier
     * @param {HfInference} client - configured inference client
     */
    constructor(modelId, client) {
        this.modelId = modelId;
        this.client = client;
    }
    /**
     * Rough token estimate (~4 characters per token). The Inference API does
     * not report usage, so this keeps the `usage` fields populated.
     */
    estimateTokens(text) {
        return Math.ceil(text.length / 4);
    }
    /**
     * Flattens SDK chat messages into a single "role: content" prompt string.
     * Multi-part content keeps only the text parts (images etc. are dropped);
     * anything else contributes an empty line.
     */
    convertMessagesToPrompt(messages) {
        const lines = messages.map((msg) => {
            const { role, content } = msg;
            if (typeof content === "string") {
                return `${role}: ${content}`;
            }
            if (Array.isArray(content)) {
                const text = content
                    .filter((part) => part.type === "text")
                    .map((part) => part.text)
                    .join(" ");
                return `${role}: ${text}`;
            }
            return "";
        });
        return lines.join("\n");
    }
    /**
     * Performs one non-streaming generation call, retrying on 503 (model
     * loading) via retryWithBackoff.
     */
    async doGenerate(options) {
        const prompt = this.convertMessagesToPrompt(options.prompt);
        const temperature = options.temperature || 0.7;
        const response = await retryWithBackoff(() => this.client.textGeneration({
            model: this.modelId,
            inputs: prompt,
            parameters: {
                temperature,
                max_new_tokens: options.maxTokens || 500,
                return_full_text: false,
                do_sample: temperature > 0,
            },
        }));
        const generatedText = response.generated_text || "";
        const promptTokens = this.estimateTokens(prompt);
        const completionTokens = this.estimateTokens(generatedText);
        return {
            text: generatedText,
            usage: {
                promptTokens,
                completionTokens,
                totalTokens: promptTokens + completionTokens,
            },
            finishReason: "stop",
            logprobs: undefined,
            rawCall: { rawPrompt: prompt, rawSettings: options },
            rawResponse: { headers: {} },
        };
    }
    /**
     * Simulated streaming: generates the full completion, then emits it as
     * roughly 10 text-delta chunks with a 50ms pause between them, followed
     * by a single finish event carrying usage and finishReason.
     */
    async doStream(options) {
        const full = await this.doGenerate(options);
        const text = full.text || "";
        const chunkSize = Math.max(1, Math.floor(text.length / 10)); // ~10 chunks
        const stream = new ReadableStream({
            start(controller) {
                let offset = 0;
                const emitNext = () => {
                    if (offset >= text.length) {
                        // All text emitted — close out with the finish event.
                        controller.enqueue({
                            type: "finish",
                            finishReason: full.finishReason,
                            usage: full.usage,
                            logprobs: full.logprobs,
                        });
                        controller.close();
                        return;
                    }
                    controller.enqueue({
                        type: "text-delta",
                        textDelta: text.slice(offset, offset + chunkSize),
                    });
                    offset += chunkSize;
                    setTimeout(emitNext, 50); // pace the simulated stream
                };
                emitNext();
            },
        });
        return {
            stream,
            rawCall: full.rawCall,
            rawResponse: full.rawResponse,
        };
    }
}
|
|
174
|
+
// Hugging Face class with enhanced error handling
/**
 * Hugging Face provider. Bridges the Vercel AI SDK's streamText/generateText
 * entry points onto HuggingFaceLanguageModel, with structured debug/error
 * logging throughout. Failures are logged and rethrown so upstream fallback
 * logic can react.
 */
export class HuggingFace {
    modelName;
    client;
    /**
     * Initializes a new instance of HuggingFace
     * @param modelName - Optional model name to override the default from config
     */
    constructor(modelName) {
        const functionTag = "HuggingFace.constructor";
        this.modelName = modelName || getHuggingFaceModelId();
        try {
            // May throw when no API key is configured.
            this.client = getHuggingFaceClient();
            logger.debug(`[${functionTag}] Initialization started`, {
                modelName: this.modelName,
                hasApiKey: hasValidAuth(),
            });
            logger.debug(`[${functionTag}] Initialization completed`, {
                modelName: this.modelName,
                success: true,
            });
        }
        catch (err) {
            const failure = err instanceof Error ? err : null;
            logger.error(`[${functionTag}] Initialization failed`, {
                message: "Error in initializing Hugging Face",
                modelName: this.modelName,
                error: failure ? failure.message : String(err),
                stack: failure ? failure.stack : undefined,
            });
            throw err;
        }
    }
    /**
     * Gets the appropriate model instance
     * @private
     */
    getModel() {
        logger.debug("HuggingFace.getModel - Hugging Face model selected", {
            modelName: this.modelName,
        });
        return new HuggingFaceLanguageModel(this.modelName, this.client);
    }
    /**
     * Processes text using streaming approach with enhanced error handling callbacks
     * @param optionsOrPrompt - The input prompt string, or a full options object
     * @param analysisSchema - Optional Zod schema or Schema object for output validation
     * @returns Promise resolving to a StreamTextResult (errors are rethrown)
     */
    async streamText(optionsOrPrompt, analysisSchema) {
        const functionTag = "HuggingFace.streamText";
        const provider = "huggingface";
        let chunkCount = 0;
        try {
            // Accept either a bare prompt string or an options object.
            const opts = typeof optionsOrPrompt === "string"
                ? { prompt: optionsOrPrompt }
                : optionsOrPrompt;
            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = opts;
            // A schema in the options object wins over the fallback parameter.
            const finalSchema = schema || analysisSchema;
            logger.debug(`[${functionTag}] Stream request started`, {
                provider,
                modelName: this.modelName,
                promptLength: prompt.length,
                temperature,
                maxTokens,
                hasSchema: !!finalSchema,
            });
            const streamOptions = {
                model: this.getModel(),
                prompt,
                system: systemPrompt,
                temperature,
                maxTokens,
                onError: (event) => {
                    const cause = event.error;
                    const causeIsError = cause instanceof Error;
                    logger.error(`[${functionTag}] Stream text error`, {
                        provider,
                        modelName: this.modelName,
                        error: causeIsError ? cause.message : String(cause),
                        stack: causeIsError ? cause.stack : undefined,
                        promptLength: prompt.length,
                        chunkCount,
                    });
                },
                onFinish: (event) => {
                    logger.debug(`[${functionTag}] Stream text finished`, {
                        provider,
                        modelName: this.modelName,
                        finishReason: event.finishReason,
                        usage: event.usage,
                        totalChunks: chunkCount,
                        promptLength: prompt.length,
                        responseLength: event.text?.length || 0,
                    });
                },
                onChunk: (event) => {
                    chunkCount += 1;
                    logger.debug(`[${functionTag}] Stream text chunk`, {
                        provider,
                        modelName: this.modelName,
                        chunkNumber: chunkCount,
                        chunkLength: event.chunk.text?.length || 0,
                        chunkType: event.chunk.type,
                    });
                },
            };
            if (finalSchema) {
                streamOptions.experimental_output = Output.object({
                    schema: finalSchema,
                });
            }
            return streamText(streamOptions);
        }
        catch (err) {
            logger.error(`[${functionTag}] Exception`, {
                provider,
                modelName: this.modelName,
                message: "Error in streaming text",
                err: String(err),
                promptLength: typeof optionsOrPrompt === "string"
                    ? optionsOrPrompt.length
                    : optionsOrPrompt.prompt.length,
            });
            throw err; // Re-throw error to trigger fallback
        }
    }
    /**
     * Processes text using non-streaming approach with optional schema validation
     * @param optionsOrPrompt - The input prompt string, or a full options object
     * @param analysisSchema - Optional Zod schema or Schema object for output validation
     * @returns Promise resolving to a GenerateTextResult (errors are rethrown)
     */
    async generateText(optionsOrPrompt, analysisSchema) {
        const functionTag = "HuggingFace.generateText";
        const provider = "huggingface";
        try {
            // Accept either a bare prompt string or an options object.
            const opts = typeof optionsOrPrompt === "string"
                ? { prompt: optionsOrPrompt }
                : optionsOrPrompt;
            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = opts;
            // A schema in the options object wins over the fallback parameter.
            const finalSchema = schema || analysisSchema;
            logger.debug(`[${functionTag}] Generate request started`, {
                provider,
                modelName: this.modelName,
                promptLength: prompt.length,
                temperature,
                maxTokens,
            });
            const generateOptions = {
                model: this.getModel(),
                prompt,
                system: systemPrompt,
                temperature,
                maxTokens,
            };
            if (finalSchema) {
                generateOptions.experimental_output = Output.object({
                    schema: finalSchema,
                });
            }
            const result = await generateText(generateOptions);
            logger.debug(`[${functionTag}] Generate text completed`, {
                provider,
                modelName: this.modelName,
                usage: result.usage,
                finishReason: result.finishReason,
                responseLength: result.text?.length || 0,
            });
            return result;
        }
        catch (err) {
            logger.error(`[${functionTag}] Exception`, {
                provider,
                modelName: this.modelName,
                message: "Error in generating text",
                err: String(err),
            });
            throw err; // Re-throw error to trigger fallback
        }
    }
}
|
|
@@ -2,13 +2,16 @@
|
|
|
2
2
|
* Provider exports for Vercel AI SDK integration
|
|
3
3
|
* This file centralizes all AI provider classes for easy import and usage
|
|
4
4
|
*/
|
|
5
|
-
export { GoogleVertexAI } from
|
|
6
|
-
export { AmazonBedrock } from
|
|
7
|
-
export { OpenAI } from
|
|
8
|
-
export { AnthropicProvider } from
|
|
9
|
-
export { AzureOpenAIProvider } from
|
|
10
|
-
export { GoogleAIStudio } from
|
|
11
|
-
export
|
|
5
|
+
export { GoogleVertexAI } from "./googleVertexAI.js";
|
|
6
|
+
export { AmazonBedrock } from "./amazonBedrock.js";
|
|
7
|
+
export { OpenAI } from "./openAI.js";
|
|
8
|
+
export { AnthropicProvider } from "./anthropic.js";
|
|
9
|
+
export { AzureOpenAIProvider } from "./azureOpenAI.js";
|
|
10
|
+
export { GoogleAIStudio } from "./googleAIStudio.js";
|
|
11
|
+
export { HuggingFace } from "./huggingFace.js";
|
|
12
|
+
export { Ollama } from "./ollama.js";
|
|
13
|
+
export { MistralAI } from "./mistralAI.js";
|
|
14
|
+
export type { AIProvider } from "../core/types.js";
|
|
12
15
|
/**
|
|
13
16
|
* Provider registry for dynamic provider instantiation
|
|
14
17
|
*/
|
|
@@ -18,7 +21,10 @@ export declare const PROVIDERS: {
|
|
|
18
21
|
readonly openai: "OpenAI";
|
|
19
22
|
readonly anthropic: "AnthropicProvider";
|
|
20
23
|
readonly azure: "AzureOpenAIProvider";
|
|
21
|
-
readonly
|
|
24
|
+
readonly "google-ai": "GoogleAIStudio";
|
|
25
|
+
readonly huggingface: "HuggingFace";
|
|
26
|
+
readonly ollama: "Ollama";
|
|
27
|
+
readonly mistral: "MistralAI";
|
|
22
28
|
};
|
|
23
29
|
/**
|
|
24
30
|
* Type for valid provider names
|
|
@@ -2,22 +2,28 @@
|
|
|
2
2
|
* Provider exports for Vercel AI SDK integration
|
|
3
3
|
* This file centralizes all AI provider classes for easy import and usage
|
|
4
4
|
*/
|
|
5
|
-
export { GoogleVertexAI } from
|
|
6
|
-
export { AmazonBedrock } from
|
|
7
|
-
export { OpenAI } from
|
|
8
|
-
export { AnthropicProvider } from
|
|
9
|
-
export { AzureOpenAIProvider } from
|
|
10
|
-
export { GoogleAIStudio } from
|
|
5
|
+
export { GoogleVertexAI } from "./googleVertexAI.js";
|
|
6
|
+
export { AmazonBedrock } from "./amazonBedrock.js";
|
|
7
|
+
export { OpenAI } from "./openAI.js";
|
|
8
|
+
export { AnthropicProvider } from "./anthropic.js";
|
|
9
|
+
export { AzureOpenAIProvider } from "./azureOpenAI.js";
|
|
10
|
+
export { GoogleAIStudio } from "./googleAIStudio.js";
|
|
11
|
+
export { HuggingFace } from "./huggingFace.js";
|
|
12
|
+
export { Ollama } from "./ollama.js";
|
|
13
|
+
export { MistralAI } from "./mistralAI.js";
|
|
11
14
|
/**
|
|
12
15
|
* Provider registry for dynamic provider instantiation
|
|
13
16
|
*/
|
|
14
17
|
export const PROVIDERS = {
|
|
15
|
-
vertex:
|
|
16
|
-
bedrock:
|
|
17
|
-
openai:
|
|
18
|
-
anthropic:
|
|
19
|
-
azure:
|
|
20
|
-
|
|
18
|
+
vertex: "GoogleVertexAI",
|
|
19
|
+
bedrock: "AmazonBedrock",
|
|
20
|
+
openai: "OpenAI",
|
|
21
|
+
anthropic: "AnthropicProvider",
|
|
22
|
+
azure: "AzureOpenAIProvider",
|
|
23
|
+
"google-ai": "GoogleAIStudio",
|
|
24
|
+
huggingface: "HuggingFace",
|
|
25
|
+
ollama: "Ollama",
|
|
26
|
+
mistral: "MistralAI",
|
|
21
27
|
};
|
|
22
28
|
/**
|
|
23
29
|
* List of all available provider names
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
/**
 * NeuroLink MCP-Aware AI Provider
 * Integrates MCP tools with AI providers following Lighthouse's pattern
 */
import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
import type { StreamTextResult, ToolSet, Schema, GenerateTextResult } from "ai";
import type { ZodType, ZodTypeDef } from "zod";
/**
 * MCP-Aware Provider Configuration
 */
export interface MCPProviderConfig {
    /** Underlying provider that actually performs text generation. */
    baseProvider: AIProvider;
    /** Provider name forwarded into the MCP execution context. */
    providerName?: string;
    /** Model id forwarded into the MCP execution context. */
    modelName?: string;
    /** Set to false to disable MCP tool integration entirely. */
    enableMCP?: boolean;
    /** Session id; a fresh UUID is generated when omitted. */
    sessionId?: string;
    /** Optional user id recorded in the execution context. */
    userId?: string;
    /** Optional organization id recorded in the execution context. */
    organizationId?: string;
}
/**
 * MCP-Aware AI Provider
 * Wraps any AI provider with MCP tool capabilities
 */
export declare class MCPAwareProvider implements AIProvider {
    private baseProvider;
    private config;
    private sessionId;
    private mcpInitialized;
    constructor(config: MCPProviderConfig);
    /**
     * Initialize MCP tools for this session
     */
    private initializeMCP;
    generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
    streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
    /**
     * Detect if the prompt is requesting tool usage
     */
    private detectToolRequest;
    /**
     * Get session statistics (null until MCP has been initialized)
     */
    getSessionStats(): {
        sessionId: string;
        toolCount: number;
        executionCount: number;
        isConnected: boolean;
        uptime: number;
    } | null;
    /**
     * Clean up session
     */
    cleanup(): Promise<void>;
}
/**
 * Create an MCP-aware provider (MCP enabled by default; config overrides)
 */
export declare function createMCPAwareProvider(baseProvider: AIProvider, config?: Partial<MCPProviderConfig>): MCPAwareProvider;
/**
 * Check if a provider is MCP-aware (TypeScript type guard)
 */
export declare function isMCPAwareProvider(provider: AIProvider): provider is MCPAwareProvider;
|
|
@@ -0,0 +1,183 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* NeuroLink MCP-Aware AI Provider
|
|
3
|
+
* Integrates MCP tools with AI providers following Lighthouse's pattern
|
|
4
|
+
*/
|
|
5
|
+
import { getMCPManager } from "../mcp/manager.js";
|
|
6
|
+
import { initializeMCPTools } from "../mcp/initialize-tools.js";
|
|
7
|
+
import { logger } from "../utils/logger.js";
|
|
8
|
+
import { v4 as uuidv4 } from "uuid";
|
|
9
|
+
/**
 * MCP-Aware AI Provider
 * Wraps any AI provider with MCP tool capabilities
 */
export class MCPAwareProvider {
    baseProvider;
    config;
    sessionId;
    mcpInitialized = false;
    /**
     * @param {MCPProviderConfig} config - must include baseProvider; sessionId
     *   defaults to a fresh UUID so each wrapper gets its own tool session.
     */
    constructor(config) {
        this.baseProvider = config.baseProvider;
        this.config = config;
        this.sessionId = config.sessionId || uuidv4();
    }
    /**
     * Initialize MCP tools for this session. Idempotent; no-op when
     * config.enableMCP === false. Initialization failures are logged and
     * swallowed so generation continues without tools.
     */
    async initializeMCP() {
        if (this.mcpInitialized || this.config.enableMCP === false) {
            return;
        }
        try {
            // Get or create MCP client for this session
            const mcpClient = getMCPManager(this.sessionId, {
                userId: this.config.userId,
                aiProvider: this.config.providerName,
                modelId: this.config.modelName,
            });
            // Create execution context
            const context = {
                sessionId: this.sessionId,
                userId: this.config.userId,
                organizationId: this.config.organizationId,
                aiProvider: this.config.providerName,
                modelId: this.config.modelName,
                timestamp: Date.now(),
            };
            // Initialize all MCP tools
            initializeMCPTools(this.sessionId, mcpClient, context);
            this.mcpInitialized = true;
            const toolCount = Object.keys(mcpClient.getTools()).length;
            logger.info(`[MCP Provider] Initialized ${toolCount} tools for session ${this.sessionId}`);
        }
        catch (error) {
            logger.error(`[MCP Provider] Failed to initialize MCP for session ${this.sessionId}`, error);
            // Continue without MCP tools if initialization fails
        }
    }
    /**
     * Generates text, optionally routing through an MCP tool when the prompt
     * looks like a tool request. The tool protocol is prompt-based: the model
     * is asked to reply with "TOOL: <name> / PARAMS: <json>" when it wants a
     * tool, and a second generation call folds the tool result back in.
     * @returns the provider response, or null when the base provider returns null
     */
    async generateText(optionsOrPrompt, analysisSchema) {
        // Ensure MCP is initialized
        await this.initializeMCP();
        // Parse options - support both string and options object
        const options = typeof optionsOrPrompt === "string"
            ? { prompt: optionsOrPrompt }
            : optionsOrPrompt;
        const needsTools = this.detectToolRequest(options.prompt);
        if (needsTools && this.mcpInitialized) {
            const mcpClient = getMCPManager(this.sessionId);
            // Enumerate available tools for the enhanced prompt.
            const tools = mcpClient.getTools();
            const toolList = Object.keys(tools)
                .map((name) => `- ${name}: ${tools[name].description || "No description"}`)
                .join("\n");
            const enhancedPrompt = `${options.prompt}

Available tools:
${toolList}

To use a tool, respond with:
TOOL: <tool_name>
PARAMS: <json_params>

Otherwise, provide a direct response.`;
            const response = await this.baseProvider.generateText({
                ...options,
                prompt: enhancedPrompt,
            }, analysisSchema);
            if (!response) {
                return null;
            }
            // Check if the model chose to invoke a tool.
            const toolMatch = response.text.match(/TOOL:\s*(\S+)\s*\nPARAMS:\s*({.*})/s);
            if (toolMatch) {
                try {
                    const toolName = toolMatch[1];
                    const toolParams = JSON.parse(toolMatch[2]);
                    const toolResult = await mcpClient.executeTool(toolName, toolParams);
                    // Generate final response with tool result
                    const finalPrompt = `${options.prompt}

Tool ${toolName} was executed with result:
${JSON.stringify(toolResult, null, 2)}

Please provide a response based on this information.`;
                    const finalResponse = await this.baseProvider.generateText({
                        ...options,
                        prompt: finalPrompt,
                    }, analysisSchema);
                    if (!finalResponse) {
                        return null;
                    }
                    return finalResponse;
                }
                catch (error) {
                    // Robustness fix: malformed PARAMS JSON or a failing tool used
                    // to crash the whole request — log it and fall back to the
                    // model's direct answer instead.
                    logger.error(`[MCP Provider] Tool invocation failed for session ${this.sessionId}`, error);
                    return response;
                }
            }
            return response;
        }
        // Regular generation without tools.
        // Fix: forward analysisSchema here too — it was previously dropped on
        // the non-tool path, silently disabling schema validation.
        return this.baseProvider.generateText(options, analysisSchema);
    }
    /**
     * Streams text via the base provider. Tool usage is not supported on the
     * streaming path (matches Lighthouse's approach of using MCP only for
     * non-streaming requests).
     */
    async streamText(optionsOrPrompt, analysisSchema) {
        return this.baseProvider.streamText(optionsOrPrompt, analysisSchema);
    }
    /**
     * Detect if the prompt is requesting tool usage via simple
     * case-insensitive keyword matching.
     */
    detectToolRequest(prompt) {
        const toolKeywords = [
            "use tool",
            "call tool",
            "execute tool",
            "run tool",
            "invoke tool",
            "what tools",
            "available tools",
            "list tools",
        ];
        const lowerPrompt = prompt.toLowerCase();
        return toolKeywords.some((keyword) => lowerPrompt.includes(keyword));
    }
    /**
     * Get session statistics, or null before MCP initialization.
     */
    getSessionStats() {
        if (!this.mcpInitialized) {
            return null;
        }
        return getMCPManager(this.sessionId).getStats();
    }
    /**
     * Clean up session: tears down the MCP manager and allows a later
     * re-initialization on the next generateText call.
     */
    async cleanup() {
        if (this.mcpInitialized) {
            const { removeMCPManager } = await import("../mcp/manager.js");
            await removeMCPManager(this.sessionId);
            this.mcpInitialized = false;
        }
    }
}
|
|
168
|
+
/**
 * Create an MCP-aware provider wrapping `baseProvider`. MCP is enabled by
 * default; any field supplied in `config` overrides the defaults.
 */
export function createMCPAwareProvider(baseProvider, config) {
    const merged = { baseProvider, enableMCP: true, ...config };
    return new MCPAwareProvider(merged);
}
|
|
178
|
+
/**
 * Check if a provider is MCP-aware
 * (runtime counterpart of the .d.ts type guard `provider is MCPAwareProvider`)
 */
export function isMCPAwareProvider(provider) {
    return provider instanceof MCPAwareProvider;
}
|