@juspay/neurolink 3.0.1 → 4.0.0
- package/CHANGELOG.md +57 -6
- package/README.md +235 -2
- package/dist/agent/direct-tools.d.ts +6 -6
- package/dist/chat/client-utils.d.ts +92 -0
- package/dist/chat/client-utils.js +298 -0
- package/dist/chat/index.d.ts +27 -0
- package/dist/chat/index.js +41 -0
- package/dist/chat/session-storage.d.ts +77 -0
- package/dist/chat/session-storage.js +233 -0
- package/dist/chat/session.d.ts +95 -0
- package/dist/chat/session.js +257 -0
- package/dist/chat/sse-handler.d.ts +49 -0
- package/dist/chat/sse-handler.js +266 -0
- package/dist/chat/types.d.ts +73 -0
- package/dist/chat/types.js +5 -0
- package/dist/chat/websocket-chat-handler.d.ts +36 -0
- package/dist/chat/websocket-chat-handler.js +262 -0
- package/dist/cli/commands/config.js +12 -12
- package/dist/cli/commands/mcp.js +3 -4
- package/dist/cli/index.d.ts +0 -7
- package/dist/cli/index.js +247 -28
- package/dist/config/configManager.d.ts +60 -0
- package/dist/config/configManager.js +300 -0
- package/dist/config/types.d.ts +136 -0
- package/dist/config/types.js +43 -0
- package/dist/core/analytics.d.ts +23 -0
- package/dist/core/analytics.js +131 -0
- package/dist/core/constants.d.ts +41 -0
- package/dist/core/constants.js +50 -0
- package/dist/core/defaults.d.ts +18 -0
- package/dist/core/defaults.js +29 -0
- package/dist/core/evaluation-config.d.ts +29 -0
- package/dist/core/evaluation-config.js +144 -0
- package/dist/core/evaluation-providers.d.ts +30 -0
- package/dist/core/evaluation-providers.js +187 -0
- package/dist/core/evaluation.d.ts +117 -0
- package/dist/core/evaluation.js +528 -0
- package/dist/core/factory.js +33 -25
- package/dist/core/types.d.ts +165 -6
- package/dist/core/types.js +3 -4
- package/dist/index.d.ts +9 -4
- package/dist/index.js +25 -4
- package/dist/lib/agent/direct-tools.d.ts +6 -6
- package/dist/lib/chat/client-utils.d.ts +92 -0
- package/dist/lib/chat/client-utils.js +298 -0
- package/dist/lib/chat/index.d.ts +27 -0
- package/dist/lib/chat/index.js +41 -0
- package/dist/lib/chat/session-storage.d.ts +77 -0
- package/dist/lib/chat/session-storage.js +233 -0
- package/dist/lib/chat/session.d.ts +95 -0
- package/dist/lib/chat/session.js +257 -0
- package/dist/lib/chat/sse-handler.d.ts +49 -0
- package/dist/lib/chat/sse-handler.js +266 -0
- package/dist/lib/chat/types.d.ts +73 -0
- package/dist/lib/chat/types.js +5 -0
- package/dist/lib/chat/websocket-chat-handler.d.ts +36 -0
- package/dist/lib/chat/websocket-chat-handler.js +262 -0
- package/dist/lib/config/configManager.d.ts +60 -0
- package/dist/lib/config/configManager.js +300 -0
- package/dist/lib/config/types.d.ts +136 -0
- package/dist/lib/config/types.js +43 -0
- package/dist/lib/core/analytics.d.ts +23 -0
- package/dist/lib/core/analytics.js +131 -0
- package/dist/lib/core/constants.d.ts +41 -0
- package/dist/lib/core/constants.js +50 -0
- package/dist/lib/core/defaults.d.ts +18 -0
- package/dist/lib/core/defaults.js +29 -0
- package/dist/lib/core/evaluation-config.d.ts +29 -0
- package/dist/lib/core/evaluation-config.js +144 -0
- package/dist/lib/core/evaluation-providers.d.ts +30 -0
- package/dist/lib/core/evaluation-providers.js +187 -0
- package/dist/lib/core/evaluation.d.ts +117 -0
- package/dist/lib/core/evaluation.js +528 -0
- package/dist/lib/core/factory.js +33 -26
- package/dist/lib/core/types.d.ts +165 -6
- package/dist/lib/core/types.js +3 -4
- package/dist/lib/index.d.ts +9 -4
- package/dist/lib/index.js +25 -4
- package/dist/lib/mcp/contracts/mcpContract.d.ts +118 -0
- package/dist/lib/mcp/contracts/mcpContract.js +5 -0
- package/dist/lib/mcp/function-calling.js +11 -3
- package/dist/lib/mcp/logging.js +5 -0
- package/dist/lib/mcp/neurolink-mcp-client.js +2 -1
- package/dist/lib/mcp/orchestrator.js +18 -9
- package/dist/lib/mcp/registry.d.ts +49 -16
- package/dist/lib/mcp/registry.js +80 -6
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +5 -4
- package/dist/lib/mcp/tool-integration.js +1 -1
- package/dist/lib/mcp/tool-registry.d.ts +55 -34
- package/dist/lib/mcp/tool-registry.js +111 -97
- package/dist/lib/mcp/unified-mcp.js +6 -1
- package/dist/lib/mcp/unified-registry.d.ts +12 -4
- package/dist/lib/mcp/unified-registry.js +17 -4
- package/dist/lib/neurolink.d.ts +26 -0
- package/dist/lib/neurolink.js +43 -1
- package/dist/lib/providers/agent-enhanced-provider.d.ts +11 -2
- package/dist/lib/providers/agent-enhanced-provider.js +86 -15
- package/dist/lib/providers/amazonBedrock.d.ts +9 -1
- package/dist/lib/providers/amazonBedrock.js +26 -2
- package/dist/lib/providers/analytics-helper.d.ts +53 -0
- package/dist/lib/providers/analytics-helper.js +151 -0
- package/dist/lib/providers/anthropic.d.ts +11 -1
- package/dist/lib/providers/anthropic.js +29 -4
- package/dist/lib/providers/azureOpenAI.d.ts +3 -1
- package/dist/lib/providers/azureOpenAI.js +28 -4
- package/dist/lib/providers/function-calling-provider.d.ts +9 -1
- package/dist/lib/providers/function-calling-provider.js +14 -1
- package/dist/lib/providers/googleAIStudio.d.ts +15 -1
- package/dist/lib/providers/googleAIStudio.js +32 -2
- package/dist/lib/providers/googleVertexAI.d.ts +9 -1
- package/dist/lib/providers/googleVertexAI.js +31 -2
- package/dist/lib/providers/huggingFace.d.ts +3 -1
- package/dist/lib/providers/huggingFace.js +26 -3
- package/dist/lib/providers/mcp-provider.d.ts +9 -1
- package/dist/lib/providers/mcp-provider.js +12 -0
- package/dist/lib/providers/mistralAI.d.ts +3 -1
- package/dist/lib/providers/mistralAI.js +25 -2
- package/dist/lib/providers/ollama.d.ts +3 -1
- package/dist/lib/providers/ollama.js +27 -4
- package/dist/lib/providers/openAI.d.ts +15 -1
- package/dist/lib/providers/openAI.js +32 -2
- package/dist/lib/proxy/proxy-fetch.js +8 -7
- package/dist/lib/services/streaming/streaming-manager.d.ts +29 -0
- package/dist/lib/services/streaming/streaming-manager.js +244 -0
- package/dist/lib/services/types.d.ts +155 -0
- package/dist/lib/services/types.js +2 -0
- package/dist/lib/services/websocket/websocket-server.d.ts +34 -0
- package/dist/lib/services/websocket/websocket-server.js +304 -0
- package/dist/lib/telemetry/index.d.ts +15 -0
- package/dist/lib/telemetry/index.js +22 -0
- package/dist/lib/telemetry/telemetry-service.d.ts +47 -0
- package/dist/lib/telemetry/telemetry-service.js +259 -0
- package/dist/lib/utils/streaming-utils.d.ts +67 -0
- package/dist/lib/utils/streaming-utils.js +201 -0
- package/dist/mcp/contracts/mcpContract.d.ts +118 -0
- package/dist/mcp/contracts/mcpContract.js +5 -0
- package/dist/mcp/function-calling.js +11 -3
- package/dist/mcp/logging.js +5 -0
- package/dist/mcp/neurolink-mcp-client.js +2 -1
- package/dist/mcp/orchestrator.js +18 -9
- package/dist/mcp/registry.d.ts +49 -16
- package/dist/mcp/registry.js +80 -6
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +5 -4
- package/dist/mcp/tool-integration.js +1 -1
- package/dist/mcp/tool-registry.d.ts +55 -34
- package/dist/mcp/tool-registry.js +111 -97
- package/dist/mcp/unified-mcp.js +6 -1
- package/dist/mcp/unified-registry.d.ts +12 -4
- package/dist/mcp/unified-registry.js +17 -4
- package/dist/neurolink.d.ts +26 -0
- package/dist/neurolink.js +43 -1
- package/dist/providers/agent-enhanced-provider.d.ts +11 -2
- package/dist/providers/agent-enhanced-provider.js +86 -15
- package/dist/providers/amazonBedrock.d.ts +9 -1
- package/dist/providers/amazonBedrock.js +26 -2
- package/dist/providers/analytics-helper.d.ts +53 -0
- package/dist/providers/analytics-helper.js +151 -0
- package/dist/providers/anthropic.d.ts +11 -1
- package/dist/providers/anthropic.js +29 -4
- package/dist/providers/azureOpenAI.d.ts +3 -1
- package/dist/providers/azureOpenAI.js +29 -4
- package/dist/providers/function-calling-provider.d.ts +9 -1
- package/dist/providers/function-calling-provider.js +14 -1
- package/dist/providers/googleAIStudio.d.ts +15 -1
- package/dist/providers/googleAIStudio.js +32 -2
- package/dist/providers/googleVertexAI.d.ts +9 -1
- package/dist/providers/googleVertexAI.js +31 -2
- package/dist/providers/huggingFace.d.ts +3 -1
- package/dist/providers/huggingFace.js +26 -3
- package/dist/providers/mcp-provider.d.ts +9 -1
- package/dist/providers/mcp-provider.js +12 -0
- package/dist/providers/mistralAI.d.ts +3 -1
- package/dist/providers/mistralAI.js +25 -2
- package/dist/providers/ollama.d.ts +3 -1
- package/dist/providers/ollama.js +27 -4
- package/dist/providers/openAI.d.ts +15 -1
- package/dist/providers/openAI.js +33 -2
- package/dist/proxy/proxy-fetch.js +8 -7
- package/dist/services/streaming/streaming-manager.d.ts +29 -0
- package/dist/services/streaming/streaming-manager.js +244 -0
- package/dist/services/types.d.ts +155 -0
- package/dist/services/types.js +2 -0
- package/dist/services/websocket/websocket-server.d.ts +34 -0
- package/dist/services/websocket/websocket-server.js +304 -0
- package/dist/telemetry/index.d.ts +15 -0
- package/dist/telemetry/index.js +22 -0
- package/dist/telemetry/telemetry-service.d.ts +47 -0
- package/dist/telemetry/telemetry-service.js +261 -0
- package/dist/utils/streaming-utils.d.ts +67 -0
- package/dist/utils/streaming-utils.js +201 -0
- package/package.json +18 -2
Selected hunks from the changed provider files:

providers/anthropic.js (shipped under both package/dist/ and package/dist/lib/):

```diff
@@ -7,6 +7,9 @@
 import { AIProviderName } from "../core/types.js";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+import { evaluateResponse } from "../core/evaluation.js";
+import { createAnalytics } from "../core/analytics.js";
 import { createProxyFetch } from "../proxy/proxy-fetch.js";
 export class AnthropicProvider {
     name = AIProviderName.ANTHROPIC;
@@ -58,12 +61,13 @@ export class AnthropicProvider {
     async generateText(optionsOrPrompt, schema) {
         const functionTag = "AnthropicProvider.generateText";
         const provider = "anthropic";
+        const startTime = Date.now();
         logger.debug(`[${functionTag}] Starting text generation`);
         // Parse parameters with backward compatibility
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens =
+        const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", timeout = getDefaultTimeout(provider, "generate"), enableAnalytics = false, enableEvaluation = false, context, } = options;
         logger.debug(`[${functionTag}] Prompt: "${prompt.substring(0, 100)}...", Temperature: ${temperature}, Max tokens: ${maxTokens}, Timeout: ${timeout}`);
         const requestBody = {
             model: this.getModel(),
@@ -86,7 +90,7 @@ export class AnthropicProvider {
             timeoutController?.cleanup();
             logger.debug(`[${functionTag}] Success. Generated ${data.usage.output_tokens} tokens`);
             const content = data.content.map((block) => block.text).join("");
-
+            const result = {
                 content,
                 provider: this.name,
                 model: data.model,
@@ -97,6 +101,15 @@ export class AnthropicProvider {
                 },
                 finishReason: data.stop_reason,
             };
+            // Add analytics if enabled
+            if (options.enableAnalytics) {
+                result.analytics = createAnalytics(provider, this.defaultModel, result, Date.now() - startTime, options.context);
+            }
+            // Add evaluation if enabled
+            if (options.enableEvaluation) {
+                result.evaluation = await evaluateResponse(prompt, result.content, options.context);
+            }
+            return result;
         }
         catch (error) {
             // Always cleanup timeout
@@ -133,7 +146,7 @@ export class AnthropicProvider {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens =
+        const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", timeout = getDefaultTimeout(provider, "stream"), } = options;
         logger.debug(`[${functionTag}] Streaming prompt: "${prompt.substring(0, 100)}...", Timeout: ${timeout}`);
         const requestBody = {
             model: this.getModel(),
@@ -243,7 +256,7 @@ export class AnthropicProvider {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens =
+        const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", } = options;
         logger.debug(`[AnthropicProvider.generateTextStream] Streaming prompt: "${prompt.substring(0, 100)}..."`);
         const requestBody = {
             model: this.getModel(),
@@ -377,4 +390,16 @@ export class AnthropicProvider {
             "long-context", // Claude models support up to 200k tokens
         ];
     }
+    /**
+     * Alias for generateText() - CLI-SDK consistency
+     */
+    async generate(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
+    /**
+     * Short alias for generateText() - CLI-SDK consistency
+     */
+    async gen(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
 }
```
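The hunks above introduce the pattern repeated across every provider in this release: three new per-call options (enableAnalytics, enableEvaluation, context) and two aliases (generate, gen) that delegate to generateText(). A minimal usage sketch, assuming AnthropicProvider is re-exported from the package root; the import path, constructor arguments, and any result fields beyond those visible in the diff are assumptions:

```ts
import { AnthropicProvider } from "@juspay/neurolink"; // assumed export path

async function demo() {
  const provider = new AnthropicProvider();
  // generate() and gen() delegate to generateText(), so all three accept
  // either a bare prompt string or a TextGenerationOptions object.
  const result = await provider.generate({
    prompt: "Summarize the 4.0.0 changes in one sentence.",
    enableAnalytics: true,  // attaches result.analytics via createAnalytics()
    enableEvaluation: true, // attaches result.evaluation via evaluateResponse()
    context: { requestId: "demo-123" }, // threaded into analytics and evaluation
  });
  if (result) {
    console.log(result.content);
    console.log(result.analytics, result.evaluation);
  }
}
```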
providers/azureOpenAI.d.ts (shipped under both package/dist/ and package/dist/lib/):

```diff
@@ -4,7 +4,7 @@
  * Enterprise-grade OpenAI integration through Microsoft Azure.
  * Supports all OpenAI models with enhanced security and compliance.
  */
-import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
 import { AIProviderName } from "../core/types.js";
 export declare class AzureOpenAIProvider implements AIProvider {
     readonly name: AIProviderName;
@@ -34,4 +34,6 @@ export declare class AzureOpenAIProvider implements AIProvider {
     supportsStreaming(): boolean;
     supportsSchema(): boolean;
     getCapabilities(): string[];
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
 }
```
providers/azureOpenAI.js (shipped under both package/dist/ and package/dist/lib/):

```diff
@@ -7,6 +7,8 @@
 import { AIProviderName } from "../core/types.js";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+import { evaluateResponse } from "../core/evaluation.js";
 export class AzureOpenAIProvider {
     name = AIProviderName.AZURE;
     apiKey;
@@ -69,12 +71,13 @@ export class AzureOpenAIProvider {
     async generateText(optionsOrPrompt, schema) {
         const functionTag = "AzureOpenAIProvider.generateText";
         const provider = "azure";
+        const startTime = Date.now();
         logger.debug(`[${functionTag}] Starting text generation`);
         // Parse parameters with backward compatibility
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens =
+        const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are a helpful AI assistant.", timeout = getDefaultTimeout(provider, "generate"), } = options;
         logger.debug(`[${functionTag}] Prompt: "${prompt.substring(0, 100)}...", Temperature: ${temperature}, Max tokens: ${maxTokens}, Timeout: ${timeout}`);
         const messages = [];
         if (systemPrompt) {
@@ -101,7 +104,7 @@ export class AzureOpenAIProvider {
             timeoutController?.cleanup();
             logger.debug(`[${functionTag}] Success. Generated ${data.usage.completion_tokens} tokens`);
             const content = data.choices[0]?.message?.content || "";
-
+            const result = {
                 content,
                 provider: this.name,
                 model: data.model,
@@ -112,6 +115,21 @@ export class AzureOpenAIProvider {
                 },
                 finishReason: data.choices[0]?.finish_reason || "stop",
             };
+            // Add analytics if enabled
+            if (options.enableAnalytics) {
+                result.analytics = {
+                    provider: this.name,
+                    model: data.model,
+                    tokens: result.usage,
+                    responseTime: Date.now() - startTime,
+                    context: options.context,
+                };
+            }
+            // Add evaluation if enabled
+            if (options.enableEvaluation) {
+                result.evaluation = await evaluateResponse(options.prompt, content, options.context);
+            }
+            return result;
         }
         catch (error) {
             // Always cleanup timeout
@@ -148,7 +166,7 @@ export class AzureOpenAIProvider {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens =
+        const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are a helpful AI assistant.", timeout = getDefaultTimeout(provider, "stream"), } = options;
         logger.debug(`[${functionTag}] Streaming prompt: "${prompt.substring(0, 100)}...", Timeout: ${timeout}`);
         const messages = [];
         if (systemPrompt) {
@@ -262,7 +280,7 @@ export class AzureOpenAIProvider {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens =
+        const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are a helpful AI assistant.", } = options;
         logger.debug(`[AzureOpenAIProvider.generateTextStream] Streaming prompt: "${prompt.substring(0, 100)}..."`);
         const messages = [];
         if (systemPrompt) {
@@ -409,4 +427,10 @@ export class AzureOpenAIProvider {
             "content-filtering",
         ];
     }
+    async generate(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
+    async gen(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
 }
```
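Azure (like Vertex, HuggingFace, and Mistral below) builds the analytics record inline instead of calling the createAnalytics() helper. Its shape, as read from the hunk above; the field types are inferred from usage, not from a published type:

```ts
// Sketch of the inline analytics record; `tokens` is whatever the provider
// stored in result.usage, so its exact subfields are an assumption here.
interface ProviderAnalytics {
  provider: string;                  // e.g. "azure" (this.name)
  model: string;                     // model reported by the API response
  tokens: unknown;                   // passthrough of result.usage
  responseTime: number;              // Date.now() - startTime, in milliseconds
  context?: Record<string, unknown>; // caller-supplied options.context
}
```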
providers/function-calling-provider.d.ts (shipped under both package/dist/ and package/dist/lib/):

```diff
@@ -3,7 +3,7 @@
  * Integrates MCP tools directly with AI SDK's function calling capabilities
  * This is the missing piece that enables true AI function calling!
  */
-import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
 import { type GenerateTextResult, type StreamTextResult, type ToolSet, type Schema } from "ai";
 import type { ZodType, ZodTypeDef } from "zod";
 /**
@@ -48,6 +48,14 @@ export declare class FunctionCallingProvider implements AIProvider {
      * Stream text with function calling support
      */
     streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
+    /**
+     * Alias for generateText() - CLI-SDK consistency
+     */
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+    /**
+     * Short alias for generateText() - CLI-SDK consistency
+     */
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
 }
 /**
  * Create a function-calling enhanced version of any AI provider
```
providers/function-calling-provider.js (shipped under both package/dist/ and package/dist/lib/):

```diff
@@ -7,6 +7,7 @@ import { generateText as aiGenerateText, Output, } from "ai";
 import { getAvailableFunctionTools, executeFunctionCall, isFunctionCallingAvailable, } from "../mcp/function-calling.js";
 import { createExecutionContext } from "../mcp/context-manager.js";
 import { mcpLogger } from "../mcp/logging.js";
+import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 /**
  * Enhanced provider that enables real function calling with MCP tools
  */
@@ -106,7 +107,7 @@ export class FunctionCallingProvider {
             prompt: options.prompt,
             system: options.systemPrompt || "You are a helpful AI assistant.",
             temperature: options.temperature || 0.7,
-            maxTokens: options.maxTokens
+            maxTokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,
             tools: toolsWithExecution,
             toolChoice: "auto", // Let the AI decide when to use tools
             maxSteps: 5, // CRITICAL: Enable multi-turn tool execution
@@ -337,6 +338,18 @@ These functions provide accurate, real-time data. Use them actively to enhance y
             return this.baseProvider.streamText(options, analysisSchema);
         }
     }
+    /**
+     * Alias for generateText() - CLI-SDK consistency
+     */
+    async generate(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
+    /**
+     * Short alias for generateText() - CLI-SDK consistency
+     */
+    async gen(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
 }
 /**
  * Create a function-calling enhanced version of any AI provider
```
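The maxTokens line is the substantive fix in this file: the option is now resolved with nullish coalescing, which substitutes the shared default only when the value is null or undefined, so any explicit caller-supplied number is respected. A standalone illustration; the constant's real value lives in core/constants.js and is not visible in this diff, so the number below is a placeholder:

```ts
const DEFAULT_MAX_TOKENS = 8192; // placeholder value, for illustration only

function resolveMaxTokens(maxTokens?: number | null): number {
  // Nullish coalescing: only null/undefined fall back to the default;
  // unlike `||`, a falsy-but-present value would pass through unchanged.
  return maxTokens ?? DEFAULT_MAX_TOKENS;
}

console.log(resolveMaxTokens(256));       // 256
console.log(resolveMaxTokens(undefined)); // 8192 (the placeholder default)
```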
providers/googleAIStudio.d.ts (shipped under both package/dist/ and package/dist/lib/):

```diff
@@ -1,6 +1,6 @@
 import type { ZodType, ZodTypeDef } from "zod";
 import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult, type LanguageModelV1 } from "ai";
-import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
 export declare class GoogleAIStudio implements AIProvider {
     private modelName;
     /**
@@ -32,4 +32,18 @@ export declare class GoogleAIStudio implements AIProvider {
      * @returns Promise resolving to GenerateTextResult or null if operation fails
      */
     generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
+    /**
+     * Alias for generateText() - CLI-SDK consistency
+     * @param optionsOrPrompt - TextGenerationOptions object or prompt string
+     * @param analysisSchema - Optional schema for output validation
+     * @returns Promise resolving to GenerateTextResult or null
+     */
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
+    /**
+     * Short alias for generateText() - CLI-SDK consistency
+     * @param optionsOrPrompt - TextGenerationOptions object or prompt string
+     * @param analysisSchema - Optional schema for output validation
+     * @returns Promise resolving to GenerateTextResult or null
+     */
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
 }
```
providers/googleAIStudio.js (shipped under both package/dist/ and package/dist/lib/):

```diff
@@ -2,7 +2,9 @@ import { createGoogleGenerativeAI } from "@ai-sdk/google";
 import { streamText, generateText, Output, } from "ai";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { createProxyFetch } from "../proxy/proxy-fetch.js";
+import { evaluateResponse } from "../core/evaluation.js";
 // CRITICAL: Setup environment variables early for AI SDK compatibility
 // The AI SDK specifically looks for GOOGLE_GENERATIVE_AI_API_KEY
 // We need to ensure this is set before any AI SDK operations
@@ -113,7 +115,7 @@ export class GoogleAIStudio {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens =
+        const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, tools, timeout = getDefaultTimeout(provider, "stream"), } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
         logger.debug(`[${functionTag}] Stream request started`, {
@@ -219,12 +221,13 @@ export class GoogleAIStudio {
     async generateText(optionsOrPrompt, analysisSchema) {
         const functionTag = "GoogleAIStudio.generateText";
         const provider = "google-ai";
+        const startTime = Date.now();
         try {
             // Parse parameters - support both string and options object
             const options = typeof optionsOrPrompt === "string"
                 ? { prompt: optionsOrPrompt }
                 : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens =
+            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, tools, timeout = getDefaultTimeout(provider, "generate"), } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
             logger.debug(`[${functionTag}] Generate request started`, {
@@ -272,6 +275,15 @@ export class GoogleAIStudio {
                 responseLength: result.text?.length || 0,
                 timeout,
             });
+            // Add analytics if enabled
+            if (options.enableAnalytics) {
+                const { createAnalytics } = await import("./analytics-helper.js");
+                result.analytics = createAnalytics(provider, this.modelName, result, Date.now() - startTime, options.context);
+            }
+            // Add evaluation if enabled
+            if (options.enableEvaluation) {
+                result.evaluation = await evaluateResponse(prompt, result.text, options.context, options.evaluationDomain, options.toolUsageContext, options.conversationHistory);
+            }
             return result;
         }
         finally {
@@ -300,4 +312,22 @@ export class GoogleAIStudio {
             throw err; // Re-throw error to trigger fallback
         }
     }
+    /**
+     * Alias for generateText() - CLI-SDK consistency
+     * @param optionsOrPrompt - TextGenerationOptions object or prompt string
+     * @param analysisSchema - Optional schema for output validation
+     * @returns Promise resolving to GenerateTextResult or null
+     */
+    async generate(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
+    /**
+     * Short alias for generateText() - CLI-SDK consistency
+     * @param optionsOrPrompt - TextGenerationOptions object or prompt string
+     * @param analysisSchema - Optional schema for output validation
+     * @returns Promise resolving to GenerateTextResult or null
+     */
+    async gen(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
 }
```
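Two details set Google AI Studio apart from Anthropic's version of the same change: createAnalytics is pulled in with a dynamic import, so the helper module is only loaded on calls that actually set enableAnalytics, and evaluateResponse receives three extra arguments (evaluationDomain, toolUsageContext, conversationHistory). A minimal sketch of the lazy-loading pattern; the five-argument createAnalytics signature is taken from the hunk above, while the argument values here are placeholders:

```ts
async function attachAnalytics(
  result: { analytics?: unknown },
  enabled: boolean,
  elapsedMs: number,
) {
  if (!enabled) {
    return; // fast path: the helper module is never parsed or loaded
  }
  // Lazy-load the helper only when analytics are actually requested.
  const { createAnalytics } = await import("./analytics-helper.js");
  // "google-ai" / "model-name" stand in for the provider tag and this.modelName.
  result.analytics = createAnalytics("google-ai", "model-name", result, elapsedMs, undefined);
}
```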
providers/googleVertexAI.d.ts (shipped under both package/dist/ and package/dist/lib/):

```diff
@@ -1,6 +1,6 @@
 import type { ZodType, ZodTypeDef } from "zod";
 import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from "ai";
-import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
 export declare class GoogleVertexAI implements AIProvider {
     private modelName;
     /**
@@ -27,4 +27,12 @@ export declare class GoogleVertexAI implements AIProvider {
      * @returns Promise resolving to GenerateTextResult or null if operation fails
      */
     generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
+    /**
+     * Alias for generateText() - CLI-SDK consistency
+     */
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
+    /**
+     * Short alias for generateText() - CLI-SDK consistency
+     */
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateTextResult | null>;
 }
```
providers/googleVertexAI.js (shipped under both package/dist/ and package/dist/lib/):

```diff
@@ -24,7 +24,9 @@ async function getCreateVertexAnthropic() {
 import { streamText, generateText, Output, } from "ai";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { createProxyFetch } from "../proxy/proxy-fetch.js";
+import { evaluateResponse } from "../core/evaluation.js";
 // Default system context
 const DEFAULT_SYSTEM_CONTEXT = {
     systemPrompt: "You are a helpful AI assistant.",
@@ -289,7 +291,7 @@ export class GoogleVertexAI {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens =
+        const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
         logger.debug(`[${functionTag}] Stream request started`, {
@@ -392,12 +394,13 @@ export class GoogleVertexAI {
     async generateText(optionsOrPrompt, analysisSchema) {
         const functionTag = "GoogleVertexAI.generateText";
         const provider = "vertex";
+        const startTime = Date.now();
         try {
             // Parse parameters - support both string and options object
             const options = typeof optionsOrPrompt === "string"
                 ? { prompt: optionsOrPrompt }
                 : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens =
+            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
             logger.debug(`[${functionTag}] Generate request started`, {
@@ -440,6 +443,20 @@ export class GoogleVertexAI {
                 responseLength: result.text?.length || 0,
                 timeout,
             });
+            // Add analytics if enabled
+            if (options.enableAnalytics) {
+                result.analytics = {
+                    provider,
+                    model: this.modelName,
+                    tokens: result.usage,
+                    responseTime: Date.now() - startTime,
+                    context: options.context,
+                };
+            }
+            // Add evaluation if enabled
+            if (options.enableEvaluation) {
+                result.evaluation = await evaluateResponse(prompt, result.text, options.context);
+            }
             return result;
         }
         finally {
@@ -469,4 +486,16 @@ export class GoogleVertexAI {
             throw err; // Re-throw error to trigger fallback
         }
     }
+    /**
+     * Alias for generateText() - CLI-SDK consistency
+     */
+    async generate(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
+    /**
+     * Short alias for generateText() - CLI-SDK consistency
+     */
+    async gen(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
 }
```
providers/huggingFace.d.ts (shipped under both package/dist/ and package/dist/lib/):

```diff
@@ -1,6 +1,6 @@
 import type { ZodType, ZodTypeDef } from "zod";
 import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from "ai";
-import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
 export declare class HuggingFace implements AIProvider {
     private modelName;
     private client;
@@ -28,4 +28,6 @@ export declare class HuggingFace implements AIProvider {
      * @returns Promise resolving to GenerateTextResult or null if operation fails
      */
     generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
 }
```
providers/huggingFace.js (shipped under both package/dist/ and package/dist/lib/):

```diff
@@ -2,6 +2,8 @@ import { HfInference } from "@huggingface/inference";
 import { streamText, generateText, Output, } from "ai";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+import { evaluateResponse } from "../core/evaluation.js";
 // Default system context
 const DEFAULT_SYSTEM_CONTEXT = {
     systemPrompt: "You are a helpful AI assistant.",
@@ -107,7 +109,7 @@ class HuggingFaceLanguageModel {
             inputs: prompt,
             parameters: {
                 temperature: options.temperature || 0.7,
-                max_new_tokens: options.maxTokens
+                max_new_tokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,
                 return_full_text: false,
                 do_sample: (options.temperature || 0.7) > 0,
             },
@@ -229,7 +231,7 @@ export class HuggingFace {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens =
+        const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
         logger.debug(`[${functionTag}] Stream request started`, {
@@ -332,12 +334,13 @@ export class HuggingFace {
     async generateText(optionsOrPrompt, analysisSchema) {
         const functionTag = "HuggingFace.generateText";
         const provider = "huggingface";
+        const startTime = Date.now();
         try {
             // Parse parameters - support both string and options object
             const options = typeof optionsOrPrompt === "string"
                 ? { prompt: optionsOrPrompt }
                 : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens =
+            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
             logger.debug(`[${functionTag}] Generate request started`, {
@@ -379,6 +382,20 @@ export class HuggingFace {
                 responseLength: result.text?.length || 0,
                 timeout,
             });
+            // Add analytics if enabled
+            if (options.enableAnalytics) {
+                result.analytics = {
+                    provider,
+                    model: this.modelName,
+                    tokens: result.usage,
+                    responseTime: Date.now() - startTime,
+                    context: options.context,
+                };
+            }
+            // Add evaluation if enabled
+            if (options.enableEvaluation) {
+                result.evaluation = await evaluateResponse(prompt, result.text, options.context);
+            }
             return result;
         }
         finally {
@@ -407,4 +424,10 @@ export class HuggingFace {
             throw err; // Re-throw error to trigger fallback
         }
     }
+    async generate(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
+    async gen(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
 }
```
providers/mcp-provider.d.ts (shipped under both package/dist/ and package/dist/lib/):

```diff
@@ -2,7 +2,7 @@
  * NeuroLink MCP-Aware AI Provider
  * Integrates MCP tools with AI providers following Lighthouse's pattern
  */
-import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
 import type { StreamTextResult, ToolSet, Schema, GenerateTextResult } from "ai";
 import type { ZodType, ZodTypeDef } from "zod";
 /**
@@ -51,6 +51,14 @@ export declare class MCPAwareProvider implements AIProvider {
      * Clean up session
      */
     cleanup(): Promise<void>;
+    /**
+     * Alias for generateText() - CLI-SDK consistency
+     */
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+    /**
+     * Short alias for generateText() - CLI-SDK consistency
+     */
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
 }
 /**
  * Create an MCP-aware provider
```
providers/mcp-provider.js (shipped under both package/dist/ and package/dist/lib/):

```diff
@@ -192,6 +192,18 @@ Please provide a response based on this information.`;
             this.mcpInitialized = false;
         }
     }
+    /**
+     * Alias for generateText() - CLI-SDK consistency
+     */
+    async generate(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
+    /**
+     * Short alias for generateText() - CLI-SDK consistency
+     */
+    async gen(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
 }
 /**
  * Create an MCP-aware provider
```
providers/mistralAI.d.ts (shipped under both package/dist/ and package/dist/lib/):

```diff
@@ -1,6 +1,6 @@
 import type { ZodType, ZodTypeDef } from "zod";
 import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from "ai";
-import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
 export declare class MistralAI implements AIProvider {
     private modelName;
     private client;
@@ -28,5 +28,7 @@ export declare class MistralAI implements AIProvider {
      * @returns Promise resolving to GenerateTextResult or null if operation fails
      */
     generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
 }
 export default MistralAI;
```
providers/mistralAI.js (shipped under both package/dist/ and package/dist/lib/):

```diff
@@ -2,6 +2,8 @@ import { createMistral } from "@ai-sdk/mistral";
 import { streamText, generateText, Output, } from "ai";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+import { evaluateResponse } from "../core/evaluation.js";
 // Default system context
 const DEFAULT_SYSTEM_CONTEXT = {
     systemPrompt: "You are a helpful AI assistant.",
@@ -89,7 +91,7 @@ export class MistralAI {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens =
+        const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
         logger.debug(`[${functionTag}] Stream request started`, {
@@ -192,12 +194,13 @@ export class MistralAI {
     async generateText(optionsOrPrompt, analysisSchema) {
         const functionTag = "MistralAI.generateText";
         const provider = "mistral";
+        const startTime = Date.now();
         try {
             // Parse parameters - support both string and options object
             const options = typeof optionsOrPrompt === "string"
                 ? { prompt: optionsOrPrompt }
                 : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens =
+            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
             logger.debug(`[${functionTag}] Generate request started`, {
@@ -239,6 +242,20 @@ export class MistralAI {
                 responseLength: result.text?.length || 0,
                 timeout,
             });
+            // Add analytics if enabled
+            if (options.enableAnalytics) {
+                result.analytics = {
+                    provider,
+                    model: this.modelName,
+                    tokens: result.usage,
+                    responseTime: Date.now() - startTime,
+                    context: options.context,
+                };
+            }
+            // Add evaluation if enabled
+            if (options.enableEvaluation) {
+                result.evaluation = await evaluateResponse(prompt, result.text, options.context);
+            }
             return result;
         }
         finally {
@@ -267,5 +284,11 @@ export class MistralAI {
             throw err; // Re-throw error to trigger fallback
         }
     }
+    async generate(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
+    async gen(optionsOrPrompt, analysisSchema) {
+        return this.generateText(optionsOrPrompt, analysisSchema);
+    }
 }
 export default MistralAI;
```