@juspay/neurolink 1.6.0 → 1.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +193 -7
- package/README.md +100 -17
- package/dist/agent/direct-tools.d.ts +1203 -0
- package/dist/agent/direct-tools.js +387 -0
- package/dist/cli/commands/agent-generate.d.ts +2 -0
- package/dist/cli/commands/agent-generate.js +70 -0
- package/dist/cli/commands/config.d.ts +6 -6
- package/dist/cli/commands/config.js +326 -273
- package/dist/cli/commands/mcp.d.ts +2 -1
- package/dist/cli/commands/mcp.js +874 -146
- package/dist/cli/commands/ollama.d.ts +1 -1
- package/dist/cli/commands/ollama.js +153 -143
- package/dist/cli/index.js +589 -323
- package/dist/cli/utils/complete-setup.d.ts +19 -0
- package/dist/cli/utils/complete-setup.js +81 -0
- package/dist/cli/utils/env-manager.d.ts +44 -0
- package/dist/cli/utils/env-manager.js +226 -0
- package/dist/cli/utils/interactive-setup.d.ts +48 -0
- package/dist/cli/utils/interactive-setup.js +302 -0
- package/dist/core/dynamic-models.d.ts +208 -0
- package/dist/core/dynamic-models.js +250 -0
- package/dist/core/factory.d.ts +13 -6
- package/dist/core/factory.js +176 -61
- package/dist/core/types.d.ts +4 -2
- package/dist/core/types.js +4 -4
- package/dist/index.d.ts +16 -16
- package/dist/index.js +16 -16
- package/dist/lib/agent/direct-tools.d.ts +1203 -0
- package/dist/lib/agent/direct-tools.js +387 -0
- package/dist/lib/core/dynamic-models.d.ts +208 -0
- package/dist/lib/core/dynamic-models.js +250 -0
- package/dist/lib/core/factory.d.ts +13 -6
- package/dist/lib/core/factory.js +176 -61
- package/dist/lib/core/types.d.ts +4 -2
- package/dist/lib/core/types.js +4 -4
- package/dist/lib/index.d.ts +16 -16
- package/dist/lib/index.js +16 -16
- package/dist/lib/mcp/auto-discovery.d.ts +120 -0
- package/dist/lib/mcp/auto-discovery.js +793 -0
- package/dist/lib/mcp/client.d.ts +66 -0
- package/dist/lib/mcp/client.js +245 -0
- package/dist/lib/mcp/config.d.ts +31 -0
- package/dist/lib/mcp/config.js +74 -0
- package/dist/lib/mcp/context-manager.d.ts +4 -4
- package/dist/lib/mcp/context-manager.js +24 -18
- package/dist/lib/mcp/factory.d.ts +28 -11
- package/dist/lib/mcp/factory.js +36 -29
- package/dist/lib/mcp/function-calling.d.ts +51 -0
- package/dist/lib/mcp/function-calling.js +510 -0
- package/dist/lib/mcp/index.d.ts +190 -0
- package/dist/lib/mcp/index.js +156 -0
- package/dist/lib/mcp/initialize-tools.d.ts +28 -0
- package/dist/lib/mcp/initialize-tools.js +209 -0
- package/dist/lib/mcp/initialize.d.ts +17 -0
- package/dist/lib/mcp/initialize.js +51 -0
- package/dist/lib/mcp/logging.d.ts +71 -0
- package/dist/lib/mcp/logging.js +183 -0
- package/dist/lib/mcp/manager.d.ts +67 -0
- package/dist/lib/mcp/manager.js +176 -0
- package/dist/lib/mcp/neurolink-mcp-client.d.ts +96 -0
- package/dist/lib/mcp/neurolink-mcp-client.js +417 -0
- package/dist/lib/mcp/orchestrator.d.ts +3 -3
- package/dist/lib/mcp/orchestrator.js +46 -43
- package/dist/lib/mcp/registry.d.ts +2 -2
- package/dist/lib/mcp/registry.js +42 -33
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.d.ts +1 -1
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +204 -65
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +142 -102
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +6 -6
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +197 -142
- package/dist/lib/mcp/servers/utilities/utility-server.d.ts +8 -0
- package/dist/lib/mcp/servers/utilities/utility-server.js +326 -0
- package/dist/lib/mcp/tool-integration.d.ts +67 -0
- package/dist/lib/mcp/tool-integration.js +179 -0
- package/dist/lib/mcp/unified-registry.d.ts +269 -0
- package/dist/lib/mcp/unified-registry.js +1411 -0
- package/dist/lib/neurolink.d.ts +68 -6
- package/dist/lib/neurolink.js +304 -42
- package/dist/lib/providers/agent-enhanced-provider.d.ts +59 -0
- package/dist/lib/providers/agent-enhanced-provider.js +242 -0
- package/dist/lib/providers/amazonBedrock.d.ts +3 -3
- package/dist/lib/providers/amazonBedrock.js +54 -50
- package/dist/lib/providers/anthropic.d.ts +2 -2
- package/dist/lib/providers/anthropic.js +92 -84
- package/dist/lib/providers/azureOpenAI.d.ts +2 -2
- package/dist/lib/providers/azureOpenAI.js +97 -86
- package/dist/lib/providers/function-calling-provider.d.ts +70 -0
- package/dist/lib/providers/function-calling-provider.js +359 -0
- package/dist/lib/providers/googleAIStudio.d.ts +10 -5
- package/dist/lib/providers/googleAIStudio.js +60 -38
- package/dist/lib/providers/googleVertexAI.d.ts +3 -3
- package/dist/lib/providers/googleVertexAI.js +96 -86
- package/dist/lib/providers/huggingFace.d.ts +3 -3
- package/dist/lib/providers/huggingFace.js +70 -63
- package/dist/lib/providers/index.d.ts +11 -11
- package/dist/lib/providers/index.js +18 -18
- package/dist/lib/providers/mcp-provider.d.ts +62 -0
- package/dist/lib/providers/mcp-provider.js +183 -0
- package/dist/lib/providers/mistralAI.d.ts +3 -3
- package/dist/lib/providers/mistralAI.js +42 -36
- package/dist/lib/providers/ollama.d.ts +4 -4
- package/dist/lib/providers/ollama.js +113 -98
- package/dist/lib/providers/openAI.d.ts +7 -3
- package/dist/lib/providers/openAI.js +45 -33
- package/dist/lib/utils/logger.js +2 -2
- package/dist/lib/utils/providerUtils.js +53 -31
- package/dist/mcp/auto-discovery.d.ts +120 -0
- package/dist/mcp/auto-discovery.js +794 -0
- package/dist/mcp/client.d.ts +66 -0
- package/dist/mcp/client.js +245 -0
- package/dist/mcp/config.d.ts +31 -0
- package/dist/mcp/config.js +74 -0
- package/dist/mcp/context-manager.d.ts +4 -4
- package/dist/mcp/context-manager.js +24 -18
- package/dist/mcp/factory.d.ts +28 -11
- package/dist/mcp/factory.js +36 -29
- package/dist/mcp/function-calling.d.ts +51 -0
- package/dist/mcp/function-calling.js +510 -0
- package/dist/mcp/index.d.ts +190 -0
- package/dist/mcp/index.js +156 -0
- package/dist/mcp/initialize-tools.d.ts +28 -0
- package/dist/mcp/initialize-tools.js +210 -0
- package/dist/mcp/initialize.d.ts +17 -0
- package/dist/mcp/initialize.js +51 -0
- package/dist/mcp/logging.d.ts +71 -0
- package/dist/mcp/logging.js +183 -0
- package/dist/mcp/manager.d.ts +67 -0
- package/dist/mcp/manager.js +176 -0
- package/dist/mcp/neurolink-mcp-client.d.ts +96 -0
- package/dist/mcp/neurolink-mcp-client.js +417 -0
- package/dist/mcp/orchestrator.d.ts +3 -3
- package/dist/mcp/orchestrator.js +46 -43
- package/dist/mcp/registry.d.ts +2 -2
- package/dist/mcp/registry.js +42 -33
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.d.ts +1 -1
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +204 -65
- package/dist/mcp/servers/ai-providers/ai-core-server.js +142 -102
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +6 -6
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +197 -142
- package/dist/mcp/servers/utilities/utility-server.d.ts +8 -0
- package/dist/mcp/servers/utilities/utility-server.js +326 -0
- package/dist/mcp/tool-integration.d.ts +67 -0
- package/dist/mcp/tool-integration.js +179 -0
- package/dist/mcp/unified-registry.d.ts +269 -0
- package/dist/mcp/unified-registry.js +1411 -0
- package/dist/neurolink.d.ts +68 -6
- package/dist/neurolink.js +304 -42
- package/dist/providers/agent-enhanced-provider.d.ts +59 -0
- package/dist/providers/agent-enhanced-provider.js +242 -0
- package/dist/providers/amazonBedrock.d.ts +3 -3
- package/dist/providers/amazonBedrock.js +54 -50
- package/dist/providers/anthropic.d.ts +2 -2
- package/dist/providers/anthropic.js +92 -84
- package/dist/providers/azureOpenAI.d.ts +2 -2
- package/dist/providers/azureOpenAI.js +97 -86
- package/dist/providers/function-calling-provider.d.ts +70 -0
- package/dist/providers/function-calling-provider.js +359 -0
- package/dist/providers/googleAIStudio.d.ts +10 -5
- package/dist/providers/googleAIStudio.js +60 -38
- package/dist/providers/googleVertexAI.d.ts +3 -3
- package/dist/providers/googleVertexAI.js +96 -86
- package/dist/providers/huggingFace.d.ts +3 -3
- package/dist/providers/huggingFace.js +70 -63
- package/dist/providers/index.d.ts +11 -11
- package/dist/providers/index.js +18 -18
- package/dist/providers/mcp-provider.d.ts +62 -0
- package/dist/providers/mcp-provider.js +183 -0
- package/dist/providers/mistralAI.d.ts +3 -3
- package/dist/providers/mistralAI.js +42 -36
- package/dist/providers/ollama.d.ts +4 -4
- package/dist/providers/ollama.js +113 -98
- package/dist/providers/openAI.d.ts +7 -3
- package/dist/providers/openAI.js +45 -33
- package/dist/utils/logger.js +2 -2
- package/dist/utils/providerUtils.js +53 -31
- package/package.json +175 -161

@@ -0,0 +1,183 @@
+/**
+ * NeuroLink MCP-Aware AI Provider
+ * Integrates MCP tools with AI providers following Lighthouse's pattern
+ */
+import { getMCPManager } from "../mcp/manager.js";
+import { initializeMCPTools } from "../mcp/initialize-tools.js";
+import { logger } from "../utils/logger.js";
+import { v4 as uuidv4 } from "uuid";
+/**
+ * MCP-Aware AI Provider
+ * Wraps any AI provider with MCP tool capabilities
+ */
+export class MCPAwareProvider {
+    baseProvider;
+    config;
+    sessionId;
+    mcpInitialized = false;
+    constructor(config) {
+        this.baseProvider = config.baseProvider;
+        this.config = config;
+        this.sessionId = config.sessionId || uuidv4();
+    }
+    /**
+     * Initialize MCP tools for this session
+     */
+    async initializeMCP() {
+        if (this.mcpInitialized || this.config.enableMCP === false) {
+            return;
+        }
+        try {
+            // Get or create MCP client for this session
+            const mcpClient = getMCPManager(this.sessionId, {
+                userId: this.config.userId,
+                aiProvider: this.config.providerName,
+                modelId: this.config.modelName,
+            });
+            // Create execution context
+            const context = {
+                sessionId: this.sessionId,
+                userId: this.config.userId,
+                organizationId: this.config.organizationId,
+                aiProvider: this.config.providerName,
+                modelId: this.config.modelName,
+                timestamp: Date.now(),
+            };
+            // Initialize all MCP tools
+            initializeMCPTools(this.sessionId, mcpClient, context);
+            this.mcpInitialized = true;
+            const tools = mcpClient.getTools();
+            const toolCount = Object.keys(tools).length;
+            logger.info(`[MCP Provider] Initialized ${toolCount} tools for session ${this.sessionId}`);
+        }
+        catch (error) {
+            logger.error(`[MCP Provider] Failed to initialize MCP for session ${this.sessionId}`, error);
+            // Continue without MCP tools if initialization fails
+        }
+    }
+    async generateText(optionsOrPrompt, analysisSchema) {
+        // Ensure MCP is initialized
+        await this.initializeMCP();
+        // Parse options
+        const options = typeof optionsOrPrompt === "string"
+            ? { prompt: optionsOrPrompt }
+            : optionsOrPrompt;
+        // Check if prompt requests tool usage
+        const needsTools = this.detectToolRequest(options.prompt);
+        if (needsTools && this.mcpInitialized) {
+            // Get MCP client
+            const mcpClient = getMCPManager(this.sessionId);
+            // Create enhanced prompt with available tools
+            const tools = mcpClient.getTools();
+            const toolList = Object.keys(tools)
+                .map((name) => {
+                const tool = tools[name];
+                return `- ${name}: ${tool.description || "No description"}`;
+            })
+                .join("\n");
+            const enhancedPrompt = `${options.prompt}
+
+Available tools:
+${toolList}
+
+To use a tool, respond with:
+TOOL: <tool_name>
+PARAMS: <json_params>
+
+Otherwise, provide a direct response.`;
+            // Generate response with enhanced prompt
+            const response = await this.baseProvider.generateText({
+                ...options,
+                prompt: enhancedPrompt,
+            }, analysisSchema);
+            if (!response) {
+                return null;
+            }
+            // Check if response includes tool invocation
+            const toolMatch = response.text.match(/TOOL:\s*(\S+)\s*\nPARAMS:\s*({.*})/s);
+            if (toolMatch) {
+                const toolName = toolMatch[1];
+                const toolParams = JSON.parse(toolMatch[2]);
+                // Execute tool
+                const toolResult = await mcpClient.executeTool(toolName, toolParams);
+                // Generate final response with tool result
+                const finalPrompt = `${options.prompt}
+
+Tool ${toolName} was executed with result:
+${JSON.stringify(toolResult, null, 2)}
+
+Please provide a response based on this information.`;
+                const finalResponse = await this.baseProvider.generateText({
+                    ...options,
+                    prompt: finalPrompt,
+                }, analysisSchema);
+                if (!finalResponse) {
+                    return null;
+                }
+                // Return response (tool usage is tracked internally)
+                return finalResponse;
+            }
+            return response;
+        }
+        // Regular generation without tools
+        return this.baseProvider.generateText(options);
+    }
+    async streamText(optionsOrPrompt, analysisSchema) {
+        // For now, streaming doesn't support tool usage
+        // This matches Lighthouse's approach where MCP is used for non-streaming requests
+        return this.baseProvider.streamText(optionsOrPrompt, analysisSchema);
+    }
+    /**
+     * Detect if the prompt is requesting tool usage
+     */
+    detectToolRequest(prompt) {
+        const toolKeywords = [
+            "use tool",
+            "call tool",
+            "execute tool",
+            "run tool",
+            "invoke tool",
+            "what tools",
+            "available tools",
+            "list tools",
+        ];
+        const lowerPrompt = prompt.toLowerCase();
+        return toolKeywords.some((keyword) => lowerPrompt.includes(keyword));
+    }
+    /**
+     * Get session statistics
+     */
+    getSessionStats() {
+        if (!this.mcpInitialized) {
+            return null;
+        }
+        const mcpClient = getMCPManager(this.sessionId);
+        return mcpClient.getStats();
+    }
+    /**
+     * Clean up session
+     */
+    async cleanup() {
+        if (this.mcpInitialized) {
+            const { removeMCPManager } = await import("../mcp/manager.js");
+            await removeMCPManager(this.sessionId);
+            this.mcpInitialized = false;
+        }
+    }
+}
+/**
+ * Create an MCP-aware provider
+ */
+export function createMCPAwareProvider(baseProvider, config) {
+    return new MCPAwareProvider({
+        baseProvider,
+        enableMCP: true,
+        ...config,
+    });
+}
+/**
+ * Check if a provider is MCP-aware
+ */
+export function isMCPAwareProvider(provider) {
+    return provider instanceof MCPAwareProvider;
+}
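
The new mcp-provider.js module above (shipped at both dist/providers/ and dist/lib/providers/ per the file list) exports MCPAwareProvider, createMCPAwareProvider, and isMCPAwareProvider. Below is a minimal usage sketch, not documented API: the deep dist import path, the stub base provider, and the config values are assumptions made for illustration only.

```typescript
// Sketch only: import path, stub provider, and config values are assumptions.
import {
  createMCPAwareProvider,
  isMCPAwareProvider,
} from "@juspay/neurolink/dist/providers/mcp-provider.js";

// Hypothetical stand-in for a real provider (OpenAI, MistralAI, Ollama, ...):
// anything exposing generateText/streamText appears sufficient for the wrapper.
const baseProvider = {
  async generateText(options: { prompt: string }) {
    return { text: `echo: ${options.prompt}` };
  },
  async streamText(_options: { prompt: string }) {
    return null;
  },
};

const provider = createMCPAwareProvider(baseProvider, {
  providerName: "openai", // copied into the MCP execution context
  modelName: "gpt-4o", // hypothetical model id
  sessionId: "demo-session", // optional; a UUID is generated when omitted
});

// Prompts containing keywords such as "available tools" take the
// tool-augmented path in generateText() shown in the diff above.
const result = await provider.generateText("What tools are available?");
console.log(isMCPAwareProvider(provider), result?.text);
```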

@@ -1,6 +1,6 @@
-import type { ZodType, ZodTypeDef } from
-import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from
-import type { AIProvider, TextGenerationOptions, StreamTextOptions } from
+import type { ZodType, ZodTypeDef } from "zod";
+import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from "ai";
+import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
 export declare class MistralAI implements AIProvider {
     private modelName;
     private client;
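
Both declaration files in this diff (mistralAI.d.ts here and ollama.d.ts further below) implement the shared AIProvider type from ../core/types.js, which is not part of this excerpt. The sketch below is an assumed approximation of that contract, inferred only from the signatures and defaults visible in these files; the real interface may differ.

```typescript
// Assumed shapes only, inferred from the providers shown in this diff.
import type { GenerateTextResult, StreamTextResult, ToolSet } from "ai";

// Hypothetical approximation of TextGenerationOptions / StreamTextOptions.
interface GenerationOptionsSketch {
  prompt: string;
  temperature?: number; // MistralAI defaults to 0.7 in the hunks below
  maxTokens?: number; // MistralAI defaults to 500
  systemPrompt?: string;
  schema?: unknown; // ZodType or ai-sdk Schema in the real types
}

// Hypothetical approximation of the AIProvider contract.
interface AIProviderSketch {
  generateText(
    optionsOrPrompt: string | GenerationOptionsSketch,
    analysisSchema?: unknown,
  ): Promise<GenerateTextResult<ToolSet, unknown> | null>;
  streamText(
    optionsOrPrompt: string | GenerationOptionsSketch,
    analysisSchema?: unknown,
  ): Promise<StreamTextResult<ToolSet, unknown> | null>;
}
```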

@@ -1,20 +1,20 @@
-import { createMistral } from
-import { streamText, generateText, Output } from
-import { logger } from
+import { createMistral } from "@ai-sdk/mistral";
+import { streamText, generateText, Output, } from "ai";
+import { logger } from "../utils/logger.js";
 // Default system context
 const DEFAULT_SYSTEM_CONTEXT = {
-    systemPrompt:
+    systemPrompt: "You are a helpful AI assistant.",
 };
 // Configuration helpers
 const getMistralApiKey = () => {
     const apiKey = process.env.MISTRAL_API_KEY;
     if (!apiKey) {
-        throw new Error(
+        throw new Error("MISTRAL_API_KEY environment variable is not set");
     }
     return apiKey;
 };
 const getMistralModelId = () => {
-    return process.env.MISTRAL_MODEL ||
+    return process.env.MISTRAL_MODEL || "mistral-small";
 };
 const hasValidAuth = () => {
     return !!process.env.MISTRAL_API_KEY;
@@ -26,7 +26,7 @@ function getMistralClient() {
         const apiKey = getMistralApiKey();
         _mistralClient = createMistral({
             apiKey,
-            baseURL: process.env.MISTRAL_ENDPOINT ||
+            baseURL: process.env.MISTRAL_ENDPOINT || "https://api.mistral.ai/v1",
         });
     }
     return _mistralClient;
@@ -40,25 +40,25 @@ export class MistralAI {
      * @param modelName - Optional model name to override the default from config
      */
     constructor(modelName) {
-        const functionTag =
+        const functionTag = "MistralAI.constructor";
         this.modelName = modelName || getMistralModelId();
         try {
             this.client = getMistralClient();
             logger.debug(`[${functionTag}] Initialization started`, {
                 modelName: this.modelName,
-                hasApiKey: hasValidAuth()
+                hasApiKey: hasValidAuth(),
             });
             logger.debug(`[${functionTag}] Initialization completed`, {
                 modelName: this.modelName,
-                success: true
+                success: true,
             });
         }
         catch (err) {
             logger.error(`[${functionTag}] Initialization failed`, {
-                message:
+                message: "Error in initializing Mistral AI",
                 modelName: this.modelName,
                 error: err instanceof Error ? err.message : String(err),
-                stack: err instanceof Error ? err.stack : undefined
+                stack: err instanceof Error ? err.stack : undefined,
             });
             throw err;
         }
@@ -68,8 +68,8 @@ export class MistralAI {
      * @private
      */
     getModel() {
-        logger.debug(
-            modelName: this.modelName
+        logger.debug("MistralAI.getModel - Mistral AI model selected", {
+            modelName: this.modelName,
         });
         return this.client(this.modelName);
     }
@@ -80,15 +80,15 @@ export class MistralAI {
      * @returns Promise resolving to StreamTextResult or null if operation fails
      */
     async streamText(optionsOrPrompt, analysisSchema) {
-        const functionTag =
-        const provider =
+        const functionTag = "MistralAI.streamText";
+        const provider = "mistral";
         let chunkCount = 0;
         try {
             // Parse parameters - support both string and options object
-            const options = typeof optionsOrPrompt ===
+            const options = typeof optionsOrPrompt === "string"
                 ? { prompt: optionsOrPrompt }
                 : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
             logger.debug(`[${functionTag}] Stream request started`, {
@@ -97,7 +97,7 @@ export class MistralAI {
                 promptLength: prompt.length,
                 temperature,
                 maxTokens,
-                hasSchema: !!finalSchema
+                hasSchema: !!finalSchema,
             });
             const model = this.getModel();
             const streamOptions = {
@@ -116,7 +116,7 @@ export class MistralAI {
                         error: errorMessage,
                         stack: errorStack,
                         promptLength: prompt.length,
-                        chunkCount
+                        chunkCount,
                     });
                 },
                 onFinish: (event) => {
@@ -127,7 +127,7 @@ export class MistralAI {
                         usage: event.usage,
                         totalChunks: chunkCount,
                         promptLength: prompt.length,
-                        responseLength: event.text?.length || 0
+                        responseLength: event.text?.length || 0,
                     });
                 },
                 onChunk: (event) => {
@@ -137,12 +137,14 @@ export class MistralAI {
                         modelName: this.modelName,
                         chunkNumber: chunkCount,
                         chunkLength: event.chunk.text?.length || 0,
-                        chunkType: event.chunk.type
+                        chunkType: event.chunk.type,
                     });
-                }
+                },
             };
             if (finalSchema) {
-                streamOptions.experimental_output = Output.object({
+                streamOptions.experimental_output = Output.object({
+                    schema: finalSchema,
+                });
             }
             const result = streamText(streamOptions);
             return result;
@@ -151,9 +153,11 @@ export class MistralAI {
             logger.error(`[${functionTag}] Exception`, {
                 provider,
                 modelName: this.modelName,
-                message:
+                message: "Error in streaming text",
                 err: String(err),
-                promptLength: typeof optionsOrPrompt ===
+                promptLength: typeof optionsOrPrompt === "string"
+                    ? optionsOrPrompt.length
+                    : optionsOrPrompt.prompt.length,
             });
             throw err; // Re-throw error to trigger fallback
         }
@@ -165,14 +169,14 @@ export class MistralAI {
      * @returns Promise resolving to GenerateTextResult or null if operation fails
      */
     async generateText(optionsOrPrompt, analysisSchema) {
-        const functionTag =
-        const provider =
+        const functionTag = "MistralAI.generateText";
+        const provider = "mistral";
         try {
             // Parse parameters - support both string and options object
-            const options = typeof optionsOrPrompt ===
+            const options = typeof optionsOrPrompt === "string"
                 ? { prompt: optionsOrPrompt }
                 : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
             logger.debug(`[${functionTag}] Generate request started`, {
@@ -180,7 +184,7 @@ export class MistralAI {
                 modelName: this.modelName,
                 promptLength: prompt.length,
                 temperature,
-                maxTokens
+                maxTokens,
             });
             const model = this.getModel();
             const generateOptions = {
@@ -188,10 +192,12 @@ export class MistralAI {
                 prompt: prompt,
                 system: systemPrompt,
                 temperature,
-                maxTokens
+                maxTokens,
             };
             if (finalSchema) {
-                generateOptions.experimental_output = Output.object({
+                generateOptions.experimental_output = Output.object({
+                    schema: finalSchema,
+                });
             }
             const result = await generateText(generateOptions);
             logger.debug(`[${functionTag}] Generate text completed`, {
@@ -199,7 +205,7 @@ export class MistralAI {
                 modelName: this.modelName,
                 usage: result.usage,
                 finishReason: result.finishReason,
-                responseLength: result.text?.length || 0
+                responseLength: result.text?.length || 0,
             });
             return result;
         }
@@ -207,8 +213,8 @@ export class MistralAI {
             logger.error(`[${functionTag}] Exception`, {
                 provider,
                 modelName: this.modelName,
-                message:
-                err: String(err)
+                message: "Error in generating text",
+                err: String(err),
             });
             throw err; // Re-throw error to trigger fallback
         }
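
A recurring change across the mistralAI.js hunks above is that experimental_output is now built as Output.object({ schema: finalSchema }), with the schema arriving either through the options object or the trailing analysisSchema parameter. The sketch below shows that options-object call shape; it assumes MISTRAL_API_KEY is set, that the class resolves at this dist path, and uses an illustrative Zod schema.

```typescript
// Sketch of the options-object call path visible in the hunks above; the
// import path and the schema are illustrative assumptions.
import { z } from "zod";
import { MistralAI } from "@juspay/neurolink/dist/providers/mistralAI.js";

const sentimentSchema = z.object({
  sentiment: z.enum(["positive", "neutral", "negative"]),
  confidence: z.number().min(0).max(1),
});

const mistral = new MistralAI(); // model falls back to MISTRAL_MODEL or "mistral-small"
const result = await mistral.generateText({
  prompt: "Classify the sentiment of: 'the new release works great'",
  temperature: 0.7, // same defaults as in the destructuring above
  maxTokens: 500,
  systemPrompt: "You are a helpful AI assistant.",
  schema: sentimentSchema, // routed into Output.object({ schema }) internally
});
console.log(result?.finishReason, result?.usage);
```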

@@ -10,10 +10,10 @@
  * - Health checking and service validation
  * - Streaming and non-streaming text generation
  */
-import type { AIProvider, TextGenerationOptions, StreamTextOptions } from
-import type { GenerateTextResult, StreamTextResult, ToolSet } from
-import type { ZodType, ZodTypeDef } from
-import type { Schema } from
+import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
+import type { GenerateTextResult, StreamTextResult, ToolSet } from "ai";
+import type { ZodType, ZodTypeDef } from "zod";
+import type { Schema } from "ai";
 export declare class Ollama implements AIProvider {
     private baseUrl;
     private modelName;