@juspay/neurolink 1.6.0 → 1.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +193 -7
- package/README.md +100 -17
- package/dist/agent/direct-tools.d.ts +1203 -0
- package/dist/agent/direct-tools.js +387 -0
- package/dist/cli/commands/agent-generate.d.ts +2 -0
- package/dist/cli/commands/agent-generate.js +70 -0
- package/dist/cli/commands/config.d.ts +6 -6
- package/dist/cli/commands/config.js +326 -273
- package/dist/cli/commands/mcp.d.ts +2 -1
- package/dist/cli/commands/mcp.js +874 -146
- package/dist/cli/commands/ollama.d.ts +1 -1
- package/dist/cli/commands/ollama.js +153 -143
- package/dist/cli/index.js +589 -323
- package/dist/cli/utils/complete-setup.d.ts +19 -0
- package/dist/cli/utils/complete-setup.js +81 -0
- package/dist/cli/utils/env-manager.d.ts +44 -0
- package/dist/cli/utils/env-manager.js +226 -0
- package/dist/cli/utils/interactive-setup.d.ts +48 -0
- package/dist/cli/utils/interactive-setup.js +302 -0
- package/dist/core/dynamic-models.d.ts +208 -0
- package/dist/core/dynamic-models.js +250 -0
- package/dist/core/factory.d.ts +13 -6
- package/dist/core/factory.js +176 -61
- package/dist/core/types.d.ts +4 -2
- package/dist/core/types.js +4 -4
- package/dist/index.d.ts +16 -16
- package/dist/index.js +16 -16
- package/dist/lib/agent/direct-tools.d.ts +1203 -0
- package/dist/lib/agent/direct-tools.js +387 -0
- package/dist/lib/core/dynamic-models.d.ts +208 -0
- package/dist/lib/core/dynamic-models.js +250 -0
- package/dist/lib/core/factory.d.ts +13 -6
- package/dist/lib/core/factory.js +176 -61
- package/dist/lib/core/types.d.ts +4 -2
- package/dist/lib/core/types.js +4 -4
- package/dist/lib/index.d.ts +16 -16
- package/dist/lib/index.js +16 -16
- package/dist/lib/mcp/auto-discovery.d.ts +120 -0
- package/dist/lib/mcp/auto-discovery.js +793 -0
- package/dist/lib/mcp/client.d.ts +66 -0
- package/dist/lib/mcp/client.js +245 -0
- package/dist/lib/mcp/config.d.ts +31 -0
- package/dist/lib/mcp/config.js +74 -0
- package/dist/lib/mcp/context-manager.d.ts +4 -4
- package/dist/lib/mcp/context-manager.js +24 -18
- package/dist/lib/mcp/factory.d.ts +28 -11
- package/dist/lib/mcp/factory.js +36 -29
- package/dist/lib/mcp/function-calling.d.ts +51 -0
- package/dist/lib/mcp/function-calling.js +510 -0
- package/dist/lib/mcp/index.d.ts +190 -0
- package/dist/lib/mcp/index.js +156 -0
- package/dist/lib/mcp/initialize-tools.d.ts +28 -0
- package/dist/lib/mcp/initialize-tools.js +209 -0
- package/dist/lib/mcp/initialize.d.ts +17 -0
- package/dist/lib/mcp/initialize.js +51 -0
- package/dist/lib/mcp/logging.d.ts +71 -0
- package/dist/lib/mcp/logging.js +183 -0
- package/dist/lib/mcp/manager.d.ts +67 -0
- package/dist/lib/mcp/manager.js +176 -0
- package/dist/lib/mcp/neurolink-mcp-client.d.ts +96 -0
- package/dist/lib/mcp/neurolink-mcp-client.js +417 -0
- package/dist/lib/mcp/orchestrator.d.ts +3 -3
- package/dist/lib/mcp/orchestrator.js +46 -43
- package/dist/lib/mcp/registry.d.ts +2 -2
- package/dist/lib/mcp/registry.js +42 -33
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.d.ts +1 -1
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +204 -65
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +142 -102
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +6 -6
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +197 -142
- package/dist/lib/mcp/servers/utilities/utility-server.d.ts +8 -0
- package/dist/lib/mcp/servers/utilities/utility-server.js +326 -0
- package/dist/lib/mcp/tool-integration.d.ts +67 -0
- package/dist/lib/mcp/tool-integration.js +179 -0
- package/dist/lib/mcp/unified-registry.d.ts +269 -0
- package/dist/lib/mcp/unified-registry.js +1411 -0
- package/dist/lib/neurolink.d.ts +68 -6
- package/dist/lib/neurolink.js +304 -42
- package/dist/lib/providers/agent-enhanced-provider.d.ts +59 -0
- package/dist/lib/providers/agent-enhanced-provider.js +242 -0
- package/dist/lib/providers/amazonBedrock.d.ts +3 -3
- package/dist/lib/providers/amazonBedrock.js +54 -50
- package/dist/lib/providers/anthropic.d.ts +2 -2
- package/dist/lib/providers/anthropic.js +92 -84
- package/dist/lib/providers/azureOpenAI.d.ts +2 -2
- package/dist/lib/providers/azureOpenAI.js +97 -86
- package/dist/lib/providers/function-calling-provider.d.ts +70 -0
- package/dist/lib/providers/function-calling-provider.js +359 -0
- package/dist/lib/providers/googleAIStudio.d.ts +10 -5
- package/dist/lib/providers/googleAIStudio.js +60 -38
- package/dist/lib/providers/googleVertexAI.d.ts +3 -3
- package/dist/lib/providers/googleVertexAI.js +96 -86
- package/dist/lib/providers/huggingFace.d.ts +3 -3
- package/dist/lib/providers/huggingFace.js +70 -63
- package/dist/lib/providers/index.d.ts +11 -11
- package/dist/lib/providers/index.js +18 -18
- package/dist/lib/providers/mcp-provider.d.ts +62 -0
- package/dist/lib/providers/mcp-provider.js +183 -0
- package/dist/lib/providers/mistralAI.d.ts +3 -3
- package/dist/lib/providers/mistralAI.js +42 -36
- package/dist/lib/providers/ollama.d.ts +4 -4
- package/dist/lib/providers/ollama.js +113 -98
- package/dist/lib/providers/openAI.d.ts +7 -3
- package/dist/lib/providers/openAI.js +45 -33
- package/dist/lib/utils/logger.js +2 -2
- package/dist/lib/utils/providerUtils.js +53 -31
- package/dist/mcp/auto-discovery.d.ts +120 -0
- package/dist/mcp/auto-discovery.js +794 -0
- package/dist/mcp/client.d.ts +66 -0
- package/dist/mcp/client.js +245 -0
- package/dist/mcp/config.d.ts +31 -0
- package/dist/mcp/config.js +74 -0
- package/dist/mcp/context-manager.d.ts +4 -4
- package/dist/mcp/context-manager.js +24 -18
- package/dist/mcp/factory.d.ts +28 -11
- package/dist/mcp/factory.js +36 -29
- package/dist/mcp/function-calling.d.ts +51 -0
- package/dist/mcp/function-calling.js +510 -0
- package/dist/mcp/index.d.ts +190 -0
- package/dist/mcp/index.js +156 -0
- package/dist/mcp/initialize-tools.d.ts +28 -0
- package/dist/mcp/initialize-tools.js +210 -0
- package/dist/mcp/initialize.d.ts +17 -0
- package/dist/mcp/initialize.js +51 -0
- package/dist/mcp/logging.d.ts +71 -0
- package/dist/mcp/logging.js +183 -0
- package/dist/mcp/manager.d.ts +67 -0
- package/dist/mcp/manager.js +176 -0
- package/dist/mcp/neurolink-mcp-client.d.ts +96 -0
- package/dist/mcp/neurolink-mcp-client.js +417 -0
- package/dist/mcp/orchestrator.d.ts +3 -3
- package/dist/mcp/orchestrator.js +46 -43
- package/dist/mcp/registry.d.ts +2 -2
- package/dist/mcp/registry.js +42 -33
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.d.ts +1 -1
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +204 -65
- package/dist/mcp/servers/ai-providers/ai-core-server.js +142 -102
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +6 -6
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +197 -142
- package/dist/mcp/servers/utilities/utility-server.d.ts +8 -0
- package/dist/mcp/servers/utilities/utility-server.js +326 -0
- package/dist/mcp/tool-integration.d.ts +67 -0
- package/dist/mcp/tool-integration.js +179 -0
- package/dist/mcp/unified-registry.d.ts +269 -0
- package/dist/mcp/unified-registry.js +1411 -0
- package/dist/neurolink.d.ts +68 -6
- package/dist/neurolink.js +304 -42
- package/dist/providers/agent-enhanced-provider.d.ts +59 -0
- package/dist/providers/agent-enhanced-provider.js +242 -0
- package/dist/providers/amazonBedrock.d.ts +3 -3
- package/dist/providers/amazonBedrock.js +54 -50
- package/dist/providers/anthropic.d.ts +2 -2
- package/dist/providers/anthropic.js +92 -84
- package/dist/providers/azureOpenAI.d.ts +2 -2
- package/dist/providers/azureOpenAI.js +97 -86
- package/dist/providers/function-calling-provider.d.ts +70 -0
- package/dist/providers/function-calling-provider.js +359 -0
- package/dist/providers/googleAIStudio.d.ts +10 -5
- package/dist/providers/googleAIStudio.js +60 -38
- package/dist/providers/googleVertexAI.d.ts +3 -3
- package/dist/providers/googleVertexAI.js +96 -86
- package/dist/providers/huggingFace.d.ts +3 -3
- package/dist/providers/huggingFace.js +70 -63
- package/dist/providers/index.d.ts +11 -11
- package/dist/providers/index.js +18 -18
- package/dist/providers/mcp-provider.d.ts +62 -0
- package/dist/providers/mcp-provider.js +183 -0
- package/dist/providers/mistralAI.d.ts +3 -3
- package/dist/providers/mistralAI.js +42 -36
- package/dist/providers/ollama.d.ts +4 -4
- package/dist/providers/ollama.js +113 -98
- package/dist/providers/openAI.d.ts +7 -3
- package/dist/providers/openAI.js +45 -33
- package/dist/utils/logger.js +2 -2
- package/dist/utils/providerUtils.js +53 -31
- package/package.json +175 -161
package/dist/providers/ollama.js
CHANGED

@@ -10,20 +10,20 @@
  * - Health checking and service validation
  * - Streaming and non-streaming text generation
  */
-import { streamText, generateText, Output } from
-import { logger } from
+import { streamText, generateText, Output } from "ai";
+import { logger } from "../utils/logger.js";
 // Default system context
 const DEFAULT_SYSTEM_CONTEXT = {
-    systemPrompt:
+    systemPrompt: "You are a helpful AI assistant.",
 };
 // Custom LanguageModelV1 implementation for Ollama
 class OllamaLanguageModel {
-    specificationVersion =
-    provider =
+    specificationVersion = "v1";
+    provider = "ollama";
     modelId;
     maxTokens;
     supportsStreaming = true;
-    defaultObjectGenerationMode =
+    defaultObjectGenerationMode = "json";
     baseUrl;
     timeout;
     constructor(modelId, baseUrl, timeout) {
@@ -36,29 +36,29 @@ class OllamaLanguageModel {
     }
     convertMessagesToPrompt(messages) {
         return messages
-            .map(msg => {
-            if (typeof msg.content ===
+            .map((msg) => {
+            if (typeof msg.content === "string") {
                 return `${msg.role}: ${msg.content}`;
             }
             else if (Array.isArray(msg.content)) {
                 // Handle multi-part content (text, images, etc.)
                 return `${msg.role}: ${msg.content
-                    .filter((part) => part.type ===
+                    .filter((part) => part.type === "text")
                     .map((part) => part.text)
-                    .join(
+                    .join(" ")}`;
             }
-            return
+            return "";
         })
-            .join(
+            .join("\n");
     }
     async checkHealth() {
         try {
             const controller = new AbortController();
             const timeoutId = setTimeout(() => controller.abort(), 5000);
             const response = await fetch(`${this.baseUrl}/api/tags`, {
-                method:
+                method: "GET",
                 signal: controller.signal,
-                headers: {
+                headers: { "Content-Type": "application/json" },
             });
             clearTimeout(timeoutId);
             return response.ok;
@@ -70,16 +70,17 @@ class OllamaLanguageModel {
     async ensureModelAvailable() {
         try {
             const response = await fetch(`${this.baseUrl}/api/tags`);
-            if (!response.ok)
-                throw new Error(
-
-            const
+            if (!response.ok) {
+                throw new Error("Cannot access Ollama");
+            }
+            const data = (await response.json());
+            const models = data.models?.map((m) => m.name) || [];
             if (!models.includes(this.modelId)) {
                 // Try to pull the model
                 const pullResponse = await fetch(`${this.baseUrl}/api/pull`, {
-                    method:
-                    headers: {
-                    body: JSON.stringify({ name: this.modelId })
+                    method: "POST",
+                    headers: { "Content-Type": "application/json" },
+                    body: JSON.stringify({ name: this.modelId }),
                 });
                 if (!pullResponse.ok) {
                     throw new Error(`Model '${this.modelId}' not available and cannot be pulled`);
@@ -94,7 +95,7 @@ class OllamaLanguageModel {
         // Health check and model availability
         const isHealthy = await this.checkHealth();
         if (!isHealthy) {
-            throw new Error(
+            throw new Error("Ollama service is not running or accessible. Please ensure Ollama is installed and running.");
         }
         await this.ensureModelAvailable();
         const prompt = this.convertMessagesToPrompt(options.prompt);
@@ -105,24 +106,24 @@ class OllamaLanguageModel {
             options: {
                 temperature: options.temperature || 0.7,
                 num_predict: options.maxTokens || 500,
-            }
+            },
         };
         const controller = new AbortController();
         const timeoutId = setTimeout(() => controller.abort(), this.timeout);
         try {
             const response = await fetch(`${this.baseUrl}/api/generate`, {
-                method:
-                headers: {
+                method: "POST",
+                headers: { "Content-Type": "application/json" },
                 body: JSON.stringify(requestPayload),
-                signal: controller.signal
+                signal: controller.signal,
             });
             clearTimeout(timeoutId);
             if (!response.ok) {
                 throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
             }
-            const data = await response.json();
+            const data = (await response.json());
             if (!data.response) {
-                throw new Error(
+                throw new Error("No response received from Ollama");
             }
             const promptTokens = this.estimateTokens(prompt);
             const completionTokens = this.estimateTokens(data.response);
@@ -131,22 +132,25 @@ class OllamaLanguageModel {
                 usage: {
                     promptTokens,
                     completionTokens,
-                    totalTokens: promptTokens + completionTokens
+                    totalTokens: promptTokens + completionTokens,
                 },
-                finishReason:
+                finishReason: "stop",
                 logprobs: undefined,
                 rawCall: { rawPrompt: prompt, rawSettings: options },
-                rawResponse: { headers: {} }
+                rawResponse: { headers: {} },
             };
         }
         catch (error) {
             clearTimeout(timeoutId);
             const errorMessage = error instanceof Error ? error.message : String(error);
-            if (errorMessage.includes(
+            if (errorMessage.includes("AbortError") ||
+                errorMessage.includes("timeout")) {
                 throw new Error(`Ollama request timeout (${this.timeout}ms). The model may be large or the system is under load.`);
             }
-            if (errorMessage.includes(
-
+            if (errorMessage.includes("ECONNREFUSED") ||
+                errorMessage.includes("fetch failed")) {
+                throw new Error("Cannot connect to Ollama service. Please ensure Ollama is installed and running on " +
+                    this.baseUrl);
             }
             throw error;
         }
@@ -155,7 +159,7 @@ class OllamaLanguageModel {
         // Health check and model availability
         const isHealthy = await this.checkHealth();
         if (!isHealthy) {
-            throw new Error(
+            throw new Error("Ollama service is not running or accessible. Please ensure Ollama is installed and running.");
         }
         await this.ensureModelAvailable();
         const prompt = this.convertMessagesToPrompt(options.prompt);
@@ -166,23 +170,23 @@ class OllamaLanguageModel {
             options: {
                 temperature: options.temperature || 0.7,
                 num_predict: options.maxTokens || 500,
-            }
+            },
         };
         const controller = new AbortController();
         const timeoutId = setTimeout(() => controller.abort(), this.timeout);
         try {
             const response = await fetch(`${this.baseUrl}/api/generate`, {
-                method:
-                headers: {
+                method: "POST",
+                headers: { "Content-Type": "application/json" },
                 body: JSON.stringify(requestPayload),
-                signal: controller.signal
+                signal: controller.signal,
             });
             clearTimeout(timeoutId);
             if (!response.ok) {
                 throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
             }
             if (!response.body) {
-                throw new Error(
+                throw new Error("No response body received from Ollama streaming API");
             }
             // Create a ReadableStream that parses Ollama's streaming format
             const stream = new ReadableStream({
@@ -193,29 +197,31 @@ class OllamaLanguageModel {
                     try {
                         while (true) {
                             const { done, value } = await reader.read();
-                            if (done)
+                            if (done) {
                                 break;
+                            }
                             const chunk = decoder.decode(value, { stream: true });
-                            const lines = chunk.split(
+                            const lines = chunk.split("\n").filter((line) => line.trim());
                             for (const line of lines) {
                                 try {
                                     const data = JSON.parse(line);
                                     if (data.response) {
                                         controller.enqueue({
-                                            type:
-                                            textDelta: data.response
+                                            type: "text-delta",
+                                            textDelta: data.response,
                                         });
                                         totalTokens += Math.ceil(data.response.length / 4);
                                     }
                                     if (data.done) {
                                         controller.enqueue({
-                                            type:
-                                            finishReason:
+                                            type: "finish",
+                                            finishReason: "stop",
                                             usage: {
-                                                promptTokens: data.prompt_eval_count ||
-
+                                                promptTokens: data.prompt_eval_count ||
+                                                    Math.ceil(prompt.length / 4),
+                                                completionTokens: data.eval_count || totalTokens,
                                             },
-                                            logprobs: undefined
+                                            logprobs: undefined,
                                         });
                                         controller.close();
                                         return;
@@ -230,22 +236,25 @@ class OllamaLanguageModel {
                     finally {
                         reader.releaseLock();
                     }
-                }
+                },
             });
             return {
                 stream,
                 rawCall: { rawPrompt: prompt, rawSettings: options },
-                rawResponse: { headers: {} }
+                rawResponse: { headers: {} },
             };
         }
         catch (error) {
             clearTimeout(timeoutId);
             const errorMessage = error instanceof Error ? error.message : String(error);
-            if (errorMessage.includes(
+            if (errorMessage.includes("AbortError") ||
+                errorMessage.includes("timeout")) {
                 throw new Error(`Ollama streaming timeout (${this.timeout}ms). The model may be large or the system is under load.`);
             }
-            if (errorMessage.includes(
-
+            if (errorMessage.includes("ECONNREFUSED") ||
+                errorMessage.includes("fetch failed")) {
+                throw new Error("Cannot connect to Ollama service. Please ensure Ollama is installed and running on " +
+                    this.baseUrl);
             }
             throw error;
         }
@@ -256,13 +265,13 @@ export class Ollama {
     modelName;
     timeout;
     constructor(modelName) {
-        this.baseUrl = process.env.OLLAMA_BASE_URL ||
-        this.modelName = modelName || process.env.OLLAMA_MODEL ||
-        this.timeout = parseInt(process.env.OLLAMA_TIMEOUT ||
-        logger.debug(
+        this.baseUrl = process.env.OLLAMA_BASE_URL || "http://localhost:11434";
+        this.modelName = modelName || process.env.OLLAMA_MODEL || "llama2";
+        this.timeout = parseInt(process.env.OLLAMA_TIMEOUT || "60000"); // 60 seconds default
+        logger.debug("[Ollama] Initialized", {
            baseUrl: this.baseUrl,
            modelName: this.modelName,
-            timeout: this.timeout
+            timeout: this.timeout,
        });
    }
    /**
@@ -270,8 +279,8 @@ export class Ollama {
     * @private
     */
    getModel() {
-        logger.debug(
-            modelName: this.modelName
+        logger.debug("Ollama.getModel - Ollama model selected", {
+            modelName: this.modelName,
        });
        return new OllamaLanguageModel(this.modelName, this.baseUrl, this.timeout);
    }
@@ -280,35 +289,35 @@ export class Ollama {
     */
    async checkHealth() {
        const model = new OllamaLanguageModel(this.modelName, this.baseUrl, this.timeout);
-        return await model[
+        return await model["checkHealth"]();
    }
    /**
     * List available models on the Ollama instance
     */
    async listModels() {
-        const functionTag =
+        const functionTag = "Ollama.listModels";
        try {
            logger.debug(`[${functionTag}] Listing available models`);
            const response = await fetch(`${this.baseUrl}/api/tags`, {
-                method:
+                method: "GET",
                headers: {
-
-                }
+                    "Content-Type": "application/json",
+                },
            });
            if (!response.ok) {
                throw new Error(`Failed to list models: ${response.status} ${response.statusText}`);
            }
-            const data = await response.json();
-            const modelNames = data.models?.map(model => model.name) || [];
+            const data = (await response.json());
+            const modelNames = data.models?.map((model) => model.name) || [];
            logger.debug(`[${functionTag}] Found models`, {
                count: modelNames.length,
-                models: modelNames
+                models: modelNames,
            });
            return modelNames;
        }
        catch (error) {
            logger.debug(`[${functionTag}] Error listing models`, {
-                error: error instanceof Error ? error.message : String(error)
+                error: error instanceof Error ? error.message : String(error),
            });
            throw new Error(`Failed to list Ollama models: ${error instanceof Error ? error.message : String(error)}`);
        }
@@ -329,17 +338,17 @@ export class Ollama {
     * Pull/download a model to the local Ollama instance
     */
    async pullModel(modelName) {
-        const functionTag =
+        const functionTag = "Ollama.pullModel";
        try {
            logger.debug(`[${functionTag}] Pulling model`, { modelName });
            const response = await fetch(`${this.baseUrl}/api/pull`, {
-                method:
+                method: "POST",
                headers: {
-
+                    "Content-Type": "application/json",
                },
                body: JSON.stringify({
-                    name: modelName
-                })
+                    name: modelName,
+                }),
            });
            if (!response.ok) {
                throw new Error(`Failed to pull model: ${response.status} ${response.statusText}`);
@@ -351,7 +360,7 @@ export class Ollama {
        catch (error) {
            logger.debug(`[${functionTag}] Error pulling model`, {
                modelName,
-                error: error instanceof Error ? error.message : String(error)
+                error: error instanceof Error ? error.message : String(error),
            });
            throw new Error(`Failed to pull model '${modelName}': ${error instanceof Error ? error.message : String(error)}`);
        }
@@ -360,14 +369,14 @@ export class Ollama {
     * Generate text using Ollama local models
     */
    async generateText(optionsOrPrompt, analysisSchema) {
-        const functionTag =
-        const provider =
+        const functionTag = "Ollama.generateText";
+        const provider = "ollama";
        try {
            // Parse parameters - support both string and options object
-            const options = typeof optionsOrPrompt ===
+            const options = typeof optionsOrPrompt === "string"
                ? { prompt: optionsOrPrompt }
                : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
            // Use schema from options or fallback parameter
            const finalSchema = schema || analysisSchema;
            logger.debug(`[${functionTag}] Generate request started`, {
@@ -375,7 +384,7 @@ export class Ollama {
                modelName: this.modelName,
                promptLength: prompt.length,
                temperature,
-                maxTokens
+                maxTokens,
            });
            const model = this.getModel();
            const generateOptions = {
@@ -383,10 +392,12 @@ export class Ollama {
                prompt: prompt,
                system: systemPrompt,
                temperature,
-                maxTokens
+                maxTokens,
            };
            if (finalSchema) {
-                generateOptions.experimental_output = Output.object({
+                generateOptions.experimental_output = Output.object({
+                    schema: finalSchema,
+                });
            }
            const result = await generateText(generateOptions);
            logger.debug(`[${functionTag}] Generate text completed`, {
@@ -394,7 +405,7 @@ export class Ollama {
                modelName: this.modelName,
                usage: result.usage,
                finishReason: result.finishReason,
-                responseLength: result.text?.length || 0
+                responseLength: result.text?.length || 0,
            });
            return result;
        }
@@ -402,8 +413,8 @@ export class Ollama {
            logger.debug(`[${functionTag}] Exception`, {
                provider,
                modelName: this.modelName,
-                message:
-                err: String(err)
+                message: "Error in generating text",
+                err: String(err),
            });
            throw err; // Re-throw error to trigger fallback
        }
@@ -412,15 +423,15 @@ export class Ollama {
     * Generate streaming text using Ollama local models
     */
    async streamText(optionsOrPrompt, analysisSchema) {
-        const functionTag =
-        const provider =
+        const functionTag = "Ollama.streamText";
+        const provider = "ollama";
        let chunkCount = 0;
        try {
            // Parse parameters - support both string and options object
-            const options = typeof optionsOrPrompt ===
+            const options = typeof optionsOrPrompt === "string"
                ? { prompt: optionsOrPrompt }
                : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
            // Use schema from options or fallback parameter
            const finalSchema = schema || analysisSchema;
            logger.debug(`[${functionTag}] Stream request started`, {
@@ -429,7 +440,7 @@ export class Ollama {
                promptLength: prompt.length,
                temperature,
                maxTokens,
-                hasSchema: !!finalSchema
+                hasSchema: !!finalSchema,
            });
            const model = this.getModel();
            const streamOptions = {
@@ -448,7 +459,7 @@ export class Ollama {
                        error: errorMessage,
                        stack: errorStack,
                        promptLength: prompt.length,
-                        chunkCount
+                        chunkCount,
                    });
                },
                onFinish: (event) => {
@@ -459,7 +470,7 @@ export class Ollama {
                        usage: event.usage,
                        totalChunks: chunkCount,
                        promptLength: prompt.length,
-                        responseLength: event.text?.length || 0
+                        responseLength: event.text?.length || 0,
                    });
                },
                onChunk: (event) => {
@@ -469,12 +480,14 @@ export class Ollama {
                        modelName: this.modelName,
                        chunkNumber: chunkCount,
                        chunkLength: event.chunk.text?.length || 0,
-                        chunkType: event.chunk.type
+                        chunkType: event.chunk.type,
                    });
-                }
+                },
            };
            if (finalSchema) {
-                streamOptions.experimental_output = Output.object({
+                streamOptions.experimental_output = Output.object({
+                    schema: finalSchema,
+                });
            }
            const result = streamText(streamOptions);
            return result;
@@ -483,9 +496,11 @@ export class Ollama {
            logger.debug(`[${functionTag}] Exception`, {
                provider,
                modelName: this.modelName,
-                message:
+                message: "Error in streaming text",
                err: String(err),
-                promptLength: typeof optionsOrPrompt ===
+                promptLength: typeof optionsOrPrompt === "string"
+                    ? optionsOrPrompt.length
+                    : optionsOrPrompt.prompt.length,
            });
            throw err; // Re-throw error to trigger fallback
        }
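Beyond the quote-style and trailing-comma reformatting, the hunks above show the provider's call surface: generateText/streamText accept either a plain prompt string or an options object, and configuration falls back to OLLAMA_BASE_URL, OLLAMA_MODEL, and OLLAMA_TIMEOUT. A minimal usage sketch of that surface follows; the deep import path is an assumption for illustration only and may differ from the package's public exports.

// Sketch only: the import path below is assumed, not confirmed by this diff.
import { Ollama } from "@juspay/neurolink/dist/providers/ollama.js";

// Falls back to OLLAMA_BASE_URL (http://localhost:11434), OLLAMA_MODEL ("llama2")
// and OLLAMA_TIMEOUT (60000 ms) when no argument or env vars are provided.
const ollama = new Ollama("llama2");

// String form is shorthand for { prompt: "..." }.
const result = await ollama.generateText("Explain MCP servers in one sentence.");
console.log(result?.text);

// Options form; temperature, maxTokens and systemPrompt default to 0.7, 500
// and the built-in system prompt when omitted.
await ollama.streamText({
  prompt: "List three benefits of running models locally.",
  temperature: 0.2,
  maxTokens: 200,
});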
package/dist/providers/openAI.d.ts
CHANGED

@@ -1,10 +1,14 @@
-import type { ZodType, ZodTypeDef } from
-import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from
-import type { AIProvider, TextGenerationOptions, StreamTextOptions } from
+import type { ZodType, ZodTypeDef } from "zod";
+import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult, type LanguageModelV1 } from "ai";
+import type { AIProvider, TextGenerationOptions, StreamTextOptions } from "../core/types.js";
 export declare class OpenAI implements AIProvider {
     private modelName;
     private model;
     constructor(modelName?: string | null);
+    /**
+     * Get the underlying model for function calling
+     */
+    getModel(): LanguageModelV1;
     streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
     generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
 }