@mandujs/core 0.9.0 → 0.9.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +4 -1
- package/src/brain/adapters/ollama.ts +88 -102
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@mandujs/core",
-  "version": "0.9.0",
+  "version": "0.9.1",
   "description": "Mandu Framework Core - Spec, Generator, Guard, Runtime",
   "type": "module",
   "main": "./src/index.ts",
@@ -45,5 +45,8 @@
     "react": ">=18.0.0",
     "react-dom": ">=18.0.0",
     "zod": ">=3.0.0"
+  },
+  "dependencies": {
+    "ollama": "^0.6.3"
   }
 }
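The only dependency added is the official ollama npm client, which the rewritten adapter below uses in place of hand-rolled fetch calls. A minimal sketch of the two client calls the diff relies on; the host value and model name are illustrative assumptions, not values taken from the package:

import { Ollama } from "ollama";

// Assumed local Ollama endpoint; the adapter derives its host from its baseUrl config.
const client = new Ollama({ host: "http://127.0.0.1:11434" });

// List installed models, as checkStatus()/listModels() do below.
const { models } = await client.list();
console.log(models.map((m) => m.name));

// Non-streaming chat completion, as sendChat() does below.
const reply = await client.chat({
  model: "llama3.2",
  messages: [{ role: "user", content: "Hello" }],
  stream: false,
});
console.log(reply.message.content);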
package/src/brain/adapters/ollama.ts
CHANGED

@@ -2,9 +2,11 @@
  * Brain v0.1 - Ollama LLM Adapter
  *
  * Default adapter for local sLLM via Ollama.
- *
+ * Uses official ollama npm package for reliable API integration.
+ * Recommended models: ministral-3:3b, llama3.2, codellama, mistral
  */
 
+import { Ollama } from "ollama";
 import { BaseLLMAdapter } from "./base";
 import type {
   AdapterConfig,
@@ -27,28 +29,6 @@ export const DEFAULT_OLLAMA_CONFIG: AdapterConfig = {
   timeout: 30000, // 30 seconds
 };
 
-/**
- * Ollama API response types
- */
-interface OllamaTagsResponse {
-  models: Array<{
-    name: string;
-    size: number;
-    modified_at: string;
-  }>;
-}
-
-interface OllamaChatResponse {
-  model: string;
-  message: {
-    role: string;
-    content: string;
-  };
-  done: boolean;
-  eval_count?: number;
-  prompt_eval_count?: number;
-}
-
 /**
  * Ollama LLM Adapter
  *
@@ -57,12 +37,17 @@ interface OllamaChatResponse {
  */
 export class OllamaAdapter extends BaseLLMAdapter {
   readonly name = "ollama";
+  private client: Ollama;
 
   constructor(config: Partial<AdapterConfig> = {}) {
     super({
       ...DEFAULT_OLLAMA_CONFIG,
       ...config,
     });
+
+    this.client = new Ollama({
+      host: this.baseUrl,
+    });
   }
 
   /**
@@ -70,28 +55,8 @@ export class OllamaAdapter extends BaseLLMAdapter {
    */
   async checkStatus(): Promise<AdapterStatus> {
     try {
-      const controller = new AbortController();
-      const timeoutId = setTimeout(
-        () => controller.abort(),
-        this.config.timeout ?? 5000
-      );
-
-      const response = await fetch(`${this.baseUrl}/api/tags`, {
-        signal: controller.signal,
-      });
-
-      clearTimeout(timeoutId);
-
-      if (!response.ok) {
-        return {
-          available: false,
-          model: null,
-          error: `Ollama API error: ${response.status}`,
-        };
-      }
-
-      const data = (await response.json()) as OllamaTagsResponse;
-      const models = data.models || [];
+      const response = await this.client.list();
+      const models = response.models || [];
 
       // Check if configured model is available
       const modelAvailable = models.some(
@@ -122,21 +87,14 @@
         model: this.config.model,
       };
     } catch (error) {
-      if (error instanceof Error && error.name === "AbortError") {
-        return {
-          available: false,
-          model: null,
-          error: "Ollama connection timeout",
-        };
-      }
-
       const errorMessage =
         error instanceof Error ? error.message : "Unknown error";
 
       // Check for common connection errors
       if (
         errorMessage.includes("ECONNREFUSED") ||
-        errorMessage.includes("fetch failed")
+        errorMessage.includes("fetch failed") ||
+        errorMessage.includes("Unable to connect")
       ) {
         return {
           available: false,
@@ -163,47 +121,26 @@ export class OllamaAdapter extends BaseLLMAdapter {
     const { temperature = 0.7, maxTokens = 2048 } = options;
 
     try {
-      const controller = new AbortController();
-      const timeoutId = setTimeout(
-        () => controller.abort(),
-        this.config.timeout ?? 30000
-      );
-
-      const response = await fetch(`${this.baseUrl}/api/chat`, {
-        method: "POST",
-        headers: {
-          "Content-Type": "application/json",
+      const response = await this.client.chat({
+        model: this.config.model,
+        messages: messages.map((m) => ({
+          role: m.role,
+          content: m.content,
+        })),
+        stream: false,
+        options: {
+          temperature,
+          num_predict: maxTokens,
         },
-        body: JSON.stringify({
-          model: this.config.model,
-          messages: messages.map((m) => ({
-            role: m.role,
-            content: m.content,
-          })),
-          stream: false,
-          options: {
-            temperature,
-            num_predict: maxTokens,
-          },
-        }),
-        signal: controller.signal,
       });
 
-      clearTimeout(timeoutId);
-
-      if (!response.ok) {
-        const errorText = await response.text();
-        throw new Error(`Ollama API error: ${response.status} - ${errorText}`);
-      }
-
-      const data = (await response.json()) as OllamaChatResponse;
-
       return {
-        content: data.message?.content || "",
+        content: response.message?.content || "",
         usage: {
-          promptTokens: data.prompt_eval_count || 0,
-          completionTokens: data.eval_count || 0,
-          totalTokens: (data.prompt_eval_count || 0) + (data.eval_count || 0),
+          promptTokens: response.prompt_eval_count || 0,
+          completionTokens: response.eval_count || 0,
+          totalTokens:
+            (response.prompt_eval_count || 0) + (response.eval_count || 0),
         },
       };
     } catch (error) {
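The hunk that follows rewrites pullModel to stream download progress through the client instead of issuing a single blocking request, and adds isServerRunning, listModels, and embed helpers. A hedged sketch of the progress stream the new pullModel consumes; the host and model name are examples, not values from the package:

import { Ollama } from "ollama";

const client = new Ollama({ host: "http://127.0.0.1:11434" });

// pull({ stream: true }) yields progress objects with status, completed, and total,
// which the adapter forwards to its optional onProgress callback.
const stream = await client.pull({ model: "llama3.2", stream: true });

for await (const progress of stream) {
  if (progress.total) {
    const pct = Math.round((progress.completed / progress.total) * 100);
    console.log(`${progress.status} ${pct}%`);
  } else {
    console.log(progress.status);
  }
}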
@@ -215,28 +152,77 @@
   }
 
   /**
-   * Pull a model from Ollama registry
+   * Pull a model from Ollama registry with progress callback
    */
-  async pullModel(modelName?: string): Promise<boolean> {
+  async pullModel(
+    modelName?: string,
+    onProgress?: (status: string, completed?: number, total?: number) => void
+  ): Promise<{ success: boolean; error?: string }> {
     const model = modelName ?? this.config.model;
 
     try {
-      const response = await fetch(`${this.baseUrl}/api/pull`, {
-        method: "POST",
-        headers: {
-          "Content-Type": "application/json",
-        },
-        body: JSON.stringify({
-          name: model,
-          stream: false,
-        }),
+      const stream = await this.client.pull({
+        model,
+        stream: true,
       });
 
-      return response.ok;
+      for await (const progress of stream) {
+        if (onProgress && progress.status) {
+          onProgress(
+            progress.status,
+            progress.completed,
+            progress.total
+          );
+        }
+      }
+
+      return { success: true };
+    } catch (error) {
+      return {
+        success: false,
+        error: error instanceof Error ? error.message : "Unknown error",
+      };
+    }
+  }
+
+  /**
+   * Check if Ollama server is reachable
+   */
+  async isServerRunning(): Promise<boolean> {
+    try {
+      await this.client.list();
+      return true;
     } catch {
       return false;
     }
   }
+
+  /**
+   * List all available models
+   */
+  async listModels(): Promise<string[]> {
+    try {
+      const response = await this.client.list();
+      return (response.models || []).map((m) => m.name);
+    } catch {
+      return [];
+    }
+  }
+
+  /**
+   * Generate embeddings for text
+   */
+  async embed(text: string, model?: string): Promise<number[] | null> {
+    try {
+      const response = await this.client.embed({
+        model: model ?? this.config.model,
+        input: text,
+      });
+      return response.embeddings?.[0] ?? null;
+    } catch {
+      return null;
+    }
+  }
 }
 
 /**