codemaxxing 0.3.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +16 -16
- package/dist/agent.d.ts +7 -0
- package/dist/agent.js +51 -2
- package/dist/exec.js +10 -0
- package/dist/index.js +322 -4
- package/dist/utils/hardware.d.ts +17 -0
- package/dist/utils/hardware.js +120 -0
- package/dist/utils/mcp.d.ts +55 -0
- package/dist/utils/mcp.js +251 -0
- package/dist/utils/models.d.ts +17 -0
- package/dist/utils/models.js +113 -0
- package/dist/utils/ollama.d.ts +22 -0
- package/dist/utils/ollama.js +121 -0
- package/package.json +2 -1
- package/src/agent.ts +55 -2
- package/src/exec.ts +12 -0
- package/src/index.tsx +413 -2
- package/src/utils/hardware.ts +131 -0
- package/src/utils/mcp.ts +307 -0
- package/src/utils/models.ts +137 -0
- package/src/utils/ollama.ts +137 -0
package/src/utils/mcp.ts
ADDED
|
@@ -0,0 +1,307 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* MCP (Model Context Protocol) client support
|
|
3
|
+
* Connects to external MCP servers and exposes their tools to the LLM agent.
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
|
|
7
|
+
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
|
|
8
|
+
import { readFileSync, writeFileSync, existsSync, mkdirSync } from "fs";
|
|
9
|
+
import { join } from "path";
|
|
10
|
+
import { homedir } from "os";
|
|
11
|
+
import type { ChatCompletionTool } from "openai/resources/chat/completions";
|
|
12
|
+
|
|
13
|
+
// ── Types ──
|
|
14
|
+
|
|
15
|
+
/** Launch configuration for one MCP server process (stdio transport). */
export interface MCPServerConfig {
  // Executable to spawn (e.g. "npx", "node", or a binary on PATH).
  command: string;
  // Arguments passed to the command.
  args?: string[];
  // Extra environment variables merged over process.env when spawning.
  env?: Record<string, string>;
}

/** Shape of an mcp.json config file: server name -> launch config. */
export interface MCPConfig {
  mcpServers: Record<string, MCPServerConfig>;
}

/** A live connection to an MCP server plus the tools it advertised. */
export interface ConnectedServer {
  name: string;
  client: Client;
  transport: StdioClientTransport;
  // Tool list as reported by the server's listTools() response.
  tools: Array<{ name: string; description?: string; inputSchema: Record<string, unknown> }>;
}
|
|
31
|
+
|
|
32
|
+
// ── Config paths ──
|
|
33
|
+
|
|
34
|
+
// Global (user-level) config lives at ~/.codemaxxing/mcp.json.
const GLOBAL_CONFIG_DIR = join(homedir(), ".codemaxxing");
const GLOBAL_CONFIG_PATH = join(GLOBAL_CONFIG_DIR, "mcp.json");
|
|
36
|
+
|
|
37
|
+
function getProjectConfigPaths(cwd: string): string[] {
|
|
38
|
+
return [
|
|
39
|
+
join(cwd, ".codemaxxing", "mcp.json"),
|
|
40
|
+
join(cwd, ".cursor", "mcp.json"),
|
|
41
|
+
join(cwd, "opencode.json"),
|
|
42
|
+
];
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
// ── Config loading ──
|
|
46
|
+
|
|
47
|
+
function loadConfigFile(path: string): MCPConfig | null {
|
|
48
|
+
try {
|
|
49
|
+
if (!existsSync(path)) return null;
|
|
50
|
+
const raw = readFileSync(path, "utf-8");
|
|
51
|
+
const parsed = JSON.parse(raw);
|
|
52
|
+
if (parsed.mcpServers && typeof parsed.mcpServers === "object") {
|
|
53
|
+
return parsed as MCPConfig;
|
|
54
|
+
}
|
|
55
|
+
return null;
|
|
56
|
+
} catch {
|
|
57
|
+
return null;
|
|
58
|
+
}
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
export function loadMCPConfig(cwd: string): MCPConfig {
|
|
62
|
+
const merged: MCPConfig = { mcpServers: {} };
|
|
63
|
+
|
|
64
|
+
// Load global config first (lower priority)
|
|
65
|
+
const globalConfig = loadConfigFile(GLOBAL_CONFIG_PATH);
|
|
66
|
+
if (globalConfig) {
|
|
67
|
+
Object.assign(merged.mcpServers, globalConfig.mcpServers);
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
// Load project configs (higher priority — later overwrites earlier)
|
|
71
|
+
for (const configPath of getProjectConfigPaths(cwd)) {
|
|
72
|
+
const config = loadConfigFile(configPath);
|
|
73
|
+
if (config) {
|
|
74
|
+
Object.assign(merged.mcpServers, config.mcpServers);
|
|
75
|
+
}
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
return merged;
|
|
79
|
+
}
|
|
80
|
+
|
|
81
|
+
// ── Connection management ──
|
|
82
|
+
|
|
83
|
+
const connectedServers: ConnectedServer[] = [];
|
|
84
|
+
|
|
85
|
+
export async function connectToServers(
|
|
86
|
+
config: MCPConfig,
|
|
87
|
+
onStatus?: (name: string, status: string) => void,
|
|
88
|
+
): Promise<ConnectedServer[]> {
|
|
89
|
+
const entries = Object.entries(config.mcpServers);
|
|
90
|
+
if (entries.length === 0) return [];
|
|
91
|
+
|
|
92
|
+
for (const [name, serverConfig] of entries) {
|
|
93
|
+
try {
|
|
94
|
+
onStatus?.(name, "connecting");
|
|
95
|
+
|
|
96
|
+
const transport = new StdioClientTransport({
|
|
97
|
+
command: serverConfig.command,
|
|
98
|
+
args: serverConfig.args ?? [],
|
|
99
|
+
env: { ...process.env, ...(serverConfig.env ?? {}) } as Record<string, string>,
|
|
100
|
+
});
|
|
101
|
+
|
|
102
|
+
const client = new Client({
|
|
103
|
+
name: "codemaxxing",
|
|
104
|
+
version: "0.3.0",
|
|
105
|
+
});
|
|
106
|
+
|
|
107
|
+
await client.connect(transport);
|
|
108
|
+
|
|
109
|
+
// Fetch available tools
|
|
110
|
+
const toolsResult = await client.listTools();
|
|
111
|
+
const tools = (toolsResult.tools ?? []).map((t) => ({
|
|
112
|
+
name: t.name,
|
|
113
|
+
description: t.description,
|
|
114
|
+
inputSchema: (t.inputSchema ?? { type: "object", properties: {} }) as Record<string, unknown>,
|
|
115
|
+
}));
|
|
116
|
+
|
|
117
|
+
const server: ConnectedServer = { name, client, transport, tools };
|
|
118
|
+
connectedServers.push(server);
|
|
119
|
+
onStatus?.(name, `connected (${tools.length} tools)`);
|
|
120
|
+
} catch (err: any) {
|
|
121
|
+
onStatus?.(name, `failed: ${err.message}`);
|
|
122
|
+
}
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
return connectedServers;
|
|
126
|
+
}
|
|
127
|
+
|
|
128
|
+
export async function disconnectAll(): Promise<void> {
|
|
129
|
+
for (const server of connectedServers) {
|
|
130
|
+
try {
|
|
131
|
+
await server.client.close();
|
|
132
|
+
} catch {
|
|
133
|
+
// Ignore cleanup errors
|
|
134
|
+
}
|
|
135
|
+
}
|
|
136
|
+
connectedServers.length = 0;
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
/** Accessor for the module-level registry of live MCP connections. */
export function getConnectedServers(): ConnectedServer[] {
  return connectedServers;
}
|
|
142
|
+
|
|
143
|
+
// ── Tool format conversion ──
|
|
144
|
+
|
|
145
|
+
export function getAllMCPTools(servers: ConnectedServer[]): ChatCompletionTool[] {
|
|
146
|
+
const tools: ChatCompletionTool[] = [];
|
|
147
|
+
|
|
148
|
+
for (const server of servers) {
|
|
149
|
+
for (const tool of server.tools) {
|
|
150
|
+
tools.push({
|
|
151
|
+
type: "function",
|
|
152
|
+
function: {
|
|
153
|
+
name: `mcp_${server.name}_${tool.name}`,
|
|
154
|
+
description: `[MCP: ${server.name}] ${tool.description ?? tool.name}`,
|
|
155
|
+
parameters: tool.inputSchema as any,
|
|
156
|
+
},
|
|
157
|
+
});
|
|
158
|
+
}
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
return tools;
|
|
162
|
+
}
|
|
163
|
+
|
|
164
|
+
/**
|
|
165
|
+
* Parse an MCP tool call name to extract server name and tool name.
|
|
166
|
+
* Format: mcp_<serverName>_<toolName>
|
|
167
|
+
* Server names can contain hyphens but not underscores (by convention).
|
|
168
|
+
*/
|
|
169
|
+
export function parseMCPToolName(fullName: string): { serverName: string; toolName: string } | null {
|
|
170
|
+
if (!fullName.startsWith("mcp_")) return null;
|
|
171
|
+
const rest = fullName.slice(4); // Remove "mcp_"
|
|
172
|
+
|
|
173
|
+
// Find the server by matching known connected server names
|
|
174
|
+
for (const server of connectedServers) {
|
|
175
|
+
const prefix = server.name + "_";
|
|
176
|
+
if (rest.startsWith(prefix)) {
|
|
177
|
+
return { serverName: server.name, toolName: rest.slice(prefix.length) };
|
|
178
|
+
}
|
|
179
|
+
}
|
|
180
|
+
|
|
181
|
+
// Fallback: split on first underscore
|
|
182
|
+
const idx = rest.indexOf("_");
|
|
183
|
+
if (idx === -1) return null;
|
|
184
|
+
return { serverName: rest.slice(0, idx), toolName: rest.slice(idx + 1) };
|
|
185
|
+
}
|
|
186
|
+
|
|
187
|
+
// ── Tool execution ──
|
|
188
|
+
|
|
189
|
+
export async function callMCPTool(
|
|
190
|
+
serverName: string,
|
|
191
|
+
toolName: string,
|
|
192
|
+
args: Record<string, unknown>,
|
|
193
|
+
): Promise<string> {
|
|
194
|
+
const server = connectedServers.find((s) => s.name === serverName);
|
|
195
|
+
if (!server) {
|
|
196
|
+
return `Error: MCP server "${serverName}" not found or not connected.`;
|
|
197
|
+
}
|
|
198
|
+
|
|
199
|
+
try {
|
|
200
|
+
const result = await server.client.callTool({ name: toolName, arguments: args });
|
|
201
|
+
// MCP tool results have a content array
|
|
202
|
+
const content = result.content;
|
|
203
|
+
if (Array.isArray(content)) {
|
|
204
|
+
return content
|
|
205
|
+
.map((c: any) => {
|
|
206
|
+
if (c.type === "text") return c.text;
|
|
207
|
+
if (c.type === "image") return `[image: ${c.mimeType}]`;
|
|
208
|
+
return JSON.stringify(c);
|
|
209
|
+
})
|
|
210
|
+
.join("\n");
|
|
211
|
+
}
|
|
212
|
+
return typeof content === "string" ? content : JSON.stringify(content);
|
|
213
|
+
} catch (err: any) {
|
|
214
|
+
return `Error calling MCP tool "${toolName}" on server "${serverName}": ${err.message}`;
|
|
215
|
+
}
|
|
216
|
+
}
|
|
217
|
+
|
|
218
|
+
// ── Server management ──
|
|
219
|
+
|
|
220
|
+
export function addServer(name: string, config: MCPServerConfig): { ok: boolean; message: string } {
|
|
221
|
+
try {
|
|
222
|
+
if (!existsSync(GLOBAL_CONFIG_DIR)) {
|
|
223
|
+
mkdirSync(GLOBAL_CONFIG_DIR, { recursive: true });
|
|
224
|
+
}
|
|
225
|
+
|
|
226
|
+
let existing: MCPConfig = { mcpServers: {} };
|
|
227
|
+
if (existsSync(GLOBAL_CONFIG_PATH)) {
|
|
228
|
+
try {
|
|
229
|
+
existing = JSON.parse(readFileSync(GLOBAL_CONFIG_PATH, "utf-8"));
|
|
230
|
+
if (!existing.mcpServers) existing.mcpServers = {};
|
|
231
|
+
} catch {
|
|
232
|
+
existing = { mcpServers: {} };
|
|
233
|
+
}
|
|
234
|
+
}
|
|
235
|
+
|
|
236
|
+
existing.mcpServers[name] = config;
|
|
237
|
+
writeFileSync(GLOBAL_CONFIG_PATH, JSON.stringify(existing, null, 2) + "\n", "utf-8");
|
|
238
|
+
return { ok: true, message: `Added MCP server "${name}" to global config.` };
|
|
239
|
+
} catch (err: any) {
|
|
240
|
+
return { ok: false, message: `Failed to add server: ${err.message}` };
|
|
241
|
+
}
|
|
242
|
+
}
|
|
243
|
+
|
|
244
|
+
export function removeServer(name: string): { ok: boolean; message: string } {
|
|
245
|
+
try {
|
|
246
|
+
if (!existsSync(GLOBAL_CONFIG_PATH)) {
|
|
247
|
+
return { ok: false, message: `No global MCP config found.` };
|
|
248
|
+
}
|
|
249
|
+
|
|
250
|
+
const existing: MCPConfig = JSON.parse(readFileSync(GLOBAL_CONFIG_PATH, "utf-8"));
|
|
251
|
+
if (!existing.mcpServers || !existing.mcpServers[name]) {
|
|
252
|
+
return { ok: false, message: `Server "${name}" not found in global config.` };
|
|
253
|
+
}
|
|
254
|
+
|
|
255
|
+
delete existing.mcpServers[name];
|
|
256
|
+
writeFileSync(GLOBAL_CONFIG_PATH, JSON.stringify(existing, null, 2) + "\n", "utf-8");
|
|
257
|
+
return { ok: true, message: `Removed MCP server "${name}" from global config.` };
|
|
258
|
+
} catch (err: any) {
|
|
259
|
+
return { ok: false, message: `Failed to remove server: ${err.message}` };
|
|
260
|
+
}
|
|
261
|
+
}
|
|
262
|
+
|
|
263
|
+
export function listServers(cwd: string): Array<{ name: string; source: string; command: string; connected: boolean; toolCount: number }> {
|
|
264
|
+
const result: Array<{ name: string; source: string; command: string; connected: boolean; toolCount: number }> = [];
|
|
265
|
+
|
|
266
|
+
// Gather from global config
|
|
267
|
+
const globalConfig = loadConfigFile(GLOBAL_CONFIG_PATH);
|
|
268
|
+
if (globalConfig) {
|
|
269
|
+
for (const [name, cfg] of Object.entries(globalConfig.mcpServers)) {
|
|
270
|
+
const connected = connectedServers.find((s) => s.name === name);
|
|
271
|
+
result.push({
|
|
272
|
+
name,
|
|
273
|
+
source: "global",
|
|
274
|
+
command: `${cfg.command} ${(cfg.args ?? []).join(" ")}`.trim(),
|
|
275
|
+
connected: !!connected,
|
|
276
|
+
toolCount: connected?.tools.length ?? 0,
|
|
277
|
+
});
|
|
278
|
+
}
|
|
279
|
+
}
|
|
280
|
+
|
|
281
|
+
// Gather from project configs
|
|
282
|
+
for (const configPath of getProjectConfigPaths(cwd)) {
|
|
283
|
+
const config = loadConfigFile(configPath);
|
|
284
|
+
if (config) {
|
|
285
|
+
const source = configPath.includes(".cursor") ? "cursor" : configPath.includes("opencode") ? "opencode" : "project";
|
|
286
|
+
for (const [name, cfg] of Object.entries(config.mcpServers)) {
|
|
287
|
+
// Skip if already listed from global (project overrides)
|
|
288
|
+
const existing = result.find((r) => r.name === name);
|
|
289
|
+
if (existing) {
|
|
290
|
+
existing.source = source;
|
|
291
|
+
existing.command = `${cfg.command} ${(cfg.args ?? []).join(" ")}`.trim();
|
|
292
|
+
continue;
|
|
293
|
+
}
|
|
294
|
+
const connected = connectedServers.find((s) => s.name === name);
|
|
295
|
+
result.push({
|
|
296
|
+
name,
|
|
297
|
+
source,
|
|
298
|
+
command: `${cfg.command} ${(cfg.args ?? []).join(" ")}`.trim(),
|
|
299
|
+
connected: !!connected,
|
|
300
|
+
toolCount: connected?.tools.length ?? 0,
|
|
301
|
+
});
|
|
302
|
+
}
|
|
303
|
+
}
|
|
304
|
+
}
|
|
305
|
+
|
|
306
|
+
return result;
|
|
307
|
+
}
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
import type { HardwareInfo } from "./hardware.js";
|
|
2
|
+
|
|
3
|
+
/** A curated local coding model with its hardware requirements. */
export interface RecommendedModel {
  name: string; // Display name
  ollamaId: string; // Ollama model ID
  size: number; // Download size in GB
  ramRequired: number; // Minimum RAM in GB
  vramOptimal: number; // Optimal VRAM in GB (0 = CPU fine)
  description: string; // One-liner
  speed: string; // e.g., "~45 tok/s on M1"
  quality: "good" | "great" | "best";
}

/** How well a model fits the detected hardware, best to worst. */
export type ModelFit = "perfect" | "good" | "tight" | "skip";

/** A recommended model annotated with its hardware-fit rating. */
export interface ScoredModel extends RecommendedModel {
  fit: ModelFit;
}
|
|
19
|
+
|
|
20
|
+
// Curated model catalog. RAM/VRAM figures are rough requirements for the
// default quantized builds; speed strings are ballpark numbers intended
// for relative comparison only.
const MODELS: RecommendedModel[] = [
  {
    name: "Qwen 2.5 Coder 3B",
    ollamaId: "qwen2.5-coder:3b",
    size: 2,
    ramRequired: 8,
    vramOptimal: 4,
    description: "Lightweight, fast coding model",
    speed: "~60 tok/s on M1",
    quality: "good",
  },
  {
    name: "Qwen 2.5 Coder 7B",
    ollamaId: "qwen2.5-coder:7b",
    size: 5,
    ramRequired: 16,
    vramOptimal: 8,
    description: "Sweet spot for most machines",
    speed: "~45 tok/s on M1",
    quality: "great",
  },
  {
    name: "Qwen 2.5 Coder 14B",
    ollamaId: "qwen2.5-coder:14b",
    size: 9,
    ramRequired: 32,
    vramOptimal: 16,
    description: "High quality coding",
    speed: "~25 tok/s on M1 Pro",
    quality: "best",
  },
  {
    name: "Qwen 2.5 Coder 32B",
    ollamaId: "qwen2.5-coder:32b",
    size: 20,
    ramRequired: 48,
    vramOptimal: 32,
    description: "Premium quality, needs lots of RAM",
    speed: "~12 tok/s on M1 Max",
    quality: "best",
  },
  {
    name: "DeepSeek Coder V2 16B",
    ollamaId: "deepseek-coder-v2:16b",
    size: 9,
    ramRequired: 32,
    vramOptimal: 16,
    description: "Strong alternative for coding",
    speed: "~30 tok/s on M1 Pro",
    quality: "great",
  },
  {
    name: "CodeLlama 7B",
    ollamaId: "codellama:7b",
    size: 4,
    ramRequired: 16,
    vramOptimal: 8,
    description: "Meta's coding model",
    speed: "~40 tok/s on M1",
    quality: "good",
  },
  {
    name: "StarCoder2 7B",
    ollamaId: "starcoder2:7b",
    size: 4,
    ramRequired: 16,
    vramOptimal: 8,
    description: "Good for code completion",
    speed: "~40 tok/s on M1",
    quality: "good",
  },
];
|
|
92
|
+
|
|
93
|
+
function scoreModel(model: RecommendedModel, ramGB: number, vramGB: number): ModelFit {
|
|
94
|
+
if (ramGB < model.ramRequired) return "skip";
|
|
95
|
+
|
|
96
|
+
const ramHeadroom = ramGB - model.ramRequired;
|
|
97
|
+
const hasGoodVRAM = vramGB >= model.vramOptimal;
|
|
98
|
+
|
|
99
|
+
if (hasGoodVRAM && ramHeadroom >= 4) return "perfect";
|
|
100
|
+
if (hasGoodVRAM || ramHeadroom >= 8) return "good";
|
|
101
|
+
if (ramHeadroom >= 0) return "tight";
|
|
102
|
+
return "skip";
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
const qualityOrder: Record<string, number> = { best: 3, great: 2, good: 1 };
|
|
106
|
+
const fitOrder: Record<string, number> = { perfect: 4, good: 3, tight: 2, skip: 1 };
|
|
107
|
+
|
|
108
|
+
export function getRecommendations(hardware: HardwareInfo): ScoredModel[] {
|
|
109
|
+
const ramGB = hardware.ram / (1024 * 1024 * 1024);
|
|
110
|
+
const vramGB = hardware.gpu?.vram ? hardware.gpu.vram / (1024 * 1024 * 1024) : 0;
|
|
111
|
+
|
|
112
|
+
// Apple Silicon uses unified memory — VRAM = RAM
|
|
113
|
+
const effectiveVRAM = hardware.appleSilicon ? ramGB : vramGB;
|
|
114
|
+
|
|
115
|
+
const scored: ScoredModel[] = MODELS.map((m) => ({
|
|
116
|
+
...m,
|
|
117
|
+
fit: scoreModel(m, ramGB, effectiveVRAM),
|
|
118
|
+
}));
|
|
119
|
+
|
|
120
|
+
// Sort: perfect first, then by quality descending
|
|
121
|
+
scored.sort((a, b) => {
|
|
122
|
+
const fitDiff = (fitOrder[b.fit] ?? 0) - (fitOrder[a.fit] ?? 0);
|
|
123
|
+
if (fitDiff !== 0) return fitDiff;
|
|
124
|
+
return (qualityOrder[b.quality] ?? 0) - (qualityOrder[a.quality] ?? 0);
|
|
125
|
+
});
|
|
126
|
+
|
|
127
|
+
return scored;
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
export function getFitIcon(fit: ModelFit): string {
|
|
131
|
+
switch (fit) {
|
|
132
|
+
case "perfect": return "\u2B50"; // ⭐
|
|
133
|
+
case "good": return "\u2705"; // ✅
|
|
134
|
+
case "tight": return "\u26A0\uFE0F"; // ⚠️
|
|
135
|
+
case "skip": return "\u274C"; // ❌
|
|
136
|
+
}
|
|
137
|
+
}
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
import { execSync, spawn } from "child_process";
|
|
2
|
+
|
|
3
|
+
/** Check if ollama binary exists on PATH */
|
|
4
|
+
export function isOllamaInstalled(): boolean {
|
|
5
|
+
try {
|
|
6
|
+
const cmd = process.platform === "win32" ? "where ollama" : "which ollama";
|
|
7
|
+
execSync(cmd, { stdio: ["pipe", "pipe", "pipe"], timeout: 3000 });
|
|
8
|
+
return true;
|
|
9
|
+
} catch {
|
|
10
|
+
return false;
|
|
11
|
+
}
|
|
12
|
+
}
|
|
13
|
+
|
|
14
|
+
/** Check if ollama server is responding */
|
|
15
|
+
export async function isOllamaRunning(): Promise<boolean> {
|
|
16
|
+
try {
|
|
17
|
+
const controller = new AbortController();
|
|
18
|
+
const timeout = setTimeout(() => controller.abort(), 2000);
|
|
19
|
+
const res = await fetch("http://localhost:11434/api/tags", { signal: controller.signal });
|
|
20
|
+
clearTimeout(timeout);
|
|
21
|
+
return res.ok;
|
|
22
|
+
} catch {
|
|
23
|
+
return false;
|
|
24
|
+
}
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
/** Get the install command for the user's OS */
|
|
28
|
+
export function getOllamaInstallCommand(os: "macos" | "linux" | "windows"): string {
|
|
29
|
+
switch (os) {
|
|
30
|
+
case "macos": return "brew install ollama";
|
|
31
|
+
case "linux": return "curl -fsSL https://ollama.com/install.sh | sh";
|
|
32
|
+
case "windows": return "winget install Ollama.Ollama";
|
|
33
|
+
}
|
|
34
|
+
}
|
|
35
|
+
|
|
36
|
+
/** Start ollama serve in background */
|
|
37
|
+
export function startOllama(): void {
|
|
38
|
+
const child = spawn("ollama", ["serve"], {
|
|
39
|
+
detached: true,
|
|
40
|
+
stdio: "ignore",
|
|
41
|
+
});
|
|
42
|
+
child.unref();
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
/** Snapshot of an in-flight `ollama pull`, passed to onProgress. */
export interface PullProgress {
  // Phase label, e.g. "pulling manifest", "downloading", "verifying",
  // "writing manifest", "success".
  status: string;
  // Total bytes, when the CLI reports an "X GB/Y GB" figure.
  total?: number;
  // Bytes downloaded so far, when reported.
  completed?: number;
  // Best-effort 0-100 percentage parsed from the CLI output.
  percent: number;
}
|
|
51
|
+
|
|
52
|
+
/**
|
|
53
|
+
* Pull a model from Ollama registry.
|
|
54
|
+
* Calls onProgress with download updates.
|
|
55
|
+
* Returns a promise that resolves when complete.
|
|
56
|
+
*/
|
|
57
|
+
export function pullModel(
|
|
58
|
+
modelId: string,
|
|
59
|
+
onProgress?: (progress: PullProgress) => void
|
|
60
|
+
): Promise<void> {
|
|
61
|
+
return new Promise((resolve, reject) => {
|
|
62
|
+
const child = spawn("ollama", ["pull", modelId], {
|
|
63
|
+
stdio: ["pipe", "pipe", "pipe"],
|
|
64
|
+
});
|
|
65
|
+
|
|
66
|
+
let lastOutput = "";
|
|
67
|
+
|
|
68
|
+
const parseLine = (data: string) => {
|
|
69
|
+
lastOutput = data;
|
|
70
|
+
// Ollama pull output looks like:
|
|
71
|
+
// pulling manifest
|
|
72
|
+
// pulling abc123... 58% ▕██████████░░░░░░░░░░▏ 2.9 GB/5.0 GB
|
|
73
|
+
// verifying sha256 digest
|
|
74
|
+
// writing manifest
|
|
75
|
+
// success
|
|
76
|
+
|
|
77
|
+
// Try to parse percentage
|
|
78
|
+
const pctMatch = data.match(/(\d+)%/);
|
|
79
|
+
const sizeMatch = data.match(/([\d.]+)\s*GB\s*\/\s*([\d.]+)\s*GB/);
|
|
80
|
+
|
|
81
|
+
if (pctMatch) {
|
|
82
|
+
const percent = parseInt(pctMatch[1]);
|
|
83
|
+
let completed: number | undefined;
|
|
84
|
+
let total: number | undefined;
|
|
85
|
+
if (sizeMatch) {
|
|
86
|
+
completed = parseFloat(sizeMatch[1]) * 1024 * 1024 * 1024;
|
|
87
|
+
total = parseFloat(sizeMatch[2]) * 1024 * 1024 * 1024;
|
|
88
|
+
}
|
|
89
|
+
onProgress?.({ status: "downloading", total, completed, percent });
|
|
90
|
+
} else if (data.includes("pulling manifest")) {
|
|
91
|
+
onProgress?.({ status: "pulling manifest", percent: 0 });
|
|
92
|
+
} else if (data.includes("verifying")) {
|
|
93
|
+
onProgress?.({ status: "verifying", percent: 100 });
|
|
94
|
+
} else if (data.includes("writing manifest")) {
|
|
95
|
+
onProgress?.({ status: "writing manifest", percent: 100 });
|
|
96
|
+
} else if (data.includes("success")) {
|
|
97
|
+
onProgress?.({ status: "success", percent: 100 });
|
|
98
|
+
}
|
|
99
|
+
};
|
|
100
|
+
|
|
101
|
+
child.stdout?.on("data", (data: Buffer) => {
|
|
102
|
+
parseLine(data.toString().trim());
|
|
103
|
+
});
|
|
104
|
+
|
|
105
|
+
child.stderr?.on("data", (data: Buffer) => {
|
|
106
|
+
// Ollama writes progress to stderr
|
|
107
|
+
parseLine(data.toString().trim());
|
|
108
|
+
});
|
|
109
|
+
|
|
110
|
+
child.on("close", (code) => {
|
|
111
|
+
if (code === 0) {
|
|
112
|
+
resolve();
|
|
113
|
+
} else {
|
|
114
|
+
reject(new Error(`ollama pull failed (exit ${code}): ${lastOutput}`));
|
|
115
|
+
}
|
|
116
|
+
});
|
|
117
|
+
|
|
118
|
+
child.on("error", (err) => {
|
|
119
|
+
reject(new Error(`Failed to run ollama pull: ${err.message}`));
|
|
120
|
+
});
|
|
121
|
+
});
|
|
122
|
+
}
|
|
123
|
+
|
|
124
|
+
/** List models installed in Ollama */
|
|
125
|
+
export async function listInstalledModels(): Promise<string[]> {
|
|
126
|
+
try {
|
|
127
|
+
const controller = new AbortController();
|
|
128
|
+
const timeout = setTimeout(() => controller.abort(), 3000);
|
|
129
|
+
const res = await fetch("http://localhost:11434/api/tags", { signal: controller.signal });
|
|
130
|
+
clearTimeout(timeout);
|
|
131
|
+
if (res.ok) {
|
|
132
|
+
const data = (await res.json()) as { models?: Array<{ name: string }> };
|
|
133
|
+
return (data.models ?? []).map((m) => m.name);
|
|
134
|
+
}
|
|
135
|
+
} catch { /* not running */ }
|
|
136
|
+
return [];
|
|
137
|
+
}
|