codemaxxing 0.3.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +16 -16
- package/dist/agent.d.ts +7 -0
- package/dist/agent.js +51 -2
- package/dist/exec.js +10 -0
- package/dist/index.js +322 -4
- package/dist/utils/hardware.d.ts +17 -0
- package/dist/utils/hardware.js +120 -0
- package/dist/utils/mcp.d.ts +55 -0
- package/dist/utils/mcp.js +251 -0
- package/dist/utils/models.d.ts +17 -0
- package/dist/utils/models.js +113 -0
- package/dist/utils/ollama.d.ts +22 -0
- package/dist/utils/ollama.js +121 -0
- package/package.json +2 -1
- package/src/agent.ts +55 -2
- package/src/exec.ts +12 -0
- package/src/index.tsx +413 -2
- package/src/utils/hardware.ts +131 -0
- package/src/utils/mcp.ts +307 -0
- package/src/utils/models.ts +137 -0
- package/src/utils/ollama.ts +137 -0
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
import os from "os";
|
|
2
|
+
import { execSync } from "child_process";
|
|
3
|
+
// Normalize Node's platform identifier to the three OS labels used here.
function getOS() {
    if (process.platform === "darwin")
        return "macos";
    if (process.platform === "win32")
        return "windows";
    // Everything else (including BSDs) is treated as Linux.
    return "linux";
}
|
|
10
|
+
// Summarize the host CPU from the first logical core the OS reports.
function getCPU() {
    const info = os.cpus();
    const primary = info[0];
    return {
        name: primary?.model?.trim() ?? "Unknown CPU", // e.g. "Apple M1"
        cores: info.length,                            // logical core count
        speed: primary?.speed ?? 0,                    // MHz
    };
}
|
|
18
|
+
/**
 * Best-effort GPU detection.
 *
 * Shells out to the platform's native tool, each with a 5-second timeout:
 *   macos   → system_profiler SPDisplaysDataType -json
 *   linux   → nvidia-smi, falling back to `lspci | grep -i vga` (name only)
 *   windows → wmic win32_VideoController
 *
 * @param platform one of "macos" | "linux" | "windows" (see getOS)
 * @returns {name, vram} with vram in bytes (0 when unknown), or null when
 *          nothing could be detected.
 */
function getGPU(platform) {
    try {
        if (platform === "macos") {
            const raw = execSync("system_profiler SPDisplaysDataType -json", {
                encoding: "utf-8",
                timeout: 5000,
                stdio: ["pipe", "pipe", "pipe"], // capture stderr so it doesn't leak to the terminal
            });
            const data = JSON.parse(raw);
            const displays = data?.SPDisplaysDataType;
            if (Array.isArray(displays) && displays.length > 0) {
                const gpu = displays[0]; // only the first display adapter is reported
                const name = gpu.sppci_model ?? gpu._name ?? "Unknown GPU";
                // On Apple Silicon, VRAM is shared (unified memory) — report total RAM
                const vramStr = gpu["spdisplays_vram"] ?? gpu["spdisplays_vram_shared"] ?? "";
                let vram = 0;
                if (vramStr) {
                    // system_profiler formats VRAM like "8 GB" or "1536 MB"
                    const match = vramStr.match(/(\d+)\s*(GB|MB)/i);
                    if (match) {
                        vram = parseInt(match[1]) * (match[2].toUpperCase() === "GB" ? 1024 * 1024 * 1024 : 1024 * 1024);
                    }
                }
                // Apple Silicon unified memory — use total RAM as VRAM
                if (vram === 0 && name.toLowerCase().includes("apple")) {
                    vram = os.totalmem();
                }
                return { name, vram };
            }
        }
        if (platform === "linux") {
            // Try NVIDIA first
            try {
                const raw = execSync("nvidia-smi --query-gpu=name,memory.total --format=csv,noheader", {
                    encoding: "utf-8",
                    timeout: 5000,
                    stdio: ["pipe", "pipe", "pipe"],
                });
                const line = raw.trim().split("\n")[0]; // first GPU only
                if (line) {
                    const parts = line.split(",").map(s => s.trim());
                    const name = parts[0] ?? "NVIDIA GPU";
                    const memMatch = (parts[1] ?? "").match(/(\d+)/);
                    const vram = memMatch ? parseInt(memMatch[1]) * 1024 * 1024 : 0; // MiB to bytes
                    return { name, vram };
                }
            }
            catch {
                // No NVIDIA, try lspci
                try {
                    const raw = execSync("lspci | grep -i vga", {
                        encoding: "utf-8",
                        timeout: 5000,
                        stdio: ["pipe", "pipe", "pipe"],
                    });
                    const line = raw.trim().split("\n")[0];
                    if (line) {
                        // NOTE(review): slice(2) assumes the adapter name follows the
                        // second ":" in the lspci line; lspci cannot report VRAM, so 0.
                        const name = line.split(":").slice(2).join(":").trim() || "Unknown GPU";
                        return { name, vram: 0 };
                    }
                }
                catch { /* no lspci */ }
            }
        }
        if (platform === "windows") {
            try {
                const raw = execSync("wmic path win32_VideoController get Name,AdapterRAM /format:csv", {
                    encoding: "utf-8",
                    timeout: 5000,
                    stdio: ["pipe", "pipe", "pipe"],
                });
                // CSV rows are Node,AdapterRAM,Name — drop blanks and the header row
                const lines = raw.trim().split("\n").filter(l => l.trim() && !l.startsWith("Node"));
                if (lines.length > 0) {
                    const parts = lines[0].split(",");
                    const adapterRAM = parseInt(parts[1] ?? "0"); // used directly as bytes
                    const name = parts[2]?.trim() ?? "Unknown GPU";
                    return { name, vram: isNaN(adapterRAM) ? 0 : adapterRAM };
                }
            }
            catch { /* no wmic */ }
        }
    }
    catch {
        // GPU detection failed
    }
    return null;
}
|
|
104
|
+
/**
 * Gather a snapshot of the host machine: CPU summary, total RAM (bytes),
 * GPU info (or null), the OS label, and an Apple Silicon flag.
 */
export function detectHardware() {
    const platformName = getOS();
    const cpuInfo = getCPU();
    return {
        cpu: cpuInfo,
        ram: os.totalmem(),
        gpu: getGPU(platformName),
        os: platformName,
        // "Apple M1/M2/…" model strings mark Apple Silicon (unified memory).
        appleSilicon: platformName === "macos" && /apple\s+m/i.test(cpuInfo.name),
    };
}
|
|
113
|
+
/**
 * Format a byte count as a human-readable string ("16 GB", "512 MB", "3 KB", "42 B").
 *
 * Rounds to the nearest whole unit. Fix: values below 1 KB are now rendered
 * in bytes — previously they were rounded to "0 KB"/"1 KB" (e.g. 100 → "0 KB"),
 * which was misleading.
 */
export function formatBytes(bytes) {
    const KB = 1024;
    const MB = KB * 1024;
    const GB = MB * 1024;
    if (bytes >= GB)
        return `${Math.round(bytes / GB)} GB`;
    if (bytes >= MB)
        return `${Math.round(bytes / MB)} MB`;
    if (bytes >= KB)
        return `${Math.round(bytes / KB)} KB`;
    return `${Math.round(bytes)} B`;
}
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
/**
 * MCP (Model Context Protocol) client support
 * Connects to external MCP servers and exposes their tools to the LLM agent.
 */
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
import type { ChatCompletionTool } from "openai/resources/chat/completions";
/** How to launch one MCP server as a child process (stdio transport). */
export interface MCPServerConfig {
    command: string;
    /** Extra CLI arguments appended to `command`. */
    args?: string[];
    /** Environment overrides merged on top of the parent process environment. */
    env?: Record<string, string>;
}
/** Shape of an mcp.json file: map of server name → launch config. */
export interface MCPConfig {
    mcpServers: Record<string, MCPServerConfig>;
}
/** A live connection to one MCP server plus the tools it advertised. */
export interface ConnectedServer {
    name: string;
    client: Client;
    transport: StdioClientTransport;
    /** Tools returned by the server's listTools() call. */
    tools: Array<{
        name: string;
        description?: string;
        inputSchema: Record<string, unknown>;
    }>;
}
/** Merge global (~/.codemaxxing/mcp.json) and project-level MCP configs; project wins. */
export declare function loadMCPConfig(cwd: string): MCPConfig;
/** Connect to every configured server sequentially; failures reported via onStatus. */
export declare function connectToServers(config: MCPConfig, onStatus?: (name: string, status: string) => void): Promise<ConnectedServer[]>;
/** Close all live connections (ignoring close errors) and clear the registry. */
export declare function disconnectAll(): Promise<void>;
/** The shared, mutable registry of live connections. */
export declare function getConnectedServers(): ConnectedServer[];
/** Convert all servers' tools into OpenAI function-tool specs named mcp_<server>_<tool>. */
export declare function getAllMCPTools(servers: ConnectedServer[]): ChatCompletionTool[];
/**
 * Parse an MCP tool call name to extract server name and tool name.
 * Format: mcp_<serverName>_<toolName>
 * Server names can contain hyphens but not underscores (by convention).
 */
export declare function parseMCPToolName(fullName: string): {
    serverName: string;
    toolName: string;
} | null;
/** Invoke a tool on a connected server; errors come back as strings, not throws. */
export declare function callMCPTool(serverName: string, toolName: string, args: Record<string, unknown>): Promise<string>;
/** Add (or overwrite) a server entry in the global config file. */
export declare function addServer(name: string, config: MCPServerConfig): {
    ok: boolean;
    message: string;
};
/** Remove a named server entry from the global config file. */
export declare function removeServer(name: string): {
    ok: boolean;
    message: string;
};
/** List every configured server (global + project) with live connection status. */
export declare function listServers(cwd: string): Array<{
    name: string;
    source: string;
    command: string;
    connected: boolean;
    toolCount: number;
}>;
|
@@ -0,0 +1,251 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* MCP (Model Context Protocol) client support
|
|
3
|
+
* Connects to external MCP servers and exposes their tools to the LLM agent.
|
|
4
|
+
*/
|
|
5
|
+
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
|
|
6
|
+
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
|
|
7
|
+
import { readFileSync, writeFileSync, existsSync, mkdirSync } from "fs";
|
|
8
|
+
import { join } from "path";
|
|
9
|
+
import { homedir } from "os";
|
|
10
|
+
// ── Config paths ──
// User-level (global) MCP config lives at ~/.codemaxxing/mcp.json.
const GLOBAL_CONFIG_DIR = join(homedir(), ".codemaxxing");
const GLOBAL_CONFIG_PATH = join(GLOBAL_CONFIG_DIR, "mcp.json");
|
|
13
|
+
/** Candidate project-level MCP config files, in lowest→highest precedence order. */
function getProjectConfigPaths(cwd) {
    const candidates = [
        [".codemaxxing", "mcp.json"],
        [".cursor", "mcp.json"],
        ["opencode.json"],
    ];
    return candidates.map((segments) => join(cwd, ...segments));
}
|
|
20
|
+
// ── Config loading ──
|
|
21
|
+
/**
 * Read and parse one MCP config file.
 * Returns the parsed object only when it has an `mcpServers` object;
 * otherwise null (missing file, invalid JSON, or wrong shape).
 */
function loadConfigFile(path) {
    try {
        if (!existsSync(path))
            return null;
        const parsed = JSON.parse(readFileSync(path, "utf-8"));
        const hasServers = parsed.mcpServers && typeof parsed.mcpServers === "object";
        return hasServers ? parsed : null;
    }
    catch {
        return null;
    }
}
|
|
36
|
+
/**
 * Merge MCP server configs from every known location. The global file is read
 * first (lowest priority), then each project-level file in order, with
 * later entries overriding same-named earlier ones.
 */
export function loadMCPConfig(cwd) {
    const sources = [GLOBAL_CONFIG_PATH, ...getProjectConfigPaths(cwd)];
    const mcpServers = {};
    for (const source of sources) {
        const config = loadConfigFile(source);
        if (config) {
            Object.assign(mcpServers, config.mcpServers);
        }
    }
    return { mcpServers };
}
|
|
52
|
+
// ── Connection management ──
// Module-level registry of live MCP connections: populated by
// connectToServers(), cleared by disconnectAll(), read by the other helpers.
const connectedServers = [];
|
|
54
|
+
/**
 * Connect to every configured MCP server over stdio and record each live
 * connection in the module-level registry.
 *
 * Servers are connected sequentially; a failure on one is reported through
 * `onStatus` and does not stop the rest.
 *
 * Fix: the client now identifies as version "0.4.0" — it was left at "0.3.0"
 * after the package version bump.
 *
 * @param config   Merged MCP config (see loadMCPConfig).
 * @param onStatus Optional progress callback: (serverName, statusText).
 * @returns The shared array of all connected servers.
 */
export async function connectToServers(config, onStatus) {
    const entries = Object.entries(config.mcpServers);
    if (entries.length === 0)
        return [];
    for (const [name, serverConfig] of entries) {
        try {
            onStatus?.(name, "connecting");
            const transport = new StdioClientTransport({
                command: serverConfig.command,
                args: serverConfig.args ?? [],
                // Child inherits our environment, with per-server overrides on top.
                env: { ...process.env, ...(serverConfig.env ?? {}) },
            });
            const client = new Client({
                name: "codemaxxing",
                version: "0.4.0", // keep in sync with package.json
            });
            await client.connect(transport);
            // Fetch available tools so they can be exposed to the agent.
            const toolsResult = await client.listTools();
            const tools = (toolsResult.tools ?? []).map((t) => ({
                name: t.name,
                description: t.description,
                inputSchema: (t.inputSchema ?? { type: "object", properties: {} }),
            }));
            const server = { name, client, transport, tools };
            connectedServers.push(server);
            onStatus?.(name, `connected (${tools.length} tools)`);
        }
        catch (err) {
            onStatus?.(name, `failed: ${err.message}`);
        }
    }
    return connectedServers;
}
|
|
88
|
+
/** Close every live MCP connection (ignoring close errors), then empty the registry. */
export async function disconnectAll() {
    for (const { client } of connectedServers) {
        try {
            await client.close();
        }
        catch {
            // Best-effort cleanup: a failed close should not block the rest.
        }
    }
    connectedServers.length = 0;
}
|
|
99
|
+
/** Expose the live connection registry (the shared mutable array, not a copy). */
export function getConnectedServers() {
    return connectedServers;
}
|
|
102
|
+
// ── Tool format conversion ──
|
|
103
|
+
/**
 * Convert the tools of every connected server into OpenAI function-tool specs.
 * Names are namespaced as `mcp_<server>_<tool>` so calls can be routed back,
 * and descriptions are tagged with the originating server.
 */
export function getAllMCPTools(servers) {
    return servers.flatMap((server) => server.tools.map((tool) => ({
        type: "function",
        function: {
            name: `mcp_${server.name}_${tool.name}`,
            description: `[MCP: ${server.name}] ${tool.description ?? tool.name}`,
            parameters: tool.inputSchema,
        },
    })));
}
|
|
119
|
+
/**
 * Parse an MCP tool call name to extract server name and tool name.
 * Format: mcp_<serverName>_<toolName>
 * Server names can contain hyphens but not underscores (by convention).
 */
export function parseMCPToolName(fullName) {
    const PREFIX = "mcp_";
    if (!fullName.startsWith(PREFIX))
        return null;
    const rest = fullName.slice(PREFIX.length);
    // Prefer matching a known connected server name; this also handles tool
    // names that themselves contain underscores.
    const known = connectedServers.find((s) => rest.startsWith(s.name + "_"));
    if (known) {
        return { serverName: known.name, toolName: rest.slice(known.name.length + 1) };
    }
    // Fallback: everything before the first underscore is the server name.
    const sep = rest.indexOf("_");
    return sep === -1
        ? null
        : { serverName: rest.slice(0, sep), toolName: rest.slice(sep + 1) };
}
|
|
141
|
+
// ── Tool execution ──
|
|
142
|
+
/**
 * Invoke a tool on a connected MCP server and flatten the result to a string.
 * All failures (unknown server, tool error) are RETURNED as strings rather
 * than thrown, so the agent loop can surface them to the model.
 */
export async function callMCPTool(serverName, toolName, args) {
    const server = connectedServers.find((s) => s.name === serverName);
    if (!server) {
        return `Error: MCP server "${serverName}" not found or not connected.`;
    }
    try {
        const result = await server.client.callTool({ name: toolName, arguments: args });
        const content = result.content;
        // MCP tool results normally carry a content array of typed parts.
        if (!Array.isArray(content)) {
            return typeof content === "string" ? content : JSON.stringify(content);
        }
        const rendered = content.map((c) => {
            switch (c.type) {
                case "text": return c.text;
                case "image": return `[image: ${c.mimeType}]`;
                default: return JSON.stringify(c);
            }
        });
        return rendered.join("\n");
    }
    catch (err) {
        return `Error calling MCP tool "${toolName}" on server "${serverName}": ${err.message}`;
    }
}
|
|
168
|
+
// ── Server management ──
|
|
169
|
+
/**
 * Persist an MCP server entry into the global config (~/.codemaxxing/mcp.json),
 * creating the directory as needed and overwriting any same-named entry.
 * A corrupt existing file is replaced rather than failing.
 */
export function addServer(name, config) {
    try {
        if (!existsSync(GLOBAL_CONFIG_DIR)) {
            mkdirSync(GLOBAL_CONFIG_DIR, { recursive: true });
        }
        let store = { mcpServers: {} };
        if (existsSync(GLOBAL_CONFIG_PATH)) {
            try {
                store = JSON.parse(readFileSync(GLOBAL_CONFIG_PATH, "utf-8"));
                if (!store.mcpServers)
                    store.mcpServers = {};
            }
            catch {
                store = { mcpServers: {} };
            }
        }
        store.mcpServers[name] = config;
        writeFileSync(GLOBAL_CONFIG_PATH, JSON.stringify(store, null, 2) + "\n", "utf-8");
        return { ok: true, message: `Added MCP server "${name}" to global config.` };
    }
    catch (err) {
        return { ok: false, message: `Failed to add server: ${err.message}` };
    }
}
|
|
193
|
+
/** Delete a named MCP server entry from the global config file. */
export function removeServer(name) {
    try {
        if (!existsSync(GLOBAL_CONFIG_PATH)) {
            return { ok: false, message: `No global MCP config found.` };
        }
        const store = JSON.parse(readFileSync(GLOBAL_CONFIG_PATH, "utf-8"));
        const servers = store.mcpServers;
        if (!servers || !servers[name]) {
            return { ok: false, message: `Server "${name}" not found in global config.` };
        }
        delete servers[name];
        writeFileSync(GLOBAL_CONFIG_PATH, JSON.stringify(store, null, 2) + "\n", "utf-8");
        return { ok: true, message: `Removed MCP server "${name}" from global config.` };
    }
    catch (err) {
        return { ok: false, message: `Failed to remove server: ${err.message}` };
    }
}
|
|
210
|
+
/**
 * Enumerate every configured MCP server — global config plus each project
 * config — with its source label, launch command, live-connection status
 * and advertised tool count.
 *
 * Project entries override same-named global entries in place (source and
 * command are replaced), mirroring loadMCPConfig() precedence.
 */
export function listServers(cwd) {
    const result = [];
    // Gather from global config
    const globalConfig = loadConfigFile(GLOBAL_CONFIG_PATH);
    if (globalConfig) {
        for (const [name, cfg] of Object.entries(globalConfig.mcpServers)) {
            const connected = connectedServers.find((s) => s.name === name);
            result.push({
                name,
                source: "global",
                command: `${cfg.command} ${(cfg.args ?? []).join(" ")}`.trim(),
                connected: !!connected,
                toolCount: connected?.tools.length ?? 0,
            });
        }
    }
    // Gather from project configs
    for (const configPath of getProjectConfigPaths(cwd)) {
        const config = loadConfigFile(configPath);
        if (config) {
            // Label entries by which kind of config file they came from.
            const source = configPath.includes(".cursor") ? "cursor" : configPath.includes("opencode") ? "opencode" : "project";
            for (const [name, cfg] of Object.entries(config.mcpServers)) {
                // Skip if already listed from global (project overrides)
                const existing = result.find((r) => r.name === name);
                if (existing) {
                    existing.source = source;
                    existing.command = `${cfg.command} ${(cfg.args ?? []).join(" ")}`.trim();
                    continue;
                }
                const connected = connectedServers.find((s) => s.name === name);
                result.push({
                    name,
                    source,
                    command: `${cfg.command} ${(cfg.args ?? []).join(" ")}`.trim(),
                    connected: !!connected,
                    toolCount: connected?.tools.length ?? 0,
                });
            }
        }
    }
    return result;
}
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
import type { HardwareInfo } from "./hardware.js";
/** A curated local coding model and its resource requirements. */
export interface RecommendedModel {
    /** Human-readable display name, e.g. "Qwen 2.5 Coder 7B". */
    name: string;
    /** Identifier used with `ollama pull`, e.g. "qwen2.5-coder:7b". */
    ollamaId: string;
    /** Size in GB — presumably the download size; confirm against the registry. */
    size: number;
    /** Minimum system RAM in GB for the model to be usable. */
    ramRequired: number;
    /** VRAM in GB at which the model runs comfortably. */
    vramOptimal: number;
    /** One-line marketing blurb shown in the picker. */
    description: string;
    /** Rough throughput estimate, e.g. "~45 tok/s on M1". */
    speed: string;
    quality: "good" | "great" | "best";
}
/** How well a model fits the detected hardware, best → worst. */
export type ModelFit = "perfect" | "good" | "tight" | "skip";
/** A model annotated with its fit for the current machine. */
export interface ScoredModel extends RecommendedModel {
    fit: ModelFit;
}
/** Score and sort all known models for the given hardware, best fit first. */
export declare function getRecommendations(hardware: HardwareInfo): ScoredModel[];
/** Emoji badge for a fit rating. */
export declare function getFitIcon(fit: ModelFit): string;
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
// Curated local coding models offered to the user.
// Units: ramRequired / vramOptimal are GB (compared against detected hardware
// in getRecommendations); `size` is in GB too — presumably the download size,
// confirm against the Ollama registry. Speed strings are rough estimates.
const MODELS = [
    {
        name: "Qwen 2.5 Coder 3B",
        ollamaId: "qwen2.5-coder:3b",
        size: 2,
        ramRequired: 8,
        vramOptimal: 4,
        description: "Lightweight, fast coding model",
        speed: "~60 tok/s on M1",
        quality: "good",
    },
    {
        name: "Qwen 2.5 Coder 7B",
        ollamaId: "qwen2.5-coder:7b",
        size: 5,
        ramRequired: 16,
        vramOptimal: 8,
        description: "Sweet spot for most machines",
        speed: "~45 tok/s on M1",
        quality: "great",
    },
    {
        name: "Qwen 2.5 Coder 14B",
        ollamaId: "qwen2.5-coder:14b",
        size: 9,
        ramRequired: 32,
        vramOptimal: 16,
        description: "High quality coding",
        speed: "~25 tok/s on M1 Pro",
        quality: "best",
    },
    {
        name: "Qwen 2.5 Coder 32B",
        ollamaId: "qwen2.5-coder:32b",
        size: 20,
        ramRequired: 48,
        vramOptimal: 32,
        description: "Premium quality, needs lots of RAM",
        speed: "~12 tok/s on M1 Max",
        quality: "best",
    },
    {
        name: "DeepSeek Coder V2 16B",
        ollamaId: "deepseek-coder-v2:16b",
        size: 9,
        ramRequired: 32,
        vramOptimal: 16,
        description: "Strong alternative for coding",
        speed: "~30 tok/s on M1 Pro",
        quality: "great",
    },
    {
        name: "CodeLlama 7B",
        ollamaId: "codellama:7b",
        size: 4,
        ramRequired: 16,
        vramOptimal: 8,
        description: "Meta's coding model",
        speed: "~40 tok/s on M1",
        quality: "good",
    },
    {
        name: "StarCoder2 7B",
        ollamaId: "starcoder2:7b",
        size: 4,
        ramRequired: 16,
        vramOptimal: 8,
        description: "Good for code completion",
        speed: "~40 tok/s on M1",
        quality: "good",
    },
];
|
|
73
|
+
/**
 * Classify how well a model fits the machine.
 * ramGB/vramGB are in gigabytes; returns "perfect" | "good" | "tight" | "skip".
 */
function scoreModel(model, ramGB, vramGB) {
    const headroom = ramGB - model.ramRequired;
    if (headroom < 0)
        return "skip"; // not enough RAM at all
    const vramOk = vramGB >= model.vramOptimal;
    if (vramOk && headroom >= 4)
        return "perfect";
    return vramOk || headroom >= 8 ? "good" : "tight";
}
|
|
86
|
+
// Sort weights used by getRecommendations(); higher means listed earlier.
const qualityOrder = { best: 3, great: 2, good: 1 };
const fitOrder = { perfect: 4, good: 3, tight: 2, skip: 1 };
|
|
88
|
+
/**
 * Score every known model against the detected hardware and return them
 * sorted best first: by fit rank, then by quality rank.
 */
export function getRecommendations(hardware) {
    const BYTES_PER_GB = 1024 * 1024 * 1024;
    const ramGB = hardware.ram / BYTES_PER_GB;
    const dedicatedVRAM = hardware.gpu?.vram ? hardware.gpu.vram / BYTES_PER_GB : 0;
    // Apple Silicon uses unified memory — the GPU can address all of RAM.
    const effectiveVRAM = hardware.appleSilicon ? ramGB : dedicatedVRAM;
    const byRank = (a, b) =>
        ((fitOrder[b.fit] ?? 0) - (fitOrder[a.fit] ?? 0)) ||
        ((qualityOrder[b.quality] ?? 0) - (qualityOrder[a.quality] ?? 0));
    return MODELS
        .map((m) => ({ ...m, fit: scoreModel(m, ramGB, effectiveVRAM) }))
        .sort(byRank);
}
|
|
106
|
+
/** Emoji badge for a fit rating. */
export function getFitIcon(fit) {
    const icons = {
        perfect: "\u2B50",     // ⭐
        good: "\u2705",        // ✅
        tight: "\u26A0\uFE0F", // ⚠️
        skip: "\u274C",        // ❌
    };
    return icons[fit];
}
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
/** Check if ollama binary exists on PATH */
export declare function isOllamaInstalled(): boolean;
/** Check if ollama server is responding */
export declare function isOllamaRunning(): Promise<boolean>;
/** Get the install command for the user's OS */
export declare function getOllamaInstallCommand(os: "macos" | "linux" | "windows"): string;
/** Start ollama serve in background */
export declare function startOllama(): void;
/** One progress snapshot during a model download (passed to pullModel's callback). */
export interface PullProgress {
    /** Status text — presumably the raw status line from the Ollama pull API; confirm in ollama.js. */
    status: string;
    /** Total size when known — NOTE(review): units/scope (bytes, per-layer vs overall) not visible here; confirm. */
    total?: number;
    /** Amount completed so far when known — same caveat on units as `total`. */
    completed?: number;
    /** Completion fraction — assumed 0–100; TODO confirm against the implementation. */
    percent: number;
}
/**
 * Pull a model from Ollama registry.
 * Calls onProgress with download updates.
 * Returns a promise that resolves when complete.
 */
export declare function pullModel(modelId: string, onProgress?: (progress: PullProgress) => void): Promise<void>;
/** List models installed in Ollama */
export declare function listInstalledModels(): Promise<string[]>;
|