codemaxxing 0.3.1 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,121 @@
1
+ import { execSync, spawn } from "child_process";
2
/** Check if the ollama binary exists on PATH. */
export function isOllamaInstalled() {
    // `where` on Windows, `which` everywhere else; both exit non-zero
    // (throwing from execSync) when the binary is not found.
    const locator = process.platform === "win32" ? "where ollama" : "which ollama";
    try {
        execSync(locator, { stdio: ["pipe", "pipe", "pipe"], timeout: 3000 });
    }
    catch {
        return false;
    }
    return true;
}
13
/** Check if the local Ollama server is responding on its default port (11434). */
export async function isOllamaRunning() {
    const controller = new AbortController();
    // Abort the request if the server does not answer within 2 seconds.
    const timeout = setTimeout(() => controller.abort(), 2000);
    try {
        const res = await fetch("http://localhost:11434/api/tags", { signal: controller.signal });
        return res.ok;
    }
    catch {
        // Network error or abort — treat as "not running".
        return false;
    }
    finally {
        // Always clear the timer. The original only cleared it on the success
        // path, leaking a pending 2s timeout whenever fetch threw.
        clearTimeout(timeout);
    }
}
26
/**
 * Get the install command for the user's OS.
 * @param os "macos" | "linux" | "windows"
 * @returns a shell command string that installs Ollama
 */
export function getOllamaInstallCommand(os) {
    switch (os) {
        case "macos": return "brew install ollama";
        case "linux": return "curl -fsSL https://ollama.com/install.sh | sh";
        case "windows": return "winget install Ollama.Ollama";
        // Fall back to the portable install script for any unrecognized value.
        // The original switch had no default and returned undefined, which the
        // setup-wizard UI would render as the literal text "undefined".
        default: return "curl -fsSL https://ollama.com/install.sh | sh";
    }
}
34
/**
 * Start `ollama serve` as a detached background process.
 * The child is unref()'d so it does not keep this process alive; callers
 * poll isOllamaRunning() to find out whether the server actually came up.
 */
export function startOllama() {
    const child = spawn("ollama", ["serve"], {
        detached: true,
        stdio: "ignore",
    });
    // Without a handler, a spawn failure (e.g. the binary disappearing
    // between the install check and here) emits an unhandled 'error' event
    // and crashes the entire process. Swallow it — failure is observed by
    // the caller's isOllamaRunning() polling loop.
    child.on("error", () => { });
    child.unref();
}
42
/**
 * Pull a model from the Ollama registry via `ollama pull`.
 *
 * Ollama writes progress (to stderr) in lines like:
 *   pulling manifest
 *   pulling abc123... 58% ▕██████████░░░░░░░░░░▏ 2.9 GB/5.0 GB
 *   verifying sha256 digest
 *   writing manifest
 *   success
 *
 * @param modelId    model identifier, e.g. "llama3:8b"
 * @param onProgress optional callback receiving { status, percent, completed?, total? }
 *                   (completed/total in bytes when a "x GB/y GB" pair is present)
 * @returns a promise that resolves when the pull completes and rejects with
 *          the exit code and last output on failure
 */
export function pullModel(modelId, onProgress) {
    return new Promise((resolve, reject) => {
        const child = spawn("ollama", ["pull", modelId], {
            stdio: ["pipe", "pipe", "pipe"],
        });
        let lastOutput = "";
        // Parse a single output line and emit a progress update if it matches.
        const parseLine = (line) => {
            const pctMatch = line.match(/(\d+)%/);
            const sizeMatch = line.match(/([\d.]+)\s*GB\s*\/\s*([\d.]+)\s*GB/);
            if (pctMatch) {
                const percent = parseInt(pctMatch[1], 10);
                let completed;
                let total;
                if (sizeMatch) {
                    const GiB = 1024 * 1024 * 1024;
                    completed = parseFloat(sizeMatch[1]) * GiB;
                    total = parseFloat(sizeMatch[2]) * GiB;
                }
                onProgress?.({ status: "downloading", total, completed, percent });
            }
            else if (line.includes("pulling manifest")) {
                onProgress?.({ status: "pulling manifest", percent: 0 });
            }
            else if (line.includes("verifying")) {
                onProgress?.({ status: "verifying", percent: 100 });
            }
            else if (line.includes("writing manifest")) {
                onProgress?.({ status: "writing manifest", percent: 100 });
            }
            else if (line.includes("success")) {
                onProgress?.({ status: "success", percent: 100 });
            }
        };
        const handleChunk = (chunk) => {
            const text = chunk.toString().trim();
            if (!text) return;
            // Kept for the error message on failure.
            lastOutput = text;
            // A single data chunk can carry several progress lines (separated
            // by \n or \r carriage-return updates). The original matched the
            // whole chunk at once, so only the FIRST (stale) percentage was
            // reported; parse each line so the latest update wins.
            for (const rawLine of text.split(/\r\n|\r|\n/)) {
                const line = rawLine.trim();
                if (line) parseLine(line);
            }
        };
        child.stdout?.on("data", handleChunk);
        // Ollama writes progress to stderr.
        child.stderr?.on("data", handleChunk);
        child.on("close", (code) => {
            if (code === 0) {
                resolve();
            }
            else {
                reject(new Error(`ollama pull failed (exit ${code}): ${lastOutput}`));
            }
        });
        child.on("error", (err) => {
            reject(new Error(`Failed to run ollama pull: ${err.message}`));
        });
    });
}
107
/**
 * List model names installed in the local Ollama server.
 * @returns array of model name strings; [] when the server is unreachable
 *          or responds with a non-OK status.
 */
export async function listInstalledModels() {
    const controller = new AbortController();
    // Give the local server up to 3 seconds to answer.
    const timeout = setTimeout(() => controller.abort(), 3000);
    try {
        const res = await fetch("http://localhost:11434/api/tags", { signal: controller.signal });
        if (res.ok) {
            const data = (await res.json());
            return (data.models ?? []).map((m) => m.name);
        }
    }
    catch { /* not running */ }
    finally {
        // Always clear the timer. The original only cleared it after a
        // successful fetch, leaking a pending timeout on the error path.
        clearTimeout(timeout);
    }
    return [];
}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "codemaxxing",
3
- "version": "0.3.1",
3
+ "version": "0.4.0",
4
4
  "description": "Open-source terminal coding agent. Connect any LLM. Max your code.",
5
5
  "main": "dist/index.js",
6
6
  "bin": {
package/src/index.tsx CHANGED
@@ -13,6 +13,9 @@ import { getTheme, listThemes, THEMES, DEFAULT_THEME, type Theme } from "./theme
13
13
  import { PROVIDERS, getCredentials, openRouterOAuth, anthropicSetupToken, importCodexToken, importQwenToken, copilotDeviceFlow, saveApiKey } from "./utils/auth.js";
14
14
  import { listInstalledSkills, installSkill, removeSkill, getRegistrySkills, searchRegistry, createSkillScaffold, getActiveSkills, getActiveSkillCount } from "./utils/skills.js";
15
15
  import { listServers, addServer, removeServer, getAllMCPTools, getConnectedServers } from "./utils/mcp.js";
16
+ import { detectHardware, formatBytes, type HardwareInfo } from "./utils/hardware.js";
17
+ import { getRecommendations, getFitIcon, type ScoredModel } from "./utils/models.js";
18
+ import { isOllamaInstalled, isOllamaRunning, getOllamaInstallCommand, startOllama, pullModel, type PullProgress } from "./utils/ollama.js";
16
19
 
17
20
  const VERSION = "0.1.9";
18
21
 
@@ -172,6 +175,16 @@ function App() {
172
175
  resolve: (decision: "yes" | "no" | "always") => void;
173
176
  } | null>(null);
174
177
 
178
+ // ── Setup Wizard State ──
179
+ type WizardScreen = "connection" | "models" | "install-ollama" | "pulling" | null;
180
+ const [wizardScreen, setWizardScreen] = useState<WizardScreen>(null);
181
+ const [wizardIndex, setWizardIndex] = useState(0);
182
+ const [wizardHardware, setWizardHardware] = useState<HardwareInfo | null>(null);
183
+ const [wizardModels, setWizardModels] = useState<ScoredModel[]>([]);
184
+ const [wizardPullProgress, setWizardPullProgress] = useState<PullProgress | null>(null);
185
+ const [wizardPullError, setWizardPullError] = useState<string | null>(null);
186
+ const [wizardSelectedModel, setWizardSelectedModel] = useState<ScoredModel | null>(null);
187
+
175
188
  // Listen for paste events from stdin interceptor
176
189
  useEffect(() => {
177
190
  const handler = ({ content, lines }: { content: string; lines: number }) => {
@@ -210,10 +223,11 @@ function App() {
210
223
  setConnectionInfo([...info]);
211
224
  } else {
212
225
  info.push("✗ No local LLM server found.");
213
- info.push(" /connect — retry after starting LM Studio or Ollama");
214
- info.push(" /login — authenticate with a cloud provider");
215
226
  setConnectionInfo([...info]);
216
227
  setReady(true);
228
+ // Show the setup wizard on first run
229
+ setWizardScreen("connection");
230
+ setWizardIndex(0);
217
231
  return;
218
232
  }
219
233
  } else {
@@ -1141,6 +1155,222 @@ function App() {
1141
1155
  return;
1142
1156
  }
1143
1157
 
1158
+ // ── Setup Wizard Navigation ──
1159
+ if (wizardScreen) {
1160
+ if (wizardScreen === "connection") {
1161
+ const items = ["local", "openrouter", "apikey", "existing"];
1162
+ if (key.upArrow) {
1163
+ setWizardIndex((prev) => (prev - 1 + items.length) % items.length);
1164
+ return;
1165
+ }
1166
+ if (key.downArrow) {
1167
+ setWizardIndex((prev) => (prev + 1) % items.length);
1168
+ return;
1169
+ }
1170
+ if (key.escape) {
1171
+ setWizardScreen(null);
1172
+ return;
1173
+ }
1174
+ if (key.return) {
1175
+ const selected = items[wizardIndex];
1176
+ if (selected === "local") {
1177
+ // Scan hardware and show model picker
1178
+ const hw = detectHardware();
1179
+ setWizardHardware(hw);
1180
+ const recs = getRecommendations(hw).filter(m => m.fit !== "skip");
1181
+ setWizardModels(recs);
1182
+ setWizardScreen("models");
1183
+ setWizardIndex(0);
1184
+ } else if (selected === "openrouter") {
1185
+ setWizardScreen(null);
1186
+ addMsg("info", "Starting OpenRouter OAuth — opening browser...");
1187
+ setLoading(true);
1188
+ setSpinnerMsg("Waiting for authorization...");
1189
+ openRouterOAuth((msg: string) => addMsg("info", msg))
1190
+ .then(() => {
1191
+ addMsg("info", "✅ OpenRouter authenticated! Use /connect to connect.");
1192
+ setLoading(false);
1193
+ })
1194
+ .catch((err: any) => { addMsg("error", `OAuth failed: ${err.message}`); setLoading(false); });
1195
+ } else if (selected === "apikey") {
1196
+ setWizardScreen(null);
1197
+ setLoginPicker(true);
1198
+ setLoginPickerIndex(0);
1199
+ } else if (selected === "existing") {
1200
+ setWizardScreen(null);
1201
+ addMsg("info", "Start your LLM server, then type /connect to retry.");
1202
+ }
1203
+ return;
1204
+ }
1205
+ return;
1206
+ }
1207
+
1208
+ if (wizardScreen === "models") {
1209
+ const models = wizardModels;
1210
+ if (key.upArrow) {
1211
+ setWizardIndex((prev) => (prev - 1 + models.length) % models.length);
1212
+ return;
1213
+ }
1214
+ if (key.downArrow) {
1215
+ setWizardIndex((prev) => (prev + 1) % models.length);
1216
+ return;
1217
+ }
1218
+ if (key.escape) {
1219
+ setWizardScreen("connection");
1220
+ setWizardIndex(0);
1221
+ return;
1222
+ }
1223
+ if (key.return) {
1224
+ const selected = models[wizardIndex];
1225
+ if (selected) {
1226
+ setWizardSelectedModel(selected);
1227
+ // Check if Ollama is installed
1228
+ if (!isOllamaInstalled()) {
1229
+ setWizardScreen("install-ollama");
1230
+ } else {
1231
+ // Start pulling the model
1232
+ setWizardScreen("pulling");
1233
+ setWizardPullProgress({ status: "starting", percent: 0 });
1234
+ setWizardPullError(null);
1235
+
1236
+ (async () => {
1237
+ try {
1238
+ // Ensure ollama is running
1239
+ const running = await isOllamaRunning();
1240
+ if (!running) {
1241
+ setWizardPullProgress({ status: "Starting Ollama server...", percent: 0 });
1242
+ startOllama();
1243
+ // Wait for it to come up
1244
+ for (let i = 0; i < 15; i++) {
1245
+ await new Promise(r => setTimeout(r, 1000));
1246
+ if (await isOllamaRunning()) break;
1247
+ }
1248
+ if (!(await isOllamaRunning())) {
1249
+ setWizardPullError("Could not start Ollama server. Run 'ollama serve' manually, then press Enter.");
1250
+ return;
1251
+ }
1252
+ }
1253
+
1254
+ await pullModel(selected.ollamaId, (p) => {
1255
+ setWizardPullProgress(p);
1256
+ });
1257
+
1258
+ setWizardPullProgress({ status: "success", percent: 100 });
1259
+
1260
+ // Wait briefly then connect
1261
+ await new Promise(r => setTimeout(r, 500));
1262
+ setWizardScreen(null);
1263
+ setWizardPullProgress(null);
1264
+ setWizardSelectedModel(null);
1265
+ addMsg("info", `✅ ${selected.name} installed! Connecting...`);
1266
+ await connectToProvider(true);
1267
+ } catch (err: any) {
1268
+ setWizardPullError(err.message);
1269
+ }
1270
+ })();
1271
+ }
1272
+ }
1273
+ return;
1274
+ }
1275
+ return;
1276
+ }
1277
+
1278
+ if (wizardScreen === "install-ollama") {
1279
+ if (key.escape) {
1280
+ setWizardScreen("models");
1281
+ setWizardIndex(0);
1282
+ return;
1283
+ }
1284
+ if (key.return) {
1285
+ // User says they installed it — check and proceed
1286
+ if (isOllamaInstalled()) {
1287
+ const selected = wizardSelectedModel;
1288
+ if (selected) {
1289
+ setWizardScreen("pulling");
1290
+ setWizardPullProgress({ status: "starting", percent: 0 });
1291
+ setWizardPullError(null);
1292
+
1293
+ (async () => {
1294
+ try {
1295
+ const running = await isOllamaRunning();
1296
+ if (!running) {
1297
+ setWizardPullProgress({ status: "Starting Ollama server...", percent: 0 });
1298
+ startOllama();
1299
+ for (let i = 0; i < 15; i++) {
1300
+ await new Promise(r => setTimeout(r, 1000));
1301
+ if (await isOllamaRunning()) break;
1302
+ }
1303
+ if (!(await isOllamaRunning())) {
1304
+ setWizardPullError("Could not start Ollama server. Run 'ollama serve' manually, then press Enter.");
1305
+ return;
1306
+ }
1307
+ }
1308
+ await pullModel(selected.ollamaId, (p) => setWizardPullProgress(p));
1309
+ setWizardPullProgress({ status: "success", percent: 100 });
1310
+ await new Promise(r => setTimeout(r, 500));
1311
+ setWizardScreen(null);
1312
+ setWizardPullProgress(null);
1313
+ setWizardSelectedModel(null);
1314
+ addMsg("info", `✅ ${selected.name} installed! Connecting...`);
1315
+ await connectToProvider(true);
1316
+ } catch (err: any) {
1317
+ setWizardPullError(err.message);
1318
+ }
1319
+ })();
1320
+ }
1321
+ } else {
1322
+ addMsg("info", "Ollama not found yet. Install it and press Enter again.");
1323
+ }
1324
+ return;
1325
+ }
1326
+ return;
1327
+ }
1328
+
1329
+ if (wizardScreen === "pulling") {
1330
+ // Allow retry on error
1331
+ if (wizardPullError && key.return) {
1332
+ const selected = wizardSelectedModel;
1333
+ if (selected) {
1334
+ setWizardPullError(null);
1335
+ setWizardPullProgress({ status: "retrying", percent: 0 });
1336
+ (async () => {
1337
+ try {
1338
+ const running = await isOllamaRunning();
1339
+ if (!running) {
1340
+ startOllama();
1341
+ for (let i = 0; i < 15; i++) {
1342
+ await new Promise(r => setTimeout(r, 1000));
1343
+ if (await isOllamaRunning()) break;
1344
+ }
1345
+ }
1346
+ await pullModel(selected.ollamaId, (p) => setWizardPullProgress(p));
1347
+ setWizardPullProgress({ status: "success", percent: 100 });
1348
+ await new Promise(r => setTimeout(r, 500));
1349
+ setWizardScreen(null);
1350
+ setWizardPullProgress(null);
1351
+ setWizardSelectedModel(null);
1352
+ addMsg("info", `✅ ${selected.name} installed! Connecting...`);
1353
+ await connectToProvider(true);
1354
+ } catch (err: any) {
1355
+ setWizardPullError(err.message);
1356
+ }
1357
+ })();
1358
+ }
1359
+ return;
1360
+ }
1361
+ if (wizardPullError && key.escape) {
1362
+ setWizardScreen("models");
1363
+ setWizardIndex(0);
1364
+ setWizardPullError(null);
1365
+ setWizardPullProgress(null);
1366
+ return;
1367
+ }
1368
+ return; // Ignore keys while pulling
1369
+ }
1370
+
1371
+ return;
1372
+ }
1373
+
1144
1374
  // Theme picker navigation
1145
1375
  if (themePicker) {
1146
1376
  const themeKeys = listThemes();
@@ -1602,6 +1832,101 @@ function App() {
1602
1832
  </Box>
1603
1833
  )}
1604
1834
 
1835
+ {/* ═══ SETUP WIZARD ═══ */}
1836
+ {wizardScreen === "connection" && (
1837
+ <Box flexDirection="column" borderStyle="single" borderColor={theme.colors.border} paddingX={1} marginBottom={0}>
1838
+ <Text bold color={theme.colors.secondary}>No LLM detected. How do you want to connect?</Text>
1839
+ <Text>{""}</Text>
1840
+ {[
1841
+ { key: "local", icon: "\uD83D\uDDA5\uFE0F", label: "Set up a local model", desc: "free, runs on your machine" },
1842
+ { key: "openrouter", icon: "\uD83C\uDF10", label: "OpenRouter", desc: "200+ cloud models, browser login" },
1843
+ { key: "apikey", icon: "\uD83D\uDD11", label: "Enter API key manually", desc: "" },
1844
+ { key: "existing", icon: "\u2699\uFE0F", label: "I already have a server running", desc: "" },
1845
+ ].map((item, i) => (
1846
+ <Text key={item.key}>
1847
+ {i === wizardIndex ? <Text color={theme.colors.suggestion} bold>{" \u25B8 "}</Text> : <Text>{" "}</Text>}
1848
+ <Text color={i === wizardIndex ? theme.colors.suggestion : theme.colors.primary} bold>{item.icon} {item.label}</Text>
1849
+ {item.desc ? <Text color={theme.colors.muted}>{" ("}{item.desc}{")"}</Text> : null}
1850
+ </Text>
1851
+ ))}
1852
+ <Text>{""}</Text>
1853
+ <Text dimColor>{" \u2191\u2193 navigate \u00B7 Enter to select"}</Text>
1854
+ </Box>
1855
+ )}
1856
+
1857
+ {wizardScreen === "models" && wizardHardware && (
1858
+ <Box flexDirection="column" borderStyle="single" borderColor={theme.colors.border} paddingX={1} marginBottom={0}>
1859
+ <Text bold color={theme.colors.secondary}>Your hardware:</Text>
1860
+ <Text color={theme.colors.muted}>{" CPU: "}{wizardHardware.cpu.name}{" ("}{wizardHardware.cpu.cores}{" cores)"}</Text>
1861
+ <Text color={theme.colors.muted}>{" RAM: "}{formatBytes(wizardHardware.ram)}</Text>
1862
+ {wizardHardware.gpu ? (
1863
+ <Text color={theme.colors.muted}>{" GPU: "}{wizardHardware.gpu.name}{wizardHardware.gpu.vram > 0 ? ` (${formatBytes(wizardHardware.gpu.vram)})` : ""}</Text>
1864
+ ) : (
1865
+ <Text color={theme.colors.muted}>{" GPU: none detected"}</Text>
1866
+ )}
1867
+ <Text>{""}</Text>
1868
+ <Text bold color={theme.colors.secondary}>Recommended models:</Text>
1869
+ <Text>{""}</Text>
1870
+ {wizardModels.map((m, i) => (
1871
+ <Text key={m.ollamaId}>
1872
+ {i === wizardIndex ? <Text color={theme.colors.suggestion} bold>{" \u25B8 "}</Text> : <Text>{" "}</Text>}
1873
+ <Text>{getFitIcon(m.fit)} </Text>
1874
+ <Text color={i === wizardIndex ? theme.colors.suggestion : theme.colors.primary} bold>{m.name}</Text>
1875
+ <Text color={theme.colors.muted}>{" ~"}{m.size}{" GB \u00B7 "}{m.quality === "best" ? "Best" : m.quality === "great" ? "Great" : "Good"}{" quality \u00B7 "}{m.speed}</Text>
1876
+ </Text>
1877
+ ))}
1878
+ {wizardModels.length === 0 && (
1879
+ <Text color={theme.colors.error}>{" No suitable models found for your hardware."}</Text>
1880
+ )}
1881
+ <Text>{""}</Text>
1882
+ <Text dimColor>{" \u2191\u2193 navigate \u00B7 Enter to install \u00B7 Esc back"}</Text>
1883
+ </Box>
1884
+ )}
1885
+
1886
+ {wizardScreen === "install-ollama" && (
1887
+ <Box flexDirection="column" borderStyle="single" borderColor={theme.colors.warning} paddingX={1} marginBottom={0}>
1888
+ <Text bold color={theme.colors.warning}>Ollama is required for local models.</Text>
1889
+ <Text>{""}</Text>
1890
+ <Text color={theme.colors.primary}>{" Install with: "}<Text bold>{getOllamaInstallCommand(wizardHardware?.os ?? "linux")}</Text></Text>
1891
+ <Text>{""}</Text>
1892
+ <Text dimColor>{" Run the command above, then press Enter to continue..."}</Text>
1893
+ <Text dimColor>{" Esc to go back"}</Text>
1894
+ </Box>
1895
+ )}
1896
+
1897
+ {wizardScreen === "pulling" && wizardSelectedModel && (
1898
+ <Box flexDirection="column" borderStyle="single" borderColor={theme.colors.border} paddingX={1} marginBottom={0}>
1899
+ {wizardPullError ? (
1900
+ <>
1901
+ <Text color={theme.colors.error} bold>{" \u274C Error: "}{wizardPullError}</Text>
1902
+ <Text>{""}</Text>
1903
+ <Text dimColor>{" Press Enter to retry \u00B7 Esc to go back"}</Text>
1904
+ </>
1905
+ ) : wizardPullProgress ? (
1906
+ <>
1907
+ <Text bold color={theme.colors.secondary}>{" Downloading "}{wizardSelectedModel.name}{"..."}</Text>
1908
+ {wizardPullProgress.status === "downloading" || wizardPullProgress.percent > 0 ? (
1909
+ <>
1910
+ <Text>
1911
+ {" "}
1912
+ <Text color={theme.colors.primary}>
1913
+ {"\u2588".repeat(Math.floor(wizardPullProgress.percent / 5))}
1914
+ {"\u2591".repeat(20 - Math.floor(wizardPullProgress.percent / 5))}
1915
+ </Text>
1916
+ {" "}<Text bold>{wizardPullProgress.percent}%</Text>
1917
+ {wizardPullProgress.completed != null && wizardPullProgress.total != null ? (
1918
+ <Text color={theme.colors.muted}>{" \u00B7 "}{formatBytes(wizardPullProgress.completed)}{" / "}{formatBytes(wizardPullProgress.total)}</Text>
1919
+ ) : null}
1920
+ </Text>
1921
+ </>
1922
+ ) : (
1923
+ <Text color={theme.colors.muted}>{" "}{wizardPullProgress.status}...</Text>
1924
+ )}
1925
+ </>
1926
+ ) : null}
1927
+ </Box>
1928
+ )}
1929
+
1605
1930
  {/* ═══ COMMAND SUGGESTIONS ═══ */}
1606
1931
  {showSuggestions && (
1607
1932
  <Box flexDirection="column" borderStyle="single" borderColor={theme.colors.muted} paddingX={1} marginBottom={0}>
@@ -0,0 +1,131 @@
1
+ import os from "os";
2
+ import { execSync } from "child_process";
3
+
4
/** Snapshot of the host machine's compute resources, used to recommend local models. */
export interface HardwareInfo {
    cpu: { name: string; cores: number; speed: number }; // speed in MHz (from os.cpus())
    ram: number; // total system memory in bytes
    gpu: { name: string; vram: number } | null; // vram in bytes, null if no GPU
    os: "macos" | "linux" | "windows";
    appleSilicon: boolean; // true when the CPU name matches an Apple M-series chip
}
11
+
12
+ function getOS(): "macos" | "linux" | "windows" {
13
+ switch (process.platform) {
14
+ case "darwin": return "macos";
15
+ case "win32": return "windows";
16
+ default: return "linux";
17
+ }
18
+ }
19
+
20
+ function getCPU(): { name: string; cores: number; speed: number } {
21
+ const cpus = os.cpus();
22
+ return {
23
+ name: cpus[0]?.model?.trim() ?? "Unknown CPU",
24
+ cores: cpus.length,
25
+ speed: cpus[0]?.speed ?? 0, // MHz
26
+ };
27
+ }
28
+
29
/**
 * Best-effort GPU detection via platform-specific CLI tools
 * (system_profiler / nvidia-smi / lspci / wmic).
 * Returns the first adapter's name and VRAM in bytes (0 when VRAM is
 * unknown), or null when nothing can be detected. Every probe is wrapped
 * in try/catch so a missing tool never throws out of this function.
 */
function getGPU(platform: "macos" | "linux" | "windows"): { name: string; vram: number } | null {
    try {
        if (platform === "macos") {
            // system_profiler's JSON output lists display adapters under SPDisplaysDataType.
            const raw = execSync("system_profiler SPDisplaysDataType -json", {
                encoding: "utf-8",
                timeout: 5000,
                stdio: ["pipe", "pipe", "pipe"],
            });
            const data = JSON.parse(raw);
            const displays = data?.SPDisplaysDataType;
            if (Array.isArray(displays) && displays.length > 0) {
                // Only the first adapter is considered.
                const gpu = displays[0];
                const name: string = gpu.sppci_model ?? gpu._name ?? "Unknown GPU";
                // On Apple Silicon, VRAM is shared (unified memory) — report total RAM
                const vramStr: string = gpu["spdisplays_vram"] ?? gpu["spdisplays_vram_shared"] ?? "";
                let vram = 0;
                if (vramStr) {
                    // e.g. "1536 MB" or "8 GB" — parsed into bytes.
                    const match = vramStr.match(/(\d+)\s*(GB|MB)/i);
                    if (match) {
                        vram = parseInt(match[1]) * (match[2].toUpperCase() === "GB" ? 1024 * 1024 * 1024 : 1024 * 1024);
                    }
                }
                // Apple Silicon unified memory — use total RAM as VRAM
                if (vram === 0 && name.toLowerCase().includes("apple")) {
                    vram = os.totalmem();
                }
                return { name, vram };
            }
        }

        if (platform === "linux") {
            // Try NVIDIA first
            try {
                const raw = execSync("nvidia-smi --query-gpu=name,memory.total --format=csv,noheader", {
                    encoding: "utf-8",
                    timeout: 5000,
                    stdio: ["pipe", "pipe", "pipe"],
                });
                // Only the first GPU line is used on multi-GPU systems.
                const line = raw.trim().split("\n")[0];
                if (line) {
                    const parts = line.split(",").map(s => s.trim());
                    const name = parts[0] ?? "NVIDIA GPU";
                    // memory.total is reported as e.g. "24576 MiB" — extract the number.
                    const memMatch = (parts[1] ?? "").match(/(\d+)/);
                    const vram = memMatch ? parseInt(memMatch[1]) * 1024 * 1024 : 0; // MiB to bytes
                    return { name, vram };
                }
            } catch {
                // No NVIDIA, try lspci
                try {
                    const raw = execSync("lspci | grep -i vga", {
                        encoding: "utf-8",
                        timeout: 5000,
                        stdio: ["pipe", "pipe", "pipe"],
                    });
                    const line = raw.trim().split("\n")[0];
                    if (line) {
                        // lspci line: "00:02.0 VGA compatible controller: <vendor …>";
                        // slice(2) drops the bus address and class, keeping the name.
                        const name = line.split(":").slice(2).join(":").trim() || "Unknown GPU";
                        // lspci does not report VRAM, so it is left at 0.
                        return { name, vram: 0 };
                    }
                } catch { /* no lspci */ }
            }
        }

        if (platform === "windows") {
            try {
                // NOTE(review): wmic is deprecated on recent Windows builds —
                // may need a PowerShell CIM fallback; verify on Windows 11.
                const raw = execSync("wmic path win32_VideoController get Name,AdapterRAM /format:csv", {
                    encoding: "utf-8",
                    timeout: 5000,
                    stdio: ["pipe", "pipe", "pipe"],
                });
                // Skip blank lines and the "Node,…" CSV header row.
                const lines = raw.trim().split("\n").filter(l => l.trim() && !l.startsWith("Node"));
                if (lines.length > 0) {
                    // Assumes column order Node,AdapterRAM,Name (wmic CSV sorts
                    // columns alphabetically) — TODO confirm on a Windows host.
                    const parts = lines[0].split(",");
                    const adapterRAM = parseInt(parts[1] ?? "0");
                    const name = parts[2]?.trim() ?? "Unknown GPU";
                    // AdapterRAM is used directly as a byte count.
                    return { name, vram: isNaN(adapterRAM) ? 0 : adapterRAM };
                }
            } catch { /* no wmic */ }
        }
    } catch {
        // GPU detection failed
    }
    return null;
}
113
+
114
+ export function detectHardware(): HardwareInfo {
115
+ const platform = getOS();
116
+ const cpu = getCPU();
117
+ const ram = os.totalmem();
118
+ const gpu = getGPU(platform);
119
+
120
+ // Detect Apple Silicon
121
+ const appleSilicon = platform === "macos" && /apple\s+m/i.test(cpu.name);
122
+
123
+ return { cpu, ram, gpu, os: platform, appleSilicon };
124
+ }
125
+
126
+ /** Format bytes to human-readable string */
127
+ export function formatBytes(bytes: number): string {
128
+ if (bytes >= 1024 * 1024 * 1024) return `${Math.round(bytes / (1024 * 1024 * 1024))} GB`;
129
+ if (bytes >= 1024 * 1024) return `${Math.round(bytes / (1024 * 1024))} MB`;
130
+ return `${Math.round(bytes / 1024)} KB`;
131
+ }