codemaxxing 0.3.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/index.tsx CHANGED
@@ -12,6 +12,10 @@ import { isGitRepo, getBranch, getStatus, getDiff, undoLastCommit } from "./util
12
12
  import { getTheme, listThemes, THEMES, DEFAULT_THEME, type Theme } from "./themes.js";
13
13
  import { PROVIDERS, getCredentials, openRouterOAuth, anthropicSetupToken, importCodexToken, importQwenToken, copilotDeviceFlow, saveApiKey } from "./utils/auth.js";
14
14
  import { listInstalledSkills, installSkill, removeSkill, getRegistrySkills, searchRegistry, createSkillScaffold, getActiveSkills, getActiveSkillCount } from "./utils/skills.js";
15
+ import { listServers, addServer, removeServer, getAllMCPTools, getConnectedServers } from "./utils/mcp.js";
16
+ import { detectHardware, formatBytes, type HardwareInfo } from "./utils/hardware.js";
17
+ import { getRecommendations, getFitIcon, type ScoredModel } from "./utils/models.js";
18
+ import { isOllamaInstalled, isOllamaRunning, getOllamaInstallCommand, startOllama, pullModel, type PullProgress } from "./utils/ollama.js";
15
19
 
16
20
  const VERSION = "0.1.9";
17
21
 
@@ -58,6 +62,11 @@ const SLASH_COMMANDS = [
58
62
  { cmd: "/lint", desc: "show auto-lint status" },
59
63
  { cmd: "/lint on", desc: "enable auto-lint" },
60
64
  { cmd: "/lint off", desc: "disable auto-lint" },
65
+ { cmd: "/mcp", desc: "show MCP servers" },
66
+ { cmd: "/mcp tools", desc: "list MCP tools" },
67
+ { cmd: "/mcp add", desc: "add MCP server" },
68
+ { cmd: "/mcp remove", desc: "remove MCP server" },
69
+ { cmd: "/mcp reconnect", desc: "reconnect MCP servers" },
61
70
  { cmd: "/quit", desc: "exit" },
62
71
  ];
63
72
 
@@ -166,6 +175,16 @@ function App() {
166
175
  resolve: (decision: "yes" | "no" | "always") => void;
167
176
  } | null>(null);
168
177
 
178
+ // ── Setup Wizard State ──
179
+ type WizardScreen = "connection" | "models" | "install-ollama" | "pulling" | null;
180
+ const [wizardScreen, setWizardScreen] = useState<WizardScreen>(null);
181
+ const [wizardIndex, setWizardIndex] = useState(0);
182
+ const [wizardHardware, setWizardHardware] = useState<HardwareInfo | null>(null);
183
+ const [wizardModels, setWizardModels] = useState<ScoredModel[]>([]);
184
+ const [wizardPullProgress, setWizardPullProgress] = useState<PullProgress | null>(null);
185
+ const [wizardPullError, setWizardPullError] = useState<string | null>(null);
186
+ const [wizardSelectedModel, setWizardSelectedModel] = useState<ScoredModel | null>(null);
187
+
169
188
  // Listen for paste events from stdin interceptor
170
189
  useEffect(() => {
171
190
  const handler = ({ content, lines }: { content: string; lines: number }) => {
@@ -204,10 +223,11 @@ function App() {
204
223
  setConnectionInfo([...info]);
205
224
  } else {
206
225
  info.push("✗ No local LLM server found.");
207
- info.push(" /connect — retry after starting LM Studio or Ollama");
208
- info.push(" /login — authenticate with a cloud provider");
209
226
  setConnectionInfo([...info]);
210
227
  setReady(true);
228
+ // Show the setup wizard on first run
229
+ setWizardScreen("connection");
230
+ setWizardIndex(0);
211
231
  return;
212
232
  }
213
233
  } else {
@@ -287,6 +307,9 @@ function App() {
287
307
  onLintResult: (file, errors) => {
288
308
  addMsg("info", `🔍 Lint errors in ${file}:\n${errors}`);
289
309
  },
310
+ onMCPStatus: (server, status) => {
311
+ addMsg("info", `🔌 MCP ${server}: ${status}`);
312
+ },
290
313
  contextCompressionThreshold: config.defaults.contextCompressionThreshold,
291
314
  onToolApproval: (name, args, diff) => {
292
315
  return new Promise((resolve) => {
@@ -306,6 +329,13 @@ function App() {
306
329
  setConnectionInfo([...info]);
307
330
  }
308
331
 
332
+ // Show MCP server count
333
+ const mcpCount = a.getMCPServerCount();
334
+ if (mcpCount > 0) {
335
+ info.push(`🔌 ${mcpCount} MCP server${mcpCount > 1 ? "s" : ""} connected`);
336
+ setConnectionInfo([...info]);
337
+ }
338
+
309
339
  setAgent(a);
310
340
  setModelName(provider.model);
311
341
  providerRef.current = { baseUrl: provider.baseUrl, apiKey: provider.apiKey };
@@ -419,6 +449,11 @@ function App() {
419
449
  " /lint — show auto-lint status & detected linter",
420
450
  " /lint on — enable auto-lint",
421
451
  " /lint off — disable auto-lint",
452
+ " /mcp — show MCP servers & status",
453
+ " /mcp tools — list all MCP tools",
454
+ " /mcp add — add MCP server to global config",
455
+ " /mcp remove — remove MCP server",
456
+ " /mcp reconnect — reconnect all MCP servers",
422
457
  " /quit — exit",
423
458
  ].join("\n"));
424
459
  return;
@@ -579,6 +614,71 @@ function App() {
579
614
  return;
580
615
  }
581
616
 
617
+ // ── MCP commands (partially work without agent) ──
618
+ if (trimmed === "/mcp" || trimmed === "/mcp list") {
619
+ const servers = listServers(process.cwd());
620
+ if (servers.length === 0) {
621
+ addMsg("info", "🔌 No MCP servers configured.\n Add one: /mcp add <name> <command> [args...]");
622
+ } else {
623
+ const lines = servers.map((s) => {
624
+ const status = s.connected ? `✔ connected (${s.toolCount} tools)` : "✗ not connected";
625
+ return ` ${s.connected ? "●" : "○"} ${s.name} [${s.source}] — ${s.command}\n ${status}`;
626
+ });
627
+ addMsg("info", `🔌 MCP Servers:\n${lines.join("\n")}`);
628
+ }
629
+ return;
630
+ }
631
+ if (trimmed === "/mcp tools") {
632
+ const servers = getConnectedServers();
633
+ if (servers.length === 0) {
634
+ addMsg("info", "🔌 No MCP servers connected.");
635
+ return;
636
+ }
637
+ const lines: string[] = [];
638
+ for (const server of servers) {
639
+ lines.push(`${server.name} (${server.tools.length} tools):`);
640
+ for (const tool of server.tools) {
641
+ lines.push(` • ${tool.name} — ${tool.description ?? "(no description)"}`);
642
+ }
643
+ }
644
+ addMsg("info", `🔌 MCP Tools:\n${lines.join("\n")}`);
645
+ return;
646
+ }
647
+ if (trimmed.startsWith("/mcp add ")) {
648
+ const parts = trimmed.replace("/mcp add ", "").trim().split(/\s+/);
649
+ if (parts.length < 2) {
650
+ addMsg("info", "Usage: /mcp add <name> <command> [args...]\n Example: /mcp add github npx -y @modelcontextprotocol/server-github");
651
+ return;
652
+ }
653
+ const [name, command, ...cmdArgs] = parts;
654
+ const result = addServer(name, { command, args: cmdArgs.length > 0 ? cmdArgs : undefined });
655
+ addMsg(result.ok ? "info" : "error", result.ok ? `✅ ${result.message}` : `✗ ${result.message}`);
656
+ return;
657
+ }
658
+ if (trimmed.startsWith("/mcp remove ")) {
659
+ const name = trimmed.replace("/mcp remove ", "").trim();
660
+ if (!name) {
661
+ addMsg("info", "Usage: /mcp remove <name>");
662
+ return;
663
+ }
664
+ const result = removeServer(name);
665
+ addMsg(result.ok ? "info" : "error", result.ok ? `✅ ${result.message}` : `✗ ${result.message}`);
666
+ return;
667
+ }
668
+ if (trimmed === "/mcp reconnect") {
669
+ if (!agent) {
670
+ addMsg("info", "⚠ No agent connected. Connect first.");
671
+ return;
672
+ }
673
+ addMsg("info", "🔌 Reconnecting MCP servers...");
674
+ await agent.reconnectMCP();
675
+ const count = agent.getMCPServerCount();
676
+ addMsg("info", count > 0
677
+ ? `✅ ${count} MCP server${count > 1 ? "s" : ""} reconnected.`
678
+ : "No MCP servers connected.");
679
+ return;
680
+ }
681
+
582
682
  // Commands below require an active LLM connection
583
683
  if (!agent) {
584
684
  addMsg("info", "⚠ No LLM connected. Use /login to authenticate with a provider, or start a local server.");
@@ -1055,6 +1155,222 @@ function App() {
1055
1155
  return;
1056
1156
  }
1057
1157
 
1158
+ // ── Setup Wizard Navigation ──
1159
+ if (wizardScreen) {
1160
+ if (wizardScreen === "connection") {
1161
+ const items = ["local", "openrouter", "apikey", "existing"];
1162
+ if (key.upArrow) {
1163
+ setWizardIndex((prev) => (prev - 1 + items.length) % items.length);
1164
+ return;
1165
+ }
1166
+ if (key.downArrow) {
1167
+ setWizardIndex((prev) => (prev + 1) % items.length);
1168
+ return;
1169
+ }
1170
+ if (key.escape) {
1171
+ setWizardScreen(null);
1172
+ return;
1173
+ }
1174
+ if (key.return) {
1175
+ const selected = items[wizardIndex];
1176
+ if (selected === "local") {
1177
+ // Scan hardware and show model picker
1178
+ const hw = detectHardware();
1179
+ setWizardHardware(hw);
1180
+ const recs = getRecommendations(hw).filter(m => m.fit !== "skip");
1181
+ setWizardModels(recs);
1182
+ setWizardScreen("models");
1183
+ setWizardIndex(0);
1184
+ } else if (selected === "openrouter") {
1185
+ setWizardScreen(null);
1186
+ addMsg("info", "Starting OpenRouter OAuth — opening browser...");
1187
+ setLoading(true);
1188
+ setSpinnerMsg("Waiting for authorization...");
1189
+ openRouterOAuth((msg: string) => addMsg("info", msg))
1190
+ .then(() => {
1191
+ addMsg("info", "✅ OpenRouter authenticated! Use /connect to connect.");
1192
+ setLoading(false);
1193
+ })
1194
+ .catch((err: any) => { addMsg("error", `OAuth failed: ${err.message}`); setLoading(false); });
1195
+ } else if (selected === "apikey") {
1196
+ setWizardScreen(null);
1197
+ setLoginPicker(true);
1198
+ setLoginPickerIndex(0);
1199
+ } else if (selected === "existing") {
1200
+ setWizardScreen(null);
1201
+ addMsg("info", "Start your LLM server, then type /connect to retry.");
1202
+ }
1203
+ return;
1204
+ }
1205
+ return;
1206
+ }
1207
+
1208
+ if (wizardScreen === "models") {
1209
+ const models = wizardModels;
1210
+ if (key.upArrow) {
1211
+ setWizardIndex((prev) => (prev - 1 + models.length) % models.length);
1212
+ return;
1213
+ }
1214
+ if (key.downArrow) {
1215
+ setWizardIndex((prev) => (prev + 1) % models.length);
1216
+ return;
1217
+ }
1218
+ if (key.escape) {
1219
+ setWizardScreen("connection");
1220
+ setWizardIndex(0);
1221
+ return;
1222
+ }
1223
+ if (key.return) {
1224
+ const selected = models[wizardIndex];
1225
+ if (selected) {
1226
+ setWizardSelectedModel(selected);
1227
+ // Check if Ollama is installed
1228
+ if (!isOllamaInstalled()) {
1229
+ setWizardScreen("install-ollama");
1230
+ } else {
1231
+ // Start pulling the model
1232
+ setWizardScreen("pulling");
1233
+ setWizardPullProgress({ status: "starting", percent: 0 });
1234
+ setWizardPullError(null);
1235
+
1236
+ (async () => {
1237
+ try {
1238
+ // Ensure ollama is running
1239
+ const running = await isOllamaRunning();
1240
+ if (!running) {
1241
+ setWizardPullProgress({ status: "Starting Ollama server...", percent: 0 });
1242
+ startOllama();
1243
+ // Wait for it to come up
1244
+ for (let i = 0; i < 15; i++) {
1245
+ await new Promise(r => setTimeout(r, 1000));
1246
+ if (await isOllamaRunning()) break;
1247
+ }
1248
+ if (!(await isOllamaRunning())) {
1249
+ setWizardPullError("Could not start Ollama server. Run 'ollama serve' manually, then press Enter.");
1250
+ return;
1251
+ }
1252
+ }
1253
+
1254
+ await pullModel(selected.ollamaId, (p) => {
1255
+ setWizardPullProgress(p);
1256
+ });
1257
+
1258
+ setWizardPullProgress({ status: "success", percent: 100 });
1259
+
1260
+ // Wait briefly then connect
1261
+ await new Promise(r => setTimeout(r, 500));
1262
+ setWizardScreen(null);
1263
+ setWizardPullProgress(null);
1264
+ setWizardSelectedModel(null);
1265
+ addMsg("info", `✅ ${selected.name} installed! Connecting...`);
1266
+ await connectToProvider(true);
1267
+ } catch (err: any) {
1268
+ setWizardPullError(err.message);
1269
+ }
1270
+ })();
1271
+ }
1272
+ }
1273
+ return;
1274
+ }
1275
+ return;
1276
+ }
1277
+
1278
+ if (wizardScreen === "install-ollama") {
1279
+ if (key.escape) {
1280
+ setWizardScreen("models");
1281
+ setWizardIndex(0);
1282
+ return;
1283
+ }
1284
+ if (key.return) {
1285
+ // User says they installed it — check and proceed
1286
+ if (isOllamaInstalled()) {
1287
+ const selected = wizardSelectedModel;
1288
+ if (selected) {
1289
+ setWizardScreen("pulling");
1290
+ setWizardPullProgress({ status: "starting", percent: 0 });
1291
+ setWizardPullError(null);
1292
+
1293
+ (async () => {
1294
+ try {
1295
+ const running = await isOllamaRunning();
1296
+ if (!running) {
1297
+ setWizardPullProgress({ status: "Starting Ollama server...", percent: 0 });
1298
+ startOllama();
1299
+ for (let i = 0; i < 15; i++) {
1300
+ await new Promise(r => setTimeout(r, 1000));
1301
+ if (await isOllamaRunning()) break;
1302
+ }
1303
+ if (!(await isOllamaRunning())) {
1304
+ setWizardPullError("Could not start Ollama server. Run 'ollama serve' manually, then press Enter.");
1305
+ return;
1306
+ }
1307
+ }
1308
+ await pullModel(selected.ollamaId, (p) => setWizardPullProgress(p));
1309
+ setWizardPullProgress({ status: "success", percent: 100 });
1310
+ await new Promise(r => setTimeout(r, 500));
1311
+ setWizardScreen(null);
1312
+ setWizardPullProgress(null);
1313
+ setWizardSelectedModel(null);
1314
+ addMsg("info", `✅ ${selected.name} installed! Connecting...`);
1315
+ await connectToProvider(true);
1316
+ } catch (err: any) {
1317
+ setWizardPullError(err.message);
1318
+ }
1319
+ })();
1320
+ }
1321
+ } else {
1322
+ addMsg("info", "Ollama not found yet. Install it and press Enter again.");
1323
+ }
1324
+ return;
1325
+ }
1326
+ return;
1327
+ }
1328
+
1329
+ if (wizardScreen === "pulling") {
1330
+ // Allow retry on error
1331
+ if (wizardPullError && key.return) {
1332
+ const selected = wizardSelectedModel;
1333
+ if (selected) {
1334
+ setWizardPullError(null);
1335
+ setWizardPullProgress({ status: "retrying", percent: 0 });
1336
+ (async () => {
1337
+ try {
1338
+ const running = await isOllamaRunning();
1339
+ if (!running) {
1340
+ startOllama();
1341
+ for (let i = 0; i < 15; i++) {
1342
+ await new Promise(r => setTimeout(r, 1000));
1343
+ if (await isOllamaRunning()) break;
1344
+ }
1345
+ }
1346
+ await pullModel(selected.ollamaId, (p) => setWizardPullProgress(p));
1347
+ setWizardPullProgress({ status: "success", percent: 100 });
1348
+ await new Promise(r => setTimeout(r, 500));
1349
+ setWizardScreen(null);
1350
+ setWizardPullProgress(null);
1351
+ setWizardSelectedModel(null);
1352
+ addMsg("info", `✅ ${selected.name} installed! Connecting...`);
1353
+ await connectToProvider(true);
1354
+ } catch (err: any) {
1355
+ setWizardPullError(err.message);
1356
+ }
1357
+ })();
1358
+ }
1359
+ return;
1360
+ }
1361
+ if (wizardPullError && key.escape) {
1362
+ setWizardScreen("models");
1363
+ setWizardIndex(0);
1364
+ setWizardPullError(null);
1365
+ setWizardPullProgress(null);
1366
+ return;
1367
+ }
1368
+ return; // Ignore keys while pulling
1369
+ }
1370
+
1371
+ return;
1372
+ }
1373
+
1058
1374
  // Theme picker navigation
1059
1375
  if (themePicker) {
1060
1376
  const themeKeys = listThemes();
@@ -1516,6 +1832,101 @@ function App() {
1516
1832
  </Box>
1517
1833
  )}
1518
1834
 
1835
+ {/* ═══ SETUP WIZARD ═══ */}
1836
+ {wizardScreen === "connection" && (
1837
+ <Box flexDirection="column" borderStyle="single" borderColor={theme.colors.border} paddingX={1} marginBottom={0}>
1838
+ <Text bold color={theme.colors.secondary}>No LLM detected. How do you want to connect?</Text>
1839
+ <Text>{""}</Text>
1840
+ {[
1841
+ { key: "local", icon: "\uD83D\uDDA5\uFE0F", label: "Set up a local model", desc: "free, runs on your machine" },
1842
+ { key: "openrouter", icon: "\uD83C\uDF10", label: "OpenRouter", desc: "200+ cloud models, browser login" },
1843
+ { key: "apikey", icon: "\uD83D\uDD11", label: "Enter API key manually", desc: "" },
1844
+ { key: "existing", icon: "\u2699\uFE0F", label: "I already have a server running", desc: "" },
1845
+ ].map((item, i) => (
1846
+ <Text key={item.key}>
1847
+ {i === wizardIndex ? <Text color={theme.colors.suggestion} bold>{" \u25B8 "}</Text> : <Text>{" "}</Text>}
1848
+ <Text color={i === wizardIndex ? theme.colors.suggestion : theme.colors.primary} bold>{item.icon} {item.label}</Text>
1849
+ {item.desc ? <Text color={theme.colors.muted}>{" ("}{item.desc}{")"}</Text> : null}
1850
+ </Text>
1851
+ ))}
1852
+ <Text>{""}</Text>
1853
+ <Text dimColor>{" \u2191\u2193 navigate \u00B7 Enter to select"}</Text>
1854
+ </Box>
1855
+ )}
1856
+
1857
+ {wizardScreen === "models" && wizardHardware && (
1858
+ <Box flexDirection="column" borderStyle="single" borderColor={theme.colors.border} paddingX={1} marginBottom={0}>
1859
+ <Text bold color={theme.colors.secondary}>Your hardware:</Text>
1860
+ <Text color={theme.colors.muted}>{" CPU: "}{wizardHardware.cpu.name}{" ("}{wizardHardware.cpu.cores}{" cores)"}</Text>
1861
+ <Text color={theme.colors.muted}>{" RAM: "}{formatBytes(wizardHardware.ram)}</Text>
1862
+ {wizardHardware.gpu ? (
1863
+ <Text color={theme.colors.muted}>{" GPU: "}{wizardHardware.gpu.name}{wizardHardware.gpu.vram > 0 ? ` (${formatBytes(wizardHardware.gpu.vram)})` : ""}</Text>
1864
+ ) : (
1865
+ <Text color={theme.colors.muted}>{" GPU: none detected"}</Text>
1866
+ )}
1867
+ <Text>{""}</Text>
1868
+ <Text bold color={theme.colors.secondary}>Recommended models:</Text>
1869
+ <Text>{""}</Text>
1870
+ {wizardModels.map((m, i) => (
1871
+ <Text key={m.ollamaId}>
1872
+ {i === wizardIndex ? <Text color={theme.colors.suggestion} bold>{" \u25B8 "}</Text> : <Text>{" "}</Text>}
1873
+ <Text>{getFitIcon(m.fit)} </Text>
1874
+ <Text color={i === wizardIndex ? theme.colors.suggestion : theme.colors.primary} bold>{m.name}</Text>
1875
+ <Text color={theme.colors.muted}>{" ~"}{m.size}{" GB \u00B7 "}{m.quality === "best" ? "Best" : m.quality === "great" ? "Great" : "Good"}{" quality \u00B7 "}{m.speed}</Text>
1876
+ </Text>
1877
+ ))}
1878
+ {wizardModels.length === 0 && (
1879
+ <Text color={theme.colors.error}>{" No suitable models found for your hardware."}</Text>
1880
+ )}
1881
+ <Text>{""}</Text>
1882
+ <Text dimColor>{" \u2191\u2193 navigate \u00B7 Enter to install \u00B7 Esc back"}</Text>
1883
+ </Box>
1884
+ )}
1885
+
1886
+ {wizardScreen === "install-ollama" && (
1887
+ <Box flexDirection="column" borderStyle="single" borderColor={theme.colors.warning} paddingX={1} marginBottom={0}>
1888
+ <Text bold color={theme.colors.warning}>Ollama is required for local models.</Text>
1889
+ <Text>{""}</Text>
1890
+ <Text color={theme.colors.primary}>{" Install with: "}<Text bold>{getOllamaInstallCommand(wizardHardware?.os ?? "linux")}</Text></Text>
1891
+ <Text>{""}</Text>
1892
+ <Text dimColor>{" Run the command above, then press Enter to continue..."}</Text>
1893
+ <Text dimColor>{" Esc to go back"}</Text>
1894
+ </Box>
1895
+ )}
1896
+
1897
+ {wizardScreen === "pulling" && wizardSelectedModel && (
1898
+ <Box flexDirection="column" borderStyle="single" borderColor={theme.colors.border} paddingX={1} marginBottom={0}>
1899
+ {wizardPullError ? (
1900
+ <>
1901
+ <Text color={theme.colors.error} bold>{" \u274C Error: "}{wizardPullError}</Text>
1902
+ <Text>{""}</Text>
1903
+ <Text dimColor>{" Press Enter to retry \u00B7 Esc to go back"}</Text>
1904
+ </>
1905
+ ) : wizardPullProgress ? (
1906
+ <>
1907
+ <Text bold color={theme.colors.secondary}>{" Downloading "}{wizardSelectedModel.name}{"..."}</Text>
1908
+ {wizardPullProgress.status === "downloading" || wizardPullProgress.percent > 0 ? (
1909
+ <>
1910
+ <Text>
1911
+ {" "}
1912
+ <Text color={theme.colors.primary}>
1913
+ {"\u2588".repeat(Math.floor(wizardPullProgress.percent / 5))}
1914
+ {"\u2591".repeat(20 - Math.floor(wizardPullProgress.percent / 5))}
1915
+ </Text>
1916
+ {" "}<Text bold>{wizardPullProgress.percent}%</Text>
1917
+ {wizardPullProgress.completed != null && wizardPullProgress.total != null ? (
1918
+ <Text color={theme.colors.muted}>{" \u00B7 "}{formatBytes(wizardPullProgress.completed)}{" / "}{formatBytes(wizardPullProgress.total)}</Text>
1919
+ ) : null}
1920
+ </Text>
1921
+ </>
1922
+ ) : (
1923
+ <Text color={theme.colors.muted}>{" "}{wizardPullProgress.status}...</Text>
1924
+ )}
1925
+ </>
1926
+ ) : null}
1927
+ </Box>
1928
+ )}
1929
+
1519
1930
  {/* ═══ COMMAND SUGGESTIONS ═══ */}
1520
1931
  {showSuggestions && (
1521
1932
  <Box flexDirection="column" borderStyle="single" borderColor={theme.colors.muted} paddingX={1} marginBottom={0}>
@@ -0,0 +1,131 @@
1
+ import os from "os";
2
+ import { execSync } from "child_process";
3
+
4
/**
 * Snapshot of the local machine's hardware, produced by detectHardware().
 * Consumed by the model-recommendation logic to pick local LLMs that fit.
 */
export interface HardwareInfo {
  // CPU model string, logical core count, and clock speed in MHz (all from os.cpus()).
  cpu: { name: string; cores: number; speed: number };
  ram: number; // total system memory in bytes (os.totalmem())
  gpu: { name: string; vram: number } | null; // vram in bytes, null if no GPU
  // OS family derived from process.platform; anything not darwin/win32 maps to "linux".
  os: "macos" | "linux" | "windows";
  // True when running macOS on an Apple M-series CPU (unified memory — VRAM == RAM).
  appleSilicon: boolean;
}
11
+
12
+ function getOS(): "macos" | "linux" | "windows" {
13
+ switch (process.platform) {
14
+ case "darwin": return "macos";
15
+ case "win32": return "windows";
16
+ default: return "linux";
17
+ }
18
+ }
19
+
20
+ function getCPU(): { name: string; cores: number; speed: number } {
21
+ const cpus = os.cpus();
22
+ return {
23
+ name: cpus[0]?.model?.trim() ?? "Unknown CPU",
24
+ cores: cpus.length,
25
+ speed: cpus[0]?.speed ?? 0, // MHz
26
+ };
27
+ }
28
+
29
/**
 * Best-effort GPU detection via platform CLI tools.
 *
 * Strategy per platform:
 *  - macos:   `system_profiler SPDisplaysDataType -json`
 *  - linux:   `nvidia-smi` first, falling back to `lspci | grep -i vga`
 *  - windows: `wmic path win32_VideoController`
 *
 * Every external call has a 5s timeout and is wrapped in try/catch; any
 * failure (tool missing, parse error, timeout) yields null rather than
 * throwing. vram is in bytes; 0 means "GPU found but VRAM unknown".
 */
function getGPU(platform: "macos" | "linux" | "windows"): { name: string; vram: number } | null {
  try {
    if (platform === "macos") {
      const raw = execSync("system_profiler SPDisplaysDataType -json", {
        encoding: "utf-8",
        timeout: 5000,
        stdio: ["pipe", "pipe", "pipe"],
      });
      const data = JSON.parse(raw);
      const displays = data?.SPDisplaysDataType;
      if (Array.isArray(displays) && displays.length > 0) {
        // Only the first display adapter is considered.
        const gpu = displays[0];
        const name: string = gpu.sppci_model ?? gpu._name ?? "Unknown GPU";
        // On Apple Silicon, VRAM is shared (unified memory) — report total RAM
        const vramStr: string = gpu["spdisplays_vram"] ?? gpu["spdisplays_vram_shared"] ?? "";
        let vram = 0;
        if (vramStr) {
          // system_profiler reports VRAM as a human string, e.g. "8 GB" or "1536 MB".
          const match = vramStr.match(/(\d+)\s*(GB|MB)/i);
          if (match) {
            vram = parseInt(match[1]) * (match[2].toUpperCase() === "GB" ? 1024 * 1024 * 1024 : 1024 * 1024);
          }
        }
        // Apple Silicon unified memory — use total RAM as VRAM
        if (vram === 0 && name.toLowerCase().includes("apple")) {
          vram = os.totalmem();
        }
        return { name, vram };
      }
    }

    if (platform === "linux") {
      // Try NVIDIA first
      try {
        const raw = execSync("nvidia-smi --query-gpu=name,memory.total --format=csv,noheader", {
          encoding: "utf-8",
          timeout: 5000,
          stdio: ["pipe", "pipe", "pipe"],
        });
        // Only the first GPU line is used on multi-GPU machines.
        const line = raw.trim().split("\n")[0];
        if (line) {
          const parts = line.split(",").map(s => s.trim());
          const name = parts[0] ?? "NVIDIA GPU";
          const memMatch = (parts[1] ?? "").match(/(\d+)/);
          const vram = memMatch ? parseInt(memMatch[1]) * 1024 * 1024 : 0; // MiB to bytes
          return { name, vram };
        }
      } catch {
        // No NVIDIA, try lspci
        try {
          // Pipe works because execSync runs the command through a shell.
          const raw = execSync("lspci | grep -i vga", {
            encoding: "utf-8",
            timeout: 5000,
            stdio: ["pipe", "pipe", "pipe"],
          });
          const line = raw.trim().split("\n")[0];
          if (line) {
            // lspci line shape: "<slot> VGA compatible controller: <vendor/model>".
            // Slice past the first two colon-separated fields to keep the model text.
            const name = line.split(":").slice(2).join(":").trim() || "Unknown GPU";
            // lspci does not expose VRAM; 0 signals "unknown".
            return { name, vram: 0 };
          }
        } catch { /* no lspci */ }
      }
    }

    if (platform === "windows") {
      try {
        const raw = execSync("wmic path win32_VideoController get Name,AdapterRAM /format:csv", {
          encoding: "utf-8",
          timeout: 5000,
          stdio: ["pipe", "pipe", "pipe"],
        });
        // Drop blank lines and the "Node,..." CSV header.
        const lines = raw.trim().split("\n").filter(l => l.trim() && !l.startsWith("Node"));
        if (lines.length > 0) {
          // wmic /format:csv emits columns alphabetically after Node, so the
          // expected order is Node,AdapterRAM,Name — hence parts[1]=RAM, parts[2]=name.
          // NOTE(review): AdapterRAM is a 32-bit value and under-reports GPUs with
          // >4 GB VRAM — verify against a large-VRAM Windows machine.
          const parts = lines[0].split(",");
          const adapterRAM = parseInt(parts[1] ?? "0");
          const name = parts[2]?.trim() ?? "Unknown GPU";
          return { name, vram: isNaN(adapterRAM) ? 0 : adapterRAM };
        }
      } catch { /* no wmic */ }
    }
  } catch {
    // GPU detection failed
  }
  return null;
}
113
+
114
+ export function detectHardware(): HardwareInfo {
115
+ const platform = getOS();
116
+ const cpu = getCPU();
117
+ const ram = os.totalmem();
118
+ const gpu = getGPU(platform);
119
+
120
+ // Detect Apple Silicon
121
+ const appleSilicon = platform === "macos" && /apple\s+m/i.test(cpu.name);
122
+
123
+ return { cpu, ram, gpu, os: platform, appleSilicon };
124
+ }
125
+
126
+ /** Format bytes to human-readable string */
127
+ export function formatBytes(bytes: number): string {
128
+ if (bytes >= 1024 * 1024 * 1024) return `${Math.round(bytes / (1024 * 1024 * 1024))} GB`;
129
+ if (bytes >= 1024 * 1024) return `${Math.round(bytes / (1024 * 1024))} MB`;
130
+ return `${Math.round(bytes / 1024)} KB`;
131
+ }