loclaude 0.0.1-alpha.2 → 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -277,6 +277,76 @@ var require_bytes = __commonJS((exports, module) => {
  }
  });
 
+ // ../../node_modules/.bun/picocolors@1.1.1/node_modules/picocolors/picocolors.js
+ var require_picocolors = __commonJS((exports, module) => {
+ var p = process || {};
+ var argv = p.argv || [];
+ var env = p.env || {};
+ var isColorSupported = !(!!env.NO_COLOR || argv.includes("--no-color")) && (!!env.FORCE_COLOR || argv.includes("--color") || p.platform === "win32" || (p.stdout || {}).isTTY && env.TERM !== "dumb" || !!env.CI);
+ var formatter = (open, close, replace = open) => (input) => {
+ let string = "" + input, index = string.indexOf(close, open.length);
+ return ~index ? open + replaceClose(string, close, replace, index) + close : open + string + close;
+ };
+ var replaceClose = (string, close, replace, index) => {
+ let result = "", cursor = 0;
+ do {
+ result += string.substring(cursor, index) + replace;
+ cursor = index + close.length;
+ index = string.indexOf(close, cursor);
+ } while (~index);
+ return result + string.substring(cursor);
+ };
+ var createColors = (enabled = isColorSupported) => {
+ let f = enabled ? formatter : () => String;
+ return {
+ isColorSupported: enabled,
+ reset: f("\x1B[0m", "\x1B[0m"),
+ bold: f("\x1B[1m", "\x1B[22m", "\x1B[22m\x1B[1m"),
+ dim: f("\x1B[2m", "\x1B[22m", "\x1B[22m\x1B[2m"),
+ italic: f("\x1B[3m", "\x1B[23m"),
+ underline: f("\x1B[4m", "\x1B[24m"),
+ inverse: f("\x1B[7m", "\x1B[27m"),
+ hidden: f("\x1B[8m", "\x1B[28m"),
+ strikethrough: f("\x1B[9m", "\x1B[29m"),
+ black: f("\x1B[30m", "\x1B[39m"),
+ red: f("\x1B[31m", "\x1B[39m"),
+ green: f("\x1B[32m", "\x1B[39m"),
+ yellow: f("\x1B[33m", "\x1B[39m"),
+ blue: f("\x1B[34m", "\x1B[39m"),
+ magenta: f("\x1B[35m", "\x1B[39m"),
+ cyan: f("\x1B[36m", "\x1B[39m"),
+ white: f("\x1B[37m", "\x1B[39m"),
+ gray: f("\x1B[90m", "\x1B[39m"),
+ bgBlack: f("\x1B[40m", "\x1B[49m"),
+ bgRed: f("\x1B[41m", "\x1B[49m"),
+ bgGreen: f("\x1B[42m", "\x1B[49m"),
+ bgYellow: f("\x1B[43m", "\x1B[49m"),
+ bgBlue: f("\x1B[44m", "\x1B[49m"),
+ bgMagenta: f("\x1B[45m", "\x1B[49m"),
+ bgCyan: f("\x1B[46m", "\x1B[49m"),
+ bgWhite: f("\x1B[47m", "\x1B[49m"),
+ blackBright: f("\x1B[90m", "\x1B[39m"),
+ redBright: f("\x1B[91m", "\x1B[39m"),
+ greenBright: f("\x1B[92m", "\x1B[39m"),
+ yellowBright: f("\x1B[93m", "\x1B[39m"),
+ blueBright: f("\x1B[94m", "\x1B[39m"),
+ magentaBright: f("\x1B[95m", "\x1B[39m"),
+ cyanBright: f("\x1B[96m", "\x1B[39m"),
+ whiteBright: f("\x1B[97m", "\x1B[39m"),
+ bgBlackBright: f("\x1B[100m", "\x1B[49m"),
+ bgRedBright: f("\x1B[101m", "\x1B[49m"),
+ bgGreenBright: f("\x1B[102m", "\x1B[49m"),
+ bgYellowBright: f("\x1B[103m", "\x1B[49m"),
+ bgBlueBright: f("\x1B[104m", "\x1B[49m"),
+ bgMagentaBright: f("\x1B[105m", "\x1B[49m"),
+ bgCyanBright: f("\x1B[106m", "\x1B[49m"),
+ bgWhiteBright: f("\x1B[107m", "\x1B[49m")
+ };
+ };
+ module.exports = createColors();
+ module.exports.createColors = createColors;
+ });
+
  // ../../node_modules/.bun/cac@6.7.14/node_modules/cac/dist/index.mjs
  import { EventEmitter } from "events";
  function toArr(any) {
@@ -1019,13 +1089,13 @@ function getClaudeExtraArgs() {
  var OLLAMA_URL = getOllamaUrl();
  var DEFAULT_MODEL = getDefaultModel();
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/key.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/key.js
  var isUpKey = (key, keybindings = []) => key.name === "up" || keybindings.includes("vim") && key.name === "k" || keybindings.includes("emacs") && key.ctrl && key.name === "p";
  var isDownKey = (key, keybindings = []) => key.name === "down" || keybindings.includes("vim") && key.name === "j" || keybindings.includes("emacs") && key.ctrl && key.name === "n";
  var isBackspaceKey = (key) => key.name === "backspace";
  var isNumberKey = (key) => "1234567890".includes(key.name);
  var isEnterKey = (key) => key.name === "enter" || key.name === "return";
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/errors.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/errors.js
  class AbortPromptError extends Error {
  name = "AbortPromptError";
  message = "Prompt was aborted";
@@ -1051,10 +1121,10 @@ class HookError extends Error {
  class ValidationError extends Error {
  name = "ValidationError";
  }
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/use-state.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/use-state.js
  import { AsyncResource as AsyncResource2 } from "node:async_hooks";
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/hook-engine.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/hook-engine.js
  import { AsyncLocalStorage, AsyncResource } from "node:async_hooks";
  var hookStorage = new AsyncLocalStorage;
  function createStore(rl) {
@@ -1159,7 +1229,7 @@ var effectScheduler = {
  }
  };
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/use-state.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/use-state.js
  function useState(defaultValue) {
  return withPointer((pointer) => {
  const setState = AsyncResource2.bind(function setState(newValue) {
@@ -1177,7 +1247,7 @@ function useState(defaultValue) {
  });
  }
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/use-effect.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/use-effect.js
  function useEffect(cb, depArray) {
  withPointer((pointer) => {
  const oldDeps = pointer.get();
@@ -1189,7 +1259,7 @@ function useEffect(cb, depArray) {
  });
  }
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/theme.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/theme.js
  import { styleText } from "node:util";
 
  // ../../node_modules/.bun/@inquirer+figures@2.0.3/node_modules/@inquirer/figures/dist/index.js
@@ -1481,7 +1551,7 @@ var figures = shouldUseMain ? mainSymbols : fallbackSymbols;
  var dist_default2 = figures;
  var replacements = Object.entries(specialMainSymbols);
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/theme.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/theme.js
  var defaultTheme = {
  prefix: {
  idle: styleText("blue", "?"),
@@ -1502,7 +1572,7 @@ var defaultTheme = {
  }
  };
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/make-theme.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/make-theme.js
  function isPlainObject(value) {
  if (typeof value !== "object" || value === null)
  return false;
@@ -1530,7 +1600,7 @@ function makeTheme(...themes) {
  return deepMerge2(...themesToMerge);
  }
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/use-prefix.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/use-prefix.js
  function usePrefix({ status = "idle", theme }) {
  const [showLoader, setShowLoader] = useState(false);
  const [tick, setTick] = useState(0);
@@ -1560,7 +1630,7 @@ function usePrefix({ status = "idle", theme }) {
  const iconName = status === "loading" ? "idle" : status;
  return typeof prefix === "string" ? prefix : prefix[iconName] ?? prefix["idle"];
  }
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/use-memo.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/use-memo.js
  function useMemo(fn, dependencies) {
  return withPointer((pointer) => {
  const prev = pointer.get();
@@ -1572,11 +1642,11 @@ function useMemo(fn, dependencies) {
  return prev.value;
  });
  }
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/use-ref.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/use-ref.js
  function useRef(val) {
  return useState({ current: val })[0];
  }
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/use-keypress.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/use-keypress.js
  function useKeypress(userHandler) {
  const signal = useRef(userHandler);
  signal.current = userHandler;
@@ -1594,7 +1664,7 @@ function useKeypress(userHandler) {
  };
  }, []);
  }
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/utils.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/utils.js
  var import_cli_width = __toESM(require_cli_width(), 1);
 
  // ../../node_modules/.bun/ansi-regex@6.2.2/node_modules/ansi-regex/index.js
@@ -2028,7 +2098,7 @@ function wrapAnsi(string, columns, options) {
  `);
  }
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/utils.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/utils.js
  function breakLines(content, width) {
  return content.split(`
  `).flatMap((line) => wrapAnsi(line, width, { trim: false, hard: true }).split(`
@@ -2039,7 +2109,7 @@ function readlineWidth() {
  return import_cli_width.default({ defaultWidth: 80, output: readline().output });
  }
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/pagination/use-pagination.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/pagination/use-pagination.js
  function usePointerPosition({ active, renderedItems, pageSize, loop }) {
  const state = useRef({
  lastPointer: active,
@@ -2105,7 +2175,7 @@ function usePagination({ items, active, renderItem, pageSize, loop = true }) {
  return pageBuffer.filter((line) => typeof line === "string").join(`
  `);
  }
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/create-prompt.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/create-prompt.js
  var import_mute_stream = __toESM(require_lib(), 1);
  import * as readline2 from "node:readline";
  import { AsyncResource as AsyncResource3 } from "node:async_hooks";
@@ -2318,7 +2388,7 @@ var {
  unload
  } = signalExitWrap(processOk(process3) ? new SignalExit(process3) : new SignalExitFallback);
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/screen-manager.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/screen-manager.js
  import { stripVTControlCharacters } from "node:util";
 
  // ../../node_modules/.bun/@inquirer+ansi@2.0.3/node_modules/@inquirer/ansi/dist/index.js
@@ -2337,7 +2407,7 @@ var cursorTo = (x, y) => {
  var eraseLine = ESC + "2K";
  var eraseLines = (lines) => lines > 0 ? (eraseLine + cursorUp(1)).repeat(lines - 1) + eraseLine + cursorLeft : "";
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/screen-manager.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/screen-manager.js
  var height = (content) => content.split(`
  `).length;
  var lastLine = (content) => content.split(`
@@ -2402,7 +2472,7 @@ class ScreenManager {
  }
  }
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/promise-polyfill.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/promise-polyfill.js
  class PromisePolyfill extends Promise {
  static withResolver() {
  let resolve;
@@ -2415,7 +2485,7 @@ class PromisePolyfill extends Promise {
  }
  }
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/create-prompt.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/create-prompt.js
  function getCallSites() {
  const _prepareStackTrace = Error.prepareStackTrace;
  let result = [];
@@ -2501,7 +2571,7 @@ function createPrompt(view) {
  };
  return prompt;
  }
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/Separator.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/Separator.js
  import { styleText as styleText2 } from "node:util";
  class Separator {
  separator = styleText2("dim", Array.from({ length: 15 }).join(dist_default2.line));
@@ -2515,7 +2585,7 @@ class Separator {
  return Boolean(choice && typeof choice === "object" && "type" in choice && choice.type === "separator");
  }
  }
- // ../../node_modules/.bun/@inquirer+select@5.0.4+c195ea72dffa657e/node_modules/@inquirer/select/dist/index.js
+ // ../../node_modules/.bun/@inquirer+select@5.0.4+b219b5910764fa5c/node_modules/@inquirer/select/dist/index.js
  import { styleText as styleText3 } from "node:util";
  var selectTheme = {
  icon: { cursor: dist_default2.pointer },
@@ -2674,15 +2744,67 @@ var dist_default3 = createPrompt((config, done) => {
  // lib/utils.ts
  var import_bytes = __toESM(require_bytes(), 1);
 
+ // lib/output.ts
+ var import_picocolors = __toESM(require_picocolors(), 1);
+ var brand = (text) => import_picocolors.default.cyan(import_picocolors.default.bold(text));
+ var success = (text) => `${import_picocolors.default.green("✓")} ${text}`;
+ var warn = (text) => `${import_picocolors.default.yellow("⚠")} ${text}`;
+ var error = (text) => `${import_picocolors.default.red("✗")} ${text}`;
+ var info = (text) => `${import_picocolors.default.cyan("ℹ")} ${text}`;
+ var dim = (text) => import_picocolors.default.dim(text);
+ var green = (text) => import_picocolors.default.green(text);
+ var yellow = (text) => import_picocolors.default.yellow(text);
+ var red = (text) => import_picocolors.default.red(text);
+ var cyan = (text) => import_picocolors.default.cyan(text);
+ var magenta = (text) => import_picocolors.default.magenta(text);
+ function header(text) {
+ console.log("");
+ console.log(brand(` ${text}`));
+ console.log(import_picocolors.default.dim(" " + "─".repeat(text.length + 2)));
+ }
+ function labelValue(label, value) {
+ console.log(` ${import_picocolors.default.dim(label + ":")} ${value}`);
+ }
+ function statusLine(status, name, message, extra) {
+ const icons = { ok: "✓", warning: "⚠", error: "✗" };
+ const colors = { ok: import_picocolors.default.green, warning: import_picocolors.default.yellow, error: import_picocolors.default.red };
+ let line = `${colors[status](icons[status])} ${name}: ${message}`;
+ if (extra) {
+ line += ` ${import_picocolors.default.dim(`(${extra})`)}`;
+ }
+ return line;
+ }
+ function tableRow(columns, widths) {
+ return columns.map((col, i) => {
+ const width = widths[i] || col.length;
+ return col.padEnd(width);
+ }).join(" ");
+ }
+ function tableHeader(columns, widths) {
+ const headerRow = tableRow(columns.map((c) => import_picocolors.default.bold(c)), widths);
+ const underlineRow = widths.map((w) => "─".repeat(w)).join(" ");
+ console.log(headerRow);
+ console.log(import_picocolors.default.dim(underlineRow));
+ }
+ function url(urlStr) {
+ return import_picocolors.default.underline(import_picocolors.default.cyan(urlStr));
+ }
+ function cmd(command) {
+ return import_picocolors.default.cyan(command);
+ }
+ function file(filePath) {
+ return import_picocolors.default.magenta(filePath);
+ }
+
  // lib/spawn.ts
- async function spawn(cmd, opts = {}) {
- const command = cmd[0];
- const args = cmd.slice(1);
+ async function spawn(cmd2, opts = {}) {
+ const command = cmd2[0];
+ const args = cmd2.slice(1);
  if (command === undefined) {
  throw new Error("No command provided");
  }
  if (typeof Bun !== "undefined") {
- const proc = Bun.spawn(cmd, {
+ const proc = Bun.spawn(cmd2, {
  env: opts.env ?? process.env,
  cwd: opts.cwd ?? process.cwd(),
  stdin: opts.stdin ?? "inherit",
@@ -2702,14 +2824,14 @@ async function spawn(cmd, opts = {}) {
  });
  }
  }
- async function spawnCapture(cmd, opts = {}) {
- const command = cmd[0];
- const args = cmd.slice(1);
+ async function spawnCapture(cmd2, opts = {}) {
+ const command = cmd2[0];
+ const args = cmd2.slice(1);
  if (command === undefined) {
  throw new Error("No command provided");
  }
  if (typeof Bun !== "undefined") {
- const proc = Bun.spawn(cmd, {
+ const proc = Bun.spawn(cmd2, {
  env: opts.env ?? process.env,
  cwd: opts.cwd,
  stdin: opts.stdin ?? "ignore",
@@ -2744,17 +2866,17 @@ async function spawnCapture(cmd, opts = {}) {
  });
  }
  }
- async function commandExists(cmd) {
+ async function commandExists(cmd2) {
  try {
- const result = await spawnCapture(process.platform === "win32" ? ["where", cmd] : ["which", cmd]);
+ const result = await spawnCapture(process.platform === "win32" ? ["where", cmd2] : ["which", cmd2]);
  return result.exitCode === 0;
  } catch {
  return false;
  }
  }
- async function getCommandVersion(cmd) {
+ async function getCommandVersion(cmd2) {
  try {
- const result = await spawnCapture([cmd, "--version"]);
+ const result = await spawnCapture([cmd2, "--version"]);
  if (result.exitCode === 0 && result.stdout) {
  return result.stdout.trim().split(`
  `)[0] ?? null;
@@ -2775,33 +2897,100 @@ async function fetchOllamaModels() {
  const data = await response.json();
  return data.models ?? [];
  }
+ async function fetchRunningModels() {
+ const ollamaUrl = getOllamaUrl();
+ try {
+ const response = await fetch(`${ollamaUrl}/api/ps`, {
+ signal: AbortSignal.timeout(5000)
+ });
+ if (!response.ok) {
+ return [];
+ }
+ const data = await response.json();
+ return data.models ?? [];
+ } catch (error2) {
+ return [];
+ }
+ }
+ async function isModelLoaded(modelName) {
+ const runningModels = await fetchRunningModels();
+ return runningModels.some((m) => m.model === modelName || m.name === modelName || m.model.startsWith(modelName + ":") || modelName.startsWith(m.model));
+ }
+ async function loadModel(modelName, keepAlive = "10m") {
+ const ollamaUrl = getOllamaUrl();
+ const response = await fetch(`${ollamaUrl}/api/generate`, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ model: modelName,
+ prompt: "",
+ stream: false,
+ keep_alive: keepAlive
+ })
+ });
+ if (!response.ok) {
+ throw new Error(`Failed to load model: ${response.statusText}`);
+ }
+ await response.json();
+ }
+ async function ensureModelLoaded(modelName) {
+ const isLoaded = await isModelLoaded(modelName);
+ if (isLoaded) {
+ console.log(dim(` Model ${magenta(modelName)} is already loaded`));
+ return;
+ }
+ console.log(info(`Loading model ${magenta(modelName)}...`));
+ console.log(dim(" This may take a moment on first run"));
+ try {
+ await loadModel(modelName, "10m");
+ console.log(success(`Model ${magenta(modelName)} loaded (keep_alive: 10m)`));
+ } catch (error2) {
+ console.log(warn(`Could not pre-load model (will load on first request)`));
+ console.log(dim(` ${error2 instanceof Error ? error2.message : "Unknown error"}`));
+ }
+ }
  async function selectModelInteractively() {
  const ollamaUrl = getOllamaUrl();
  let models;
  try {
  models = await fetchOllamaModels();
- } catch (error) {
- console.error("Error: Could not connect to Ollama at", ollamaUrl);
- console.error("Make sure Ollama is running: loclaude docker-up");
+ } catch (error2) {
+ console.log(warn(`Could not connect to Ollama at ${ollamaUrl}`));
+ console.log(dim(" Make sure Ollama is running: loclaude docker-up"));
  process.exit(1);
  }
  if (models.length === 0) {
- console.error("Error: No models found in Ollama.");
- console.error("Pull a model first: loclaude models-pull <model-name>");
+ console.log(warn("No models found in Ollama."));
+ console.log(dim(" Pull a model first: loclaude models-pull <model-name>"));
  process.exit(1);
  }
+ const runningModels = await fetchRunningModels();
+ const loadedModelNames = new Set(runningModels.map((m) => m.model));
  const selected = await dist_default3({
  message: "Select a model",
- choices: models.map((model) => ({
- name: `${model.name} (${import_bytes.default(model.size)})`,
- value: model.name
- }))
+ choices: models.map((model) => {
+ const isLoaded = loadedModelNames.has(model.name);
+ const loadedIndicator = isLoaded ? " [loaded]" : "";
+ return {
+ name: `${model.name} (${import_bytes.default(model.size)})${loadedIndicator}`,
+ value: model.name
+ };
+ })
  });
  return selected;
  }
  async function launchClaude(model, passthroughArgs) {
  const ollamaUrl = getOllamaUrl();
  const extraArgs = getClaudeExtraArgs();
+ console.log("");
+ console.log(cyan("Launching Claude Code with Ollama"));
+ console.log(dim(` Model: ${magenta(model)}`));
+ console.log(dim(` API: ${ollamaUrl}`));
+ console.log("");
+ await ensureModelLoaded(model);
+ console.log("");
  const env = {
  ...process.env,
  ANTHROPIC_AUTH_TOKEN: "ollama",
@@ -2815,213 +3004,753 @@ async function launchClaude(model, passthroughArgs) {
2815
3004
  // lib/commands/init.ts
2816
3005
  import { existsSync as existsSync2, mkdirSync, writeFileSync, readFileSync as readFileSync2 } from "fs";
2817
3006
  import { join as join2 } from "path";
2818
- var DOCKER_COMPOSE_TEMPLATE = `services:
2819
- ollama:
2820
- image: ollama/ollama:latest
2821
- container_name: ollama
2822
- runtime: nvidia
2823
- environment:
2824
- - NVIDIA_VISIBLE_DEVICES=all
2825
- - NVIDIA_DRIVER_CAPABILITIES=compute,utility
2826
- volumes:
2827
- - ./models:/root/.ollama
2828
- ports:
2829
- - "11434:11434"
2830
- restart: unless-stopped
2831
- healthcheck:
2832
- test: ["CMD", "ollama", "list"]
2833
- interval: 300s
2834
- timeout: 2s
2835
- retries: 3
2836
- start_period: 40s
2837
- deploy:
2838
- resources:
2839
- reservations:
2840
- devices:
2841
- - driver: nvidia
2842
- count: all
2843
- capabilities: [gpu]
2844
-
2845
- open-webui:
2846
- image: ghcr.io/open-webui/open-webui:cuda
2847
- container_name: open-webui
2848
- ports:
2849
- - "3000:8080"
2850
- environment:
2851
- - OLLAMA_BASE_URL=http://ollama:11434
2852
- depends_on:
2853
- - ollama
2854
- restart: unless-stopped
2855
- healthcheck:
2856
- test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
2857
- interval: 30s
2858
- timeout: 10s
2859
- retries: 3
2860
- start_period: 60s
2861
- volumes:
2862
- - open-webui:/app/backend/data
2863
- deploy:
2864
- resources:
2865
- reservations:
2866
- devices:
2867
- - driver: nvidia
2868
- count: all
2869
- capabilities: [gpu]
2870
3007
 
2871
- volumes:
2872
- open-webui:
2873
- `;
2874
- var CONFIG_TEMPLATE = `{
2875
- "ollama": {
2876
- "url": "http://localhost:11434",
2877
- "defaultModel": "qwen3-coder:30b"
2878
- },
2879
- "docker": {
2880
- "composeFile": "./docker-compose.yml",
2881
- "gpu": true
3008
+ // lib/commands/doctor.ts
3009
+ async function checkDocker() {
3010
+ const exists = await commandExists("docker");
3011
+ if (!exists) {
3012
+ return {
3013
+ name: "Docker",
3014
+ status: "error",
3015
+ message: "Not installed",
3016
+ hint: "Install Docker: https://docs.docker.com/get-docker/"
3017
+ };
2882
3018
  }
3019
+ const version = await getCommandVersion("docker");
3020
+ return {
3021
+ name: "Docker",
3022
+ status: "ok",
3023
+ message: "Installed",
3024
+ version: version ?? undefined
3025
+ };
2883
3026
  }
2884
- `;
2885
- var GITIGNORE_TEMPLATE = `# Ollama models (large binary files)
2886
- models/
2887
- `;
2888
- var MISE_TOML_TEMPLATE = `# Mise task runner configuration
2889
- # Run \`mise tasks\` to see all available tasks
2890
- # https://mise.jdx.dev/
2891
-
2892
- [tasks]
2893
-
2894
- # =============================================================================
2895
- # Docker Management
2896
- # =============================================================================
2897
-
2898
- [tasks.up]
2899
- description = "Start Ollama and Open WebUI containers"
2900
- run = "loclaude docker-up"
2901
-
2902
- [tasks.down]
2903
- description = "Stop all containers"
2904
- run = "loclaude docker-down"
2905
-
2906
- [tasks.restart]
2907
- description = "Restart all containers"
2908
- run = "loclaude docker-restart"
2909
-
2910
- [tasks.status]
2911
- description = "Show container status"
2912
- run = "loclaude docker-status"
2913
-
2914
- [tasks.logs]
2915
- description = "Follow container logs"
2916
- run = "loclaude docker-logs --follow"
2917
-
2918
- # =============================================================================
2919
- # Model Management
2920
- # =============================================================================
2921
-
2922
- [tasks.models]
2923
- description = "List installed models"
2924
- run = "loclaude models"
2925
-
2926
- [tasks.pull]
2927
- description = "Pull a model (usage: mise run pull <model-name>)"
2928
- run = "loclaude models-pull {{arg(name='model')}}"
2929
-
2930
- # =============================================================================
2931
- # Claude Code
2932
- # =============================================================================
2933
-
2934
- [tasks.claude]
2935
- description = "Run Claude Code with local Ollama"
2936
- run = "loclaude run"
2937
-
2938
- [tasks."claude:model"]
2939
- description = "Run Claude with specific model (usage: mise run claude:model <model>)"
2940
- run = "loclaude run -m {{arg(name='model')}}"
2941
-
2942
- # =============================================================================
2943
- # Diagnostics
2944
- # =============================================================================
2945
-
2946
- [tasks.doctor]
2947
- description = "Check system requirements"
2948
- run = "loclaude doctor"
2949
-
2950
- [tasks.gpu]
2951
- description = "Check GPU status"
2952
- run = "docker exec ollama nvidia-smi"
2953
- `;
2954
- var README_TEMPLATE = `# Project Name
2955
-
2956
- > Powered by [loclaude](https://github.com/nicholasgalante1997/docker-ollama) - Run Claude Code with local Ollama LLMs
2957
-
2958
- ## Prerequisites
2959
-
2960
- - [Docker](https://docs.docker.com/get-docker/) with Docker Compose v2
2961
- - [NVIDIA GPU](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) with drivers and container toolkit
2962
- - [mise](https://mise.jdx.dev/) task runner (recommended)
2963
- - [loclaude](https://www.npmjs.com/package/loclaude) CLI (\`npm install -g loclaude\`)
2964
-
2965
- ## Quick Start
2966
-
2967
- \`\`\`bash
2968
- # Start the LLM backend (Ollama + Open WebUI)
2969
- mise run up
2970
-
2971
- # Pull a model
2972
- mise run pull qwen3-coder:30b
2973
-
2974
- # Run Claude Code with local LLM
2975
- mise run claude
2976
- \`\`\`
2977
-
2978
- ## Available Commands
2979
-
2980
- Run \`mise tasks\` to see all available commands.
2981
-
2982
- | Command | Description |
2983
- |---------|-------------|
2984
- | \`mise run up\` | Start Ollama and Open WebUI containers |
2985
- | \`mise run down\` | Stop all containers |
2986
- | \`mise run status\` | Show container status |
2987
- | \`mise run logs\` | Follow container logs |
2988
- | \`mise run models\` | List installed models |
2989
- | \`mise run pull <model>\` | Pull a model from Ollama registry |
2990
- | \`mise run claude\` | Run Claude Code with model selection |
2991
- | \`mise run claude:model <model>\` | Run Claude with specific model |
2992
- | \`mise run doctor\` | Check system requirements |
2993
- | \`mise run gpu\` | Check GPU status |
2994
-
2995
- ## Service URLs
2996
-
2997
- | Service | URL | Description |
2998
- |---------|-----|-------------|
2999
- | Ollama API | http://localhost:11434 | LLM inference API |
3000
- | Open WebUI | http://localhost:3000 | Chat interface |
3001
-
3002
- ## Project Structure
3003
-
3004
- \`\`\`
3005
- .
3006
- ├── .claude/
3007
- │ └── CLAUDE.md # Claude Code instructions
3008
- ├── .loclaude/
3009
- │ └── config.json # Loclaude configuration
3010
- ├── models/ # Ollama model storage (gitignored)
3011
- ├── docker-compose.yml # Container definitions
3012
- ├── mise.toml # Task runner configuration
3013
- └── README.md
3014
- \`\`\`
3015
-
3016
- ## Configuration
3017
-
3018
- ### Loclaude Config (\`.loclaude/config.json\`)
3019
-
3020
- \`\`\`json
3021
- {
3022
- "ollama": {
3023
- "url": "http://localhost:11434",
3024
- "defaultModel": "qwen3-coder:30b"
3027
+ async function checkDockerCompose() {
3028
+ const result = await spawnCapture(["docker", "compose", "version"]);
3029
+ if (result.exitCode === 0) {
3030
+ const version = result.stdout?.trim().split(`
3031
+ `)[0];
3032
+ return {
3033
+ name: "Docker Compose",
3034
+ status: "ok",
3035
+ message: "Installed (v2)",
3036
+ version: version ?? undefined
3037
+ };
3038
+ }
3039
+ const v1Exists = await commandExists("docker-compose");
3040
+ if (v1Exists) {
3041
+ const version = await getCommandVersion("docker-compose");
3042
+ return {
3043
+ name: "Docker Compose",
3044
+ status: "warning",
3045
+ message: "Using legacy v1",
3046
+ version: version ?? undefined,
3047
+ hint: "Consider upgrading to Docker Compose v2"
3048
+ };
3049
+ }
3050
+ return {
3051
+ name: "Docker Compose",
3052
+ status: "error",
3053
+ message: "Not installed",
3054
+ hint: "Docker Compose is included with Docker Desktop, or install separately"
3055
+ };
3056
+ }
3057
+ async function checkNvidiaSmi() {
3058
+ const exists = await commandExists("nvidia-smi");
3059
+ if (!exists) {
3060
+ return {
3061
+ name: "NVIDIA GPU",
3062
+ status: "warning",
3063
+ message: "nvidia-smi not found",
3064
+ hint: "GPU support requires NVIDIA drivers. CPU-only mode will be used."
3065
+ };
3066
+ }
3067
+ const result = await spawnCapture(["nvidia-smi", "--query-gpu=name", "--format=csv,noheader"]);
3068
+ if (result.exitCode === 0 && result.stdout) {
3069
+ const gpus = result.stdout.trim().split(`
3070
+ `).filter(Boolean);
3071
+ return {
3072
+ name: "NVIDIA GPU",
3073
+ status: "ok",
3074
+ message: `${gpus.length} GPU(s) detected`,
3075
+ version: gpus[0]
3076
+ };
3077
+ }
3078
+ return {
3079
+ name: "NVIDIA GPU",
3080
+ status: "warning",
3081
+ message: "nvidia-smi failed",
3082
+ hint: "GPU may not be available. Check NVIDIA drivers."
3083
+ };
3084
+ }
3085
+ async function checkNvidiaContainerToolkit() {
3086
+ const result = await spawnCapture(["docker", "info", "--format", "{{.Runtimes}}"]);
3087
+ if (result.exitCode === 0 && result.stdout?.includes("nvidia")) {
3088
+ return {
3089
+ name: "NVIDIA Container Toolkit",
3090
+ status: "ok",
3091
+ message: "nvidia runtime available"
3092
+ };
3093
+ }
3094
+ return {
3095
+ name: "NVIDIA Container Toolkit",
3096
+ status: "warning",
3097
+ message: "nvidia runtime not found",
3098
+ hint: "Install: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html"
3099
+ };
3100
+ }
3101
+ async function checkClaude() {
3102
+ const exists = await commandExists("claude");
3103
+ if (!exists) {
3104
+ return {
3105
+ name: "Claude Code",
3106
+ status: "error",
3107
+ message: "Not installed",
3108
+ hint: "Install: npm install -g @anthropic-ai/claude-code"
3109
+ };
3110
+ }
3111
+ const version = await getCommandVersion("claude");
3112
+ return {
3113
+ name: "Claude Code",
3114
+ status: "ok",
3115
+ message: "Installed",
3116
+ version: version ?? undefined
3117
+ };
3118
+ }
3119
+ async function checkOllamaConnection() {
3120
+ const ollamaUrl = getOllamaUrl();
3121
+ try {
3122
+ const response = await fetch(`${ollamaUrl}/api/tags`, {
3123
+ signal: AbortSignal.timeout(5000)
3124
+ });
3125
+ if (response.ok) {
3126
+ const data = await response.json();
3127
+ const modelCount = data.models?.length ?? 0;
3128
+ return {
3129
+ name: "Ollama API",
3130
+ status: "ok",
3131
+ message: `Connected (${modelCount} model${modelCount === 1 ? "" : "s"})`,
3132
+ version: ollamaUrl
3133
+ };
3134
+ }
3135
+ return {
3136
+ name: "Ollama API",
3137
+ status: "warning",
3138
+ message: `HTTP ${response.status}`,
3139
+ hint: "Ollama may not be running. Try: loclaude docker-up"
3140
+ };
3141
+ } catch (error3) {
3142
+ return {
3143
+ name: "Ollama API",
3144
+ status: "warning",
3145
+ message: "Not reachable",
3146
+ hint: `Cannot connect to ${ollamaUrl}. Start Ollama: loclaude docker-up`
3147
+ };
3148
+ }
3149
+ }
3150
+ var MIN_OLLAMA_VERSION = "0.14.2";
3151
+ function parseVersion(version) {
3152
+ const match = version.match(/(\d+)\.(\d+)\.(\d+)/);
3153
+ if (!match || !match[1] || !match[2] || !match[3])
3154
+ return null;
3155
+ return {
3156
+ major: parseInt(match[1], 10),
3157
+ minor: parseInt(match[2], 10),
3158
+ patch: parseInt(match[3], 10)
3159
+ };
3160
+ }
3161
+ function compareVersions(a, b) {
3162
+ const parsedA = parseVersion(a);
3163
+ const parsedB = parseVersion(b);
3164
+ if (!parsedA || !parsedB)
3165
+ return 0;
3166
+ if (parsedA.major !== parsedB.major)
3167
+ return parsedA.major - parsedB.major;
3168
+ if (parsedA.minor !== parsedB.minor)
3169
+ return parsedA.minor - parsedB.minor;
3170
+ return parsedA.patch - parsedB.patch;
3171
+ }
3172
+ async function checkOllamaVersion() {
3173
+ const ollamaUrl = getOllamaUrl();
3174
+ try {
3175
+ const response = await fetch(`${ollamaUrl}/api/version`, {
3176
+ signal: AbortSignal.timeout(5000)
3177
+ });
3178
+ if (!response.ok) {
3179
+ return {
3180
+ name: "Ollama Version",
3181
+ status: "warning",
3182
+ message: "Could not determine version",
3183
+ hint: "Ollama may not be running. Try: loclaude docker-up"
3184
+ };
3185
+ }
3186
+ const data = await response.json();
3187
+ const version = data.version;
3188
+ if (!version) {
3189
+ return {
3190
+ name: "Ollama Version",
3191
+ status: "warning",
3192
+ message: "Unknown version",
3193
+ hint: "Could not parse version from Ollama API"
3194
+ };
3195
+ }
3196
+ const comparison = compareVersions(version, MIN_OLLAMA_VERSION);
3197
+ if (comparison > 0) {
3198
+ return {
3199
+ name: "Ollama Version",
3200
+ status: "ok",
3201
+ message: "Compatible",
3202
+ version
3203
+ };
3204
+ } else if (comparison === 0) {
3205
+ return {
3206
+ name: "Ollama Version",
3207
+ status: "ok",
3208
+ message: "Compatible",
3209
+ version,
3210
+ hint: `Version ${version} is the minimum. Consider upgrading for best compatibility.`
3211
+ };
3212
+ } else {
3213
+ return {
3214
+ name: "Ollama Version",
3215
+ status: "error",
3216
+ message: `Version too old (requires > ${MIN_OLLAMA_VERSION})`,
3217
+ version,
3218
+ hint: `Upgrade Ollama to a version greater than ${MIN_OLLAMA_VERSION}`
3219
+ };
3220
+ }
3221
+ } catch (error3) {
3222
+ return {
3223
+ name: "Ollama Version",
3224
+ status: "warning",
3225
+ message: "Could not check version",
3226
+ hint: `Cannot connect to ${ollamaUrl}. Start Ollama: loclaude docker-up`
3227
+ };
3228
+ }
3229
+ }
3230
+ function formatCheck(check) {
3231
+ let line = statusLine(check.status, check.name, check.message, check.version);
3232
+ if (check.hint) {
3233
+ line += `
3234
+ ${dim("→")} ${dim(check.hint)}`;
3235
+ }
3236
+ return line;
3237
+ }
3238
+ async function doctor() {
3239
+ header("System Health Check");
3240
+ console.log("");
3241
+ const checks = await Promise.all([
3242
+ checkDocker(),
3243
+ checkDockerCompose(),
3244
+ checkNvidiaSmi(),
3245
+ checkNvidiaContainerToolkit(),
3246
+ checkClaude(),
3247
+ checkOllamaConnection(),
3248
+ checkOllamaVersion()
3249
+ ]);
3250
+ for (const check of checks) {
3251
+ console.log(formatCheck(check));
3252
+ }
3253
+ const errors2 = checks.filter((c) => c.status === "error");
3254
+ const warnings = checks.filter((c) => c.status === "warning");
3255
+ console.log("");
3256
+ if (errors2.length > 0) {
3257
+ console.log(red(`${errors2.length} error(s) found.`) + " Fix these before proceeding.");
3258
+ process.exit(1);
3259
+ } else if (warnings.length > 0) {
3260
+ console.log(yellow(`${warnings.length} warning(s).`) + " loclaude may work with limited functionality.");
3261
+ } else {
3262
+ console.log(green("All checks passed!") + " Ready to use loclaude.");
3263
+ }
3264
+ }
3265
+ async function hasNvidiaGpu() {
3266
+ const exists = await commandExists("nvidia-smi");
3267
+ if (!exists)
3268
+ return false;
3269
+ const result = await spawnCapture(["nvidia-smi", "--query-gpu=name", "--format=csv,noheader"]);
3270
+ return result.exitCode === 0 && Boolean(result.stdout?.trim());
3271
+ }
3272
+
3273
+ // lib/commands/init.ts
3274
+ var DOCKER_COMPOSE_TEMPLATE_GPU = `# =============================================================================
3275
+ # LOCLAUDE DOCKER COMPOSE - GPU MODE
3276
+ # =============================================================================
3277
+ # This configuration runs Ollama with NVIDIA GPU acceleration for fast inference.
3278
+ # Generated by: loclaude init
3279
+ #
3280
+ # Prerequisites:
3281
+ # - NVIDIA GPU with CUDA support
3282
+ # - NVIDIA drivers installed on host
3283
+ # - NVIDIA Container Toolkit: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit
3284
+ #
3285
+ # Quick test for GPU support:
3286
+ # docker run --rm --gpus all nvidia/cuda:12.0-base nvidia-smi
3287
+ #
3288
+ # =============================================================================
3289
+
3290
+ services:
3291
+ # ===========================================================================
3292
+ # OLLAMA - Local LLM Inference Server
3293
+ # ===========================================================================
3294
+ # Ollama provides the AI backend that Claude Code connects to.
3295
+ # It runs large language models locally on your hardware.
3296
+ #
3297
+ # API Documentation: https://github.com/ollama/ollama/blob/main/docs/api.md
3298
+ # Model Library: https://ollama.com/library
3299
+ # ===========================================================================
3300
+ ollama:
3301
+ # Official Ollama image - 'latest' ensures newest features and model support
3302
+ image: ollama/ollama:latest
3303
+
3304
+ # Fixed container name for easy CLI access:
3305
+ # docker exec ollama ollama list
3306
+ # docker logs ollama
3307
+ container_name: ollama
3308
+
3309
+ # NVIDIA Container Runtime - Required for GPU access
3310
+ # This makes CUDA libraries available inside the container
3311
+ runtime: nvidia
3312
+
3313
+ environment:
3314
+ # ---------------------------------------------------------------------------
3315
+ # GPU Configuration
3316
+ # ---------------------------------------------------------------------------
3317
+ # NVIDIA_VISIBLE_DEVICES: Which GPUs to expose to the container
3318
+ # - 'all': Use all available GPUs (recommended for most setups)
3319
+ # - '0': Use only GPU 0
3320
+ # - '0,1': Use GPUs 0 and 1
3321
+ - NVIDIA_VISIBLE_DEVICES=all
3322
+
3323
+ # NVIDIA_DRIVER_CAPABILITIES: What GPU features to enable
3324
+ # - 'compute': CUDA compute (required for inference)
3325
+ # - 'utility': nvidia-smi and other tools
3326
+ - NVIDIA_DRIVER_CAPABILITIES=compute,utility
3327
+
3328
+ # ---------------------------------------------------------------------------
3329
+ # Ollama Configuration (Optional)
3330
+ # ---------------------------------------------------------------------------
3331
+ # Uncomment these to customize Ollama behavior:
3332
+
3333
+ # Maximum number of models loaded in memory simultaneously
3334
+ # Lower this if you're running out of VRAM
3335
+ # - OLLAMA_MAX_LOADED_MODELS=1
3336
+
3337
+ # Maximum parallel inference requests per model
3338
+ # Higher values use more VRAM but handle more concurrent requests
3339
+ # - OLLAMA_NUM_PARALLEL=1
3340
+
3341
+ # Enable debug logging for troubleshooting
3342
+ # - OLLAMA_DEBUG=1
3343
+
3344
+ # Custom model storage location (inside container)
3345
+ # - OLLAMA_MODELS=/root/.ollama
3346
+
3347
+ volumes:
3348
+ # ---------------------------------------------------------------------------
3349
+ # Model Storage
3350
+ # ---------------------------------------------------------------------------
3351
+ # Maps ./models on your host to /root/.ollama in the container
3352
+ # This persists downloaded models across container restarts
3353
+ #
3354
+ # Disk space requirements (approximate):
3355
+ # - 7B model: ~4GB
3356
+ # - 13B model: ~8GB
3357
+ # - 30B model: ~16GB
3358
+ # - 70B model: ~40GB
3359
+ - ./models:/root/.ollama
3360
+
3361
+ ports:
3362
+ # Ollama API port - access at http://localhost:11434
3363
+ # Used by Claude Code and other Ollama clients
3364
+ - "11434:11434"
3365
+
3366
+ # Restart policy - keeps Ollama running unless manually stopped
3367
+ restart: unless-stopped
3368
+
3369
+ healthcheck:
3370
+ # Verify Ollama is responsive by listing models
3371
+ test: ["CMD", "ollama", "list"]
3372
+ interval: 300s # Check every 5 minutes
3373
+ timeout: 2s # Fail if no response in 2 seconds
3374
+ retries: 3 # Mark unhealthy after 3 consecutive failures
3375
+ start_period: 40s # Grace period for initial model loading
3376
+
3377
+ deploy:
3378
+ resources:
3379
+ reservations:
3380
+ devices:
3381
+ # Request GPU access from Docker
3382
+ - driver: nvidia
3383
+ count: all # Use all available GPUs
3384
+ capabilities: [gpu] # Request GPU compute capability
3385
+
3386
+ # ===========================================================================
3387
+ # OPEN WEBUI - Chat Interface (Optional)
3388
+ # ===========================================================================
3389
+ # Open WebUI provides a ChatGPT-like interface for your local models.
3390
+ # Access at http://localhost:3000 after starting containers.
3391
+ #
3392
+ # Features:
3393
+ # - Multi-model chat interface
3394
+ # - Conversation history
3395
+ # - Model management UI
3396
+ # - RAG/document upload support
3397
+ #
3398
+ # Documentation: https://docs.openwebui.com/
3399
+ # ===========================================================================
3400
+ open-webui:
3401
+ # CUDA-enabled image for GPU-accelerated features (embeddings, etc.)
3402
+ # Change to :main if you don't need GPU features in the UI
3403
+ image: ghcr.io/open-webui/open-webui:cuda
3404
+
3405
+ container_name: open-webui
3406
+
3407
+ ports:
3408
+ # Web UI port - access at http://localhost:3000
3409
+ - "3000:8080"
3410
+
3411
+ environment:
3412
+ # Tell Open WebUI where to find Ollama
3413
+ # Uses Docker internal networking (service name as hostname)
3414
+ - OLLAMA_BASE_URL=http://ollama:11434
3415
+
3416
+ # Wait for Ollama to be ready before starting
3417
+ depends_on:
3418
+ - ollama
3419
+
3420
+ restart: unless-stopped
3421
+
3422
+ healthcheck:
3423
+ test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
3424
+ interval: 30s
3425
+ timeout: 10s
3426
+ retries: 3
3427
+ start_period: 60s
3428
+
3429
+ volumes:
3430
+ # Persistent storage for conversations, settings, and user data
3431
+ - open-webui:/app/backend/data
3432
+
3433
+ deploy:
3434
+ resources:
3435
+ reservations:
3436
+ devices:
3437
+ - driver: nvidia
3438
+ count: all
3439
+ capabilities: [gpu]
3440
+
3441
+ # =============================================================================
3442
+ # VOLUMES
3443
+ # =============================================================================
3444
+ # Named volumes for persistent data that survives container recreation
3445
+ volumes:
3446
+ open-webui:
3447
+ # Open WebUI data: conversations, user settings, uploads
3448
+ # Located at /var/lib/docker/volumes/open-webui/_data on host
3449
+ `;
3450
+ var DOCKER_COMPOSE_TEMPLATE_CPU = `# =============================================================================
3451
+ # LOCLAUDE DOCKER COMPOSE - CPU MODE
3452
+ # =============================================================================
3453
+ # This configuration runs Ollama in CPU-only mode.
3454
+ # Inference will be slower than GPU mode but works on any system.
3455
+ # Generated by: loclaude init --no-gpu
3456
+ #
3457
+ # Performance notes:
3458
+ # - 7B models: ~10-20 tokens/sec on modern CPUs
3459
+ # - Larger models will be significantly slower
3460
+ # - Consider using quantized models (Q4_K_M, Q5_K_M) for better performance
3461
+ #
3462
+ # Recommended CPU-optimized models:
3463
+ # - llama3.2:3b (fast, good for simple tasks)
3464
+ # - qwen2.5-coder:7b (coding tasks)
3465
+ # - gemma2:9b (general purpose)
3466
+ #
3467
+ # =============================================================================
3468
+
3469
+ services:
3470
+ # ===========================================================================
3471
+ # OLLAMA - Local LLM Inference Server (CPU Mode)
3472
+ # ===========================================================================
3473
+ # Ollama provides the AI backend that Claude Code connects to.
3474
+ # Running in CPU mode - no GPU acceleration.
3475
+ #
3476
+ # API Documentation: https://github.com/ollama/ollama/blob/main/docs/api.md
3477
+ # Model Library: https://ollama.com/library
3478
+ # ===========================================================================
3479
+ ollama:
3480
+ # Official Ollama image - works for both CPU and GPU
3481
+ image: ollama/ollama:latest
3482
+
3483
+ # Fixed container name for easy CLI access
3484
+ container_name: ollama
3485
+
3486
+ # NOTE: No 'runtime: nvidia' - running in CPU mode
3487
+
3488
+ environment:
3489
+ # ---------------------------------------------------------------------------
3490
+ # Ollama Configuration (Optional)
3491
+ # ---------------------------------------------------------------------------
3492
+ # Uncomment these to customize Ollama behavior:
3493
+
3494
+ # Maximum number of models loaded in memory simultaneously
3495
+ # CPU mode uses system RAM instead of VRAM
3496
+ # - OLLAMA_MAX_LOADED_MODELS=1
3497
+
3498
+ # Number of CPU threads to use (default: auto-detect)
3499
+ # - OLLAMA_NUM_THREADS=8
3500
+
3501
+ # Enable debug logging for troubleshooting
3502
+ # - OLLAMA_DEBUG=1
3503
+
3504
+ volumes:
3505
+ # ---------------------------------------------------------------------------
3506
+ # Model Storage
3507
+ # ---------------------------------------------------------------------------
3508
+ # Maps ./models on your host to /root/.ollama in the container
3509
+ # This persists downloaded models across container restarts
3510
+ - ./models:/root/.ollama
3511
+
3512
+ ports:
3513
+ # Ollama API port - access at http://localhost:11434
3514
+ - "11434:11434"
3515
+
3516
+ restart: unless-stopped
3517
+
3518
+ healthcheck:
3519
+ test: ["CMD", "ollama", "list"]
3520
+ interval: 300s
3521
+ timeout: 2s
3522
+ retries: 3
3523
+ start_period: 40s
3524
+
3525
+ # CPU resource limits (optional - uncomment to constrain)
3526
+ # deploy:
3527
+ # resources:
3528
+ # limits:
3529
+ # cpus: '4' # Limit to 4 CPU cores
3530
+ # memory: 16G # Limit to 16GB RAM
3531
+ # reservations:
3532
+ # cpus: '2' # Reserve at least 2 cores
3533
+ # memory: 8G # Reserve at least 8GB RAM
3534
+
3535
+ # ===========================================================================
3536
+ # OPEN WEBUI - Chat Interface (Optional)
3537
+ # ===========================================================================
3538
+ # Open WebUI provides a ChatGPT-like interface for your local models.
3539
+ # Access at http://localhost:3000 after starting containers.
3540
+ #
3541
+ # Documentation: https://docs.openwebui.com/
3542
+ # ===========================================================================
3543
+ open-webui:
3544
+ # Standard image (no CUDA) - smaller download, CPU-only features
3545
+ image: ghcr.io/open-webui/open-webui:main
3546
+
3547
+ container_name: open-webui
3548
+
3549
+ ports:
3550
+ - "3000:8080"
3551
+
3552
+ environment:
3553
+ - OLLAMA_BASE_URL=http://ollama:11434
3554
+
3555
+ depends_on:
3556
+ - ollama
3557
+
3558
+ restart: unless-stopped
3559
+
3560
+ healthcheck:
3561
+ test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
3562
+ interval: 30s
3563
+ timeout: 10s
3564
+ retries: 3
3565
+ start_period: 60s
3566
+
3567
+ volumes:
3568
+ - open-webui:/app/backend/data
3569
+
3570
+ # =============================================================================
3571
+ # VOLUMES
3572
+ # =============================================================================
3573
+ volumes:
3574
+ open-webui:
3575
+ `;
3576
+ function getConfigTemplate(gpu) {
3577
+ return `{
3578
+ "ollama": {
3579
+ "url": "http://localhost:11434",
3580
+ "defaultModel": "${gpu ? "qwen3-coder:30b" : "qwen2.5-coder:7b"}"
3581
+ },
3582
+ "docker": {
3583
+ "composeFile": "./docker-compose.yml",
3584
+ "gpu": ${gpu}
3585
+ }
3586
+ }
3587
+ `;
3588
+ }
3589
+ var GITIGNORE_TEMPLATE = `# Ollama models (large binary files)
3590
+ # These are downloaded by Ollama and can be re-pulled anytime
3591
+ models/
3592
+ `;
3593
+ var MISE_TOML_TEMPLATE = `# =============================================================================
3594
+ # MISE TASK RUNNER CONFIGURATION
3595
+ # =============================================================================
3596
+ # Mise is a task runner that provides convenient shortcuts for common operations.
3597
+ # Run 'mise tasks' to see all available tasks.
3598
+ #
3599
+ # Documentation: https://mise.jdx.dev/
3600
+ # Install: curl https://mise.jdx.dev/install.sh | sh
3601
+ # =============================================================================
3602
+
3603
+ [tasks]
3604
+
3605
+ # =============================================================================
3606
+ # Docker Management
3607
+ # =============================================================================
3608
+ # Commands for managing the Ollama and Open WebUI containers
3609
+
3610
+ [tasks.up]
3611
+ description = "Start Ollama and Open WebUI containers"
3612
+ run = "loclaude docker-up"
3613
+
3614
+ [tasks.down]
3615
+ description = "Stop all containers"
3616
+ run = "loclaude docker-down"
3617
+
3618
+ [tasks.restart]
3619
+ description = "Restart all containers"
3620
+ run = "loclaude docker-restart"
3621
+
3622
+ [tasks.status]
3623
+ description = "Show container status"
3624
+ run = "loclaude docker-status"
3625
+
3626
+ [tasks.logs]
3627
+ description = "Follow container logs"
3628
+ run = "loclaude docker-logs --follow"
3629
+
3630
+ # =============================================================================
3631
+ # Model Management
3632
+ # =============================================================================
3633
+ # Commands for managing Ollama models (download, remove, list)
3634
+
3635
+ [tasks.models]
3636
+ description = "List installed models"
3637
+ run = "loclaude models"
3638
+
3639
+ [tasks.pull]
3640
+ description = "Pull a model (usage: mise run pull <model-name>)"
3641
+ run = "loclaude models-pull {{arg(name='model')}}"
3642
+
3643
+ [tasks."pull:recommended"]
3644
+ description = "Pull the recommended coding model"
3645
+ run = "loclaude models-pull qwen3-coder:30b"
3646
+
3647
+ # =============================================================================
3648
+ # Claude Code
3649
+ # =============================================================================
3650
+ # Commands for running Claude Code with local Ollama
3651
+
3652
+ [tasks.claude]
3653
+ description = "Run Claude Code with local Ollama"
3654
+ run = "loclaude run"
3655
+
3656
+ [tasks."claude:model"]
3657
+ description = "Run Claude with specific model (usage: mise run claude:model <model>)"
3658
+ run = "loclaude run -m {{arg(name='model')}}"
3659
+
3660
+ # =============================================================================
3661
+ # Diagnostics
3662
+ # =============================================================================
3663
+ # Commands for checking system health and troubleshooting
3664
+
3665
+ [tasks.doctor]
3666
+ description = "Check system requirements"
3667
+ run = "loclaude doctor"
3668
+
3669
+ [tasks.gpu]
3670
+ description = "Check GPU status (requires NVIDIA GPU)"
3671
+ run = "docker exec ollama nvidia-smi"
3672
+
3673
+ [tasks.config]
3674
+ description = "Show current configuration"
3675
+ run = "loclaude config"
3676
+ `;
3677
+ var README_TEMPLATE = `# Project Name
3678
+
3679
+ > Powered by [loclaude](https://github.com/nicholasgalante1997/loclaude) - Run Claude Code with local Ollama LLMs
3680
+
3681
+ ## Prerequisites
3682
+
3683
+ - [Docker](https://docs.docker.com/get-docker/) with Docker Compose v2
3684
+ - [mise](https://mise.jdx.dev/) task runner (recommended)
3685
+ - [loclaude](https://www.npmjs.com/package/loclaude) CLI (\`npm install -g loclaude\`)
3686
+
3687
+ ### For GPU Mode (Recommended)
3688
+
3689
+ - NVIDIA GPU with CUDA support
3690
+ - NVIDIA drivers installed on host
3691
+ - [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
3692
+
3693
+ ## Quick Start
3694
+
3695
+ \`\`\`bash
3696
+ # Start the LLM backend (Ollama + Open WebUI)
3697
+ mise run up
3698
+
3699
+ # Pull a model (adjust based on your hardware)
3700
+ mise run pull qwen3-coder:30b # GPU: 30B model (~16GB VRAM)
3701
+ mise run pull qwen2.5-coder:7b # CPU: 7B model (faster)
3702
+
3703
+ # Run Claude Code with local LLM
3704
+ mise run claude
3705
+ \`\`\`
3706
+
3707
+ ## Available Commands
3708
+
3709
+ Run \`mise tasks\` to see all available commands.
3710
+
3711
+ | Command | Description |
3712
+ |---------|-------------|
3713
+ | \`mise run up\` | Start Ollama and Open WebUI containers |
3714
+ | \`mise run down\` | Stop all containers |
3715
+ | \`mise run status\` | Show container status |
3716
+ | \`mise run logs\` | Follow container logs |
3717
+ | \`mise run models\` | List installed models |
3718
+ | \`mise run pull <model>\` | Pull a model from the Ollama registry |
3719
+ | \`mise run claude\` | Run Claude Code with model selection |
3720
+ | \`mise run claude:model <model>\` | Run Claude with a specific model |
3721
+ | \`mise run doctor\` | Check system requirements |
3722
+ | \`mise run gpu\` | Check GPU status |
3723
+
3724
+ ## Service URLs
3725
+
3726
+ | Service | URL | Description |
3727
+ |---------|-----|-------------|
3728
+ | Ollama API | http://localhost:11434 | LLM inference API |
3729
+ | Open WebUI | http://localhost:3000 | Chat interface |
3730
+
3731
+ ## Project Structure
3732
+
3733
+ \`\`\`
3734
+ .
3735
+ ├── .claude/
3736
+ │   └── CLAUDE.md      # Claude Code project instructions
3737
+ ├── .loclaude/
3738
+ │   └── config.json    # Loclaude configuration
3739
+ ├── models/            # Ollama model storage (gitignored)
3740
+ ├── docker-compose.yml # Container definitions
3741
+ ├── mise.toml          # Task runner configuration
3742
+ └── README.md
3743
+ \`\`\`
3744
+
3745
+ ## Configuration
3746
+
3747
+ ### Loclaude Config (\`.loclaude/config.json\`)
3748
+
3749
+ \`\`\`json
3750
+ {
3751
+ "ollama": {
3752
+ "url": "http://localhost:11434",
3753
+ "defaultModel": "qwen3-coder:30b"
3025
3754
  },
3026
3755
  "docker": {
3027
3756
  "composeFile": "./docker-compose.yml",
@@ -3036,6 +3765,25 @@ Run \`mise tasks\` to see all available commands.
3036
3765
  |----------|-------------|---------|
3037
3766
  | \`OLLAMA_URL\` | Ollama API endpoint | \`http://localhost:11434\` |
3038
3767
  | \`OLLAMA_MODEL\` | Default model name | \`qwen3-coder:30b\` |
3768
+ | \`LOCLAUDE_GPU\` | Enable GPU mode | \`true\` |
3769
+
3770
+ ## Recommended Models
3771
+
3772
+ ### For GPU (NVIDIA with 16GB+ VRAM)
3773
+
3774
+ | Model | Size | Use Case |
3775
+ |-------|------|----------|
3776
+ | \`qwen3-coder:30b\` | ~16GB | Best coding performance |
3777
+ | \`gpt-oss:20b\` | ~12GB | General purpose |
3778
+ | \`glm-4.7:cloud\` | Cloud | No local storage needed |
3779
+
3780
+ ### For CPU or Limited VRAM
3781
+
3782
+ | Model | Size | Use Case |
3783
+ |-------|------|----------|
3784
+ | \`qwen2.5-coder:7b\` | ~4GB | Coding on CPU |
3785
+ | \`llama3.2:3b\` | ~2GB | Fast, simple tasks |
3786
+ | \`gemma2:9b\` | ~5GB | General purpose |
3039
3787
 
3040
3788
  ## Troubleshooting
3041
3789
 
@@ -3057,6 +3805,12 @@ mise run logs
3057
3805
  mise run down && mise run up
3058
3806
  \`\`\`
3059
3807
 
3808
+ ### GPU Not Detected
3809
+
3810
+ 1. Verify NVIDIA drivers: \`nvidia-smi\`
3811
+ 2. Check Docker GPU access: \`docker run --rm --gpus all nvidia/cuda:12.0.0-base-ubuntu22.04 nvidia-smi\`
3812
+ 3. Install NVIDIA Container Toolkit if missing
3813
+
3060
3814
  ## License
3061
3815
 
3062
3816
  MIT
@@ -3067,7 +3821,7 @@ Project-specific instructions for Claude Code.
3067
3821
 
3068
3822
  ## Project Overview
3069
3823
 
3070
- This project uses [loclaude](https://github.com/nicholasgalante1997/docker-ollama) to run Claude Code with local Ollama LLMs.
3824
+ This project uses [loclaude](https://github.com/nicholasgalante1997/loclaude) to run Claude Code with local Ollama LLMs.
3071
3825
 
3072
3826
  ## Quick Reference
3073
3827
 
@@ -3123,304 +3877,153 @@ async function init(options = {}) {
3123
3877
  const claudeDir = join2(cwd, ".claude");
3124
3878
  const claudeMdPath = join2(claudeDir, "CLAUDE.md");
3125
3879
  const readmePath = join2(cwd, "README.md");
3126
- console.log(`Initializing loclaude project...
3127
- `);
3880
+ header("Initializing loclaude project");
3881
+ console.log("");
3882
+ let gpuMode;
3883
+ if (options.gpu === false) {
3884
+ gpuMode = false;
3885
+ console.log(info("CPU-only mode (--no-gpu)"));
3886
+ } else if (options.gpu === true) {
3887
+ gpuMode = true;
3888
+ console.log(info("GPU mode enabled (--gpu)"));
3889
+ } else {
3890
+ console.log(dim(" Detecting GPU..."));
3891
+ gpuMode = await hasNvidiaGpu();
3892
+ if (gpuMode) {
3893
+ console.log(success("NVIDIA GPU detected - using GPU mode"));
3894
+ } else {
3895
+ console.log(warn("No NVIDIA GPU detected - using CPU mode"));
3896
+ console.log(dim(" Use --gpu to force GPU mode if you have an NVIDIA GPU"));
3897
+ }
3898
+ }
3899
+ console.log("");
3128
3900
  if (existsSync2(readmePath) && !options.force) {
3129
- console.log("⚠️ README.md already exists");
3901
+ console.log(warn(`${file("README.md")} already exists`));
3130
3902
  } else {
3131
3903
  writeFileSync(readmePath, README_TEMPLATE);
3132
- console.log("✓ Created README.md");
3904
+ console.log(success(`Created ${file("README.md")}`));
3133
3905
  }
3134
3906
  if (existsSync2(composePath) && !options.force) {
3135
- console.log("⚠️ docker-compose.yml already exists");
3136
- console.log(` Use --force to overwrite
3137
- `);
3907
+ console.log(warn(`${file("docker-compose.yml")} already exists`));
3908
+ console.log(dim(" Use --force to overwrite"));
3138
3909
  } else {
3139
- let composeContent = DOCKER_COMPOSE_TEMPLATE;
3910
+ let composeContent = gpuMode ? DOCKER_COMPOSE_TEMPLATE_GPU : DOCKER_COMPOSE_TEMPLATE_CPU;
3140
3911
  if (options.noWebui) {
3141
- composeContent = composeContent.replace(/\n open-webui:[\s\S]*?capabilities: \[gpu\]\n/m, `
3142
- `).replace(/\nvolumes:\n open-webui:\n/, `
3912
+ composeContent = composeContent.replace(/\n # =+\n # OPEN WEBUI[\s\S]*?capabilities: \[gpu\]\n/m, `
3913
+ `).replace(/\n # =+\n # OPEN WEBUI[\s\S]*?open-webui:\/app\/backend\/data\n/m, `
3914
+ `).replace(/\nvolumes:\n open-webui:\n.*$/m, `
3143
3915
  `);
3144
3916
  }
3145
3917
  writeFileSync(composePath, composeContent);
3146
- console.log(" Created docker-compose.yml");
3918
+ const modeLabel = gpuMode ? cyan("GPU") : cyan("CPU");
3919
+ console.log(success(`Created ${file("docker-compose.yml")} (${modeLabel} mode)`));
3147
3920
  }
3148
3921
  if (existsSync2(miseTomlPath) && !options.force) {
3149
- console.log("⚠️ mise.toml already exists");
3922
+ console.log(warn(`${file("mise.toml")} already exists`));
3150
3923
  } else {
3151
3924
  writeFileSync(miseTomlPath, MISE_TOML_TEMPLATE);
3152
- console.log("✓ Created mise.toml");
3925
+ console.log(success(`Created ${file("mise.toml")}`));
3153
3926
  }
3154
3927
  if (!existsSync2(claudeDir)) {
3155
3928
  mkdirSync(claudeDir, { recursive: true });
3156
3929
  }
3157
3930
  if (existsSync2(claudeMdPath) && !options.force) {
3158
- console.log("⚠️ .claude/CLAUDE.md already exists");
3931
+ console.log(warn(`${file(".claude/CLAUDE.md")} already exists`));
3159
3932
  } else {
3160
3933
  writeFileSync(claudeMdPath, CLAUDE_MD_TEMPLATE);
3161
- console.log("✓ Created .claude/CLAUDE.md");
3934
+ console.log(success(`Created ${file(".claude/CLAUDE.md")}`));
3162
3935
  }
3163
3936
  if (!existsSync2(configDir)) {
3164
3937
  mkdirSync(configDir, { recursive: true });
3165
- console.log("✓ Created .loclaude/ directory");
3938
+ console.log(success(`Created ${file(".loclaude/")} directory`));
3166
3939
  }
3167
3940
  if (existsSync2(configPath) && !options.force) {
3168
- console.log("⚠️ .loclaude/config.json already exists");
3941
+ console.log(warn(`${file(".loclaude/config.json")} already exists`));
3169
3942
  } else {
3170
- writeFileSync(configPath, CONFIG_TEMPLATE);
3171
- console.log("✓ Created .loclaude/config.json");
3943
+ writeFileSync(configPath, getConfigTemplate(gpuMode));
3944
+ console.log(success(`Created ${file(".loclaude/config.json")}`));
3172
3945
  }
3173
3946
  if (!existsSync2(modelsDir)) {
3174
3947
  mkdirSync(modelsDir, { recursive: true });
3175
- console.log("✓ Created models/ directory");
3948
+ console.log(success(`Created ${file("models/")} directory`));
3176
3949
  }
3177
3950
  if (existsSync2(gitignorePath)) {
3178
3951
  const existing = readFileSync2(gitignorePath, "utf-8");
3179
3952
  if (!existing.includes("models/")) {
3180
3953
  writeFileSync(gitignorePath, existing + `
3181
3954
  ` + GITIGNORE_TEMPLATE);
3182
- console.log("✓ Updated .gitignore");
3955
+ console.log(success(`Updated ${file(".gitignore")}`));
3183
3956
  }
3184
3957
  } else {
3185
3958
  writeFileSync(gitignorePath, GITIGNORE_TEMPLATE);
3186
- console.log("✓ Created .gitignore");
3187
- }
3188
- console.log(`
3189
- \uD83C\uDF89 Project initialized!
3190
- `);
3191
- console.log("Next steps:");
3192
- console.log(" 1. Start containers: mise run up");
3193
- console.log(" 2. Pull a model: mise run pull qwen3-coder:30b");
3194
- console.log(" 3. Run Claude: mise run claude");
3195
- console.log(`
3196
- Service URLs:`);
3197
- console.log(" Ollama API: http://localhost:11434");
3198
- if (!options.noWebui) {
3199
- console.log(" Open WebUI: http://localhost:3000");
3200
- }
3201
- }
3202
- // lib/commands/doctor.ts
3203
- async function checkDocker() {
3204
- const exists = await commandExists("docker");
3205
- if (!exists) {
3206
- return {
3207
- name: "Docker",
3208
- status: "error",
3209
- message: "Not installed",
3210
- hint: "Install Docker: https://docs.docker.com/get-docker/"
3211
- };
3212
- }
3213
- const version = await getCommandVersion("docker");
3214
- return {
3215
- name: "Docker",
3216
- status: "ok",
3217
- message: "Installed",
3218
- version: version ?? undefined
3219
- };
3220
- }
3221
- async function checkDockerCompose() {
3222
- const result = await spawnCapture(["docker", "compose", "version"]);
3223
- if (result.exitCode === 0) {
3224
- const version = result.stdout?.trim().split(`
3225
- `)[0];
3226
- return {
3227
- name: "Docker Compose",
3228
- status: "ok",
3229
- message: "Installed (v2)",
3230
- version: version ?? undefined
3231
- };
3232
- }
3233
- const v1Exists = await commandExists("docker-compose");
3234
- if (v1Exists) {
3235
- const version = await getCommandVersion("docker-compose");
3236
- return {
3237
- name: "Docker Compose",
3238
- status: "warning",
3239
- message: "Using legacy v1",
3240
- version: version ?? undefined,
3241
- hint: "Consider upgrading to Docker Compose v2"
3242
- };
3243
- }
3244
- return {
3245
- name: "Docker Compose",
3246
- status: "error",
3247
- message: "Not installed",
3248
- hint: "Docker Compose is included with Docker Desktop, or install separately"
3249
- };
3250
- }
3251
- async function checkNvidiaSmi() {
3252
- const exists = await commandExists("nvidia-smi");
3253
- if (!exists) {
3254
- return {
3255
- name: "NVIDIA GPU",
3256
- status: "warning",
3257
- message: "nvidia-smi not found",
3258
- hint: "GPU support requires NVIDIA drivers. CPU-only mode will be used."
3259
- };
3260
- }
3261
- const result = await spawnCapture(["nvidia-smi", "--query-gpu=name", "--format=csv,noheader"]);
3262
- if (result.exitCode === 0 && result.stdout) {
3263
- const gpus = result.stdout.trim().split(`
3264
- `).filter(Boolean);
3265
- return {
3266
- name: "NVIDIA GPU",
3267
- status: "ok",
3268
- message: `${gpus.length} GPU(s) detected`,
3269
- version: gpus[0]
3270
- };
3271
- }
3272
- return {
3273
- name: "NVIDIA GPU",
3274
- status: "warning",
3275
- message: "nvidia-smi failed",
3276
- hint: "GPU may not be available. Check NVIDIA drivers."
3277
- };
3278
- }
3279
- async function checkNvidiaContainerToolkit() {
3280
- const result = await spawnCapture(["docker", "info", "--format", "{{.Runtimes}}"]);
3281
- if (result.exitCode === 0 && result.stdout?.includes("nvidia")) {
3282
- return {
3283
- name: "NVIDIA Container Toolkit",
3284
- status: "ok",
3285
- message: "nvidia runtime available"
3286
- };
3287
- }
3288
- return {
3289
- name: "NVIDIA Container Toolkit",
3290
- status: "warning",
3291
- message: "nvidia runtime not found",
3292
- hint: "Install: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html"
3293
- };
3294
- }
3295
- async function checkClaude() {
3296
- const exists = await commandExists("claude");
3297
- if (!exists) {
3298
- return {
3299
- name: "Claude Code",
3300
- status: "error",
3301
- message: "Not installed",
3302
- hint: "Install: npm install -g @anthropic-ai/claude-code"
3303
- };
3304
- }
3305
- const version = await getCommandVersion("claude");
3306
- return {
3307
- name: "Claude Code",
3308
- status: "ok",
3309
- message: "Installed",
3310
- version: version ?? undefined
3311
- };
3312
- }
3313
- async function checkOllamaConnection() {
3314
- const ollamaUrl = getOllamaUrl();
3315
- try {
3316
- const response = await fetch(`${ollamaUrl}/api/tags`, {
3317
- signal: AbortSignal.timeout(5000)
3318
- });
3319
- if (response.ok) {
3320
- const data = await response.json();
3321
- const modelCount = data.models?.length ?? 0;
3322
- return {
3323
- name: "Ollama API",
3324
- status: "ok",
3325
- message: `Connected (${modelCount} model${modelCount === 1 ? "" : "s"})`,
3326
- version: ollamaUrl
3327
- };
3328
- }
3329
- return {
3330
- name: "Ollama API",
3331
- status: "warning",
3332
- message: `HTTP ${response.status}`,
3333
- hint: "Ollama may not be running. Try: loclaude docker-up"
3334
- };
3335
- } catch (error) {
3336
- return {
3337
- name: "Ollama API",
3338
- status: "warning",
3339
- message: "Not reachable",
3340
- hint: `Cannot connect to ${ollamaUrl}. Start Ollama: loclaude docker-up`
3341
- };
3959
+ console.log(success(`Created ${file(".gitignore")}`));
3342
3960
  }
3343
- }
3344
- function formatCheck(check) {
3345
- const icons = {
3346
- ok: "✓",
3347
- warning: "⚠",
3348
- error: "✗"
3349
- };
3350
- const colors = {
3351
- ok: "\x1B[32m",
3352
- warning: "\x1B[33m",
3353
- error: "\x1B[31m"
3354
- };
3355
- const reset = "\x1B[0m";
3356
- const icon = icons[check.status];
3357
- const color = colors[check.status];
3358
- let line = `${color}${icon}${reset} ${check.name}: ${check.message}`;
3359
- if (check.version) {
3360
- line += ` (${check.version})`;
3361
- }
3362
- if (check.hint) {
3363
- line += `
3364
- ${check.hint}`;
3365
- }
3366
- return line;
3367
- }
3368
- async function doctor() {
3369
- console.log(`Checking system requirements...
3370
- `);
3371
- const checks = await Promise.all([
3372
- checkDocker(),
3373
- checkDockerCompose(),
3374
- checkNvidiaSmi(),
3375
- checkNvidiaContainerToolkit(),
3376
- checkClaude(),
3377
- checkOllamaConnection()
3378
- ]);
3379
- for (const check of checks) {
3380
- console.log(formatCheck(check));
3381
- }
3382
- const errors2 = checks.filter((c) => c.status === "error");
3383
- const warnings = checks.filter((c) => c.status === "warning");
3961
+ const recommendedModel = gpuMode ? "qwen3-coder:30b" : "qwen2.5-coder:7b";
3384
3962
  console.log("");
3385
- if (errors2.length > 0) {
3386
- console.log(`\x1B[31m${errors2.length} error(s) found.\x1B[0m Fix these before proceeding.`);
3387
- process.exit(1);
3388
- } else if (warnings.length > 0) {
3389
- console.log(`\x1B[33m${warnings.length} warning(s).\x1B[0m loclaude may work with limited functionality.`);
3390
- } else {
3391
- console.log("\x1B[32mAll checks passed!\x1B[0m Ready to use loclaude.");
3963
+ console.log(green("Project initialized!"));
3964
+ console.log("");
3965
+ console.log(cyan("Next steps:"));
3966
+ console.log(` 1. Start containers: ${cmd("mise run up")}`);
3967
+ console.log(` 2. Pull a model: ${cmd(`mise run pull ${recommendedModel}`)}`);
3968
+ console.log(` 3. Run Claude: ${cmd("mise run claude")}`);
3969
+ console.log("");
3970
+ console.log(cyan("Service URLs:"));
3971
+ console.log(` Ollama API: ${url("http://localhost:11434")}`);
3972
+ if (!options.noWebui) {
3973
+ console.log(` Open WebUI: ${url("http://localhost:3000")}`);
3392
3974
  }
3393
3975
  }
3394
3976
  // lib/commands/config.ts
3395
- import { inspect } from "util";
3396
3977
  async function configShow() {
3397
3978
  const config = loadConfig();
3398
3979
  const activePath = getActiveConfigPath();
3399
- console.log(`Current configuration:
3400
- `);
3401
- console.log(inspect(config, false, 3, true));
3402
- console.log(`
3403
- ---`);
3980
+ header("Current Configuration");
3981
+ console.log("");
3982
+ console.log(cyan("Ollama:"));
3983
+ labelValue(" URL", config.ollama.url);
3984
+ labelValue(" Default Model", magenta(config.ollama.defaultModel));
3985
+ console.log("");
3986
+ console.log(cyan("Docker:"));
3987
+ labelValue(" Compose File", config.docker.composeFile);
3988
+ labelValue(" GPU Mode", config.docker.gpu ? green("enabled") : dim("disabled"));
3989
+ console.log("");
3990
+ console.log(cyan("Claude:"));
3991
+ if (config.claude.extraArgs.length > 0) {
3992
+ labelValue(" Extra Args", config.claude.extraArgs.join(" "));
3993
+ } else {
3994
+ labelValue(" Extra Args", dim("none"));
3995
+ }
3996
+ console.log("");
3997
+ console.log(dim("─".repeat(40)));
3404
3998
  if (activePath) {
3405
- console.log(`Loaded from: ${activePath}`);
3999
+ console.log(dim(`Loaded from: ${file(activePath)}`));
3406
4000
  } else {
3407
- console.log("Using default configuration (no config file found)");
4001
+ console.log(dim("Using default configuration (no config file found)"));
3408
4002
  }
3409
4003
  }
3410
4004
  async function configPaths() {
3411
4005
  const paths = getConfigSearchPaths();
3412
4006
  const activePath = getActiveConfigPath();
3413
- console.log(`Config file search paths (in priority order):
3414
- `);
3415
- for (const path of paths) {
3416
- const isActive = path === activePath;
3417
- const marker = isActive ? " active" : "";
3418
- console.log(` ${path}${marker}`);
4007
+ header("Config Search Paths");
4008
+ console.log("");
4009
+ console.log(dim("Files are checked in priority order (first found wins):"));
4010
+ console.log("");
4011
+ for (let i = 0; i < paths.length; i++) {
4012
+ const configPath = paths[i];
4013
+ if (!configPath)
4014
+ continue;
4015
+ const isActive = configPath === activePath;
4016
+ const num = `${i + 1}.`;
4017
+ if (isActive) {
4018
+ console.log(` ${num} ${file(configPath)} ${green("← active")}`);
4019
+ } else {
4020
+ console.log(` ${num} ${dim(configPath)}`);
4021
+ }
3419
4022
  }
4023
+ console.log("");
3420
4024
  if (!activePath) {
3421
- console.log(`
3422
- No config file found. Using defaults.`);
3423
- console.log("Run 'loclaude init' to create a project config.");
4025
+ console.log(info("No config file found. Using defaults."));
4026
+ console.log(dim(` Run ${cmd("loclaude init")} to create a project config.`));
3424
4027
  }
3425
4028
  }
3426
4029
  // lib/commands/docker.ts
@@ -3459,42 +4062,44 @@ function getComposeCommand() {
3459
4062
  async function runCompose(args, options = {}) {
3460
4063
  const composeFile = options.file ?? findComposeFile();
3461
4064
  if (!composeFile) {
3462
- console.error("Error: No docker-compose.yml found");
3463
- console.error("Run 'loclaude init' to create one, or specify --file");
4065
+ console.log(error("No docker-compose.yml found"));
4066
+ console.log(dim(` Run ${cmd("loclaude init")} to create one, or specify --file`));
3464
4067
  return 1;
3465
4068
  }
3466
- const cmd = [...getComposeCommand(), "-f", composeFile, ...args];
3467
- return spawn(cmd);
4069
+ const cmd_args = [...getComposeCommand(), "-f", composeFile, ...args];
4070
+ return spawn(cmd_args);
3468
4071
  }
3469
4072
  async function dockerUp(options = {}) {
3470
4073
  const args = ["up"];
3471
4074
  if (options.detach !== false) {
3472
4075
  args.push("-d");
3473
4076
  }
3474
- console.log(`Starting containers...
3475
- `);
4077
+ console.log(info("Starting containers..."));
4078
+ console.log("");
3476
4079
  const exitCode = await runCompose(args, options);
3477
4080
  if (exitCode === 0) {
3478
- console.log(`
3479
- Containers started`);
3480
- console.log(`
3481
- Service URLs:`);
3482
- console.log(" Ollama API: http://localhost:11434");
3483
- console.log(" Open WebUI: http://localhost:3000");
4081
+ console.log("");
4082
+ console.log(success("Containers started"));
4083
+ console.log("");
4084
+ console.log(cyan("Service URLs:"));
4085
+ console.log(` Ollama API: ${url("http://localhost:11434")}`);
4086
+ console.log(` Open WebUI: ${url("http://localhost:3000")}`);
3484
4087
  }
3485
4088
  process.exit(exitCode);
3486
4089
  }
3487
4090
  async function dockerDown(options = {}) {
3488
- console.log(`Stopping containers...
3489
- `);
4091
+ console.log(info("Stopping containers..."));
4092
+ console.log("");
3490
4093
  const exitCode = await runCompose(["down"], options);
3491
4094
  if (exitCode === 0) {
3492
- console.log(`
3493
- Containers stopped`);
4095
+ console.log("");
4096
+ console.log(success("Containers stopped"));
3494
4097
  }
3495
4098
  process.exit(exitCode);
3496
4099
  }
3497
4100
  async function dockerStatus(options = {}) {
4101
+ console.log(info("Container status:"));
4102
+ console.log("");
3498
4103
  const exitCode = await runCompose(["ps"], options);
3499
4104
  process.exit(exitCode);
3500
4105
  }
@@ -3505,17 +4110,21 @@ async function dockerLogs(options = {}) {
3505
4110
  }
3506
4111
  if (options.service) {
3507
4112
  args.push(options.service);
4113
+ console.log(info(`Logs for ${cyan(options.service)}:`));
4114
+ } else {
4115
+ console.log(info("Container logs:"));
3508
4116
  }
4117
+ console.log("");
3509
4118
  const exitCode = await runCompose(args, options);
3510
4119
  process.exit(exitCode);
3511
4120
  }
3512
4121
  async function dockerRestart(options = {}) {
3513
- console.log(`Restarting containers...
3514
- `);
4122
+ console.log(info("Restarting containers..."));
4123
+ console.log("");
3515
4124
  const exitCode = await runCompose(["restart"], options);
3516
4125
  if (exitCode === 0) {
3517
- console.log(`
3518
- Containers restarted`);
4126
+ console.log("");
4127
+ console.log(success("Containers restarted"));
3519
4128
  }
3520
4129
  process.exit(exitCode);
3521
4130
  }
@@ -3532,11 +4141,11 @@ async function fetchModels() {
3532
4141
  }
3533
4142
  const data = await response.json();
3534
4143
  return data.models ?? [];
3535
- } catch (error) {
3536
- if (error instanceof Error && error.name === "TimeoutError") {
4144
+ } catch (error3) {
4145
+ if (error3 instanceof Error && error3.name === "TimeoutError") {
3537
4146
  throw new Error(`Connection to Ollama timed out (${ollamaUrl})`);
3538
4147
  }
3539
- throw error;
4148
+ throw error3;
3540
4149
  }
3541
4150
  }
3542
4151
  async function isOllamaInDocker() {
@@ -3551,83 +4160,99 @@ async function runOllamaCommand(args) {
3551
4160
  return spawn(["ollama", ...args]);
3552
4161
  }
3553
4162
  }
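Only the bare-metal fallback of runOllamaCommand() (spawn(["ollama", ...args])) survives in this hunk; the Docker branch is elided. A hedged sketch of the likely shape, assuming the container is named "ollama" as in the `docker exec ollama nvidia-smi` task from the mise.toml template:

```ts
// Sketch only: the real runOllamaCommand()/isOllamaInDocker() pair lives
// elsewhere in the bundle. The "ollama" container name is an assumption
// taken from the mise.toml gpu task.
import { spawnSync } from "node:child_process";

function runOllamaCommand(args: string[], inDocker: boolean): number {
  const argv = inDocker
    ? ["docker", "exec", "-it", "ollama", "ollama", ...args]
    : ["ollama", ...args];
  const result = spawnSync(argv[0]!, argv.slice(1), { stdio: "inherit" });
  return result.status ?? 1;
}
```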
4163
+ function formatSize(sizeBytes) {
4164
+ const sizeStr = import_bytes2.default(sizeBytes) ?? "?";
4165
+ const sizeNum = sizeBytes / (1024 * 1024 * 1024);
4166
+ if (sizeNum > 20) {
4167
+ return yellow(sizeStr);
4168
+ } else if (sizeNum > 10) {
4169
+ return cyan(sizeStr);
4170
+ }
4171
+ return dim(sizeStr);
4172
+ }
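One caveat in formatSize(): the caller below pads the already-colored string (formatSize(model.size).padStart(sizeWidth)), and ANSI escape codes count toward String.length, so the SIZE column loses its right-alignment whenever color is enabled. A minimal sketch of the pad-then-color ordering that avoids this, with illustrative names:

```ts
// Pad the plain text first, then apply color; ANSI escapes inflate
// String.length and would otherwise swallow the padding.
const yellow = (s: string) => `\x1B[33m${s}\x1B[39m`;

function padThenColor(raw: string, width: number): string {
  return yellow(raw.padStart(width)); // e.g. "      16GB" before coloring
}

console.log(padThenColor("16GB", 10)); // stays right-aligned in the column
```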
3554
4173
  async function modelsList() {
3555
4174
  try {
3556
4175
  const models = await fetchModels();
3557
4176
  if (models.length === 0) {
3558
- console.log("No models installed.");
3559
- console.log(`
3560
- Pull a model with: loclaude models-pull <model-name>`);
3561
- console.log("Example: loclaude models-pull llama3.2");
4177
+ header("Installed Models");
4178
+ console.log("");
4179
+ console.log(info("No models installed."));
4180
+ console.log("");
4181
+ console.log(`Pull a model with: ${cmd("loclaude models-pull <model-name>")}`);
4182
+ console.log(`Example: ${cmd("loclaude models-pull llama3.2")}`);
3562
4183
  return;
3563
4184
  }
3564
- console.log(`Installed models:
3565
- `);
4185
+ header("Installed Models");
4186
+ console.log("");
3566
4187
  const nameWidth = Math.max(...models.map((m) => m.name.length), "NAME".length);
3567
4188
  const sizeWidth = 10;
3568
- console.log(`${"NAME".padEnd(nameWidth)} ${"SIZE".padStart(sizeWidth)} MODIFIED`);
3569
- console.log("-".repeat(nameWidth + sizeWidth + 30));
4189
+ const modifiedWidth = 20;
4190
+ tableHeader(["NAME", "SIZE", "MODIFIED"], [nameWidth, sizeWidth, modifiedWidth]);
3570
4191
  for (const model of models) {
3571
- const name = model.name.padEnd(nameWidth);
3572
- const size = (import_bytes2.default(model.size) ?? "?").padStart(sizeWidth);
3573
- const modified = formatRelativeTime(model.modified_at);
4192
+ const name = magenta(model.name.padEnd(nameWidth));
4193
+ const size = formatSize(model.size).padStart(sizeWidth);
4194
+ const modified = dim(formatRelativeTime(model.modified_at));
3574
4195
  console.log(`${name} ${size} ${modified}`);
3575
4196
  }
3576
- console.log(`
3577
- ${models.length} model(s) installed`);
3578
- } catch (error) {
4197
+ console.log("");
4198
+ console.log(dim(`${models.length} model(s) installed`));
4199
+ } catch (err) {
3579
4200
  const ollamaUrl = getOllamaUrl();
3580
- console.error("Error: Could not connect to Ollama at", ollamaUrl);
3581
- console.error("Make sure Ollama is running: loclaude docker-up");
4201
+ console.log(error(`Could not connect to Ollama at ${ollamaUrl}`));
4202
+ console.log(dim(` Make sure Ollama is running: ${cmd("loclaude docker-up")}`));
3582
4203
  process.exit(1);
3583
4204
  }
3584
4205
  }
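modelsList() now delegates its header to a tableHeader(labels, widths) helper defined elsewhere in the bundle. A plausible minimal sketch, modeled on the inline version it replaced (padded labels plus a dashed divider; the removed header right-aligned SIZE, while this sketch pads uniformly for brevity). The exact signature is an assumption:

```ts
// Plausible sketch of tableHeader(); the real helper is not in this hunk.
// Pads each label to its column width and underlines the row.
function tableHeader(labels: string[], widths: number[]): void {
  const row = labels
    .map((label, i) => label.padEnd(widths[i] ?? label.length))
    .join(" ");
  console.log(row);
  console.log("-".repeat(row.length));
}

tableHeader(["NAME", "SIZE", "MODIFIED"], [24, 10, 20]);
```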
3585
4206
  async function modelsPull(modelName) {
3586
4207
  if (!modelName) {
3587
- console.error("Error: Model name required");
3588
- console.error("Usage: loclaude models pull <model-name>");
3589
- console.error("Example: loclaude models pull llama3.2");
4208
+ console.log(error("Model name required"));
4209
+ console.log(dim(`Usage: ${cmd("loclaude models-pull <model-name>")}`));
4210
+ console.log(dim(`Example: ${cmd("loclaude models-pull llama3.2")}`));
3590
4211
  process.exit(1);
3591
4212
  }
3592
- console.log(`Pulling model: ${modelName}
3593
- `);
4213
+ console.log(info(`Pulling model: ${magenta(modelName)}`));
4214
+ console.log("");
3594
4215
  const exitCode = await runOllamaCommand(["pull", modelName]);
3595
4216
  if (exitCode === 0) {
3596
- console.log(`
3597
- Model '${modelName}' pulled successfully`);
4217
+ console.log("");
4218
+ console.log(success(`Model '${magenta(modelName)}' pulled successfully`));
3598
4219
  }
3599
4220
  process.exit(exitCode);
3600
4221
  }
3601
4222
  async function modelsRm(modelName) {
3602
4223
  if (!modelName) {
3603
- console.error("Error: Model name required");
3604
- console.error("Usage: loclaude models rm <model-name>");
4224
+ console.log(error("Model name required"));
4225
+ console.log(dim(`Usage: ${cmd("loclaude models-rm <model-name>")}`));
3605
4226
  process.exit(1);
3606
4227
  }
3607
- console.log(`Removing model: ${modelName}
3608
- `);
4228
+ console.log(info(`Removing model: ${magenta(modelName)}`));
4229
+ console.log("");
3609
4230
  const exitCode = await runOllamaCommand(["rm", modelName]);
3610
4231
  if (exitCode === 0) {
3611
- console.log(`
3612
- Model '${modelName}' removed`);
4232
+ console.log("");
4233
+ console.log(success(`Model '${magenta(modelName)}' removed`));
3613
4234
  }
3614
4235
  process.exit(exitCode);
3615
4236
  }
3616
4237
  async function modelsShow(modelName) {
3617
4238
  if (!modelName) {
3618
- console.error("Error: Model name required");
3619
- console.error("Usage: loclaude models show <model-name>");
4239
+ console.log(error("Model name required"));
4240
+ console.log(dim(`Usage: ${cmd("loclaude models-show <model-name>")}`));
3620
4241
  process.exit(1);
3621
4242
  }
4243
+ console.log(info(`Model details: ${magenta(modelName)}`));
4244
+ console.log("");
3622
4245
  const exitCode = await runOllamaCommand(["show", modelName]);
3623
4246
  process.exit(exitCode);
3624
4247
  }
3625
4248
  async function modelsRun(modelName) {
3626
4249
  if (!modelName) {
3627
- console.error("Error: Model name required");
3628
- console.error("Usage: loclaude models run <model-name>");
4250
+ console.log(error("Model name required"));
4251
+ console.log(dim(`Usage: ${cmd("loclaude models-run <model-name>")}`));
3629
4252
  process.exit(1);
3630
4253
  }
4254
+ console.log(info(`Running model: ${magenta(modelName)}`));
4255
+ console.log("");
3631
4256
  const exitCode = await runOllamaCommand(["run", modelName]);
3632
4257
  process.exit(exitCode);
3633
4258
  }
@@ -3667,7 +4292,7 @@ cli.command("run [...args]", "Run Claude Code with local Ollama", {
3667
4292
  }
3668
4293
  await launchClaude(model, args);
3669
4294
  });
3670
- cli.command("init", "Initialize a new loclaude project").option("--force", "Overwrite existing files").option("--no-webui", "Skip Open WebUI in docker-compose").action(async (options) => {
4295
+ cli.command("init", "Initialize a new loclaude project").option("--force", "Overwrite existing files").option("--no-webui", "Skip Open WebUI in docker-compose").option("--gpu", "Force GPU mode (NVIDIA)").option("--no-gpu", "Force CPU-only mode").action(async (options) => {
3671
4296
  await init(options);
3672
4297
  });
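The new --gpu/--no-gpu pair is consumed by init() as a tri-state: options.gpu === false forces CPU mode, === true forces GPU mode, and anything else falls through to auto-detection. Distilled below with illustrative names (the exact parser semantics depend on the CLI library):

```ts
// Tri-state flag resolution mirroring init():
// gpu === false forces CPU, gpu === true forces GPU,
// anything else (e.g. flag omitted) falls back to auto-detection.
async function resolveGpuMode(
  flag: boolean | undefined,
  autodetect: () => Promise<boolean>,
): Promise<boolean> {
  if (flag === false) return false; // --no-gpu: force CPU-only mode
  if (flag === true) return true;   // --gpu: force GPU mode
  return autodetect();              // unset: probe for an NVIDIA GPU
}
```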
3673
4298
  cli.command("doctor", "Check system requirements and health").action(async () => {
@@ -3723,5 +4348,5 @@ export {
3723
4348
  cli
3724
4349
  };
3725
4350
 
3726
- //# debugId=79E8E47212AC1C8E64756E2164756E21
4351
+ //# debugId=69EBCABA5DC262EB64756E2164756E21
3727
4352
  //# sourceMappingURL=index.js.map