loclaude 0.0.1-alpha.2 → 0.0.2

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -270,6 +270,76 @@ var require_bytes = __commonJS((exports, module) => {
  }
  });
 
+ // ../../node_modules/.bun/picocolors@1.1.1/node_modules/picocolors/picocolors.js
+ var require_picocolors = __commonJS((exports, module) => {
+ var p = process || {};
+ var argv = p.argv || [];
+ var env = p.env || {};
+ var isColorSupported = !(!!env.NO_COLOR || argv.includes("--no-color")) && (!!env.FORCE_COLOR || argv.includes("--color") || p.platform === "win32" || (p.stdout || {}).isTTY && env.TERM !== "dumb" || !!env.CI);
+ var formatter = (open, close, replace = open) => (input) => {
+ let string = "" + input, index = string.indexOf(close, open.length);
+ return ~index ? open + replaceClose(string, close, replace, index) + close : open + string + close;
+ };
+ var replaceClose = (string, close, replace, index) => {
+ let result = "", cursor = 0;
+ do {
+ result += string.substring(cursor, index) + replace;
+ cursor = index + close.length;
+ index = string.indexOf(close, cursor);
+ } while (~index);
+ return result + string.substring(cursor);
+ };
+ var createColors = (enabled = isColorSupported) => {
+ let f = enabled ? formatter : () => String;
+ return {
+ isColorSupported: enabled,
+ reset: f("\x1B[0m", "\x1B[0m"),
+ bold: f("\x1B[1m", "\x1B[22m", "\x1B[22m\x1B[1m"),
+ dim: f("\x1B[2m", "\x1B[22m", "\x1B[22m\x1B[2m"),
+ italic: f("\x1B[3m", "\x1B[23m"),
+ underline: f("\x1B[4m", "\x1B[24m"),
+ inverse: f("\x1B[7m", "\x1B[27m"),
+ hidden: f("\x1B[8m", "\x1B[28m"),
+ strikethrough: f("\x1B[9m", "\x1B[29m"),
+ black: f("\x1B[30m", "\x1B[39m"),
+ red: f("\x1B[31m", "\x1B[39m"),
+ green: f("\x1B[32m", "\x1B[39m"),
+ yellow: f("\x1B[33m", "\x1B[39m"),
+ blue: f("\x1B[34m", "\x1B[39m"),
+ magenta: f("\x1B[35m", "\x1B[39m"),
+ cyan: f("\x1B[36m", "\x1B[39m"),
+ white: f("\x1B[37m", "\x1B[39m"),
+ gray: f("\x1B[90m", "\x1B[39m"),
+ bgBlack: f("\x1B[40m", "\x1B[49m"),
+ bgRed: f("\x1B[41m", "\x1B[49m"),
+ bgGreen: f("\x1B[42m", "\x1B[49m"),
+ bgYellow: f("\x1B[43m", "\x1B[49m"),
+ bgBlue: f("\x1B[44m", "\x1B[49m"),
+ bgMagenta: f("\x1B[45m", "\x1B[49m"),
+ bgCyan: f("\x1B[46m", "\x1B[49m"),
+ bgWhite: f("\x1B[47m", "\x1B[49m"),
+ blackBright: f("\x1B[90m", "\x1B[39m"),
+ redBright: f("\x1B[91m", "\x1B[39m"),
+ greenBright: f("\x1B[92m", "\x1B[39m"),
+ yellowBright: f("\x1B[93m", "\x1B[39m"),
+ blueBright: f("\x1B[94m", "\x1B[39m"),
+ magentaBright: f("\x1B[95m", "\x1B[39m"),
+ cyanBright: f("\x1B[96m", "\x1B[39m"),
+ whiteBright: f("\x1B[97m", "\x1B[39m"),
+ bgBlackBright: f("\x1B[100m", "\x1B[49m"),
+ bgRedBright: f("\x1B[101m", "\x1B[49m"),
+ bgGreenBright: f("\x1B[102m", "\x1B[49m"),
+ bgYellowBright: f("\x1B[103m", "\x1B[49m"),
+ bgBlueBright: f("\x1B[104m", "\x1B[49m"),
+ bgMagentaBright: f("\x1B[105m", "\x1B[49m"),
+ bgCyanBright: f("\x1B[106m", "\x1B[49m"),
+ bgWhiteBright: f("\x1B[107m", "\x1B[49m")
+ };
+ };
+ module.exports = createColors();
+ module.exports.createColors = createColors;
+ });
+
  // ../../node_modules/.bun/cac@6.7.14/node_modules/cac/dist/index.mjs
  import { EventEmitter } from "events";
  function toArr(any) {
@@ -1012,13 +1082,13 @@ function getClaudeExtraArgs() {
  var OLLAMA_URL = getOllamaUrl();
  var DEFAULT_MODEL = getDefaultModel();
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/key.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/key.js
  var isUpKey = (key, keybindings = []) => key.name === "up" || keybindings.includes("vim") && key.name === "k" || keybindings.includes("emacs") && key.ctrl && key.name === "p";
  var isDownKey = (key, keybindings = []) => key.name === "down" || keybindings.includes("vim") && key.name === "j" || keybindings.includes("emacs") && key.ctrl && key.name === "n";
  var isBackspaceKey = (key) => key.name === "backspace";
  var isNumberKey = (key) => "1234567890".includes(key.name);
  var isEnterKey = (key) => key.name === "enter" || key.name === "return";
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/errors.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/errors.js
  class AbortPromptError extends Error {
  name = "AbortPromptError";
  message = "Prompt was aborted";
@@ -1044,10 +1114,10 @@ class HookError extends Error {
  class ValidationError extends Error {
  name = "ValidationError";
  }
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/use-state.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/use-state.js
  import { AsyncResource as AsyncResource2 } from "async_hooks";
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/hook-engine.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/hook-engine.js
  import { AsyncLocalStorage, AsyncResource } from "async_hooks";
  var hookStorage = new AsyncLocalStorage;
  function createStore(rl) {
@@ -1152,7 +1222,7 @@ var effectScheduler = {
  }
  };
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/use-state.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/use-state.js
  function useState(defaultValue) {
  return withPointer((pointer) => {
  const setState = AsyncResource2.bind(function setState(newValue) {
@@ -1170,7 +1240,7 @@ function useState(defaultValue) {
  });
  }
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/use-effect.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/use-effect.js
  function useEffect(cb, depArray) {
  withPointer((pointer) => {
  const oldDeps = pointer.get();
@@ -1182,7 +1252,7 @@ function useEffect(cb, depArray) {
  });
  }
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/theme.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/theme.js
  import { styleText } from "util";
 
  // ../../node_modules/.bun/@inquirer+figures@2.0.3/node_modules/@inquirer/figures/dist/index.js
@@ -1474,7 +1544,7 @@ var figures = shouldUseMain ? mainSymbols : fallbackSymbols;
  var dist_default2 = figures;
  var replacements = Object.entries(specialMainSymbols);
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/theme.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/theme.js
  var defaultTheme = {
  prefix: {
  idle: styleText("blue", "?"),
@@ -1495,7 +1565,7 @@ var defaultTheme = {
  }
  };
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/make-theme.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/make-theme.js
  function isPlainObject(value) {
  if (typeof value !== "object" || value === null)
  return false;
@@ -1523,7 +1593,7 @@ function makeTheme(...themes) {
  return deepMerge2(...themesToMerge);
  }
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/use-prefix.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/use-prefix.js
  function usePrefix({ status = "idle", theme }) {
  const [showLoader, setShowLoader] = useState(false);
  const [tick, setTick] = useState(0);
@@ -1553,7 +1623,7 @@ function usePrefix({ status = "idle", theme }) {
  const iconName = status === "loading" ? "idle" : status;
  return typeof prefix === "string" ? prefix : prefix[iconName] ?? prefix["idle"];
  }
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/use-memo.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/use-memo.js
  function useMemo(fn, dependencies) {
  return withPointer((pointer) => {
  const prev = pointer.get();
@@ -1565,11 +1635,11 @@ function useMemo(fn, dependencies) {
  return prev.value;
  });
  }
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/use-ref.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/use-ref.js
  function useRef(val) {
  return useState({ current: val })[0];
  }
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/use-keypress.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/use-keypress.js
  function useKeypress(userHandler) {
  const signal = useRef(userHandler);
  signal.current = userHandler;
@@ -1587,7 +1657,7 @@ function useKeypress(userHandler) {
  };
  }, []);
  }
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/utils.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/utils.js
  var import_cli_width = __toESM(require_cli_width(), 1);
 
  // ../../node_modules/.bun/ansi-regex@6.2.2/node_modules/ansi-regex/index.js
@@ -2025,7 +2095,7 @@ function wrapAnsi(string, columns, options) {
  `);
  }
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/utils.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/utils.js
  function breakLines(content, width) {
  return content.split(`
  `).flatMap((line) => wrapAnsi(line, width, { trim: false, hard: true }).split(`
@@ -2036,7 +2106,7 @@ function readlineWidth() {
  return import_cli_width.default({ defaultWidth: 80, output: readline().output });
  }
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/pagination/use-pagination.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/pagination/use-pagination.js
  function usePointerPosition({ active, renderedItems, pageSize, loop }) {
  const state = useRef({
  lastPointer: active,
@@ -2102,7 +2172,7 @@ function usePagination({ items, active, renderItem, pageSize, loop = true }) {
  return pageBuffer.filter((line) => typeof line === "string").join(`
  `);
  }
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/create-prompt.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/create-prompt.js
  var import_mute_stream = __toESM(require_lib(), 1);
  import * as readline2 from "readline";
  import { AsyncResource as AsyncResource3 } from "async_hooks";
@@ -2315,7 +2385,7 @@ var {
  unload
  } = signalExitWrap(processOk(process3) ? new SignalExit(process3) : new SignalExitFallback);
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/screen-manager.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/screen-manager.js
  import { stripVTControlCharacters } from "util";
 
  // ../../node_modules/.bun/@inquirer+ansi@2.0.3/node_modules/@inquirer/ansi/dist/index.js
@@ -2334,7 +2404,7 @@ var cursorTo = (x, y) => {
  var eraseLine = ESC + "2K";
  var eraseLines = (lines) => lines > 0 ? (eraseLine + cursorUp(1)).repeat(lines - 1) + eraseLine + cursorLeft : "";
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/screen-manager.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/screen-manager.js
  var height = (content) => content.split(`
  `).length;
  var lastLine = (content) => content.split(`
@@ -2399,7 +2469,7 @@ class ScreenManager {
  }
  }
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/promise-polyfill.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/promise-polyfill.js
  class PromisePolyfill extends Promise {
  static withResolver() {
  let resolve;
@@ -2412,7 +2482,7 @@ class PromisePolyfill extends Promise {
  }
  }
 
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/create-prompt.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/create-prompt.js
  function getCallSites() {
  const _prepareStackTrace = Error.prepareStackTrace;
  let result = [];
@@ -2498,7 +2568,7 @@ function createPrompt(view) {
  };
  return prompt;
  }
- // ../../node_modules/.bun/@inquirer+core@11.1.1+c195ea72dffa657e/node_modules/@inquirer/core/dist/lib/Separator.js
+ // ../../node_modules/.bun/@inquirer+core@11.1.1+b219b5910764fa5c/node_modules/@inquirer/core/dist/lib/Separator.js
  import { styleText as styleText2 } from "util";
  class Separator {
  separator = styleText2("dim", Array.from({ length: 15 }).join(dist_default2.line));
@@ -2512,7 +2582,7 @@ class Separator {
  return Boolean(choice && typeof choice === "object" && "type" in choice && choice.type === "separator");
  }
  }
- // ../../node_modules/.bun/@inquirer+select@5.0.4+c195ea72dffa657e/node_modules/@inquirer/select/dist/index.js
+ // ../../node_modules/.bun/@inquirer+select@5.0.4+b219b5910764fa5c/node_modules/@inquirer/select/dist/index.js
  import { styleText as styleText3 } from "util";
  var selectTheme = {
  icon: { cursor: dist_default2.pointer },
@@ -2671,15 +2741,67 @@ var dist_default3 = createPrompt((config, done) => {
  // lib/utils.ts
  var import_bytes = __toESM(require_bytes(), 1);
 
+ // lib/output.ts
+ var import_picocolors = __toESM(require_picocolors(), 1);
+ var brand = (text) => import_picocolors.default.cyan(import_picocolors.default.bold(text));
+ var success = (text) => `${import_picocolors.default.green("\u2713")} ${text}`;
+ var warn = (text) => `${import_picocolors.default.yellow("\u26A0")} ${text}`;
+ var error = (text) => `${import_picocolors.default.red("\u2717")} ${text}`;
+ var info = (text) => `${import_picocolors.default.cyan("\u2139")} ${text}`;
+ var dim = (text) => import_picocolors.default.dim(text);
+ var green = (text) => import_picocolors.default.green(text);
+ var yellow = (text) => import_picocolors.default.yellow(text);
+ var red = (text) => import_picocolors.default.red(text);
+ var cyan = (text) => import_picocolors.default.cyan(text);
+ var magenta = (text) => import_picocolors.default.magenta(text);
+ function header(text) {
+ console.log("");
+ console.log(brand(` ${text}`));
+ console.log(import_picocolors.default.dim(" " + "\u2500".repeat(text.length + 2)));
+ }
+ function labelValue(label, value) {
+ console.log(` ${import_picocolors.default.dim(label + ":")} ${value}`);
+ }
+ function statusLine(status, name, message, extra) {
+ const icons = { ok: "\u2713", warning: "\u26A0", error: "\u2717" };
+ const colors = { ok: import_picocolors.default.green, warning: import_picocolors.default.yellow, error: import_picocolors.default.red };
+ let line = `${colors[status](icons[status])} ${name}: ${message}`;
+ if (extra) {
+ line += ` ${import_picocolors.default.dim(`(${extra})`)}`;
+ }
+ return line;
+ }
+ function tableRow(columns, widths) {
+ return columns.map((col, i) => {
+ const width = widths[i] || col.length;
+ return col.padEnd(width);
+ }).join(" ");
+ }
+ function tableHeader(columns, widths) {
+ const headerRow = tableRow(columns.map((c) => import_picocolors.default.bold(c)), widths);
+ const underlineRow = widths.map((w) => "\u2500".repeat(w)).join(" ");
+ console.log(headerRow);
+ console.log(import_picocolors.default.dim(underlineRow));
+ }
+ function url(urlStr) {
+ return import_picocolors.default.underline(import_picocolors.default.cyan(urlStr));
+ }
+ function cmd(command) {
+ return import_picocolors.default.cyan(command);
+ }
+ function file(filePath) {
+ return import_picocolors.default.magenta(filePath);
+ }
+
  // lib/spawn.ts
- async function spawn(cmd, opts = {}) {
- const command = cmd[0];
- const args = cmd.slice(1);
+ async function spawn(cmd2, opts = {}) {
+ const command = cmd2[0];
+ const args = cmd2.slice(1);
  if (command === undefined) {
  throw new Error("No command provided");
  }
  if (typeof Bun !== "undefined") {
- const proc = Bun.spawn(cmd, {
+ const proc = Bun.spawn(cmd2, {
  env: opts.env ?? process.env,
  cwd: opts.cwd ?? process.cwd(),
  stdin: opts.stdin ?? "inherit",
@@ -2699,14 +2821,14 @@ async function spawn(cmd, opts = {}) {
  });
  }
  }
- async function spawnCapture(cmd, opts = {}) {
- const command = cmd[0];
- const args = cmd.slice(1);
+ async function spawnCapture(cmd2, opts = {}) {
+ const command = cmd2[0];
+ const args = cmd2.slice(1);
  if (command === undefined) {
  throw new Error("No command provided");
  }
  if (typeof Bun !== "undefined") {
- const proc = Bun.spawn(cmd, {
+ const proc = Bun.spawn(cmd2, {
  env: opts.env ?? process.env,
  cwd: opts.cwd,
  stdin: opts.stdin ?? "ignore",
@@ -2741,17 +2863,17 @@ async function spawnCapture(cmd, opts = {}) {
  });
  }
  }
- async function commandExists(cmd) {
+ async function commandExists(cmd2) {
  try {
- const result = await spawnCapture(process.platform === "win32" ? ["where", cmd] : ["which", cmd]);
+ const result = await spawnCapture(process.platform === "win32" ? ["where", cmd2] : ["which", cmd2]);
  return result.exitCode === 0;
  } catch {
  return false;
  }
  }
- async function getCommandVersion(cmd) {
+ async function getCommandVersion(cmd2) {
  try {
- const result = await spawnCapture([cmd, "--version"]);
+ const result = await spawnCapture([cmd2, "--version"]);
  if (result.exitCode === 0 && result.stdout) {
  return result.stdout.trim().split(`
  `)[0] ?? null;
@@ -2772,33 +2894,100 @@ async function fetchOllamaModels() {
  const data = await response.json();
  return data.models ?? [];
  }
+ async function fetchRunningModels() {
+ const ollamaUrl = getOllamaUrl();
+ try {
+ const response = await fetch(`${ollamaUrl}/api/ps`, {
+ signal: AbortSignal.timeout(5000)
+ });
+ if (!response.ok) {
+ return [];
+ }
+ const data = await response.json();
+ return data.models ?? [];
+ } catch (error2) {
+ return [];
+ }
+ }
+ async function isModelLoaded(modelName) {
+ const runningModels = await fetchRunningModels();
+ return runningModels.some((m) => m.model === modelName || m.name === modelName || m.model.startsWith(modelName + ":") || modelName.startsWith(m.model));
+ }
+ async function loadModel(modelName, keepAlive = "10m") {
+ const ollamaUrl = getOllamaUrl();
+ const response = await fetch(`${ollamaUrl}/api/generate`, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ model: modelName,
+ prompt: "",
+ stream: false,
+ keep_alive: keepAlive
+ })
+ });
+ if (!response.ok) {
+ throw new Error(`Failed to load model: ${response.statusText}`);
+ }
+ await response.json();
+ }
+ async function ensureModelLoaded(modelName) {
+ const isLoaded = await isModelLoaded(modelName);
+ if (isLoaded) {
+ console.log(dim(` Model ${magenta(modelName)} is already loaded`));
+ return;
+ }
+ console.log(info(`Loading model ${magenta(modelName)}...`));
+ console.log(dim(" This may take a moment on first run"));
+ try {
+ await loadModel(modelName, "10m");
+ console.log(success(`Model ${magenta(modelName)} loaded (keep_alive: 10m)`));
+ } catch (error2) {
+ console.log(warn(`Could not pre-load model (will load on first request)`));
+ console.log(dim(` ${error2 instanceof Error ? error2.message : "Unknown error"}`));
+ }
+ }
  async function selectModelInteractively() {
  const ollamaUrl = getOllamaUrl();
  let models;
  try {
  models = await fetchOllamaModels();
- } catch (error) {
- console.error("Error: Could not connect to Ollama at", ollamaUrl);
- console.error("Make sure Ollama is running: loclaude docker-up");
+ } catch (error2) {
+ console.log(warn(`Could not connect to Ollama at ${ollamaUrl}`));
+ console.log(dim(" Make sure Ollama is running: loclaude docker-up"));
  process.exit(1);
  }
  if (models.length === 0) {
- console.error("Error: No models found in Ollama.");
- console.error("Pull a model first: loclaude models-pull <model-name>");
+ console.log(warn("No models found in Ollama."));
+ console.log(dim(" Pull a model first: loclaude models-pull <model-name>"));
  process.exit(1);
  }
+ const runningModels = await fetchRunningModels();
+ const loadedModelNames = new Set(runningModels.map((m) => m.model));
  const selected = await dist_default3({
  message: "Select a model",
- choices: models.map((model) => ({
- name: `${model.name} (${import_bytes.default(model.size)})`,
- value: model.name
- }))
+ choices: models.map((model) => {
+ const isLoaded = loadedModelNames.has(model.name);
+ const loadedIndicator = isLoaded ? " [loaded]" : "";
+ return {
+ name: `${model.name} (${import_bytes.default(model.size)})${loadedIndicator}`,
+ value: model.name
+ };
+ })
  });
  return selected;
  }
  async function launchClaude(model, passthroughArgs) {
  const ollamaUrl = getOllamaUrl();
  const extraArgs = getClaudeExtraArgs();
+ console.log("");
+ console.log(cyan("Launching Claude Code with Ollama"));
+ console.log(dim(` Model: ${magenta(model)}`));
+ console.log(dim(` API: ${ollamaUrl}`));
+ console.log("");
+ await ensureModelLoaded(model);
+ console.log("");
  const env = {
  ...process.env,
  ANTHROPIC_AUTH_TOKEN: "ollama",
@@ -2812,213 +3001,753 @@ async function launchClaude(model, passthroughArgs) {
2812
3001
  // lib/commands/init.ts
2813
3002
  import { existsSync as existsSync2, mkdirSync, writeFileSync, readFileSync as readFileSync2 } from "fs";
2814
3003
  import { join as join2 } from "path";
2815
- var DOCKER_COMPOSE_TEMPLATE = `services:
2816
- ollama:
2817
- image: ollama/ollama:latest
2818
- container_name: ollama
2819
- runtime: nvidia
2820
- environment:
2821
- - NVIDIA_VISIBLE_DEVICES=all
2822
- - NVIDIA_DRIVER_CAPABILITIES=compute,utility
2823
- volumes:
2824
- - ./models:/root/.ollama
2825
- ports:
2826
- - "11434:11434"
2827
- restart: unless-stopped
2828
- healthcheck:
2829
- test: ["CMD", "ollama", "list"]
2830
- interval: 300s
2831
- timeout: 2s
2832
- retries: 3
2833
- start_period: 40s
2834
- deploy:
2835
- resources:
2836
- reservations:
2837
- devices:
2838
- - driver: nvidia
2839
- count: all
2840
- capabilities: [gpu]
2841
-
2842
- open-webui:
2843
- image: ghcr.io/open-webui/open-webui:cuda
2844
- container_name: open-webui
2845
- ports:
2846
- - "3000:8080"
2847
- environment:
2848
- - OLLAMA_BASE_URL=http://ollama:11434
2849
- depends_on:
2850
- - ollama
2851
- restart: unless-stopped
2852
- healthcheck:
2853
- test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
2854
- interval: 30s
2855
- timeout: 10s
2856
- retries: 3
2857
- start_period: 60s
2858
- volumes:
2859
- - open-webui:/app/backend/data
2860
- deploy:
2861
- resources:
2862
- reservations:
2863
- devices:
2864
- - driver: nvidia
2865
- count: all
2866
- capabilities: [gpu]
2867
3004
 
2868
- volumes:
2869
- open-webui:
2870
- `;
2871
- var CONFIG_TEMPLATE = `{
2872
- "ollama": {
2873
- "url": "http://localhost:11434",
2874
- "defaultModel": "qwen3-coder:30b"
2875
- },
2876
- "docker": {
2877
- "composeFile": "./docker-compose.yml",
2878
- "gpu": true
3005
+ // lib/commands/doctor.ts
3006
+ async function checkDocker() {
3007
+ const exists = await commandExists("docker");
3008
+ if (!exists) {
3009
+ return {
3010
+ name: "Docker",
3011
+ status: "error",
3012
+ message: "Not installed",
3013
+ hint: "Install Docker: https://docs.docker.com/get-docker/"
3014
+ };
2879
3015
  }
3016
+ const version = await getCommandVersion("docker");
3017
+ return {
3018
+ name: "Docker",
3019
+ status: "ok",
3020
+ message: "Installed",
3021
+ version: version ?? undefined
3022
+ };
2880
3023
  }
2881
- `;
2882
- var GITIGNORE_TEMPLATE = `# Ollama models (large binary files)
2883
- models/
2884
- `;
2885
- var MISE_TOML_TEMPLATE = `# Mise task runner configuration
2886
- # Run \`mise tasks\` to see all available tasks
2887
- # https://mise.jdx.dev/
2888
-
2889
- [tasks]
2890
-
2891
- # =============================================================================
2892
- # Docker Management
2893
- # =============================================================================
2894
-
2895
- [tasks.up]
2896
- description = "Start Ollama and Open WebUI containers"
2897
- run = "loclaude docker-up"
2898
-
2899
- [tasks.down]
2900
- description = "Stop all containers"
2901
- run = "loclaude docker-down"
2902
-
2903
- [tasks.restart]
2904
- description = "Restart all containers"
2905
- run = "loclaude docker-restart"
2906
-
2907
- [tasks.status]
2908
- description = "Show container status"
2909
- run = "loclaude docker-status"
2910
-
2911
- [tasks.logs]
2912
- description = "Follow container logs"
2913
- run = "loclaude docker-logs --follow"
2914
-
2915
- # =============================================================================
2916
- # Model Management
2917
- # =============================================================================
2918
-
2919
- [tasks.models]
2920
- description = "List installed models"
2921
- run = "loclaude models"
2922
-
2923
- [tasks.pull]
2924
- description = "Pull a model (usage: mise run pull <model-name>)"
2925
- run = "loclaude models-pull {{arg(name='model')}}"
2926
-
2927
- # =============================================================================
2928
- # Claude Code
2929
- # =============================================================================
2930
-
2931
- [tasks.claude]
2932
- description = "Run Claude Code with local Ollama"
2933
- run = "loclaude run"
2934
-
2935
- [tasks."claude:model"]
2936
- description = "Run Claude with specific model (usage: mise run claude:model <model>)"
2937
- run = "loclaude run -m {{arg(name='model')}}"
2938
-
2939
- # =============================================================================
2940
- # Diagnostics
2941
- # =============================================================================
2942
-
2943
- [tasks.doctor]
2944
- description = "Check system requirements"
2945
- run = "loclaude doctor"
2946
-
2947
- [tasks.gpu]
2948
- description = "Check GPU status"
2949
- run = "docker exec ollama nvidia-smi"
2950
- `;
2951
- var README_TEMPLATE = `# Project Name
2952
-
2953
- > Powered by [loclaude](https://github.com/nicholasgalante1997/docker-ollama) - Run Claude Code with local Ollama LLMs
2954
-
2955
- ## Prerequisites
2956
-
2957
- - [Docker](https://docs.docker.com/get-docker/) with Docker Compose v2
2958
- - [NVIDIA GPU](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) with drivers and container toolkit
2959
- - [mise](https://mise.jdx.dev/) task runner (recommended)
2960
- - [loclaude](https://www.npmjs.com/package/loclaude) CLI (\`npm install -g loclaude\`)
2961
-
2962
- ## Quick Start
2963
-
2964
- \`\`\`bash
2965
- # Start the LLM backend (Ollama + Open WebUI)
2966
- mise run up
2967
-
2968
- # Pull a model
2969
- mise run pull qwen3-coder:30b
2970
-
2971
- # Run Claude Code with local LLM
2972
- mise run claude
2973
- \`\`\`
2974
-
2975
- ## Available Commands
2976
-
2977
- Run \`mise tasks\` to see all available commands.
2978
-
2979
- | Command | Description |
2980
- |---------|-------------|
2981
- | \`mise run up\` | Start Ollama and Open WebUI containers |
2982
- | \`mise run down\` | Stop all containers |
2983
- | \`mise run status\` | Show container status |
2984
- | \`mise run logs\` | Follow container logs |
2985
- | \`mise run models\` | List installed models |
2986
- | \`mise run pull <model>\` | Pull a model from Ollama registry |
2987
- | \`mise run claude\` | Run Claude Code with model selection |
2988
- | \`mise run claude:model <model>\` | Run Claude with specific model |
2989
- | \`mise run doctor\` | Check system requirements |
2990
- | \`mise run gpu\` | Check GPU status |
2991
-
2992
- ## Service URLs
2993
-
2994
- | Service | URL | Description |
2995
- |---------|-----|-------------|
2996
- | Ollama API | http://localhost:11434 | LLM inference API |
2997
- | Open WebUI | http://localhost:3000 | Chat interface |
2998
-
2999
- ## Project Structure
3000
-
3001
- \`\`\`
3002
- .
3003
- \u251C\u2500\u2500 .claude/
3004
- \u2502 \u2514\u2500\u2500 CLAUDE.md # Claude Code instructions
3005
- \u251C\u2500\u2500 .loclaude/
3006
- \u2502 \u2514\u2500\u2500 config.json # Loclaude configuration
3007
- \u251C\u2500\u2500 models/ # Ollama model storage (gitignored)
3008
- \u251C\u2500\u2500 docker-compose.yml # Container definitions
3009
- \u251C\u2500\u2500 mise.toml # Task runner configuration
3010
- \u2514\u2500\u2500 README.md
3011
- \`\`\`
3012
-
3013
- ## Configuration
3014
-
3015
- ### Loclaude Config (\`.loclaude/config.json\`)
3016
-
3017
- \`\`\`json
3018
- {
3019
- "ollama": {
3020
- "url": "http://localhost:11434",
3021
- "defaultModel": "qwen3-coder:30b"
3024
+ async function checkDockerCompose() {
3025
+ const result = await spawnCapture(["docker", "compose", "version"]);
3026
+ if (result.exitCode === 0) {
3027
+ const version = result.stdout?.trim().split(`
3028
+ `)[0];
3029
+ return {
3030
+ name: "Docker Compose",
3031
+ status: "ok",
3032
+ message: "Installed (v2)",
3033
+ version: version ?? undefined
3034
+ };
3035
+ }
3036
+ const v1Exists = await commandExists("docker-compose");
3037
+ if (v1Exists) {
3038
+ const version = await getCommandVersion("docker-compose");
3039
+ return {
3040
+ name: "Docker Compose",
3041
+ status: "warning",
3042
+ message: "Using legacy v1",
3043
+ version: version ?? undefined,
3044
+ hint: "Consider upgrading to Docker Compose v2"
3045
+ };
3046
+ }
3047
+ return {
3048
+ name: "Docker Compose",
3049
+ status: "error",
3050
+ message: "Not installed",
3051
+ hint: "Docker Compose is included with Docker Desktop, or install separately"
3052
+ };
3053
+ }
3054
+ async function checkNvidiaSmi() {
3055
+ const exists = await commandExists("nvidia-smi");
3056
+ if (!exists) {
3057
+ return {
3058
+ name: "NVIDIA GPU",
3059
+ status: "warning",
3060
+ message: "nvidia-smi not found",
3061
+ hint: "GPU support requires NVIDIA drivers. CPU-only mode will be used."
3062
+ };
3063
+ }
3064
+ const result = await spawnCapture(["nvidia-smi", "--query-gpu=name", "--format=csv,noheader"]);
3065
+ if (result.exitCode === 0 && result.stdout) {
3066
+ const gpus = result.stdout.trim().split(`
3067
+ `).filter(Boolean);
3068
+ return {
3069
+ name: "NVIDIA GPU",
3070
+ status: "ok",
3071
+ message: `${gpus.length} GPU(s) detected`,
3072
+ version: gpus[0]
3073
+ };
3074
+ }
3075
+ return {
3076
+ name: "NVIDIA GPU",
3077
+ status: "warning",
3078
+ message: "nvidia-smi failed",
3079
+ hint: "GPU may not be available. Check NVIDIA drivers."
3080
+ };
3081
+ }
3082
+ async function checkNvidiaContainerToolkit() {
3083
+ const result = await spawnCapture(["docker", "info", "--format", "{{.Runtimes}}"]);
3084
+ if (result.exitCode === 0 && result.stdout?.includes("nvidia")) {
3085
+ return {
3086
+ name: "NVIDIA Container Toolkit",
3087
+ status: "ok",
3088
+ message: "nvidia runtime available"
3089
+ };
3090
+ }
3091
+ return {
3092
+ name: "NVIDIA Container Toolkit",
3093
+ status: "warning",
3094
+ message: "nvidia runtime not found",
3095
+ hint: "Install: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html"
3096
+ };
3097
+ }
3098
+ async function checkClaude() {
3099
+ const exists = await commandExists("claude");
3100
+ if (!exists) {
3101
+ return {
3102
+ name: "Claude Code",
3103
+ status: "error",
3104
+ message: "Not installed",
3105
+ hint: "Install: npm install -g @anthropic-ai/claude-code"
3106
+ };
3107
+ }
3108
+ const version = await getCommandVersion("claude");
3109
+ return {
3110
+ name: "Claude Code",
3111
+ status: "ok",
3112
+ message: "Installed",
3113
+ version: version ?? undefined
3114
+ };
3115
+ }
3116
+ async function checkOllamaConnection() {
3117
+ const ollamaUrl = getOllamaUrl();
3118
+ try {
3119
+ const response = await fetch(`${ollamaUrl}/api/tags`, {
3120
+ signal: AbortSignal.timeout(5000)
3121
+ });
3122
+ if (response.ok) {
3123
+ const data = await response.json();
3124
+ const modelCount = data.models?.length ?? 0;
3125
+ return {
3126
+ name: "Ollama API",
3127
+ status: "ok",
3128
+ message: `Connected (${modelCount} model${modelCount === 1 ? "" : "s"})`,
3129
+ version: ollamaUrl
3130
+ };
3131
+ }
3132
+ return {
3133
+ name: "Ollama API",
3134
+ status: "warning",
3135
+ message: `HTTP ${response.status}`,
3136
+ hint: "Ollama may not be running. Try: loclaude docker-up"
3137
+ };
3138
+ } catch (error3) {
3139
+ return {
3140
+ name: "Ollama API",
3141
+ status: "warning",
3142
+ message: "Not reachable",
3143
+ hint: `Cannot connect to ${ollamaUrl}. Start Ollama: loclaude docker-up`
3144
+ };
3145
+ }
3146
+ }
3147
+ var MIN_OLLAMA_VERSION = "0.14.2";
3148
+ function parseVersion(version) {
3149
+ const match = version.match(/(\d+)\.(\d+)\.(\d+)/);
3150
+ if (!match || !match[1] || !match[2] || !match[3])
3151
+ return null;
3152
+ return {
3153
+ major: parseInt(match[1], 10),
3154
+ minor: parseInt(match[2], 10),
3155
+ patch: parseInt(match[3], 10)
3156
+ };
3157
+ }
3158
+ function compareVersions(a, b) {
3159
+ const parsedA = parseVersion(a);
3160
+ const parsedB = parseVersion(b);
3161
+ if (!parsedA || !parsedB)
3162
+ return 0;
3163
+ if (parsedA.major !== parsedB.major)
3164
+ return parsedA.major - parsedB.major;
3165
+ if (parsedA.minor !== parsedB.minor)
3166
+ return parsedA.minor - parsedB.minor;
3167
+ return parsedA.patch - parsedB.patch;
3168
+ }
3169
+ async function checkOllamaVersion() {
3170
+ const ollamaUrl = getOllamaUrl();
3171
+ try {
3172
+ const response = await fetch(`${ollamaUrl}/api/version`, {
3173
+ signal: AbortSignal.timeout(5000)
3174
+ });
3175
+ if (!response.ok) {
3176
+ return {
3177
+ name: "Ollama Version",
3178
+ status: "warning",
3179
+ message: "Could not determine version",
3180
+ hint: "Ollama may not be running. Try: loclaude docker-up"
3181
+ };
3182
+ }
3183
+ const data = await response.json();
3184
+ const version = data.version;
3185
+ if (!version) {
3186
+ return {
3187
+ name: "Ollama Version",
3188
+ status: "warning",
3189
+ message: "Unknown version",
3190
+ hint: "Could not parse version from Ollama API"
3191
+ };
3192
+ }
3193
+ const comparison = compareVersions(version, MIN_OLLAMA_VERSION);
3194
+ if (comparison > 0) {
3195
+ return {
3196
+ name: "Ollama Version",
3197
+ status: "ok",
3198
+ message: "Compatible",
3199
+ version
3200
+ };
3201
+ } else if (comparison === 0) {
3202
+ return {
3203
+ name: "Ollama Version",
3204
+ status: "ok",
3205
+ message: "Compatible",
3206
+ version,
3207
+ hint: `Version ${version} is the minimum. Consider upgrading for best compatibility.`
3208
+ };
3209
+ } else {
3210
+ return {
3211
+ name: "Ollama Version",
3212
+ status: "error",
3213
+ message: `Version too old (requires > ${MIN_OLLAMA_VERSION})`,
3214
+ version,
3215
+ hint: `Upgrade Ollama to a version greater than ${MIN_OLLAMA_VERSION}`
3216
+ };
3217
+ }
3218
+ } catch (error3) {
3219
+ return {
3220
+ name: "Ollama Version",
3221
+ status: "warning",
3222
+ message: "Could not check version",
3223
+ hint: `Cannot connect to ${ollamaUrl}. Start Ollama: loclaude docker-up`
3224
+ };
3225
+ }
3226
+ }
3227
+ function formatCheck(check) {
3228
+ let line = statusLine(check.status, check.name, check.message, check.version);
3229
+ if (check.hint) {
3230
+ line += `
3231
+ ${dim("\u2192")} ${dim(check.hint)}`;
3232
+ }
3233
+ return line;
3234
+ }
3235
+ async function doctor() {
3236
+ header("System Health Check");
3237
+ console.log("");
3238
+ const checks = await Promise.all([
3239
+ checkDocker(),
3240
+ checkDockerCompose(),
3241
+ checkNvidiaSmi(),
3242
+ checkNvidiaContainerToolkit(),
3243
+ checkClaude(),
3244
+ checkOllamaConnection(),
3245
+ checkOllamaVersion()
3246
+ ]);
3247
+ for (const check of checks) {
3248
+ console.log(formatCheck(check));
3249
+ }
3250
+ const errors2 = checks.filter((c) => c.status === "error");
3251
+ const warnings = checks.filter((c) => c.status === "warning");
3252
+ console.log("");
3253
+ if (errors2.length > 0) {
3254
+ console.log(red(`${errors2.length} error(s) found.`) + " Fix these before proceeding.");
3255
+ process.exit(1);
3256
+ } else if (warnings.length > 0) {
3257
+ console.log(yellow(`${warnings.length} warning(s).`) + " loclaude may work with limited functionality.");
3258
+ } else {
3259
+ console.log(green("All checks passed!") + " Ready to use loclaude.");
3260
+ }
3261
+ }
3262
+ async function hasNvidiaGpu() {
3263
+ const exists = await commandExists("nvidia-smi");
3264
+ if (!exists)
3265
+ return false;
3266
+ const result = await spawnCapture(["nvidia-smi", "--query-gpu=name", "--format=csv,noheader"]);
3267
+ return result.exitCode === 0 && Boolean(result.stdout?.trim());
3268
+ }
3269
+
3270
+ // lib/commands/init.ts
3271
+ var DOCKER_COMPOSE_TEMPLATE_GPU = `# =============================================================================
3272
+ # LOCLAUDE DOCKER COMPOSE - GPU MODE
3273
+ # =============================================================================
3274
+ # This configuration runs Ollama with NVIDIA GPU acceleration for fast inference.
3275
+ # Generated by: loclaude init
3276
+ #
3277
+ # Prerequisites:
3278
+ # - NVIDIA GPU with CUDA support
3279
+ # - NVIDIA drivers installed on host
3280
+ # - NVIDIA Container Toolkit: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit
3281
+ #
3282
+ # Quick test for GPU support:
3283
+ # docker run --rm --gpus all nvidia/cuda:12.0-base nvidia-smi
3284
+ #
3285
+ # =============================================================================
3286
+
3287
+ services:
3288
+ # ===========================================================================
3289
+ # OLLAMA - Local LLM Inference Server
3290
+ # ===========================================================================
3291
+ # Ollama provides the AI backend that Claude Code connects to.
3292
+ # It runs large language models locally on your hardware.
3293
+ #
3294
+ # API Documentation: https://github.com/ollama/ollama/blob/main/docs/api.md
3295
+ # Model Library: https://ollama.com/library
3296
+ # ===========================================================================
3297
+ ollama:
3298
+ # Official Ollama image - 'latest' ensures newest features and model support
3299
+ image: ollama/ollama:latest
3300
+
3301
+ # Fixed container name for easy CLI access:
3302
+ # docker exec ollama ollama list
3303
+ # docker logs ollama
3304
+ container_name: ollama
3305
+
3306
+ # NVIDIA Container Runtime - Required for GPU access
3307
+ # This makes CUDA libraries available inside the container
3308
+ runtime: nvidia
3309
+
3310
+ environment:
3311
+ # ---------------------------------------------------------------------------
3312
+ # GPU Configuration
3313
+ # ---------------------------------------------------------------------------
3314
+ # NVIDIA_VISIBLE_DEVICES: Which GPUs to expose to the container
3315
+ # - 'all': Use all available GPUs (recommended for most setups)
3316
+ # - '0': Use only GPU 0
3317
+ # - '0,1': Use GPUs 0 and 1
3318
+ - NVIDIA_VISIBLE_DEVICES=all
3319
+
3320
+ # NVIDIA_DRIVER_CAPABILITIES: What GPU features to enable
3321
+ # - 'compute': CUDA compute (required for inference)
3322
+ # - 'utility': nvidia-smi and other tools
3323
+ - NVIDIA_DRIVER_CAPABILITIES=compute,utility
3324
+
3325
+ # ---------------------------------------------------------------------------
3326
+ # Ollama Configuration (Optional)
3327
+ # ---------------------------------------------------------------------------
3328
+ # Uncomment these to customize Ollama behavior:
3329
+
3330
+ # Maximum number of models loaded in memory simultaneously
3331
+ # Lower this if you're running out of VRAM
3332
+ # - OLLAMA_MAX_LOADED_MODELS=1
3333
+
3334
+ # Maximum parallel inference requests per model
3335
+ # Higher values use more VRAM but handle more concurrent requests
3336
+ # - OLLAMA_NUM_PARALLEL=1
3337
+
3338
+ # Enable debug logging for troubleshooting
3339
+ # - OLLAMA_DEBUG=1
3340
+
3341
+ # Custom model storage location (inside container)
3342
+ # - OLLAMA_MODELS=/root/.ollama
3343
+
3344
+ volumes:
3345
+ # ---------------------------------------------------------------------------
3346
+ # Model Storage
3347
+ # ---------------------------------------------------------------------------
3348
+ # Maps ./models on your host to /root/.ollama in the container
3349
+ # This persists downloaded models across container restarts
3350
+ #
3351
+ # Disk space requirements (approximate):
3352
+ # - 7B model: ~4GB
3353
+ # - 13B model: ~8GB
3354
+ # - 30B model: ~16GB
3355
+ # - 70B model: ~40GB
3356
+ - ./models:/root/.ollama
3357
+
3358
+ ports:
3359
+ # Ollama API port - access at http://localhost:11434
3360
+ # Used by Claude Code and other Ollama clients
3361
+ - "11434:11434"
3362
+
3363
+ # Restart policy - keeps Ollama running unless manually stopped
3364
+ restart: unless-stopped
3365
+
3366
+ healthcheck:
3367
+ # Verify Ollama is responsive by listing models
3368
+ test: ["CMD", "ollama", "list"]
3369
+ interval: 300s # Check every 5 minutes
3370
+ timeout: 2s # Fail if no response in 2 seconds
3371
+ retries: 3 # Mark unhealthy after 3 consecutive failures
3372
+ start_period: 40s # Grace period for initial model loading
3373
+
3374
+ deploy:
3375
+ resources:
3376
+ reservations:
3377
+ devices:
3378
+ # Request GPU access from Docker
3379
+ - driver: nvidia
3380
+ count: all # Use all available GPUs
3381
+ capabilities: [gpu] # Request GPU compute capability
3382
+
3383
+ # ===========================================================================
3384
+ # OPEN WEBUI - Chat Interface (Optional)
3385
+ # ===========================================================================
3386
+ # Open WebUI provides a ChatGPT-like interface for your local models.
3387
+ # Access at http://localhost:3000 after starting containers.
3388
+ #
3389
+ # Features:
3390
+ # - Multi-model chat interface
3391
+ # - Conversation history
3392
+ # - Model management UI
3393
+ # - RAG/document upload support
3394
+ #
3395
+ # Documentation: https://docs.openwebui.com/
3396
+ # ===========================================================================
3397
+ open-webui:
3398
+ # CUDA-enabled image for GPU-accelerated features (embeddings, etc.)
3399
+ # Change to :main if you don't need GPU features in the UI
3400
+ image: ghcr.io/open-webui/open-webui:cuda
3401
+
3402
+ container_name: open-webui
3403
+
3404
+ ports:
3405
+ # Web UI port - access at http://localhost:3000
3406
+ - "3000:8080"
3407
+
3408
+ environment:
3409
+ # Tell Open WebUI where to find Ollama
3410
+ # Uses Docker internal networking (service name as hostname)
3411
+ - OLLAMA_BASE_URL=http://ollama:11434
3412
+
3413
+ # Wait for Ollama to be ready before starting
3414
+ depends_on:
3415
+ - ollama
3416
+
3417
+ restart: unless-stopped
3418
+
3419
+ healthcheck:
3420
+ test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
3421
+ interval: 30s
3422
+ timeout: 10s
3423
+ retries: 3
3424
+ start_period: 60s
3425
+
3426
+ volumes:
3427
+ # Persistent storage for conversations, settings, and user data
3428
+ - open-webui:/app/backend/data
3429
+
3430
+ deploy:
3431
+ resources:
3432
+ reservations:
3433
+ devices:
3434
+ - driver: nvidia
3435
+ count: all
3436
+ capabilities: [gpu]
3437
+
3438
+ # =============================================================================
3439
+ # VOLUMES
3440
+ # =============================================================================
3441
+ # Named volumes for persistent data that survives container recreation
3442
+ volumes:
3443
+ open-webui:
3444
+ # Open WebUI data: conversations, user settings, uploads
3445
+ # Located at /var/lib/docker/volumes/open-webui/_data on host
3446
+ `;
3447
+ var DOCKER_COMPOSE_TEMPLATE_CPU = `# =============================================================================
3448
+ # LOCLAUDE DOCKER COMPOSE - CPU MODE
3449
+ # =============================================================================
3450
+ # This configuration runs Ollama in CPU-only mode.
3451
+ # Inference will be slower than GPU mode but works on any system.
3452
+ # Generated by: loclaude init --no-gpu
3453
+ #
3454
+ # Performance notes:
3455
+ # - 7B models: ~10-20 tokens/sec on modern CPUs
3456
+ # - Larger models will be significantly slower
3457
+ # - Consider using quantized models (Q4_K_M, Q5_K_M) for better performance
3458
+ #
3459
+ # Recommended CPU-optimized models:
3460
+ # - llama3.2:3b (fast, good for simple tasks)
3461
+ # - qwen2.5-coder:7b (coding tasks)
3462
+ # - gemma2:9b (general purpose)
3463
+ #
3464
+ # =============================================================================
3465
+
3466
+ services:
3467
+ # ===========================================================================
3468
+ # OLLAMA - Local LLM Inference Server (CPU Mode)
3469
+ # ===========================================================================
3470
+ # Ollama provides the AI backend that Claude Code connects to.
3471
+ # Running in CPU mode - no GPU acceleration.
3472
+ #
3473
+ # API Documentation: https://github.com/ollama/ollama/blob/main/docs/api.md
3474
+ # Model Library: https://ollama.com/library
3475
+ # ===========================================================================
3476
+ ollama:
3477
+ # Official Ollama image - works for both CPU and GPU
3478
+ image: ollama/ollama:latest
3479
+
3480
+ # Fixed container name for easy CLI access
3481
+ container_name: ollama
3482
+
3483
+ # NOTE: No 'runtime: nvidia' - running in CPU mode
3484
+
3485
+ environment:
3486
+ # ---------------------------------------------------------------------------
3487
+ # Ollama Configuration (Optional)
3488
+ # ---------------------------------------------------------------------------
3489
+ # Uncomment these to customize Ollama behavior:
3490
+
3491
+ # Maximum number of models loaded in memory simultaneously
3492
+ # CPU mode uses system RAM instead of VRAM
3493
+ # - OLLAMA_MAX_LOADED_MODELS=1
3494
+
3495
+ # Number of CPU threads to use (default: auto-detect)
3496
+ # - OLLAMA_NUM_THREADS=8
3497
+
3498
+ # Enable debug logging for troubleshooting
3499
+ # - OLLAMA_DEBUG=1
3500
+
3501
+ volumes:
3502
+ # ---------------------------------------------------------------------------
3503
+ # Model Storage
3504
+ # ---------------------------------------------------------------------------
3505
+ # Maps ./models on your host to /root/.ollama in the container
3506
+ # This persists downloaded models across container restarts
3507
+ - ./models:/root/.ollama
3508
+
3509
+ ports:
3510
+ # Ollama API port - access at http://localhost:11434
3511
+ - "11434:11434"
3512
+
3513
+ restart: unless-stopped
3514
+
3515
+ healthcheck:
3516
+ test: ["CMD", "ollama", "list"]
3517
+ interval: 300s
3518
+ timeout: 2s
3519
+ retries: 3
3520
+ start_period: 40s
3521
+
3522
+ # CPU resource limits (optional - uncomment to constrain)
3523
+ # deploy:
3524
+ # resources:
3525
+ # limits:
3526
+ # cpus: '4' # Limit to 4 CPU cores
3527
+ # memory: 16G # Limit to 16GB RAM
3528
+ # reservations:
3529
+ # cpus: '2' # Reserve at least 2 cores
3530
+ # memory: 8G # Reserve at least 8GB RAM
3531
+
3532
+ # ===========================================================================
3533
+ # OPEN WEBUI - Chat Interface (Optional)
3534
+ # ===========================================================================
3535
+ # Open WebUI provides a ChatGPT-like interface for your local models.
3536
+ # Access at http://localhost:3000 after starting containers.
3537
+ #
3538
+ # Documentation: https://docs.openwebui.com/
3539
+ # ===========================================================================
3540
+ open-webui:
3541
+ # Standard image (no CUDA) - smaller download, CPU-only features
3542
+ image: ghcr.io/open-webui/open-webui:main
3543
+
3544
+ container_name: open-webui
3545
+
3546
+ ports:
3547
+ - "3000:8080"
3548
+
3549
+ environment:
3550
+ - OLLAMA_BASE_URL=http://ollama:11434
3551
+
3552
+ depends_on:
3553
+ - ollama
3554
+
3555
+ restart: unless-stopped
3556
+
3557
+ healthcheck:
3558
+ test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
3559
+ interval: 30s
3560
+ timeout: 10s
3561
+ retries: 3
3562
+ start_period: 60s
3563
+
3564
+ volumes:
3565
+ - open-webui:/app/backend/data
3566
+
3567
+ # =============================================================================
3568
+ # VOLUMES
3569
+ # =============================================================================
3570
+ volumes:
3571
+ open-webui:
3572
+ `;
3573
+ function getConfigTemplate(gpu) {
3574
+ return `{
3575
+ "ollama": {
3576
+ "url": "http://localhost:11434",
3577
+ "defaultModel": "${gpu ? "qwen3-coder:30b" : "qwen2.5-coder:7b"}"
3578
+ },
3579
+ "docker": {
3580
+ "composeFile": "./docker-compose.yml",
3581
+ "gpu": ${gpu}
3582
+ }
3583
+ }
3584
+ `;
3585
+ }
3586
+ var GITIGNORE_TEMPLATE = `# Ollama models (large binary files)
3587
+ # These are downloaded by Ollama and can be re-pulled anytime
3588
+ models/
3589
+ `;
3590
+ var MISE_TOML_TEMPLATE = `# =============================================================================
3591
+ # MISE TASK RUNNER CONFIGURATION
3592
+ # =============================================================================
3593
+ # Mise is a task runner that provides convenient shortcuts for common operations.
3594
+ # Run 'mise tasks' to see all available tasks.
3595
+ #
3596
+ # Documentation: https://mise.jdx.dev/
3597
+ # Install: curl https://mise.jdx.dev/install.sh | sh
3598
+ # =============================================================================
3599
+
3600
+ [tasks]
3601
+
3602
+ # =============================================================================
3603
+ # Docker Management
3604
+ # =============================================================================
3605
+ # Commands for managing the Ollama and Open WebUI containers
3606
+
3607
+ [tasks.up]
3608
+ description = "Start Ollama and Open WebUI containers"
3609
+ run = "loclaude docker-up"
3610
+
3611
+ [tasks.down]
3612
+ description = "Stop all containers"
3613
+ run = "loclaude docker-down"
3614
+
3615
+ [tasks.restart]
3616
+ description = "Restart all containers"
3617
+ run = "loclaude docker-restart"
3618
+
3619
+ [tasks.status]
3620
+ description = "Show container status"
3621
+ run = "loclaude docker-status"
3622
+
3623
+ [tasks.logs]
3624
+ description = "Follow container logs"
3625
+ run = "loclaude docker-logs --follow"
3626
+
3627
+ # =============================================================================
3628
+ # Model Management
3629
+ # =============================================================================
3630
+ # Commands for managing Ollama models (download, remove, list)
3631
+
3632
+ [tasks.models]
3633
+ description = "List installed models"
3634
+ run = "loclaude models"
3635
+
3636
+ [tasks.pull]
3637
+ description = "Pull a model (usage: mise run pull <model-name>)"
3638
+ run = "loclaude models-pull {{arg(name='model')}}"
3639
+
3640
+ [tasks."pull:recommended"]
3641
+ description = "Pull the recommended coding model"
3642
+ run = "loclaude models-pull qwen3-coder:30b"
3643
+
3644
+ # =============================================================================
3645
+ # Claude Code
3646
+ # =============================================================================
3647
+ # Commands for running Claude Code with local Ollama
3648
+
3649
+ [tasks.claude]
3650
+ description = "Run Claude Code with local Ollama"
3651
+ run = "loclaude run"
3652
+
3653
+ [tasks."claude:model"]
3654
+ description = "Run Claude with specific model (usage: mise run claude:model <model>)"
3655
+ run = "loclaude run -m {{arg(name='model')}}"
3656
+
3657
+ # =============================================================================
3658
+ # Diagnostics
3659
+ # =============================================================================
3660
+ # Commands for checking system health and troubleshooting
3661
+
3662
+ [tasks.doctor]
3663
+ description = "Check system requirements"
3664
+ run = "loclaude doctor"
3665
+
3666
+ [tasks.gpu]
3667
+ description = "Check GPU status (requires NVIDIA GPU)"
3668
+ run = "docker exec ollama nvidia-smi"
3669
+
3670
+ [tasks.config]
3671
+ description = "Show current configuration"
3672
+ run = "loclaude config"
3673
+ `;
3674
+ var README_TEMPLATE = `# Project Name
3675
+
3676
+ > Powered by [loclaude](https://github.com/nicholasgalante1997/loclaude) - Run Claude Code with local Ollama LLMs
3677
+
3678
+ ## Prerequisites
3679
+
3680
+ - [Docker](https://docs.docker.com/get-docker/) with Docker Compose v2
3681
+ - [mise](https://mise.jdx.dev/) task runner (recommended)
3682
+ - [loclaude](https://www.npmjs.com/package/loclaude) CLI (\`npm install -g loclaude\`)
3683
+
3684
+ ### For GPU Mode (Recommended)
3685
+
3686
+ - [NVIDIA GPU](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) with CUDA support
3687
+ - NVIDIA drivers installed on host
3688
+ - [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
3689
+
3690
+ ## Quick Start
3691
+
3692
+ \`\`\`bash
3693
+ # Start the LLM backend (Ollama + Open WebUI)
3694
+ mise run up
3695
+
3696
+ # Pull a model (adjust based on your hardware)
3697
+ mise run pull qwen3-coder:30b # GPU: 30B model (~16GB VRAM)
3698
+ mise run pull qwen2.5-coder:7b # CPU: 7B model (faster)
3699
+
3700
+ # Run Claude Code with local LLM
3701
+ mise run claude
3702
+ \`\`\`
3703
+
3704
+ ## Available Commands
3705
+
3706
+ Run \`mise tasks\` to see all available commands.
3707
+
3708
+ | Command | Description |
3709
+ |---------|-------------|
3710
+ | \`mise run up\` | Start Ollama and Open WebUI containers |
3711
+ | \`mise run down\` | Stop all containers |
3712
+ | \`mise run status\` | Show container status |
3713
+ | \`mise run logs\` | Follow container logs |
3714
+ | \`mise run models\` | List installed models |
3715
+ | \`mise run pull <model>\` | Pull a model from Ollama registry |
3716
+ | \`mise run claude\` | Run Claude Code with model selection |
3717
+ | \`mise run claude:model <model>\` | Run Claude with a specific model |
3718
+ | \`mise run doctor\` | Check system requirements |
3719
+ | \`mise run gpu\` | Check GPU status |
3720
+
3721
+ ## Service URLs
3722
+
3723
+ | Service | URL | Description |
3724
+ |---------|-----|-------------|
3725
+ | Ollama API | http://localhost:11434 | LLM inference API |
3726
+ | Open WebUI | http://localhost:3000 | Chat interface |
3727
+
3728
+ ## Project Structure
3729
+
3730
+ \`\`\`
3731
+ .
3732
+ \u251C\u2500\u2500 .claude/
3733
+ \u2502 \u2514\u2500\u2500 CLAUDE.md # Claude Code project instructions
3734
+ \u251C\u2500\u2500 .loclaude/
3735
+ \u2502 \u2514\u2500\u2500 config.json # Loclaude configuration
3736
+ \u251C\u2500\u2500 models/ # Ollama model storage (gitignored)
3737
+ \u251C\u2500\u2500 docker-compose.yml # Container definitions
3738
+ \u251C\u2500\u2500 mise.toml # Task runner configuration
3739
+ \u2514\u2500\u2500 README.md
3740
+ \`\`\`
3741
+
3742
+ ## Configuration
3743
+
3744
+ ### Loclaude Config (\`.loclaude/config.json\`)
3745
+
3746
+ \`\`\`json
3747
+ {
3748
+ "ollama": {
3749
+ "url": "http://localhost:11434",
3750
+ "defaultModel": "qwen3-coder:30b"
3022
3751
  },
3023
3752
  "docker": {
3024
3753
  "composeFile": "./docker-compose.yml",
@@ -3033,6 +3762,25 @@ Run \`mise tasks\` to see all available commands.
3033
3762
  |----------|-------------|---------|
3034
3763
  | \`OLLAMA_URL\` | Ollama API endpoint | \`http://localhost:11434\` |
3035
3764
  | \`OLLAMA_MODEL\` | Default model name | \`qwen3-coder:30b\` |
3765
+ | \`LOCLAUDE_GPU\` | Enable GPU mode | \`true\` |
3766
+
3767
+ ## Recommended Models
3768
+
3769
+ ### For GPU (NVIDIA with 16GB+ VRAM)
3770
+
3771
+ | Model | Size | Use Case |
3772
+ |-------|------|----------|
3773
+ | \`qwen3-coder:30b\` | ~16GB | Best coding performance |
3774
+ | \`gpt-oss:20b\` | ~12GB | General purpose |
3775
+ | \`glm-4.7:cloud\` | Cloud | No local storage needed |
3776
+
3777
+ ### For CPU or Limited VRAM
3778
+
3779
+ | Model | Size | Use Case |
3780
+ |-------|------|----------|
3781
+ | \`qwen2.5-coder:7b\` | ~4GB | Coding on CPU |
3782
+ | \`llama3.2:3b\` | ~2GB | Fast, simple tasks |
3783
+ | \`gemma2:9b\` | ~5GB | General purpose |
3036
3784
 
3037
3785
  ## Troubleshooting
3038
3786
 
@@ -3054,6 +3802,12 @@ mise run logs
3054
3802
  mise run down && mise run up
3055
3803
  \`\`\`
3056
3804
 
3805
+ ### GPU Not Detected
3806
+
3807
+ 1. Verify NVIDIA drivers: \`nvidia-smi\`
3808
+ 2. Check Docker GPU access: \`docker run --rm --gpus all nvidia/cuda:12.0-base nvidia-smi\`
3809
+ 3. Install NVIDIA Container Toolkit if missing
3810
+
3057
3811
  ## License
3058
3812
 
3059
3813
  MIT
@@ -3064,7 +3818,7 @@ Project-specific instructions for Claude Code.
3064
3818
 
3065
3819
  ## Project Overview
3066
3820
 
3067
- This project uses [loclaude](https://github.com/nicholasgalante1997/docker-ollama) to run Claude Code with local Ollama LLMs.
3821
+ This project uses [loclaude](https://github.com/nicholasgalante1997/loclaude) to run Claude Code with local Ollama LLMs.
3068
3822
 
3069
3823
  ## Quick Reference
3070
3824
 
@@ -3120,304 +3874,153 @@ async function init(options = {}) {
3120
3874
  const claudeDir = join2(cwd, ".claude");
3121
3875
  const claudeMdPath = join2(claudeDir, "CLAUDE.md");
3122
3876
  const readmePath = join2(cwd, "README.md");
3123
- console.log(`Initializing loclaude project...
3124
- `);
3877
+ header("Initializing loclaude project");
3878
+ console.log("");
3879
+ let gpuMode;
3880
+ if (options.gpu === false) {
3881
+ gpuMode = false;
3882
+ console.log(info("CPU-only mode (--no-gpu)"));
3883
+ } else if (options.gpu === true) {
3884
+ gpuMode = true;
3885
+ console.log(info("GPU mode enabled (--gpu)"));
3886
+ } else {
3887
+ console.log(dim(" Detecting GPU..."));
3888
+ gpuMode = await hasNvidiaGpu();
3889
+ if (gpuMode) {
3890
+ console.log(success("NVIDIA GPU detected - using GPU mode"));
3891
+ } else {
3892
+ console.log(warn("No NVIDIA GPU detected - using CPU mode"));
3893
+ console.log(dim(" Use --gpu to force GPU mode if you have an NVIDIA GPU"));
3894
+ }
3895
+ }
3896
+ console.log("");
3125
3897
  if (existsSync2(readmePath) && !options.force) {
3126
- console.log("\u26A0\uFE0F README.md already exists");
3898
+ console.log(warn(`${file("README.md")} already exists`));
3127
3899
  } else {
3128
3900
  writeFileSync(readmePath, README_TEMPLATE);
3129
- console.log("\u2713 Created README.md");
3901
+ console.log(success(`Created ${file("README.md")}`));
3130
3902
  }
3131
3903
  if (existsSync2(composePath) && !options.force) {
3132
- console.log("\u26A0\uFE0F docker-compose.yml already exists");
3133
- console.log(` Use --force to overwrite
3134
- `);
3904
+ console.log(warn(`${file("docker-compose.yml")} already exists`));
3905
+ console.log(dim(" Use --force to overwrite"));
3135
3906
  } else {
3136
- let composeContent = DOCKER_COMPOSE_TEMPLATE;
3907
+ let composeContent = gpuMode ? DOCKER_COMPOSE_TEMPLATE_GPU : DOCKER_COMPOSE_TEMPLATE_CPU;
3137
3908
  if (options.noWebui) {
3138
- composeContent = composeContent.replace(/\n open-webui:[\s\S]*?capabilities: \[gpu\]\n/m, `
3139
- `).replace(/\nvolumes:\n open-webui:\n/, `
3909
+ composeContent = composeContent.replace(/\n # =+\n # OPEN WEBUI[\s\S]*?capabilities: \[gpu\]\n/m, `
3910
+ `).replace(/\n # =+\n # OPEN WEBUI[\s\S]*?open-webui:\/app\/backend\/data\n/m, `
3911
+ `).replace(/\nvolumes:\n open-webui:\n.*$/m, `
3140
3912
  `);
3141
3913
  }
3142
3914
  writeFileSync(composePath, composeContent);
3143
- console.log("\u2713 Created docker-compose.yml");
3915
+ const modeLabel = gpuMode ? cyan("GPU") : cyan("CPU");
3916
+ console.log(success(`Created ${file("docker-compose.yml")} (${modeLabel} mode)`));
3144
3917
  }
3145
3918
  if (existsSync2(miseTomlPath) && !options.force) {
3146
- console.log("\u26A0\uFE0F mise.toml already exists");
3919
+ console.log(warn(`${file("mise.toml")} already exists`));
3147
3920
  } else {
3148
3921
  writeFileSync(miseTomlPath, MISE_TOML_TEMPLATE);
3149
- console.log("\u2713 Created mise.toml");
3922
+ console.log(success(`Created ${file("mise.toml")}`));
3150
3923
  }
3151
3924
  if (!existsSync2(claudeDir)) {
3152
3925
  mkdirSync(claudeDir, { recursive: true });
3153
3926
  }
3154
3927
  if (existsSync2(claudeMdPath) && !options.force) {
3155
- console.log("\u26A0\uFE0F .claude/CLAUDE.md already exists");
3928
+ console.log(warn(`${file(".claude/CLAUDE.md")} already exists`));
3156
3929
  } else {
3157
3930
  writeFileSync(claudeMdPath, CLAUDE_MD_TEMPLATE);
3158
- console.log("\u2713 Created .claude/CLAUDE.md");
3931
+ console.log(success(`Created ${file(".claude/CLAUDE.md")}`));
3159
3932
  }
3160
3933
  if (!existsSync2(configDir)) {
3161
3934
  mkdirSync(configDir, { recursive: true });
3162
- console.log("\u2713 Created .loclaude/ directory");
3935
+ console.log(success(`Created ${file(".loclaude/")} directory`));
3163
3936
  }
3164
3937
  if (existsSync2(configPath) && !options.force) {
3165
- console.log("\u26A0\uFE0F .loclaude/config.json already exists");
3938
+ console.log(warn(`${file(".loclaude/config.json")} already exists`));
3166
3939
  } else {
3167
- writeFileSync(configPath, CONFIG_TEMPLATE);
3168
- console.log("\u2713 Created .loclaude/config.json");
3940
+ writeFileSync(configPath, getConfigTemplate(gpuMode));
3941
+ console.log(success(`Created ${file(".loclaude/config.json")}`));
3169
3942
  }
3170
3943
  if (!existsSync2(modelsDir)) {
3171
3944
  mkdirSync(modelsDir, { recursive: true });
3172
- console.log("\u2713 Created models/ directory");
3945
+ console.log(success(`Created ${file("models/")} directory`));
3173
3946
  }
3174
3947
  if (existsSync2(gitignorePath)) {
3175
3948
  const existing = readFileSync2(gitignorePath, "utf-8");
3176
3949
  if (!existing.includes("models/")) {
3177
3950
  writeFileSync(gitignorePath, existing + `
3178
3951
  ` + GITIGNORE_TEMPLATE);
3179
- console.log("\u2713 Updated .gitignore");
3952
+ console.log(success(`Updated ${file(".gitignore")}`));
3180
3953
  }
3181
3954
  } else {
3182
3955
  writeFileSync(gitignorePath, GITIGNORE_TEMPLATE);
3183
- console.log("\u2713 Created .gitignore");
3184
- }
3185
- console.log(`
3186
- \uD83C\uDF89 Project initialized!
3187
- `);
3188
- console.log("Next steps:");
3189
- console.log(" 1. Start containers: mise run up");
3190
- console.log(" 2. Pull a model: mise run pull qwen3-coder:30b");
3191
- console.log(" 3. Run Claude: mise run claude");
3192
- console.log(`
3193
- Service URLs:`);
3194
- console.log(" Ollama API: http://localhost:11434");
3195
- if (!options.noWebui) {
3196
- console.log(" Open WebUI: http://localhost:3000");
3197
- }
3198
- }
3199
- // lib/commands/doctor.ts
3200
- async function checkDocker() {
3201
- const exists = await commandExists("docker");
3202
- if (!exists) {
3203
- return {
3204
- name: "Docker",
3205
- status: "error",
3206
- message: "Not installed",
3207
- hint: "Install Docker: https://docs.docker.com/get-docker/"
3208
- };
3209
- }
3210
- const version = await getCommandVersion("docker");
3211
- return {
3212
- name: "Docker",
3213
- status: "ok",
3214
- message: "Installed",
3215
- version: version ?? undefined
3216
- };
3217
- }
3218
- async function checkDockerCompose() {
3219
- const result = await spawnCapture(["docker", "compose", "version"]);
3220
- if (result.exitCode === 0) {
3221
- const version = result.stdout?.trim().split(`
3222
- `)[0];
3223
- return {
3224
- name: "Docker Compose",
3225
- status: "ok",
3226
- message: "Installed (v2)",
3227
- version: version ?? undefined
3228
- };
3229
- }
3230
- const v1Exists = await commandExists("docker-compose");
3231
- if (v1Exists) {
3232
- const version = await getCommandVersion("docker-compose");
3233
- return {
3234
- name: "Docker Compose",
3235
- status: "warning",
3236
- message: "Using legacy v1",
3237
- version: version ?? undefined,
3238
- hint: "Consider upgrading to Docker Compose v2"
3239
- };
3240
- }
3241
- return {
3242
- name: "Docker Compose",
3243
- status: "error",
3244
- message: "Not installed",
3245
- hint: "Docker Compose is included with Docker Desktop, or install separately"
3246
- };
3247
- }
3248
- async function checkNvidiaSmi() {
3249
- const exists = await commandExists("nvidia-smi");
3250
- if (!exists) {
3251
- return {
3252
- name: "NVIDIA GPU",
3253
- status: "warning",
3254
- message: "nvidia-smi not found",
3255
- hint: "GPU support requires NVIDIA drivers. CPU-only mode will be used."
3256
- };
3257
- }
3258
- const result = await spawnCapture(["nvidia-smi", "--query-gpu=name", "--format=csv,noheader"]);
3259
- if (result.exitCode === 0 && result.stdout) {
3260
- const gpus = result.stdout.trim().split(`
3261
- `).filter(Boolean);
3262
- return {
3263
- name: "NVIDIA GPU",
3264
- status: "ok",
3265
- message: `${gpus.length} GPU(s) detected`,
3266
- version: gpus[0]
3267
- };
3268
- }
3269
- return {
3270
- name: "NVIDIA GPU",
3271
- status: "warning",
3272
- message: "nvidia-smi failed",
3273
- hint: "GPU may not be available. Check NVIDIA drivers."
3274
- };
3275
- }
3276
- async function checkNvidiaContainerToolkit() {
3277
- const result = await spawnCapture(["docker", "info", "--format", "{{.Runtimes}}"]);
3278
- if (result.exitCode === 0 && result.stdout?.includes("nvidia")) {
3279
- return {
3280
- name: "NVIDIA Container Toolkit",
3281
- status: "ok",
3282
- message: "nvidia runtime available"
3283
- };
3284
- }
3285
- return {
3286
- name: "NVIDIA Container Toolkit",
3287
- status: "warning",
3288
- message: "nvidia runtime not found",
3289
- hint: "Install: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html"
3290
- };
3291
- }
3292
- async function checkClaude() {
3293
- const exists = await commandExists("claude");
3294
- if (!exists) {
3295
- return {
3296
- name: "Claude Code",
3297
- status: "error",
3298
- message: "Not installed",
3299
- hint: "Install: npm install -g @anthropic-ai/claude-code"
3300
- };
3301
- }
3302
- const version = await getCommandVersion("claude");
3303
- return {
3304
- name: "Claude Code",
3305
- status: "ok",
3306
- message: "Installed",
3307
- version: version ?? undefined
3308
- };
3309
- }
3310
- async function checkOllamaConnection() {
3311
- const ollamaUrl = getOllamaUrl();
3312
- try {
3313
- const response = await fetch(`${ollamaUrl}/api/tags`, {
3314
- signal: AbortSignal.timeout(5000)
3315
- });
3316
- if (response.ok) {
3317
- const data = await response.json();
3318
- const modelCount = data.models?.length ?? 0;
3319
- return {
3320
- name: "Ollama API",
3321
- status: "ok",
3322
- message: `Connected (${modelCount} model${modelCount === 1 ? "" : "s"})`,
3323
- version: ollamaUrl
3324
- };
3325
- }
3326
- return {
3327
- name: "Ollama API",
3328
- status: "warning",
3329
- message: `HTTP ${response.status}`,
3330
- hint: "Ollama may not be running. Try: loclaude docker-up"
3331
- };
3332
- } catch (error) {
3333
- return {
3334
- name: "Ollama API",
3335
- status: "warning",
3336
- message: "Not reachable",
3337
- hint: `Cannot connect to ${ollamaUrl}. Start Ollama: loclaude docker-up`
3338
- };
3956
+ console.log(success(`Created ${file(".gitignore")}`));
3339
3957
  }
3340
- }
3341
- function formatCheck(check) {
3342
- const icons = {
3343
- ok: "\u2713",
3344
- warning: "\u26A0",
3345
- error: "\u2717"
3346
- };
3347
- const colors = {
3348
- ok: "\x1B[32m",
3349
- warning: "\x1B[33m",
3350
- error: "\x1B[31m"
3351
- };
3352
- const reset = "\x1B[0m";
3353
- const icon = icons[check.status];
3354
- const color = colors[check.status];
3355
- let line = `${color}${icon}${reset} ${check.name}: ${check.message}`;
3356
- if (check.version) {
3357
- line += ` (${check.version})`;
3358
- }
3359
- if (check.hint) {
3360
- line += `
3361
- ${check.hint}`;
3362
- }
3363
- return line;
3364
- }
3365
- async function doctor() {
3366
- console.log(`Checking system requirements...
3367
- `);
3368
- const checks = await Promise.all([
3369
- checkDocker(),
3370
- checkDockerCompose(),
3371
- checkNvidiaSmi(),
3372
- checkNvidiaContainerToolkit(),
3373
- checkClaude(),
3374
- checkOllamaConnection()
3375
- ]);
3376
- for (const check of checks) {
3377
- console.log(formatCheck(check));
3378
- }
3379
- const errors2 = checks.filter((c) => c.status === "error");
3380
- const warnings = checks.filter((c) => c.status === "warning");
3958
+ const recommendedModel = gpuMode ? "qwen3-coder:30b" : "qwen2.5-coder:7b";
3381
3959
  console.log("");
3382
- if (errors2.length > 0) {
3383
- console.log(`\x1B[31m${errors2.length} error(s) found.\x1B[0m Fix these before proceeding.`);
3384
- process.exit(1);
3385
- } else if (warnings.length > 0) {
3386
- console.log(`\x1B[33m${warnings.length} warning(s).\x1B[0m loclaude may work with limited functionality.`);
3387
- } else {
3388
- console.log("\x1B[32mAll checks passed!\x1B[0m Ready to use loclaude.");
3960
+ console.log(green("Project initialized!"));
3961
+ console.log("");
3962
+ console.log(cyan("Next steps:"));
3963
+ console.log(` 1. Start containers: ${cmd("mise run up")}`);
3964
+ console.log(` 2. Pull a model: ${cmd(`mise run pull ${recommendedModel}`)}`);
3965
+ console.log(` 3. Run Claude: ${cmd("mise run claude")}`);
3966
+ console.log("");
3967
+ console.log(cyan("Service URLs:"));
3968
+ console.log(` Ollama API: ${url("http://localhost:11434")}`);
3969
+ if (!options.noWebui) {
3970
+ console.log(` Open WebUI: ${url("http://localhost:3000")}`);
3389
3971
  }
3390
3972
  }
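init() now branches on hasNvidiaGpu(), which is defined earlier in the bundle and not shown in this hunk. A minimal sketch of what such a probe typically looks like, assuming it shells out to nvidia-smi the same way the removed doctor check below does; the package's real implementation may differ:

```ts
// Sketch only: treat a successful `nvidia-smi --query-gpu=name` call with
// non-empty output as "an NVIDIA GPU is present". The function name mirrors
// the bundle's hasNvidiaGpu(), but this body is an assumption.
import { execFile } from "node:child_process";

function hasNvidiaGpuSketch(): Promise<boolean> {
  return new Promise((resolve) => {
    execFile(
      "nvidia-smi",
      ["--query-gpu=name", "--format=csv,noheader"],
      (err, stdout) => resolve(!err && stdout.trim().length > 0)
    );
  });
}
```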
3391
3973
  // lib/commands/config.ts
3392
- import { inspect } from "util";
3393
3974
  async function configShow() {
3394
3975
  const config = loadConfig();
3395
3976
  const activePath = getActiveConfigPath();
3396
- console.log(`Current configuration:
3397
- `);
3398
- console.log(inspect(config, false, 3, true));
3399
- console.log(`
3400
- ---`);
3977
+ header("Current Configuration");
3978
+ console.log("");
3979
+ console.log(cyan("Ollama:"));
3980
+ labelValue(" URL", config.ollama.url);
3981
+ labelValue(" Default Model", magenta(config.ollama.defaultModel));
3982
+ console.log("");
3983
+ console.log(cyan("Docker:"));
3984
+ labelValue(" Compose File", config.docker.composeFile);
3985
+ labelValue(" GPU Mode", config.docker.gpu ? green("enabled") : dim("disabled"));
3986
+ console.log("");
3987
+ console.log(cyan("Claude:"));
3988
+ if (config.claude.extraArgs.length > 0) {
3989
+ labelValue(" Extra Args", config.claude.extraArgs.join(" "));
3990
+ } else {
3991
+ labelValue(" Extra Args", dim("none"));
3992
+ }
3993
+ console.log("");
3994
+ console.log(dim("\u2500".repeat(40)));
3401
3995
  if (activePath) {
3402
- console.log(`Loaded from: ${activePath}`);
3996
+ console.log(dim(`Loaded from: ${file(activePath)}`));
3403
3997
  } else {
3404
- console.log("Using default configuration (no config file found)");
3998
+ console.log(dim("Using default configuration (no config file found)"));
3405
3999
  }
3406
4000
  }
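For reference, this is the shape of the object loadConfig() must return, inferred from the fields configShow() reads above; the field names come from this file, while optionality and any extra fields are assumptions:

```ts
// Inferred from config.ollama.url, config.ollama.defaultModel,
// config.docker.composeFile, config.docker.gpu and config.claude.extraArgs.
interface LoclaudeConfig {
  ollama: { url: string; defaultModel: string };
  docker: { composeFile: string; gpu: boolean };
  claude: { extraArgs: string[] };
}
```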
3407
4001
  async function configPaths() {
3408
4002
  const paths = getConfigSearchPaths();
3409
4003
  const activePath = getActiveConfigPath();
3410
- console.log(`Config file search paths (in priority order):
3411
- `);
3412
- for (const path of paths) {
3413
- const isActive = path === activePath;
3414
- const marker = isActive ? " \u2190 active" : "";
3415
- console.log(` ${path}${marker}`);
4004
+ header("Config Search Paths");
4005
+ console.log("");
4006
+ console.log(dim("Files are checked in priority order (first found wins):"));
4007
+ console.log("");
4008
+ for (let i = 0; i < paths.length; i++) {
4009
+ const configPath = paths[i];
4010
+ if (!configPath)
4011
+ continue;
4012
+ const isActive = configPath === activePath;
4013
+ const num = `${i + 1}.`;
4014
+ if (isActive) {
4015
+ console.log(` ${num} ${file(configPath)} ${green("\u2190 active")}`);
4016
+ } else {
4017
+ console.log(` ${num} ${dim(configPath)}`);
4018
+ }
3416
4019
  }
4020
+ console.log("");
3417
4021
  if (!activePath) {
3418
- console.log(`
3419
- No config file found. Using defaults.`);
3420
- console.log("Run 'loclaude init' to create a project config.");
4022
+ console.log(info("No config file found. Using defaults."));
4023
+ console.log(dim(` Run ${cmd("loclaude init")} to create a project config.`));
3421
4024
  }
3422
4025
  }
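getConfigSearchPaths() and getActiveConfigPath() live elsewhere in the bundle; the "first found wins" behaviour configPaths() prints amounts to something like the sketch below. The candidate paths in the usage comment are illustrative, not the package's actual search list:

```ts
// Sketch of "first found wins" config resolution; helper names and the search
// list are assumptions.
import { existsSync } from "node:fs";

function resolveActiveConfig(searchPaths: string[]): string | null {
  for (const candidate of searchPaths) {
    if (existsSync(candidate)) return candidate; // highest-priority existing file
  }
  return null; // nothing found -> defaults apply
}

// e.g. resolveActiveConfig(["./.loclaude/config.json", "./loclaude.config.json"])
```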
3423
4026
  // lib/commands/docker.ts
@@ -3456,42 +4059,44 @@ function getComposeCommand() {
3456
4059
  async function runCompose(args, options = {}) {
3457
4060
  const composeFile = options.file ?? findComposeFile();
3458
4061
  if (!composeFile) {
3459
- console.error("Error: No docker-compose.yml found");
3460
- console.error("Run 'loclaude init' to create one, or specify --file");
4062
+ console.log(error("No docker-compose.yml found"));
4063
+ console.log(dim(` Run ${cmd("loclaude init")} to create one, or specify --file`));
3461
4064
  return 1;
3462
4065
  }
3463
- const cmd = [...getComposeCommand(), "-f", composeFile, ...args];
3464
- return spawn(cmd);
4066
+ const cmd_args = [...getComposeCommand(), "-f", composeFile, ...args];
4067
+ return spawn(cmd_args);
3465
4068
  }
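getComposeCommand() itself sits above this hunk; judging by the Compose v2/v1 probing in the removed doctor code, it plausibly returns `["docker", "compose"]` with a `docker-compose` fallback. A hedged sketch of how runCompose() assembles the final command under that assumption:

```ts
// Assumed shape of getComposeCommand(); the real function is not visible here.
function getComposeCommandSketch(composeV2Available: boolean): string[] {
  return composeV2Available ? ["docker", "compose"] : ["docker-compose"];
}

// runCompose() then prepends it to the compose-file flag and subcommand:
const example = [...getComposeCommandSketch(true), "-f", "./docker-compose.yml", "up", "-d"];
console.log(example.join(" ")); // docker compose -f ./docker-compose.yml up -d
```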
3466
4069
  async function dockerUp(options = {}) {
3467
4070
  const args = ["up"];
3468
4071
  if (options.detach !== false) {
3469
4072
  args.push("-d");
3470
4073
  }
3471
- console.log(`Starting containers...
3472
- `);
4074
+ console.log(info("Starting containers..."));
4075
+ console.log("");
3473
4076
  const exitCode = await runCompose(args, options);
3474
4077
  if (exitCode === 0) {
3475
- console.log(`
3476
- \u2713 Containers started`);
3477
- console.log(`
3478
- Service URLs:`);
3479
- console.log(" Ollama API: http://localhost:11434");
3480
- console.log(" Open WebUI: http://localhost:3000");
4078
+ console.log("");
4079
+ console.log(success("Containers started"));
4080
+ console.log("");
4081
+ console.log(cyan("Service URLs:"));
4082
+ console.log(` Ollama API: ${url("http://localhost:11434")}`);
4083
+ console.log(` Open WebUI: ${url("http://localhost:3000")}`);
3481
4084
  }
3482
4085
  process.exit(exitCode);
3483
4086
  }
3484
4087
  async function dockerDown(options = {}) {
3485
- console.log(`Stopping containers...
3486
- `);
4088
+ console.log(info("Stopping containers..."));
4089
+ console.log("");
3487
4090
  const exitCode = await runCompose(["down"], options);
3488
4091
  if (exitCode === 0) {
3489
- console.log(`
3490
- \u2713 Containers stopped`);
4092
+ console.log("");
4093
+ console.log(success("Containers stopped"));
3491
4094
  }
3492
4095
  process.exit(exitCode);
3493
4096
  }
3494
4097
  async function dockerStatus(options = {}) {
4098
+ console.log(info("Container status:"));
4099
+ console.log("");
3495
4100
  const exitCode = await runCompose(["ps"], options);
3496
4101
  process.exit(exitCode);
3497
4102
  }
@@ -3502,17 +4107,21 @@ async function dockerLogs(options = {}) {
3502
4107
  }
3503
4108
  if (options.service) {
3504
4109
  args.push(options.service);
4110
+ console.log(info(`Logs for ${cyan(options.service)}:`));
4111
+ } else {
4112
+ console.log(info("Container logs:"));
3505
4113
  }
4114
+ console.log("");
3506
4115
  const exitCode = await runCompose(args, options);
3507
4116
  process.exit(exitCode);
3508
4117
  }
3509
4118
  async function dockerRestart(options = {}) {
3510
- console.log(`Restarting containers...
3511
- `);
4119
+ console.log(info("Restarting containers..."));
4120
+ console.log("");
3512
4121
  const exitCode = await runCompose(["restart"], options);
3513
4122
  if (exitCode === 0) {
3514
- console.log(`
3515
- \u2713 Containers restarted`);
4123
+ console.log("");
4124
+ console.log(success("Containers restarted"));
3516
4125
  }
3517
4126
  process.exit(exitCode);
3518
4127
  }
@@ -3529,11 +4138,11 @@ async function fetchModels() {
3529
4138
  }
3530
4139
  const data = await response.json();
3531
4140
  return data.models ?? [];
3532
- } catch (error) {
3533
- if (error instanceof Error && error.name === "TimeoutError") {
4141
+ } catch (error3) {
4142
+ if (error3 instanceof Error && error3.name === "TimeoutError") {
3534
4143
  throw new Error(`Connection to Ollama timed out (${ollamaUrl})`);
3535
4144
  }
3536
- throw error;
4145
+ throw error3;
3537
4146
  }
3538
4147
  }
3539
4148
  async function isOllamaInDocker() {
@@ -3548,83 +4157,99 @@ async function runOllamaCommand(args) {
3548
4157
  return spawn(["ollama", ...args]);
3549
4158
  }
3550
4159
  }
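Only the local-binary branch of runOllamaCommand() appears in this hunk; given isOllamaInDocker() in the hunk header and the `docker exec ollama nvidia-smi` task in mise.toml, the other branch presumably execs inside the ollama container. A sketch under those assumptions:

```ts
// Sketch only: the container name "ollama" and the docker-exec branch are
// assumptions based on the mise `gpu` task. spawn() and isOllamaInDocker() are
// the bundle's own helpers, declared here just so the sketch type-checks.
declare function isOllamaInDocker(): Promise<boolean>;
declare function spawn(cmd: string[]): Promise<number>;

async function runOllamaCommandSketch(args: string[]): Promise<number> {
  if (await isOllamaInDocker()) {
    return spawn(["docker", "exec", "-it", "ollama", "ollama", ...args]);
  }
  return spawn(["ollama", ...args]); // matches the visible branch above
}
```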
4160
+ function formatSize(sizeBytes) {
4161
+ const sizeStr = import_bytes2.default(sizeBytes) ?? "?";
4162
+ const sizeNum = sizeBytes / (1024 * 1024 * 1024);
4163
+ if (sizeNum > 20) {
4164
+ return yellow(sizeStr);
4165
+ } else if (sizeNum > 10) {
4166
+ return cyan(sizeStr);
4167
+ }
4168
+ return dim(sizeStr);
4169
+ }
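formatSize() colour-codes model sizes by gibibytes (the 1024³ divisor above): anything over 20 GiB renders yellow, over 10 GiB cyan, and smaller sizes dim. For example:

```ts
// Usage of the formatSize() defined above; sizes are in bytes.
const GiB = 1024 * 1024 * 1024;
formatSize(30 * GiB); // yellow – large models
formatSize(12 * GiB); // cyan
formatSize(4 * GiB);  // dim    – small models
```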
3551
4170
  async function modelsList() {
3552
4171
  try {
3553
4172
  const models = await fetchModels();
3554
4173
  if (models.length === 0) {
3555
- console.log("No models installed.");
3556
- console.log(`
3557
- Pull a model with: loclaude models-pull <model-name>`);
3558
- console.log("Example: loclaude models-pull llama3.2");
4174
+ header("Installed Models");
4175
+ console.log("");
4176
+ console.log(info("No models installed."));
4177
+ console.log("");
4178
+ console.log(`Pull a model with: ${cmd("loclaude models-pull <model-name>")}`);
4179
+ console.log(`Example: ${cmd("loclaude models-pull llama3.2")}`);
3559
4180
  return;
3560
4181
  }
3561
- console.log(`Installed models:
3562
- `);
4182
+ header("Installed Models");
4183
+ console.log("");
3563
4184
  const nameWidth = Math.max(...models.map((m) => m.name.length), "NAME".length);
3564
4185
  const sizeWidth = 10;
3565
- console.log(`${"NAME".padEnd(nameWidth)} ${"SIZE".padStart(sizeWidth)} MODIFIED`);
3566
- console.log("-".repeat(nameWidth + sizeWidth + 30));
4186
+ const modifiedWidth = 20;
4187
+ tableHeader(["NAME", "SIZE", "MODIFIED"], [nameWidth, sizeWidth, modifiedWidth]);
3567
4188
  for (const model of models) {
3568
- const name = model.name.padEnd(nameWidth);
3569
- const size = (import_bytes2.default(model.size) ?? "?").padStart(sizeWidth);
3570
- const modified = formatRelativeTime(model.modified_at);
4189
+ const name = magenta(model.name.padEnd(nameWidth));
4190
+ const size = formatSize(model.size).padStart(sizeWidth);
4191
+ const modified = dim(formatRelativeTime(model.modified_at));
3571
4192
  console.log(`${name} ${size} ${modified}`);
3572
4193
  }
3573
- console.log(`
3574
- ${models.length} model(s) installed`);
3575
- } catch (error) {
4194
+ console.log("");
4195
+ console.log(dim(`${models.length} model(s) installed`));
4196
+ } catch (err) {
3576
4197
  const ollamaUrl = getOllamaUrl();
3577
- console.error("Error: Could not connect to Ollama at", ollamaUrl);
3578
- console.error("Make sure Ollama is running: loclaude docker-up");
4198
+ console.log(error(`Could not connect to Ollama at ${ollamaUrl}`));
4199
+ console.log(dim(` Make sure Ollama is running: ${cmd("loclaude docker-up")}`));
3579
4200
  process.exit(1);
3580
4201
  }
3581
4202
  }
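fetchModels() hits Ollama's /api/tags endpoint and modelsList() reads three fields from each entry. The minimal response shape this code relies on (the real Ollama payload carries additional fields such as digest and details):

```ts
// Only the fields consumed in this file; inferred from modelsList() above.
interface OllamaTagsResponse {
  models?: Array<{
    name: string;        // e.g. "qwen3-coder:30b"
    size: number;        // bytes, rendered via formatSize()
    modified_at: string; // timestamp, rendered via formatRelativeTime()
  }>;
}
```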
3582
4203
  async function modelsPull(modelName) {
3583
4204
  if (!modelName) {
3584
- console.error("Error: Model name required");
3585
- console.error("Usage: loclaude models pull <model-name>");
3586
- console.error("Example: loclaude models pull llama3.2");
4205
+ console.log(error("Model name required"));
4206
+ console.log(dim(`Usage: ${cmd("loclaude models-pull <model-name>")}`));
4207
+ console.log(dim(`Example: ${cmd("loclaude models-pull llama3.2")}`));
3587
4208
  process.exit(1);
3588
4209
  }
3589
- console.log(`Pulling model: ${modelName}
3590
- `);
4210
+ console.log(info(`Pulling model: ${magenta(modelName)}`));
4211
+ console.log("");
3591
4212
  const exitCode = await runOllamaCommand(["pull", modelName]);
3592
4213
  if (exitCode === 0) {
3593
- console.log(`
3594
- \u2713 Model '${modelName}' pulled successfully`);
4214
+ console.log("");
4215
+ console.log(success(`Model '${magenta(modelName)}' pulled successfully`));
3595
4216
  }
3596
4217
  process.exit(exitCode);
3597
4218
  }
3598
4219
  async function modelsRm(modelName) {
3599
4220
  if (!modelName) {
3600
- console.error("Error: Model name required");
3601
- console.error("Usage: loclaude models rm <model-name>");
4221
+ console.log(error("Model name required"));
4222
+ console.log(dim(`Usage: ${cmd("loclaude models-rm <model-name>")}`));
3602
4223
  process.exit(1);
3603
4224
  }
3604
- console.log(`Removing model: ${modelName}
3605
- `);
4225
+ console.log(info(`Removing model: ${magenta(modelName)}`));
4226
+ console.log("");
3606
4227
  const exitCode = await runOllamaCommand(["rm", modelName]);
3607
4228
  if (exitCode === 0) {
3608
- console.log(`
3609
- \u2713 Model '${modelName}' removed`);
4229
+ console.log("");
4230
+ console.log(success(`Model '${magenta(modelName)}' removed`));
3610
4231
  }
3611
4232
  process.exit(exitCode);
3612
4233
  }
3613
4234
  async function modelsShow(modelName) {
3614
4235
  if (!modelName) {
3615
- console.error("Error: Model name required");
3616
- console.error("Usage: loclaude models show <model-name>");
4236
+ console.log(error("Model name required"));
4237
+ console.log(dim(`Usage: ${cmd("loclaude models-show <model-name>")}`));
3617
4238
  process.exit(1);
3618
4239
  }
4240
+ console.log(info(`Model details: ${magenta(modelName)}`));
4241
+ console.log("");
3619
4242
  const exitCode = await runOllamaCommand(["show", modelName]);
3620
4243
  process.exit(exitCode);
3621
4244
  }
3622
4245
  async function modelsRun(modelName) {
3623
4246
  if (!modelName) {
3624
- console.error("Error: Model name required");
3625
- console.error("Usage: loclaude models run <model-name>");
4247
+ console.log(error("Model name required"));
4248
+ console.log(dim(`Usage: ${cmd("loclaude models-run <model-name>")}`));
3626
4249
  process.exit(1);
3627
4250
  }
4251
+ console.log(info(`Running model: ${magenta(modelName)}`));
4252
+ console.log("");
3628
4253
  const exitCode = await runOllamaCommand(["run", modelName]);
3629
4254
  process.exit(exitCode);
3630
4255
  }
@@ -3664,7 +4289,7 @@ cli.command("run [...args]", "Run Claude Code with local Ollama", {
3664
4289
  }
3665
4290
  await launchClaude(model, args);
3666
4291
  });
3667
- cli.command("init", "Initialize a new loclaude project").option("--force", "Overwrite existing files").option("--no-webui", "Skip Open WebUI in docker-compose").action(async (options) => {
4292
+ cli.command("init", "Initialize a new loclaude project").option("--force", "Overwrite existing files").option("--no-webui", "Skip Open WebUI in docker-compose").option("--gpu", "Force GPU mode (NVIDIA)").option("--no-gpu", "Force CPU-only mode").action(async (options) => {
3668
4293
  await init(options);
3669
4294
  });
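The new --gpu / --no-gpu options map onto the three-way check at the top of init(): the negated flag arrives as `options.gpu === false`, the positive flag as `true`, and omitting both leaves it undefined so GPU auto-detection runs. In short:

```ts
// Mirrors the branch order in init() above; purely illustrative.
function describeGpuChoice(gpu?: boolean): string {
  if (gpu === false) return "CPU-only mode (--no-gpu)";
  if (gpu === true) return "GPU mode enabled (--gpu)";
  return "auto-detect via hasNvidiaGpu()";
}
```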
3670
4295
  cli.command("doctor", "Check system requirements and health").action(async () => {
@@ -3720,5 +4345,5 @@ export {
3720
4345
  cli
3721
4346
  };
3722
4347
 
3723
- //# debugId=44B75412CB54A27464756E2164756E21
4348
+ //# debugId=8AC1271036F3EB4864756E2164756E21
3724
4349
  //# sourceMappingURL=index.bun.js.map