loclaude 0.0.1-alpha.3 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/CHANGELOG.md +21 -2
  2. package/README.md +152 -64
  3. package/package.json +25 -14
  4. package/libs/cli/CHANGELOG.md +0 -59
  5. package/libs/cli/LICENSE +0 -31
  6. package/libs/cli/README.md +0 -5
  7. package/libs/cli/dist/cac.d.ts +0 -6
  8. package/libs/cli/dist/cac.d.ts.map +0 -1
  9. package/libs/cli/dist/commands/config.d.ts +0 -6
  10. package/libs/cli/dist/commands/config.d.ts.map +0 -1
  11. package/libs/cli/dist/commands/docker.d.ts +0 -17
  12. package/libs/cli/dist/commands/docker.d.ts.map +0 -1
  13. package/libs/cli/dist/commands/doctor.d.ts +0 -9
  14. package/libs/cli/dist/commands/doctor.d.ts.map +0 -1
  15. package/libs/cli/dist/commands/index.d.ts +0 -6
  16. package/libs/cli/dist/commands/index.d.ts.map +0 -1
  17. package/libs/cli/dist/commands/init.d.ts +0 -11
  18. package/libs/cli/dist/commands/init.d.ts.map +0 -1
  19. package/libs/cli/dist/commands/models.d.ts +0 -9
  20. package/libs/cli/dist/commands/models.d.ts.map +0 -1
  21. package/libs/cli/dist/config.d.ts +0 -74
  22. package/libs/cli/dist/config.d.ts.map +0 -1
  23. package/libs/cli/dist/constants.d.ts +0 -12
  24. package/libs/cli/dist/constants.d.ts.map +0 -1
  25. package/libs/cli/dist/index.bun.js +0 -4268
  26. package/libs/cli/dist/index.bun.js.map +0 -55
  27. package/libs/cli/dist/index.d.ts +0 -2
  28. package/libs/cli/dist/index.d.ts.map +0 -1
  29. package/libs/cli/dist/index.js +0 -4271
  30. package/libs/cli/dist/index.js.map +0 -55
  31. package/libs/cli/dist/output.d.ts +0 -107
  32. package/libs/cli/dist/output.d.ts.map +0 -1
  33. package/libs/cli/dist/spawn.d.ts +0 -35
  34. package/libs/cli/dist/spawn.d.ts.map +0 -1
  35. package/libs/cli/dist/types.d.ts +0 -50
  36. package/libs/cli/dist/types.d.ts.map +0 -1
  37. package/libs/cli/dist/utils.d.ts +0 -32
  38. package/libs/cli/dist/utils.d.ts.map +0 -1
  39. package/libs/cli/package.json +0 -90
package/CHANGELOG.md CHANGED
@@ -7,6 +7,25 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
  ## [Unreleased]
 
+ ## [0.0.3] - 2025-01-22
+
+ ### Added
+
+ - Added docs links to README.md
+
+ ### Changed
+
+ - Removes `libs` from `package.json#files` array
+ - Changes publish tag from `alpha` to `latest`
+ - Running `loclaude` with no commands now prints the help message.
+
+ ## [0.0.2] - 2025-01-22
+
+ ### Changed
+
+ - Adds Ollama Version check (>= 0.14.2) to `doctor` command
+ - Pins to version v0.0.2 of `@loclaude-internal/cli`
+
  ## [0.0.1-alpha.3] - 2025-01-21
 
  ### Added
@@ -63,5 +82,5 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
  This is an alpha release. The API and command structure may change before 1.0.
 
- [Unreleased]: https://github.com/nicholasgalante1997/docker-ollama/compare/v0.0.1-rc.1...HEAD
- [0.0.1-alpha.1]: https://github.com/nicholasgalante1997/docker-ollama/releases/tag/v0.0.1-alpha.1
+ [Unreleased]: https://github.com/nicholasgalante1997/loclaude/compare/v0.0.1-rc.1...HEAD
+ [0.0.1-alpha.1]: https://github.com/nicholasgalante1997/loclaude/releases/tag/v0.0.1-alpha.1
package/README.md CHANGED
@@ -1,76 +1,110 @@
+ <div align="center">
+
  # loclaude
 
- Run [Claude Code](https://docs.anthropic.com/en/docs/claude-code) with local [Ollama](https://ollama.ai/) LLMs.
+ Read the [docs](https://nicholasgalante1997.github.io/loclaude/#/)
 
- loclaude provides a CLI to:
- - Launch Claude Code sessions connected to your local Ollama instance
- - Manage Ollama + Open WebUI Docker containers
- - Pull and manage Ollama models
- - Scaffold new projects with opinionated Docker configs
- - **Supports both GPU and CPU-only modes**
+ **Claude Code with Local LLMs**
 
- ## Installation
+ Stop burning through Claude API usage limits. Run Claude Code's powerful agentic workflow with local Ollama models on your own hardware.
 
- ```bash
- # With npm (requires Node.js 18+)
- npm install -g loclaude
+ > **Requires ollama v0.14.2 or higher**
 
- # With bun (recommended)
- bun install -g loclaude
- ```
+ **Zero API costs. No rate limits. Complete privacy.**
 
- ## Prerequisites
+ [![npm version](https://img.shields.io/npm/v/loclaude.svg)](https://www.npmjs.com/package/loclaude)
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
 
- - [Docker](https://docs.docker.com/get-docker/) with Docker Compose v2
- - [Claude Code CLI](https://docs.anthropic.com/en/docs/claude-code) installed (`npm install -g @anthropic-ai/claude-code`)
+ [Quick Start](#quick-start-5-minutes) [Why loclaude?](#why-loclaude) [Installation](#installation) [FAQ](#faq)
 
- ### For GPU Mode (Recommended)
+ </div>
 
- - [NVIDIA GPU](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) with drivers
- - [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
+ ---
+
+ ## Why loclaude?
+
+ ### Real Value
+
+ - **No Rate Limits**: Use Claude Code as much as you want
+ - **Privacy**: Your code never leaves your machine
+ - **Cost Control**: Use your own hardware, pay for electricity not tokens
+ - **Offline Capable**: Work without internet (after model download)
+ - **GPU or CPU**: Works with NVIDIA GPUs or CPU-only systems
 
- ### CPU-Only Mode
+ ### What to Expect
 
- No GPU required! Use `--no-gpu` flag during init for systems without NVIDIA GPUs.
+ loclaude provides:
 
- Check your setup with:
+ - One-command setup for Ollama + Open WebUI containers
+ - Smart model management with auto-loading
+ - GPU auto-detection with CPU fallback
+ - Project scaffolding with Docker configs
+
+ ## Installation
 
  ```bash
- loclaude doctor
+ # With npm (requires Node.js 18+)
+ npm install -g loclaude
+
+ # With bun (faster, recommended)
+ bun install -g loclaude # use bun-loclaude for commands
  ```
 
- ## Quick Start
+ ### vs. Other Solutions
+
+ | Solution | Cost | Speed | Privacy | Limits |
+ |----------|------|-------|---------|--------|
+ | **loclaude** | Free after setup | Fast (GPU) | 100% local | None |
+ | Claude API/Web | $20-200+/month | Fast | Cloud-based | Rate limited |
+ | GitHub Copilot | $10-20/month | Fast | Cloud-based | Context limited |
+ | Cursor/Codeium | $20+/month | Fast | Cloud-based | Usage limits |
+
+ loclaude gives you the utility of Ollama with the convenience of a managed solution for claude code integration.
 
- ### With GPU (Auto-detected)
+ ## Quick Start (5 Minutes)
 
  ```bash
- # Initialize a new project (auto-detects GPU)
+ # 1. Install loclaude
+ npm install -g loclaude
+
+ # 2. Install Claude Code (if you haven't already)
+ npm install -g @anthropic-ai/claude-code
+
+ # 3. Setup your project (auto-detects GPU)
  loclaude init
 
- # Start Ollama + Open WebUI containers
+ # 4. Start Ollama container
  loclaude docker-up
 
- # Pull a model
- loclaude models-pull qwen3-coder:30b
+ # 5. Pull a model (choose based on your hardware)
+ loclaude models-pull qwen3-coder:30b # GPU with 16GB+ VRAM
+ # OR
+ loclaude models-pull qwen2.5-coder:7b # CPU or limited VRAM
 
- # Run Claude Code with local LLM
+ # 6. Run Claude Code with unlimited local LLM
  loclaude run
  ```
 
- ### CPU-Only Mode
+ That's it! You now have unlimited Claude Code sessions with local models.
 
- ```bash
- # Initialize without GPU support
- loclaude init --no-gpu
+ ## Prerequisites
 
- # Start containers
- loclaude docker-up
+ **Required:**
 
- # Pull a CPU-optimized model
- loclaude models-pull qwen2.5-coder:7b
+ - [Docker](https://docs.docker.com/get-docker/) with Docker Compose v2
+ - [Claude Code CLI](https://docs.anthropic.com/en/docs/claude-code) (`npm install -g @anthropic-ai/claude-code`)
 
- # Run Claude Code
- loclaude run
+ **Optional (for GPU acceleration):**
+
+ - NVIDIA GPU with 16GB+ VRAM (RTX 3090, 4090, A5000, etc.)
+ - [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
+
+ **CPU-only systems work fine!** Use `--no-gpu` flag during init and smaller models.
+
+ **Check your setup:**
+
+ ```bash
+ loclaude doctor
  ```
 
  ## Features
@@ -78,20 +112,15 @@ loclaude run
  ### Automatic Model Loading
 
  When you run `loclaude run`, it automatically:
+
  1. Checks if your selected model is loaded in Ollama
- 2. If not loaded, warms up the model with a 10-minute keep-alive
+ 2. If not loaded, warms up the model with a 10-minute keep-alive (Configurable through env vars)
  3. Shows `[loaded]` indicator in model selection for running models
 
- ### Colorful CLI Output
-
- All commands feature colorful, themed output for better readability:
- - Status indicators with colors (green/yellow/red)
- - Model sizes color-coded by magnitude
- - Clear headers and structured output
-
  ### GPU Auto-Detection
 
  `loclaude init` automatically detects NVIDIA GPUs and configures the appropriate Docker setup:
+
  - **GPU detected**: Uses `runtime: nvidia` and CUDA-enabled images
  - **No GPU**: Uses CPU-only configuration with smaller default models
 
@@ -147,21 +176,22 @@ loclaude config-paths # Show config file search paths
 
  ## Recommended Models
 
- ### For GPU (16GB+ VRAM)
+ ### For GPU (16GB+ VRAM) - Best Experience
+
+ | Model | Size | Speed | Quality | Best For |
+ |-------|------|-------|---------|----------|
+ | `qwen3-coder:30b` | ~17 GB | ~50-100 tok/s | Excellent | **Most coding tasks, refactoring, debugging** |
+ | `deepseek-coder:33b` | ~18 GB | ~40-80 tok/s | Excellent | Code understanding, complex logic |
 
- | Model | Size | Use Case |
- |-------|------|----------|
- | `qwen3-coder:30b` | ~17 GB | Best coding performance |
- | `deepseek-coder:33b` | ~18 GB | Code understanding |
- | `gpt-oss:20b` | ~13 GB | General purpose |
+ **Recommendation:** Start with `qwen3-coder:30b` for the best balance of speed and quality.
 
- ### For CPU or Limited VRAM
+ ### For CPU or Limited VRAM (<16GB) - Still Productive
 
- | Model | Size | Use Case |
- |-------|------|----------|
- | `qwen2.5-coder:7b` | ~4 GB | Coding on CPU |
- | `llama3.2:3b` | ~2 GB | Fast, simple tasks |
- | `gemma2:9b` | ~5 GB | General purpose |
+ | Model | Size | Speed | Quality | Best For |
+ |-------|------|-------|---------|----------|
+ | `qwen2.5-coder:7b` | ~4 GB | ~10-20 tok/s | Good | **Code completion, simple refactoring** |
+ | `deepseek-coder:6.7b` | ~4 GB | ~10-20 tok/s | Good | Understanding existing code |
+ | `llama3.2:3b` | ~2 GB | ~15-30 tok/s | Fair | Quick edits, file operations |
 
  ## Configuration
 
@@ -217,8 +247,8 @@ When containers are running:
 
  | Service | URL | Description |
  |---------|-----|-------------|
- | Ollama API | http://localhost:11434 | LLM inference API |
- | Open WebUI | http://localhost:3000 | Chat interface |
+ | Ollama API | <http://localhost:11434> | LLM inference API |
+ | Open WebUI | <http://localhost:3000> | Chat interface |
 
  ## Project Structure
 
@@ -248,6 +278,30 @@ mise run pull <model> # loclaude models-pull <model>
  mise run doctor # loclaude doctor
  ```
 
+ ## FAQ
+
+ ### Is this really unlimited?
+
+ Yes! Once you have models downloaded, you can run as many sessions as you want with zero additional cost.
+
+ ### How does the quality compare to Claude API?
+
+ 30B parameter models (qwen3-coder:30b) are comparable to GPT-3.5 and work okay for most coding tasks. Larger models have a bit more success. Claude API is still better, but this allows for continuing work when you have hit that pesky usage limit.
+
+ ### Do I need a GPU?
+
+ No, but highly recommended. CPU-only mode works with smaller models at ~10-20 tokens/sec. A GPU (16GB+ VRAM) gives you 50-100 tokens/sec with larger, better models.
+
+ ### What's the catch?
+
+ - Initial setup takes 5-10 minutes
+ - Model downloads are large (4-20GB)
+ - GPU hardware investment if you don't have one (~$500-1500 used)
+
+ ### Can I use this with the Claude API too?
+
+ Absolutely! Keep using Claude API for critical tasks, use loclaude for everything else to save money and avoid limits.
+
  ## Troubleshooting
 
  ### Check System Requirements
@@ -257,6 +311,7 @@ loclaude doctor
  ```
 
  This verifies:
+
  - Docker and Docker Compose installation
  - NVIDIA GPU detection (optional)
  - NVIDIA Container Toolkit (optional)
@@ -301,12 +356,45 @@ If inference is slow on CPU:
  2. Expect ~10-20 tokens/sec on modern CPUs
  3. Consider cloud models via Ollama: `glm-4.7:cloud`
 
+ ## Contributing
+
+ loclaude is open source and welcomes contributions! Here's how you can help:
+
+ ### Share Your Experience
+
+ - Star the repo if loclaude saves you money or rate limits
+ - Share your setup and model recommendations
+ - Write about your experience on dev.to, Twitter, or your blog
+ - Report bugs and request features via GitHub Issues
+
+ ### Code Contributions
+
+ - Fix bugs or add features (see open issues)
+ - Improve documentation or examples
+ - Add support for new model providers
+ - Optimize model loading and performance
+
+ ### Spread the Word
+
+ - Post on r/LocalLLaMA, r/selfhosted, r/ClaudeAI
+ - Share in Discord/Slack dev communities
+ - Help others troubleshoot in GitHub Discussions
+
+ Every star, issue report, and shared experience helps more developers discover unlimited local Claude Code.
+
+ ## Getting Help
+
+ - **Issues/Bugs**: [GitHub Issues](https://github.com/nicholasgalante1997/loclaude/issues)
+ - **Questions**: [GitHub Discussions](https://github.com/nicholasgalante1997/loclaude/discussions)
+ - **Documentation**: Run `loclaude --help` or check this README
+ - **System Check**: Run `loclaude doctor` to diagnose problems
+
  ## Development
 
  ### Building from Source
 
  ```bash
- git clone https://github.com/nicholasgalante1997/docker-ollama.git loclaude
+ git clone https://github.com/nicholasgalante1997/loclaude.git loclaude
  cd loclaude
  bun install
  bun run build
package/package.json CHANGED
@@ -1,17 +1,27 @@
  {
  "name": "loclaude",
- "version": "0.0.1-alpha.3",
- "description": "Run Claude Code with local Ollama LLMs",
+ "version": "0.0.3",
+ "description": "Claude Code with local Ollama LLMs - Zero API costs, no rate limits, complete privacy",
  "type": "module",
  "license": "./LICENSE",
  "keywords": [
  "claude",
+ "claude-code",
  "ollama",
  "llm",
+ "local-llm",
+ "ai-coding",
+ "self-hosted",
  "cli",
  "ai",
  "open-webui",
- "claude-code"
+ "docker",
+ "nvidia",
+ "gpu",
+ "offline-ai",
+ "privacy",
+ "cost-free",
+ "unlimited"
  ],
  "contributors": [
  {
@@ -22,24 +32,22 @@
  ],
  "repository": {
  "type": "git",
- "url": "git+https://github.com/nicholasgalante1997/docker-ollama.git"
+ "url": "git+https://github.com/nicholasgalante1997/loclaude.git"
  },
- "homepage": "https://github.com/nicholasgalante1997/docker-ollama#readme",
+ "homepage": "https://github.com/nicholasgalante1997/loclaude#readme",
  "bugs": {
- "url": "https://github.com/nicholasgalante1997/docker-ollama/issues"
+ "url": "https://github.com/nicholasgalante1997/loclaude/issues"
  },
  "publishConfig": {
  "access": "public",
  "registry": "https://registry.npmjs.org/",
- "tag": "alpha"
+ "tag": "latest"
  },
  "files": [
  ".claude/CLAUDE.md",
  "bin",
  "dist",
  "docker/docker-compose.yml",
- "libs/cli/package.json",
- "libs/cli/dist",
  "README.md",
  "CHANGELOG.md",
  "LICENSE"
@@ -63,13 +71,15 @@
  "test": "bun test libs/cli",
  "prerelease-check": "./scripts/prerelease-check.sh",
  "prepublishOnly": "bun run build",
- "release": "bun run build && npm publish --access public",
- "release:rc": "bun run build && npm publish --tag rc --access public",
- "release:alpha": "bun run build && npm publish --tag alpha --access public",
- "release:beta": "bun run build && npm publish --tag beta --access public"
+ "prerelease": "run-s test build prerelease-check",
+ "release": "npm publish --access public",
+ "release:rc": "npm publish --tag rc --access public",
+ "release:alpha": "npm publish --tag alpha --access public",
+ "release:beta": "npm publish --tag beta --access public",
+ "postrelease": "./scripts/tag.sh $(jq -r .version package.json)"
  },
  "dependencies": {
- "@loclaude-internal/cli": "^0.0.1-alpha.2"
+ "@loclaude-internal/cli": "^0.0.3"
  },
  "peerDependencies": {
  "typescript": "^5"
@@ -87,6 +97,7 @@
  "eslint": "^9.39.2",
  "globals": "^17.0.0",
  "jiti": "^2.6.1",
+ "npm-run-all": "^4.1.5",
  "prettier": "^3.8.0",
  "turbo": "^2.7.5",
  "typescript-eslint": "^8.53.1"
package/libs/cli/CHANGELOG.md DELETED
@@ -1,59 +0,0 @@
- # Changelog
-
- All notable changes to this project will be documented in this file.
-
- The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
- and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
- ## [Unreleased]
-
- ## [0.0.1-alpha.2] - 2025-01-20
-
- ### Added
-
- - Adds support for CPU Only Ollama Hosts
-
- ### Changed
-
- - Modifies documentation on output files from `init` command
-
- ## [0.0.1-alpha.1] - 2025-01-19
-
- ### Added
-
- - **CLI Commands**
- - `loclaude run` - Run Claude Code with local Ollama (interactive model selection)
- - `loclaude init` - Scaffold docker-compose.yml, config, and mise.toml
- - `loclaude doctor` - Check system prerequisites (Docker, GPU, Claude CLI)
- - `loclaude config` / `loclaude config-paths` - View configuration
- - `loclaude docker-up/down/status/logs/restart` - Docker container management
- - `loclaude models` - List installed Ollama models
- - `loclaude models-pull/rm/show/run` - Model management commands
-
- - **Configuration System**
- - Project-local config: `./.loclaude/config.json`
- - User global config: `~/.config/loclaude/config.json`
- - Environment variable support (`OLLAMA_URL`, `OLLAMA_MODEL`, etc.)
- - Layered config merging with clear priority
-
- - **Cross-Runtime Support**
- - Works with both Bun and Node.js runtimes
- - Dual entry points: `bin/index.ts` (Bun) and `bin/index.mjs` (Node)
-
- - **Docker Integration**
- - Bundled docker-compose.yml template with Ollama + Open WebUI
- - NVIDIA GPU support out of the box
- - Health checks for both services
-
- - **Project Scaffolding**
- - `loclaude init` creates complete project structure
- - Generates mise.toml with task aliases
- - Creates .claude/CLAUDE.md for Claude Code instructions
- - Sets up .gitignore for model directory
-
- ### Notes
-
- This is an alpha release. The API and command structure may change before 1.0.
-
- [Unreleased]: https://github.com/nicholasgalante1997/loclaude/compare/v0.0.1-rc.1...HEAD
- [0.0.1-alpha.1]: https://github.com/nicholasgalante1997/loclaude/releases/tag/v0.0.1-alpha.1
package/libs/cli/LICENSE DELETED
@@ -1,31 +0,0 @@
- MIT License
-
- Copyright (c) 2026 Mega Blastoise
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-
- ---
-
- Addendum, January 19, 2026
-
- This package leverages the [Bun](bun.sh) shell to run shell commands on behalf of the end user.
-
- In an ideal world, I don't have to write this addendum. In this world, I do.
-
- If you pass unsanitized input into this command line tool, all bets are off. I'll make no guarantees on your behalf about behavior or safety. I'll personally never talk to you in real life. May god have mercy on your soul.
package/libs/cli/README.md DELETED
@@ -1,5 +0,0 @@
- # @loclaude-internal/cli
-
- Internal modules for usage with [`loclaude`](https://www.npmjs.com/package/loclaude). Do not use directly.
-
- This package is not intended for public use and may change without notice.
package/libs/cli/dist/cac.d.ts DELETED
@@ -1,6 +0,0 @@
- declare const cli: import("cac").CAC;
- export declare const help: () => void;
- export declare const version: () => void;
- export declare const run_cli: () => void;
- export { cli };
- //# sourceMappingURL=cac.d.ts.map
package/libs/cli/dist/cac.d.ts.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"cac.d.ts","sourceRoot":"","sources":["../lib/cac.ts"],"names":[],"mappings":"AAqBA,QAAA,MAAM,GAAG,mBAAkB,CAAC;AAsI5B,eAAO,MAAM,IAAI,YAAyB,CAAC;AAC3C,eAAO,MAAM,OAAO,YAA4B,CAAC;AAEjD,eAAO,MAAM,OAAO,QAAO,IAE1B,CAAC;AAEF,OAAO,EAAE,GAAG,EAAE,CAAC"}
package/libs/cli/dist/commands/config.d.ts DELETED
@@ -1,6 +0,0 @@
- /**
-  * config command - Show and manage configuration
-  */
- export declare function configShow(): Promise<void>;
- export declare function configPaths(): Promise<void>;
- //# sourceMappingURL=config.d.ts.map
package/libs/cli/dist/commands/config.d.ts.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"config.d.ts","sourceRoot":"","sources":["../../lib/commands/config.ts"],"names":[],"mappings":"AAAA;;GAEG;AAKH,wBAAsB,UAAU,IAAI,OAAO,CAAC,IAAI,CAAC,CAmChD;AAED,wBAAsB,WAAW,IAAI,OAAO,CAAC,IAAI,CAAC,CA6BjD"}
package/libs/cli/dist/commands/docker.d.ts DELETED
@@ -1,17 +0,0 @@
- /**
-  * docker command - Manage Docker containers
-  */
- export interface DockerOptions {
-   file?: string;
-   detach?: boolean;
- }
- export declare function dockerUp(options?: DockerOptions): Promise<void>;
- export declare function dockerDown(options?: DockerOptions): Promise<void>;
- export declare function dockerStatus(options?: DockerOptions): Promise<void>;
- export declare function dockerLogs(options?: DockerOptions & {
-   follow?: boolean;
-   service?: string;
- }): Promise<void>;
- export declare function dockerRestart(options?: DockerOptions): Promise<void>;
- export declare function dockerExec(service: string, command: string[], options?: DockerOptions): Promise<number>;
- //# sourceMappingURL=docker.d.ts.map
package/libs/cli/dist/commands/docker.d.ts.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"docker.d.ts","sourceRoot":"","sources":["../../lib/commands/docker.ts"],"names":[],"mappings":"AAAA;;GAEG;AAiEH,MAAM,WAAW,aAAa;IAC5B,IAAI,CAAC,EAAE,MAAM,CAAC;IACd,MAAM,CAAC,EAAE,OAAO,CAAC;CAClB;AAeD,wBAAsB,QAAQ,CAAC,OAAO,GAAE,aAAkB,GAAG,OAAO,CAAC,IAAI,CAAC,CAoBzE;AAED,wBAAsB,UAAU,CAAC,OAAO,GAAE,aAAkB,GAAG,OAAO,CAAC,IAAI,CAAC,CAW3E;AAED,wBAAsB,YAAY,CAAC,OAAO,GAAE,aAAkB,GAAG,OAAO,CAAC,IAAI,CAAC,CAK7E;AAED,wBAAsB,UAAU,CAC9B,OAAO,GAAE,aAAa,GAAG;IAAE,MAAM,CAAC,EAAE,OAAO,CAAC;IAAC,OAAO,CAAC,EAAE,MAAM,CAAA;CAAO,GACnE,OAAO,CAAC,IAAI,CAAC,CAiBf;AAED,wBAAsB,aAAa,CAAC,OAAO,GAAE,aAAkB,GAAG,OAAO,CAAC,IAAI,CAAC,CAW9E;AAED,wBAAsB,UAAU,CAC9B,OAAO,EAAE,MAAM,EACf,OAAO,EAAE,MAAM,EAAE,EACjB,OAAO,GAAE,aAAkB,GAC1B,OAAO,CAAC,MAAM,CAAC,CAWjB"}
package/libs/cli/dist/commands/doctor.d.ts DELETED
@@ -1,9 +0,0 @@
- /**
-  * doctor command - Check prerequisites and system health
-  */
- export declare function doctor(): Promise<void>;
- /**
-  * Check if NVIDIA GPU is available (exported for use by init command)
-  */
- export declare function hasNvidiaGpu(): Promise<boolean>;
- //# sourceMappingURL=doctor.d.ts.map
package/libs/cli/dist/commands/doctor.d.ts.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"doctor.d.ts","sourceRoot":"","sources":["../../lib/commands/doctor.ts"],"names":[],"mappings":"AAAA;;GAEG;AAqLH,wBAAsB,MAAM,IAAI,OAAO,CAAC,IAAI,CAAC,CA8B5C;AAED;;GAEG;AACH,wBAAsB,YAAY,IAAI,OAAO,CAAC,OAAO,CAAC,CAMrD"}
package/libs/cli/dist/commands/index.d.ts DELETED
@@ -1,6 +0,0 @@
- export { init } from './init';
- export { doctor } from './doctor';
- export { configShow, configPaths } from './config';
- export { dockerUp, dockerDown, dockerStatus, dockerLogs, dockerRestart, dockerExec } from './docker';
- export { modelsList, modelsPull, modelsRm, modelsShow, modelsRun } from './models';
- //# sourceMappingURL=index.d.ts.map
package/libs/cli/dist/commands/index.d.ts.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../lib/commands/index.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,IAAI,EAAE,MAAM,QAAQ,CAAC;AAC9B,OAAO,EAAE,MAAM,EAAE,MAAM,UAAU,CAAC;AAClC,OAAO,EAAE,UAAU,EAAE,WAAW,EAAE,MAAM,UAAU,CAAC;AACnD,OAAO,EAAE,QAAQ,EAAE,UAAU,EAAE,YAAY,EAAE,UAAU,EAAE,aAAa,EAAE,UAAU,EAAE,MAAM,UAAU,CAAC;AACrG,OAAO,EAAE,UAAU,EAAE,UAAU,EAAE,QAAQ,EAAE,UAAU,EAAE,SAAS,EAAE,MAAM,UAAU,CAAC"}
package/libs/cli/dist/commands/init.d.ts DELETED
@@ -1,11 +0,0 @@
- /**
-  * init command - Scaffold docker-compose.yml and config
-  */
- export interface InitOptions {
-   force?: boolean;
-   noWebui?: boolean;
-   gpu?: boolean;
-   noGpu?: boolean;
- }
- export declare function init(options?: InitOptions): Promise<void>;
- //# sourceMappingURL=init.d.ts.map
package/libs/cli/dist/commands/init.d.ts.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"init.d.ts","sourceRoot":"","sources":["../../lib/commands/init.ts"],"names":[],"mappings":"AAAA;;GAEG;AA4nBH,MAAM,WAAW,WAAW;IAC1B,KAAK,CAAC,EAAE,OAAO,CAAC;IAChB,OAAO,CAAC,EAAE,OAAO,CAAC;IAClB,GAAG,CAAC,EAAE,OAAO,CAAC;IACd,KAAK,CAAC,EAAE,OAAO,CAAC;CACjB;AAED,wBAAsB,IAAI,CAAC,OAAO,GAAE,WAAgB,GAAG,OAAO,CAAC,IAAI,CAAC,CAsInE"}
package/libs/cli/dist/commands/models.d.ts DELETED
@@ -1,9 +0,0 @@
- /**
-  * models command - Manage Ollama models
-  */
- export declare function modelsList(): Promise<void>;
- export declare function modelsPull(modelName: string): Promise<void>;
- export declare function modelsRm(modelName: string): Promise<void>;
- export declare function modelsShow(modelName: string): Promise<void>;
- export declare function modelsRun(modelName: string): Promise<void>;
- //# sourceMappingURL=models.d.ts.map
package/libs/cli/dist/commands/models.d.ts.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"models.d.ts","sourceRoot":"","sources":["../../lib/commands/models.ts"],"names":[],"mappings":"AAAA;;GAEG;AAiFH,wBAAsB,UAAU,IAAI,OAAO,CAAC,IAAI,CAAC,CAyChD;AAED,wBAAsB,UAAU,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAkBjE;AAED,wBAAsB,QAAQ,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAiB/D;AAED,wBAAsB,UAAU,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAWjE;AAED,wBAAsB,SAAS,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC,CAWhE"}