aibox-cli 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +164 -0
- package/bin/aibox +1156 -0
- package/package.json +39 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 repalash
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
# aibox
|
|
2
|
+
|
|
3
|
+
Run AI coding agents in isolated Docker containers. Mount your project, skip permission prompts safely, run multiple instances in parallel.
|
|
4
|
+
|
|
5
|
+
## Install
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
# npm
|
|
9
|
+
npm install -g aibox-cli
|
|
10
|
+
|
|
11
|
+
# brew
|
|
12
|
+
brew install blitzdotdev/tap/aibox
|
|
13
|
+
```
|
|
14
|
+
|
|
15
|
+
### Prerequisites
|
|
16
|
+
|
|
17
|
+
On macOS, if Docker isn't installed, aibox will offer to install [Colima](https://github.com/abiosoft/colima) + Docker via Homebrew automatically. It also works with [Docker Desktop](https://www.docker.com/products/docker-desktop/) or [OrbStack](https://orbstack.dev) if you already have them.
|
|
18
|
+
|
|
19
|
+
## Usage
|
|
20
|
+
|
|
21
|
+
```bash
|
|
22
|
+
# first time (once)
|
|
23
|
+
aibox build
|
|
24
|
+
|
|
25
|
+
# in any project directory
|
|
26
|
+
aibox up # start container
|
|
27
|
+
aibox claude --yolo # no prompts, full sudo, no firewall
|
|
28
|
+
aibox claude --safe # keep prompts, restricted sudo, firewall on
|
|
29
|
+
aibox claude # asks you each time
|
|
30
|
+
aibox shell # zsh inside the container
|
|
31
|
+
aibox shell ls -la # run a command inline
|
|
32
|
+
aibox down # stop and remove
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
### Named Instances
|
|
36
|
+
|
|
37
|
+
Run multiple containers for the same project:
|
|
38
|
+
|
|
39
|
+
```bash
|
|
40
|
+
aibox --name refactor claude --yolo
|
|
41
|
+
aibox --name tests claude --safe
|
|
42
|
+
aibox --name refactor down
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
### Isolated Instances
|
|
46
|
+
|
|
47
|
+
By default, named instances share the project directory. For true isolation:
|
|
48
|
+
|
|
49
|
+
```bash
|
|
50
|
+
# Full isolation — copy repo into a Docker volume
|
|
51
|
+
aibox --name refactor --copy claude --yolo
|
|
52
|
+
|
|
53
|
+
# Lightweight — git worktree on host
|
|
54
|
+
aibox --name feat --worktree claude --yolo
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
`--copy` uses `git bundle` to clone tracked files into a volume (excludes .gitignored files, preserves history). Changes stay inside the container until pushed. Best for automation and parallel agents.
|
|
58
|
+
|
|
59
|
+
`--worktree` creates a `git worktree` at `~/.config/aibox/worktrees/`. Near-instant, shares remotes with the main repo. Best for feature branches and quick experiments.
|
|
60
|
+
|
|
61
|
+
Both create a new branch `aibox/<instance-name>` automatically.
|
|
62
|
+
|
|
63
|
+
### Management
|
|
64
|
+
|
|
65
|
+
```bash
|
|
66
|
+
aibox status # list all aibox containers
|
|
67
|
+
aibox volumes # list copy volumes and worktrees
|
|
68
|
+
aibox down # stop current container
|
|
69
|
+
aibox down --clean # also remove copy volumes / worktrees
|
|
70
|
+
aibox down --all # stop all containers for this project
|
|
71
|
+
aibox nuke # remove ALL aibox containers
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
### Custom Image
|
|
75
|
+
|
|
76
|
+
```bash
|
|
77
|
+
aibox --image myteam/devbox:v2 up
|
|
78
|
+
aibox build --image custom:latest
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
## IDE Integration
|
|
82
|
+
|
|
83
|
+
aibox generates a `compose.dev.yaml` and configures your IDE on `aibox init` (or automatically on first `aibox up`).
|
|
84
|
+
|
|
85
|
+
### JetBrains (WebStorm, IntelliJ, etc.)
|
|
86
|
+
|
|
87
|
+
1. Install the [Claude Code plugin](https://plugins.jetbrains.com/plugin/claude-code)
|
|
88
|
+
2. Run `aibox init` in your project
|
|
89
|
+
3. Set the plugin's startup command to:
|
|
90
|
+
```
|
|
91
|
+
/usr/local/bin/aibox claude --yolo
|
|
92
|
+
```
|
|
93
|
+
|
|
94
|
+
The Node.js interpreter is also configured to use the container, so running/debugging from the IDE uses the same sandboxed environment.
|
|
95
|
+
|
|
96
|
+
### VS Code
|
|
97
|
+
|
|
98
|
+
1. Install the [Claude Code extension](https://marketplace.visualstudio.com/items?itemName=anthropic.claude-code)
|
|
99
|
+
2. Set the Claude Code startup command to `aibox claude --yolo`
|
|
100
|
+
3. Or use Dev Containers with the generated `compose.dev.yaml`
|
|
101
|
+
|
|
102
|
+
### Cursor / Windsurf / Other Editors
|
|
103
|
+
|
|
104
|
+
Set your agent's startup command to `aibox claude --yolo`. Works anywhere you can configure a shell command.
|
|
105
|
+
|
|
106
|
+
## Other Agents
|
|
107
|
+
|
|
108
|
+
The container ships with Node.js 20, git, ripgrep, zsh, python3, and build tools. Claude Code is pre-installed, but you can run anything:
|
|
109
|
+
|
|
110
|
+
```bash
|
|
111
|
+
aibox shell
|
|
112
|
+
# inside container:
|
|
113
|
+
aider
|
|
114
|
+
codex
|
|
115
|
+
# etc.
|
|
116
|
+
```
|
|
117
|
+
|
|
118
|
+
Customize the Dockerfile at `~/.config/aibox/Dockerfile`.
|
|
119
|
+
|
|
120
|
+
## How It Works
|
|
121
|
+
|
|
122
|
+
- **Build**: Creates a Docker image with Node.js, Claude Code, and dev tools
|
|
123
|
+
- **Up**: Starts a container with your project bind-mounted
|
|
124
|
+
- **Claude**: Opens Claude Code inside the container, optionally skipping permission prompts
|
|
125
|
+
- **Modes**: `--yolo` gives full access; `--safe` enables firewall + restricted sudo
|
|
126
|
+
- **Auth**: A shared Docker volume persists Claude authentication across containers
|
|
127
|
+
- **Isolation**: Each project gets its own container and isolated `node_modules`
|
|
128
|
+
- **Safety**: Refuses to run in `$HOME`, `/tmp`, or other dangerous directories
|
|
129
|
+
|
|
130
|
+
## Network Firewall
|
|
131
|
+
|
|
132
|
+
In safe mode, outbound traffic is restricted to Claude API, npm, GitHub, PyPI, DNS, and SSH. Add extra domains:
|
|
133
|
+
|
|
134
|
+
```bash
|
|
135
|
+
export AIBOX_EXTRA_DOMAINS="example.com,api.myservice.io"
|
|
136
|
+
aibox claude --safe
|
|
137
|
+
```
|
|
138
|
+
|
|
139
|
+
## Config
|
|
140
|
+
|
|
141
|
+
Per-project settings in `.aibox`:
|
|
142
|
+
|
|
143
|
+
```
|
|
144
|
+
IMAGE=aibox:latest
|
|
145
|
+
SHARED_MODULES=false
|
|
146
|
+
```
|
|
147
|
+
|
|
148
|
+
## All Flags
|
|
149
|
+
|
|
150
|
+
| Flag | Description |
|
|
151
|
+
|------|-------------|
|
|
152
|
+
| `--name NAME` | Named instance (multiple containers per project) |
|
|
153
|
+
| `--image NAME` | Override base Docker image |
|
|
154
|
+
| `--shared-modules` | Share node_modules between host and container |
|
|
155
|
+
| `--copy` | Copy repo into Docker volume (full isolation) |
|
|
156
|
+
| `--worktree` | Use git worktree (lightweight isolation) |
|
|
157
|
+
| `--yolo` | Skip prompts, full sudo, no firewall |
|
|
158
|
+
| `--safe` | Keep prompts, restricted sudo, firewall on |
|
|
159
|
+
| `--all` | With `down`: stop all project containers |
|
|
160
|
+
| `--clean` | With `down`: also remove copy volumes / worktrees |
|
|
161
|
+
|
|
162
|
+
## License
|
|
163
|
+
|
|
164
|
+
MIT
|
package/bin/aibox
ADDED
|
@@ -0,0 +1,1156 @@
|
|
|
1
|
+
#!/usr/bin/env bash
|
|
2
|
+
# aibox - Run AI coding tools in isolated Docker containers.
|
|
3
|
+
#
|
|
4
|
+
# Usage:
|
|
5
|
+
# aibox up Start the container
|
|
6
|
+
# aibox claude [--yolo|--safe] Open Claude Code
|
|
7
|
+
# --yolo: skip permissions, full sudo, no firewall
|
|
8
|
+
# --safe: keep permissions, restricted sudo, firewall on
|
|
9
|
+
# default: asks you each time
|
|
10
|
+
# aibox shell Open a zsh shell in the container
|
|
11
|
+
# aibox shell <command> Run a command in the container
|
|
12
|
+
# aibox down Stop and remove the container
|
|
13
|
+
# aibox down --clean Also remove copy volumes / worktrees
|
|
14
|
+
# aibox down --all Stop all containers for this project
|
|
15
|
+
# aibox status List all aibox containers
|
|
16
|
+
# aibox volumes List copy volumes and worktrees
|
|
17
|
+
# aibox nuke Remove ALL aibox containers (all projects)
|
|
18
|
+
# aibox build [--image NAME] Build the base image
|
|
19
|
+
# aibox init [flags] Generate compose.dev.yaml + configure WebStorm
|
|
20
|
+
# aibox help Show this help (default)
|
|
21
|
+
#
|
|
22
|
+
# Flags:
|
|
23
|
+
# --name NAME Run a named instance (e.g. --name refactor)
|
|
24
|
+
# Allows multiple containers per project
|
|
25
|
+
# --image NAME Override base Docker image (default: aibox:latest)
|
|
26
|
+
# --shared-modules Share node_modules between host and container
|
|
27
|
+
# --copy Copy project into container (full isolation, no host sync)
|
|
28
|
+
# --worktree Use git worktree (lightweight isolation, stays on host)
|
|
29
|
+
# --yolo Yolo mode: skip permissions, full sudo, no firewall
|
|
30
|
+
# --safe Safe mode: keep permissions, restricted sudo, firewall on
|
|
31
|
+
#
|
|
32
|
+
# Network firewall:
|
|
33
|
+
# Default: only allows Claude API, npm, GitHub, PyPI, SSH, DNS.
|
|
34
|
+
# Add domains: export AIBOX_EXTRA_DOMAINS="example.com,api.myservice.io"
|
|
35
|
+
#
|
|
36
|
+
# Prerequisites:
|
|
37
|
+
# brew install colima docker docker-compose docker-buildx
|
|
38
|
+
# (or: brew install orbstack)
|
|
39
|
+
#
|
|
40
|
+
# First-time setup (once ever):
|
|
41
|
+
# aibox build
|
|
42
|
+
#
|
|
43
|
+
# Per-project:
|
|
44
|
+
# aibox up # start default container
|
|
45
|
+
# aibox claude # open Claude Code
|
|
46
|
+
# aibox claude --yolo # skip permission prompts (sandboxed)
|
|
47
|
+
# aibox --name feat claude # Claude Code in a second container
|
|
48
|
+
# aibox shell # zsh in the container
|
|
49
|
+
# aibox shell ls -la # run a command inline
|
|
50
|
+
# aibox down # stop default
|
|
51
|
+
# aibox down --all # stop all for this project
|
|
52
|
+
|
|
53
|
+
set -euo pipefail
|
|
54
|
+
|
|
55
|
+
# ── Script identity ──────────────────────────────────────────────
|
|
56
|
+
SCRIPT_NAME="$(basename "$0")"
|
|
57
|
+
AIBOX_VERSION="5"
|
|
58
|
+
CONFIG_DIR="${HOME}/.config/aibox"
|
|
59
|
+
DEFAULT_IMAGE="aibox:latest"
|
|
60
|
+
CONTAINER_PREFIX="aibox"
|
|
61
|
+
|
|
62
|
+
# ── Machine-aware Colima start ───────────────────────────────────
|
|
63
|
+
# Start a Colima VM sized relative to the host:
# half the CPUs (clamped to 2–4), half the RAM (clamped to 4–8 GB), 100 GB disk.
# Tries the Apple Virtualization backend first and falls back to QEMU.
_colima_start() {
  local total_cpu total_mem_gb vm_cpu vm_mem vm_disk

  # Probe host resources; fall back to 4 CPUs / 8 GB when probes fail.
  if [[ "$(uname)" == "Darwin" ]]; then
    total_cpu=$(sysctl -n hw.ncpu 2>/dev/null || echo 4)
    total_mem_gb=$(( $(sysctl -n hw.memsize 2>/dev/null || echo 8589934592) / 1073741824 ))
  else
    total_cpu=$(nproc 2>/dev/null || echo 4)
    total_mem_gb=$(( $(grep MemTotal /proc/meminfo 2>/dev/null | awk '{print $2}' || echo 8388608) / 1048576 ))
  fi

  # Halve, then clamp: CPUs into [2,4], memory into [4,8] GB.
  vm_cpu=$(( total_cpu / 2 ))
  (( vm_cpu < 2 )) && vm_cpu=2
  (( vm_cpu > 4 )) && vm_cpu=4
  vm_mem=$(( total_mem_gb / 2 ))
  (( vm_mem < 4 )) && vm_mem=4
  (( vm_mem > 8 )) && vm_mem=8
  vm_disk=100

  echo "Detected: ${total_cpu} CPUs, ${total_mem_gb}GB RAM"
  echo "Colima VM: ${vm_cpu} CPUs, ${vm_mem}GB RAM, ${vm_disk}GB disk"

  # Try Apple Virtualization framework first (fast), fall back to QEMU
  colima start \
    --cpu "$vm_cpu" --memory "$vm_mem" --disk "$vm_disk" \
    --vm-type vz --vz-rosetta 2>/dev/null \
    || colima start \
      --cpu "$vm_cpu" --memory "$vm_mem" --disk "$vm_disk"
}
|
|
94
|
+
|
|
95
|
+
# ── Dependency check ─────────────────────────────────────────────
|
|
96
|
+
# Wait up to $1 seconds for the Docker daemon to become reachable.
# Returns 0 when `docker info` succeeds, 1 on timeout.
_wait_for_docker() {
  local timeout="$1" i=0
  while ! docker info &>/dev/null; do
    (( i >= timeout )) && return 1
    sleep 1
    i=$((i + 1))
  done
  return 0
}

# Ensure docker + compose are installed and the daemon is reachable.
#
# - CLI present but daemon down: auto-start Colima, OrbStack, or Docker
#   Desktop (in that preference order), waiting for the daemon.
# - CLI missing: offer to install Homebrew (if needed) and then the
#   colima/docker toolchain, prompting before each install.
# Exits non-zero when dependencies cannot be satisfied.
_check_deps() {
  local missing=()

  command -v docker &>/dev/null || missing+=("docker")
  # Either the standalone binary or the `docker compose` plugin is fine.
  command -v docker-compose &>/dev/null || docker compose version &>/dev/null || missing+=("docker-compose")

  if [[ ${#missing[@]} -eq 0 ]]; then
    # Docker CLI exists — check if daemon is reachable
    if ! docker info &>/dev/null; then
      if command -v colima &>/dev/null; then
        echo "Docker daemon not running. Starting Colima..."
        _colima_start
      elif [[ -d "/Applications/OrbStack.app" ]]; then
        echo "Docker daemon not running. Starting OrbStack..."
        open -a OrbStack
        if ! _wait_for_docker 30; then
          echo "OrbStack started but Docker daemon not ready. Try again in a moment." >&2
          exit 1
        fi
      elif [[ -d "/Applications/Docker.app" ]]; then
        echo "Docker daemon not running. Starting Docker Desktop..."
        open -a Docker
        # Docker Desktop is slower to boot than OrbStack — wait longer.
        if ! _wait_for_docker 60; then
          echo "Docker Desktop started but daemon not ready. Try again in a moment." >&2
          exit 1
        fi
      else
        echo "Docker daemon not running. Install Docker Desktop, Colima, or OrbStack." >&2
        exit 1
      fi
    fi
    return
  fi

  # Something is missing — offer to install
  if ! command -v brew &>/dev/null; then
    echo "Homebrew is required to install Docker dependencies."
    echo "Will run: /bin/bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\""
    echo ""
    if _confirm_yes "Install Homebrew?"; then
      /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
      # Put brew on PATH for this process (Apple Silicon vs Intel prefix).
      if [[ -f /opt/homebrew/bin/brew ]]; then
        eval "$(/opt/homebrew/bin/brew shellenv)"
      elif [[ -f /usr/local/bin/brew ]]; then
        eval "$(/usr/local/bin/brew shellenv)"
      fi
      if ! command -v brew &>/dev/null; then
        echo "Homebrew installed but not in PATH. Open a new terminal and re-run ${SCRIPT_NAME}." >&2
        exit 1
      fi
    else
      echo "Aborted. Install Homebrew (https://brew.sh) then re-run ${SCRIPT_NAME}." >&2
      exit 1
    fi
  fi

  echo "Missing dependencies: ${missing[*]}"
  echo "Will install via Homebrew: colima docker docker-compose docker-buildx"
  echo ""
  if _confirm_yes "Install now?"; then
    brew install colima docker docker-compose docker-buildx

    # Expose docker-compose as a CLI plugin so `docker compose` works too.
    mkdir -p ~/.docker/cli-plugins
    ln -sfn "$(brew --prefix)/opt/docker-compose/bin/docker-compose" ~/.docker/cli-plugins/docker-compose

    echo ""
    echo "Starting Colima..."
    _colima_start
    echo "Docker is ready."
  else
    echo "Aborted. Install manually:" >&2
    echo " brew install colima docker docker-compose docker-buildx" >&2
    exit 1
  fi
}
|
|
181
|
+
|
|
182
|
+
# ── Parse global flags ───────────────────────────────────────────
|
|
183
|
+
IMAGE="$DEFAULT_IMAGE"
|
|
184
|
+
SHARED_MODULES=false
|
|
185
|
+
INSTANCE_NAME=""
|
|
186
|
+
DOWN_ALL=false
|
|
187
|
+
DOWN_CLEAN=false
|
|
188
|
+
SKIP_PERMISSIONS=false
|
|
189
|
+
SAFE_MODE=false
|
|
190
|
+
ISOLATION=""
|
|
191
|
+
POSITIONAL=()
|
|
192
|
+
|
|
193
|
+
# Parse global CLI flags into the globals declared above.
# Unrecognized words (the subcommand and its own arguments) are collected
# into POSITIONAL in order. Exits 1 when --copy and --worktree are combined.
parse_flags() {
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --image)
        IMAGE="${2:?'--image requires a value'}"
        shift
        ;;
      --name)
        INSTANCE_NAME="${2:?'--name requires a value'}"
        shift
        ;;
      --shared-modules)
        SHARED_MODULES=true
        ;;
      --copy|--worktree)
        [[ -n "$ISOLATION" ]] && { echo "Error: --copy and --worktree are mutually exclusive" >&2; exit 1; }
        # Strip the leading dashes: "copy" or "worktree".
        ISOLATION="${1#--}"
        ;;
      --all)
        DOWN_ALL=true
        ;;
      --clean)
        DOWN_CLEAN=true
        ;;
      --yolo)
        SKIP_PERMISSIONS=true
        ;;
      --safe)
        SAFE_MODE=true
        ;;
      *)
        POSITIONAL+=("$1")
        ;;
    esac
    shift
  done
}
|
|
241
|
+
|
|
242
|
+
parse_flags "$@"
|
|
243
|
+
if [[ ${#POSITIONAL[@]} -gt 0 ]]; then
|
|
244
|
+
set -- "${POSITIONAL[@]}"
|
|
245
|
+
else
|
|
246
|
+
set --
|
|
247
|
+
fi
|
|
248
|
+
|
|
249
|
+
# Set container mode from flags (cmd_claude may override via interactive prompt)
|
|
250
|
+
if [[ "$SKIP_PERMISSIONS" == "true" ]]; then
|
|
251
|
+
export AIBOX_MODE="yolo"
|
|
252
|
+
elif [[ "$SAFE_MODE" == "true" ]]; then
|
|
253
|
+
export AIBOX_MODE="safe"
|
|
254
|
+
fi
|
|
255
|
+
|
|
256
|
+
# ── Per-project config file (.aibox) ─────────────────────────────
|
|
257
|
+
PROJECT_DIR="$(pwd)"
|
|
258
|
+
PROJECT_CONF="${PROJECT_DIR}/.aibox"
|
|
259
|
+
|
|
260
|
+
# ── Safety: refuse to run in dangerous directories ───────────────
|
|
261
|
+
# Return 0 when $1 is acceptable as a project root; return 1 for dangerous
# locations: the user's home directory, filesystem root, or system dirs.
_is_safe_project_dir() {
  local candidate="$1" blocked
  for blocked in "$HOME" / /tmp /var /etc /usr /opt /private /private/tmp; do
    if [[ "$candidate" == "$blocked" ]]; then
      return 1
    fi
  done
  return 0
}
|
|
269
|
+
|
|
270
|
+
# Abort with an error unless the current PROJECT_DIR passes the safety check.
_require_safe_dir() {
  _is_safe_project_dir "$PROJECT_DIR" && return
  echo "Error: refusing to run in ${PROJECT_DIR}" >&2
  echo "Run ${SCRIPT_NAME} from inside a project directory." >&2
  exit 1
}
|
|
277
|
+
|
|
278
|
+
# ── Confirmation prompts ─────────────────────────────────────────
|
|
279
|
+
# Default-yes prompt: returns 0 for anything except an explicit n/N reply.
# The prompt goes to stderr so stdout stays clean for captured output.
_confirm_yes() {
  local msg="$1" reply
  printf "%s [Y/n] " "$msg" >&2
  read -r reply
  case "$reply" in
    [Nn]) return 1 ;;
    *) return 0 ;;
  esac
}
|
|
285
|
+
|
|
286
|
+
# Default-no prompt: returns 0 only for an explicit y/Y reply.
# The prompt goes to stderr so stdout stays clean for captured output.
_confirm_no() {
  local msg="$1" reply
  printf "%s [y/N] " "$msg" >&2
  read -r reply
  case "$reply" in
    [Yy]) return 0 ;;
    *) return 1 ;;
  esac
}
|
|
292
|
+
|
|
293
|
+
# ── Project config persistence ───────────────────────────────────
|
|
294
|
+
# Persist the current per-project settings to .aibox (overwrites the file).
save_project_conf() {
  {
    printf '# Auto-generated by %s. Safe to edit.\n' "$SCRIPT_NAME"
    printf 'IMAGE=%s\n' "$IMAGE"
    printf 'SHARED_MODULES=%s\n' "$SHARED_MODULES"
  } > "$PROJECT_CONF"
}
|
|
301
|
+
|
|
302
|
+
# Load per-project overrides from .aibox without clobbering values supplied
# on the command line this run:
#   - IMAGE from the file applies only while IMAGE still holds the built-in
#     default (a CLI --image wins).
#   - SHARED_MODULES can only be turned ON by the file, never off.
load_project_conf() {
  [[ -f "$PROJECT_CONF" ]] || return 0
  local conf_image conf_shared
  # head -n1 guards against duplicate keys: without it, `grep | cut` would
  # return a multi-line value that poisons container names and compose files.
  conf_image=$(grep '^IMAGE=' "$PROJECT_CONF" | head -n1 | cut -d= -f2- || true)
  conf_shared=$(grep '^SHARED_MODULES=' "$PROJECT_CONF" | head -n1 | cut -d= -f2- || true)
  if [[ "$IMAGE" == "$DEFAULT_IMAGE" && -n "$conf_image" ]]; then
    IMAGE="$conf_image"
  fi
  if [[ "$SHARED_MODULES" == "false" && "$conf_shared" == "true" ]]; then
    SHARED_MODULES=true
  fi
}
|
|
315
|
+
|
|
316
|
+
load_project_conf
|
|
317
|
+
|
|
318
|
+
# ── Validate image name ──────────────────────────────────────────
|
|
319
|
+
# Reject image names outside the safe docker-reference charset before the
# value is interpolated into compose files and shell commands.
[[ "$IMAGE" =~ ^[a-zA-Z0-9._/-]+(:[a-zA-Z0-9._-]+)?$ ]] || {
  echo "Error: invalid image name: $IMAGE" >&2
  exit 1
}
|
|
323
|
+
|
|
324
|
+
# ── Derived names ────────────────────────────────────────────────
|
|
325
|
+
PROJECT_NAME="$(basename "$PROJECT_DIR" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9_-]/-/g')"
|
|
326
|
+
PROJECT_HASH="$(printf '%s' "$PROJECT_DIR" | shasum | cut -c1-6)"
|
|
327
|
+
BASE_CONTAINER_NAME="${CONTAINER_PREFIX}-${PROJECT_NAME}-${PROJECT_HASH}"
|
|
328
|
+
|
|
329
|
+
if [[ -z "$INSTANCE_NAME" ]]; then
|
|
330
|
+
INSTANCE_NAME="main"
|
|
331
|
+
fi
|
|
332
|
+
if ! [[ "$INSTANCE_NAME" =~ ^[a-z0-9_-]+$ ]]; then
|
|
333
|
+
echo "Error: --name must be lowercase alphanumeric, dashes, underscores" >&2
|
|
334
|
+
exit 1
|
|
335
|
+
fi
|
|
336
|
+
CONTAINER_NAME="${BASE_CONTAINER_NAME}-${INSTANCE_NAME}"
|
|
337
|
+
WORKSPACE_DIR="/${PROJECT_NAME}"
|
|
338
|
+
|
|
339
|
+
# ── Isolation mode setup ─────────────────────────────────────────
|
|
340
|
+
COPY_VOLUME=""
WORKTREE_DIR=""

# Derive isolation-specific paths/names from the requested mode.
case "$ISOLATION" in
  copy) COPY_VOLUME="${CONTAINER_NAME}-src" ;;
  worktree) WORKTREE_DIR="${CONFIG_DIR}/worktrees/${CONTAINER_NAME}" ;;
esac

# Detect isolation mode from existing container label (reconnect support)
if [[ -z "$ISOLATION" ]]; then
  _existing_isolation=$(docker inspect "$CONTAINER_NAME" --format '{{index .Config.Labels "aibox.isolation"}}' 2>/dev/null || echo "")
  case "$_existing_isolation" in
    copy)
      ISOLATION="copy"
      COPY_VOLUME="${CONTAINER_NAME}-src"
      ;;
    worktree)
      ISOLATION="worktree"
      WORKTREE_DIR="${CONFIG_DIR}/worktrees/${CONTAINER_NAME}"
      ;;
  esac
fi
|
|
360
|
+
|
|
361
|
+
# ── Dockerfile management ────────────────────────────────────────
|
|
362
|
+
ensure_dockerfile() {
  # Materialize ~/.config/aibox/{Dockerfile,entrypoint.sh}, regenerating
  # both whenever the stored version stamp differs from AIBOX_VERSION.
  mkdir -p "$CONFIG_DIR"
  local dockerfile="${CONFIG_DIR}/Dockerfile"
  local version_file="${CONFIG_DIR}/version"
  local current_version=""

  # Check if Dockerfile needs regeneration
  if [[ -f "$version_file" ]]; then
    current_version=$(cat "$version_file")
  fi

  if [[ "$current_version" != "$AIBOX_VERSION" || ! -f "$dockerfile" ]]; then
    if [[ -f "$dockerfile" && "$current_version" != "$AIBOX_VERSION" ]]; then
      echo "Dockerfile outdated (v${current_version} → v${AIBOX_VERSION}). Regenerating..."
    fi
    # Unquoted heredoc delimiter: ${AIBOX_VERSION} expands now, while \$ and
    # \\ stay escaped so PATH and line continuations reach the Dockerfile intact.
    cat > "$dockerfile" << DOCKERFILE
FROM node:20-alpine

LABEL aibox.version="${AIBOX_VERSION}"

RUN apk add --no-cache \\
    git \\
    curl \\
    ripgrep \\
    bash \\
    zsh \\
    sudo \\
    openssh-client \\
    python3 \\
    make \\
    g++ \\
    iptables \\
    ip6tables \\
    bind-tools \\
    su-exec

# Create non-root user (sudo configured at runtime by entrypoint based on mode)
RUN adduser -D -s /bin/zsh aibox

# Copy entrypoint (runs as root, sets up firewall + sudo, drops to aibox)
COPY entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh

# Install Claude Code as aibox user via native installer
USER aibox
ENV PATH="/home/aibox/.local/bin:\$PATH"
RUN curl -fsSL https://claude.ai/install.sh | bash

WORKDIR /workspace

# Entrypoint runs as root, then execs sleep as aibox
USER root
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
CMD ["sleep", "infinity"]
DOCKERFILE

    # Generate entrypoint script (runs as root at container start)
    # Quoted delimiter: everything below is written literally, no expansion here.
    cat > "${CONFIG_DIR}/entrypoint.sh" << 'ENTRYPOINT'
#!/bin/bash
set -e

# Fix auth volume ownership (Docker creates volumes as root)
chown -R aibox:aibox /home/aibox/.claude 2>/dev/null || true

# ── Mode-dependent setup ──────────────────────────────────────
MODE="${AIBOX_MODE:-safe}"

if [[ "$MODE" == "yolo" ]]; then
  # YOLO: full sudo, no firewall
  echo "aibox ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/aibox
  echo "[aibox] Mode: yolo (full sudo, no firewall)"

else
  # SAFE: restricted sudo (chown only), firewall active
  echo "aibox ALL=(root) NOPASSWD: /bin/chown *" > /etc/sudoers.d/aibox

  # ── Firewall setup ──────────────────────────────────────────
  ALLOWED_DOMAINS=(
    # Claude Code / Anthropic
    "api.anthropic.com"
    "claude.ai"
    "statsig.anthropic.com"
    "statsig.com"
    "sentry.io"
    # npm
    "registry.npmjs.org"
    # GitHub
    "github.com"
    "api.github.com"
    # PyPI
    "pypi.org"
    "files.pythonhosted.org"
  )

  # Extra domains from env var (comma-separated)
  if [[ -n "${AIBOX_EXTRA_DOMAINS:-}" ]]; then
    IFS=',' read -ra EXTRA <<< "$AIBOX_EXTRA_DOMAINS"
    ALLOWED_DOMAINS+=("${EXTRA[@]}")
  fi

  echo "Configuring firewall..."

  # Flush existing
  iptables -F OUTPUT 2>/dev/null || true

  # Allow loopback
  iptables -A OUTPUT -o lo -j ACCEPT

  # Allow established/related
  iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT

  # Allow DNS
  iptables -A OUTPUT -p udp --dport 53 -j ACCEPT
  iptables -A OUTPUT -p tcp --dport 53 -j ACCEPT

  # Allow SSH (git over SSH)
  iptables -A OUTPUT -p tcp --dport 22 -j ACCEPT

  # Resolve and allow each domain
  # NOTE(review): rules pin the IPs resolved at container start; a later DNS
  # change is not picked up until restart — confirm this is acceptable.
  for domain in "${ALLOWED_DOMAINS[@]}"; do
    # xargs trims surrounding whitespace from comma-split entries
    domain=$(echo "$domain" | xargs)
    [[ -z "$domain" ]] && continue
    ips=$(dig +short A "$domain" 2>/dev/null | grep -E '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' || true)
    for ip in $ips; do
      iptables -A OUTPUT -d "$ip" -j ACCEPT
    done
  done

  # Allow Docker host (IDE integration)
  host_ip=$(getent hosts host.docker.internal 2>/dev/null | awk '{print $1}' || true)
  if [[ -n "$host_ip" ]]; then
    iptables -A OUTPUT -d "$host_ip" -j ACCEPT
  fi

  # Default deny
  iptables -A OUTPUT -j DROP

  echo "[aibox] Mode: safe (firewall active, ${#ALLOWED_DOMAINS[@]} domains allowed)"
fi

chmod 0440 /etc/sudoers.d/aibox

# Drop to aibox user and exec CMD (su-exec preserves env and properly execs)
exec su-exec aibox "$@"
ENTRYPOINT
    # Stamp the version last so a partial write triggers regeneration next run.
    echo "$AIBOX_VERSION" > "$version_file"
    echo "Created Dockerfile (v${AIBOX_VERSION})"
  fi
}
|
|
511
|
+
|
|
512
|
+
# ── Isolation: copy mode ──────────────────────────────────────────
|
|
513
|
+
# Populate the per-instance Docker volume with an isolated clone of the repo.
#
# Uses `git bundle` so only committed, tracked content is copied (history
# preserved, .gitignored files excluded). The volume is reused when already
# populated, and removed on any failure so a half-copied volume is never
# left behind. Exits 1 when bundling or cloning fails.
_prepare_copy_volume() {
  local branch_name="aibox/${INSTANCE_NAME}"

  # Warn about uncommitted changes — the bundle only carries commits.
  if ! git -C "$PROJECT_DIR" diff --quiet 2>/dev/null || ! git -C "$PROJECT_DIR" diff --cached --quiet 2>/dev/null; then
    echo "Warning: uncommitted changes will NOT be included in the copy."
  fi

  docker volume create "$COPY_VOLUME" >/dev/null 2>&1 || true

  # Check if volume already has content (--entrypoint bypasses su-exec drop)
  local has_content
  has_content=$(docker run --rm --entrypoint sh -v "${COPY_VOLUME}:/${PROJECT_NAME}" "$IMAGE" -c "ls -A /${PROJECT_NAME} 2>/dev/null | head -1" 2>/dev/null || true)

  if [[ -n "$has_content" ]]; then
    echo "Copy volume already populated (${COPY_VOLUME}). Reusing."
    return
  fi

  echo "Copying project into isolated volume (branch: ${branch_name})..."

  # Create bundle to a temp file first so we can check for failure
  local bundle_file
  bundle_file=$(mktemp)
  if ! git -C "$PROJECT_DIR" bundle create "$bundle_file" --all 2>/dev/null; then
    rm -f "$bundle_file"
    docker volume rm "$COPY_VOLUME" 2>/dev/null || true
    echo "Error: git bundle failed. Is this a git repository with commits?" >&2
    exit 1
  fi

  # Stream the bundle straight over stdin (redirect, not a useless `cat |`).
  # branch_name is safe to interpolate: INSTANCE_NAME is validated to
  # ^[a-z0-9_-]+$ before this point.
  if ! docker run --rm -i --entrypoint sh \
    -v "${COPY_VOLUME}:/${PROJECT_NAME}" \
    "$IMAGE" -c "
cat > /tmp/repo.bundle
cd /${PROJECT_NAME}
git clone /tmp/repo.bundle .
rm /tmp/repo.bundle
git checkout -b '${branch_name}' 2>/dev/null || git checkout '${branch_name}'
chown -R aibox:aibox /${PROJECT_NAME}
" < "$bundle_file"; then
    rm -f "$bundle_file"
    docker volume rm "$COPY_VOLUME" 2>/dev/null || true
    echo "Error: failed to clone into volume." >&2
    exit 1
  fi

  rm -f "$bundle_file"
  echo "Copied to volume ${COPY_VOLUME}"
}
|
|
563
|
+
|
|
564
|
+
# ── Isolation: worktree mode ─────────────────────────────────────
|
|
565
|
+
# Create (or reuse) a git worktree for this instance under CONFIG_DIR,
# attached to branch aibox/<instance> (created when it doesn't exist yet).
_prepare_worktree() {
  local branch_name="aibox/${INSTANCE_NAME}"

  if [[ -d "$WORKTREE_DIR" ]]; then
    echo "Worktree already exists (${WORKTREE_DIR}). Reusing."
    return
  fi

  mkdir -p "${CONFIG_DIR}/worktrees"

  echo "Creating worktree (branch: ${branch_name})..."

  # Prefer creating a fresh branch; if it already exists, attach to it.
  if ! git -C "$PROJECT_DIR" worktree add "$WORKTREE_DIR" -b "$branch_name" 2>/dev/null \
    && ! git -C "$PROJECT_DIR" worktree add "$WORKTREE_DIR" "$branch_name" 2>/dev/null; then
    echo "Error: failed to create worktree. Is this a git repository?" >&2
    exit 1
  fi

  echo "Created worktree at ${WORKTREE_DIR}"
}
|
|
589
|
+
|
|
590
|
+
# ── Compose YAML generation ──────────────────────────────────────
# Auth volume shared across all containers using the same base image.
# name: field prevents docker compose from prefixing with project name.
# Sanitize the image reference into a valid volume name: every
# non-alphanumeric character becomes a dash (pure bash, no sed fork).
AUTH_VOLUME="aibox-auth-${IMAGE//[^a-zA-Z0-9]/-}"
|
|
594
|
+
|
|
595
|
+
_volumes_yaml() {
  # Emit the per-service `volumes:` entries for the compose file.
  #   $1 - indentation prefix for each emitted line
  #   $2 - host path to bind-mount when no isolation is active
  local indent="$1"
  local host_src="$2"

  # Workspace mount depends on the isolation strategy.
  case "$ISOLATION" in
    copy)     echo "${indent}- ${COPY_VOLUME}:${WORKSPACE_DIR}" ;;
    worktree) echo "${indent}- \"${WORKTREE_DIR}:${WORKSPACE_DIR}\"" ;;
    *)        echo "${indent}- \"${host_src}:${WORKSPACE_DIR}\"" ;;
  esac

  # Shared auth volume (Claude credentials) for every container.
  echo "${indent}- ${AUTH_VOLUME}:/home/aibox/.claude"

  # Mount host's IDE lock files so Claude Code can discover JetBrains/VS Code plugins
  local host_ide_dir="${HOME}/.claude/ide"
  if [[ -d "$host_ide_dir" ]]; then
    echo "${indent}- \"${host_ide_dir}:/home/aibox/.claude/ide:ro\""
  fi

  # A named node_modules volume only makes sense for plain bind-mounts.
  if [[ "$SHARED_MODULES" == "false" && -z "$ISOLATION" ]]; then
    echo "${indent}- node_modules:${WORKSPACE_DIR}/node_modules"
  fi
}
|
|
618
|
+
|
|
619
|
+
_top_volumes_yaml() {
  # Emit the top-level `volumes:` section of the compose file.
  # Volumes are declared `external: true` because this script creates
  # them itself (docker volume create), so compose must not manage or
  # delete them on `down`.
  # Fix: volume names are passed as printf ARGUMENTS, never embedded in
  # the format string — a '%' in a name can no longer corrupt output.
  printf 'volumes:\n'
  printf '  %s:\n' "$AUTH_VOLUME"
  printf '    external: true\n'
  if [[ "$ISOLATION" == "copy" ]]; then
    printf '  %s:\n' "$COPY_VOLUME"
    printf '    external: true\n'
  fi
  # Named node_modules volume only exists in plain bind-mount mode.
  if [[ "$SHARED_MODULES" == "false" && -z "$ISOLATION" ]]; then
    printf '  node_modules:\n'
  fi
}
|
|
631
|
+
|
|
632
|
+
_environment_yaml() {
  # Emit the service `environment:` entries. The ${VAR:-} references are
  # emitted literally (single-quoted here) so docker compose interpolates
  # them from the host environment at `compose up` time.
  local indent="$1"
  local entry
  for entry in \
    'ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}' \
    'CLAUDE_CONFIG_DIR=/home/aibox/.claude' \
    'AIBOX_MODE=${AIBOX_MODE:-safe}' \
    'AIBOX_EXTRA_DOMAINS=${AIBOX_EXTRA_DOMAINS:-}'; do
    printf '%s- %s\n' "$indent" "$entry"
  done
}
|
|
639
|
+
|
|
640
|
+
# Build -e flags for docker exec from current terminal environment
_docker_exec() {
  # Usage: _docker_exec container command [args...]
  # Runs `command` inside `container` as the aibox user with an
  # interactive TTY, forwarding selected host environment variables.
  local container="$1"
  shift

  # Baseline terminal env so colors/locale behave inside the container.
  local env_args=(
    -e "TERM=${TERM:-xterm-256color}"
    -e "COLORTERM=${COLORTERM:-truecolor}"
    -e "LANG=${LANG:-C.UTF-8}"
  )

  # Forward Claude Code env vars if set on host
  local forward_vars=(
    ANTHROPIC_MODEL
    ANTHROPIC_BASE_URL
    CLAUDE_CODE_USE_BEDROCK
    CLAUDE_CODE_USE_VERTEX
    CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC
  )
  # Fix: declare the loop variable local — it previously leaked into
  # the global scope of the script.
  local var
  for var in "${forward_vars[@]}"; do
    # ${!var} is indirect expansion: the value of the variable named $var.
    [[ -n "${!var:-}" ]] && env_args+=(-e "$var=${!var}")
  done

  # Forward JetBrains / VS Code IDE integration env vars
  if [[ "${ENABLE_IDE_INTEGRATION:-}" == "true" ]]; then
    env_args+=(-e "ENABLE_IDE_INTEGRATION=true")
    env_args+=(-e "TERMINAL_EMULATOR=${TERMINAL_EMULATOR:-}")
    [[ -n "${CLAUDE_CODE_SSE_PORT:-}" ]] && env_args+=(-e "CLAUDE_CODE_SSE_PORT=${CLAUDE_CODE_SSE_PORT}")
    [[ -n "${TERM_SESSION_ID:-}" ]] && env_args+=(-e "TERM_SESSION_ID=${TERM_SESSION_ID}")
    # The IDE listens on the host; point Claude Code at the gateway alias
    # (mapped via extra_hosts in the compose file).
    env_args+=(-e "CLAUDE_CODE_IDE_HOST_OVERRIDE=host.docker.internal")
  fi

  docker exec -it -u aibox "${env_args[@]}" "$container" "$@"
}
|
|
675
|
+
|
|
676
|
+
_labels_yaml() {
  # Emit container labels used later by status/down to recover the
  # project, host path, instance name and isolation mode of a container.
  local indent="$1"
  printf '%saibox.project: "%s"\n' "$indent" "$PROJECT_NAME"
  printf '%saibox.path: "%s"\n' "$indent" "$PROJECT_DIR"
  printf '%saibox.instance: "%s"\n' "$indent" "$INSTANCE_NAME"
  printf '%saibox.isolation: "%s"\n' "$indent" "$ISOLATION"
}
|
|
683
|
+
|
|
684
|
+
generate_compose() {
  # Render the ephemeral compose file consumed by dc() via `docker compose -f -`.
  # The unquoted heredoc delimiter means ${vars} and $(helper) calls expand here.
  # NOTE(review): NET_ADMIN presumably enables the in-container firewall
  # toggled via AIBOX_MODE (see cmd_claude) — confirm against the image's
  # entrypoint.
  cat << YAML
name: "${CONTAINER_NAME}"
services:
  dev:
    image: ${IMAGE}
    container_name: ${CONTAINER_NAME}
    cap_add:
      - NET_ADMIN
    extra_hosts:
      - "host.docker.internal:host-gateway"
    labels:
$(_labels_yaml "      ")
    volumes:
$(_volumes_yaml "      " "$PROJECT_DIR")
    working_dir: ${WORKSPACE_DIR}
    environment:
$(_environment_yaml "      ")
    stdin_open: true
    tty: true
$(_top_volumes_yaml)
YAML
}
|
|
707
|
+
|
|
708
|
+
write_project_compose() {
  # Persist a compose file into the project so IDEs (WebStorm) can use the
  # container as a Node.js interpreter. Mirrors generate_compose(), except
  # the non-isolated workspace mount is "." (compose resolves it relative
  # to this file) instead of the absolute project path.
  local target="${PROJECT_DIR}/compose.dev.yaml"

  cat > "$target" << YAML
# Generated by ${SCRIPT_NAME} — point WebStorm Node.js interpreter here.
# Settings → Node.js → Docker Compose → this file → service: dev
name: "${CONTAINER_NAME}"
services:
  dev:
    image: ${IMAGE}
    container_name: ${CONTAINER_NAME}
    cap_add:
      - NET_ADMIN
    extra_hosts:
      - "host.docker.internal:host-gateway"
    labels:
$(_labels_yaml "      ")
    volumes:
$(_volumes_yaml "      " ".")
    working_dir: ${WORKSPACE_DIR}
    environment:
$(_environment_yaml "      ")
    stdin_open: true
    tty: true
$(_top_volumes_yaml)
YAML
  echo "Created ${target}"
}
|
|
736
|
+
|
|
737
|
+
# ── WebStorm .idea/workspace.xml ─────────────────────────────────
_sed_inplace() {
  # Portable in-place sed: GNU sed takes a bare -i, while BSD/macOS sed
  # requires an explicit (here: empty) backup suffix after -i.
  case "$(sed --version 2>/dev/null)" in
    *GNU*) sed -i "$@" ;;
    *)     sed -i '' "$@" ;;
  esac
}
|
|
745
|
+
|
|
746
|
+
configure_webstorm() {
  # Point WebStorm's Node.js interpreter at the dev compose service by
  # editing (or creating) .idea/workspace.xml.
  local idea_dir="${PROJECT_DIR}/.idea"
  local ws_file="${idea_dir}/workspace.xml"
  local compose_path="${PROJECT_DIR}/compose.dev.yaml"
  # JetBrains remote-interpreter URI: compose file + service name + node path.
  local interpreter_value="docker-compose://[${compose_path}]:dev//usr/local/bin/node"

  mkdir -p "$idea_dir"

  if [[ -f "$ws_file" ]]; then
    if grep -q '"nodejs_interpreter_path"' "$ws_file"; then
      # Key already present: rewrite just its value in place.
      _sed_inplace "s|\"nodejs_interpreter_path\": \"[^\"]*\"|\"nodejs_interpreter_path\": \"${interpreter_value}\"|" "$ws_file"
      echo "Updated nodejs_interpreter_path in .idea/workspace.xml"
    elif grep -q '"keyToString"' "$ws_file"; then
      # PropertiesComponent exists but lacks our key: inject the four
      # Node.js/Docker-Compose entries right after the opening brace.
      local tmpfile
      tmpfile=$(mktemp)
      awk -v interp="$interpreter_value" '
        /"keyToString": \{/ {
          print
          print "    \"nodejs_interpreter_path\": \"" interp "\","
          print "    \"nodejs_package_manager_path\": \"npm\","
          print "    \"javascript.preferred.runtime.type.id\": \"node\","
          print "    \"credentialsType com.jetbrains.nodejs.remote.NodeJSCreateRemoteSdkForm\": \"Docker Compose\","
          next
        }
        { print }
      ' "$ws_file" > "$tmpfile"
      mv "$tmpfile" "$ws_file"
      echo "Injected Node.js Docker Compose config into .idea/workspace.xml"
    else
      # Unknown layout — don't risk corrupting the IDE config.
      echo "Warning: workspace.xml has unexpected format. Configure manually:"
      echo "  Settings → Node.js → Docker Compose → compose.dev.yaml → service: dev"
    fi
  else
    # No workspace.xml yet: write a minimal one containing only the
    # PropertiesComponent with the interpreter settings.
    cat > "$ws_file" << WSXML
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="PropertiesComponent"><![CDATA[{
  "keyToString": {
    "nodejs_interpreter_path": "${interpreter_value}",
    "nodejs_package_manager_path": "npm",
    "javascript.preferred.runtime.type.id": "node",
    "credentialsType com.jetbrains.nodejs.remote.NodeJSCreateRemoteSdkForm": "Docker Compose"
  }
}]]></component>
</project>
WSXML
    echo "Created .idea/workspace.xml with Docker Compose Node.js runtime"
  fi
}
|
|
795
|
+
|
|
796
|
+
# ── Docker Compose wrapper ───────────────────────────────────────
dc() {
  # Run docker compose against the generated (in-memory) compose file.
  # `-f -` reads the YAML from stdin; --project-directory anchors any
  # relative paths in that YAML at the project root.
  generate_compose \
    | docker compose -f - --project-directory "$PROJECT_DIR" "$@"
}
|
|
803
|
+
|
|
804
|
+
# ── Commands ─────────────────────────────────────────────────────

cmd_build() {
  # (Re)build the aibox image from the managed Dockerfile in CONFIG_DIR.
  _check_deps
  ensure_dockerfile
  echo "Building ${IMAGE}..."
  docker build -t "$IMAGE" "$CONFIG_DIR"
  echo "Done. Image: ${IMAGE}"
}
|
|
813
|
+
|
|
814
|
+
ensure_init() {
  # Lazily initialize the project on first use: when compose.dev.yaml is
  # missing, offer to run the same steps as `aibox init`; abort otherwise.
  if [[ -f "${PROJECT_DIR}/compose.dev.yaml" ]]; then
    return
  fi

  _require_safe_dir
  echo "No ${SCRIPT_NAME} config found. Will create compose.dev.yaml, .aibox, and .idea/workspace.xml."
  if ! _confirm_yes "Initialize?"; then
    echo "Aborted. Run '${SCRIPT_NAME} init' manually when ready." >&2
    exit 1
  fi
  _init_files
  echo ""
}
|
|
827
|
+
|
|
828
|
+
cmd_up() {
  # Ensure the container for this instance is running: build/refresh the
  # image, prepare isolation resources, then `compose up` if not running.
  _check_deps
  ensure_init

  if ! docker image inspect "$IMAGE" &>/dev/null; then
    echo "Image '${IMAGE}' not found. Building..."
    cmd_build
  else
    # Check if image version matches script version
    local img_version
    img_version=$(docker inspect "$IMAGE" --format '{{index .Config.Labels "aibox.version"}}' 2>/dev/null || echo "")
    if [[ "$img_version" != "$AIBOX_VERSION" ]]; then
      echo "Image outdated (v${img_version:-0} → v${AIBOX_VERSION}). Rebuilding..."
      cmd_build
    fi
  fi

  # Prepare isolation volumes/worktrees if needed
  if [[ "$ISOLATION" == "copy" ]]; then
    _prepare_copy_volume
  elif [[ "$ISOLATION" == "worktree" ]]; then
    _prepare_worktree
  fi

  # grep -Fxq: fixed-string, whole-line match against running containers.
  if ! docker ps --format '{{.Names}}' | grep -Fxq "$CONTAINER_NAME"; then
    # Ensure shared auth volume exists (external: true requires it)
    docker volume create "$AUTH_VOLUME" &>/dev/null || true
    echo "Starting ${CONTAINER_NAME} (image: ${IMAGE})..."
    dc up -d
    echo "Container running. Workspace: ${WORKSPACE_DIR}"
  else
    echo "${CONTAINER_NAME} is already running."
  fi
}
|
|
862
|
+
|
|
863
|
+
cmd_claude() {
  # Launch Claude Code in the container. Resolves two per-session
  # settings — permission skipping and the network firewall — from the
  # pre-parsed SKIP_PERMISSIONS/SAFE_MODE flags or interactively, then
  # (re)starts the container in the matching AIBOX_MODE.
  local skip=false
  local firewall=true

  if [[ "$SKIP_PERMISSIONS" == "true" ]]; then
    # --yolo: everything loose
    skip=true
    firewall=false
    export AIBOX_MODE="yolo"
  elif [[ "$SAFE_MODE" == "true" ]]; then
    # --safe: everything locked
    skip=false
    firewall=true
    export AIBOX_MODE="safe"
  else
    # No flag: ask about each setting
    echo ""
    echo "Container settings (per-session, not saved):"
    echo ""

    if _confirm_yes "Skip Claude Code permission prompts? (container is sandboxed)"; then
      skip=true
    fi

    if _confirm_yes "Enable network firewall? (blocks all except Claude API, npm, GitHub, PyPI)"; then
      firewall=true
    else
      firewall=false
    fi

    # Exported so dc()/compose interpolate AIBOX_MODE into the container
    # environment (see _environment_yaml).
    if [[ "$firewall" == "true" ]]; then
      export AIBOX_MODE="safe"
    else
      export AIBOX_MODE="yolo"
    fi

    echo ""
  fi

  # If container is running in a different mode, restart it
  if docker ps --format '{{.Names}}' | grep -Fxq "$CONTAINER_NAME"; then
    local current_mode
    # Read AIBOX_MODE as seen inside the running container (default: safe).
    current_mode=$(docker exec "$CONTAINER_NAME" sh -c 'echo ${AIBOX_MODE:-safe}' 2>/dev/null || echo "unknown")
    if [[ "$current_mode" != "$AIBOX_MODE" ]]; then
      echo "Restarting container in ${AIBOX_MODE} mode (was: ${current_mode})..."
      dc down --remove-orphans >/dev/null
    fi
  fi

  cmd_up

  if [[ "$skip" == "true" ]]; then
    _docker_exec "$CONTAINER_NAME" claude --dangerously-skip-permissions
  else
    _docker_exec "$CONTAINER_NAME" claude
  fi
}
|
|
920
|
+
|
|
921
|
+
cmd_shell() {
  # Open an interactive zsh in the container (starting it first if
  # needed), or run a one-off command: `aibox shell echo hello`.
  if ! docker ps --format '{{.Names}}' 2>/dev/null | grep -Fxq "$CONTAINER_NAME"; then
    cmd_up
  fi

  shift # drop the leading "shell" subcommand word
  if (( $# > 0 )); then
    # Join the remaining words and hand them to zsh as one command line.
    _docker_exec "$CONTAINER_NAME" zsh -c "$*"
  else
    _docker_exec "$CONTAINER_NAME" zsh
  fi
}
|
|
934
|
+
|
|
935
|
+
cmd_down() {
  # Stop and remove containers. With --all (DOWN_ALL), removes every
  # container of this project regardless of instance; with --clean
  # (DOWN_CLEAN), also deletes the copy volumes / git worktrees that
  # backed isolated instances.
  if [[ "$DOWN_ALL" == "true" ]]; then
    local project_containers
    # Substring match on the project's base container name; `|| true`
    # keeps an empty grep result from aborting under set -e.
    project_containers=$(docker ps -a --format '{{.Names}}' | grep -F "$BASE_CONTAINER_NAME") || true
    if [[ -n "$project_containers" ]]; then
      echo "Will remove all containers for ${PROJECT_NAME}:"
      echo "$project_containers" | sed 's/^/  /'
      echo ""
      if _confirm_no "Remove all?"; then
        echo "$project_containers" | xargs docker rm -f
        echo "Removed all containers for ${PROJECT_NAME}"
        if [[ "$DOWN_CLEAN" == "true" ]]; then
          # Clean up copy volumes and worktrees for removed containers
          while IFS= read -r cname; do
            docker volume rm "${cname}-src" 2>/dev/null && echo "Removed copy volume ${cname}-src" || true
            local wt="${CONFIG_DIR}/worktrees/${cname}"
            if [[ -d "$wt" ]]; then
              git -C "$PROJECT_DIR" worktree remove "$wt" 2>/dev/null && echo "Removed worktree ${wt}" || true
            fi
          done <<< "$project_containers"
        else
          # Check if any had isolation resources
          local has_isolation=false
          while IFS= read -r cname; do
            if docker volume inspect "${cname}-src" &>/dev/null || [[ -d "${CONFIG_DIR}/worktrees/${cname}" ]]; then
              has_isolation=true
              break
            fi
          done <<< "$project_containers"
          if [[ "$has_isolation" == "true" ]]; then
            echo "Note: copy volumes / worktrees kept. Use --clean to remove."
          fi
        fi
      else
        echo "Aborted."
      fi
    else
      echo "No containers found for ${PROJECT_NAME}"
    fi
  else
    # Single-instance teardown via compose.
    if docker ps -a --format '{{.Names}}' | grep -Fxq "$CONTAINER_NAME"; then
      dc down --remove-orphans
      echo "Removed ${CONTAINER_NAME}"
    else
      echo "No container found: ${CONTAINER_NAME}"
    fi
    # Clean up isolation resources (works even if container is already gone)
    if [[ "$DOWN_CLEAN" == "true" ]]; then
      if [[ -n "$COPY_VOLUME" ]]; then
        docker volume rm "$COPY_VOLUME" 2>/dev/null && echo "Removed copy volume ${COPY_VOLUME}" || true
      fi
      if [[ -n "$WORKTREE_DIR" && -d "$WORKTREE_DIR" ]]; then
        git -C "$PROJECT_DIR" worktree remove "$WORKTREE_DIR" 2>/dev/null && echo "Removed worktree ${WORKTREE_DIR}" || true
      fi
    elif [[ "$ISOLATION" == "copy" && -n "$COPY_VOLUME" ]] && docker volume inspect "$COPY_VOLUME" &>/dev/null; then
      echo "Copy volume kept: ${COPY_VOLUME} (use --clean to remove)"
    elif [[ "$ISOLATION" == "worktree" && -n "$WORKTREE_DIR" && -d "$WORKTREE_DIR" ]]; then
      echo "Worktree kept: ${WORKTREE_DIR} (use --clean to remove)"
    fi
  fi
}
|
|
996
|
+
|
|
997
|
+
cmd_status() {
  # Tabular overview of every aibox container: instance, isolation mode,
  # AIBOX_MODE (from the container env), run state and host project path.
  local containers
  containers=$(docker ps -a --filter "name=${CONTAINER_PREFIX}-" -q) || true

  if [[ -z "$containers" ]]; then
    echo "No ${SCRIPT_NAME} containers found."
    return
  fi

  printf "%-30s %-10s %-10s %-6s %-8s %s\n" "CONTAINER" "INSTANCE" "ISOLATION" "MODE" "STATUS" "PROJECT PATH"
  printf "%-30s %-10s %-10s %-6s %-8s %s\n" "---------" "--------" "---------" "----" "------" "------------"

  # The while loop runs in a pipeline subshell; it only prints, so no
  # state needs to survive past it.
  docker ps -a \
    --filter "name=${CONTAINER_PREFIX}-" \
    --format '{{.Names}}' \
    | while IFS= read -r name; do
      local instance isolation mode state path
      instance=$(docker inspect "$name" --format '{{index .Config.Labels "aibox.instance"}}' 2>/dev/null || echo "")
      isolation=$(docker inspect "$name" --format '{{index .Config.Labels "aibox.isolation"}}' 2>/dev/null || echo "")
      state=$(docker inspect "$name" --format '{{.State.Status}}' 2>/dev/null || echo "")
      path=$(docker inspect "$name" --format '{{index .Config.Labels "aibox.path"}}' 2>/dev/null || echo "")
      # Extract AIBOX_MODE from container env
      mode=$(docker inspect "$name" --format '{{range .Config.Env}}{{println .}}{{end}}' 2>/dev/null | grep '^AIBOX_MODE=' | cut -d= -f2- || echo "")
      # Blank fields render as "-" so columns stay aligned.
      [[ -z "$instance" ]] && instance="-"
      [[ -z "$isolation" ]] && isolation="-"
      [[ -z "$mode" ]] && mode="-"
      [[ -z "$path" ]] && path="-"
      printf "%-30s %-10s %-10s %-6s %-8s %s\n" "$name" "$instance" "$isolation" "$mode" "$state" "$path"
    done
}
|
|
1027
|
+
|
|
1028
|
+
cmd_nuke() {
  # Remove every aibox-managed container on this machine, across all
  # projects, after an explicit confirmation (defaulting to "no").
  local all_ids
  all_ids=$(docker ps -a --filter "name=${CONTAINER_PREFIX}-" -q) || true
  if [[ -z "$all_ids" ]]; then
    echo "No ${SCRIPT_NAME} containers found."
    return
  fi

  echo "This will remove ALL ${SCRIPT_NAME} containers across ALL projects:"
  docker ps -a --filter "name=${CONTAINER_PREFIX}-" --format "  {{.Names}} ({{.Status}})"
  echo ""
  if _confirm_no "Remove all?"; then
    docker ps -a --filter "name=${CONTAINER_PREFIX}-" -q | xargs docker rm -f
    echo "All containers removed."
  else
    echo "Aborted."
  fi
}
|
|
1045
|
+
|
|
1046
|
+
cmd_volumes() {
  # Inventory isolation artifacts: docker volumes named "<container>-src"
  # (copy mode) and worktree directories (worktree mode), flagging each
  # as "in use" or "orphan" depending on whether its container exists.
  # List copy volumes
  local volumes
  volumes=$(docker volume ls --format '{{.Name}}' | grep -F "${CONTAINER_PREFIX}-" | grep -- '-src$') || true

  # List worktree dirs
  local worktree_dir="${CONFIG_DIR}/worktrees"
  local worktrees=""
  if [[ -d "$worktree_dir" ]]; then
    # NOTE(review): parses `ls` output — acceptable only while worktree
    # names are container names (never contain newlines); confirm.
    worktrees=$(ls -1 "$worktree_dir" 2>/dev/null) || true
  fi

  if [[ -z "$volumes" && -z "$worktrees" ]]; then
    echo "No isolation volumes or worktrees found."
    return
  fi

  printf "%-50s %-10s %-8s %s\n" "NAME" "TYPE" "STATUS" "CREATED"
  printf "%-50s %-10s %-8s %s\n" "----" "----" "------" "-------"

  if [[ -n "$volumes" ]]; then
    while IFS= read -r vol; do
      local status="orphan"
      # The owning container is the volume name minus the "-src" suffix.
      local container_name="${vol%-src}"
      if docker ps -a --format '{{.Names}}' | grep -Fxq "$container_name" 2>/dev/null; then
        status="in use"
      fi
      local created
      created=$(docker volume inspect "$vol" --format '{{.CreatedAt}}' 2>/dev/null | cut -d' ' -f1 || echo "-")
      printf "%-50s %-10s %-8s %s\n" "$vol" "copy" "$status" "$created"
    done <<< "$volumes"
  fi

  if [[ -n "$worktrees" ]]; then
    while IFS= read -r wt; do
      local status="orphan"
      # Worktree directory names match their container names exactly.
      if docker ps -a --format '{{.Names}}' | grep -Fxq "$wt" 2>/dev/null; then
        status="in use"
      fi
      local wt_path="${worktree_dir}/${wt}"
      printf "%-50s %-10s %-8s %s\n" "$wt_path" "worktree" "$status" "-"
    done <<< "$worktrees"
  fi
}
|
|
1090
|
+
|
|
1091
|
+
_init_files() {
  # Create all project-local artifacts: compose file, saved .aibox
  # config, WebStorm interpreter settings, and .gitignore entries.
  write_project_compose
  save_project_conf
  configure_webstorm

  local gitignore="${PROJECT_DIR}/.gitignore"
  local entry
  for entry in "compose.dev.yaml" ".aibox" ".idea/workspace.xml"; do
    # Append only when not already present; appending to a missing file
    # also creates it.
    if [[ ! -f "$gitignore" ]] || ! grep -qxF "$entry" "$gitignore"; then
      echo "$entry" >> "$gitignore"
    fi
  done
  echo "Updated .gitignore"

  echo ""
  echo "Done! Open this project in WebStorm — Node.js runtime is configured."
  echo "  ${SCRIPT_NAME} up      Start the container"
  echo "  ${SCRIPT_NAME} claude  Open Claude Code"
  echo "  ${SCRIPT_NAME} shell   Open a shell"
}
|
|
1113
|
+
|
|
1114
|
+
cmd_init() {
  # Explicit project initialization: describe what will be written,
  # confirm, then create the files via _init_files.
  _require_safe_dir

  echo "Will create/update in ${PROJECT_DIR}:"
  echo "  compose.dev.yaml, .aibox, .idea/workspace.xml, .gitignore"
  echo ""

  if ! _confirm_yes "Proceed?"; then
    echo "Aborted."
    exit 0
  fi

  _init_files
}
|
|
1128
|
+
|
|
1129
|
+
cmd_help() {
  # Print the usage text embedded as the leading "# aibox ..." comment
  # banner of this script: select lines from the "# aibox" marker up to
  # the first non-comment line, keep only comment lines, and strip the
  # leading "# " prefix.
  awk '/^# aibox/,/^[^#]/{if(/^#/) print}' "$0" | sed 's/^# \{0,1\}//'
  echo "Version: ${AIBOX_VERSION}"
}
|
|
1133
|
+
|
|
1134
|
+
cmd_version() {
  # Report the CLI version string.
  printf 'aibox v%s\n' "$AIBOX_VERSION"
}
|
|
1137
|
+
|
|
1138
|
+
# ── Main ─────────────────────────────────────────────────────────
# Dispatch on the first argument; default to `help` when none is given.
case "${1:-help}" in
  up) cmd_up ;;
  claude) cmd_claude ;;
  shell) cmd_shell "$@" ;;  # cmd_shell shifts off the subcommand itself
  down) cmd_down ;;
  status) cmd_status ;;
  volumes) cmd_volumes ;;
  nuke) cmd_nuke ;;
  build) cmd_build ;;
  init) cmd_init ;;
  help|-h|--help) cmd_help ;;  # fix: also accept GNU-style --help
  version|-v|--version) cmd_version ;;
  *)
    # Fix: the diagnostic goes to stderr so scripted callers that parse
    # stdout are not confused; exit code 1 signals the bad invocation.
    echo "Unknown command: $1" >&2
    cmd_help
    exit 1
    ;;
esac
|
package/package.json
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "aibox-cli",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Run AI coding agents in isolated Docker containers",
|
|
5
|
+
"author": "repalash <palash@shaders.app>",
|
|
6
|
+
"license": "MIT",
|
|
7
|
+
"repository": {
|
|
8
|
+
"type": "git",
|
|
9
|
+
"url": "https://github.com/blitzdotdev/aibox.git"
|
|
10
|
+
},
|
|
11
|
+
"homepage": "https://github.com/blitzdotdev/aibox",
|
|
12
|
+
"bugs": {
|
|
13
|
+
"url": "https://github.com/blitzdotdev/aibox/issues"
|
|
14
|
+
},
|
|
15
|
+
"keywords": [
|
|
16
|
+
"ai",
|
|
17
|
+
"docker",
|
|
18
|
+
"sandbox",
|
|
19
|
+
"claude",
|
|
20
|
+
"coding-agent",
|
|
21
|
+
"devtools",
|
|
22
|
+
"cli"
|
|
23
|
+
],
|
|
24
|
+
"bin": {
|
|
25
|
+
"aibox": "./bin/aibox"
|
|
26
|
+
},
|
|
27
|
+
"files": [
|
|
28
|
+
"bin/aibox",
|
|
29
|
+
"README.md",
|
|
30
|
+
"LICENSE"
|
|
31
|
+
],
|
|
32
|
+
"os": [
|
|
33
|
+
"darwin",
|
|
34
|
+
"linux"
|
|
35
|
+
],
|
|
36
|
+
"engines": {
|
|
37
|
+
"node": ">=18"
|
|
38
|
+
}
|
|
39
|
+
}
|