augint-shell 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ai_shell/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ """augint-shell (ai-shell) - Launch AI coding tools and local LLMs in Docker containers."""
2
+
3
+ __version__ = "0.1.0"
4
+
5
+ __all__ = [
6
+ "__version__",
7
+ ]
File without changes
@@ -0,0 +1,46 @@
1
+ """ai-shell CLI entry point."""
2
+
3
+ import sys
4
+
5
+ import click
6
+
7
+ from ai_shell import __version__
8
+ from ai_shell.cli.commands.llm import llm_group
9
+ from ai_shell.cli.commands.manage import manage_group
10
+ from ai_shell.cli.commands.tools import aider, claude, claude_x, codex, opencode, shell
11
+
12
+
13
@click.group()
@click.version_option(version=__version__, prog_name="ai-shell")
@click.option("--project", default=None, help="Override project name for container naming.")
@click.pass_context
def cli(ctx, project):
    """AI Shell - Launch AI coding tools and local LLMs in Docker containers."""
    # Stash the optional --project override on the Click context so every
    # subcommand can forward it to load_config().
    ctx.ensure_object(dict)
    ctx.obj["project"] = project
21
+
22
+
23
# Tool subcommands (each launches a tool inside the per-project dev container)
cli.add_command(claude)
cli.add_command(claude_x, "claude-x")
cli.add_command(codex)
cli.add_command(opencode)
cli.add_command(aider)
cli.add_command(shell)

# Command groups: LLM stack management and dev-container lifecycle
cli.add_command(llm_group, "llm")
cli.add_command(manage_group, "manage")
34
+
35
+
36
def main():
    """Console-script entry point: run the CLI, exiting 1 with a clean message on failure."""
    try:
        cli()
    except Exception as exc:  # top-level boundary: no raw tracebacks for users
        click.echo(f"Error: {exc}", err=True)
        raise SystemExit(1)


if __name__ == "__main__":
    main()
File without changes
@@ -0,0 +1,204 @@
1
+ """LLM stack management commands: up, down, pull, setup, status, logs, shell."""
2
+
3
+ import time
4
+ from pathlib import Path
5
+
6
+ import click
7
+ from rich.console import Console
8
+
9
+ from ai_shell.config import load_config
10
+ from ai_shell.container import ContainerManager
11
+ from ai_shell.defaults import OLLAMA_CONTAINER, WEBUI_CONTAINER
12
+
13
+ console = Console(stderr=True)
14
+
15
+
16
def _get_manager(ctx) -> ContainerManager:
    """Build a ContainerManager for the CWD, honoring any --project override on the context."""
    override = ctx.obj.get("project") if ctx.obj else None
    cfg = load_config(project_override=override, project_dir=Path.cwd())
    return ContainerManager(cfg)
21
+
22
+
23
+ @click.group("llm")
24
+ @click.pass_context
25
+ def llm_group(ctx):
26
+ """Manage the local LLM stack (Ollama + Open WebUI)."""
27
+
28
+
29
+ @llm_group.command("up")
30
+ @click.pass_context
31
+ def llm_up(ctx):
32
+ """Start the LLM stack (Ollama + Open WebUI)."""
33
+ manager = _get_manager(ctx)
34
+ console.print("[bold]Starting LLM stack...[/bold]")
35
+
36
+ manager.ensure_ollama()
37
+ console.print(f" Ollama API: http://localhost:{manager.config.ollama_port}")
38
+
39
+ manager.ensure_webui()
40
+ console.print(f" Open WebUI: http://localhost:{manager.config.webui_port}")
41
+
42
+ console.print("\n[bold green]LLM stack is running.[/bold green]")
43
+ console.print("If this is your first time, run: [bold]ai-shell llm setup[/bold]")
44
+
45
+
46
+ @llm_group.command("down")
47
+ @click.pass_context
48
+ def llm_down(ctx):
49
+ """Stop the LLM stack."""
50
+ manager = _get_manager(ctx)
51
+ console.print("[bold]Stopping LLM stack...[/bold]")
52
+
53
+ for name in [WEBUI_CONTAINER, OLLAMA_CONTAINER]:
54
+ status = manager.container_status(name)
55
+ if status == "running":
56
+ manager.stop_container(name)
57
+ console.print(f" Stopped: {name}")
58
+ elif status is not None:
59
+ console.print(f" Already stopped: {name}")
60
+ else:
61
+ console.print(f" Not found: {name}")
62
+
63
+ console.print("[bold green]LLM stack stopped.[/bold green]")
64
+
65
+
66
+ @llm_group.command("pull")
67
+ @click.pass_context
68
+ def llm_pull(ctx):
69
+ """Pull LLM models into Ollama."""
70
+ manager = _get_manager(ctx)
71
+ config = manager.config
72
+
73
+ console.print(f"[bold]Pulling primary model: {config.primary_model}...[/bold]")
74
+ output = manager.exec_in_ollama(["ollama", "pull", config.primary_model])
75
+ console.print(output)
76
+
77
+ console.print(f"\n[bold]Pulling fallback model: {config.fallback_model}...[/bold]")
78
+ output = manager.exec_in_ollama(["ollama", "pull", config.fallback_model])
79
+ console.print(output)
80
+
81
+ console.print("\n[bold]Available models:[/bold]")
82
+ output = manager.exec_in_ollama(["ollama", "list"])
83
+ console.print(output)
84
+
85
+
86
+ @llm_group.command("setup")
87
+ @click.pass_context
88
+ def llm_setup(ctx):
89
+ """First-time setup: start stack, pull models, configure context window."""
90
+ manager = _get_manager(ctx)
91
+ config = manager.config
92
+
93
+ # Start the stack
94
+ console.print("[bold]Starting LLM stack...[/bold]")
95
+ manager.ensure_ollama()
96
+ manager.ensure_webui()
97
+
98
+ # Wait for Ollama to be ready
99
+ console.print("[bold]Waiting for Ollama to be ready...[/bold]")
100
+ for i in range(10):
101
+ try:
102
+ output = manager.exec_in_ollama(["ollama", "list"])
103
+ if output is not None:
104
+ break
105
+ except Exception:
106
+ pass
107
+ console.print(f" Waiting... ({i + 1}/10)")
108
+ time.sleep(2)
109
+ else:
110
+ console.print("[bold red]Ollama failed to start after 20s[/bold red]")
111
+ raise click.Abort()
112
+
113
+ # Pull models
114
+ console.print(f"\n[bold]Pulling primary model: {config.primary_model}...[/bold]")
115
+ output = manager.exec_in_ollama(["ollama", "pull", config.primary_model])
116
+ console.print(output)
117
+
118
+ console.print(f"\n[bold]Pulling fallback model: {config.fallback_model}...[/bold]")
119
+ output = manager.exec_in_ollama(["ollama", "pull", config.fallback_model])
120
+ console.print(output)
121
+
122
+ # Configure context window
123
+ console.print(f"\n[bold]Configuring context window ({config.context_size} tokens)...[/bold]")
124
+ for model in [config.primary_model, config.fallback_model]:
125
+ modelfile = f"FROM {model}\nPARAMETER num_ctx {config.context_size}\n"
126
+ # Write modelfile and create model
127
+ manager.exec_in_ollama(
128
+ [
129
+ "sh",
130
+ "-c",
131
+ f'printf "{modelfile}" > /tmp/Modelfile && '
132
+ f"ollama create {model} -f /tmp/Modelfile && rm -f /tmp/Modelfile",
133
+ ]
134
+ )
135
+
136
+ console.print("\n[bold green]============================================[/bold green]")
137
+ console.print("[bold green] Setup complete![/bold green]")
138
+ console.print(f"\n Open WebUI: http://localhost:{config.webui_port}")
139
+ console.print(f" Ollama API: http://localhost:{config.ollama_port}")
140
+ console.print(f"\n Primary model: {config.primary_model}")
141
+ console.print(f" Fallback model: {config.fallback_model}")
142
+ console.print(f" Context window: {config.context_size} tokens")
143
+ console.print("[bold green]============================================[/bold green]")
144
+
145
+
146
+ @llm_group.command("status")
147
+ @click.pass_context
148
+ def llm_status(ctx):
149
+ """Show status of LLM stack and loaded models."""
150
+ manager = _get_manager(ctx)
151
+
152
+ console.print("[bold]Container status:[/bold]")
153
+ for name in [OLLAMA_CONTAINER, WEBUI_CONTAINER]:
154
+ status = manager.container_status(name)
155
+ if status == "running":
156
+ console.print(f" {name}: [green]{status}[/green]")
157
+ elif status is not None:
158
+ console.print(f" {name}: [yellow]{status}[/yellow]")
159
+ else:
160
+ console.print(f" {name}: [red]not found[/red]")
161
+
162
+ # Show models if ollama is running
163
+ if manager.container_status(OLLAMA_CONTAINER) == "running":
164
+ console.print("\n[bold]Available models:[/bold]")
165
+ output = manager.exec_in_ollama(["ollama", "list"])
166
+ console.print(output)
167
+
168
+
169
+ @llm_group.command("logs")
170
+ @click.option("--follow", "-f", is_flag=True, help="Follow log output.")
171
+ @click.pass_context
172
+ def llm_logs(ctx, follow):
173
+ """Tail logs from the LLM stack."""
174
+ manager = _get_manager(ctx)
175
+ if follow:
176
+ # Use docker CLI for multi-container following
177
+ import os
178
+ import sys
179
+
180
+ sys.stdout.flush()
181
+ sys.stderr.flush()
182
+ os.execvp(
183
+ "docker",
184
+ ["docker", "logs", "-f", OLLAMA_CONTAINER],
185
+ )
186
+ else:
187
+ for name in [OLLAMA_CONTAINER, WEBUI_CONTAINER]:
188
+ status = manager.container_status(name)
189
+ if status is not None:
190
+ console.print(f"\n[bold]--- {name} ---[/bold]")
191
+ manager.container_logs(name, follow=False, tail=50)
192
+
193
+
194
+ @llm_group.command("shell")
195
+ @click.pass_context
196
+ def llm_shell(ctx):
197
+ """Open a bash shell in the Ollama container."""
198
+ manager = _get_manager(ctx)
199
+ status = manager.container_status(OLLAMA_CONTAINER)
200
+ if status != "running":
201
+ console.print("[red]Ollama is not running. Run: ai-shell llm up[/red]")
202
+ raise click.Abort()
203
+ console.print("[bold]Opening shell in Ollama container...[/bold]")
204
+ manager.exec_interactive(OLLAMA_CONTAINER, ["/bin/bash"])
@@ -0,0 +1,98 @@
1
+ """Container management commands: status, stop, clean, logs, pull."""
2
+
3
+ from pathlib import Path
4
+
5
+ import click
6
+ from rich.console import Console
7
+
8
+ from ai_shell.config import load_config
9
+ from ai_shell.container import ContainerManager
10
+ from ai_shell.defaults import dev_container_name
11
+ from ai_shell.exceptions import ContainerNotFoundError
12
+
13
+ console = Console(stderr=True)
14
+
15
+
16
def _get_manager(ctx) -> ContainerManager:
    """Construct a ContainerManager for the current directory, applying any
    --project override carried on the Click context."""
    override = ctx.obj.get("project") if ctx.obj else None
    return ContainerManager(load_config(project_override=override, project_dir=Path.cwd()))
21
+
22
+
23
+ @click.group("manage")
24
+ @click.pass_context
25
+ def manage_group(ctx):
26
+ """Manage dev containers."""
27
+
28
+
29
+ @manage_group.command("status")
30
+ @click.pass_context
31
+ def manage_status(ctx):
32
+ """Show dev container status for current project."""
33
+ manager = _get_manager(ctx)
34
+ name = dev_container_name(manager.config.project_name)
35
+ status = manager.container_status(name)
36
+
37
+ if status is None:
38
+ console.print(
39
+ f"[yellow]No container found for project: {manager.config.project_name}[/yellow]"
40
+ )
41
+ elif status == "running":
42
+ console.print(f"[green]{name}: running[/green]")
43
+ else:
44
+ console.print(f"[yellow]{name}: {status}[/yellow]")
45
+
46
+
47
+ @manage_group.command("stop")
48
+ @click.pass_context
49
+ def manage_stop(ctx):
50
+ """Stop the dev container for current project."""
51
+ manager = _get_manager(ctx)
52
+ name = dev_container_name(manager.config.project_name)
53
+
54
+ try:
55
+ manager.stop_container(name)
56
+ console.print(f"[green]Stopped: {name}[/green]")
57
+ except ContainerNotFoundError:
58
+ console.print(f"[yellow]No container found: {name}[/yellow]")
59
+
60
+
61
+ @manage_group.command("clean")
62
+ @click.option("--force", "-f", is_flag=True, help="Force remove even if running.")
63
+ @click.pass_context
64
+ def manage_clean(ctx, force):
65
+ """Remove the dev container for current project."""
66
+ manager = _get_manager(ctx)
67
+ name = dev_container_name(manager.config.project_name)
68
+
69
+ try:
70
+ manager.remove_container(name, force=force)
71
+ console.print(f"[green]Removed: {name}[/green]")
72
+ except ContainerNotFoundError:
73
+ console.print(f"[yellow]No container found: {name}[/yellow]")
74
+
75
+
76
+ @manage_group.command("logs")
77
+ @click.option("--follow", "-f", is_flag=True, help="Follow log output.")
78
+ @click.pass_context
79
+ def manage_logs(ctx, follow):
80
+ """Tail dev container logs."""
81
+ manager = _get_manager(ctx)
82
+ name = dev_container_name(manager.config.project_name)
83
+
84
+ try:
85
+ manager.container_logs(name, follow=follow)
86
+ except ContainerNotFoundError:
87
+ console.print(f"[yellow]No container found: {name}[/yellow]")
88
+
89
+
90
+ @manage_group.command("pull")
91
+ @click.pass_context
92
+ def manage_pull(ctx):
93
+ """Pull the latest Docker image."""
94
+ manager = _get_manager(ctx)
95
+ image = manager.config.full_image
96
+ console.print(f"[bold]Pulling {image}...[/bold]")
97
+ manager._pull_image_if_needed(image)
98
+ console.print(f"[green]Image ready: {image}[/green]")
@@ -0,0 +1,94 @@
1
+ """AI tool subcommands: claude, codex, opencode, aider, shell."""
2
+
3
+ from pathlib import Path
4
+
5
+ import click
6
+ from rich.console import Console
7
+
8
+ from ai_shell.config import load_config
9
+ from ai_shell.container import ContainerManager
10
+
11
+ console = Console(stderr=True)
12
+
13
+
14
def _get_manager(ctx) -> tuple[ContainerManager, str]:
    """Build a ContainerManager and ensure the per-project dev container is up.

    Returns:
        (manager, container_name) for the running dev container.
    """
    override = ctx.obj.get("project") if ctx.obj else None
    manager = ContainerManager(load_config(project_override=override, project_dir=Path.cwd()))
    return manager, manager.ensure_dev_container()
21
+
22
+
23
@click.command()
@click.argument("extra_args", nargs=-1, type=click.UNPROCESSED)
@click.pass_context
def claude(ctx, extra_args):
    """Launch Claude Code in the dev container."""
    manager, name = _get_manager(ctx)
    console.print(f"[bold]Launching Claude Code in {name}...[/bold]")
    # Forward any unprocessed CLI arguments straight through to the tool.
    manager.exec_interactive(name, ["claude", *extra_args])
32
+
33
+
34
+ @click.command("claude-x")
35
+ @click.argument("extra_args", nargs=-1, type=click.UNPROCESSED)
36
+ @click.pass_context
37
+ def claude_x(ctx, extra_args):
38
+ """Launch Claude Code with --dangerously-skip-permissions."""
39
+ manager, name = _get_manager(ctx)
40
+ cmd = ["claude", "--dangerously-skip-permissions", "-c", *extra_args]
41
+ console.print(f"[bold]Launching Claude Code (skip-permissions) in {name}...[/bold]")
42
+ manager.exec_interactive(name, cmd)
43
+
44
+
45
@click.command()
@click.argument("extra_args", nargs=-1, type=click.UNPROCESSED)
@click.pass_context
def codex(ctx, extra_args):
    """Launch Codex in the dev container."""
    manager, name = _get_manager(ctx)
    # Baseline flags first, then any user-supplied passthrough arguments.
    base = ["codex", "--dangerously-bypass-approvals-and-sandbox", "--search"]
    console.print(f"[bold]Launching Codex in {name}...[/bold]")
    manager.exec_interactive(name, base + list(extra_args))
54
+
55
+
56
@click.command()
@click.pass_context
def opencode(ctx):
    """Launch opencode in the dev container."""
    manager, container = _get_manager(ctx)
    console.print(f"[bold]Launching opencode in {container}...[/bold]")
    # opencode lives at a fixed path inside the dev image.
    manager.exec_interactive(container, ["/root/.opencode/bin/opencode"])
64
+
65
+
66
@click.command()
@click.argument("extra_args", nargs=-1, type=click.UNPROCESSED)
@click.pass_context
def aider(ctx, extra_args):
    """Launch aider with local LLM in the dev container."""
    # FIX: reuse _get_manager like every other tool command instead of
    # duplicating the config/manager/ensure_dev_container sequence inline.
    manager, name = _get_manager(ctx)
    config = manager.config
    cmd = [
        "aider",
        "--model",
        config.aider_model,
        "--yes-always",
        "--restore-chat-history",
        *extra_args,
    ]
    # FIX: honor the configured Ollama port instead of hardcoding 11434 —
    # the stack publishes Ollama on config.ollama_port on the host, which
    # the dev container reaches via host.docker.internal.
    extra_env = {"OLLAMA_API_BASE": f"http://host.docker.internal:{config.ollama_port}"}
    console.print(f"[bold]Launching aider ({config.aider_model}) in {name}...[/bold]")
    manager.exec_interactive(name, cmd, extra_env=extra_env)
86
+
87
+
88
@click.command()
@click.pass_context
def shell(ctx):
    """Drop into an interactive bash shell inside the project's dev container."""
    manager, container = _get_manager(ctx)
    console.print(f"[bold]Opening shell in {container}...[/bold]")
    manager.exec_interactive(container, ["/bin/bash"])
ai_shell/config.py ADDED
@@ -0,0 +1,155 @@
1
+ """Configuration loading for ai-shell.
2
+
3
+ Priority (highest wins): CLI flags > env vars > project ai-shell.toml > global config > defaults.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ import logging
9
+ import os
10
+ import tomllib
11
+ from dataclasses import dataclass, field
12
+ from pathlib import Path
13
+
14
+ from ai_shell import __version__
15
+ from ai_shell.defaults import (
16
+ DEFAULT_CONTEXT_SIZE,
17
+ DEFAULT_FALLBACK_MODEL,
18
+ DEFAULT_IMAGE,
19
+ DEFAULT_OLLAMA_PORT,
20
+ DEFAULT_PRIMARY_MODEL,
21
+ DEFAULT_WEBUI_PORT,
22
+ )
23
+
24
+ logger = logging.getLogger(__name__)
25
+
26
+
27
@dataclass
class AiShellConfig:
    """Configuration for ai-shell.

    Populated by load_config(), which layers sources lowest-to-highest:
    defaults here, global TOML, project TOML, env vars, CLI overrides.
    """

    # Container
    image: str = DEFAULT_IMAGE  # image repository (without tag)
    image_tag: str = __version__  # tag defaults to the package version
    project_name: str = ""  # derived from project_dir when left empty
    project_dir: Path = field(default_factory=Path.cwd)

    # LLM
    primary_model: str = DEFAULT_PRIMARY_MODEL
    fallback_model: str = DEFAULT_FALLBACK_MODEL
    context_size: int = DEFAULT_CONTEXT_SIZE  # num_ctx applied during `llm setup`
    ollama_port: int = DEFAULT_OLLAMA_PORT  # host port for the Ollama API
    webui_port: int = DEFAULT_WEBUI_PORT  # host port for Open WebUI

    # Aider
    aider_model: str = f"ollama_chat/{DEFAULT_PRIMARY_MODEL}"

    # Extra configuration
    extra_env: dict[str, str] = field(default_factory=dict)  # merged into dev container env
    extra_volumes: list[str] = field(default_factory=list)  # "source:target[:ro]" bind specs

    @property
    def full_image(self) -> str:
        """Return the full image reference with tag."""
        return f"{self.image}:{self.image_tag}"
55
+
56
+
57
def load_config(
    project_override: str | None = None,
    project_dir: Path | None = None,
) -> AiShellConfig:
    """Assemble an AiShellConfig from every configuration source.

    Later sources win: defaults < global toml < project toml < env vars < CLI.
    """
    config = AiShellConfig()

    if project_dir:
        config.project_dir = project_dir

    # TOML layers, lowest priority first.
    toml_layers = (
        Path.home() / ".config" / "ai-shell" / "config.toml",  # global
        config.project_dir / "ai-shell.toml",  # per-project
    )
    for toml_path in toml_layers:
        if toml_path.exists():
            _apply_toml(config, toml_path)

    # Environment variables override file-based settings.
    _apply_env_vars(config)

    # An explicit CLI override beats everything else.
    if project_override:
        config.project_name = project_override

    # Last resort: derive the project name from the project directory.
    if not config.project_name:
        from ai_shell.defaults import sanitize_project_name

        config.project_name = sanitize_project_name(config.project_dir)

    return config
94
+
95
+
96
def _apply_toml(config: AiShellConfig, path: Path) -> None:
    """Overlay settings from one TOML file onto *config*.

    Unreadable or malformed files are logged and skipped rather than fatal.
    """
    try:
        with open(path, "rb") as fh:
            data = tomllib.load(fh)
    except (OSError, tomllib.TOMLDecodeError) as e:
        logger.warning("Failed to load config from %s: %s", path, e)
        return

    logger.debug("Loading config from %s", path)

    # [container] section
    container = data.get("container", {})
    for key in ("image", "image_tag"):
        if key in container:
            setattr(config, key, container[key])
    if "extra_env" in container:
        config.extra_env.update(container["extra_env"])
    if "extra_volumes" in container:
        config.extra_volumes.extend(container["extra_volumes"])

    # [llm] section — numeric values are normalized with int()
    llm = data.get("llm", {})
    for key, cast in (
        ("primary_model", None),
        ("fallback_model", None),
        ("context_size", int),
        ("ollama_port", int),
        ("webui_port", int),
    ):
        if key in llm:
            raw = llm[key]
            setattr(config, key, cast(raw) if cast else raw)

    # [aider] section
    if "model" in data.get("aider", {}):
        config.aider_model = data["aider"]["model"]
135
+
136
+
137
def _apply_env_vars(config: AiShellConfig) -> None:
    """Apply AI_SHELL_* environment variable overrides."""
    # env var -> (config attribute, converter applied to the raw string)
    env_map: dict[str, tuple[str, type]] = {
        "AI_SHELL_IMAGE": ("image", str),
        "AI_SHELL_IMAGE_TAG": ("image_tag", str),
        "AI_SHELL_PROJECT": ("project_name", str),
        "AI_SHELL_PRIMARY_MODEL": ("primary_model", str),
        "AI_SHELL_FALLBACK_MODEL": ("fallback_model", str),
        "AI_SHELL_CONTEXT_SIZE": ("context_size", int),
        "AI_SHELL_OLLAMA_PORT": ("ollama_port", int),
        "AI_SHELL_WEBUI_PORT": ("webui_port", int),
        "AI_SHELL_AIDER_MODEL": ("aider_model", str),
    }

    for env_key, (attr, convert) in env_map.items():
        raw = os.environ.get(env_key)
        if raw is None:
            continue
        setattr(config, attr, convert(raw))
        logger.debug("Config override from env: %s=%s", env_key, raw)
ai_shell/container.py ADDED
@@ -0,0 +1,326 @@
1
+ """Docker container lifecycle management.
2
+
3
+ Replaces docker-compose.yml by using Docker SDK to create and manage containers
4
+ with the exact same configuration.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import logging
10
+ import os
11
+ import sys
12
+ from typing import TYPE_CHECKING, NoReturn
13
+
14
+ from docker.errors import APIError, ImageNotFound, NotFound
15
+ from docker.types import DeviceRequest, Mount
16
+
17
+ import docker
18
+ from ai_shell.defaults import (
19
+ OLLAMA_CONTAINER,
20
+ OLLAMA_DATA_VOLUME,
21
+ OLLAMA_IMAGE,
22
+ SHM_SIZE,
23
+ WEBUI_CONTAINER,
24
+ WEBUI_DATA_VOLUME,
25
+ WEBUI_IMAGE,
26
+ build_dev_environment,
27
+ build_dev_mounts,
28
+ dev_container_name,
29
+ )
30
+ from ai_shell.exceptions import (
31
+ ContainerNotFoundError,
32
+ DockerNotAvailableError,
33
+ ImagePullError,
34
+ )
35
+ from ai_shell.gpu import detect_gpu
36
+
37
+ if TYPE_CHECKING:
38
+ from docker.models.containers import Container
39
+
40
+ from ai_shell.config import AiShellConfig
41
+
42
+ logger = logging.getLogger(__name__)
43
+
44
+
45
class ContainerManager:
    """Manages Docker containers for ai-shell.

    Handles the dev container (per-project) and LLM stack (host-level singletons).
    """

    # User-defined bridge network shared by the LLM stack, so Open WebUI can
    # resolve the Ollama container by name while each still publishes its own
    # host port (container-type network_mode forbids port publishing).
    LLM_NETWORK = "augint-shell-llm"

    def __init__(self, config: AiShellConfig) -> None:
        """Connect to the Docker daemon.

        Raises:
            DockerNotAvailableError: if the daemon cannot be reached.
        """
        self.config = config
        try:
            self.client = docker.from_env()  # type: ignore[attr-defined]
            self.client.ping()
        except docker.errors.DockerException as e:
            raise DockerNotAvailableError(
                f"Docker is not available. Is the Docker daemon running?\n Error: {e}"
            ) from e

    # =========================================================================
    # Dev container (per-project)
    # =========================================================================

    def ensure_dev_container(self) -> str:
        """Get or create the dev container for the current project.

        If the container exists but is stopped, it is started.
        If it doesn't exist, it is created with the full configuration.

        Returns the container name.
        """
        name = dev_container_name(self.config.project_name)
        container = self._get_container(name)

        if container is not None:
            if container.status != "running":
                logger.info("Starting existing container: %s", name)
                container.start()
            return name

        logger.info("Creating dev container: %s", name)
        self._pull_image_if_needed(self.config.full_image)
        self._create_dev_container(name)
        return name

    def _create_dev_container(self, name: str) -> Container:
        """Create the dev container with all docker-compose config."""
        mounts = build_dev_mounts(self.config.project_dir, self.config.project_name)
        environment = build_dev_environment(self.config.extra_env)

        # Add any extra volumes from config ("source:target[:ro]" bind specs).
        for vol_spec in self.config.extra_volumes:
            parts = vol_spec.split(":")
            if len(parts) >= 2:
                source, target = parts[0], parts[1]
                read_only = len(parts) > 2 and parts[2] == "ro"
                mounts.append(
                    Mount(
                        target=target,
                        source=source,
                        type="bind",
                        read_only=read_only,
                    )
                )

        container: Container = self.client.containers.run(
            image=self.config.full_image,
            name=name,
            mounts=mounts,
            environment=environment,
            working_dir=f"/root/projects/{self.config.project_name}",
            # Keep-alive command: real work happens via `docker exec`.
            command="tail -f /dev/null",
            stdin_open=True,
            tty=True,
            shm_size=SHM_SIZE,
            init=True,
            # Let the container reach services published on the host.
            extra_hosts={"host.docker.internal": "host-gateway"},
            ports={"5678/tcp": None, "8000/tcp": None},
            detach=True,
        )
        logger.info("Container created: %s", name)
        return container

    def exec_interactive(
        self,
        container_name: str,
        command: list[str],
        extra_env: dict[str, str] | None = None,
    ) -> NoReturn:
        """Replace the current process with docker exec -it.

        This uses os.execvp to hand off to docker CLI for proper TTY support.
        The Python process is replaced and never returns.
        """
        args = ["docker", "exec", "-it"]

        if extra_env:
            for key, value in extra_env.items():
                args.extend(["-e", f"{key}={value}"])

        args.append(container_name)
        args.extend(command)

        logger.debug("exec: %s", " ".join(args))

        # Flush output before replacing process
        sys.stdout.flush()
        sys.stderr.flush()

        os.execvp("docker", args)

    # =========================================================================
    # LLM stack (host-level singletons)
    # =========================================================================

    def ensure_ollama(self) -> str:
        """Get or create the Ollama container with GPU auto-detection.

        Returns the container name.
        """
        container = self._get_container(OLLAMA_CONTAINER)

        if container is not None:
            if container.status != "running":
                logger.info("Starting existing Ollama container")
                container.start()
            return OLLAMA_CONTAINER

        logger.info("Creating Ollama container")
        self._pull_image_if_needed(OLLAMA_IMAGE)

        # GPU auto-detection: request one GPU when available, else run on CPU.
        gpu_available = detect_gpu()
        device_requests = None
        if gpu_available:
            device_requests = [DeviceRequest(count=1, capabilities=[["gpu"]])]
            logger.info("GPU detected - Ollama will use NVIDIA GPU")
        else:
            logger.warning("No GPU detected - Ollama will run on CPU (slower inference)")

        kwargs: dict = {
            "image": OLLAMA_IMAGE,
            "name": OLLAMA_CONTAINER,
            "ports": {"11434/tcp": ("0.0.0.0", self.config.ollama_port)},  # nosec B104
            "mounts": [
                Mount(
                    target="/root/.ollama",
                    source=OLLAMA_DATA_VOLUME,
                    type="volume",
                )
            ],
            "restart_policy": {"Name": "unless-stopped"},
            "detach": True,
        }

        if device_requests:
            kwargs["device_requests"] = device_requests

        self.client.containers.run(**kwargs)
        logger.info("Ollama container created on port %d", self.config.ollama_port)
        return OLLAMA_CONTAINER

    def ensure_webui(self) -> str:
        """Get or create the Open WebUI container.

        Returns the container name.
        """
        container = self._get_container(WEBUI_CONTAINER)

        if container is not None:
            if container.status != "running":
                logger.info("Starting existing WebUI container")
                container.start()
            return WEBUI_CONTAINER

        logger.info("Creating Open WebUI container")
        self._pull_image_if_needed(WEBUI_IMAGE)

        # BUG FIX: the original passed both `ports` and
        # network_mode=f"container:{OLLAMA_CONTAINER}", a combination Docker
        # rejects ("conflicting options: port publishing and the container
        # type network mode"). Join both containers to a shared bridge
        # network instead: the WebUI resolves the Ollama container by name
        # and can still publish its own host port.
        network = self._ensure_llm_network()
        ollama = self._get_container(OLLAMA_CONTAINER)
        if ollama is not None:
            try:
                network.connect(ollama)
            except APIError:
                pass  # already attached to the network

        self.client.containers.run(
            image=WEBUI_IMAGE,
            name=WEBUI_CONTAINER,
            ports={"8080/tcp": ("0.0.0.0", self.config.webui_port)},  # nosec B104
            environment={
                "OLLAMA_BASE_URL": f"http://{OLLAMA_CONTAINER}:11434",
                "WEBUI_AUTH": "false",
            },
            mounts=[
                Mount(
                    target="/app/backend/data",
                    source=WEBUI_DATA_VOLUME,
                    type="volume",
                )
            ],
            restart_policy={"Name": "unless-stopped"},
            detach=True,
            network=self.LLM_NETWORK,
        )

        logger.info("Open WebUI container created on port %d", self.config.webui_port)
        return WEBUI_CONTAINER

    def _ensure_llm_network(self):
        """Get or create the bridge network shared by the LLM stack containers."""
        try:
            return self.client.networks.get(self.LLM_NETWORK)
        except NotFound:
            return self.client.networks.create(self.LLM_NETWORK, driver="bridge")

    def exec_in_ollama(self, command: list[str]) -> str:
        """Run a command in the Ollama container and return stdout.

        Used for: ollama pull, ollama list, ollama create.

        Raises:
            ContainerNotFoundError: if the Ollama container is absent or stopped.
        """
        container = self._get_container(OLLAMA_CONTAINER)
        if container is None or container.status != "running":
            raise ContainerNotFoundError(OLLAMA_CONTAINER)

        exit_code, output = container.exec_run(
            cmd=command,
            stdout=True,
            stderr=True,
        )
        decoded: str = output.decode("utf-8", errors="replace")
        # Failures are logged but the (combined) output is still returned so
        # callers can surface it to the user.
        if exit_code != 0:
            logger.error("Command failed in ollama: %s\n%s", " ".join(command), decoded)
        return decoded

    # =========================================================================
    # Container lifecycle
    # =========================================================================

    def stop_container(self, name: str) -> None:
        """Stop a container by name.

        Raises:
            ContainerNotFoundError: if no container with that name exists.
        """
        container = self._get_container(name)
        if container is None:
            raise ContainerNotFoundError(name)
        if container.status == "running":
            container.stop()
            logger.info("Stopped container: %s", name)

    def remove_container(self, name: str, force: bool = False) -> None:
        """Remove a container by name.

        Raises:
            ContainerNotFoundError: if no container with that name exists.
        """
        container = self._get_container(name)
        if container is None:
            raise ContainerNotFoundError(name)
        container.remove(force=force)
        logger.info("Removed container: %s", name)

    def container_status(self, name: str) -> str | None:
        """Get the status of a container, or None if it doesn't exist."""
        container = self._get_container(name)
        if container is None:
            return None
        return container.status  # type: ignore[no-any-return]

    def container_logs(self, name: str, follow: bool = False, tail: int = 100) -> None:
        """Print container logs. If follow=True, streams via docker CLI."""
        if follow:
            # Use docker CLI for streaming; execvp replaces this process.
            args = ["docker", "logs", "-f", name]
            os.execvp("docker", args)
        else:
            container = self._get_container(name)
            if container is None:
                raise ContainerNotFoundError(name)
            logs = container.logs(tail=tail).decode("utf-8", errors="replace")
            print(logs)

    # =========================================================================
    # Internal helpers
    # =========================================================================

    def _get_container(self, name: str) -> Container | None:
        """Get a container by name, or None if it doesn't exist."""
        try:
            return self.client.containers.get(name)
        except NotFound:
            return None

    def _pull_image_if_needed(self, image: str) -> None:
        """Pull a Docker image if not available locally.

        Raises:
            ImagePullError: if the registry pull fails.
        """
        try:
            self.client.images.get(image)
            logger.debug("Image already available: %s", image)
            return
        except ImageNotFound:
            pass

        logger.info("Pulling image: %s (this may take a while)...", image)
        # FIX: the original `image.rsplit(":", 1)` treated any colon as a tag
        # separator, mis-splitting registry hosts with ports ("host:5000/img")
        # and relying on pull-all-tags behavior for tagless names. Only treat
        # the colon as a tag separator when it appears after the last slash.
        repo, sep, tag = image.rpartition(":")
        if not sep or "/" in tag:
            repo, tag = image, ""
        try:
            if tag:
                self.client.images.pull(repo, tag=tag)
            else:
                self.client.images.pull(image, tag="latest")
            logger.info("Image pulled: %s", image)
        except APIError as e:
            raise ImagePullError(image, str(e)) from e
ai_shell/defaults.py ADDED
@@ -0,0 +1,157 @@
1
"""Constants and configuration builders for augint-shell.

Encodes all docker-compose.yml configuration as Python, so no compose file is needed.
"""

from __future__ import annotations

import logging
import os
import re
from pathlib import Path
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Imported for annotations only; avoids a hard docker dependency at import time.
    from docker.types import Mount

logger = logging.getLogger(__name__)

# =============================================================================
# Image defaults
# =============================================================================
DEFAULT_IMAGE = "svange/augint-shell"  # Docker Hub image used for dev containers
CONTAINER_PREFIX = "augint-shell"  # prefix shared by all container names
SHM_SIZE = "2g"  # /dev/shm size for the dev container

# =============================================================================
# Volume names (prefixed to avoid collisions)
# =============================================================================
UV_CACHE_VOLUME = "augint-shell-uv-cache"  # shared uv package cache
OLLAMA_DATA_VOLUME = "augint-shell-ollama-data"  # Ollama model storage
WEBUI_DATA_VOLUME = "augint-shell-webui-data"  # Open WebUI state

# =============================================================================
# LLM defaults
# =============================================================================
OLLAMA_IMAGE = "ollama/ollama"
WEBUI_IMAGE = "ghcr.io/open-webui/open-webui:main"
DEFAULT_PRIMARY_MODEL = "qwen3.5:27b"
DEFAULT_FALLBACK_MODEL = "qwen3-coder-next"
DEFAULT_CONTEXT_SIZE = 32768  # context window, in tokens
DEFAULT_OLLAMA_PORT = 11434  # Ollama's default API port
DEFAULT_WEBUI_PORT = 3000

# =============================================================================
# Container names
# =============================================================================
OLLAMA_CONTAINER = "augint-shell-ollama"
WEBUI_CONTAINER = "augint-shell-webui"
49
+
50
+
51
def sanitize_project_name(path: Path) -> str:
    """Derive a safe container name suffix from a directory path.

    Lowercases the directory basename, collapses every run of characters
    outside [a-z0-9-] into a single hyphen, and trims hyphens at the ends.
    Falls back to "project" when nothing usable remains.
    """
    base = path.resolve().name.lower()
    cleaned = re.sub(r"-+", "-", re.sub(r"[^a-z0-9-]", "-", base))
    return cleaned.strip("-") or "project"
61
+
62
+
63
def dev_container_name(project_name: str) -> str:
    """Build the dev container name for a project (<prefix>-<project>-dev)."""
    return "-".join((CONTAINER_PREFIX, project_name, "dev"))
66
+
67
+
68
def build_dev_mounts(project_dir: Path, project_name: str) -> list[Mount]:
    """Build the full mount list matching docker-compose.yml dev service.

    Required mounts are always included. Optional bind mounts are skipped
    when their source path doesn't exist on the host.
    """
    from docker.types import Mount

    home = Path.home()

    # Required: the project tree itself (rw, delegated consistency).
    mounts: list[Mount] = [
        Mount(
            target=f"/root/projects/{project_name}",
            source=str(project_dir.resolve()),
            type="bind",
            read_only=False,
            consistency="delegated",
        )
    ]

    # Optional host paths: (source, container target, read-only flag).
    candidates: list[tuple[Path, str, bool]] = [
        (home / ".codex", "/root/.codex", False),
        (home / ".claude", "/root/.claude", False),
        (home / ".claude.json", "/root/.claude.json", False),
        (home / "projects" / "CLAUDE.md", "/root/projects/CLAUDE.md", True),
        (home / ".ssh", "/root/.ssh", True),
        (home / ".gitconfig", "/root/.gitconfig.windows", True),
        (home / ".aws", "/root/.aws", False),
    ]
    for src, dst, ro in candidates:
        if not src.exists():
            logger.debug("Skipping optional mount (not found): %s", src)
            continue
        mounts.append(Mount(target=dst, source=str(src), type="bind", read_only=ro))

    # Optional: Docker socket, mounted read-only when the host exposes one.
    docker_sock = Path("/var/run/docker.sock")
    if docker_sock.exists():
        mounts.append(
            Mount(
                target="/var/run/docker.sock",
                source=str(docker_sock),
                type="bind",
                read_only=True,
            )
        )

    # Named volume: uv cache, shared across all projects.
    mounts.append(Mount(target="/root/.cache/uv", source=UV_CACHE_VOLUME, type="volume"))

    return mounts
136
+
137
+
138
+ def build_dev_environment(extra_env: dict[str, str] | None = None) -> dict[str, str]:
139
+ """Build environment variables matching docker-compose.yml dev service.
140
+
141
+ Passes through AWS credentials, GitHub token, and sandbox flag from the host.
142
+ """
143
+ env: dict[str, str] = {
144
+ # AWS region (auth via SSO/OIDC, not static credentials)
145
+ "AWS_REGION": os.environ.get("AWS_REGION", "us-east-1"),
146
+ "AWS_PAGER": "",
147
+ # GitHub token
148
+ "GH_TOKEN": os.environ.get("GH_TOKEN", ""),
149
+ "GITHUB_TOKEN": os.environ.get("GH_TOKEN", ""),
150
+ # Sandbox mode for claude --dangerously-skip-permissions
151
+ "IS_SANDBOX": "1",
152
+ }
153
+
154
+ if extra_env:
155
+ env.update(extra_env)
156
+
157
+ return env
ai_shell/exceptions.py ADDED
@@ -0,0 +1,30 @@
1
+ """Custom exceptions for ai-shell."""
2
+
3
+
4
class AiShellError(Exception):
    """Base exception for ai-shell; every package-specific error derives from this."""
6
+
7
+
8
class DockerNotAvailableError(AiShellError):
    """Docker daemon is not running or Docker is not installed on the host."""
10
+
11
+
12
class ImagePullError(AiShellError):
    """Failed to pull a Docker image from its registry."""

    def __init__(self, image: str, reason: str) -> None:
        """Record the image reference and the registry failure reason."""
        self.image = image
        self.reason = reason
        message = f"Failed to pull {image}: {reason}"
        super().__init__(message)
19
+
20
+
21
class ContainerNotFoundError(AiShellError):
    """No container with the requested name exists."""

    def __init__(self, name: str) -> None:
        """Record the missing container's name for callers to inspect."""
        self.name = name
        message = f"Container '{name}' not found"
        super().__init__(message)
27
+
28
+
29
class ConfigError(AiShellError):
    """Configuration is invalid or inconsistent."""
ai_shell/gpu.py ADDED
@@ -0,0 +1,75 @@
1
+ """NVIDIA GPU detection for Docker containers."""
2
+
3
+ import logging
4
+ import shutil
5
+ import subprocess
6
+
7
+ logger = logging.getLogger(__name__)
8
+
9
+
10
def detect_gpu() -> bool:
    """Check if NVIDIA GPU and Docker GPU runtime are available.

    Returns True only when nvidia-smi reports a GPU *and* Docker exposes
    an nvidia runtime; any failed check yields False.
    """
    return _check_nvidia_smi() and _check_docker_gpu_runtime()
21
+
22
+
23
def _check_nvidia_smi() -> bool:
    """Check if nvidia-smi is available and reports at least one GPU."""
    if shutil.which("nvidia-smi") is None:
        logger.debug("nvidia-smi not found in PATH")
        return False
    try:
        proc = subprocess.run(
            ["nvidia-smi", "--query-gpu=name", "--format=csv,noheader"],
            capture_output=True,
            text=True,
            timeout=10,
        )
    except (subprocess.TimeoutExpired, FileNotFoundError, OSError) as e:
        logger.debug("nvidia-smi check failed: %s", e)
        return False
    names = proc.stdout.strip()
    if proc.returncode == 0 and names:
        logger.debug("GPU detected: %s", names.split("\n")[0])
        return True
    logger.debug("nvidia-smi returned no GPUs")
    return False
43
+
44
+
45
def _check_docker_gpu_runtime() -> bool:
    """Check if Docker supports GPU via the nvidia runtime."""
    if shutil.which("docker") is None:
        logger.debug("docker not found in PATH")
        return False

    def _docker_info(fmt: str) -> str | None:
        # One `docker info` invocation; None when the daemon call fails.
        out = subprocess.run(
            ["docker", "info", "--format", fmt],
            capture_output=True,
            text=True,
            timeout=10,
        )
        return out.stdout if out.returncode == 0 else None

    try:
        runtimes = _docker_info("{{.Runtimes}}")
        if runtimes is not None and "nvidia" in runtimes.lower():
            logger.debug("Docker nvidia runtime available")
            return True
        # Fall back to the full info blob (newer Docker versions).
        full = _docker_info("{{json .}}")
        if full is not None and "nvidia" in full.lower():
            logger.debug("Docker nvidia support detected via docker info")
            return True
        logger.debug("Docker nvidia runtime not found")
        return False
    except (subprocess.TimeoutExpired, FileNotFoundError, OSError) as e:
        logger.debug("Docker GPU runtime check failed: %s", e)
        return False
@@ -0,0 +1,124 @@
1
+ Metadata-Version: 2.3
2
+ Name: augint-shell
3
+ Version: 0.1.0
4
+ Summary: Launch AI coding tools and local LLMs in Docker containers
5
+ Author: svange
6
+ Requires-Dist: docker>=7.0.0
7
+ Requires-Dist: click>=8.1.0
8
+ Requires-Dist: rich>=14.0
9
+ Requires-Dist: pytest>=7.4.0 ; extra == 'dev'
10
+ Requires-Dist: pytest-cov>=4.1.0 ; extra == 'dev'
11
+ Requires-Dist: ruff>=0.8.0 ; extra == 'dev'
12
+ Requires-Dist: mypy>=1.8.0 ; extra == 'dev'
13
+ Requires-Dist: python-semantic-release>=10.3.1 ; extra == 'dev'
14
+ Requires-Dist: pre-commit>=4.0.0 ; extra == 'dev'
15
+ Requires-Dist: bandit>=1.7.0 ; extra == 'dev'
16
+ Requires-Dist: pip-audit>=2.7.0 ; extra == 'dev'
17
+ Requires-Dist: pip-licenses>=5.0.0 ; extra == 'dev'
18
+ Requires-Dist: pytest-html>=4.0.0 ; extra == 'dev'
19
+ Requires-Python: >=3.12
20
+ Provides-Extra: dev
21
+ Description-Content-Type: text/markdown
22
+
23
+ # augint-shell
24
+
25
+ Launch AI coding tools and local LLMs in Docker containers.
26
+
27
+ ## Installation
28
+
29
+ ```bash
30
+ pip install augint-shell
31
+ ```
32
+
33
+ Or as a dev dependency:
34
+
35
+ ```bash
36
+ uv add --dev augint-shell
37
+ ```
38
+
39
+ ## Quick Start
40
+
41
+ ```bash
42
+ # Launch Claude Code in the current project
43
+ ai-shell claude
44
+
45
+ # Launch with extra args
46
+ ai-shell claude -- --debug
47
+
48
+ # Set up local LLM stack (first time)
49
+ ai-shell llm setup
50
+
51
+ # Launch opencode with local LLM
52
+ ai-shell opencode
53
+ ```
54
+
55
+ ## Commands
56
+
57
+ ### AI Tools
58
+
59
+ | Command | Description |
60
+ |---|---|
61
+ | `ai-shell claude` | Launch Claude Code |
62
+ | `ai-shell claude-x` | Claude Code with skip-permissions |
63
+ | `ai-shell codex` | Launch Codex |
64
+ | `ai-shell opencode` | Launch opencode |
65
+ | `ai-shell aider` | Launch aider with local LLM |
66
+ | `ai-shell shell` | Bash shell in dev container |
67
+
68
+ ### LLM Stack
69
+
70
+ | Command | Description |
71
+ |---|---|
72
+ | `ai-shell llm up` | Start Ollama + Open WebUI |
73
+ | `ai-shell llm down` | Stop LLM stack |
74
+ | `ai-shell llm pull` | Pull configured models |
75
+ | `ai-shell llm setup` | First-time setup (up + pull + configure) |
76
+ | `ai-shell llm status` | Show status and available models |
77
+ | `ai-shell llm logs` | Tail LLM stack logs |
78
+ | `ai-shell llm shell` | Shell into Ollama container |
79
+
80
+ ### Container Management
81
+
82
+ | Command | Description |
83
+ |---|---|
84
+ | `ai-shell manage status` | Show dev container status |
85
+ | `ai-shell manage stop` | Stop dev container |
86
+ | `ai-shell manage clean` | Remove container and volumes |
87
+ | `ai-shell manage logs` | Tail dev container logs |
88
+ | `ai-shell manage pull` | Pull latest Docker image |
89
+
90
+ ## Configuration
91
+
92
+ Optional `ai-shell.toml` in your project root:
93
+
94
+ ```toml
95
+ [container]
96
+ image = "svange/augint-shell"
97
+ image_tag = "latest"
98
+ extra_env = { MY_VAR = "value" }
99
+
100
+ [llm]
101
+ primary_model = "qwen3.5:27b"
102
+ fallback_model = "qwen3-coder-next"
103
+ context_size = 32768
104
+ ollama_port = 11434
105
+ webui_port = 3000
106
+
107
+ [aider]
108
+ model = "ollama_chat/qwen3.5:27b"
109
+ ```
110
+
111
+ Global config at `~/.config/ai-shell/config.toml` is also supported.
112
+
113
+ ## How It Works
114
+
115
+ - Pulls a pre-built Docker image from Docker Hub (`svange/augint-shell`)
116
+ - Creates per-project containers named `augint-shell-{project}-dev`
117
+ - Mounts your project directory, SSH keys, AWS credentials, and tool configs
118
+ - Runs AI tools interactively inside the container
119
+ - Supports concurrent instances across multiple projects
120
+
121
+ ## Requirements
122
+
123
+ - Docker
124
+ - Python >= 3.12
@@ -0,0 +1,16 @@
1
+ ai_shell/__init__.py,sha256=7zyXMlfHkgdJMDt1C9RpHA516t35QyjEnkxiz7Jw3mk,149
2
+ ai_shell/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
+ ai_shell/cli/__main__.py,sha256=SnBm-z-FYBOs27r2SBpap0U9e0_osqkq5qeA_h8ZopU,1108
4
+ ai_shell/cli/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
5
+ ai_shell/cli/commands/llm.py,sha256=IzWBeoZKCWFdrlXSNM_NWorPqo0MkXZwFhEn3mdTlsc,7144
6
+ ai_shell/cli/commands/manage.py,sha256=fVeurwfcg5lEgxwqVy1n8gGO2LZnjtjUZ-zzsm5uS7w,3065
7
+ ai_shell/cli/commands/tools.py,sha256=ijcWqEIJVZVCgktN4yI0ez2IfetTUuI8DRpxsok9xqI,3216
8
+ ai_shell/config.py,sha256=inJh2CBQ9F2RGQuYrTqfGgzMBZEVYw-9pMn2C85ZoG0,4771
9
+ ai_shell/container.py,sha256=f9kBf3rQ_Pqx8ohdBnMUksb4JGe9Aer-GAxSj_cpqUM,11408
10
+ ai_shell/defaults.py,sha256=lBeWHQvPOUMBc147woEniqLMZXB5w3CPFqpWyei2D-I,5106
11
+ ai_shell/exceptions.py,sha256=ipAfi_riLpW057prl1xjIskX4ouI5b2bUKhGdfocANs,744
12
+ ai_shell/gpu.py,sha256=vETkk4jFxz0q4yBhAJy-uNjJW_BPhIv6ZtRpsC_SFhA,2554
13
+ augint_shell-0.1.0.dist-info/WHEEL,sha256=bEhYrD-rjlF0iRRHiAnfJ0mEjMsRwm29hhDD7yRgWCY,80
14
+ augint_shell-0.1.0.dist-info/entry_points.txt,sha256=Sz7xOQqBtBHyd7yHEqeOjzZBjhmzh_CIgGqKZwFfdmo,57
15
+ augint_shell-0.1.0.dist-info/METADATA,sha256=4YmpXO9ni9wKbprtSnWs0dk8Ol5x6bBFDdSZSCX-vn8,3161
16
+ augint_shell-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: uv 0.11.3
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,3 @@
1
+ [console_scripts]
2
+ ai-shell = ai_shell.cli.__main__:main
3
+