augint-shell 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,124 @@
1
+ Metadata-Version: 2.3
2
+ Name: augint-shell
3
+ Version: 0.1.0
4
+ Summary: Launch AI coding tools and local LLMs in Docker containers
5
+ Author: svange
6
+ Requires-Dist: docker>=7.0.0
7
+ Requires-Dist: click>=8.1.0
8
+ Requires-Dist: rich>=14.0
9
+ Requires-Dist: pytest>=7.4.0 ; extra == 'dev'
10
+ Requires-Dist: pytest-cov>=4.1.0 ; extra == 'dev'
11
+ Requires-Dist: ruff>=0.8.0 ; extra == 'dev'
12
+ Requires-Dist: mypy>=1.8.0 ; extra == 'dev'
13
+ Requires-Dist: python-semantic-release>=10.3.1 ; extra == 'dev'
14
+ Requires-Dist: pre-commit>=4.0.0 ; extra == 'dev'
15
+ Requires-Dist: bandit>=1.7.0 ; extra == 'dev'
16
+ Requires-Dist: pip-audit>=2.7.0 ; extra == 'dev'
17
+ Requires-Dist: pip-licenses>=5.0.0 ; extra == 'dev'
18
+ Requires-Dist: pytest-html>=4.0.0 ; extra == 'dev'
19
+ Requires-Python: >=3.12
20
+ Provides-Extra: dev
21
+ Description-Content-Type: text/markdown
22
+
23
+ # augint-shell
24
+
25
+ Launch AI coding tools and local LLMs in Docker containers.
26
+
27
+ ## Installation
28
+
29
+ ```bash
30
+ pip install augint-shell
31
+ ```
32
+
33
+ Or as a dev dependency:
34
+
35
+ ```bash
36
+ uv add --dev augint-shell
37
+ ```
38
+
39
+ ## Quick Start
40
+
41
+ ```bash
42
+ # Launch Claude Code in the current project
43
+ ai-shell claude
44
+
45
+ # Launch with extra args
46
+ ai-shell claude -- --debug
47
+
48
+ # Set up local LLM stack (first time)
49
+ ai-shell llm setup
50
+
51
+ # Launch opencode with local LLM
52
+ ai-shell opencode
53
+ ```
54
+
55
+ ## Commands
56
+
57
+ ### AI Tools
58
+
59
+ | Command | Description |
60
+ |---|---|
61
+ | `ai-shell claude` | Launch Claude Code |
62
+ | `ai-shell claude-x` | Claude Code with skip-permissions |
63
+ | `ai-shell codex` | Launch Codex |
64
+ | `ai-shell opencode` | Launch opencode |
65
+ | `ai-shell aider` | Launch aider with local LLM |
66
+ | `ai-shell shell` | Bash shell in dev container |
67
+
68
+ ### LLM Stack
69
+
70
+ | Command | Description |
71
+ |---|---|
72
+ | `ai-shell llm up` | Start Ollama + Open WebUI |
73
+ | `ai-shell llm down` | Stop LLM stack |
74
+ | `ai-shell llm pull` | Pull configured models |
75
+ | `ai-shell llm setup` | First-time setup (up + pull + configure) |
76
+ | `ai-shell llm status` | Show status and available models |
77
+ | `ai-shell llm logs` | Tail LLM stack logs |
78
+ | `ai-shell llm shell` | Shell into Ollama container |
79
+
80
+ ### Container Management
81
+
82
+ | Command | Description |
83
+ |---|---|
84
+ | `ai-shell manage status` | Show dev container status |
85
+ | `ai-shell manage stop` | Stop dev container |
86
+ | `ai-shell manage clean` | Remove container and volumes |
87
+ | `ai-shell manage logs` | Tail dev container logs |
88
+ | `ai-shell manage pull` | Pull latest Docker image |
89
+
90
+ ## Configuration
91
+
92
+ Optional `ai-shell.toml` in your project root:
93
+
94
+ ```toml
95
+ [container]
96
+ image = "svange/augint-shell"
97
+ image_tag = "latest"
98
+ extra_env = { MY_VAR = "value" }
99
+
100
+ [llm]
101
+ primary_model = "qwen3.5:27b"
102
+ fallback_model = "qwen3-coder-next"
103
+ context_size = 32768
104
+ ollama_port = 11434
105
+ webui_port = 3000
106
+
107
+ [aider]
108
+ model = "ollama_chat/qwen3.5:27b"
109
+ ```
110
+
111
+ Global config at `~/.config/ai-shell/config.toml` is also supported.
112
+
113
+ ## How It Works
114
+
115
+ - Pulls a pre-built Docker image from Docker Hub (`svange/augint-shell`)
116
+ - Creates per-project containers named `augint-shell-{project}-dev`
117
+ - Mounts your project directory, SSH keys, AWS credentials, and tool configs
118
+ - Runs AI tools interactively inside the container
119
+ - Supports concurrent instances across multiple projects
120
+
121
+ ## Requirements
122
+
123
+ - Docker
124
+ - Python >= 3.12
@@ -0,0 +1,102 @@
1
+ # augint-shell
2
+
3
+ Launch AI coding tools and local LLMs in Docker containers.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ pip install augint-shell
9
+ ```
10
+
11
+ Or as a dev dependency:
12
+
13
+ ```bash
14
+ uv add --dev augint-shell
15
+ ```
16
+
17
+ ## Quick Start
18
+
19
+ ```bash
20
+ # Launch Claude Code in the current project
21
+ ai-shell claude
22
+
23
+ # Launch with extra args
24
+ ai-shell claude -- --debug
25
+
26
+ # Set up local LLM stack (first time)
27
+ ai-shell llm setup
28
+
29
+ # Launch opencode with local LLM
30
+ ai-shell opencode
31
+ ```
32
+
33
+ ## Commands
34
+
35
+ ### AI Tools
36
+
37
+ | Command | Description |
38
+ |---|---|
39
+ | `ai-shell claude` | Launch Claude Code |
40
+ | `ai-shell claude-x` | Claude Code with skip-permissions |
41
+ | `ai-shell codex` | Launch Codex |
42
+ | `ai-shell opencode` | Launch opencode |
43
+ | `ai-shell aider` | Launch aider with local LLM |
44
+ | `ai-shell shell` | Bash shell in dev container |
45
+
46
+ ### LLM Stack
47
+
48
+ | Command | Description |
49
+ |---|---|
50
+ | `ai-shell llm up` | Start Ollama + Open WebUI |
51
+ | `ai-shell llm down` | Stop LLM stack |
52
+ | `ai-shell llm pull` | Pull configured models |
53
+ | `ai-shell llm setup` | First-time setup (up + pull + configure) |
54
+ | `ai-shell llm status` | Show status and available models |
55
+ | `ai-shell llm logs` | Tail LLM stack logs |
56
+ | `ai-shell llm shell` | Shell into Ollama container |
57
+
58
+ ### Container Management
59
+
60
+ | Command | Description |
61
+ |---|---|
62
+ | `ai-shell manage status` | Show dev container status |
63
+ | `ai-shell manage stop` | Stop dev container |
64
+ | `ai-shell manage clean` | Remove container and volumes |
65
+ | `ai-shell manage logs` | Tail dev container logs |
66
+ | `ai-shell manage pull` | Pull latest Docker image |
67
+
68
+ ## Configuration
69
+
70
+ Optional `ai-shell.toml` in your project root:
71
+
72
+ ```toml
73
+ [container]
74
+ image = "svange/augint-shell"
75
+ image_tag = "latest"
76
+ extra_env = { MY_VAR = "value" }
77
+
78
+ [llm]
79
+ primary_model = "qwen3.5:27b"
80
+ fallback_model = "qwen3-coder-next"
81
+ context_size = 32768
82
+ ollama_port = 11434
83
+ webui_port = 3000
84
+
85
+ [aider]
86
+ model = "ollama_chat/qwen3.5:27b"
87
+ ```
88
+
89
+ Global config at `~/.config/ai-shell/config.toml` is also supported.
90
+
91
+ ## How It Works
92
+
93
+ - Pulls a pre-built Docker image from Docker Hub (`svange/augint-shell`)
94
+ - Creates per-project containers named `augint-shell-{project}-dev`
95
+ - Mounts your project directory, SSH keys, AWS credentials, and tool configs
96
+ - Runs AI tools interactively inside the container
97
+ - Supports concurrent instances across multiple projects
98
+
99
+ ## Requirements
100
+
101
+ - Docker
102
+ - Python >= 3.12
@@ -0,0 +1,122 @@
1
+ [project]
2
+ name = "augint-shell"
3
+ version = "0.1.0"
4
+ description = "Launch AI coding tools and local LLMs in Docker containers"
5
+ authors = [{name = "svange"}]
6
+ readme = "README.md"
7
+ requires-python = ">=3.12"
8
+ dependencies = [
9
+ "docker>=7.0.0",
10
+ "click>=8.1.0",
11
+ "rich>=14.0",
12
+ ]
13
+
14
+ [project.optional-dependencies]
15
+ dev = [
16
+ "pytest>=7.4.0",
17
+ "pytest-cov>=4.1.0",
18
+ "ruff>=0.8.0",
19
+ "mypy>=1.8.0",
20
+ "python-semantic-release>=10.3.1",
21
+ "pre-commit>=4.0.0",
22
+ "bandit>=1.7.0",
23
+ "pip-audit>=2.7.0",
24
+ "pip-licenses>=5.0.0",
25
+ "pytest-html>=4.0.0",
26
+ ]
27
+
28
+ [project.scripts]
29
+ ai-shell = "ai_shell.cli.__main__:main"
30
+
31
+ [build-system]
32
+ requires = ["uv_build>=0.9,<0.11"]
33
+ build-backend = "uv_build"
34
+
35
+ [tool.uv.build-backend]
36
+ module-name = "ai_shell"
37
+
38
+ [tool.ruff]
39
+ line-length = 100
40
+ target-version = "py312"
41
+
42
+ [tool.ruff.lint]
43
+ select = ["E", "F", "I", "W", "B", "C4", "UP", "DTZ"]
44
+ ignore = ["E501"]
45
+
46
+ [tool.ruff.lint.isort]
47
+ known-first-party = ["ai_shell"]
48
+
49
+ [tool.mypy]
50
+ python_version = "3.12"
51
+ warn_return_any = true
52
+ warn_unused_configs = true
53
+ disallow_untyped_defs = true
54
+
55
+ [[tool.mypy.overrides]]
56
+ module = "ai_shell.cli.commands.*"
57
+ disallow_untyped_defs = false
58
+
59
+ [[tool.mypy.overrides]]
60
+ module = "ai_shell.cli.__main__"
61
+ disallow_untyped_defs = false
62
+
63
+ [[tool.mypy.overrides]]
64
+ module = "docker.*"
65
+ ignore_missing_imports = true
66
+
67
+ [tool.pytest.ini_options]
68
+ testpaths = ["tests"]
69
+ python_files = ["test_*.py"]
70
+ addopts = "-ra -q --strict-markers"
71
+
72
+ [tool.coverage.run]
73
+ source = ["src"]
74
+ omit = ["*/tests/*", "*/test_*.py", "*/cli/*"]
75
+
76
+ [tool.semantic_release]
77
+ assets = ["uv.lock"]
78
+ commit_message = "chore(release): augint-shell {version}\n\nAutomatically generated by python-semantic-release [skip ci]"
79
+ commit_parser = "angular"
80
+ logging_use_named_masks = false
81
+ major_on_zero = false
82
+ allow_zero_version = true
83
+ no_git_verify = false
84
+ tag_format = "augint-shell-v{version}"
85
+ version_toml = ["pyproject.toml:project.version"]
86
+ version_variables = ["src/ai_shell/__init__.py:__version__"]
87
+ build_command = "uv lock && uv build"
88
+
89
+ [tool.semantic_release.branches.main]
90
+ match = "main"
91
+ prerelease = false
92
+ prerelease_token = "rc"
93
+
94
+ [tool.semantic_release.changelog]
95
+ mode = "update"
96
+ changelog_file = "CHANGELOG.md"
97
+ exclude_commit_patterns = [
98
+ '''chore(?:\([^)]*?\))?: .+''',
99
+ '''ci(?:\([^)]*?\))?: .+''',
100
+ '''refactor(?:\([^)]*?\))?: .+''',
101
+ '''style(?:\([^)]*?\))?: .+''',
102
+ '''test(?:\([^)]*?\))?: .+''',
103
+ '''build\((?!deps\): .+)''',
104
+ ]
105
+
106
+ [tool.semantic_release.remote]
107
+ name = "origin"
108
+ type = "github"
109
+ ignore_token_for_push = false
110
+ insecure = false
111
+
112
+ [tool.semantic_release.remote.token]
113
+ env = "GH_TOKEN"
114
+
115
+ [tool.semantic_release.publish]
116
+ dist_glob_patterns = ["dist/*"]
117
+ upload_to_vcs_release = false
118
+
119
+ [dependency-groups]
120
+ dev = [
121
+ "augint-github>=1.3.1",
122
+ ]
@@ -0,0 +1,7 @@
1
+ """augint-shell (ai-shell) - Launch AI coding tools and local LLMs in Docker containers."""
2
+
3
+ __version__ = "0.1.0"
4
+
5
+ __all__ = [
6
+ "__version__",
7
+ ]
File without changes
@@ -0,0 +1,46 @@
1
+ """ai-shell CLI entry point."""
2
+
3
+ import sys
4
+
5
+ import click
6
+
7
+ from ai_shell import __version__
8
+ from ai_shell.cli.commands.llm import llm_group
9
+ from ai_shell.cli.commands.manage import manage_group
10
+ from ai_shell.cli.commands.tools import aider, claude, claude_x, codex, opencode, shell
11
+
12
+
13
@click.group()
@click.version_option(version=__version__, prog_name="ai-shell")
@click.option("--project", default=None, help="Override project name for container naming.")
@click.pass_context
def cli(ctx, project):
    """AI Shell - Launch AI coding tools and local LLMs in Docker containers."""
    # Stash the optional project-name override on the Click context so every
    # subcommand (which receives ctx via @click.pass_context) can read it.
    ctx.ensure_object(dict)  # create ctx.obj as a dict if nothing set it yet
    ctx.obj["project"] = project
21
+
22
+
23
# Register the AI-tool subcommands under their function-derived names.
for _tool in (claude, codex, opencode, aider, shell):
    cli.add_command(_tool)
# claude_x needs an explicit name: "claude-x" is not a valid Python identifier.
cli.add_command(claude_x, "claude-x")

# Attach the nested command groups under their short aliases.
cli.add_command(llm_group, "llm")
cli.add_command(manage_group, "manage")
34
+
35
+
36
def main():
    """Main entry point."""
    # In standalone mode cli() already handles ClickException/Abort itself;
    # anything that still escapes is unexpected, so report it and exit 1.
    try:
        cli()
    except Exception as exc:
        click.echo(f"Error: {exc}", err=True)
        sys.exit(1)


if __name__ == "__main__":
    main()
@@ -0,0 +1,204 @@
1
+ """LLM stack management commands: up, down, pull, setup, status, logs, shell."""
2
+
3
+ import time
4
+ from pathlib import Path
5
+
6
+ import click
7
+ from rich.console import Console
8
+
9
+ from ai_shell.config import load_config
10
+ from ai_shell.container import ContainerManager
11
+ from ai_shell.defaults import OLLAMA_CONTAINER, WEBUI_CONTAINER
12
+
13
+ console = Console(stderr=True)
14
+
15
+
16
def _get_manager(ctx) -> ContainerManager:
    """Create ContainerManager from Click context."""
    # ctx.obj is populated by the root group; it may be falsy when a command
    # is invoked directly (e.g. in tests), so fall back to no override.
    override = None
    if ctx.obj:
        override = ctx.obj.get("project")
    cfg = load_config(project_override=override, project_dir=Path.cwd())
    return ContainerManager(cfg)
21
+
22
+
23
@click.group("llm")
@click.pass_context
def llm_group(ctx):
    """Manage the local LLM stack (Ollama + Open WebUI)."""
    # Intentionally empty: each subcommand builds its own ContainerManager via
    # _get_manager(ctx); the group only provides the "llm" namespace.
27
+
28
+
29
@llm_group.command("up")
@click.pass_context
def llm_up(ctx):
    """Start the LLM stack (Ollama + Open WebUI)."""
    mgr = _get_manager(ctx)
    cfg = mgr.config
    console.print("[bold]Starting LLM stack...[/bold]")

    # Bring up Ollama first, then the WebUI, reporting each endpoint as it starts.
    mgr.ensure_ollama()
    console.print(f" Ollama API: http://localhost:{cfg.ollama_port}")
    mgr.ensure_webui()
    console.print(f" Open WebUI: http://localhost:{cfg.webui_port}")

    console.print("\n[bold green]LLM stack is running.[/bold green]")
    console.print("If this is your first time, run: [bold]ai-shell llm setup[/bold]")
44
+
45
+
46
@llm_group.command("down")
@click.pass_context
def llm_down(ctx):
    """Stop the LLM stack."""
    mgr = _get_manager(ctx)
    console.print("[bold]Stopping LLM stack...[/bold]")

    # WebUI first, then Ollama (reverse of the start-up order in `llm up`).
    for name in (WEBUI_CONTAINER, OLLAMA_CONTAINER):
        state = mgr.container_status(name)
        if state is None:
            console.print(f" Not found: {name}")
        elif state == "running":
            mgr.stop_container(name)
            console.print(f" Stopped: {name}")
        else:
            console.print(f" Already stopped: {name}")

    console.print("[bold green]LLM stack stopped.[/bold green]")
64
+
65
+
66
@llm_group.command("pull")
@click.pass_context
def llm_pull(ctx):
    """Pull LLM models into Ollama."""
    mgr = _get_manager(ctx)
    cfg = mgr.config

    # Pull both configured models; the fallback header carries a leading
    # newline so the two pull transcripts are visually separated.
    pulls = (
        ("", "primary", cfg.primary_model),
        ("\n", "fallback", cfg.fallback_model),
    )
    for lead, label, model in pulls:
        console.print(f"{lead}[bold]Pulling {label} model: {model}...[/bold]")
        console.print(mgr.exec_in_ollama(["ollama", "pull", model]))

    console.print("\n[bold]Available models:[/bold]")
    console.print(mgr.exec_in_ollama(["ollama", "list"]))
84
+
85
+
86
def _wait_for_ollama(manager, attempts: int = 10, delay: float = 2.0) -> None:
    """Poll ``ollama list`` inside the container until it responds.

    Args:
        manager: ContainerManager used to exec into the Ollama container.
        attempts: Number of polls before giving up.
        delay: Seconds to sleep between polls.

    Raises:
        click.Abort: if Ollama is still unresponsive after all attempts.
    """
    for attempt in range(attempts):
        try:
            if manager.exec_in_ollama(["ollama", "list"]) is not None:
                return
        except Exception:
            # Container may still be booting; keep polling.
            pass
        console.print(f" Waiting... ({attempt + 1}/{attempts})")
        time.sleep(delay)
    console.print(f"[bold red]Ollama failed to start after {int(attempts * delay)}s[/bold red]")
    raise click.Abort()


def _configure_context_window(manager, model: str, context_size: int) -> None:
    """Recreate *model* with ``PARAMETER num_ctx`` set to *context_size*.

    The Modelfile text and the model name are handed to the shell as
    positional parameters ($1/$2) rather than interpolated into the command
    string, so a name containing quotes, '$' or '%' cannot break (or subvert)
    the printf/ollama invocation the way the previous
    ``printf "{modelfile}"`` formulation could.
    """
    modelfile = f"FROM {model}\nPARAMETER num_ctx {context_size}\n"
    manager.exec_in_ollama(
        [
            "sh",
            "-c",
            'printf %s "$1" > /tmp/Modelfile'
            ' && ollama create "$2" -f /tmp/Modelfile'
            " && rm -f /tmp/Modelfile",
            "sh",  # $0 for the inline script
            modelfile,  # $1: Modelfile content
            model,  # $2: model name
        ]
    )


@llm_group.command("setup")
@click.pass_context
def llm_setup(ctx):
    """First-time setup: start stack, pull models, configure context window."""
    manager = _get_manager(ctx)
    config = manager.config

    # Start the stack
    console.print("[bold]Starting LLM stack...[/bold]")
    manager.ensure_ollama()
    manager.ensure_webui()

    # Block until the Ollama API answers before issuing pull/create commands.
    console.print("[bold]Waiting for Ollama to be ready...[/bold]")
    _wait_for_ollama(manager)

    # Pull both configured models.
    for label, model in (
        ("primary", config.primary_model),
        ("fallback", config.fallback_model),
    ):
        console.print(f"\n[bold]Pulling {label} model: {model}...[/bold]")
        console.print(manager.exec_in_ollama(["ollama", "pull", model]))

    # Configure the context window. dict.fromkeys de-duplicates while keeping
    # order, so a config with primary == fallback is only rebuilt once.
    console.print(f"\n[bold]Configuring context window ({config.context_size} tokens)...[/bold]")
    for model in dict.fromkeys((config.primary_model, config.fallback_model)):
        _configure_context_window(manager, model, config.context_size)

    console.print("\n[bold green]============================================[/bold green]")
    console.print("[bold green] Setup complete![/bold green]")
    console.print(f"\n Open WebUI: http://localhost:{config.webui_port}")
    console.print(f" Ollama API: http://localhost:{config.ollama_port}")
    console.print(f"\n Primary model: {config.primary_model}")
    console.print(f" Fallback model: {config.fallback_model}")
    console.print(f" Context window: {config.context_size} tokens")
    console.print("[bold green]============================================[/bold green]")
144
+
145
+
146
@llm_group.command("status")
@click.pass_context
def llm_status(ctx):
    """Show status of LLM stack and loaded models."""
    mgr = _get_manager(ctx)

    console.print("[bold]Container status:[/bold]")
    for name in (OLLAMA_CONTAINER, WEBUI_CONTAINER):
        state = mgr.container_status(name)
        if state is None:
            console.print(f" {name}: [red]not found[/red]")
        elif state == "running":
            console.print(f" {name}: [green]{state}[/green]")
        else:
            console.print(f" {name}: [yellow]{state}[/yellow]")

    # Only query the model list when the Ollama container is actually running.
    if mgr.container_status(OLLAMA_CONTAINER) == "running":
        console.print("\n[bold]Available models:[/bold]")
        console.print(mgr.exec_in_ollama(["ollama", "list"]))
167
+
168
+
169
@llm_group.command("logs")
@click.option("--follow", "-f", is_flag=True, help="Follow log output.")
@click.pass_context
def llm_logs(ctx, follow):
    """Tail logs from the LLM stack."""
    mgr = _get_manager(ctx)
    if not follow:
        # One-shot mode: dump the last 50 lines of each container that exists.
        for name in [OLLAMA_CONTAINER, WEBUI_CONTAINER]:
            if mgr.container_status(name) is not None:
                console.print(f"\n[bold]--- {name} ---[/bold]")
                mgr.container_logs(name, follow=False, tail=50)
        return

    # Follow mode: replace this process with `docker logs -f` so Ctrl-C and
    # TTY handling go straight to the docker CLI.
    # NOTE(review): only the Ollama container is followed here, not the WebUI
    # one — confirm whether multi-container following was intended.
    import os
    import sys

    sys.stdout.flush()
    sys.stderr.flush()
    os.execvp("docker", ["docker", "logs", "-f", OLLAMA_CONTAINER])
192
+
193
+
194
@llm_group.command("shell")
@click.pass_context
def llm_shell(ctx):
    """Open a bash shell in the Ollama container."""
    mgr = _get_manager(ctx)
    # Guard clause: an interactive exec into a stopped or missing container
    # would fail anyway, so abort with a hint instead.
    if mgr.container_status(OLLAMA_CONTAINER) != "running":
        console.print("[red]Ollama is not running. Run: ai-shell llm up[/red]")
        raise click.Abort()
    console.print("[bold]Opening shell in Ollama container...[/bold]")
    mgr.exec_interactive(OLLAMA_CONTAINER, ["/bin/bash"])