natshell 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- natshell/__init__.py +0 -0
- natshell/__main__.py +320 -0
- natshell/agent/__init__.py +0 -0
- natshell/agent/context.py +320 -0
- natshell/agent/context_manager.py +205 -0
- natshell/agent/loop.py +428 -0
- natshell/agent/plan.py +118 -0
- natshell/agent/system_prompt.py +78 -0
- natshell/app.py +1138 -0
- natshell/config.default.toml +98 -0
- natshell/config.py +342 -0
- natshell/gpu.py +206 -0
- natshell/inference/__init__.py +0 -0
- natshell/inference/engine.py +52 -0
- natshell/inference/local.py +257 -0
- natshell/inference/ollama.py +139 -0
- natshell/inference/remote.py +125 -0
- natshell/platform.py +39 -0
- natshell/safety/__init__.py +0 -0
- natshell/safety/classifier.py +134 -0
- natshell/tools/__init__.py +0 -0
- natshell/tools/edit_file.py +90 -0
- natshell/tools/execute_shell.py +211 -0
- natshell/tools/list_directory.py +88 -0
- natshell/tools/natshell_help.py +220 -0
- natshell/tools/read_file.py +92 -0
- natshell/tools/registry.py +171 -0
- natshell/tools/run_code.py +208 -0
- natshell/tools/search_files.py +91 -0
- natshell/tools/write_file.py +55 -0
- natshell/ui/__init__.py +0 -0
- natshell/ui/clipboard.py +220 -0
- natshell/ui/commands.py +46 -0
- natshell/ui/styles.tcss +298 -0
- natshell/ui/widgets.py +619 -0
- natshell-0.1.0.dist-info/METADATA +248 -0
- natshell-0.1.0.dist-info/RECORD +41 -0
- natshell-0.1.0.dist-info/WHEEL +5 -0
- natshell-0.1.0.dist-info/entry_points.txt +2 -0
- natshell-0.1.0.dist-info/licenses/LICENSE +21 -0
- natshell-0.1.0.dist-info/top_level.txt +1 -0
natshell/__init__.py
ADDED
|
File without changes
|
natshell/__main__.py
ADDED
|
@@ -0,0 +1,320 @@
|
|
|
1
|
+
"""NatShell entry point — CLI argument parsing, model setup, and app launch."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import argparse
|
|
6
|
+
import asyncio
|
|
7
|
+
import logging
|
|
8
|
+
import subprocess
|
|
9
|
+
import sys
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
|
|
12
|
+
from natshell.config import load_config
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def main() -> None:
    """CLI entry point: parse arguments, select an inference engine, launch the TUI.

    Engine selection precedence, as implemented below:
    CLI flags (--model / --local / --remote) > persisted [engine] preference
    > [remote] config > [ollama] config, with a runtime fallback to the
    local model when the chosen remote server is unreachable.
    """
    parser = argparse.ArgumentParser(
        prog="natshell",
        description="NatShell — Natural language shell interface for Linux",
    )
    parser.add_argument(
        "--config", "-c",
        help="Path to config.toml file",
    )
    parser.add_argument(
        "--model", "-m",
        help="Path to a GGUF model file (overrides config)",
    )
    parser.add_argument(
        "--remote",
        help="URL of an OpenAI-compatible API to use instead of local model "
        "(e.g., http://localhost:11434/v1)",
    )
    parser.add_argument(
        "--remote-model",
        help="Model name for the remote API (e.g., qwen3:4b)",
    )
    parser.add_argument(
        "--local",
        action="store_true",
        help="Force local model (ignore remote/Ollama configuration)",
    )
    parser.add_argument(
        "--download",
        action="store_true",
        help="Download the default model and exit",
    )
    parser.add_argument(
        "--update",
        action="store_true",
        help="Pull latest code and reinstall (git installs only)",
    )
    parser.add_argument(
        "--verbose", "-v",
        action="store_true",
        help="Enable verbose logging",
    )
    parser.add_argument(
        "--danger-fast",
        action="store_true",
        help="Skip all confirmation dialogs. BLOCKED commands are still blocked. "
        "Only use this on VMs or test environments.",
    )

    args = parser.parse_args()

    # Setup logging: DEBUG with -v, otherwise only warnings and above.
    level = logging.DEBUG if args.verbose else logging.WARNING
    logging.basicConfig(level=level, format="%(name)s: %(message)s")

    # Handle self-update early; it replaces the install and exits.
    if args.update:
        _self_update()
        return

    # Load config (args.config may be None -> load_config picks defaults).
    config = load_config(args.config)

    # Conflict check: the two flags force opposite engine choices.
    if args.local and args.remote:
        print("Error: --local and --remote cannot be used together.")
        sys.exit(1)

    # Override config with CLI args (CLI always wins over the config file).
    if args.model:
        config.model.path = args.model
    if args.remote:
        config.remote.url = args.remote
    if args.remote_model:
        config.remote.model = args.remote_model

    # Handle model download. "auto" in config means: fetch the default
    # model on first run. With --download we only fetch and exit.
    if args.download or config.model.path == "auto":
        model_path = _ensure_model(config)
        if args.download:
            print(f"Model ready at: {model_path}")
            return
        config.model.path = model_path

    # Determine remote URL and model (CLI --remote > [remote] > [ollama])
    remote_url = config.remote.url
    remote_model = config.remote.model
    remote_api_key = config.remote.api_key
    use_remote = bool(remote_url)
    # fallback_config stays None unless a remote engine is built; then it
    # carries the local model config so the agent can fall back later.
    fallback_config = None

    # Ensure remote URL has a scheme (bare host:port gets http://).
    if remote_url and not remote_url.startswith(("http://", "https://")):
        remote_url = f"http://{remote_url}"

    # No explicit [remote] config: derive an OpenAI-compatible endpoint
    # from the [ollama] section instead.
    if not remote_url and config.ollama.url:
        from natshell.inference.ollama import normalize_base_url
        base = normalize_base_url(config.ollama.url)
        remote_url = f"{base}/v1"
        remote_model = remote_model or config.ollama.default_model or "qwen3:4b"
        use_remote = True

    # Last-resort default model name for remote usage.
    if not remote_model:
        remote_model = "qwen3:4b"

    # Apply --local override before preference logic
    if args.local:
        use_remote = False

    # Apply persisted engine preference (CLI flags override)
    cli_forced_remote = bool(args.remote)
    cli_forced_local = bool(args.model) or args.local
    if not cli_forced_remote and not cli_forced_local:
        if config.engine.preferred == "local":
            use_remote = False
        elif config.engine.preferred == "remote":
            pass  # keep use_remote as-is (try remote if URL exists)

    # Build the inference engine. Remote path first: ping the server and
    # fall back to local if it cannot be reached.
    if use_remote:
        from natshell.inference.ollama import ping_server

        print(f"Checking remote server: {remote_url}...")
        reachable = asyncio.run(ping_server(remote_url))

        if reachable:
            from natshell.inference.ollama import get_model_context_length
            from natshell.inference.remote import RemoteEngine
            n_ctx = asyncio.run(get_model_context_length(remote_url, remote_model))
            engine = RemoteEngine(
                base_url=remote_url,
                model=remote_model,
                api_key=remote_api_key,
                n_ctx=n_ctx,
            )
            # Remember the local model config so the agent can fall back
            # if the remote engine fails mid-session.
            fallback_config = config.model
            print(f"Using remote model: {remote_model} at {remote_url}")
        else:
            print(f"Remote server unreachable at {remote_url}. Falling back to local model.")
            use_remote = False

    if not use_remote:
        from natshell.inference.local import LocalEngine
        print(f"Loading model: {config.model.path}...")
        engine = LocalEngine(
            model_path=config.model.path,
            n_ctx=config.model.n_ctx,
            n_threads=config.model.n_threads,
            n_gpu_layers=config.model.n_gpu_layers,
            main_gpu=config.model.main_gpu,
        )
        # Best-effort diagnostics: warn when GPU offload was requested but
        # llama-cpp-python lacks GPU support. Never fatal (ImportError is
        # swallowed when llama_cpp itself is absent).
        try:
            from llama_cpp import llama_supports_gpu_offload
            if config.model.n_gpu_layers != 0 and not llama_supports_gpu_offload():
                from natshell.gpu import detect_gpus
                from natshell.platform import is_macos

                gpus = detect_gpus()
                if is_macos():
                    gpu_flag = "-DGGML_METAL=on"
                else:
                    gpu_flag = "-DGGML_VULKAN=on"

                print("WARNING: GPU offloading requested but llama-cpp-python was built without GPU support.")
                if gpus:
                    gpu = gpus[0]
                    vram = f" ({gpu.vram_mb} MB VRAM)" if gpu.vram_mb else ""
                    print(f" Detected GPU: {gpu.name}{vram}")

                print(f' Reinstall with: CMAKE_ARGS="{gpu_flag}" pip install llama-cpp-python --no-binary llama-cpp-python --force-reinstall')

                if not is_macos():
                    _print_vulkan_dep_hint()
        except ImportError:
            pass
        print("Model loaded.")

    # Build the tool registry
    from natshell.tools.registry import create_default_registry
    tools = create_default_registry()

    # Build the safety classifier
    from natshell.safety.classifier import SafetyClassifier
    safety = SafetyClassifier(config.safety)

    # Inject safety config into the natshell_help tool
    from natshell.tools.natshell_help import set_safety_config
    set_safety_config(config.safety)

    # Build the agent
    from natshell.agent.loop import AgentLoop
    agent = AgentLoop(
        engine=engine,
        tools=tools,
        safety=safety,
        config=config.agent,
        fallback_config=fallback_config,
    )

    # Gather system context and initialize agent
    from natshell.agent.context import gather_system_context
    print("Gathering system information...")
    context = asyncio.run(gather_system_context())
    agent.initialize(context)

    # Launch the TUI (blocks until the app exits).
    from natshell.app import NatShellApp
    if args.danger_fast:
        print("WARNING: --danger-fast is active. All confirmations will be skipped.")
    app = NatShellApp(agent=agent, config=config, skip_permissions=args.danger_fast)
    app.run()
|
|
226
|
+
|
|
227
|
+
|
|
228
|
+
def _print_vulkan_dep_hint() -> None:
|
|
229
|
+
"""Print distro-specific instructions for installing Vulkan build deps."""
|
|
230
|
+
import shutil
|
|
231
|
+
from pathlib import Path
|
|
232
|
+
|
|
233
|
+
if shutil.which("rpm-ostree") and Path("/run/ostree-booted").exists():
|
|
234
|
+
print(" Install Vulkan build deps: sudo rpm-ostree install vulkan-devel glslc")
|
|
235
|
+
print(" Then reboot and re-run install.sh")
|
|
236
|
+
elif shutil.which("dnf"):
|
|
237
|
+
print(" Install Vulkan build deps: sudo dnf install vulkan-devel glslc")
|
|
238
|
+
elif shutil.which("apt-get"):
|
|
239
|
+
print(" Install Vulkan build deps: sudo apt install libvulkan-dev glslang-tools")
|
|
240
|
+
elif shutil.which("pacman"):
|
|
241
|
+
print(" Install Vulkan build deps: sudo pacman -S vulkan-headers glslang")
|
|
242
|
+
else:
|
|
243
|
+
print(" Ensure Vulkan development headers and a GLSL shader compiler are installed.")
|
|
244
|
+
|
|
245
|
+
|
|
246
|
+
def _self_update() -> None:
    """Pull latest code and reinstall from the git checkout.

    Exits with status 1 if this is not a git checkout, or if either the
    ``git pull`` or the reinstall step fails.
    """
    # Navigate from src/natshell/__main__.py -> project root
    root = Path(__file__).resolve().parent.parent.parent
    if not (root / ".git").exists():
        print("Not a git install. Re-run install.sh to update.")
        sys.exit(1)

    print(f"Updating NatShell from {root}...")
    result = subprocess.run(
        ["git", "-C", str(root), "pull", "--ff-only"],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        print(f"git pull failed:\n{result.stderr.strip()}")
        sys.exit(1)
    print(result.stdout.strip())

    # Reinstall using the same Python that's running us. Invoke pip as a
    # module (`python -m pip`) rather than guessing the pip script path
    # next to sys.executable — that script may not exist (Windows puts it
    # in Scripts/, and some environments only ship pip as a module).
    result = subprocess.run(
        [sys.executable, "-m", "pip", "install", "-e", str(root), "-q"],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        print(f"pip install failed:\n{result.stderr.strip()}")
        sys.exit(1)

    print("Update complete.")
|
|
277
|
+
|
|
278
|
+
|
|
279
|
+
def _ensure_model(config) -> str:
    """Ensure the default model is downloaded. Returns the model path.

    Prompts interactively before downloading. Exits with status 1 when the
    user declines, huggingface-hub is not installed, or the download fails.
    """
    model_dir = Path.home() / ".local" / "share" / "natshell" / "models"
    model_dir.mkdir(parents=True, exist_ok=True)

    # Already downloaded: reuse it without prompting.
    target = model_dir / config.model.hf_file
    if target.exists():
        return str(target)

    # Prompt user
    print("\nNo local model found.")
    print(f"Download {config.model.hf_file} from {config.model.hf_repo}?")
    print("This is approximately 2.5 GB.\n")

    # Empty input defaults to yes. Accept both "y" and "yes" — previously
    # typing "yes" was rejected and aborted the download.
    response = input("Download now? [Y/n]: ").strip().lower()
    if response and response not in ("y", "yes"):
        print("No model available. Use --model or --remote to specify one.")
        sys.exit(1)

    try:
        from huggingface_hub import hf_hub_download

        print("Downloading from HuggingFace...")
        path = hf_hub_download(
            repo_id=config.model.hf_repo,
            filename=config.model.hf_file,
            local_dir=str(model_dir),
        )
        print(f"Model saved to: {path}")
        return path

    except ImportError:
        print("huggingface-hub is required for model download.")
        print("Install it: pip install huggingface-hub")
        sys.exit(1)
    except Exception as e:
        print(f"Download failed: {e}")
        sys.exit(1)
|
|
317
|
+
|
|
318
|
+
|
|
319
|
+
# Allow `python -m natshell` to launch the app directly.
if __name__ == "__main__":
    main()
|
|
File without changes
|
|
@@ -0,0 +1,320 @@
|
|
|
1
|
+
"""Gather system context for injection into the LLM system prompt."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import os
|
|
7
|
+
import subprocess
|
|
8
|
+
from dataclasses import dataclass, field
|
|
9
|
+
|
|
10
|
+
from natshell.platform import is_macos
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@dataclass
class DiskInfo:
    """One mounted filesystem as reported by ``df -h``.

    All values are kept as the human-readable strings ``df`` printed
    (e.g. "50G", "42%") — they are only ever interpolated into the
    system-prompt text, never computed with.
    """

    mount: str        # mount point, e.g. "/home"
    total: str        # total size, e.g. "50G"
    used: str         # used space, e.g. "21G"
    available: str    # available space, e.g. "29G"
    use_percent: str  # usage percentage, e.g. "42%"
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@dataclass
class NetInfo:
    """One IPv4 network interface address."""

    name: str    # interface name; empty on macOS (not present in grepped ifconfig output)
    ip: str      # IPv4 address
    subnet: str  # CIDR prefix length on Linux; netmask string on macOS
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@dataclass
class SystemContext:
    """Snapshot of the host system, rendered into the LLM system prompt."""

    hostname: str = ""
    distro: str = ""
    kernel: str = ""
    arch: str = ""
    cpu: str = ""
    ram_total_gb: float = 0.0
    ram_available_gb: float = 0.0
    username: str = ""
    is_root: bool = False
    has_sudo: bool = False
    shell: str = ""
    package_manager: str = ""
    cwd: str = ""
    disks: list[DiskInfo] = field(default_factory=list)
    network: list[NetInfo] = field(default_factory=list)
    default_gateway: str = ""
    installed_tools: dict[str, bool] = field(default_factory=dict)
    running_services: list[str] = field(default_factory=list)
    containers: list[str] = field(default_factory=list)

    def to_prompt_text(self) -> str:
        """Format system context as a compact text block for the system prompt."""
        out: list[str] = [
            # Host info — always emitted, even when fields are empty.
            f"Host: {self.hostname} | {self.distro} | {self.kernel} | {self.arch}",
            f"CPU: {self.cpu} | RAM: {self.ram_total_gb:.1f}GB total, "
            f"{self.ram_available_gb:.1f}GB available",
            f"User: {self.username} (sudo: {'yes' if self.has_sudo else 'no'}) | "
            f"Shell: {self.shell} | Pkg: {self.package_manager}",
            f"CWD: {self.cwd}",
        ]

        # Optional sections are emitted only when populated.
        if self.disks:
            rendered = ", ".join(
                f"{d.mount} {d.total} ({d.use_percent} used)" for d in self.disks
            )
            out.append(f"Disks: {rendered}")

        if self.network:
            rendered = " | ".join(f"{n.name} {n.ip}/{n.subnet}" for n in self.network)
            out.append(f"Network: {rendered}")
        if self.default_gateway:
            out.append(f"Gateway: {self.default_gateway}")

        # Tools: present ones first (✓), then missing ones (✗).
        if self.installed_tools:
            have = [name + "✓" for name, ok in self.installed_tools.items() if ok]
            lack = [name + "✗" for name, ok in self.installed_tools.items() if not ok]
            out.append(f"Tools: {' '.join(have + lack)}")

        # Cap list lengths to keep the prompt compact.
        if self.containers:
            out.append(f"Containers: {', '.join(self.containers[:10])}")
        if self.running_services:
            out.append(f"Services: {', '.join(self.running_services[:15])}")

        return "\n".join(out)
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
async def _run(cmd: str) -> str:
|
|
100
|
+
"""Run a shell command and return stripped stdout, or empty string on failure."""
|
|
101
|
+
try:
|
|
102
|
+
result = await asyncio.to_thread(
|
|
103
|
+
subprocess.run,
|
|
104
|
+
["bash", "-c", cmd],
|
|
105
|
+
capture_output=True, text=True, timeout=5,
|
|
106
|
+
)
|
|
107
|
+
return result.stdout.strip()
|
|
108
|
+
except Exception:
|
|
109
|
+
return ""
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
def _parse_linux_df(df_output: str) -> list[DiskInfo]:
    """Parse Linux ``df -h --output=...`` output into DiskInfo entries."""
    entries: list[DiskInfo] = []
    for row in df_output.splitlines():
        fields = row.split()
        # Expect at least: target size used avail pcent; skip short rows.
        if len(fields) < 5:
            continue
        mount, total, used, avail, pcent = fields[:5]
        entries.append(
            DiskInfo(
                mount=mount, total=total, used=used,
                available=avail, use_percent=pcent,
            )
        )
    return entries
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def _parse_macos_df(df_output: str) -> list[DiskInfo]:
    """Parse macOS ``df -h`` output (no ``--output`` flag) into DiskInfo entries."""
    entries: list[DiskInfo] = []
    for row in df_output.splitlines():
        cols = row.split()
        # macOS df -h columns:
        #   Filesystem Size Used Avail Capacity iused ifree %iused Mounted on
        # Require 9+ columns and a percentage in the Capacity slot.
        if len(cols) < 9 or not cols[4].endswith("%"):
            continue
        entries.append(
            DiskInfo(
                mount=" ".join(cols[8:]),  # "Mounted on" may contain spaces
                total=cols[1],
                used=cols[2],
                available=cols[3],
                use_percent=cols[4],
            )
        )
    return entries
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
def _parse_linux_ip(ip_output: str) -> list[NetInfo]:
    """Parse Linux ``ip -4 -o addr show`` output into NetInfo entries."""
    found: list[NetInfo] = []
    for row in ip_output.splitlines():
        cols = row.split()
        # Expect "<iface> <addr>/<prefix>"; skip anything else.
        if len(cols) < 2 or "/" not in cols[1]:
            continue
        addr, prefix = cols[1].split("/")
        if addr.startswith("127."):
            continue  # ignore loopback
        found.append(NetInfo(name=cols[0], ip=addr, subnet=prefix))
    return found
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
def _parse_macos_ifconfig(ifconfig_output: str) -> list[NetInfo]:
    """Parse macOS ``ifconfig | grep 'inet '`` output into NetInfo entries."""
    found: list[NetInfo] = []
    for raw in ifconfig_output.splitlines():
        # Expected shape: "inet 192.168.1.5 netmask 0xffffff00 broadcast ..."
        cols = raw.strip().split()
        if len(cols) < 4 or cols[0] != "inet" or cols[1].startswith("127."):
            continue
        mask = cols[3] if cols[2] == "netmask" else ""
        # The grepped output carries no interface name, hence name="".
        found.append(NetInfo(name="", ip=cols[1], subnet=mask))
    return found
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
async def _gather_linux(ctx: SystemContext) -> None:
    """Gather system context using Linux commands.

    Mutates *ctx* in place. All probes run concurrently via
    ``asyncio.gather``; each one degrades to "" on failure (see ``_run``),
    so missing tools (docker, systemctl, ...) simply leave fields empty.

    NOTE: the tuple unpacking below is positional — it must stay in the
    exact same order as the ``_run(...)`` calls in the gather.
    """
    (
        ctx.hostname,
        distro_line,
        ctx.kernel,
        ctx.arch,
        cpu_line,
        mem_line,
        sudo_check,
        gateway_line,
        df_output,
        ip_output,
        services_output,
        docker_output,
    ) = await asyncio.gather(
        _run("hostname"),
        _run("grep PRETTY_NAME /etc/os-release | cut -d'\"' -f2"),
        _run("uname -r"),
        _run("uname -m"),
        _run("lscpu | grep 'Model name' | sed 's/Model name:\\s*//'"),
        _run("free -b | grep Mem"),
        # sudo -n fails without prompting when no cached credentials exist.
        _run("sudo -n true 2>/dev/null && echo yes || echo no"),
        _run("ip -4 route show default | awk '{print $3}'"),
        # Exclude pseudo-filesystems to keep the disk list meaningful.
        _run("df -h --output=target,size,used,avail,pcent -x tmpfs -x devtmpfs -x squashfs 2>/dev/null | tail -n +2"),
        _run("ip -4 -o addr show | awk '{print $2, $4}'"),
        _run("systemctl list-units --type=service --state=running --no-pager -q 2>/dev/null | awk '{print $1}' | sed 's/.service$//' | head -20"),
        _run("docker ps --format '{{.Names}} ({{.Image}})' 2>/dev/null | head -10"),
    )

    ctx.distro = distro_line or "Unknown"
    ctx.cpu = cpu_line or "Unknown"
    ctx.has_sudo = sudo_check.strip() == "yes"
    ctx.default_gateway = gateway_line

    # Parse memory (free -b output: Mem: total used free shared buff/cache available)
    if mem_line:
        parts = mem_line.split()
        if len(parts) >= 7:
            try:
                ctx.ram_total_gb = int(parts[1]) / (1024 ** 3)
                ctx.ram_available_gb = int(parts[6]) / (1024 ** 3)
            except (ValueError, IndexError):
                # Unexpected free(1) format: leave the 0.0 defaults.
                pass

    ctx.disks = _parse_linux_df(df_output)
    ctx.network = _parse_linux_ip(ip_output)

    if services_output:
        ctx.running_services = [s.strip() for s in services_output.splitlines() if s.strip()]
    if docker_output:
        ctx.containers = [c.strip() for c in docker_output.splitlines() if c.strip()]
|
|
218
|
+
|
|
219
|
+
|
|
220
|
+
async def _gather_macos(ctx: SystemContext) -> None:
    """Gather system context using macOS commands.

    Mutates *ctx* in place; mirrors ``_gather_linux`` but uses
    sysctl/vm_stat/route/ifconfig/launchctl instead of the Linux tools.

    NOTE: the tuple unpacking below is positional — it must stay in the
    exact same order as the ``_run(...)`` calls in the gather.
    """
    (
        ctx.hostname,
        distro_line,
        ctx.kernel,
        ctx.arch,
        cpu_line,
        ram_total_raw,
        ram_avail_raw,
        sudo_check,
        gateway_line,
        df_output,
        ifconfig_output,
        services_output,
        docker_output,
    ) = await asyncio.gather(
        _run("hostname"),
        _run("sw_vers -productName -productVersion"),
        _run("uname -r"),
        _run("uname -m"),
        _run("sysctl -n machdep.cpu.brand_string"),
        _run("sysctl -n hw.memsize"),
        # Approximate "available" RAM as (free + inactive) pages * 4096.
        _run("vm_stat | awk '/Pages free|Pages inactive/ {sum += $NF} END {print sum * 4096}'"),
        # sudo -n fails without prompting when no cached credentials exist.
        _run("sudo -n true 2>/dev/null && echo yes || echo no"),
        _run("route -n get default 2>/dev/null | awk '/gateway/{print $2}'"),
        # Filter out devfs/map pseudo-entries and the header row.
        _run("df -h | grep -vE '^(devfs|map |Filesystem)' | tail -n +1"),
        _run("ifconfig | grep 'inet '"),
        _run("launchctl list 2>/dev/null | awk 'NR>1 {print $3}' | head -20"),
        _run("docker ps --format '{{.Names}} ({{.Image}})' 2>/dev/null | head -10"),
    )

    # sw_vers emits name and version on separate lines; join onto one.
    ctx.distro = distro_line.replace("\n", " ") if distro_line else "macOS"
    ctx.cpu = cpu_line or "Unknown"
    ctx.has_sudo = sudo_check.strip() == "yes"
    ctx.default_gateway = gateway_line

    # Parse memory (bytes -> GiB); leave 0.0 defaults on parse failure.
    if ram_total_raw:
        try:
            ctx.ram_total_gb = int(ram_total_raw) / (1024 ** 3)
        except ValueError:
            pass
    if ram_avail_raw:
        try:
            ctx.ram_available_gb = int(ram_avail_raw) / (1024 ** 3)
        except ValueError:
            pass

    ctx.disks = _parse_macos_df(df_output)
    ctx.network = _parse_macos_ifconfig(ifconfig_output)

    if services_output:
        ctx.running_services = [s.strip() for s in services_output.splitlines() if s.strip()]
    if docker_output:
        ctx.containers = [c.strip() for c in docker_output.splitlines() if c.strip()]
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
async def gather_system_context() -> SystemContext:
    """Gather system information. Non-blocking, tolerates failures.

    Returns a populated :class:`SystemContext`. Every probe goes through
    ``_run`` and degrades to empty output on error, so the returned
    context may have empty fields but the call itself should not raise
    (NOTE: ``os.geteuid`` below is POSIX-only).
    """
    ctx = SystemContext()

    if is_macos():
        await _gather_macos(ctx)
    else:
        await _gather_linux(ctx)

    # Common fields (same on both platforms).
    ctx.username = os.environ.get("USER", "unknown")
    ctx.is_root = os.geteuid() == 0
    ctx.shell = os.environ.get("SHELL", "/bin/sh")
    ctx.cwd = os.getcwd()

    # Detect package manager: first hit in priority order wins.
    pm_list = ["brew", "apt", "dnf", "yum", "pacman", "zypper", "apk", "emerge"]
    if not is_macos():
        # On Linux, don't prioritize brew
        pm_list = ["apt", "dnf", "yum", "pacman", "zypper", "apk", "emerge", "brew"]
    for pm in pm_list:
        check = await _run(f"which {pm} 2>/dev/null")
        if check:
            ctx.package_manager = pm
            break

    # Check common tools (presence only, all probed concurrently).
    tools_to_check = [
        "docker", "git", "nmap", "curl", "wget", "ssh", "python3",
        "node", "go", "rsync", "tmux", "vim", "htop", "jq",
        # Languages & compilers
        "rustc", "gcc", "g++", "clang", "javac", "ruby", "php", "perl",
        # Package managers & build tools
        "pip", "npm", "yarn", "cargo", "composer", "gem", "make", "cmake",
    ]
    tool_checks = await asyncio.gather(
        *[_run(f"which {tool} 2>/dev/null") for tool in tools_to_check]
    )
    # which prints a path when found, "" otherwise -> bool() gives presence.
    ctx.installed_tools = {
        tool: bool(result) for tool, result in zip(tools_to_check, tool_checks)
    }

    return ctx
|