test-ssd 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,71 @@
1
+ Metadata-Version: 2.4
2
+ Name: test-ssd
3
+ Version: 0.1.0
4
+ Summary: UGREEN NAS NVMe SSD screening CLI
5
+ Author: Codex
6
+ Keywords: nvme,ssd,fio,smartctl,nas,ugreen
7
+ Classifier: Development Status :: 3 - Alpha
8
+ Classifier: Environment :: Console
9
+ Classifier: Intended Audience :: System Administrators
10
+ Classifier: Operating System :: POSIX :: Linux
11
+ Classifier: Programming Language :: Python :: 3
12
+ Classifier: Programming Language :: Python :: 3.11
13
+ Classifier: Programming Language :: Python :: 3.12
14
+ Classifier: Topic :: System :: Monitoring
15
+ Classifier: Topic :: System :: Systems Administration
16
+ Classifier: Topic :: Utilities
17
+ Requires-Python: >=3.11
18
+ Description-Content-Type: text/markdown
19
+
20
+ # SSD Test CLI
21
+
22
+ `test-ssd` is a Python 3 CLI for screening refurbished NVMe SSDs on a UGREEN NAS or another Linux host with `smartctl`, `nvme-cli`, `fio`, `lsblk`, and `dmesg` installed.
23
+
24
+ ## Install
25
+
26
+ From PyPI:
27
+
28
+ ```bash
29
+ pipx install test-ssd
30
+ ```
31
+
32
+ or:
33
+
34
+ ```bash
35
+ pip install test-ssd
36
+ ```
37
+
38
+ From this project directory:
39
+
40
+ ```bash
41
+ ./install.sh
42
+ ```
43
+
44
+ That installs the package into the current Python environment and exposes both `test-ssd` and `ssdtest` as shell commands.
45
+
46
+ If `~/.local/bin` is not already on your `PATH`, add it once:
47
+
48
+ ```bash
49
+ export PATH="$HOME/.local/bin:$PATH"
50
+ ```
51
+
52
+ ## Commands
53
+
54
+ ```bash
55
+ test-ssd scan
56
+ test-ssd run --label bb-refurb-01
57
+ test-ssd history
58
+ test-ssd show <run_id>
59
+ ```
60
+
61
+ Module entry point:
62
+
63
+ ```bash
64
+ python3 -m ssdtest scan
65
+ ```
66
+
67
+ ## Notes
68
+
69
+ - `run` requires root.
70
+ - The destructive test writes the full target device.
71
+ - Reports default to `/root/nvme-tests` and include raw command outputs, `report.json`, and `report.md`.
@@ -0,0 +1,52 @@
1
+ # SSD Test CLI
2
+
3
+ `test-ssd` is a Python 3 CLI for screening refurbished NVMe SSDs on a UGREEN NAS or another Linux host with `smartctl`, `nvme-cli`, `fio`, `lsblk`, and `dmesg` installed.
4
+
5
+ ## Install
6
+
7
+ From PyPI:
8
+
9
+ ```bash
10
+ pipx install test-ssd
11
+ ```
12
+
13
+ or:
14
+
15
+ ```bash
16
+ pip install test-ssd
17
+ ```
18
+
19
+ From this project directory:
20
+
21
+ ```bash
22
+ ./install.sh
23
+ ```
24
+
25
+ That installs the package into the current Python environment and exposes both `test-ssd` and `ssdtest` as shell commands.
26
+
27
+ If `~/.local/bin` is not already on your `PATH`, add it once:
28
+
29
+ ```bash
30
+ export PATH="$HOME/.local/bin:$PATH"
31
+ ```
32
+
33
+ ## Commands
34
+
35
+ ```bash
36
+ test-ssd scan
37
+ test-ssd run --label bb-refurb-01
38
+ test-ssd history
39
+ test-ssd show <run_id>
40
+ ```
41
+
42
+ Module entry point:
43
+
44
+ ```bash
45
+ python3 -m ssdtest scan
46
+ ```
47
+
48
+ ## Notes
49
+
50
+ - `run` requires root.
51
+ - The destructive test writes the full target device.
52
+ - Reports default to `/root/nvme-tests` and include raw command outputs, `report.json`, and `report.md`.
@@ -0,0 +1,40 @@
1
# Packaging metadata for the test-ssd CLI (PEP 621, setuptools backend).
[build-system]
requires = ["setuptools>=69", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "test-ssd"
version = "0.1.0"
description = "UGREEN NAS NVMe SSD screening CLI"
readme = "README.md"
requires-python = ">=3.11"
authors = [
    { name = "Codex" }
]
keywords = ["nvme", "ssd", "fio", "smartctl", "nas", "ugreen"]
classifiers = [
    "Development Status :: 3 - Alpha",
    "Environment :: Console",
    "Intended Audience :: System Administrators",
    "Operating System :: POSIX :: Linux",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Topic :: System :: Monitoring",
    "Topic :: System :: Systems Administration",
    "Topic :: Utilities",
]

# Two console command names map to the same CLI entry point.
[project.scripts]
ssdtest = "ssdtest.cli:main"
test-ssd = "ssdtest.cli:main"

[tool.setuptools]
include-package-data = false

# Only the ssdtest package is shipped in built distributions.
[tool.setuptools.packages.find]
include = ["ssdtest*"]

[tool.pytest.ini_options]
testpaths = ["tests"]
pythonpath = ["."]
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,3 @@
1
# Package root for ssdtest: only the version string is public API.
__all__ = ["__version__"]

__version__ = "0.1.0"
@@ -0,0 +1,5 @@
1
from ssdtest.cli import main


# Allows running the package as a script: ``python3 -m ssdtest ...``.
if __name__ == "__main__":
    # main() returns a shell-style exit code; SystemExit propagates it.
    raise SystemExit(main())
@@ -0,0 +1,128 @@
1
+ from __future__ import annotations
2
+
3
+ import argparse
4
+ import sys
5
+ from pathlib import Path
6
+
7
+ from ssdtest.reports import find_report, load_history
8
+ from ssdtest.system import inventory_devices
9
+ from ssdtest.workflow import run_interactive
10
+
11
+
12
+ DEFAULT_REPORT_ROOT = Path("/root/nvme-tests")
13
+
14
+
15
def build_parser() -> argparse.ArgumentParser:
    """Build the argument parser for the test-ssd CLI.

    Registers four subcommands (scan, run, history, show); each accepts
    ``--report-root`` so reports can be kept outside the default directory.
    """
    program_name = Path(sys.argv[0]).name or "test-ssd"
    root_default = str(DEFAULT_REPORT_ROOT)

    top = argparse.ArgumentParser(prog=program_name, description="UGREEN NAS NVMe SSD screening CLI")
    commands = top.add_subparsers(dest="command", required=True)

    scan_cmd = commands.add_parser("scan", help="list detected NVMe devices")
    scan_cmd.add_argument("--report-root", default=root_default)

    run_cmd = commands.add_parser("run", help="run the guided destructive SSD screening workflow")
    run_cmd.add_argument("--label", help="human-readable label for the test run")
    run_cmd.add_argument("--device", help="specific NVMe namespace path, for example /dev/nvme2n1")
    run_cmd.add_argument("--report-root", default=root_default)

    history_cmd = commands.add_parser("history", help="list historical test reports")
    history_cmd.add_argument("--report-root", default=root_default)

    show_cmd = commands.add_parser("show", help="show one historical report summary")
    show_cmd.add_argument("run_id")
    show_cmd.add_argument("--report-root", default=root_default)
    return top
35
+
36
+
37
+ def main(argv: list[str] | None = None) -> int:
38
+ parser = build_parser()
39
+ args = parser.parse_args(argv)
40
+ report_root = Path(args.report_root)
41
+
42
+ if args.command == "scan":
43
+ devices, _ = inventory_devices()
44
+ _print_scan(devices)
45
+ return 0
46
+ if args.command == "run":
47
+ artifacts = run_interactive(report_root=report_root, label=args.label, requested_device=args.device)
48
+ print("")
49
+ print(f"Verdict: {artifacts.verdict.verdict}")
50
+ print(f"Report directory: {artifacts.log_dir}")
51
+ print(f"Markdown report: {artifacts.log_dir / 'report.md'}")
52
+ print(f"JSON report: {artifacts.log_dir / 'report.json'}")
53
+ if artifacts.verdict.reasons:
54
+ print("Reasons:")
55
+ for item in artifacts.verdict.reasons:
56
+ print(f" - {item}")
57
+ if artifacts.verdict.notes:
58
+ print("Notes:")
59
+ for item in artifacts.verdict.notes:
60
+ print(f" - {item}")
61
+ return 0 if artifacts.verdict.verdict == "PASS" else 1
62
+ if args.command == "history":
63
+ _print_history(load_history(report_root))
64
+ return 0
65
+ if args.command == "show":
66
+ try:
67
+ report = find_report(report_root, args.run_id)
68
+ except FileNotFoundError as exc:
69
+ print(str(exc))
70
+ return 1
71
+ _print_show(report)
72
+ return 0
73
+ parser.error("unknown command")
74
+ return 2
75
+
76
+
77
+ def _print_scan(devices) -> None:
78
+ print("PATH\tCTRL\tSIZE_GIB\tMODEL\tSERIAL\tSTATUS")
79
+ for device in devices:
80
+ status: list[str] = []
81
+ if device.is_system:
82
+ status.append("system")
83
+ if device.danger_reasons:
84
+ status.extend(device.danger_reasons)
85
+ if not status:
86
+ status.append("candidate")
87
+ print(
88
+ f"{device.path}\t{device.ctrl_path}\t{device.size_gib:.1f}\t{device.model}\t{device.serial}\t"
89
+ f"{'; '.join(status)}"
90
+ )
91
+
92
+
93
+ def _print_history(records: list[dict[str, object]]) -> None:
94
+ if not records:
95
+ print("No reports found.")
96
+ return
97
+ print("RUN_ID\tSTARTED\tLABEL\tSERIAL\tVERDICT\tDURATION_S")
98
+ for record in records:
99
+ device = record.get("device", {})
100
+ final = record.get("final", {})
101
+ print(
102
+ f"{record.get('run_id')}\t{record.get('started_at')}\t{record.get('label')}\t"
103
+ f"{device.get('serial')}\t{final.get('verdict')}\t{record.get('duration_seconds')}"
104
+ )
105
+
106
+
107
+ def _print_show(report: dict[str, object]) -> None:
108
+ device = report.get("device", {})
109
+ final = report.get("final", {})
110
+ report_path = report.get("_report_path")
111
+ report_dir = Path(report_path).parent if report_path else None
112
+ print(f"Run ID: {report.get('run_id')}")
113
+ print(f"Label: {report.get('label')}")
114
+ print(f"Device: {device.get('dev')} ({device.get('model')}, {device.get('serial')})")
115
+ print(f"Verdict: {final.get('verdict')}")
116
+ print(f"Completed: {final.get('completed')}")
117
+ print(f"Duration: {report.get('duration_seconds')} seconds")
118
+ print(f"Markdown report: {report_dir / 'report.md' if report_dir else 'unknown'}")
119
+ reasons = final.get("reasons") or []
120
+ notes = final.get("notes") or []
121
+ if reasons:
122
+ print("Reasons:")
123
+ for item in reasons:
124
+ print(f" - {item}")
125
+ if notes:
126
+ print("Notes:")
127
+ for item in notes:
128
+ print(f" - {item}")
@@ -0,0 +1,135 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass, field
4
+ from pathlib import Path
5
+
6
+
7
+ @dataclass(slots=True)
8
+ class CommandResult:
9
+ args: list[str]
10
+ stdout: str
11
+ stderr: str
12
+ exit_code: int
13
+ duration_seconds: float
14
+
15
+ @property
16
+ def ok(self) -> bool:
17
+ return self.exit_code == 0
18
+
19
+
20
+ @dataclass(slots=True)
21
+ class DeviceInfo:
22
+ name: str
23
+ path: str
24
+ ctrl_path: str
25
+ size_bytes: int
26
+ model: str
27
+ serial: str
28
+ generic_path: str = ""
29
+ mountpoints: list[str] = field(default_factory=list)
30
+ child_mountpoints: list[str] = field(default_factory=list)
31
+ system_reasons: list[str] = field(default_factory=list)
32
+ danger_reasons: list[str] = field(default_factory=list)
33
+
34
+ @property
35
+ def size_gib(self) -> float:
36
+ return self.size_bytes / (1024 ** 3)
37
+
38
+ @property
39
+ def mounted(self) -> bool:
40
+ return bool(self.mountpoints)
41
+
42
+ @property
43
+ def has_mounted_children(self) -> bool:
44
+ return bool(self.child_mountpoints)
45
+
46
+ @property
47
+ def is_system(self) -> bool:
48
+ return bool(self.system_reasons)
49
+
50
+ @property
51
+ def is_eligible(self) -> bool:
52
+ return not self.danger_reasons and not self.is_system
53
+
54
+
55
+ @dataclass(slots=True)
56
+ class HealthSnapshot:
57
+ critical_warning: int | None = None
58
+ temperature_c: int | None = None
59
+ available_spare: int | None = None
60
+ percentage_used: int | None = None
61
+ data_units_written: int | None = None
62
+ media_errors: int | None = None
63
+ num_err_log_entries: int | None = None
64
+ unsafe_shutdowns: int | None = None
65
+
66
+ def as_dict(self) -> dict[str, int | None]:
67
+ return {
68
+ "critical_warning": self.critical_warning,
69
+ "temperature_c": self.temperature_c,
70
+ "available_spare": self.available_spare,
71
+ "percentage_used": self.percentage_used,
72
+ "data_units_written": self.data_units_written,
73
+ "media_errors": self.media_errors,
74
+ "num_err_log_entries": self.num_err_log_entries,
75
+ "unsafe_shutdowns": self.unsafe_shutdowns,
76
+ }
77
+
78
+
79
+ @dataclass(slots=True)
80
+ class StageResult:
81
+ name: str
82
+ ok: bool
83
+ skipped: bool = False
84
+ allow_failure: bool = False
85
+ exit_code: int | None = None
86
+ duration_seconds: float = 0.0
87
+ command: list[str] = field(default_factory=list)
88
+ log_file: str = ""
89
+ note: str = ""
90
+
91
+ def as_dict(self) -> dict[str, object]:
92
+ return {
93
+ "name": self.name,
94
+ "ok": self.ok,
95
+ "skipped": self.skipped,
96
+ "allow_failure": self.allow_failure,
97
+ "exit_code": self.exit_code,
98
+ "duration_seconds": round(self.duration_seconds, 3),
99
+ "command": self.command,
100
+ "log_file": self.log_file,
101
+ "note": self.note,
102
+ }
103
+
104
+
105
+ @dataclass(slots=True)
106
+ class VerdictResult:
107
+ verdict: str
108
+ reasons: list[str]
109
+ notes: list[str]
110
+ completed: bool
111
+
112
+ def as_dict(self) -> dict[str, object]:
113
+ return {
114
+ "verdict": self.verdict,
115
+ "reasons": self.reasons,
116
+ "notes": self.notes,
117
+ "completed": self.completed,
118
+ }
119
+
120
+
121
+ @dataclass(slots=True)
122
+ class RunArtifacts:
123
+ run_id: str
124
+ label: str
125
+ log_dir: Path
126
+ started_at: str
127
+ ended_at: str
128
+ duration_seconds: float
129
+ device: DeviceInfo
130
+ stage_results: dict[str, StageResult]
131
+ verdict: VerdictResult
132
+ health_before: HealthSnapshot
133
+ health_after: HealthSnapshot
134
+ health_deltas: dict[str, int | float | None]
135
+ dmesg_issues: list[str]
@@ -0,0 +1,161 @@
1
+ from __future__ import annotations
2
+
3
+ import re
4
+ import shlex
5
+ from typing import Any
6
+
7
+ from ssdtest.models import HealthSnapshot
8
+
9
+
10
+ def _parse_int(value: str) -> int | None:
11
+ cleaned = value.strip()
12
+ if not cleaned:
13
+ return None
14
+ match = re.search(r"0x[0-9a-fA-F]+|[-+]?\d[\d,]*", cleaned)
15
+ if not match:
16
+ return None
17
+ token = match.group(0).replace(",", "")
18
+ return int(token, 0)
19
+
20
+
21
+ def _parse_percent(value: str) -> int | None:
22
+ match = re.search(r"(\d+)\s*%?", value)
23
+ return int(match.group(1)) if match else None
24
+
25
+
26
def _parse_temperature(value: str) -> int | None:
    """Parse a temperature reading, preferring an explicit Celsius suffix."""
    celsius = re.search(r"(-?\d+)\s*C", value)
    if celsius is not None:
        return int(celsius.group(1))
    # No "... C" form present: fall back to the generic integer parser.
    return _parse_int(value)
31
+
32
+
33
def parse_nvme_list(text: str) -> list[dict[str, str]]:
    """Parse the table printed by ``nvme list`` into dict rows.

    Header and separator lines are skipped; each remaining row yields
    node, generic, serial, model, namespace, usage, format and fw_rev.
    """
    row_re = re.compile(r"^(?P<node>\S+)\s+(?P<generic>\S+)\s+(?P<serial>\S+)\s+(?P<rest>.+)$")
    parsed: list[dict[str, str]] = []
    for raw in text.splitlines():
        if not raw.strip() or raw.startswith(("Node", "---")):
            continue
        head = row_re.match(raw)
        if head is None:
            continue
        # Columns after the serial are separated by runs of 2+ spaces.
        tail = re.split(r"\s{2,}", head.group("rest").strip())
        if len(tail) < 5:
            continue
        parsed.append(
            {
                "node": head.group("node"),
                "generic": head.group("generic"),
                "serial": head.group("serial"),
                "model": tail[0],
                "namespace": tail[1],
                "usage": tail[2],
                "format": tail[3],
                "fw_rev": tail[4],
            }
        )
    return parsed
57
+
58
+
59
def parse_lsblk_pairs(text: str) -> list[dict[str, str]]:
    """Parse ``lsblk --pairs`` output (KEY="value" tokens) into dicts."""
    parsed: list[dict[str, str]] = []
    for raw in text.splitlines():
        if not raw.strip():
            continue
        # shlex strips the surrounding quotes from each KEY="value" token.
        pairs = [token.split("=", 1) for token in shlex.split(raw) if "=" in token]
        entry = {key: value for key, value in pairs}
        if entry:
            parsed.append(entry)
    return parsed
73
+
74
+
75
def parse_nvme_smart_log(text: str) -> HealthSnapshot:
    """Parse ``nvme smart-log`` "key : value" output into a HealthSnapshot."""
    key_map = {
        "critical_warning": ("critical_warning", _parse_int),
        "temperature": ("temperature_c", _parse_temperature),
        "available_spare": ("available_spare", _parse_percent),
        "percentage_used": ("percentage_used", _parse_percent),
        "data_units_written": ("data_units_written", _parse_int),
        "media_errors": ("media_errors", _parse_int),
        "num_err_log_entries": ("num_err_log_entries", _parse_int),
        "unsafe_shutdowns": ("unsafe_shutdowns", _parse_int),
    }
    collected: dict[str, Any] = {}
    for raw in text.splitlines():
        label, sep, remainder = raw.partition(":")
        if not sep:
            continue
        mapping = key_map.get(label.strip().lower())
        if mapping is not None:
            target, converter = mapping
            collected[target] = converter(remainder)
    return HealthSnapshot(**collected)
96
+
97
+
98
def parse_smartctl_x(text: str) -> HealthSnapshot:
    """Extract NVMe health fields from ``smartctl -x`` text output."""
    patterns = {
        "critical_warning": (r"Critical Warning:\s*(.+)$", _parse_int),
        "temperature_c": (r"Temperature:\s*(.+)$", _parse_temperature),
        "available_spare": (r"Available Spare:\s*(.+)$", _parse_percent),
        "percentage_used": (r"Percentage Used:\s*(.+)$", _parse_percent),
        "data_units_written": (r"Data Units Written:\s*(.+)$", _parse_int),
        "media_errors": (r"Media and Data Integrity Errors:\s*(.+)$", _parse_int),
        "num_err_log_entries": (r"Error Information Log Entries:\s*(.+)$", _parse_int),
        "unsafe_shutdowns": (r"Unsafe Shutdowns:\s*(.+)$", _parse_int),
    }
    collected: dict[str, Any] = {}
    for target, (pattern, converter) in patterns.items():
        hit = re.search(pattern, text, re.MULTILINE | re.IGNORECASE)
        if hit is not None:
            collected[target] = converter(hit.group(1))
    return HealthSnapshot(**collected)
115
+
116
+
117
def merge_health(primary: HealthSnapshot, fallback: HealthSnapshot) -> HealthSnapshot:
    """Combine two snapshots, preferring non-None fields from *primary*."""
    merged = {
        name: getattr(primary, name) if getattr(primary, name) is not None else getattr(fallback, name)
        for name in HealthSnapshot.__dataclass_fields__
    }
    return HealthSnapshot(**merged)
123
+
124
+
125
def parse_nvme_error_log(text: str) -> dict[str, int]:
    """Summarize ``nvme error-log`` output: entry count and maximum error_count."""
    parsed = (
        _parse_int(hit.group(1))
        for hit in re.finditer(r"error_count\s*:\s*(.+)$", text, re.MULTILINE)
    )
    valid = [number for number in parsed if number is not None]
    return {
        "entries_seen": len(valid),
        "max_error_count": max(valid, default=0),
    }
132
+
133
+
134
def extract_new_dmesg_lines(before: str, after: str) -> list[str]:
    """Return the lines present in *after* that were not in *before*.

    When *before* is an exact line-prefix of *after*, the appended tail is
    returned; otherwise the longest common leading run of lines is skipped
    and the remainder of *after* is returned.
    """
    old = before.splitlines()
    new = after.splitlines()
    if len(new) >= len(old) and new[: len(old)] == old:
        return new[len(old):]

    # The log diverged (e.g. ring buffer rotated): skip the shared prefix.
    shared = 0
    limit = min(len(old), len(new))
    while shared < limit and old[shared] == new[shared]:
        shared += 1
    return new[shared:]
144
+
145
+
146
def detect_dmesg_issues(before: str, after: str) -> list[str]:
    """Scan newly-appeared dmesg lines for NVMe / PCIe / I/O error indicators."""
    patterns = [
        r"\bnvme\d+\b.*\b(reset|timeout|abort|failed|error)\b",
        r"\bI/O error\b",
        r"\bcritical medium error\b",
        r"\bcontroller is down\b",
        r"\bPCIe Bus Error\b",
    ]
    flagged: list[str] = []
    for candidate in extract_new_dmesg_lines(before, after):
        folded = candidate.lower()
        # Cheap substring pre-filter before running the regex battery.
        if "nvme" not in folded and "i/o error" not in folded and "pcie" not in folded:
            continue
        if any(re.search(pattern, candidate, re.IGNORECASE) for pattern in patterns):
            flagged.append(candidate)
    return flagged