cloudanalyzer 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cloudanalyzer-0.1.0/LICENSE +21 -0
- cloudanalyzer-0.1.0/PKG-INFO +303 -0
- cloudanalyzer-0.1.0/README.md +266 -0
- cloudanalyzer-0.1.0/ca/__init__.py +3 -0
- cloudanalyzer-0.1.0/ca/align.py +60 -0
- cloudanalyzer-0.1.0/ca/baseline_history.py +98 -0
- cloudanalyzer-0.1.0/ca/batch.py +393 -0
- cloudanalyzer-0.1.0/ca/compare.py +97 -0
- cloudanalyzer-0.1.0/ca/convert.py +41 -0
- cloudanalyzer-0.1.0/ca/core/__init__.py +129 -0
- cloudanalyzer-0.1.0/ca/core/check_baseline_evolution.py +376 -0
- cloudanalyzer-0.1.0/ca/core/check_scaffolding.py +206 -0
- cloudanalyzer-0.1.0/ca/core/check_triage.py +346 -0
- cloudanalyzer-0.1.0/ca/core/checks.py +935 -0
- cloudanalyzer-0.1.0/ca/core/ground_evaluate.py +110 -0
- cloudanalyzer-0.1.0/ca/core/web_progressive_loading.py +232 -0
- cloudanalyzer-0.1.0/ca/core/web_sampling.py +109 -0
- cloudanalyzer-0.1.0/ca/core/web_trajectory_sampling.py +243 -0
- cloudanalyzer-0.1.0/ca/crop.py +53 -0
- cloudanalyzer-0.1.0/ca/density_map.py +84 -0
- cloudanalyzer-0.1.0/ca/diff.py +30 -0
- cloudanalyzer-0.1.0/ca/downsample.py +36 -0
- cloudanalyzer-0.1.0/ca/evaluate.py +132 -0
- cloudanalyzer-0.1.0/ca/experiments/__init__.py +1 -0
- cloudanalyzer-0.1.0/ca/experiments/check_baseline_evolution/__init__.py +29 -0
- cloudanalyzer-0.1.0/ca/experiments/check_baseline_evolution/common.py +389 -0
- cloudanalyzer-0.1.0/ca/experiments/check_baseline_evolution/evaluate.py +409 -0
- cloudanalyzer-0.1.0/ca/experiments/check_baseline_evolution/pareto_promote.py +121 -0
- cloudanalyzer-0.1.0/ca/experiments/check_baseline_evolution/stability_window.py +106 -0
- cloudanalyzer-0.1.0/ca/experiments/check_baseline_evolution/threshold_guard.py +80 -0
- cloudanalyzer-0.1.0/ca/experiments/check_scaffolding/__init__.py +23 -0
- cloudanalyzer-0.1.0/ca/experiments/check_scaffolding/common.py +69 -0
- cloudanalyzer-0.1.0/ca/experiments/check_scaffolding/evaluate.py +428 -0
- cloudanalyzer-0.1.0/ca/experiments/check_scaffolding/literal_profiles.py +149 -0
- cloudanalyzer-0.1.0/ca/experiments/check_scaffolding/object_sections.py +224 -0
- cloudanalyzer-0.1.0/ca/experiments/check_scaffolding/pipeline_overlays.py +165 -0
- cloudanalyzer-0.1.0/ca/experiments/check_triage/__init__.py +23 -0
- cloudanalyzer-0.1.0/ca/experiments/check_triage/common.py +246 -0
- cloudanalyzer-0.1.0/ca/experiments/check_triage/evaluate.py +400 -0
- cloudanalyzer-0.1.0/ca/experiments/check_triage/pareto_frontier.py +105 -0
- cloudanalyzer-0.1.0/ca/experiments/check_triage/severity_weighted.py +63 -0
- cloudanalyzer-0.1.0/ca/experiments/check_triage/signature_cluster.py +76 -0
- cloudanalyzer-0.1.0/ca/experiments/ground_evaluate/__init__.py +23 -0
- cloudanalyzer-0.1.0/ca/experiments/ground_evaluate/common.py +113 -0
- cloudanalyzer-0.1.0/ca/experiments/ground_evaluate/evaluate.py +392 -0
- cloudanalyzer-0.1.0/ca/experiments/ground_evaluate/height_band.py +113 -0
- cloudanalyzer-0.1.0/ca/experiments/ground_evaluate/nearest_neighbor.py +65 -0
- cloudanalyzer-0.1.0/ca/experiments/ground_evaluate/voxel_confusion.py +39 -0
- cloudanalyzer-0.1.0/ca/experiments/process_docs.py +156 -0
- cloudanalyzer-0.1.0/ca/experiments/web_progressive_loading/__init__.py +17 -0
- cloudanalyzer-0.1.0/ca/experiments/web_progressive_loading/common.py +113 -0
- cloudanalyzer-0.1.0/ca/experiments/web_progressive_loading/distance_shells.py +61 -0
- cloudanalyzer-0.1.0/ca/experiments/web_progressive_loading/evaluate.py +518 -0
- cloudanalyzer-0.1.0/ca/experiments/web_progressive_loading/grid_tiles.py +66 -0
- cloudanalyzer-0.1.0/ca/experiments/web_progressive_loading/spatial_shuffle.py +65 -0
- cloudanalyzer-0.1.0/ca/experiments/web_sampling/__init__.py +23 -0
- cloudanalyzer-0.1.0/ca/experiments/web_sampling/common.py +9 -0
- cloudanalyzer-0.1.0/ca/experiments/web_sampling/evaluate.py +534 -0
- cloudanalyzer-0.1.0/ca/experiments/web_sampling/functional_voxel.py +89 -0
- cloudanalyzer-0.1.0/ca/experiments/web_sampling/object_random.py +57 -0
- cloudanalyzer-0.1.0/ca/experiments/web_sampling/pipeline_hybrid.py +82 -0
- cloudanalyzer-0.1.0/ca/experiments/web_trajectory_sampling/__init__.py +15 -0
- cloudanalyzer-0.1.0/ca/experiments/web_trajectory_sampling/common.py +131 -0
- cloudanalyzer-0.1.0/ca/experiments/web_trajectory_sampling/distance_accumulator.py +115 -0
- cloudanalyzer-0.1.0/ca/experiments/web_trajectory_sampling/evaluate.py +506 -0
- cloudanalyzer-0.1.0/ca/experiments/web_trajectory_sampling/turn_aware.py +106 -0
- cloudanalyzer-0.1.0/ca/experiments/web_trajectory_sampling/uniform_stride.py +41 -0
- cloudanalyzer-0.1.0/ca/filter.py +44 -0
- cloudanalyzer-0.1.0/ca/ground_evaluate.py +92 -0
- cloudanalyzer-0.1.0/ca/info.py +34 -0
- cloudanalyzer-0.1.0/ca/io.py +40 -0
- cloudanalyzer-0.1.0/ca/log.py +28 -0
- cloudanalyzer-0.1.0/ca/merge.py +33 -0
- cloudanalyzer-0.1.0/ca/metrics.py +61 -0
- cloudanalyzer-0.1.0/ca/normals.py +41 -0
- cloudanalyzer-0.1.0/ca/pareto.py +155 -0
- cloudanalyzer-0.1.0/ca/pipeline.py +82 -0
- cloudanalyzer-0.1.0/ca/plot.py +217 -0
- cloudanalyzer-0.1.0/ca/registration.py +66 -0
- cloudanalyzer-0.1.0/ca/report.py +3467 -0
- cloudanalyzer-0.1.0/ca/run_evaluate.py +424 -0
- cloudanalyzer-0.1.0/ca/sample.py +47 -0
- cloudanalyzer-0.1.0/ca/split.py +102 -0
- cloudanalyzer-0.1.0/ca/stats.py +62 -0
- cloudanalyzer-0.1.0/ca/trajectory.py +586 -0
- cloudanalyzer-0.1.0/ca/view.py +36 -0
- cloudanalyzer-0.1.0/ca/visualization.py +65 -0
- cloudanalyzer-0.1.0/ca/web.py +1959 -0
- cloudanalyzer-0.1.0/cloudanalyzer.egg-info/PKG-INFO +303 -0
- cloudanalyzer-0.1.0/cloudanalyzer.egg-info/SOURCES.txt +151 -0
- cloudanalyzer-0.1.0/cloudanalyzer.egg-info/dependency_links.txt +1 -0
- cloudanalyzer-0.1.0/cloudanalyzer.egg-info/entry_points.txt +2 -0
- cloudanalyzer-0.1.0/cloudanalyzer.egg-info/requires.txt +12 -0
- cloudanalyzer-0.1.0/cloudanalyzer.egg-info/top_level.txt +2 -0
- cloudanalyzer-0.1.0/cloudanalyzer_cli/__init__.py +1 -0
- cloudanalyzer-0.1.0/cloudanalyzer_cli/main.py +1777 -0
- cloudanalyzer-0.1.0/pyproject.toml +80 -0
- cloudanalyzer-0.1.0/setup.cfg +4 -0
- cloudanalyzer-0.1.0/setup.py +8 -0
- cloudanalyzer-0.1.0/tests/test_align.py +34 -0
- cloudanalyzer-0.1.0/tests/test_baseline_history.py +106 -0
- cloudanalyzer-0.1.0/tests/test_batch.py +162 -0
- cloudanalyzer-0.1.0/tests/test_check_baseline_evolution_process.py +141 -0
- cloudanalyzer-0.1.0/tests/test_check_scaffolding_process.py +45 -0
- cloudanalyzer-0.1.0/tests/test_check_suite.py +376 -0
- cloudanalyzer-0.1.0/tests/test_check_triage_process.py +36 -0
- cloudanalyzer-0.1.0/tests/test_cli.py +1388 -0
- cloudanalyzer-0.1.0/tests/test_compare.py +54 -0
- cloudanalyzer-0.1.0/tests/test_convert.py +39 -0
- cloudanalyzer-0.1.0/tests/test_crop.py +32 -0
- cloudanalyzer-0.1.0/tests/test_density_map.py +37 -0
- cloudanalyzer-0.1.0/tests/test_diff.py +23 -0
- cloudanalyzer-0.1.0/tests/test_downsample.py +27 -0
- cloudanalyzer-0.1.0/tests/test_evaluate.py +102 -0
- cloudanalyzer-0.1.0/tests/test_filter.py +32 -0
- cloudanalyzer-0.1.0/tests/test_ground_evaluate.py +150 -0
- cloudanalyzer-0.1.0/tests/test_ground_evaluate_process.py +41 -0
- cloudanalyzer-0.1.0/tests/test_info.py +29 -0
- cloudanalyzer-0.1.0/tests/test_io.py +53 -0
- cloudanalyzer-0.1.0/tests/test_logging.py +33 -0
- cloudanalyzer-0.1.0/tests/test_merge.py +26 -0
- cloudanalyzer-0.1.0/tests/test_metrics.py +50 -0
- cloudanalyzer-0.1.0/tests/test_normals.py +25 -0
- cloudanalyzer-0.1.0/tests/test_output_json.py +189 -0
- cloudanalyzer-0.1.0/tests/test_pareto.py +98 -0
- cloudanalyzer-0.1.0/tests/test_pipeline.py +24 -0
- cloudanalyzer-0.1.0/tests/test_plot.py +61 -0
- cloudanalyzer-0.1.0/tests/test_process_docs.py +70 -0
- cloudanalyzer-0.1.0/tests/test_public_benchmark_pack.py +93 -0
- cloudanalyzer-0.1.0/tests/test_registration.py +35 -0
- cloudanalyzer-0.1.0/tests/test_report.py +353 -0
- cloudanalyzer-0.1.0/tests/test_run_batch.py +264 -0
- cloudanalyzer-0.1.0/tests/test_run_evaluate.py +132 -0
- cloudanalyzer-0.1.0/tests/test_sample.py +30 -0
- cloudanalyzer-0.1.0/tests/test_split.py +41 -0
- cloudanalyzer-0.1.0/tests/test_stats.py +33 -0
- cloudanalyzer-0.1.0/tests/test_threshold.py +56 -0
- cloudanalyzer-0.1.0/tests/test_traj_batch.py +310 -0
- cloudanalyzer-0.1.0/tests/test_trajectory.py +351 -0
- cloudanalyzer-0.1.0/tests/test_visualization.py +44 -0
- cloudanalyzer-0.1.0/tests/test_web.py +498 -0
- cloudanalyzer-0.1.0/tests/test_web_progressive_loading_core.py +76 -0
- cloudanalyzer-0.1.0/tests/test_web_progressive_loading_evaluate.py +111 -0
- cloudanalyzer-0.1.0/tests/test_web_progressive_loading_process.py +54 -0
- cloudanalyzer-0.1.0/tests/test_web_progressive_loading_strategies.py +125 -0
- cloudanalyzer-0.1.0/tests/test_web_sampling_core.py +148 -0
- cloudanalyzer-0.1.0/tests/test_web_sampling_evaluate.py +208 -0
- cloudanalyzer-0.1.0/tests/test_web_sampling_process.py +68 -0
- cloudanalyzer-0.1.0/tests/test_web_sampling_strategies.py +190 -0
- cloudanalyzer-0.1.0/tests/test_web_trajectory_sampling_core.py +123 -0
- cloudanalyzer-0.1.0/tests/test_web_trajectory_sampling_evaluate.py +119 -0
- cloudanalyzer-0.1.0/tests/test_web_trajectory_sampling_process.py +56 -0
- cloudanalyzer-0.1.0/tests/test_web_trajectory_sampling_strategies.py +136 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 rsasaki0109
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,303 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: cloudanalyzer
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: CLI-first QA toolkit for point cloud, trajectory, and 3D perception outputs
|
|
5
|
+
Author: rsasaki0109
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/rsasaki0109/CloudAnalyzer
|
|
8
|
+
Project-URL: Repository, https://github.com/rsasaki0109/CloudAnalyzer
|
|
9
|
+
Project-URL: Issues, https://github.com/rsasaki0109/CloudAnalyzer/issues
|
|
10
|
+
Project-URL: Documentation, https://github.com/rsasaki0109/CloudAnalyzer#readme
|
|
11
|
+
Keywords: point-cloud,slam,lidar,trajectory,benchmark,qa,open3d
|
|
12
|
+
Classifier: Development Status :: 3 - Alpha
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: Intended Audience :: Science/Research
|
|
15
|
+
Classifier: Programming Language :: Python :: 3
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
19
|
+
Classifier: Topic :: Scientific/Engineering
|
|
20
|
+
Classifier: Topic :: Scientific/Engineering :: GIS
|
|
21
|
+
Classifier: Topic :: Scientific/Engineering :: Information Analysis
|
|
22
|
+
Requires-Python: >=3.10
|
|
23
|
+
Description-Content-Type: text/markdown
|
|
24
|
+
License-File: LICENSE
|
|
25
|
+
Requires-Dist: open3d>=0.17.0
|
|
26
|
+
Requires-Dist: numpy>=1.24.0
|
|
27
|
+
Requires-Dist: typer>=0.9.0
|
|
28
|
+
Requires-Dist: matplotlib>=3.7.0
|
|
29
|
+
Requires-Dist: PyYAML>=6.0
|
|
30
|
+
Provides-Extra: dev
|
|
31
|
+
Requires-Dist: build>=1.2.2; extra == "dev"
|
|
32
|
+
Requires-Dist: mypy; extra == "dev"
|
|
33
|
+
Requires-Dist: pkginfo>=1.12.0; extra == "dev"
|
|
34
|
+
Requires-Dist: pytest; extra == "dev"
|
|
35
|
+
Requires-Dist: twine>=6.0.0; extra == "dev"
|
|
36
|
+
Dynamic: license-file
|
|
37
|
+
|
|
38
|
+
# CloudAnalyzer
|
|
39
|
+
|
|
40
|
+
AI-friendly CLI tool for point cloud analysis and evaluation.
|
|
41
|
+
|
|
42
|
+
For the full product overview (Japanese), demos, and tutorials, see the [repository root README](https://github.com/rsasaki0109/CloudAnalyzer/blob/main/README.md).
|
|
43
|
+
|
|
44
|
+
## Install
|
|
45
|
+
|
|
46
|
+
From this directory (the Python package root):
|
|
47
|
+
|
|
48
|
+
```bash
|
|
49
|
+
pip install cloudanalyzer
|
|
50
|
+
|
|
51
|
+
# latest from source
|
|
52
|
+
git clone https://github.com/rsasaki0109/CloudAnalyzer.git
|
|
53
|
+
cd CloudAnalyzer/cloudanalyzer
|
|
54
|
+
pip install -e .
|
|
55
|
+
|
|
56
|
+
# or with Docker
|
|
57
|
+
docker build -t ca .
|
|
58
|
+
docker run ca info cloud.pcd
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
## Release Sanity Check
|
|
62
|
+
|
|
63
|
+
```bash
|
|
64
|
+
python3 -m pip install -e .[dev]
|
|
65
|
+
python3 -m build
|
|
66
|
+
python3 -m twine check dist/*
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
## Commands
|
|
70
|
+
|
|
71
|
+
There are **31** CLI subcommands (see `ca --help`). Summary:
|
|
72
|
+
|
|
73
|
+
### Analysis & Evaluation
|
|
74
|
+
|
|
75
|
+
| Command | Description |
|
|
76
|
+
|---|---|
|
|
77
|
+
| `ca compare` | Compare two point clouds with ICP/GICP registration |
|
|
78
|
+
| `ca diff` | Quick distance stats (no registration) |
|
|
79
|
+
| `ca evaluate` | F1, Chamfer, Hausdorff, AUC evaluation |
|
|
80
|
+
| `ca check` | Config-driven unified QA (`cloudanalyzer.yaml`) |
|
|
81
|
+
| `ca init-check` | Emit a starter `cloudanalyzer.yaml` profile |
|
|
82
|
+
| `ca ground-evaluate` | Ground segmentation QA (precision/recall/F1/IoU, optional gates) |
|
|
83
|
+
| `ca traj-evaluate` | ATE, translational RPE, drift evaluation for trajectories |
|
|
84
|
+
| `ca traj-batch` | Batch trajectory benchmark with coverage, gate, and reports |
|
|
85
|
+
| `ca run-evaluate` | Combined map + trajectory QA for one run |
|
|
86
|
+
| `ca run-batch` | Combined map + trajectory benchmark across multiple runs |
|
|
87
|
+
| `ca info` | Point cloud metadata (points, BBox, centroid) |
|
|
88
|
+
| `ca stats` | Detailed statistics (density, spacing distribution) |
|
|
89
|
+
| `ca batch` | Run info on all files in a directory |
|
|
90
|
+
|
|
91
|
+
### Processing
|
|
92
|
+
|
|
93
|
+
| Command | Description |
|
|
94
|
+
|---|---|
|
|
95
|
+
| `ca downsample` | Voxel grid downsampling |
|
|
96
|
+
| `ca sample` | Random point sampling |
|
|
97
|
+
| `ca filter` | Statistical outlier removal |
|
|
98
|
+
| `ca merge` | Merge multiple point clouds |
|
|
99
|
+
| `ca align` | Sequential registration + merge |
|
|
100
|
+
| `ca split` | Split into grid tiles |
|
|
101
|
+
| `ca convert` | Format conversion (pcd/ply/las) |
|
|
102
|
+
| `ca normals` | Normal estimation |
|
|
103
|
+
| `ca crop` | Bounding box crop |
|
|
104
|
+
| `ca pipeline` | filter → downsample → evaluate in one step |
|
|
105
|
+
|
|
106
|
+
### Visualization
|
|
107
|
+
|
|
108
|
+
| Command | Description |
|
|
109
|
+
|---|---|
|
|
110
|
+
| `ca web` | Browser 3D viewer, with optional heatmap, reference overlay, and trajectory run overlay |
|
|
111
|
+
| `ca web-export` | Write a static browser viewer bundle (for demos and sharing) |
|
|
112
|
+
| `ca view` | Interactive 3D viewer |
|
|
113
|
+
| `ca density-map` | 2D density heatmap image |
|
|
114
|
+
| `ca heatmap3d` | 3D distance heatmap snapshot |
|
|
115
|
+
|
|
116
|
+
### Baseline history
|
|
117
|
+
|
|
118
|
+
| Command | Description |
|
|
119
|
+
|---|---|
|
|
120
|
+
| `ca baseline-save` | Save a QA summary JSON into a rotating history directory |
|
|
121
|
+
| `ca baseline-list` | List baselines saved in a history directory |
|
|
122
|
+
| `ca baseline-decision` | Promote / keep / reject a candidate baseline vs history |
|
|
123
|
+
|
|
124
|
+
### Utility
|
|
125
|
+
|
|
126
|
+
| Command | Description |
|
|
127
|
+
|---|---|
|
|
128
|
+
| `ca version` | Print CLI version |
|
|
129
|
+
|
|
130
|
+
## Usage Examples
|
|
131
|
+
|
|
132
|
+
```bash
|
|
133
|
+
# === Evaluation ===
|
|
134
|
+
# F1/Chamfer/Hausdorff evaluation with curve plot
|
|
135
|
+
ca evaluate source.pcd reference.pcd \
|
|
136
|
+
-t 0.05,0.1,0.2,0.5,1.0 --plot f1_curve.png
|
|
137
|
+
|
|
138
|
+
# Trajectory evaluation with quality gate
|
|
139
|
+
ca traj-evaluate estimated.csv reference.csv \
|
|
140
|
+
--max-time-delta 0.05 --max-ate 0.5 --max-rpe 0.2 --max-drift 1.0 --min-coverage 0.9 \
|
|
141
|
+
--report trajectory_report.html
|
|
142
|
+
# report also writes sibling trajectory overlay and error timeline PNGs
|
|
143
|
+
|
|
144
|
+
# Ignore constant initial translation offset
|
|
145
|
+
ca traj-evaluate estimated.csv reference.csv --align-origin
|
|
146
|
+
|
|
147
|
+
# Fit a rigid transform before scoring
|
|
148
|
+
ca traj-evaluate estimated.csv reference.csv --align-rigid
|
|
149
|
+
|
|
150
|
+
# Batch trajectory benchmark
|
|
151
|
+
ca traj-batch runs/ --reference-dir gt/ \
|
|
152
|
+
--max-time-delta 0.05 --max-ate 0.5 --max-rpe 0.2 --max-drift 1.0 --min-coverage 0.9 \
|
|
153
|
+
--report traj_batch.html
|
|
154
|
+
# HTML report adds copyable inspection commands plus pass/failed/low-coverage filters and ATE/RPE/coverage sorting
|
|
155
|
+
# low-coverage threshold follows --min-coverage when provided
|
|
156
|
+
|
|
157
|
+
# Combined run QA: map + trajectory in one report
|
|
158
|
+
ca run-evaluate map.pcd map_ref.pcd traj.csv traj_ref.csv \
|
|
159
|
+
--min-auc 0.95 --max-chamfer 0.02 \
|
|
160
|
+
--max-ate 0.5 --max-rpe 0.2 --max-drift 1.0 --min-coverage 0.9 \
|
|
161
|
+
--report run_report.html
|
|
162
|
+
# inspection commands include a `ca web ... --trajectory ... --trajectory-reference ...` run viewer
|
|
163
|
+
|
|
164
|
+
# Combined run batch QA
|
|
165
|
+
ca run-batch maps/ \
|
|
166
|
+
--map-reference-dir map_refs/ \
|
|
167
|
+
--trajectory-dir trajs/ \
|
|
168
|
+
--trajectory-reference-dir traj_refs/ \
|
|
169
|
+
--min-auc 0.95 --max-chamfer 0.02 \
|
|
170
|
+
--max-ate 0.5 --max-rpe 0.2 --max-drift 1.0 --min-coverage 0.9 \
|
|
171
|
+
--report run_batch.html
|
|
172
|
+
# HTML report adds pass/failed/map-issue/trajectory-issue filters and map/trajectory sorting
|
|
173
|
+
# summary and CLI output also split map failures vs trajectory failures
|
|
174
|
+
# inspection commands include both a per-run `ca web ...` run viewer and `ca run-evaluate ...` drill-down command
|
|
175
|
+
|
|
176
|
+
# Full pipeline: filter → downsample → evaluate
|
|
177
|
+
ca pipeline noisy.pcd reference.pcd -o clean.pcd -v 0.2
|
|
178
|
+
|
|
179
|
+
# 3D distance heatmap
|
|
180
|
+
ca heatmap3d estimated.pcd reference.pcd -o heatmap.png
|
|
181
|
+
|
|
182
|
+
# Browser heatmap viewer with reference overlay and threshold filter
|
|
183
|
+
ca web estimated.pcd reference.pcd --heatmap
|
|
184
|
+
|
|
185
|
+
# Browser run viewer: map heatmap + trajectory overlay
|
|
186
|
+
ca web map.pcd map_ref.pcd --heatmap \
|
|
187
|
+
--trajectory traj.csv --trajectory-reference traj_ref.csv
|
|
188
|
+
# paired trajectory があると worst ATE pose と worst RPE segment を viewer 上で強調する
|
|
189
|
+
# marker / segment をクリックすると timestamp と error summary を inspection panel に表示する
|
|
190
|
+
# click 時は camera も選択箇所へ寄り、Reset View で全景に戻せる
|
|
191
|
+
# trajectory error timeline も viewer 内に出て、point click で 3D selection と同期する
|
|
192
|
+
|
|
193
|
+
# === Compare ===
|
|
194
|
+
ca compare source.pcd target.pcd \
|
|
195
|
+
--register gicp --json result.json --report report.md \
|
|
196
|
+
--snapshot diff.png --threshold 0.1
|
|
197
|
+
|
|
198
|
+
# Quick diff
|
|
199
|
+
ca diff a.pcd b.pcd --threshold 0.05
|
|
200
|
+
|
|
201
|
+
# === Processing ===
|
|
202
|
+
# Split large map into 100m tiles
|
|
203
|
+
ca split large_map.pcd -o tiles/ -g 100
|
|
204
|
+
|
|
205
|
+
# Downsample
|
|
206
|
+
ca downsample cloud.pcd -o down.pcd -v 0.05
|
|
207
|
+
|
|
208
|
+
# Filter outliers
|
|
209
|
+
ca filter raw.pcd -o clean.pcd -n 20 -s 2.0
|
|
210
|
+
|
|
211
|
+
# Align multiple scans
|
|
212
|
+
ca align scan1.pcd scan2.pcd scan3.pcd -o aligned.pcd -m gicp
|
|
213
|
+
|
|
214
|
+
# Batch info
|
|
215
|
+
ca batch /path/to/pcds/ -r
|
|
216
|
+
|
|
217
|
+
# Batch evaluation
|
|
218
|
+
ca batch /path/to/results/ --evaluate reference.pcd --format-json | jq '.[].auc'
|
|
219
|
+
ca batch /path/to/results/ --evaluate reference.pcd --report batch_report.html
|
|
220
|
+
# report includes inspection commands; HTML adds Copy buttons plus count-badged summary rows, quick actions, failed-first / recommended-first sort presets, and pass/failed/pareto/recommended controls
|
|
221
|
+
ca batch decoded/ --evaluate reference.pcd --compressed-dir compressed/ --baseline-dir original/
|
|
222
|
+
# report also emits a quality-vs-size scatter plot, Pareto candidates, a recommended point, failed-first / recommended-first sort presets, and HTML filters
|
|
223
|
+
ca batch /path/to/results/ --evaluate reference.pcd --min-auc 0.95 --max-chamfer 0.02
|
|
224
|
+
|
|
225
|
+
# Density heatmap
|
|
226
|
+
ca density-map cloud.pcd -o density.png -r 1.0 -a z
|
|
227
|
+
```
|
|
228
|
+
|
|
229
|
+
## Global Options
|
|
230
|
+
|
|
231
|
+
```bash
|
|
232
|
+
ca --verbose ... # Debug output (stderr)
|
|
233
|
+
ca --quiet ... # Suppress non-error output
|
|
234
|
+
```
|
|
235
|
+
|
|
236
|
+
## Output Options
|
|
237
|
+
|
|
238
|
+
- `--output-json <path>` — Dump result as JSON file
|
|
239
|
+
- `--format-json` — Print JSON to stdout for piping
|
|
240
|
+
- `--plot <path>` — F1 curve plot (evaluate only)
|
|
241
|
+
- `--report <path>` — Markdown/HTML report (`batch`, `traj-evaluate`, `traj-batch`, `run-evaluate`, `run-batch`)
|
|
242
|
+
|
|
243
|
+
```bash
|
|
244
|
+
# Pipe JSON to jq
|
|
245
|
+
ca info cloud.pcd --format-json | jq '.num_points'
|
|
246
|
+
ca evaluate a.pcd b.pcd --format-json | jq '.auc'
|
|
247
|
+
```
|
|
248
|
+
|
|
249
|
+
## CI quality gate
|
|
250
|
+
|
|
251
|
+
Point cloud / trajectory / perception QA is usually driven by `ca check` and a `cloudanalyzer.yaml` config (see [docs/ci.md](https://github.com/rsasaki0109/CloudAnalyzer/blob/main/docs/ci.md) and the [map quality gate tutorial](https://github.com/rsasaki0109/CloudAnalyzer/blob/main/docs/tutorial-map-quality-gate.md)).
|
|
252
|
+
|
|
253
|
+
In **this** GitHub repo, reusable workflows run the same gates in CI. Pin to a **tag or SHA** when calling them from another repository (not floating `@main`).
|
|
254
|
+
|
|
255
|
+
```yaml
|
|
256
|
+
jobs:
|
|
257
|
+
qa:
|
|
258
|
+
uses: rsasaki0109/CloudAnalyzer/.github/workflows/config-quality-gate.yml@main
|
|
259
|
+
with:
|
|
260
|
+
config_path: cloudanalyzer.yaml
|
|
261
|
+
|
|
262
|
+
baseline:
|
|
263
|
+
uses: rsasaki0109/CloudAnalyzer/.github/workflows/baseline-gate.yml@main
|
|
264
|
+
with:
|
|
265
|
+
config_path: cloudanalyzer.yaml
|
|
266
|
+
history_dir: qa/history
|
|
267
|
+
```
|
|
268
|
+
|
|
269
|
+
The repo also ships a [manual quality-gate workflow](https://github.com/rsasaki0109/CloudAnalyzer/actions) that accepts source/reference paths and thresholds for ad-hoc runs.
|
|
270
|
+
|
|
271
|
+
## Python API
|
|
272
|
+
|
|
273
|
+
```python
|
|
274
|
+
from ca.evaluate import evaluate, plot_f1_curve
|
|
275
|
+
from ca.plot import plot_multi_f1, heatmap3d
|
|
276
|
+
from ca.pipeline import run_pipeline
|
|
277
|
+
from ca.split import split
|
|
278
|
+
from ca.info import get_info
|
|
279
|
+
from ca.diff import run_diff
|
|
280
|
+
from ca.downsample import downsample
|
|
281
|
+
from ca.filter import filter_outliers
|
|
282
|
+
|
|
283
|
+
# Evaluate
|
|
284
|
+
result = evaluate("estimated.pcd", "reference.pcd")
|
|
285
|
+
print(f"AUC: {result['auc']:.4f}, Chamfer: {result['chamfer_distance']:.4f}")
|
|
286
|
+
plot_f1_curve(result, "f1_curve.png")
|
|
287
|
+
|
|
288
|
+
# Compare multiple results
|
|
289
|
+
results = [evaluate(f"v{v}.pcd", "ref.pcd") for v in [0.1, 0.2, 0.5]]
|
|
290
|
+
plot_multi_f1(results, ["v0.1", "v0.2", "v0.5"], "comparison.png")
|
|
291
|
+
|
|
292
|
+
# Pipeline
|
|
293
|
+
result = run_pipeline("noisy.pcd", "reference.pcd", "clean.pcd", voxel_size=0.2)
|
|
294
|
+
|
|
295
|
+
# Split
|
|
296
|
+
result = split("large.pcd", "tiles/", grid_size=100.0)
|
|
297
|
+
```
|
|
298
|
+
|
|
299
|
+
## Supported Formats
|
|
300
|
+
|
|
301
|
+
- `.pcd` (Point Cloud Data)
|
|
302
|
+
- `.ply` (Polygon File Format)
|
|
303
|
+
- `.las` (LiDAR)
|
|
@@ -0,0 +1,266 @@
|
|
|
1
|
+
# CloudAnalyzer
|
|
2
|
+
|
|
3
|
+
AI-friendly CLI tool for point cloud analysis and evaluation.
|
|
4
|
+
|
|
5
|
+
For the full product overview (Japanese), demos, and tutorials, see the [repository root README](https://github.com/rsasaki0109/CloudAnalyzer/blob/main/README.md).
|
|
6
|
+
|
|
7
|
+
## Install
|
|
8
|
+
|
|
9
|
+
From this directory (the Python package root):
|
|
10
|
+
|
|
11
|
+
```bash
|
|
12
|
+
pip install cloudanalyzer
|
|
13
|
+
|
|
14
|
+
# latest from source
|
|
15
|
+
git clone https://github.com/rsasaki0109/CloudAnalyzer.git
|
|
16
|
+
cd CloudAnalyzer/cloudanalyzer
|
|
17
|
+
pip install -e .
|
|
18
|
+
|
|
19
|
+
# or with Docker
|
|
20
|
+
docker build -t ca .
|
|
21
|
+
docker run ca info cloud.pcd
|
|
22
|
+
```
|
|
23
|
+
|
|
24
|
+
## Release Sanity Check
|
|
25
|
+
|
|
26
|
+
```bash
|
|
27
|
+
python3 -m pip install -e .[dev]
|
|
28
|
+
python3 -m build
|
|
29
|
+
python3 -m twine check dist/*
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
## Commands
|
|
33
|
+
|
|
34
|
+
There are **31** CLI subcommands (see `ca --help`). Summary:
|
|
35
|
+
|
|
36
|
+
### Analysis & Evaluation
|
|
37
|
+
|
|
38
|
+
| Command | Description |
|
|
39
|
+
|---|---|
|
|
40
|
+
| `ca compare` | Compare two point clouds with ICP/GICP registration |
|
|
41
|
+
| `ca diff` | Quick distance stats (no registration) |
|
|
42
|
+
| `ca evaluate` | F1, Chamfer, Hausdorff, AUC evaluation |
|
|
43
|
+
| `ca check` | Config-driven unified QA (`cloudanalyzer.yaml`) |
|
|
44
|
+
| `ca init-check` | Emit a starter `cloudanalyzer.yaml` profile |
|
|
45
|
+
| `ca ground-evaluate` | Ground segmentation QA (precision/recall/F1/IoU, optional gates) |
|
|
46
|
+
| `ca traj-evaluate` | ATE, translational RPE, drift evaluation for trajectories |
|
|
47
|
+
| `ca traj-batch` | Batch trajectory benchmark with coverage, gate, and reports |
|
|
48
|
+
| `ca run-evaluate` | Combined map + trajectory QA for one run |
|
|
49
|
+
| `ca run-batch` | Combined map + trajectory benchmark across multiple runs |
|
|
50
|
+
| `ca info` | Point cloud metadata (points, BBox, centroid) |
|
|
51
|
+
| `ca stats` | Detailed statistics (density, spacing distribution) |
|
|
52
|
+
| `ca batch` | Run info on all files in a directory |
|
|
53
|
+
|
|
54
|
+
### Processing
|
|
55
|
+
|
|
56
|
+
| Command | Description |
|
|
57
|
+
|---|---|
|
|
58
|
+
| `ca downsample` | Voxel grid downsampling |
|
|
59
|
+
| `ca sample` | Random point sampling |
|
|
60
|
+
| `ca filter` | Statistical outlier removal |
|
|
61
|
+
| `ca merge` | Merge multiple point clouds |
|
|
62
|
+
| `ca align` | Sequential registration + merge |
|
|
63
|
+
| `ca split` | Split into grid tiles |
|
|
64
|
+
| `ca convert` | Format conversion (pcd/ply/las) |
|
|
65
|
+
| `ca normals` | Normal estimation |
|
|
66
|
+
| `ca crop` | Bounding box crop |
|
|
67
|
+
| `ca pipeline` | filter → downsample → evaluate in one step |
|
|
68
|
+
|
|
69
|
+
### Visualization
|
|
70
|
+
|
|
71
|
+
| Command | Description |
|
|
72
|
+
|---|---|
|
|
73
|
+
| `ca web` | Browser 3D viewer, with optional heatmap, reference overlay, and trajectory run overlay |
|
|
74
|
+
| `ca web-export` | Write a static browser viewer bundle (for demos and sharing) |
|
|
75
|
+
| `ca view` | Interactive 3D viewer |
|
|
76
|
+
| `ca density-map` | 2D density heatmap image |
|
|
77
|
+
| `ca heatmap3d` | 3D distance heatmap snapshot |
|
|
78
|
+
|
|
79
|
+
### Baseline history
|
|
80
|
+
|
|
81
|
+
| Command | Description |
|
|
82
|
+
|---|---|
|
|
83
|
+
| `ca baseline-save` | Save a QA summary JSON into a rotating history directory |
|
|
84
|
+
| `ca baseline-list` | List baselines saved in a history directory |
|
|
85
|
+
| `ca baseline-decision` | Promote / keep / reject a candidate baseline vs history |
|
|
86
|
+
|
|
87
|
+
### Utility
|
|
88
|
+
|
|
89
|
+
| Command | Description |
|
|
90
|
+
|---|---|
|
|
91
|
+
| `ca version` | Print CLI version |
|
|
92
|
+
|
|
93
|
+
## Usage Examples
|
|
94
|
+
|
|
95
|
+
```bash
|
|
96
|
+
# === Evaluation ===
|
|
97
|
+
# F1/Chamfer/Hausdorff evaluation with curve plot
|
|
98
|
+
ca evaluate source.pcd reference.pcd \
|
|
99
|
+
-t 0.05,0.1,0.2,0.5,1.0 --plot f1_curve.png
|
|
100
|
+
|
|
101
|
+
# Trajectory evaluation with quality gate
|
|
102
|
+
ca traj-evaluate estimated.csv reference.csv \
|
|
103
|
+
--max-time-delta 0.05 --max-ate 0.5 --max-rpe 0.2 --max-drift 1.0 --min-coverage 0.9 \
|
|
104
|
+
--report trajectory_report.html
|
|
105
|
+
# report also writes sibling trajectory overlay and error timeline PNGs
|
|
106
|
+
|
|
107
|
+
# Ignore constant initial translation offset
|
|
108
|
+
ca traj-evaluate estimated.csv reference.csv --align-origin
|
|
109
|
+
|
|
110
|
+
# Fit a rigid transform before scoring
|
|
111
|
+
ca traj-evaluate estimated.csv reference.csv --align-rigid
|
|
112
|
+
|
|
113
|
+
# Batch trajectory benchmark
|
|
114
|
+
ca traj-batch runs/ --reference-dir gt/ \
|
|
115
|
+
--max-time-delta 0.05 --max-ate 0.5 --max-rpe 0.2 --max-drift 1.0 --min-coverage 0.9 \
|
|
116
|
+
--report traj_batch.html
|
|
117
|
+
# HTML report adds copyable inspection commands plus pass/failed/low-coverage filters and ATE/RPE/coverage sorting
|
|
118
|
+
# low-coverage threshold follows --min-coverage when provided
|
|
119
|
+
|
|
120
|
+
# Combined run QA: map + trajectory in one report
|
|
121
|
+
ca run-evaluate map.pcd map_ref.pcd traj.csv traj_ref.csv \
|
|
122
|
+
--min-auc 0.95 --max-chamfer 0.02 \
|
|
123
|
+
--max-ate 0.5 --max-rpe 0.2 --max-drift 1.0 --min-coverage 0.9 \
|
|
124
|
+
--report run_report.html
|
|
125
|
+
# inspection commands include a `ca web ... --trajectory ... --trajectory-reference ...` run viewer
|
|
126
|
+
|
|
127
|
+
# Combined run batch QA
|
|
128
|
+
ca run-batch maps/ \
|
|
129
|
+
--map-reference-dir map_refs/ \
|
|
130
|
+
--trajectory-dir trajs/ \
|
|
131
|
+
--trajectory-reference-dir traj_refs/ \
|
|
132
|
+
--min-auc 0.95 --max-chamfer 0.02 \
|
|
133
|
+
--max-ate 0.5 --max-rpe 0.2 --max-drift 1.0 --min-coverage 0.9 \
|
|
134
|
+
--report run_batch.html
|
|
135
|
+
# HTML report adds pass/failed/map-issue/trajectory-issue filters and map/trajectory sorting
|
|
136
|
+
# summary and CLI output also split map failures vs trajectory failures
|
|
137
|
+
# inspection commands include both a per-run `ca web ...` run viewer and `ca run-evaluate ...` drill-down command
|
|
138
|
+
|
|
139
|
+
# Full pipeline: filter → downsample → evaluate
|
|
140
|
+
ca pipeline noisy.pcd reference.pcd -o clean.pcd -v 0.2
|
|
141
|
+
|
|
142
|
+
# 3D distance heatmap
|
|
143
|
+
ca heatmap3d estimated.pcd reference.pcd -o heatmap.png
|
|
144
|
+
|
|
145
|
+
# Browser heatmap viewer with reference overlay and threshold filter
|
|
146
|
+
ca web estimated.pcd reference.pcd --heatmap
|
|
147
|
+
|
|
148
|
+
# Browser run viewer: map heatmap + trajectory overlay
|
|
149
|
+
ca web map.pcd map_ref.pcd --heatmap \
|
|
150
|
+
--trajectory traj.csv --trajectory-reference traj_ref.csv
|
|
151
|
+
# with a paired trajectory, the viewer highlights the worst ATE pose and the worst RPE segment
|
|
152
|
+
# clicking a marker / segment shows its timestamp and error summary in the inspection panel
|
|
153
|
+
# on click the camera also zooms to the selection; Reset View returns to the full scene
|
|
154
|
+
# a trajectory error timeline is also shown in the viewer and syncs with the 3D selection on point click
|
|
155
|
+
|
|
156
|
+
# === Compare ===
|
|
157
|
+
ca compare source.pcd target.pcd \
|
|
158
|
+
--register gicp --json result.json --report report.md \
|
|
159
|
+
--snapshot diff.png --threshold 0.1
|
|
160
|
+
|
|
161
|
+
# Quick diff
|
|
162
|
+
ca diff a.pcd b.pcd --threshold 0.05
|
|
163
|
+
|
|
164
|
+
# === Processing ===
|
|
165
|
+
# Split large map into 100m tiles
|
|
166
|
+
ca split large_map.pcd -o tiles/ -g 100
|
|
167
|
+
|
|
168
|
+
# Downsample
|
|
169
|
+
ca downsample cloud.pcd -o down.pcd -v 0.05
|
|
170
|
+
|
|
171
|
+
# Filter outliers
|
|
172
|
+
ca filter raw.pcd -o clean.pcd -n 20 -s 2.0
|
|
173
|
+
|
|
174
|
+
# Align multiple scans
|
|
175
|
+
ca align scan1.pcd scan2.pcd scan3.pcd -o aligned.pcd -m gicp
|
|
176
|
+
|
|
177
|
+
# Batch info
|
|
178
|
+
ca batch /path/to/pcds/ -r
|
|
179
|
+
|
|
180
|
+
# Batch evaluation
|
|
181
|
+
ca batch /path/to/results/ --evaluate reference.pcd --format-json | jq '.[].auc'
|
|
182
|
+
ca batch /path/to/results/ --evaluate reference.pcd --report batch_report.html
|
|
183
|
+
# report includes inspection commands; HTML adds Copy buttons plus count-badged summary rows, quick actions, failed-first / recommended-first sort presets, and pass/failed/pareto/recommended controls
|
|
184
|
+
ca batch decoded/ --evaluate reference.pcd --compressed-dir compressed/ --baseline-dir original/
|
|
185
|
+
# report also emits a quality-vs-size scatter plot, Pareto candidates, a recommended point, failed-first / recommended-first sort presets, and HTML filters
|
|
186
|
+
ca batch /path/to/results/ --evaluate reference.pcd --min-auc 0.95 --max-chamfer 0.02
|
|
187
|
+
|
|
188
|
+
# Density heatmap
|
|
189
|
+
ca density-map cloud.pcd -o density.png -r 1.0 -a z
|
|
190
|
+
```
|
|
191
|
+
|
|
192
|
+
## Global Options
|
|
193
|
+
|
|
194
|
+
```bash
|
|
195
|
+
ca --verbose ... # Debug output (stderr)
|
|
196
|
+
ca --quiet ... # Suppress non-error output
|
|
197
|
+
```
|
|
198
|
+
|
|
199
|
+
## Output Options
|
|
200
|
+
|
|
201
|
+
- `--output-json <path>` — Dump result as JSON file
|
|
202
|
+
- `--format-json` — Print JSON to stdout for piping
|
|
203
|
+
- `--plot <path>` — F1 curve plot (evaluate only)
|
|
204
|
+
- `--report <path>` — Markdown/HTML report (`batch`, `traj-evaluate`, `traj-batch`, `run-evaluate`, `run-batch`)
|
|
205
|
+
|
|
206
|
+
```bash
|
|
207
|
+
# Pipe JSON to jq
|
|
208
|
+
ca info cloud.pcd --format-json | jq '.num_points'
|
|
209
|
+
ca evaluate a.pcd b.pcd --format-json | jq '.auc'
|
|
210
|
+
```
|
|
211
|
+
|
|
212
|
+
## CI quality gate
|
|
213
|
+
|
|
214
|
+
Point cloud / trajectory / perception QA is usually driven by `ca check` and a `cloudanalyzer.yaml` config (see [docs/ci.md](https://github.com/rsasaki0109/CloudAnalyzer/blob/main/docs/ci.md) and the [map quality gate tutorial](https://github.com/rsasaki0109/CloudAnalyzer/blob/main/docs/tutorial-map-quality-gate.md)).
|
|
215
|
+
|
|
216
|
+
In **this** GitHub repo, reusable workflows run the same gates in CI. Pin to a **tag or SHA** when calling them from another repository (not floating `@main`).
|
|
217
|
+
|
|
218
|
+
```yaml
|
|
219
|
+
jobs:
|
|
220
|
+
qa:
|
|
221
|
+
    uses: rsasaki0109/CloudAnalyzer/.github/workflows/config-quality-gate.yml@v0.1.0  # pin to a tag or SHA
|
|
222
|
+
with:
|
|
223
|
+
config_path: cloudanalyzer.yaml
|
|
224
|
+
|
|
225
|
+
baseline:
|
|
226
|
+
    uses: rsasaki0109/CloudAnalyzer/.github/workflows/baseline-gate.yml@v0.1.0  # pin to a tag or SHA
|
|
227
|
+
with:
|
|
228
|
+
config_path: cloudanalyzer.yaml
|
|
229
|
+
history_dir: qa/history
|
|
230
|
+
```
|
|
231
|
+
|
|
232
|
+
The repo also ships a [manual quality-gate workflow](https://github.com/rsasaki0109/CloudAnalyzer/actions) that accepts source/reference paths and thresholds for ad-hoc runs.
|
|
233
|
+
|
|
234
|
+
## Python API
|
|
235
|
+
|
|
236
|
+
```python
|
|
237
|
+
from ca.evaluate import evaluate, plot_f1_curve
|
|
238
|
+
from ca.plot import plot_multi_f1, heatmap3d
|
|
239
|
+
from ca.pipeline import run_pipeline
|
|
240
|
+
from ca.split import split
|
|
241
|
+
from ca.info import get_info
|
|
242
|
+
from ca.diff import run_diff
|
|
243
|
+
from ca.downsample import downsample
|
|
244
|
+
from ca.filter import filter_outliers
|
|
245
|
+
|
|
246
|
+
# Evaluate
|
|
247
|
+
result = evaluate("estimated.pcd", "reference.pcd")
|
|
248
|
+
print(f"AUC: {result['auc']:.4f}, Chamfer: {result['chamfer_distance']:.4f}")
|
|
249
|
+
plot_f1_curve(result, "f1_curve.png")
|
|
250
|
+
|
|
251
|
+
# Compare multiple results
|
|
252
|
+
results = [evaluate(f"v{v}.pcd", "ref.pcd") for v in [0.1, 0.2, 0.5]]
|
|
253
|
+
plot_multi_f1(results, ["v0.1", "v0.2", "v0.5"], "comparison.png")
|
|
254
|
+
|
|
255
|
+
# Pipeline
|
|
256
|
+
result = run_pipeline("noisy.pcd", "reference.pcd", "clean.pcd", voxel_size=0.2)
|
|
257
|
+
|
|
258
|
+
# Split
|
|
259
|
+
result = split("large.pcd", "tiles/", grid_size=100.0)
|
|
260
|
+
```
|
|
261
|
+
|
|
262
|
+
## Supported Formats
|
|
263
|
+
|
|
264
|
+
- `.pcd` (Point Cloud Data)
|
|
265
|
+
- `.ply` (Polygon File Format)
|
|
266
|
+
- `.las` (LiDAR)
|
|
@@ -0,0 +1,60 @@
|
|
|
1
|
+
"""Sequential registration and merge (align multiple scans)."""
|
|
2
|
+
|
|
3
|
+
import open3d as o3d
|
|
4
|
+
|
|
5
|
+
from ca.io import load_point_cloud
|
|
6
|
+
from ca.registration import register
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def align(
    paths: list[str],
    output_path: str,
    method: str = "gicp",
    max_correspondence_distance: float = 1.0,
) -> dict:
    """Align multiple point clouds sequentially and merge.

    The first cloud is the reference. Each subsequent cloud is registered
    to the accumulated result, then merged.

    Args:
        paths: List of point cloud file paths (>= 2).
        output_path: Output file path for merged result.
        method: Registration method ("icp" or "gicp").
        max_correspondence_distance: Max correspondence distance.

    Returns:
        Dict with per-step registration results and total points.

    Raises:
        ValueError: If fewer than 2 paths are given.
        IOError: If the merged cloud cannot be written to ``output_path``.
    """
    if len(paths) < 2:
        raise ValueError("At least 2 point clouds are required for alignment")

    accumulated = load_point_cloud(paths[0])
    steps = []

    for i, path in enumerate(paths[1:], start=1):
        source = load_point_cloud(path)
        transformed, fitness, rmse = register(
            source, accumulated, method=method,
            max_correspondence_distance=max_correspondence_distance,
        )
        steps.append({
            "step": i,
            "path": path,
            "fitness": fitness,
            "rmse": rmse,
        })
        # Grow the reference so each later scan registers against
        # everything accumulated so far, not just the first cloud.
        accumulated += transformed

    # write_point_cloud returns False on failure instead of raising;
    # surface that explicitly so callers do not get a silent no-op
    # plus a success-looking result dict.
    if not o3d.io.write_point_cloud(output_path, accumulated):
        raise IOError(f"Failed to write merged point cloud to {output_path}")

    return {
        "output": output_path,
        "total_points": len(accumulated.points),
        "num_inputs": len(paths),
        "method": method,
        "steps": steps,
    }
|