cloudanalyzer 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. ca/__init__.py +3 -0
  2. ca/align.py +60 -0
  3. ca/baseline_history.py +98 -0
  4. ca/batch.py +393 -0
  5. ca/compare.py +97 -0
  6. ca/convert.py +41 -0
  7. ca/core/__init__.py +129 -0
  8. ca/core/check_baseline_evolution.py +376 -0
  9. ca/core/check_scaffolding.py +206 -0
  10. ca/core/check_triage.py +346 -0
  11. ca/core/checks.py +935 -0
  12. ca/core/ground_evaluate.py +110 -0
  13. ca/core/web_progressive_loading.py +232 -0
  14. ca/core/web_sampling.py +109 -0
  15. ca/core/web_trajectory_sampling.py +243 -0
  16. ca/crop.py +53 -0
  17. ca/density_map.py +84 -0
  18. ca/diff.py +30 -0
  19. ca/downsample.py +36 -0
  20. ca/evaluate.py +132 -0
  21. ca/experiments/__init__.py +1 -0
  22. ca/experiments/check_baseline_evolution/__init__.py +29 -0
  23. ca/experiments/check_baseline_evolution/common.py +389 -0
  24. ca/experiments/check_baseline_evolution/evaluate.py +409 -0
  25. ca/experiments/check_baseline_evolution/pareto_promote.py +121 -0
  26. ca/experiments/check_baseline_evolution/stability_window.py +106 -0
  27. ca/experiments/check_baseline_evolution/threshold_guard.py +80 -0
  28. ca/experiments/check_scaffolding/__init__.py +23 -0
  29. ca/experiments/check_scaffolding/common.py +69 -0
  30. ca/experiments/check_scaffolding/evaluate.py +428 -0
  31. ca/experiments/check_scaffolding/literal_profiles.py +149 -0
  32. ca/experiments/check_scaffolding/object_sections.py +224 -0
  33. ca/experiments/check_scaffolding/pipeline_overlays.py +165 -0
  34. ca/experiments/check_triage/__init__.py +23 -0
  35. ca/experiments/check_triage/common.py +246 -0
  36. ca/experiments/check_triage/evaluate.py +400 -0
  37. ca/experiments/check_triage/pareto_frontier.py +105 -0
  38. ca/experiments/check_triage/severity_weighted.py +63 -0
  39. ca/experiments/check_triage/signature_cluster.py +76 -0
  40. ca/experiments/ground_evaluate/__init__.py +23 -0
  41. ca/experiments/ground_evaluate/common.py +113 -0
  42. ca/experiments/ground_evaluate/evaluate.py +392 -0
  43. ca/experiments/ground_evaluate/height_band.py +113 -0
  44. ca/experiments/ground_evaluate/nearest_neighbor.py +65 -0
  45. ca/experiments/ground_evaluate/voxel_confusion.py +39 -0
  46. ca/experiments/process_docs.py +156 -0
  47. ca/experiments/web_progressive_loading/__init__.py +17 -0
  48. ca/experiments/web_progressive_loading/common.py +113 -0
  49. ca/experiments/web_progressive_loading/distance_shells.py +61 -0
  50. ca/experiments/web_progressive_loading/evaluate.py +518 -0
  51. ca/experiments/web_progressive_loading/grid_tiles.py +66 -0
  52. ca/experiments/web_progressive_loading/spatial_shuffle.py +65 -0
  53. ca/experiments/web_sampling/__init__.py +23 -0
  54. ca/experiments/web_sampling/common.py +9 -0
  55. ca/experiments/web_sampling/evaluate.py +534 -0
  56. ca/experiments/web_sampling/functional_voxel.py +89 -0
  57. ca/experiments/web_sampling/object_random.py +57 -0
  58. ca/experiments/web_sampling/pipeline_hybrid.py +82 -0
  59. ca/experiments/web_trajectory_sampling/__init__.py +15 -0
  60. ca/experiments/web_trajectory_sampling/common.py +131 -0
  61. ca/experiments/web_trajectory_sampling/distance_accumulator.py +115 -0
  62. ca/experiments/web_trajectory_sampling/evaluate.py +506 -0
  63. ca/experiments/web_trajectory_sampling/turn_aware.py +106 -0
  64. ca/experiments/web_trajectory_sampling/uniform_stride.py +41 -0
  65. ca/filter.py +44 -0
  66. ca/ground_evaluate.py +92 -0
  67. ca/info.py +34 -0
  68. ca/io.py +40 -0
  69. ca/log.py +28 -0
  70. ca/merge.py +33 -0
  71. ca/metrics.py +61 -0
  72. ca/normals.py +41 -0
  73. ca/pareto.py +155 -0
  74. ca/pipeline.py +82 -0
  75. ca/plot.py +217 -0
  76. ca/registration.py +66 -0
  77. ca/report.py +3467 -0
  78. ca/run_evaluate.py +424 -0
  79. ca/sample.py +47 -0
  80. ca/split.py +102 -0
  81. ca/stats.py +62 -0
  82. ca/trajectory.py +586 -0
  83. ca/view.py +36 -0
  84. ca/visualization.py +65 -0
  85. ca/web.py +1959 -0
  86. cloudanalyzer-0.1.0.dist-info/METADATA +303 -0
  87. cloudanalyzer-0.1.0.dist-info/RECORD +93 -0
  88. cloudanalyzer-0.1.0.dist-info/WHEEL +5 -0
  89. cloudanalyzer-0.1.0.dist-info/entry_points.txt +2 -0
  90. cloudanalyzer-0.1.0.dist-info/licenses/LICENSE +21 -0
  91. cloudanalyzer-0.1.0.dist-info/top_level.txt +2 -0
  92. cloudanalyzer_cli/__init__.py +1 -0
  93. cloudanalyzer_cli/main.py +1777 -0
ca/__init__.py ADDED
@@ -0,0 +1,3 @@
"""CloudAnalyzer - AI-friendly CLI tool for point cloud analysis."""

# Package version; keep in sync with the distribution metadata (METADATA/RECORD).
__version__ = "0.1.0"
ca/align.py ADDED
@@ -0,0 +1,60 @@
1
+ """Sequential registration and merge (align multiple scans)."""
2
+
3
+ import open3d as o3d
4
+
5
+ from ca.io import load_point_cloud
6
+ from ca.registration import register
7
+
8
+
9
def align(
    paths: list[str],
    output_path: str,
    method: str = "gicp",
    max_correspondence_distance: float = 1.0,
) -> dict:
    """Align multiple point clouds sequentially and merge.

    The first cloud is the reference. Each subsequent cloud is registered
    to the accumulated result, then merged.

    Args:
        paths: List of point cloud file paths (>= 2).
        output_path: Output file path for merged result.
        method: Registration method ("icp" or "gicp").
        max_correspondence_distance: Max correspondence distance.

    Returns:
        Dict with per-step registration results and total points.

    Raises:
        ValueError: If fewer than 2 paths are given.
        IOError: If the merged result cannot be written to output_path.
    """
    if len(paths) < 2:
        raise ValueError("At least 2 point clouds are required for alignment")

    accumulated = load_point_cloud(paths[0])
    steps = []

    # Register each remaining cloud against everything merged so far, so
    # later scans benefit from the growing reference surface.
    for i, path in enumerate(paths[1:], start=1):
        source = load_point_cloud(path)
        transformed, fitness, rmse = register(
            source, accumulated, method=method,
            max_correspondence_distance=max_correspondence_distance,
        )
        steps.append({
            "step": i,
            "path": path,
            "fitness": fitness,
            "rmse": rmse,
        })
        accumulated += transformed

    # write_point_cloud signals failure via its boolean return instead of
    # raising; check it so callers are not handed a success dict for a
    # missing or empty output file.
    if not o3d.io.write_point_cloud(output_path, accumulated):
        raise IOError(f"Failed to write point cloud: {output_path}")

    return {
        "output": output_path,
        "total_points": len(accumulated.points),
        "num_inputs": len(paths),
        "method": method,
        "steps": steps,
    }
ca/baseline_history.py ADDED
@@ -0,0 +1,98 @@
1
+ """Baseline history management: save, discover, and rotate QA summaries."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ import shutil
7
+ from datetime import datetime, timezone
8
+ from pathlib import Path
9
+
10
+
11
+ def _timestamp_label() -> str:
12
+ """Generate a UTC timestamp label for baseline naming."""
13
+ return datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")
14
+
15
+
16
def discover_history(history_dir: str, pattern: str = "*.json") -> list[str]:
    """Find and sort baseline summary JSONs in a directory, oldest first.

    Files are sorted by name (which embeds a timestamp when saved via
    ``save_baseline``). Non-JSON files and files that fail to parse are
    silently skipped.
    """
    root = Path(history_dir)
    if not root.is_dir():
        return []

    found: list[str] = []
    for candidate in sorted(root.glob(pattern)):
        try:
            parsed = json.loads(candidate.read_text(encoding="utf-8"))
        except (json.JSONDecodeError, OSError):
            # Unreadable or malformed file: skip silently per contract.
            continue
        # Only top-level JSON objects count as baseline summaries.
        if isinstance(parsed, dict):
            found.append(str(candidate))
    return found
36
+
37
+
38
def save_baseline(
    summary_path: str,
    history_dir: str,
    label: str | None = None,
) -> str:
    """Copy a QA summary JSON into the history directory with a timestamped name.

    Returns the destination path.
    """
    src = Path(summary_path).resolve()
    if not src.exists():
        raise FileNotFoundError(f"Summary file not found: {src}")

    # Reject anything that is not a top-level JSON object before copying.
    parsed = json.loads(src.read_text(encoding="utf-8"))
    if not isinstance(parsed, dict):
        raise ValueError(f"Expected top-level JSON object in: {src}")

    target_dir = Path(history_dir)
    target_dir.mkdir(parents=True, exist_ok=True)

    # An explicit label wins; otherwise embed a UTC timestamp in the name.
    destination = target_dir / f"baseline-{label or _timestamp_label()}.json"
    shutil.copy2(str(src), str(destination))
    return str(destination)
62
+
63
+
64
def rotate_history(history_dir: str, keep: int = 10) -> list[str]:
    """Remove oldest baselines beyond the keep limit.

    Returns the list of removed file paths.
    """
    if keep < 1:
        raise ValueError("keep must be >= 1")
    baselines = discover_history(history_dir)
    excess = len(baselines) - keep
    if excess <= 0:
        return []
    # Oldest first, so the head of the sorted list is what gets dropped.
    removed = baselines[:excess]
    for stale in removed:
        Path(stale).unlink(missing_ok=True)
    return removed
78
+
79
+
80
def list_baselines(history_dir: str) -> list[dict]:
    """Return metadata for each baseline in the history directory.

    Each entry holds the file path/name, the summary's pass flag and failed
    check ids, and the project name. Files that disappear or become
    unparseable between discovery and reading are skipped.
    """
    paths = discover_history(history_dir)
    entries: list[dict] = []
    for path in paths:
        file_path = Path(path)
        try:
            data = json.loads(file_path.read_text(encoding="utf-8"))
        except (json.JSONDecodeError, OSError):
            continue
        summary = data.get("summary", {})
        # Guard against a malformed summary section (e.g. a list or string):
        # calling .get() on a non-dict would raise AttributeError and abort
        # the whole listing for one bad file.
        if not isinstance(summary, dict):
            summary = {}
        entries.append({
            "path": str(file_path),
            "name": file_path.name,
            "passed": summary.get("passed"),
            "project": data.get("project"),
            "failed_check_ids": summary.get("failed_check_ids", []),
        })
    return entries
ca/batch.py ADDED
@@ -0,0 +1,393 @@
1
+ """Batch processing module."""
2
+
3
+ import shlex
4
+ from pathlib import Path
5
+
6
+ from ca.evaluate import evaluate
7
+ from ca.io import SUPPORTED_EXTENSIONS, load_point_cloud
8
+ from ca.info import get_info
9
+ from ca.log import logger
10
+ from ca.pareto import mark_quality_size_recommended
11
+ from ca.trajectory import SUPPORTED_TRAJECTORY_EXTENSIONS, evaluate_trajectory
12
+
13
+
14
+ def _quality_gate_result(
15
+ auc: float,
16
+ chamfer_distance: float,
17
+ min_auc: float | None = None,
18
+ max_chamfer: float | None = None,
19
+ ) -> dict | None:
20
+ """Build pass/fail metadata for optional batch quality gates."""
21
+ if min_auc is None and max_chamfer is None:
22
+ return None
23
+
24
+ reasons = []
25
+ if min_auc is not None and auc < min_auc:
26
+ reasons.append(f"AUC {auc:.4f} < min_auc {min_auc:.4f}")
27
+ if max_chamfer is not None and chamfer_distance > max_chamfer:
28
+ reasons.append(
29
+ f"Chamfer {chamfer_distance:.4f} > max_chamfer {max_chamfer:.4f}"
30
+ )
31
+
32
+ return {
33
+ "passed": not reasons,
34
+ "min_auc": min_auc,
35
+ "max_chamfer": max_chamfer,
36
+ "reasons": reasons,
37
+ }
38
+
39
+
40
def _find_point_cloud_files(directory: str, recursive: bool = False) -> list[Path]:
    """Find supported point cloud files in a directory, sorted by path."""
    root = Path(directory)
    if not root.is_dir():
        raise FileNotFoundError(f"Directory not found: {directory}")

    glob_pattern = "**/*" if recursive else "*"
    matches = [
        candidate
        for candidate in sorted(root.glob(glob_pattern))
        if candidate.is_file() and candidate.suffix.lower() in SUPPORTED_EXTENSIONS
    ]

    if not matches:
        logger.warning("No point cloud files found in %s", directory)
        return []

    logger.info("Found %d point cloud file(s) in %s", len(matches), directory)
    return matches
58
+
59
+
60
def _find_trajectory_files(directory: str, recursive: bool = False) -> list[Path]:
    """Find supported trajectory files in a directory, sorted by path."""
    root = Path(directory)
    if not root.is_dir():
        raise FileNotFoundError(f"Directory not found: {directory}")

    glob_pattern = "**/*" if recursive else "*"
    matches = [
        candidate
        for candidate in sorted(root.glob(glob_pattern))
        if candidate.is_file() and candidate.suffix.lower() in SUPPORTED_TRAJECTORY_EXTENSIONS
    ]

    if not matches:
        logger.warning("No trajectory files found in %s", directory)
        return []

    logger.info("Found %d trajectory file(s) in %s", len(matches), directory)
    return matches
78
+
79
+
80
+ def _find_files(directory: str, recursive: bool = False) -> list[Path]:
81
+ """Find all files in a directory."""
82
+ dirpath = Path(directory)
83
+ if not dirpath.is_dir():
84
+ raise FileNotFoundError(f"Directory not found: {directory}")
85
+
86
+ pattern = "**/*" if recursive else "*"
87
+ return sorted(f for f in dirpath.glob(pattern) if f.is_file())
88
+
89
+
90
+ def _match_by_relative_or_stem(
91
+ source_path: Path,
92
+ source_root: Path,
93
+ target_root: Path,
94
+ target_files: list[Path],
95
+ ) -> Path | None:
96
+ """Match a related artifact file by relative path or unique stem."""
97
+ relative = source_path.relative_to(source_root)
98
+ exact_path = target_root / relative
99
+ if exact_path.is_file():
100
+ return exact_path
101
+
102
+ relative_without_ext = relative.with_suffix("").as_posix()
103
+ same_relative = [
104
+ path for path in target_files
105
+ if path.relative_to(target_root).with_suffix("").as_posix() == relative_without_ext
106
+ ]
107
+ if len(same_relative) == 1:
108
+ return same_relative[0]
109
+ if len(same_relative) > 1:
110
+ logger.warning("Ambiguous artifact match for %s in %s", source_path, target_root)
111
+ return None
112
+
113
+ same_stem = [path for path in target_files if path.stem == source_path.stem]
114
+ if len(same_stem) == 1:
115
+ return same_stem[0]
116
+ if len(same_stem) > 1:
117
+ logger.warning("Ambiguous stem match for %s in %s", source_path, target_root)
118
+ return None
119
+
120
+
121
def _compression_stats(
    source_path: Path,
    source_root: Path,
    compressed_root: Path | None = None,
    compressed_files: list[Path] | None = None,
    baseline_root: Path | None = None,
    baseline_files: list[Path] | None = None,
) -> dict | None:
    """Collect optional compression-related file size metadata.

    Returns None when no compressed counterpart is configured or found;
    otherwise a dict of sizes plus compressed/baseline size ratios.
    """
    source_size = source_path.stat().st_size

    # The baseline defaults to the source itself; swap in a matched file
    # from the baseline directory when one exists.
    baseline_path: Path = source_path
    baseline_size = source_size
    if baseline_root is not None and baseline_files is not None:
        found_baseline = _match_by_relative_or_stem(
            source_path,
            source_root,
            baseline_root,
            baseline_files,
        )
        if found_baseline is not None:
            baseline_path = found_baseline
            baseline_size = found_baseline.stat().st_size

    # Without a compressed counterpart there is nothing to report.
    if compressed_root is None or compressed_files is None:
        return None

    found_compressed = _match_by_relative_or_stem(
        source_path,
        source_root,
        compressed_root,
        compressed_files,
    )
    if found_compressed is None:
        return None

    compressed_size = found_compressed.stat().st_size
    # Guard the zero-byte baseline edge case instead of dividing by zero.
    ratio = compressed_size / baseline_size if baseline_size > 0 else 0.0

    return {
        "source_size_bytes": source_size,
        "baseline_path": str(baseline_path),
        "baseline_size_bytes": baseline_size,
        "compressed_path": str(found_compressed),
        "compressed_size_bytes": compressed_size,
        "size_ratio": ratio,
        "space_saving_ratio": 1.0 - ratio,
    }
172
+
173
+
174
+ def _inspection_commands(source_path: str, reference_path: str) -> dict[str, str]:
175
+ """Build follow-up commands for interactive inspection."""
176
+ source = shlex.quote(source_path)
177
+ reference = shlex.quote(reference_path)
178
+ source_stem = Path(source_path).stem
179
+ reference_stem = Path(reference_path).stem
180
+ snapshot_name = f"{source_stem}_vs_{reference_stem}_heatmap.png"
181
+ return {
182
+ "web_heatmap": f"ca web {source} {reference} --heatmap",
183
+ "heatmap3d": f"ca heatmap3d {source} {reference} -o {shlex.quote(snapshot_name)}",
184
+ }
185
+
186
+
187
+ def _trajectory_inspection_commands(source_path: str, reference_path: str) -> dict[str, str]:
188
+ """Build follow-up commands for trajectory inspection."""
189
+ source = shlex.quote(source_path)
190
+ reference = shlex.quote(reference_path)
191
+ source_stem = Path(source_path).stem
192
+ reference_stem = Path(reference_path).stem
193
+ report_name = shlex.quote(f"{source_stem}_vs_{reference_stem}_trajectory_report.html")
194
+ aligned_report_name = shlex.quote(
195
+ f"{source_stem}_vs_{reference_stem}_trajectory_aligned_report.html"
196
+ )
197
+ return {
198
+ "traj_evaluate": f"ca traj-evaluate {source} {reference} --report {report_name}",
199
+ "traj_evaluate_aligned": (
200
+ f"ca traj-evaluate {source} {reference} --align-origin --report {aligned_report_name}"
201
+ ),
202
+ "traj_evaluate_rigid": (
203
+ f"ca traj-evaluate {source} {reference} --align-rigid --report "
204
+ f"{shlex.quote(f'{source_stem}_vs_{reference_stem}_trajectory_rigid_report.html')}"
205
+ ),
206
+ }
207
+
208
+
209
def batch_info(directory: str, recursive: bool = False) -> list[dict]:
    """Run info on all point cloud files in a directory.

    Args:
        directory: Directory path to scan.
        recursive: If True, scan subdirectories too.

    Returns:
        List of info dicts for each file found.
    """
    found = _find_point_cloud_files(directory, recursive=recursive)
    if not found:
        return []

    infos: list[dict] = []
    for file_path in found:
        logger.debug("Processing: %s", file_path)
        try:
            infos.append(get_info(str(file_path)))
        except (FileNotFoundError, ValueError) as exc:
            # Best-effort batch: log and move on rather than aborting the run.
            logger.warning("Skipped %s: %s", file_path, exc)

    return infos
233
+
234
+
235
def batch_evaluate(
    directory: str,
    reference_path: str,
    recursive: bool = False,
    thresholds: list[float] | None = None,
    min_auc: float | None = None,
    max_chamfer: float | None = None,
    compressed_dir: str | None = None,
    baseline_dir: str | None = None,
) -> list[dict]:
    """Evaluate all point cloud files in a directory against one reference.

    Files that fail to load or evaluate are logged and skipped, so one bad
    file never aborts the batch. When gates or artifact directories are
    given, each summary additionally carries quality-gate and compression
    metadata, and the result list is annotated by
    ``mark_quality_size_recommended``.

    Args:
        directory: Directory path to scan.
        reference_path: Reference point cloud path.
        recursive: If True, scan subdirectories too.
        thresholds: Optional list of F1 thresholds.
        min_auc: Optional minimum AUC required to pass.
        max_chamfer: Optional maximum Chamfer distance required to pass.
        compressed_dir: Optional directory with compressed artifacts.
        baseline_dir: Optional directory with original uncompressed artifacts.

    Returns:
        List of evaluation summary dicts for each file found.
    """
    files = _find_point_cloud_files(directory, recursive=recursive)
    if not files:
        return []
    source_root = Path(directory)

    # Validate the reference once so we fail fast on bad input.
    load_point_cloud(reference_path)
    # Scan the optional artifact directories once, outside the per-file loop.
    compressed_root = Path(compressed_dir) if compressed_dir is not None else None
    compressed_files = (
        _find_files(compressed_dir, recursive=recursive)
        if compressed_dir is not None else None
    )
    baseline_root = Path(baseline_dir) if baseline_dir is not None else None
    baseline_files = (
        _find_files(baseline_dir, recursive=recursive)
        if baseline_dir is not None else None
    )

    results = []
    for f in files:
        logger.debug("Evaluating: %s", f)
        try:
            eval_result = evaluate(str(f), reference_path, thresholds=thresholds)
            # Surface the threshold entry with the highest F1 for quick triage.
            best_f1 = max(eval_result["f1_scores"], key=lambda score: score["f1"])
            # None unless min_auc/max_chamfer gates were requested.
            quality_gate = _quality_gate_result(
                eval_result["auc"],
                eval_result["chamfer_distance"],
                min_auc=min_auc,
                max_chamfer=max_chamfer,
            )
            # None unless a compressed counterpart is configured and matched.
            compression = _compression_stats(
                f,
                source_root,
                compressed_root=compressed_root,
                compressed_files=compressed_files,
                baseline_root=baseline_root,
                baseline_files=baseline_files,
            )
            results.append(
                {
                    "path": str(f),
                    "num_points": eval_result["source_points"],
                    "reference_path": reference_path,
                    "reference_points": eval_result["target_points"],
                    "chamfer_distance": eval_result["chamfer_distance"],
                    "hausdorff_distance": eval_result["hausdorff_distance"],
                    "auc": eval_result["auc"],
                    "best_f1": best_f1,
                    "f1_scores": eval_result["f1_scores"],
                    "quality_gate": quality_gate,
                    # Ready-to-run shell commands for interactive follow-up.
                    "inspect": _inspection_commands(str(f), reference_path),
                    "compression": compression,
                }
            )
        except (FileNotFoundError, ValueError) as e:
            # Best-effort batch: a bad file is logged and skipped, not fatal.
            logger.warning("Skipped %s: %s", f, e)

    # Annotate entries on the quality/size Pareto frontier (mutates results
    # in place — presumably adds a "recommended" flag; see ca.pareto).
    mark_quality_size_recommended(
        results,
        min_auc=min_auc,
        max_chamfer=max_chamfer,
    )
    return results
323
+
324
+
325
def trajectory_batch_evaluate(
    directory: str,
    reference_dir: str,
    recursive: bool = False,
    max_time_delta: float = 0.05,
    align_origin: bool = False,
    align_rigid: bool = False,
    max_ate: float | None = None,
    max_rpe: float | None = None,
    max_drift: float | None = None,
    min_coverage: float | None = None,
) -> list[dict]:
    """Evaluate all trajectory files in a directory against matched references.

    Each trajectory under ``directory`` is paired with a file in
    ``reference_dir`` by relative path or unique stem; unmatched or failing
    files are logged and skipped. The alignment flags and the max_*/min_*
    thresholds are forwarded unchanged to ``evaluate_trajectory``
    (presumably they configure pose alignment and the quality gate —
    see ca.trajectory for their exact semantics).

    Returns:
        List of per-file evaluation summary dicts.
    """
    files = _find_trajectory_files(directory, recursive=recursive)
    if not files:
        return []

    source_root = Path(directory)
    reference_root = Path(reference_dir)
    if not reference_root.is_dir():
        raise FileNotFoundError(f"Directory not found: {reference_dir}")
    reference_files = _find_trajectory_files(reference_dir, recursive=recursive)

    results = []
    for f in files:
        # Pair each estimate with its reference; ambiguous/absent matches
        # come back as None and the file is skipped below.
        reference_path = _match_by_relative_or_stem(
            f,
            source_root,
            reference_root,
            reference_files,
        )
        if reference_path is None:
            logger.warning("Skipped %s: no matched reference trajectory in %s", f, reference_dir)
            continue

        logger.debug("Evaluating trajectory: %s", f)
        try:
            eval_result = evaluate_trajectory(
                str(f),
                str(reference_path),
                max_time_delta=max_time_delta,
                align_origin=align_origin,
                align_rigid=align_rigid,
                max_ate=max_ate,
                max_rpe=max_rpe,
                max_drift=max_drift,
                min_coverage=min_coverage,
            )
            # Flatten the most commonly consumed matching fields to the top
            # level while keeping the full nested payload available.
            results.append(
                {
                    "path": str(f),
                    "reference_path": str(reference_path),
                    "alignment": eval_result["alignment"],
                    "matching": eval_result["matching"],
                    "estimated_poses": eval_result["matching"]["estimated_poses"],
                    "reference_poses": eval_result["matching"]["reference_poses"],
                    "matched_poses": eval_result["matching"]["matched_poses"],
                    "coverage_ratio": eval_result["matching"]["coverage_ratio"],
                    "ate": eval_result["ate"],
                    "rpe_translation": eval_result["rpe_translation"],
                    "drift": eval_result["drift"],
                    "quality_gate": eval_result["quality_gate"],
                    "inspect": _trajectory_inspection_commands(str(f), str(reference_path)),
                }
            )
        except (FileNotFoundError, ValueError) as e:
            # Best-effort batch: a bad file is logged and skipped, not fatal.
            logger.warning("Skipped %s: %s", f, e)

    return results
ca/compare.py ADDED
@@ -0,0 +1,97 @@
1
+ """Core compare pipeline."""
2
+
3
+ import numpy as np
4
+
5
+ from ca.io import load_point_cloud
6
+ from ca.log import logger
7
+ from ca.registration import register
8
+ from ca.metrics import compute_nn_distance, summarize, threshold_stats
9
+ from ca.visualization import colorize, save_snapshot
10
+ from ca.report import make_json, save_json, make_markdown
11
+
12
+
13
def run_compare(
    source_path: str,
    target_path: str,
    method: str | None = "gicp",
    json_path: str | None = None,
    report_path: str | None = None,
    snapshot_path: str | None = None,
    threshold: float | None = None,
) -> dict:
    """Run the full compare pipeline.

    Loads both clouds, optionally registers the source onto the target,
    computes nearest-neighbor distances, colorizes the source by distance,
    and writes whichever report artifacts were requested.

    Args:
        source_path: Path to source point cloud.
        target_path: Path to target point cloud.
        method: Registration method ("icp", "gicp") or None to skip.
        json_path: Output path for JSON report.
        report_path: Output path for Markdown report.
        snapshot_path: Output path for snapshot image.
        threshold: Distance threshold to check.

    Returns:
        Report dict.
    """
    # 1. Load
    logger.info("Loading source: %s", source_path)
    source = load_point_cloud(source_path)
    logger.info(" -> %d points", len(source.points))

    logger.info("Loading target: %s", target_path)
    target = load_point_cloud(target_path)
    logger.info(" -> %d points", len(target.points))

    # 2. Register (optional)
    fitness = None
    rmse = None

    if method:
        logger.info("Registering with %s...", method.upper())
        source, fitness, rmse = register(source, target, method=method)
        logger.info(" -> Fitness: %.4f, RMSE: %.4f", fitness, rmse)

    # 3. Compute distances
    logger.info("Computing nearest neighbor distances...")
    distances = compute_nn_distance(source, target)
    stats = summarize(distances)
    logger.info(" -> Mean: %.4f, Max: %.4f", stats["mean"], stats["max"])

    # 3.5 Threshold check (optional)
    thresh_result = None
    if threshold is not None:
        thresh_result = threshold_stats(distances, threshold)
        logger.info(
            " -> Threshold %s: %d/%d (%.1f%%) exceed",
            threshold, thresh_result["exceed_count"],
            thresh_result["total"], thresh_result["exceed_ratio"] * 100,
        )

    # 4. Colorize
    colorize(source, distances)

    # 5. Save outputs
    data = make_json(
        source_points=len(source.points),
        target_points=len(target.points),
        fitness=fitness,
        rmse=rmse,
        distance_stats=stats,
    )
    # Explicit None check: "a threshold was requested" is signaled by the
    # variable being set, not by the truthiness of its dict contents.
    if thresh_result is not None:
        data["threshold"] = thresh_result

    if json_path:
        save_json(data, json_path)
        logger.info("JSON saved: %s", json_path)

    if report_path:
        make_markdown(data, report_path)
        logger.info("Report saved: %s", report_path)

    if snapshot_path:
        logger.info("Saving snapshot: %s", snapshot_path)
        save_snapshot(source, snapshot_path)

    logger.info("Done.")
    return data
ca/convert.py ADDED
@@ -0,0 +1,41 @@
1
+ """Point cloud format conversion module."""
2
+
3
+ import open3d as o3d
4
+
5
+ from ca.io import load_point_cloud, SUPPORTED_EXTENSIONS
6
+
7
+
8
def convert(input_path: str, output_path: str) -> dict:
    """Convert a point cloud file to another format.

    Args:
        input_path: Input point cloud file path.
        output_path: Output file path (format determined by extension).

    Returns:
        Dict with input/output info.

    Raises:
        ValueError: If output format is not supported.
        IOError: If the converted cloud cannot be written to output_path.
    """
    from pathlib import Path

    # Validate the target format before doing any loading work.
    out_ext = Path(output_path).suffix.lower()
    if out_ext not in SUPPORTED_EXTENSIONS:
        raise ValueError(
            f"Unsupported output format: '{out_ext}'. Supported: {', '.join(sorted(SUPPORTED_EXTENSIONS))}"
        )

    pcd = load_point_cloud(input_path)
    num_points = len(pcd.points)

    Path(output_path).parent.mkdir(parents=True, exist_ok=True)
    # write_point_cloud reports failure via its boolean return rather than
    # raising; check it so we never hand back a success dict for a write
    # that never happened.
    if not o3d.io.write_point_cloud(output_path, pcd):
        raise IOError(f"Failed to write point cloud: {output_path}")

    return {
        "input": input_path,
        "output": output_path,
        "num_points": num_points,
        "input_format": Path(input_path).suffix.lower(),
        "output_format": out_ext,
    }