euler-preprocess 1.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. euler_preprocess-1.0.1/PKG-INFO +268 -0
  2. euler_preprocess-1.0.1/README.md +254 -0
  3. euler_preprocess-1.0.1/euler_preprocess/__init__.py +0 -0
  4. euler_preprocess-1.0.1/euler_preprocess/cli.py +150 -0
  5. euler_preprocess-1.0.1/euler_preprocess/common/__init__.py +0 -0
  6. euler_preprocess-1.0.1/euler_preprocess/common/dataset.py +52 -0
  7. euler_preprocess-1.0.1/euler_preprocess/common/device.py +67 -0
  8. euler_preprocess-1.0.1/euler_preprocess/common/intrinsics.py +75 -0
  9. euler_preprocess-1.0.1/euler_preprocess/common/io.py +122 -0
  10. euler_preprocess-1.0.1/euler_preprocess/common/logging.py +51 -0
  11. euler_preprocess-1.0.1/euler_preprocess/common/noise.py +178 -0
  12. euler_preprocess-1.0.1/euler_preprocess/common/normalize.py +112 -0
  13. euler_preprocess-1.0.1/euler_preprocess/common/sampling.py +58 -0
  14. euler_preprocess-1.0.1/euler_preprocess/common/transform.py +23 -0
  15. euler_preprocess-1.0.1/euler_preprocess/fog/__init__.py +0 -0
  16. euler_preprocess-1.0.1/euler_preprocess/fog/airlight_from_sky.py +49 -0
  17. euler_preprocess-1.0.1/euler_preprocess/fog/dcp_airlight.py +83 -0
  18. euler_preprocess-1.0.1/euler_preprocess/fog/dcp_airlight_torch.py +80 -0
  19. euler_preprocess-1.0.1/euler_preprocess/fog/dcp_heuristic_airlight.py +105 -0
  20. euler_preprocess-1.0.1/euler_preprocess/fog/dcp_heuristic_airlight_torch.py +92 -0
  21. euler_preprocess-1.0.1/euler_preprocess/fog/foggify.py +28 -0
  22. euler_preprocess-1.0.1/euler_preprocess/fog/foggify_logging.py +10 -0
  23. euler_preprocess-1.0.1/euler_preprocess/fog/logging.py +46 -0
  24. euler_preprocess-1.0.1/euler_preprocess/fog/models.py +305 -0
  25. euler_preprocess-1.0.1/euler_preprocess/fog/transform.py +602 -0
  26. euler_preprocess-1.0.1/euler_preprocess/radial/__init__.py +0 -0
  27. euler_preprocess-1.0.1/euler_preprocess/radial/transform.py +81 -0
  28. euler_preprocess-1.0.1/euler_preprocess/sky_depth/__init__.py +0 -0
  29. euler_preprocess-1.0.1/euler_preprocess/sky_depth/transform.py +74 -0
  30. euler_preprocess-1.0.1/euler_preprocess.egg-info/PKG-INFO +268 -0
  31. euler_preprocess-1.0.1/euler_preprocess.egg-info/SOURCES.txt +40 -0
  32. euler_preprocess-1.0.1/euler_preprocess.egg-info/dependency_links.txt +1 -0
  33. euler_preprocess-1.0.1/euler_preprocess.egg-info/entry_points.txt +2 -0
  34. euler_preprocess-1.0.1/euler_preprocess.egg-info/requires.txt +9 -0
  35. euler_preprocess-1.0.1/euler_preprocess.egg-info/top_level.txt +1 -0
  36. euler_preprocess-1.0.1/pyproject.toml +25 -0
  37. euler_preprocess-1.0.1/setup.cfg +4 -0
  38. euler_preprocess-1.0.1/tests/test_airlight_fallback.py +301 -0
  39. euler_preprocess-1.0.1/tests/test_foggify_integration.py +119 -0
  40. euler_preprocess-1.0.1/tests/test_radial.py +144 -0
  41. euler_preprocess-1.0.1/tests/test_sky_depth.py +128 -0
  42. euler_preprocess-1.0.1/tests/test_zip_output.py +183 -0
@@ -0,0 +1,268 @@
1
+ Metadata-Version: 2.4
2
+ Name: euler-preprocess
3
+ Version: 1.0.1
4
+ Summary: Physics-based preprocessing (fog, etc.) for RGB+depth datasets
5
+ Requires-Python: >=3.9
6
+ Description-Content-Type: text/markdown
7
+ Requires-Dist: numpy
8
+ Requires-Dist: Pillow
9
+ Requires-Dist: euler-loading
10
+ Provides-Extra: gpu
11
+ Requires-Dist: torch; extra == "gpu"
12
+ Provides-Extra: progress
13
+ Requires-Dist: tqdm; extra == "progress"
14
+
15
+ # euler-preprocess
16
+
17
+ Physics-based preprocessing transforms for multi-modal RGB+depth datasets. Built on top of [euler-loading](https://github.com/d-rothen/euler-loading) and [ds-crawler](https://github.com/d-rothen/ds-crawler).
18
+
19
+ Available transforms:
20
+
21
+ | Command | Description |
22
+ |---|---|
23
+ | `euler-preprocess fog` | Synthetic fog via the Koschmieder atmospheric scattering model |
24
+ | `euler-preprocess sky-depth` | Override depth values in sky regions with a constant |
25
+ | `euler-preprocess radial` | Convert planar (z-buffer) depth to radial (Euclidean) depth |
26
+
27
+ ## Installation
28
+
29
+ ```bash
30
+ uv pip install "euler-preprocess[gpu,progress] @ git+https://github.com/d-rothen/euler-fog"
31
+ ```
32
+
33
+ ## Usage
34
+
35
+ ```bash
36
+ euler-preprocess fog -c configs/example_dataset_config.json
37
+ euler-preprocess sky-depth -c configs/sky_depth_dataset_config.json
38
+ euler-preprocess radial -c configs/radial_dataset_config.json
39
+ ```
40
+
41
+ ## Configuration
42
+
43
+ Every subcommand takes a **dataset config** JSON that points to the input data and a **transform config**. Each modality path must be a directory indexed by [ds-crawler](https://github.com/d-rothen/ds-crawler) with an `euler_loading` property that specifies the loader and function. This allows euler-loading to auto-select the correct dataset-specific loader.
44
+
45
+ ### Dataset Config
46
+
47
+ ```json
48
+ {
49
+ "transform_config_path": "configs/run1.json",
50
+ "output_path": "/path/to/output",
51
+ "modalities": {
52
+ "rgb": "/path/to/rgb",
53
+ "depth": "/path/to/depth",
54
+ "semantic_segmentation": "/path/to/classSegmentation"
55
+ },
56
+ "hierarchical_modalities": {
57
+ "intrinsics": "/path/to/intrinsics"
58
+ }
59
+ }
60
+ ```
61
+
62
+ | Field | Description |
63
+ |---|---|
64
+ | `transform_config_path` | Path to the transform-specific config (see below). `fog_config_path` is also accepted for backward compatibility. |
65
+ | `output_path` | Directory where outputs are written. |
66
+ | `modalities` | Regular modalities that participate in sample-ID intersection. Which modalities are required depends on the transform (see table below). |
67
+ | `hierarchical_modalities` | Per-scene data (e.g. intrinsics). Loaded once per scene and cached. |
68
+
69
+ **Required modalities per transform:**
70
+
71
+ | Transform | `modalities` | `hierarchical_modalities` |
72
+ |---|---|---|
73
+ | `fog` | `rgb`, `depth`, `semantic_segmentation` | — (intrinsics optional) |
74
+ | `sky-depth` | `depth`, `semantic_segmentation` | — |
75
+ | `radial` | `depth` | `intrinsics` |
76
+
77
+ ---
78
+
79
+ ## Fog Transform
80
+
81
+ ### Fog Config
82
+
83
+ Controls the fog simulation.
84
+
85
+ ```json
86
+ {
87
+ "airlight": "from_sky",
88
+ "seed": 1337,
89
+ "depth_scale": 1.0,
90
+ "resize_depth": true,
91
+ "contrast_threshold": 0.05,
92
+ "device": "cpu",
93
+ "gpu_batch_size": 4,
94
+ "selection": { ... },
95
+ "models": { ... }
96
+ }
97
+ ```
98
+
99
+ | Field | Description |
100
+ |---|---|
101
+ | `airlight` | **Required.** Airlight estimation method: `"from_sky"` (mean sky colour), `"dcp"` (dark channel prior), or `"dcp_heuristic"` (DCP with median heuristic). |
102
+ | `seed` | Random seed for reproducibility. `null` for non-deterministic. |
103
+ | `depth_scale` | Multiplier applied to depth values after loading. |
104
+ | `resize_depth` | Resize the depth map to match the RGB resolution (bilinear). |
105
+ | `contrast_threshold` | Threshold *C_t* used in the visibility-to-attenuation conversion (default `0.05`). |
106
+ | `device` | `"cpu"`, `"cuda"`, `"mps"`, or `"gpu"` (alias for cuda). |
107
+ | `gpu_batch_size` | Batch size when running on GPU. Uniform-model samples are batched; heterogeneous samples are processed individually. |
108
+
109
+ ### Fog Model
110
+
111
+ The core equation is the **Koschmieder model** (atmospheric scattering):
112
+
113
+ ```
114
+ I_fog(x) = I(x) * t(x) + L_s * (1 - t(x))
115
+ ```
116
+
117
+ where:
118
+
119
+ - **I(x)** is the original RGB colour at pixel *x*
120
+ - **t(x) = exp(-k * d(x))** is the transmittance, which falls exponentially with depth *d* and attenuation coefficient *k*
121
+ - **L_s** is the atmospheric light (airlight), i.e. the colour of the fog/sky light scattered towards the camera
122
+ - **k** is derived from a meteorological visibility distance *V*: `k = -ln(C_t) / V`
123
+
124
+ Distant objects are attenuated more (`t` approaches 0) and replaced by airlight, just as in real fog.
125
+
126
+ ### How Each Modality is Used
127
+
128
+ **RGB** — The clean scene image. Normalised to float32 in [0, 1]. This is the *I(x)* term in the fog equation -- it gets blended with the airlight according to transmittance.
129
+
130
+ **Depth** — A per-pixel depth map in **metres**. Provides the *d(x)* term in the transmittance calculation `t(x) = exp(-k * d(x))`. Pixels with greater depth receive more fog. Invalid values (NaN, inf, negative) are clamped to zero (treated as infinitely close, receiving no fog).
131
+
132
+ **Semantic Segmentation** — A per-pixel semantic segmentation map from which a boolean sky mask is derived, loaded via euler-loading's dataset-specific `semantic_segmentation` loader. The sky mask is used for airlight estimation when the `airlight` method is `"from_sky"`: the mean RGB of all sky pixels in the clean image is used as the airlight colour *L_s*.
133
+
134
+ **Intrinsics** *(optional)* — When present, planar (z-buffer) depth is converted to radial (Euclidean) depth before fog is applied.
135
+
136
+ ### Airlight Estimation
137
+
138
+ The `airlight` config key selects how the atmospheric light *L_s* is estimated:
139
+
140
+ | Method | Description |
141
+ |---|---|
142
+ | `from_sky` | Mean RGB of sky pixels in the clean image. Falls back to white `[1, 1, 1]` when no sky pixels exist. |
143
+ | `dcp` | Dark Channel Prior — selects the brightest pixel (by channel sum) among the top 0.1% darkest-channel pixels. |
144
+ | `dcp_heuristic` | DCP with median heuristic — selects the pixel closest to the median intensity (BT.601 grayscale) among the top 0.1% darkest-channel pixels. |
145
+
146
+ GPU-native implementations (`DCPAirlightTorch`, `DCPHeuristicAirlightTorch`) are used automatically when running on GPU.
147
+
148
+ ### Model Selection
149
+
150
+ Each image is assigned a fog model via the `selection` block:
151
+
152
+ ```json
153
+ "selection": {
154
+ "mode": "weighted",
155
+ "weights": {
156
+ "uniform": 1.0,
157
+ "heterogeneous_k": 0.0,
158
+ "heterogeneous_ls": 0.0,
159
+ "heterogeneous_k_ls": 0.0
160
+ }
161
+ }
162
+ ```
163
+
164
+ - **`fixed`** mode: always use a single named model.
165
+ - **`weighted`** mode: randomly select a model per image according to normalised weights.
166
+
167
+ Four models are available:
168
+
169
+ | Model | Description |
170
+ |---|---|
171
+ | `uniform` | Constant *k* and *L_s*. Standard homogeneous fog. |
172
+ | `heterogeneous_k` | Spatially-varying *k*, constant *L_s*. Simulates patchy fog / fog banks. |
173
+ | `heterogeneous_ls` | Constant *k*, spatially-varying *L_s*. Simulates scattered-light colour variation. |
174
+ | `heterogeneous_k_ls` | Both *k* and *L_s* vary spatially. Most expressive model. |
175
+
176
+ ### Visibility Distribution
177
+
178
+ Each model specifies a `visibility_m` distribution from which a visibility distance (in metres) is sampled per image:
179
+
180
+ | `dist` | Parameters | Description |
181
+ |---|---|---|
182
+ | `constant` | `value` | Fixed value. |
183
+ | `uniform` | `min`, `max` | Uniform random in range. |
184
+ | `normal` | `mean`, `std`, optional `min`/`max` | Gaussian, optionally clamped. |
185
+ | `lognormal` | `mean`, `sigma`, optional `min`/`max` | Log-normal. |
186
+ | `choice` | `values`, optional `weights` | Discrete weighted choice. |
187
+
188
+ The sampled visibility *V* is converted to the attenuation coefficient: **k = -ln(C_t) / V**.
189
+
190
+ ### Heterogeneous Noise Fields
191
+
192
+ Both `k_hetero` and `ls_hetero` use Perlin FBM (fractional Brownian motion) to generate spatially-varying factor fields:
193
+
194
+ ```json
195
+ "k_hetero": {
196
+ "scales": "auto",
197
+ "min_scale": 2,
198
+ "max_scale": null,
199
+ "min_factor": 0.0,
200
+ "max_factor": 1.0,
201
+ "normalize_to_mean": true
202
+ }
203
+ ```
204
+
205
+ The noise field (values in [0, 1]) is mapped to a factor field: `factor(x) = min_factor + (max_factor - min_factor) * noise(x)`. When `normalize_to_mean` is `true`, the factor field is rescaled so its spatial mean equals 1.0, preserving the overall fog density while introducing spatial variation.
206
+
207
+ | Parameter | Effect |
208
+ |---|---|
209
+ | `min_factor` / `max_factor` | Range of the multiplicative factor. |
210
+ | `normalize_to_mean` | Rescale factors so the image-wide mean equals the base value. Recommended for `k_hetero`. |
211
+ | `scales` / `min_scale` / `max_scale` | Control spatial frequency content. |
212
+
213
+ ### Fog Output
214
+
215
+ Foggy images are saved as PNG files organised by model name:
216
+
217
+ ```
218
+ <output_path>/
219
+ uniform/
220
+ beta_0.0374_airlight_0.353_0.784_1_rgb_00000.png
221
+ config.json
222
+ heterogeneous_k/
223
+ ...
224
+ ```
225
+
226
+ ---
227
+
228
+ ## Sky-Depth Transform
229
+
230
+ Overrides depth values in sky regions with a configurable constant. Useful for datasets where sky depth is encoded as zero or infinity and needs to be normalised to a large finite value.
231
+
232
+ ### Sky-Depth Config
233
+
234
+ ```json
235
+ {
236
+ "sky_depth_value": 1000.0
237
+ }
238
+ ```
239
+
240
+ | Field | Description |
241
+ |---|---|
242
+ | `sky_depth_value` | Depth value assigned to all sky pixels. Defaults to `1000.0`. |
243
+
244
+ ### Sky-Depth Output
245
+
246
+ Depth maps are saved as `.npy` float32 files preserving the original directory hierarchy.
247
+
248
+ ---
249
+
250
+ ## Radial Transform
251
+
252
+ Converts planar (z-buffer) depth to radial (Euclidean) depth using camera intrinsics. For each pixel *(u, v)*:
253
+
254
+ ```
255
+ d_radial(u, v) = d_planar(u, v) * sqrt(((u - cx)/fx)^2 + ((v - cy)/fy)^2 + 1)
256
+ ```
257
+
258
+ ### Radial Config
259
+
260
+ ```json
261
+ {}
262
+ ```
263
+
264
+ No special parameters are required. The transform reads intrinsics from the `intrinsics` hierarchical modality.
265
+
266
+ ### Radial Output
267
+
268
+ Depth maps are saved as `.npy` float32 files preserving the original directory hierarchy.
@@ -0,0 +1,254 @@
1
+ # euler-preprocess
2
+
3
+ Physics-based preprocessing transforms for multi-modal RGB+depth datasets. Built on top of [euler-loading](https://github.com/d-rothen/euler-loading) and [ds-crawler](https://github.com/d-rothen/ds-crawler).
4
+
5
+ Available transforms:
6
+
7
+ | Command | Description |
8
+ |---|---|
9
+ | `euler-preprocess fog` | Synthetic fog via the Koschmieder atmospheric scattering model |
10
+ | `euler-preprocess sky-depth` | Override depth values in sky regions with a constant |
11
+ | `euler-preprocess radial` | Convert planar (z-buffer) depth to radial (Euclidean) depth |
12
+
13
+ ## Installation
14
+
15
+ ```bash
16
+ uv pip install "euler-preprocess[gpu,progress] @ git+https://github.com/d-rothen/euler-fog"
17
+ ```
18
+
19
+ ## Usage
20
+
21
+ ```bash
22
+ euler-preprocess fog -c configs/example_dataset_config.json
23
+ euler-preprocess sky-depth -c configs/sky_depth_dataset_config.json
24
+ euler-preprocess radial -c configs/radial_dataset_config.json
25
+ ```
26
+
27
+ ## Configuration
28
+
29
+ Every subcommand takes a **dataset config** JSON that points to the input data and a **transform config**. Each modality path must be a directory indexed by [ds-crawler](https://github.com/d-rothen/ds-crawler) with an `euler_loading` property that specifies the loader and function. This allows euler-loading to auto-select the correct dataset-specific loader.
30
+
31
+ ### Dataset Config
32
+
33
+ ```json
34
+ {
35
+ "transform_config_path": "configs/run1.json",
36
+ "output_path": "/path/to/output",
37
+ "modalities": {
38
+ "rgb": "/path/to/rgb",
39
+ "depth": "/path/to/depth",
40
+ "semantic_segmentation": "/path/to/classSegmentation"
41
+ },
42
+ "hierarchical_modalities": {
43
+ "intrinsics": "/path/to/intrinsics"
44
+ }
45
+ }
46
+ ```
47
+
48
+ | Field | Description |
49
+ |---|---|
50
+ | `transform_config_path` | Path to the transform-specific config (see below). `fog_config_path` is also accepted for backward compatibility. |
51
+ | `output_path` | Directory where outputs are written. |
52
+ | `modalities` | Regular modalities that participate in sample-ID intersection. Which modalities are required depends on the transform (see table below). |
53
+ | `hierarchical_modalities` | Per-scene data (e.g. intrinsics). Loaded once per scene and cached. |
54
+
55
+ **Required modalities per transform:**
56
+
57
+ | Transform | `modalities` | `hierarchical_modalities` |
58
+ |---|---|---|
59
+ | `fog` | `rgb`, `depth`, `semantic_segmentation` | — (intrinsics optional) |
60
+ | `sky-depth` | `depth`, `semantic_segmentation` | — |
61
+ | `radial` | `depth` | `intrinsics` |
62
+
63
+ ---
64
+
65
+ ## Fog Transform
66
+
67
+ ### Fog Config
68
+
69
+ Controls the fog simulation.
70
+
71
+ ```json
72
+ {
73
+ "airlight": "from_sky",
74
+ "seed": 1337,
75
+ "depth_scale": 1.0,
76
+ "resize_depth": true,
77
+ "contrast_threshold": 0.05,
78
+ "device": "cpu",
79
+ "gpu_batch_size": 4,
80
+ "selection": { ... },
81
+ "models": { ... }
82
+ }
83
+ ```
84
+
85
+ | Field | Description |
86
+ |---|---|
87
+ | `airlight` | **Required.** Airlight estimation method: `"from_sky"` (mean sky colour), `"dcp"` (dark channel prior), or `"dcp_heuristic"` (DCP with median heuristic). |
88
+ | `seed` | Random seed for reproducibility. `null` for non-deterministic. |
89
+ | `depth_scale` | Multiplier applied to depth values after loading. |
90
+ | `resize_depth` | Resize the depth map to match the RGB resolution (bilinear). |
91
+ | `contrast_threshold` | Threshold *C_t* used in the visibility-to-attenuation conversion (default `0.05`). |
92
+ | `device` | `"cpu"`, `"cuda"`, `"mps"`, or `"gpu"` (alias for cuda). |
93
+ | `gpu_batch_size` | Batch size when running on GPU. Uniform-model samples are batched; heterogeneous samples are processed individually. |
94
+
95
+ ### Fog Model
96
+
97
+ The core equation is the **Koschmieder model** (atmospheric scattering):
98
+
99
+ ```
100
+ I_fog(x) = I(x) * t(x) + L_s * (1 - t(x))
101
+ ```
102
+
103
+ where:
104
+
105
+ - **I(x)** is the original RGB colour at pixel *x*
106
+ - **t(x) = exp(-k * d(x))** is the transmittance, which falls exponentially with depth *d* and attenuation coefficient *k*
107
+ - **L_s** is the atmospheric light (airlight), i.e. the colour of the fog/sky light scattered towards the camera
108
+ - **k** is derived from a meteorological visibility distance *V*: `k = -ln(C_t) / V`
109
+
110
+ Distant objects are attenuated more (`t` approaches 0) and replaced by airlight, just as in real fog.
111
+
112
+ ### How Each Modality is Used
113
+
114
+ **RGB** — The clean scene image. Normalised to float32 in [0, 1]. This is the *I(x)* term in the fog equation -- it gets blended with the airlight according to transmittance.
115
+
116
+ **Depth** — A per-pixel depth map in **metres**. Provides the *d(x)* term in the transmittance calculation `t(x) = exp(-k * d(x))`. Pixels with greater depth receive more fog. Invalid values (NaN, inf, negative) are clamped to zero (treated as infinitely close, receiving no fog).
117
+
118
+ **Semantic Segmentation** — A per-pixel semantic segmentation map from which a boolean sky mask is derived, loaded via euler-loading's dataset-specific `semantic_segmentation` loader. The sky mask is used for airlight estimation when the `airlight` method is `"from_sky"`: the mean RGB of all sky pixels in the clean image is used as the airlight colour *L_s*.
119
+
120
+ **Intrinsics** *(optional)* — When present, planar (z-buffer) depth is converted to radial (Euclidean) depth before fog is applied.
121
+
122
+ ### Airlight Estimation
123
+
124
+ The `airlight` config key selects how the atmospheric light *L_s* is estimated:
125
+
126
+ | Method | Description |
127
+ |---|---|
128
+ | `from_sky` | Mean RGB of sky pixels in the clean image. Falls back to white `[1, 1, 1]` when no sky pixels exist. |
129
+ | `dcp` | Dark Channel Prior — selects the brightest pixel (by channel sum) among the top 0.1% darkest-channel pixels. |
130
+ | `dcp_heuristic` | DCP with median heuristic — selects the pixel closest to the median intensity (BT.601 grayscale) among the top 0.1% darkest-channel pixels. |
131
+
132
+ GPU-native implementations (`DCPAirlightTorch`, `DCPHeuristicAirlightTorch`) are used automatically when running on GPU.
133
+
134
+ ### Model Selection
135
+
136
+ Each image is assigned a fog model via the `selection` block:
137
+
138
+ ```json
139
+ "selection": {
140
+ "mode": "weighted",
141
+ "weights": {
142
+ "uniform": 1.0,
143
+ "heterogeneous_k": 0.0,
144
+ "heterogeneous_ls": 0.0,
145
+ "heterogeneous_k_ls": 0.0
146
+ }
147
+ }
148
+ ```
149
+
150
+ - **`fixed`** mode: always use a single named model.
151
+ - **`weighted`** mode: randomly select a model per image according to normalised weights.
152
+
153
+ Four models are available:
154
+
155
+ | Model | Description |
156
+ |---|---|
157
+ | `uniform` | Constant *k* and *L_s*. Standard homogeneous fog. |
158
+ | `heterogeneous_k` | Spatially-varying *k*, constant *L_s*. Simulates patchy fog / fog banks. |
159
+ | `heterogeneous_ls` | Constant *k*, spatially-varying *L_s*. Simulates scattered-light colour variation. |
160
+ | `heterogeneous_k_ls` | Both *k* and *L_s* vary spatially. Most expressive model. |
161
+
162
+ ### Visibility Distribution
163
+
164
+ Each model specifies a `visibility_m` distribution from which a visibility distance (in metres) is sampled per image:
165
+
166
+ | `dist` | Parameters | Description |
167
+ |---|---|---|
168
+ | `constant` | `value` | Fixed value. |
169
+ | `uniform` | `min`, `max` | Uniform random in range. |
170
+ | `normal` | `mean`, `std`, optional `min`/`max` | Gaussian, optionally clamped. |
171
+ | `lognormal` | `mean`, `sigma`, optional `min`/`max` | Log-normal. |
172
+ | `choice` | `values`, optional `weights` | Discrete weighted choice. |
173
+
174
+ The sampled visibility *V* is converted to the attenuation coefficient: **k = -ln(C_t) / V**.
175
+
176
+ ### Heterogeneous Noise Fields
177
+
178
+ Both `k_hetero` and `ls_hetero` use Perlin FBM (fractional Brownian motion) to generate spatially-varying factor fields:
179
+
180
+ ```json
181
+ "k_hetero": {
182
+ "scales": "auto",
183
+ "min_scale": 2,
184
+ "max_scale": null,
185
+ "min_factor": 0.0,
186
+ "max_factor": 1.0,
187
+ "normalize_to_mean": true
188
+ }
189
+ ```
190
+
191
+ The noise field (values in [0, 1]) is mapped to a factor field: `factor(x) = min_factor + (max_factor - min_factor) * noise(x)`. When `normalize_to_mean` is `true`, the factor field is rescaled so its spatial mean equals 1.0, preserving the overall fog density while introducing spatial variation.
192
+
193
+ | Parameter | Effect |
194
+ |---|---|
195
+ | `min_factor` / `max_factor` | Range of the multiplicative factor. |
196
+ | `normalize_to_mean` | Rescale factors so the image-wide mean equals the base value. Recommended for `k_hetero`. |
197
+ | `scales` / `min_scale` / `max_scale` | Control spatial frequency content. |
198
+
199
+ ### Fog Output
200
+
201
+ Foggy images are saved as PNG files organised by model name:
202
+
203
+ ```
204
+ <output_path>/
205
+ uniform/
206
+ beta_0.0374_airlight_0.353_0.784_1_rgb_00000.png
207
+ config.json
208
+ heterogeneous_k/
209
+ ...
210
+ ```
211
+
212
+ ---
213
+
214
+ ## Sky-Depth Transform
215
+
216
+ Overrides depth values in sky regions with a configurable constant. Useful for datasets where sky depth is encoded as zero or infinity and needs to be normalised to a large finite value.
217
+
218
+ ### Sky-Depth Config
219
+
220
+ ```json
221
+ {
222
+ "sky_depth_value": 1000.0
223
+ }
224
+ ```
225
+
226
+ | Field | Description |
227
+ |---|---|
228
+ | `sky_depth_value` | Depth value assigned to all sky pixels. Defaults to `1000.0`. |
229
+
230
+ ### Sky-Depth Output
231
+
232
+ Depth maps are saved as `.npy` float32 files preserving the original directory hierarchy.
233
+
234
+ ---
235
+
236
+ ## Radial Transform
237
+
238
+ Converts planar (z-buffer) depth to radial (Euclidean) depth using camera intrinsics. For each pixel *(u, v)*:
239
+
240
+ ```
241
+ d_radial(u, v) = d_planar(u, v) * sqrt(((u - cx)/fx)^2 + ((v - cy)/fy)^2 + 1)
242
+ ```
243
+
244
+ ### Radial Config
245
+
246
+ ```json
247
+ {}
248
+ ```
249
+
250
+ No special parameters are required. The transform reads intrinsics from the `intrinsics` hierarchical modality.
251
+
252
+ ### Radial Output
253
+
254
+ Depth maps are saved as `.npy` float32 files preserving the original directory hierarchy.
File without changes
@@ -0,0 +1,150 @@
1
+ """CLI entry point for euler-preprocess.
2
+
3
+ Used both by ``python main.py`` and by the installed ``euler-preprocess`` console
4
+ script. Supports subcommands: ``fog``, ``sky-depth``, ``radial``.
5
+ """
6
+ from __future__ import annotations
7
+
8
+ import argparse
9
+ import json
10
+ from pathlib import Path
11
+
12
+ from euler_preprocess.common.dataset import build_dataset
13
+ from euler_preprocess.common.logging import get_logger, log_dataset_info
14
+
15
+
16
+ # ---------------------------------------------------------------------------
17
+ # Helpers
18
+ # ---------------------------------------------------------------------------
19
+
20
+ def _resolve(path_str: str, config_dir: Path) -> Path:
21
+ """Resolve a path relative to the config file's directory."""
22
+ p = Path(path_str)
23
+ if p.is_absolute():
24
+ return p
25
+ return (config_dir / p).resolve()
26
+
27
+
28
def _run_transform(args: argparse.Namespace, transform_class: type) -> int:
    """Shared driver for all subcommands.

    Loads the dataset config named by ``args.config``, resolves the
    transform-specific config, builds the dataset, instantiates
    *transform_class* and runs it over the dataset.

    Returns 0 on success (used as the process exit code).

    Raises:
        KeyError: if the dataset config lacks a transform config path or
            an ``output_path``.
    """
    logger = get_logger()

    config_path = Path(args.config).resolve()
    config_dir = config_path.parent

    with open(config_path, "r", encoding="utf-8") as f:
        config = json.load(f)

    # Resolve the transform-specific config path.
    # Support both ``transform_config_path`` and legacy ``fog_config_path``.
    raw_transform_config = config.get(
        "transform_config_path", config.get("fog_config_path")
    )
    if raw_transform_config is None:
        # The original lookup raised KeyError('fog_config_path'), which
        # misleadingly named only the legacy key; name both explicitly.
        raise KeyError(
            "dataset config must contain 'transform_config_path' "
            "(or legacy 'fog_config_path')"
        )
    transform_config_path = _resolve(raw_transform_config, config_dir)
    output_path = config["output_path"]

    # Read the transform config to determine the device (for dataset logging)
    with open(transform_config_path, "r", encoding="utf-8") as f:
        transform_cfg = json.load(f)
    device = transform_cfg.get("device", "cpu").lower()
    # "cuda", "mps" and "gpu" all count as GPU for logging purposes.
    use_gpu = device != "cpu"

    logger.info("Config: %s", args.config)
    logger.info("Transform config: %s", transform_config_path)
    logger.info("Output path: %s", output_path)

    required_modalities = transform_class.REQUIRED_MODALITIES
    required_hierarchical = transform_class.REQUIRED_HIERARCHICAL_MODALITIES or None
    dataset = build_dataset(config, required_modalities, required_hierarchical)
    dataset_name = config.get("dataset", "dataset")

    # Merge regular and hierarchical modality paths purely for the log line.
    modality_paths = {
        **config.get("modalities", {}),
        **config.get("hierarchical_modalities", {}),
    }
    log_dataset_info(logger, dataset_name, len(dataset), modality_paths, use_gpu)

    transform = transform_class(
        config_path=str(transform_config_path),
        out_path=output_path,
    )

    saved_paths = transform.run(dataset)

    logger.info("Transform complete. Generated %d outputs.", len(saved_paths))
    return 0
76
+
77
+
78
+ # ---------------------------------------------------------------------------
79
+ # Subcommand handlers
80
+ # ---------------------------------------------------------------------------
81
+
82
def _cmd_fog(args: argparse.Namespace) -> int:
    """Handle the ``fog`` subcommand."""
    # Imported lazily so the other subcommands don't pay this import cost.
    from euler_preprocess.fog.transform import FogTransform

    return _run_transform(args, FogTransform)
85
+
86
+
87
def _cmd_sky_depth(args: argparse.Namespace) -> int:
    """Handle the ``sky-depth`` subcommand."""
    # Imported lazily so the other subcommands don't pay this import cost.
    from euler_preprocess.sky_depth.transform import SkyDepthTransform

    return _run_transform(args, SkyDepthTransform)
90
+
91
+
92
def _cmd_radial(args: argparse.Namespace) -> int:
    """Handle the ``radial`` subcommand."""
    # Imported lazily so the other subcommands don't pay this import cost.
    from euler_preprocess.radial.transform import RadialTransform

    return _run_transform(args, RadialTransform)
95
+
96
+
97
+ # ---------------------------------------------------------------------------
98
+ # Argument parsing
99
+ # ---------------------------------------------------------------------------
100
+
101
def parse_args() -> argparse.Namespace:
    """Parse command-line arguments for the ``euler-preprocess`` CLI.

    Returns the parsed namespace; its ``func`` attribute holds the handler
    for the chosen subcommand. When no subcommand is given, the help text
    is printed once and the process exits with status 2.
    """
    parser = argparse.ArgumentParser(
        description="Preprocessing transforms for RGB+depth datasets.",
    )
    subparsers = parser.add_subparsers(dest="command")

    def _add_config_arg(subparser: argparse.ArgumentParser) -> None:
        # Every subcommand takes the same dataset-config option; define it once.
        subparser.add_argument(
            "--config", "-c", type=str, required=True,
            help="Path to the dataset configuration JSON file.",
        )

    # --- fog ---
    fog_parser = subparsers.add_parser(
        "fog", help="Apply synthetic fog to RGB images.",
    )
    _add_config_arg(fog_parser)
    fog_parser.set_defaults(func=_cmd_fog)

    # --- sky-depth ---
    sky_depth_parser = subparsers.add_parser(
        "sky-depth", help="Override sky-region depth values.",
    )
    _add_config_arg(sky_depth_parser)
    sky_depth_parser.set_defaults(func=_cmd_sky_depth)

    # --- radial ---
    radial_parser = subparsers.add_parser(
        "radial", help="Convert planar (z-buffer) depth to radial (Euclidean) depth.",
    )
    _add_config_arg(radial_parser)
    radial_parser.set_defaults(func=_cmd_radial)

    args = parser.parse_args()
    if not hasattr(args, "func"):
        # The previous flow printed the help text twice (print_help()
        # followed by re-parsing ["--help"]) and exited 0; print it once
        # and exit with the conventional usage-error status instead.
        parser.print_help()
        raise SystemExit(2)
    return args
142
+
143
+
144
+ # ---------------------------------------------------------------------------
145
+ # Main
146
+ # ---------------------------------------------------------------------------
147
+
148
def main() -> int:
    """CLI entry point: parse arguments and dispatch to the chosen handler."""
    parsed = parse_args()
    return parsed.func(parsed)