wavedl 1.6.1.tar.gz → 1.6.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. {wavedl-1.6.1/src/wavedl.egg-info → wavedl-1.6.3}/PKG-INFO +59 -64
  2. {wavedl-1.6.1 → wavedl-1.6.3}/README.md +58 -63
  3. {wavedl-1.6.1 → wavedl-1.6.3}/pyproject.toml +2 -2
  4. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/__init__.py +1 -1
  5. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/hpo.py +1 -1
  6. wavedl-1.6.1/src/wavedl/hpc.py → wavedl-1.6.3/src/wavedl/launcher.py +135 -61
  7. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/__init__.py +22 -0
  8. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/test.py +8 -0
  9. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/train.py +10 -13
  10. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/utils/data.py +36 -6
  11. {wavedl-1.6.1 → wavedl-1.6.3/src/wavedl.egg-info}/PKG-INFO +59 -64
  12. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl.egg-info/SOURCES.txt +1 -1
  13. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl.egg-info/entry_points.txt +2 -2
  14. {wavedl-1.6.1 → wavedl-1.6.3}/LICENSE +0 -0
  15. {wavedl-1.6.1 → wavedl-1.6.3}/setup.cfg +0 -0
  16. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/_pretrained_utils.py +0 -0
  17. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/_template.py +0 -0
  18. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/base.py +0 -0
  19. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/caformer.py +0 -0
  20. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/cnn.py +0 -0
  21. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/convnext.py +0 -0
  22. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/convnext_v2.py +0 -0
  23. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/densenet.py +0 -0
  24. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/efficientnet.py +0 -0
  25. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/efficientnetv2.py +0 -0
  26. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/efficientvit.py +0 -0
  27. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/fastvit.py +0 -0
  28. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/mamba.py +0 -0
  29. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/maxvit.py +0 -0
  30. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/mobilenetv3.py +0 -0
  31. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/registry.py +0 -0
  32. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/regnet.py +0 -0
  33. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/resnet.py +0 -0
  34. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/resnet3d.py +0 -0
  35. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/swin.py +0 -0
  36. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/tcn.py +0 -0
  37. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/unet.py +0 -0
  38. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/unireplknet.py +0 -0
  39. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/models/vit.py +0 -0
  40. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/utils/__init__.py +0 -0
  41. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/utils/config.py +0 -0
  42. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/utils/constraints.py +0 -0
  43. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/utils/cross_validation.py +0 -0
  44. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/utils/distributed.py +0 -0
  45. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/utils/losses.py +0 -0
  46. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/utils/metrics.py +0 -0
  47. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/utils/optimizers.py +0 -0
  48. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl/utils/schedulers.py +0 -0
  49. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl.egg-info/dependency_links.txt +0 -0
  50. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl.egg-info/requires.txt +0 -0
  51. {wavedl-1.6.1 → wavedl-1.6.3}/src/wavedl.egg-info/top_level.txt +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: wavedl
- Version: 1.6.1
+ Version: 1.6.3
  Summary: A Scalable Deep Learning Framework for Wave-Based Inverse Problems
  Author: Ductho Le
  License: MIT
@@ -214,77 +214,83 @@ This installs everything you need: training, inference, HPO, ONNX export.
  ```bash
  git clone https://github.com/ductho-le/WaveDL.git
  cd WaveDL
- pip install -e .
+ pip install -e ".[dev]"
  ```

  > [!NOTE]
- > Python 3.11+ required. For development setup, see [CONTRIBUTING.md](.github/CONTRIBUTING.md).
+ > Python 3.11+ required. For contributor setup (pre-commit hooks), see [CONTRIBUTING.md](.github/CONTRIBUTING.md).

  ### Quick Start

  > [!TIP]
  > In all examples below, replace `<...>` placeholders with your values. See [Configuration](#️-configuration) for defaults and options.

- #### Option 1: Using wavedl-hpc (Recommended for HPC)
-
- The `wavedl-hpc` command automatically configures the environment for HPC systems:
+ ### Training

  ```bash
- # Basic training (auto-detects available GPUs)
- wavedl-hpc --model <model_name> --data_path <train_data> --batch_size <number> --output_dir <output_folder>
+ # Basic training (auto-detects GPUs and environment)
+ wavedl-train --model <model_name> --data_path <train_data> --output_dir <output_folder>

  # Detailed configuration
- wavedl-hpc --model <model_name> --data_path <train_data> --batch_size <number> \
+ wavedl-train --model <model_name> --data_path <train_data> --batch_size <number> \
  --lr <number> --epochs <number> --patience <number> --compile --output_dir <output_folder>

- # Specify GPU count explicitly
- wavedl-hpc --num_gpus 4 --model cnn --data_path train.npz --output_dir results
- ```
-
- #### Option 2: Direct Accelerate Launch
-
- ```bash
- # Local - auto-detects GPUs
- accelerate launch -m wavedl.train --model <model_name> --data_path <train_data> --batch_size <number> --output_dir <output_folder>
+ # Multi-GPU is automatic (uses all available GPUs)
+ # Override with --num_gpus if needed
+ wavedl-train --model cnn --data_path train.npz --num_gpus 4 --output_dir results

  # Resume training (automatic - just re-run with same output_dir)
- # Manual resume from specific checkpoint:
- accelerate launch -m wavedl.train --model <model_name> --data_path <train_data> --resume <checkpoint_folder> --output_dir <output_folder>
+ wavedl-train --model <model_name> --data_path <train_data> --output_dir <output_folder>

  # Force fresh start (ignores existing checkpoints)
- accelerate launch -m wavedl.train --model <model_name> --data_path <train_data> --output_dir <output_folder> --fresh
+ wavedl-train --model <model_name> --data_path <train_data> --output_dir <output_folder> --fresh

  # List available models
  wavedl-train --list_models
  ```

- > [!TIP]
- > **Auto-Resume**: If training crashes or is interrupted, simply re-run with the same `--output_dir`. The framework automatically detects incomplete training and resumes from the last checkpoint. Use `--fresh` to force a fresh start.
+ > [!NOTE]
+ > `wavedl-train` automatically detects your environment:
+ > - **HPC clusters** (SLURM, PBS, etc.): Uses local caching, offline WandB
+ > - **Local machines**: Uses standard cache locations (~/.cache)
  >
- > **GPU Auto-Detection**: `wavedl-hpc` automatically detects available GPUs using `nvidia-smi`. Use `--num_gpus` to override.
+ > **Auto-Resume**: If training crashes or is interrupted, simply re-run with the same `--output_dir`. The framework automatically detects incomplete training and resumes from the last checkpoint.

- ### Testing & Inference
+ <details>
+ <summary><b>Advanced: Direct Accelerate Launch</b></summary>
+
+ For fine-grained control over distributed training, you can use `accelerate launch` directly:

- After training, use `wavedl.test` to evaluate your model on test data:
+ ```bash
+ # Custom accelerate configuration
+ accelerate launch -m wavedl.train --model <model_name> --data_path <train_data> --output_dir <output_folder>
+
+ # Multi-node training
+ accelerate launch --num_machines 2 --main_process_ip <ip> -m wavedl.train --model cnn --data_path train.npz
+ ```
+
+ </details>
+
+ ### Testing & Inference

  ```bash
  # Basic inference
- python -m wavedl.test --checkpoint <checkpoint_folder> --data_path <test_data>
+ wavedl-test --checkpoint <checkpoint_folder> --data_path <test_data>

  # With visualization, CSV export, and multiple file formats
- python -m wavedl.test --checkpoint <checkpoint_folder> --data_path <test_data> \
+ wavedl-test --checkpoint <checkpoint_folder> --data_path <test_data> \
  --plot --plot_format png pdf --save_predictions --output_dir <output_folder>

  # With custom parameter names
- python -m wavedl.test --checkpoint <checkpoint_folder> --data_path <test_data> \
+ wavedl-test --checkpoint <checkpoint_folder> --data_path <test_data> \
  --param_names '$p_1$' '$p_2$' '$p_3$' --plot

  # Export model to ONNX for deployment (LabVIEW, MATLAB, C++, etc.)
- python -m wavedl.test --checkpoint <checkpoint_folder> --data_path <test_data> \
+ wavedl-test --checkpoint <checkpoint_folder> --data_path <test_data> \
  --export onnx --export_path <output_file.onnx>

  # For 3D volumes with small depth (e.g., 8×128×128), override auto-detection
- python -m wavedl.test --checkpoint <checkpoint_folder> --data_path <test_data> \
+ wavedl-test --checkpoint <checkpoint_folder> --data_path <test_data> \
  --input_channels 1
  ```

@@ -295,7 +301,7 @@ python -m wavedl.test --checkpoint <checkpoint_folder> --data_path <test_data> \
  - **Format** (with `--plot_format`): Supported formats: `png` (default), `pdf` (vector), `svg` (vector), `eps` (LaTeX), `tiff`, `jpg`, `ps`

  > [!NOTE]
- > `wavedl.test` auto-detects the model architecture from checkpoint metadata. If unavailable, it falls back to folder name parsing. Use `--model` to override if needed.
+ > `wavedl-test` auto-detects the model architecture from checkpoint metadata. If unavailable, it falls back to folder name parsing. Use `--model` to override if needed.

  ### Adding Custom Models

@@ -339,7 +345,7 @@ class MyModel(BaseModel):
  **Step 2: Train**

  ```bash
- wavedl-hpc --import my_model.py --model my_model --data_path train.npz
+ wavedl-train --import my_model.py --model my_model --data_path train.npz
  ```

  WaveDL handles everything else: training loop, logging, checkpoints, multi-GPU, early stopping, etc.
@@ -355,10 +361,10 @@ WaveDL/
  ├── src/
  │ └── wavedl/ # Main package (namespaced)
  │ ├── __init__.py # Package init with __version__
- │ ├── train.py # Training entry point
+ │ ├── train.py # Training script
  │ ├── test.py # Testing & inference script
  │ ├── hpo.py # Hyperparameter optimization
- │ ├── hpc.py # HPC distributed training launcher
+ │ ├── launcher.py # Training launcher (wavedl-train)
  │ │
  │ ├── models/ # Model Zoo (69 architectures)
  │ │ ├── registry.py # Model factory (@register_model)
@@ -389,16 +395,7 @@ WaveDL/
  ## ⚙️ Configuration

  > [!NOTE]
- > All configuration options below work with **both** `wavedl-hpc` and direct `accelerate launch`. The wrapper script passes all arguments directly to `train.py`.
- >
- > **Examples:**
- > ```bash
- > # Using wavedl-hpc
- > wavedl-hpc --model cnn --batch_size 256 --lr 5e-4 --compile
- >
- > # Using accelerate launch directly
- > accelerate launch -m wavedl.train --model cnn --batch_size 256 --lr 5e-4 --compile
- > ```
+ > All configuration options below work with `wavedl-train`. The wrapper script passes all arguments directly to `train.py`.

  <details>
  <summary><b>Available Models</b> — 69 architectures</summary>
@@ -642,7 +639,7 @@ WaveDL automatically enables performance optimizations for modern GPUs:
  </details>

  <details>
- <summary><b>HPC CLI Arguments (wavedl-hpc)</b></summary>
+ <summary><b>Distributed Training Arguments</b></summary>

  | Argument | Default | Description |
  |----------|---------|-------------|
@@ -674,10 +671,10 @@ WaveDL automatically enables performance optimizations for modern GPUs:
  **Example:**
  ```bash
  # Use Huber loss for noisy NDE data
- accelerate launch -m wavedl.train --model cnn --loss huber --huber_delta 0.5
+ wavedl-train --model cnn --loss huber --huber_delta 0.5

  # Weighted MSE: prioritize thickness (first target)
- accelerate launch -m wavedl.train --model cnn --loss weighted_mse --loss_weights "2.0,1.0,1.0"
+ wavedl-train --model cnn --loss weighted_mse --loss_weights "2.0,1.0,1.0"
  ```

  </details>
@@ -697,10 +694,10 @@ accelerate launch -m wavedl.train --model cnn --loss weighted_mse --loss_weights
  **Example:**
  ```bash
  # SGD with Nesterov momentum (often better generalization)
- accelerate launch -m wavedl.train --model cnn --optimizer sgd --lr 0.01 --momentum 0.9 --nesterov
+ wavedl-train --model cnn --optimizer sgd --lr 0.01 --momentum 0.9 --nesterov

  # RAdam for more stable training
- accelerate launch -m wavedl.train --model cnn --optimizer radam --lr 1e-3
+ wavedl-train --model cnn --optimizer radam --lr 1e-3
  ```

  </details>
@@ -722,13 +719,13 @@ accelerate launch -m wavedl.train --model cnn --optimizer radam --lr 1e-3
  **Example:**
  ```bash
  # Cosine annealing for 1000 epochs
- accelerate launch -m wavedl.train --model cnn --scheduler cosine --epochs 1000 --min_lr 1e-7
+ wavedl-train --model cnn --scheduler cosine --epochs 1000 --min_lr 1e-7

  # OneCycleLR for super-convergence
- accelerate launch -m wavedl.train --model cnn --scheduler onecycle --lr 1e-2 --epochs 50
+ wavedl-train --model cnn --scheduler onecycle --lr 1e-2 --epochs 50

  # MultiStep with custom milestones
- accelerate launch -m wavedl.train --model cnn --scheduler multistep --milestones "100,200,300"
+ wavedl-train --model cnn --scheduler multistep --milestones "100,200,300"
  ```

  </details>
@@ -739,16 +736,14 @@ accelerate launch -m wavedl.train --model cnn --scheduler multistep --milestones
  For robust model evaluation, simply add the `--cv` flag:

  ```bash
- # 5-fold cross-validation (works with both methods!)
- wavedl-hpc --model cnn --cv 5 --data_path train_data.npz
- # OR
- accelerate launch -m wavedl.train --model cnn --cv 5 --data_path train_data.npz
+ # 5-fold cross-validation
+ wavedl-train --model cnn --cv 5 --data_path train_data.npz

  # Stratified CV (recommended for unbalanced data)
- wavedl-hpc --model cnn --cv 5 --cv_stratify --loss huber --epochs 100
+ wavedl-train --model cnn --cv 5 --cv_stratify --loss huber --epochs 100

  # Full configuration
- wavedl-hpc --model cnn --cv 5 --cv_stratify \
+ wavedl-train --model cnn --cv 5 --cv_stratify \
  --loss huber --optimizer adamw --scheduler cosine \
  --output_dir ./cv_results
  ```
@@ -773,10 +768,10 @@ Use YAML files for reproducible experiments. CLI arguments can override any conf

  ```bash
  # Use a config file
- accelerate launch -m wavedl.train --config configs/config.yaml --data_path train.npz
+ wavedl-train --config configs/config.yaml --data_path train.npz

  # Override specific values from config
- accelerate launch -m wavedl.train --config configs/config.yaml --lr 5e-4 --epochs 500
+ wavedl-train --config configs/config.yaml --lr 5e-4 --epochs 500
  ```

  **Example config (`configs/config.yaml`):**
@@ -929,7 +924,7 @@ wavedl-hpo --data_path train.npz --models cnn --n_trials 50 --quick

  After HPO completes, it prints the optimal command:
  ```bash
- accelerate launch -m wavedl.train --data_path train.npz --model cnn --lr 3.2e-4 --batch_size 128 ...
+ wavedl-train --data_path train.npz --model cnn --lr 3.2e-4 --batch_size 128 ...
  ```

  ---
@@ -1122,12 +1117,12 @@ The `examples/` folder contains a **complete, ready-to-run example** for **materi

  ```bash
  # Run inference on the example data
- python -m wavedl.test --checkpoint ./examples/elasticity_prediction/best_checkpoint \
+ wavedl-test --checkpoint ./examples/elasticity_prediction/best_checkpoint \
  --data_path ./examples/elasticity_prediction/Test_data_100.mat \
  --plot --save_predictions --output_dir ./examples/elasticity_prediction/test_results

  # Export to ONNX (already included as model.onnx)
- python -m wavedl.test --checkpoint ./examples/elasticity_prediction/best_checkpoint \
+ wavedl-test --checkpoint ./examples/elasticity_prediction/best_checkpoint \
  --data_path ./examples/elasticity_prediction/Test_data_100.mat \
  --export onnx --export_path ./examples/elasticity_prediction/model.onnx
  ```

README.md
@@ -166,77 +166,83 @@ This installs everything you need: training, inference, HPO, ONNX export.
  ```bash
  git clone https://github.com/ductho-le/WaveDL.git
  cd WaveDL
- pip install -e .
+ pip install -e ".[dev]"
  ```

  > [!NOTE]
- > Python 3.11+ required. For development setup, see [CONTRIBUTING.md](.github/CONTRIBUTING.md).
+ > Python 3.11+ required. For contributor setup (pre-commit hooks), see [CONTRIBUTING.md](.github/CONTRIBUTING.md).

  ### Quick Start

  > [!TIP]
  > In all examples below, replace `<...>` placeholders with your values. See [Configuration](#️-configuration) for defaults and options.

- #### Option 1: Using wavedl-hpc (Recommended for HPC)
-
- The `wavedl-hpc` command automatically configures the environment for HPC systems:
+ ### Training

  ```bash
- # Basic training (auto-detects available GPUs)
- wavedl-hpc --model <model_name> --data_path <train_data> --batch_size <number> --output_dir <output_folder>
+ # Basic training (auto-detects GPUs and environment)
+ wavedl-train --model <model_name> --data_path <train_data> --output_dir <output_folder>

  # Detailed configuration
- wavedl-hpc --model <model_name> --data_path <train_data> --batch_size <number> \
+ wavedl-train --model <model_name> --data_path <train_data> --batch_size <number> \
  --lr <number> --epochs <number> --patience <number> --compile --output_dir <output_folder>

- # Specify GPU count explicitly
- wavedl-hpc --num_gpus 4 --model cnn --data_path train.npz --output_dir results
- ```
-
- #### Option 2: Direct Accelerate Launch
-
- ```bash
- # Local - auto-detects GPUs
- accelerate launch -m wavedl.train --model <model_name> --data_path <train_data> --batch_size <number> --output_dir <output_folder>
+ # Multi-GPU is automatic (uses all available GPUs)
+ # Override with --num_gpus if needed
+ wavedl-train --model cnn --data_path train.npz --num_gpus 4 --output_dir results

  # Resume training (automatic - just re-run with same output_dir)
- # Manual resume from specific checkpoint:
- accelerate launch -m wavedl.train --model <model_name> --data_path <train_data> --resume <checkpoint_folder> --output_dir <output_folder>
+ wavedl-train --model <model_name> --data_path <train_data> --output_dir <output_folder>

  # Force fresh start (ignores existing checkpoints)
- accelerate launch -m wavedl.train --model <model_name> --data_path <train_data> --output_dir <output_folder> --fresh
+ wavedl-train --model <model_name> --data_path <train_data> --output_dir <output_folder> --fresh

  # List available models
  wavedl-train --list_models
  ```

- > [!TIP]
- > **Auto-Resume**: If training crashes or is interrupted, simply re-run with the same `--output_dir`. The framework automatically detects incomplete training and resumes from the last checkpoint. Use `--fresh` to force a fresh start.
+ > [!NOTE]
+ > `wavedl-train` automatically detects your environment:
+ > - **HPC clusters** (SLURM, PBS, etc.): Uses local caching, offline WandB
+ > - **Local machines**: Uses standard cache locations (~/.cache)
  >
- > **GPU Auto-Detection**: `wavedl-hpc` automatically detects available GPUs using `nvidia-smi`. Use `--num_gpus` to override.
+ > **Auto-Resume**: If training crashes or is interrupted, simply re-run with the same `--output_dir`. The framework automatically detects incomplete training and resumes from the last checkpoint.

- ### Testing & Inference
+ <details>
+ <summary><b>Advanced: Direct Accelerate Launch</b></summary>
+
+ For fine-grained control over distributed training, you can use `accelerate launch` directly:

- After training, use `wavedl.test` to evaluate your model on test data:
+ ```bash
+ # Custom accelerate configuration
+ accelerate launch -m wavedl.train --model <model_name> --data_path <train_data> --output_dir <output_folder>
+
+ # Multi-node training
+ accelerate launch --num_machines 2 --main_process_ip <ip> -m wavedl.train --model cnn --data_path train.npz
+ ```
+
+ </details>
+
+ ### Testing & Inference

  ```bash
  # Basic inference
- python -m wavedl.test --checkpoint <checkpoint_folder> --data_path <test_data>
+ wavedl-test --checkpoint <checkpoint_folder> --data_path <test_data>

  # With visualization, CSV export, and multiple file formats
- python -m wavedl.test --checkpoint <checkpoint_folder> --data_path <test_data> \
+ wavedl-test --checkpoint <checkpoint_folder> --data_path <test_data> \
  --plot --plot_format png pdf --save_predictions --output_dir <output_folder>

  # With custom parameter names
- python -m wavedl.test --checkpoint <checkpoint_folder> --data_path <test_data> \
+ wavedl-test --checkpoint <checkpoint_folder> --data_path <test_data> \
  --param_names '$p_1$' '$p_2$' '$p_3$' --plot

  # Export model to ONNX for deployment (LabVIEW, MATLAB, C++, etc.)
- python -m wavedl.test --checkpoint <checkpoint_folder> --data_path <test_data> \
+ wavedl-test --checkpoint <checkpoint_folder> --data_path <test_data> \
  --export onnx --export_path <output_file.onnx>

  # For 3D volumes with small depth (e.g., 8×128×128), override auto-detection
- python -m wavedl.test --checkpoint <checkpoint_folder> --data_path <test_data> \
+ wavedl-test --checkpoint <checkpoint_folder> --data_path <test_data> \
  --input_channels 1
  ```

@@ -247,7 +253,7 @@ python -m wavedl.test --checkpoint <checkpoint_folder> --data_path <test_data> \
  - **Format** (with `--plot_format`): Supported formats: `png` (default), `pdf` (vector), `svg` (vector), `eps` (LaTeX), `tiff`, `jpg`, `ps`

  > [!NOTE]
- > `wavedl.test` auto-detects the model architecture from checkpoint metadata. If unavailable, it falls back to folder name parsing. Use `--model` to override if needed.
+ > `wavedl-test` auto-detects the model architecture from checkpoint metadata. If unavailable, it falls back to folder name parsing. Use `--model` to override if needed.

  ### Adding Custom Models

@@ -291,7 +297,7 @@ class MyModel(BaseModel):
  **Step 2: Train**

  ```bash
- wavedl-hpc --import my_model.py --model my_model --data_path train.npz
+ wavedl-train --import my_model.py --model my_model --data_path train.npz
  ```

  WaveDL handles everything else: training loop, logging, checkpoints, multi-GPU, early stopping, etc.
@@ -307,10 +313,10 @@ WaveDL/
  ├── src/
  │ └── wavedl/ # Main package (namespaced)
  │ ├── __init__.py # Package init with __version__
- │ ├── train.py # Training entry point
+ │ ├── train.py # Training script
  │ ├── test.py # Testing & inference script
  │ ├── hpo.py # Hyperparameter optimization
- │ ├── hpc.py # HPC distributed training launcher
+ │ ├── launcher.py # Training launcher (wavedl-train)
  │ │
  │ ├── models/ # Model Zoo (69 architectures)
  │ │ ├── registry.py # Model factory (@register_model)
@@ -341,16 +347,7 @@ WaveDL/
  ## ⚙️ Configuration

  > [!NOTE]
- > All configuration options below work with **both** `wavedl-hpc` and direct `accelerate launch`. The wrapper script passes all arguments directly to `train.py`.
- >
- > **Examples:**
- > ```bash
- > # Using wavedl-hpc
- > wavedl-hpc --model cnn --batch_size 256 --lr 5e-4 --compile
- >
- > # Using accelerate launch directly
- > accelerate launch -m wavedl.train --model cnn --batch_size 256 --lr 5e-4 --compile
- > ```
+ > All configuration options below work with `wavedl-train`. The wrapper script passes all arguments directly to `train.py`.

  <details>
  <summary><b>Available Models</b> — 69 architectures</summary>
@@ -594,7 +591,7 @@ WaveDL automatically enables performance optimizations for modern GPUs:
  </details>

  <details>
- <summary><b>HPC CLI Arguments (wavedl-hpc)</b></summary>
+ <summary><b>Distributed Training Arguments</b></summary>

  | Argument | Default | Description |
  |----------|---------|-------------|
@@ -626,10 +623,10 @@ WaveDL automatically enables performance optimizations for modern GPUs:
  **Example:**
  ```bash
  # Use Huber loss for noisy NDE data
- accelerate launch -m wavedl.train --model cnn --loss huber --huber_delta 0.5
+ wavedl-train --model cnn --loss huber --huber_delta 0.5

  # Weighted MSE: prioritize thickness (first target)
- accelerate launch -m wavedl.train --model cnn --loss weighted_mse --loss_weights "2.0,1.0,1.0"
+ wavedl-train --model cnn --loss weighted_mse --loss_weights "2.0,1.0,1.0"
  ```

  </details>
@@ -649,10 +646,10 @@ accelerate launch -m wavedl.train --model cnn --loss weighted_mse --loss_weights
  **Example:**
  ```bash
  # SGD with Nesterov momentum (often better generalization)
- accelerate launch -m wavedl.train --model cnn --optimizer sgd --lr 0.01 --momentum 0.9 --nesterov
+ wavedl-train --model cnn --optimizer sgd --lr 0.01 --momentum 0.9 --nesterov

  # RAdam for more stable training
- accelerate launch -m wavedl.train --model cnn --optimizer radam --lr 1e-3
+ wavedl-train --model cnn --optimizer radam --lr 1e-3
  ```

  </details>
@@ -674,13 +671,13 @@ accelerate launch -m wavedl.train --model cnn --optimizer radam --lr 1e-3
  **Example:**
  ```bash
  # Cosine annealing for 1000 epochs
- accelerate launch -m wavedl.train --model cnn --scheduler cosine --epochs 1000 --min_lr 1e-7
+ wavedl-train --model cnn --scheduler cosine --epochs 1000 --min_lr 1e-7

  # OneCycleLR for super-convergence
- accelerate launch -m wavedl.train --model cnn --scheduler onecycle --lr 1e-2 --epochs 50
+ wavedl-train --model cnn --scheduler onecycle --lr 1e-2 --epochs 50

  # MultiStep with custom milestones
- accelerate launch -m wavedl.train --model cnn --scheduler multistep --milestones "100,200,300"
+ wavedl-train --model cnn --scheduler multistep --milestones "100,200,300"
  ```

  </details>
@@ -691,16 +688,14 @@ accelerate launch -m wavedl.train --model cnn --scheduler multistep --milestones
  For robust model evaluation, simply add the `--cv` flag:

  ```bash
- # 5-fold cross-validation (works with both methods!)
- wavedl-hpc --model cnn --cv 5 --data_path train_data.npz
- # OR
- accelerate launch -m wavedl.train --model cnn --cv 5 --data_path train_data.npz
+ # 5-fold cross-validation
+ wavedl-train --model cnn --cv 5 --data_path train_data.npz

  # Stratified CV (recommended for unbalanced data)
- wavedl-hpc --model cnn --cv 5 --cv_stratify --loss huber --epochs 100
+ wavedl-train --model cnn --cv 5 --cv_stratify --loss huber --epochs 100

  # Full configuration
- wavedl-hpc --model cnn --cv 5 --cv_stratify \
+ wavedl-train --model cnn --cv 5 --cv_stratify \
  --loss huber --optimizer adamw --scheduler cosine \
  --output_dir ./cv_results
  ```
@@ -725,10 +720,10 @@ Use YAML files for reproducible experiments. CLI arguments can override any conf

  ```bash
  # Use a config file
- accelerate launch -m wavedl.train --config configs/config.yaml --data_path train.npz
+ wavedl-train --config configs/config.yaml --data_path train.npz

  # Override specific values from config
- accelerate launch -m wavedl.train --config configs/config.yaml --lr 5e-4 --epochs 500
+ wavedl-train --config configs/config.yaml --lr 5e-4 --epochs 500
  ```

  **Example config (`configs/config.yaml`):**
@@ -881,7 +876,7 @@ wavedl-hpo --data_path train.npz --models cnn --n_trials 50 --quick

  After HPO completes, it prints the optimal command:
  ```bash
- accelerate launch -m wavedl.train --data_path train.npz --model cnn --lr 3.2e-4 --batch_size 128 ...
+ wavedl-train --data_path train.npz --model cnn --lr 3.2e-4 --batch_size 128 ...
  ```

  ---
@@ -1074,12 +1069,12 @@ The `examples/` folder contains a **complete, ready-to-run example** for **materi

  ```bash
  # Run inference on the example data
- python -m wavedl.test --checkpoint ./examples/elasticity_prediction/best_checkpoint \
+ wavedl-test --checkpoint ./examples/elasticity_prediction/best_checkpoint \
  --data_path ./examples/elasticity_prediction/Test_data_100.mat \
  --plot --save_predictions --output_dir ./examples/elasticity_prediction/test_results

  # Export to ONNX (already included as model.onnx)
- python -m wavedl.test --checkpoint ./examples/elasticity_prediction/best_checkpoint \
+ wavedl-test --checkpoint ./examples/elasticity_prediction/best_checkpoint \
  --data_path ./examples/elasticity_prediction/Test_data_100.mat \
  --export onnx --export_path ./examples/elasticity_prediction/model.onnx
  ```

pyproject.toml
@@ -85,10 +85,10 @@ dev = [
  ]

  [project.scripts]
- wavedl-train = "wavedl.train:main"
+ wavedl-train = "wavedl.launcher:main" # Universal training (auto-detects HPC vs local)
+ wavedl-hpc = "wavedl.launcher:main" # Alias for backwards compatibility
  wavedl-test = "wavedl.test:main"
  wavedl-hpo = "wavedl.hpo:main"
- wavedl-hpc = "wavedl.hpc:main"

  [project.urls]
  Homepage = "https://github.com/ductho-le/WaveDL"
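
Note on the hunk above: both `wavedl-train` and the retained `wavedl-hpc` alias now point at `wavedl.launcher:main`. A minimal sketch (not part of the package) for confirming the installed console scripts, assuming wavedl 1.6.3 is installed in the current environment:

```python
# Hypothetical verification snippet: list the console scripts registered by
# the installed distribution and print which callable each one resolves to.
from importlib.metadata import entry_points

eps = entry_points(group="console_scripts")
for name in ("wavedl-train", "wavedl-hpc", "wavedl-test", "wavedl-hpo"):
    ep = eps[name]  # raises KeyError if the script is not installed
    print(f"{name} -> {ep.value}")  # expected: wavedl.launcher:main for the first two
```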

src/wavedl/__init__.py
@@ -18,7 +18,7 @@ For inference:
  # or: python -m wavedl.test --checkpoint best_checkpoint --data_path test.npz
  """

- __version__ = "1.6.1"
+ __version__ = "1.6.3"
  __author__ = "Ductho Le"
  __email__ = "ductho.le@outlook.com"

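The version bump above is also the quickest runtime check that an environment actually picked up the new release. A tiny sketch (assumes wavedl is already installed; not taken from the diff):

```python
# Hypothetical check: confirm the installed wavedl reports the 1.6.3 release.
import wavedl

print(wavedl.__version__)
assert wavedl.__version__ == "1.6.3", f"expected 1.6.3, got {wavedl.__version__}"
```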

src/wavedl/hpo.py
@@ -440,7 +440,7 @@ Examples:
  print("\n" + "=" * 60)
  print("TO TRAIN WITH BEST PARAMETERS:")
  print("=" * 60)
- cmd_parts = ["accelerate launch -m wavedl.train"]
+ cmd_parts = ["wavedl-train"]
  cmd_parts.append(f"--data_path {args.data_path}")
  for key, value in study.best_params.items():
      cmd_parts.append(f"--{key} {value}")
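
For context, the hunk above changes only the first element of the suggested command: `wavedl.hpo` still appends `--data_path` and each best parameter as `--key value`, so the printed command now begins with `wavedl-train` instead of `accelerate launch -m wavedl.train`. A standalone sketch of that assembly pattern (the parameter values below are illustrative, not from the diff):

```python
# Illustrative reconstruction of the command assembly shown above; best_params
# and data_path are made-up example values standing in for the HPO results.
best_params = {"lr": 3.2e-4, "batch_size": 128}
data_path = "train.npz"

cmd_parts = ["wavedl-train", f"--data_path {data_path}"]
for key, value in best_params.items():
    cmd_parts.append(f"--{key} {value}")

print(" ".join(cmd_parts))
# wavedl-train --data_path train.npz --lr 0.00032 --batch_size 128
```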