careamics 0.1.0rc4__py3-none-any.whl → 0.1.0rc5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (62)
  1. careamics/careamist.py +12 -11
  2. careamics/config/__init__.py +0 -1
  3. careamics/config/architectures/unet_model.py +1 -0
  4. careamics/config/callback_model.py +1 -0
  5. careamics/config/configuration_example.py +0 -2
  6. careamics/config/configuration_factory.py +112 -42
  7. careamics/config/configuration_model.py +14 -16
  8. careamics/config/data_model.py +59 -157
  9. careamics/config/inference_model.py +19 -20
  10. careamics/config/references/algorithm_descriptions.py +1 -0
  11. careamics/config/references/references.py +1 -0
  12. careamics/config/support/supported_extraction_strategies.py +1 -0
  13. careamics/config/training_model.py +1 -0
  14. careamics/config/transformations/n2v_manipulate_model.py +1 -0
  15. careamics/config/transformations/nd_flip_model.py +6 -11
  16. careamics/config/transformations/normalize_model.py +1 -0
  17. careamics/config/transformations/transform_model.py +1 -0
  18. careamics/config/transformations/xy_random_rotate90_model.py +6 -8
  19. careamics/config/validators/validator_utils.py +1 -0
  20. careamics/conftest.py +1 -0
  21. careamics/dataset/dataset_utils/__init__.py +0 -1
  22. careamics/dataset/dataset_utils/dataset_utils.py +1 -0
  23. careamics/dataset/in_memory_dataset.py +14 -45
  24. careamics/dataset/iterable_dataset.py +13 -68
  25. careamics/dataset/patching/__init__.py +0 -7
  26. careamics/dataset/patching/patching.py +1 -0
  27. careamics/dataset/patching/sequential_patching.py +6 -6
  28. careamics/dataset/patching/tiled_patching.py +10 -6
  29. careamics/lightning_datamodule.py +20 -24
  30. careamics/lightning_module.py +1 -1
  31. careamics/lightning_prediction_datamodule.py +15 -10
  32. careamics/losses/__init__.py +0 -1
  33. careamics/losses/loss_factory.py +1 -0
  34. careamics/model_io/__init__.py +0 -1
  35. careamics/model_io/bioimage/_readme_factory.py +2 -1
  36. careamics/model_io/bioimage/bioimage_utils.py +1 -0
  37. careamics/model_io/bioimage/model_description.py +1 -0
  38. careamics/model_io/bmz_io.py +2 -1
  39. careamics/models/layers.py +1 -0
  40. careamics/models/model_factory.py +1 -0
  41. careamics/models/unet.py +91 -17
  42. careamics/prediction/stitch_prediction.py +1 -0
  43. careamics/transforms/__init__.py +2 -23
  44. careamics/transforms/compose.py +98 -0
  45. careamics/transforms/n2v_manipulate.py +18 -23
  46. careamics/transforms/nd_flip.py +38 -64
  47. careamics/transforms/normalize.py +45 -34
  48. careamics/transforms/pixel_manipulation.py +2 -2
  49. careamics/transforms/transform.py +33 -0
  50. careamics/transforms/tta.py +2 -2
  51. careamics/transforms/xy_random_rotate90.py +41 -68
  52. careamics/utils/__init__.py +0 -1
  53. careamics/utils/context.py +1 -0
  54. careamics/utils/logging.py +1 -0
  55. careamics/utils/metrics.py +1 -0
  56. careamics/utils/torch_utils.py +1 -0
  57. {careamics-0.1.0rc4.dist-info → careamics-0.1.0rc5.dist-info}/METADATA +16 -61
  58. careamics-0.1.0rc5.dist-info/RECORD +111 -0
  59. careamics/dataset/patching/patch_transform.py +0 -44
  60. careamics-0.1.0rc4.dist-info/RECORD +0 -110
  61. {careamics-0.1.0rc4.dist-info → careamics-0.1.0rc5.dist-info}/WHEEL +0 -0
  62. {careamics-0.1.0rc4.dist-info → careamics-0.1.0rc5.dist-info}/licenses/LICENSE +0 -0
careamics/careamist.py CHANGED
@@ -73,8 +73,7 @@ class CAREamist:
         source: Union[Path, str],
         work_dir: Optional[str] = None,
         experiment_name: str = "CAREamics",
-    ) -> None:
-        ...
+    ) -> None: ...
 
     @overload
     def __init__(  # numpydoc ignore=GL08
@@ -82,8 +81,7 @@ class CAREamist:
         source: Configuration,
         work_dir: Optional[str] = None,
         experiment_name: str = "CAREamics",
-    ) -> None:
-        ...
+    ) -> None: ...
 
     def __init__(
         self,
@@ -478,8 +476,7 @@ class CAREamist:
         source: CAREamicsPredictData,
         *,
         checkpoint: Optional[Literal["best", "last"]] = None,
-    ) -> Union[list, np.ndarray]:
-        ...
+    ) -> Union[list, np.ndarray]: ...
 
     @overload
     def predict(  # numpydoc ignore=GL08
@@ -497,8 +494,7 @@ class CAREamist:
         read_source_func: Optional[Callable] = None,
        extension_filter: str = "",
        checkpoint: Optional[Literal["best", "last"]] = None,
-    ) -> Union[list, np.ndarray]:
-        ...
+    ) -> Union[list, np.ndarray]: ...
 
     @overload
     def predict(  # numpydoc ignore=GL08
@@ -514,8 +510,7 @@ class CAREamist:
         tta_transforms: bool = True,
         dataloader_params: Optional[Dict] = None,
         checkpoint: Optional[Literal["best", "last"]] = None,
-    ) -> Union[list, np.ndarray]:
-        ...
+    ) -> Union[list, np.ndarray]: ...
 
     def predict(
         self,
@@ -548,6 +543,12 @@ class CAREamist:
         Test-time augmentation (TTA) can be switched off using the `tta_transforms`
         parameter.
 
+        Note that if you are using a UNet model and tiling, the tile size must be
+        divisible in every dimension by 2**d, where d is the depth of the model. This
+        avoids artefacts arising from the broken shift invariance induced by the
+        pooling layers of the UNet. If your image has less dimensions, as it may
+        happen in the Z dimension, consider padding your image.
+
         Parameters
         ----------
         source : Union[CAREamicsClay, Path, str, np.ndarray]
@@ -602,7 +603,7 @@ class CAREamist:
             )
             # create predict config, reuse training config if parameters missing
             prediction_config = create_inference_configuration(
-                training_configuration=self.cfg,
+                configuration=self.cfg,
                 tile_size=tile_size,
                 tile_overlap=tile_overlap,
                 data_type=data_type,
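
The note added to `predict` encodes a simple rule: with d pooling layers, a UNet downsamples by 2**d, so every tile dimension must be a multiple of 2**d. A standalone Python sketch of that arithmetic (illustrative only, not code from the package):

def check_tile_size(tile_size, depth):
    """Raise if any tile dimension is not a multiple of 2**depth."""
    increment = 2**depth  # one factor of 2 per pooling layer
    for i, t in enumerate(tile_size):
        if t % increment != 0:
            raise ValueError(
                f"Tile size {t} along axis {i} is not divisible by {increment}."
            )

check_tile_size([64, 64], depth=2)  # passes: 64 is a multiple of 4
check_tile_size([64, 60], depth=3)  # raises: 60 is not a multiple of 8

The same check now runs inside create_inference_configuration (see configuration_factory.py below).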

careamics/config/__init__.py CHANGED
@@ -1,6 +1,5 @@
 """Configuration module."""
 
-
 __all__ = [
     "AlgorithmConfig",
     "DataConfig",

careamics/config/architectures/unet_model.py CHANGED
@@ -39,6 +39,7 @@ class UNetModel(ArchitectureModel):
         "None", "Sigmoid", "Softmax", "Tanh", "ReLU", "LeakyReLU"
     ] = Field(default="None", validate_default=True)
     n2v2: bool = Field(default=False, validate_default=True)
+    independent_channels: bool = Field(default=True, validate_default=True)
 
     @field_validator("num_channels_init")
     @classmethod
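
This new field is what the factory functions below populate through model_kwargs. A minimal usage sketch (assuming SupportedArchitecture.UNET.value resolves to the string "UNet"; illustrative, not package code):

from careamics.config.architectures import UNetModel

# process channels jointly instead of as independent single-channel images
model = UNetModel(architecture="UNet", independent_channels=False)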

careamics/config/callback_model.py CHANGED
@@ -1,4 +1,5 @@
 """Checkpoint saving configuration."""
+
 from __future__ import annotations
 
 from datetime import timedelta

careamics/config/configuration_example.py CHANGED
@@ -57,11 +57,9 @@ def full_configuration_example() -> Configuration:
         },
         {
             "name": SupportedTransform.NDFLIP.value,
-            "is_3D": False,
         },
         {
             "name": SupportedTransform.XY_RANDOM_ROTATE90.value,
-            "is_3D": False,
         },
         {
             "name": SupportedTransform.N2V_MANIPULATE.value,

careamics/config/configuration_factory.py CHANGED
@@ -2,8 +2,6 @@
 
 from typing import Any, Dict, List, Literal, Optional, Tuple, Union
 
-from albumentations import Compose
-
 from .algorithm_model import AlgorithmConfig
 from .architectures import UNetModel
 from .configuration_model import Configuration
@@ -28,8 +26,10 @@ def _create_supervised_configuration(
     batch_size: int,
     num_epochs: int,
     use_augmentations: bool = True,
+    independent_channels: bool = False,
     loss: Literal["mae", "mse"] = "mae",
-    n_channels: int = -1,
+    n_channels_in: int = 1,
+    n_channels_out: int = 1,
     logger: Literal["wandb", "tensorboard", "none"] = "none",
     model_kwargs: Optional[dict] = None,
 ) -> Configuration:
@@ -54,10 +54,14 @@
         Number of epochs.
     use_augmentations : bool, optional
         Whether to use augmentations, by default True.
+    independent_channels : bool, optional
+        Whether to train all channels independently, by default False.
     loss : Literal["mae", "mse"], optional
         Loss function to use, by default "mae".
-    n_channels : int, optional
-        Number of channels (in and out), by default -1.
+    n_channels_in : int, optional
+        Number of channels in, by default 1.
+    n_channels_out : int, optional
+        Number of channels out, by default 1.
     logger : Literal["wandb", "tensorboard", "none"], optional
         Logger to use, by default "none".
     model_kwargs : dict, optional
@@ -69,23 +73,24 @@
         Configuration for training CARE or Noise2Noise.
     """
     # if there are channels, we need to specify their number
-    if "C" in axes and n_channels == 1:
+    if "C" in axes and n_channels_in == 1:
         raise ValueError(
-            f"Number of channels must be specified when using channels "
-            f"(got {n_channels} channel)."
+            f"Number of channels in must be specified when using channels "
+            f"(got {n_channels_in} channel)."
         )
-    elif "C" not in axes and n_channels > 1:
+    elif "C" not in axes and n_channels_in > 1:
         raise ValueError(
             f"C is not present in the axes, but number of channels is specified "
-            f"(got {n_channels} channel)."
+            f"(got {n_channels_in} channels)."
         )
 
     # model
     if model_kwargs is None:
         model_kwargs = {}
     model_kwargs["conv_dims"] = 3 if "Z" in axes else 2
-    model_kwargs["in_channels"] = n_channels
-    model_kwargs["num_classes"] = n_channels
+    model_kwargs["in_channels"] = n_channels_in
+    model_kwargs["num_classes"] = n_channels_out
+    model_kwargs["independent_channels"] = independent_channels
 
     unet_model = UNetModel(
         architecture=SupportedArchitecture.UNET.value,
@@ -154,8 +159,10 @@ def create_care_configuration(
     batch_size: int,
     num_epochs: int,
     use_augmentations: bool = True,
+    independent_channels: bool = False,
     loss: Literal["mae", "mse"] = "mae",
-    n_channels: int = 1,
+    n_channels_in: int = 1,
+    n_channels_out: int = -1,
     logger: Literal["wandb", "tensorboard", "none"] = "none",
     model_kwargs: Optional[dict] = None,
 ) -> Configuration:
@@ -165,10 +172,16 @@
     If "Z" is present in `axes`, then `path_size` must be a list of length 3, otherwise
     2.
 
-    If "C" is present in `axes`, then you need to set `n_channels` to the number of
+    If "C" is present in `axes`, then you need to set `n_channels_in` to the number of
     channels. Likewise, if you set the number of channels, then "C" must be present in
     `axes`.
 
+    To set the number of output channels, use the `n_channels_out` parameter. If it is
+    not specified, it will be assumed to be equal to `n_channels_in`.
+
+    By default, all channels are trained together. To train all channels independently,
+    set `independent_channels` to True.
+
     By setting `use_augmentations` to False, the only transformation applied will be
     normalization.
 
@@ -188,10 +201,14 @@
         Number of epochs.
     use_augmentations : bool, optional
         Whether to use augmentations, by default True.
+    independent_channels : bool, optional
+        Whether to train all channels independently, by default False.
     loss : Literal["mae", "mse"], optional
         Loss function to use, by default "mae".
-    n_channels : int, optional
-        Number of channels (in and out), by default 1.
+    n_channels_in : int, optional
+        Number of channels in, by default 1.
+    n_channels_out : int, optional
+        Number of channels out, by default -1.
     logger : Literal["wandb", "tensorboard", "none"], optional
         Logger to use, by default "none".
     model_kwargs : dict, optional
@@ -202,6 +219,9 @@
     Configuration
         Configuration for training CARE.
     """
+    if n_channels_out == -1:
+        n_channels_out = n_channels_in
+
     return _create_supervised_configuration(
         algorithm="care",
         experiment_name=experiment_name,
@@ -211,9 +231,10 @@
         batch_size=batch_size,
         num_epochs=num_epochs,
         use_augmentations=use_augmentations,
+        independent_channels=independent_channels,
         loss=loss,
-        # TODO in the future we might support different in and out channels for CARE
-        n_channels=n_channels,
+        n_channels_in=n_channels_in,
+        n_channels_out=n_channels_out,
         logger=logger,
         model_kwargs=model_kwargs,
     )
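
With the new signature, a CARE configuration mapping three input channels to a single output channel could be created as follows (a usage sketch based on the parameters shown above; patch_size and the other values are illustrative):

from careamics.config.configuration_factory import create_care_configuration

config = create_care_configuration(
    experiment_name="care_experiment",
    data_type="array",
    axes="YXC",
    patch_size=[64, 64],
    batch_size=32,
    num_epochs=100,
    n_channels_in=3,   # required because "C" is present in the axes
    n_channels_out=1,  # defaults to -1, meaning equal to n_channels_in
)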
@@ -227,6 +248,7 @@ def create_n2n_configuration(
     batch_size: int,
     num_epochs: int,
     use_augmentations: bool = True,
+    independent_channels: bool = False,
     loss: Literal["mae", "mse"] = "mae",
     n_channels: int = 1,
     logger: Literal["wandb", "tensorboard", "none"] = "none",
@@ -242,6 +264,9 @@
     channels. Likewise, if you set the number of channels, then "C" must be present in
     `axes`.
 
+    By default, all channels are trained together. To train all channels independently,
+    set `independent_channels` to True.
+
     By setting `use_augmentations` to False, the only transformation applied will be
     normalization.
 
@@ -261,6 +286,8 @@
         Number of epochs.
     use_augmentations : bool, optional
         Whether to use augmentations, by default True.
+    independent_channels : bool, optional
+        Whether to train all channels independently, by default False.
     loss : Literal["mae", "mse"], optional
         Loss function to use, by default "mae".
     n_channels : int, optional
@@ -284,8 +311,10 @@
         batch_size=batch_size,
         num_epochs=num_epochs,
         use_augmentations=use_augmentations,
+        independent_channels=independent_channels,
         loss=loss,
-        n_channels=n_channels,
+        n_channels_in=n_channels,
+        n_channels_out=n_channels,
         logger=logger,
         model_kwargs=model_kwargs,
     )
@@ -299,6 +328,7 @@ def create_n2v_configuration(
     batch_size: int,
     num_epochs: int,
     use_augmentations: bool = True,
+    independent_channels: bool = True,
     use_n2v2: bool = False,
     n_channels: int = 1,
     roi_size: int = 11,
@@ -320,11 +350,14 @@
     or horizontal correlations are present in the noise; it applies an additional mask
     to the manipulated pixel neighbors.
 
+    If "Z" is present in `axes`, then `path_size` must be a list of length 3, otherwise
+    2.
+
     If "C" is present in `axes`, then you need to set `n_channels` to the number of
     channels.
 
-    If "Z" is present in `axes`, then `path_size` must be a list of length 3, otherwise
-    2.
+    By default, all channels are trained independently. To train all channels together,
+    set `independent_channels` to False.
 
     By setting `use_augmentations` to False, the only transformations applied will be
     normalization and N2V manipulation.
@@ -356,6 +389,8 @@
         Number of epochs.
     use_augmentations : bool, optional
         Whether to use augmentations, by default True.
+    independent_channels : bool, optional
+        Whether to train all channels together, by default True.
     use_n2v2 : bool, optional
         Whether to use N2V2, by default False.
     n_channels : int, optional
@@ -414,8 +449,20 @@
     ...     struct_n2v_span=7
     ... )
 
-    If you are training multiple channels together, then you need to specify the number
-    of channels:
+    If you are training multiple channels independently, then you need to specify the
+    number of channels:
+    >>> config = create_n2v_configuration(
+    ...     experiment_name="n2v_experiment",
+    ...     data_type="array",
+    ...     axes="YXC",
+    ...     patch_size=[64, 64],
+    ...     batch_size=32,
+    ...     num_epochs=100,
+    ...     n_channels=3
+    ... )
+
+    If instead you want to train multiple channels together, you need to turn off the
+    `independent_channels` parameter:
     >>> config = create_n2v_configuration(
     ...     experiment_name="n2v_experiment",
     ...     data_type="array",
@@ -423,6 +470,7 @@
     ...     patch_size=[64, 64],
     ...     batch_size=32,
     ...     num_epochs=100,
+    ...     independent_channels=False,
     ...     n_channels=3
     ... )
 
@@ -457,6 +505,7 @@
     model_kwargs["conv_dims"] = 3 if "Z" in axes else 2
     model_kwargs["in_channels"] = n_channels
     model_kwargs["num_classes"] = n_channels
+    model_kwargs["independent_channels"] = independent_channels
 
     unet_model = UNetModel(
         architecture=SupportedArchitecture.UNET.value,
@@ -493,9 +542,11 @@
     # n2v2 and structn2v
     nv2_transform = {
         "name": SupportedTransform.N2V_MANIPULATE.value,
-        "strategy": SupportedPixelManipulation.MEDIAN.value
-        if use_n2v2
-        else SupportedPixelManipulation.UNIFORM.value,
+        "strategy": (
+            SupportedPixelManipulation.MEDIAN.value
+            if use_n2v2
+            else SupportedPixelManipulation.UNIFORM.value
+        ),
         "roi_size": roi_size,
         "masked_pixel_percentage": masked_pixel_percentage,
         "struct_mask_axis": struct_n2v_axis,
@@ -530,14 +581,13 @@
     return configuration
 
 
-# TODO add tests
 def create_inference_configuration(
-    training_configuration: Configuration,
+    configuration: Configuration,
     tile_size: Optional[Tuple[int, ...]] = None,
     tile_overlap: Optional[Tuple[int, ...]] = None,
     data_type: Optional[Literal["array", "tiff", "custom"]] = None,
     axes: Optional[str] = None,
-    transforms: Optional[Union[List[Dict[str, Any]], Compose]] = None,
+    transforms: Optional[Union[List[Dict[str, Any]]]] = None,
     tta_transforms: bool = True,
     batch_size: Optional[int] = 1,
 ) -> InferenceConfig:
@@ -549,8 +599,8 @@
 
     Parameters
     ----------
-    training_configuration : Configuration
-        Configuration used for training.
+    configuration : Configuration
+        Global configuration.
     tile_size : Tuple[int, ...], optional
         Size of the tiles.
     tile_overlap : Tuple[int, ...], optional
@@ -559,7 +609,7 @@
         Type of the data, by default "tiff".
     axes : str, optional
         Axes of the data, by default "YX".
-    transforms : List[Dict[str, Any]] or Compose, optional
+    transforms : List[Dict[str, Any]], optional
         Transformations to apply to the data, by default None.
     tta_transforms : bool, optional
         Whether to apply test-time augmentations, by default True.
@@ -569,14 +619,12 @@
     Returns
     -------
     InferenceConfiguration
-        Configuration for inference with N2V.
+        Configuration used to configure CAREamicsPredictData.
     """
-    if (
-        training_configuration.data_config.mean is None
-        or training_configuration.data_config.std is None
-    ):
-        raise ValueError("Mean and std must be provided in the training configuration.")
+    if configuration.data_config.mean is None or configuration.data_config.std is None:
+        raise ValueError("Mean and std must be provided in the configuration.")
 
+    # minimum transform
     if transforms is None:
         transforms = [
             {
@@ -584,13 +632,35 @@
             },
         ]
 
+    # tile size for UNets
+    if tile_size is not None:
+        model = configuration.algorithm_config.model
+
+        if model.architecture == SupportedArchitecture.UNET.value:
+            # tile size must be equal to k*2^n, where n is the number of pooling layers
+            # (equal to the depth) and k is an integer
+            depth = model.depth
+            tile_increment = 2**depth
+
+            for i, t in enumerate(tile_size):
+                if t % tile_increment != 0:
+                    raise ValueError(
+                        f"Tile size must be divisible by {tile_increment} along all "
+                        f"axes (got {t} for axis {i}). If your image size is smaller "
+                        f"along one axis (e.g. Z), consider padding the image."
+                    )
+
+        # tile overlaps must be specified
+        if tile_overlap is None:
+            raise ValueError("Tile overlap must be specified.")
+
     return InferenceConfig(
-        data_type=data_type or training_configuration.data_config.data_type,
+        data_type=data_type or configuration.data_config.data_type,
         tile_size=tile_size,
         tile_overlap=tile_overlap,
-        axes=axes or training_configuration.data_config.axes,
-        mean=training_configuration.data_config.mean,
-        std=training_configuration.data_config.std,
+        axes=axes or configuration.data_config.axes,
+        mean=configuration.data_config.mean,
+        std=configuration.data_config.std,
         transforms=transforms,
         tta_transforms=tta_transforms,
         batch_size=batch_size,
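
Putting the new checks together: for a UNet of depth 2, each tile dimension must be a multiple of 2**2 = 4, and a tile overlap is now mandatory whenever a tile size is given. A usage sketch (assuming `config` is a trained Configuration with mean and std populated):

from careamics.config.configuration_factory import create_inference_configuration

inference_config = create_inference_configuration(
    configuration=config,   # parameter renamed from training_configuration
    tile_size=(256, 256),   # 256 % 4 == 0 on both axes
    tile_overlap=(48, 48),  # may no longer be None when tile_size is set
)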

careamics/config/configuration_model.py CHANGED
@@ -1,4 +1,5 @@
 """Pydantic CAREamics configuration."""
+
 from __future__ import annotations
 
 import re
@@ -238,25 +239,22 @@ class Configuration(BaseModel):
             Validated configuration.
         """
         if self.algorithm_config.algorithm == SupportedAlgorithm.N2V:
-            # if we have a list of transform (as opposed to Compose)
-            if self.data_config.has_transform_list():
-                # missing N2V_MANIPULATE
-                if not self.data_config.has_n2v_manipulate():
-                    self.data_config.transforms.append(
-                        N2VManipulateModel(
-                            name=SupportedTransform.N2V_MANIPULATE.value,
-                        )
+            # missing N2V_MANIPULATE
+            if not self.data_config.has_n2v_manipulate():
+                self.data_config.transforms.append(
+                    N2VManipulateModel(
+                        name=SupportedTransform.N2V_MANIPULATE.value,
                     )
+                )
 
-                median = SupportedPixelManipulation.MEDIAN.value
-                uniform = SupportedPixelManipulation.UNIFORM.value
-                strategy = median if self.algorithm_config.model.n2v2 else uniform
-                self.data_config.set_N2V2_strategy(strategy)
+            median = SupportedPixelManipulation.MEDIAN.value
+            uniform = SupportedPixelManipulation.UNIFORM.value
+            strategy = median if self.algorithm_config.model.n2v2 else uniform
+            self.data_config.set_N2V2_strategy(strategy)
         else:
-            # if we have a list of transform, remove N2V manipulate if present
-            if self.data_config.has_transform_list():
-                if self.data_config.has_n2v_manipulate():
-                    self.data_config.remove_n2v_manipulate()
+            # remove N2V manipulate if present
+            if self.data_config.has_n2v_manipulate():
+                self.data_config.remove_n2v_manipulate()
 
         return self
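
The net effect of this simplification: validation always treats data_config.transforms as a plain list, appending the N2V manipulation transform for N2V runs and removing it for everything else. A hedged illustration of the resulting invariant (the transform name string is assumed, not taken from the diff):

# config: a validated N2V Configuration
names = [t.name for t in config.data_config.transforms]
assert names[-1] == "N2VManipulate"  # always last after validation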