careamics 0.1.0rc1__py3-none-any.whl → 0.1.0rc3__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of careamics might be problematic.

Files changed (132)
  1. careamics/__init__.py +14 -4
  2. careamics/callbacks/__init__.py +6 -0
  3. careamics/callbacks/hyperparameters_callback.py +42 -0
  4. careamics/callbacks/progress_bar_callback.py +57 -0
  5. careamics/careamist.py +761 -0
  6. careamics/config/__init__.py +27 -3
  7. careamics/config/algorithm_model.py +167 -0
  8. careamics/config/architectures/__init__.py +17 -0
  9. careamics/config/architectures/architecture_model.py +29 -0
  10. careamics/config/architectures/custom_model.py +150 -0
  11. careamics/config/architectures/register_model.py +101 -0
  12. careamics/config/architectures/unet_model.py +96 -0
  13. careamics/config/architectures/vae_model.py +39 -0
  14. careamics/config/callback_model.py +92 -0
  15. careamics/config/configuration_factory.py +460 -0
  16. careamics/config/configuration_model.py +596 -0
  17. careamics/config/data_model.py +555 -0
  18. careamics/config/inference_model.py +283 -0
  19. careamics/config/noise_models.py +162 -0
  20. careamics/config/optimizer_models.py +181 -0
  21. careamics/config/references/__init__.py +45 -0
  22. careamics/config/references/algorithm_descriptions.py +131 -0
  23. careamics/config/references/references.py +38 -0
  24. careamics/config/support/__init__.py +33 -0
  25. careamics/config/support/supported_activations.py +24 -0
  26. careamics/config/support/supported_algorithms.py +18 -0
  27. careamics/config/support/supported_architectures.py +18 -0
  28. careamics/config/support/supported_data.py +82 -0
  29. careamics/{dataset/extraction_strategy.py → config/support/supported_extraction_strategies.py} +5 -2
  30. careamics/config/support/supported_loggers.py +8 -0
  31. careamics/config/support/supported_losses.py +25 -0
  32. careamics/config/support/supported_optimizers.py +55 -0
  33. careamics/config/support/supported_pixel_manipulations.py +15 -0
  34. careamics/config/support/supported_struct_axis.py +19 -0
  35. careamics/config/support/supported_transforms.py +23 -0
  36. careamics/config/tile_information.py +104 -0
  37. careamics/config/training_model.py +65 -0
  38. careamics/config/transformations/__init__.py +14 -0
  39. careamics/config/transformations/n2v_manipulate_model.py +63 -0
  40. careamics/config/transformations/nd_flip_model.py +32 -0
  41. careamics/config/transformations/normalize_model.py +31 -0
  42. careamics/config/transformations/transform_model.py +44 -0
  43. careamics/config/transformations/xy_random_rotate90_model.py +29 -0
  44. careamics/config/validators/__init__.py +5 -0
  45. careamics/config/validators/validator_utils.py +100 -0
  46. careamics/conftest.py +26 -0
  47. careamics/dataset/__init__.py +5 -0
  48. careamics/dataset/dataset_utils/__init__.py +19 -0
  49. careamics/dataset/dataset_utils/dataset_utils.py +100 -0
  50. careamics/dataset/dataset_utils/file_utils.py +140 -0
  51. careamics/dataset/dataset_utils/read_tiff.py +61 -0
  52. careamics/dataset/dataset_utils/read_utils.py +25 -0
  53. careamics/dataset/dataset_utils/read_zarr.py +56 -0
  54. careamics/dataset/in_memory_dataset.py +321 -131
  55. careamics/dataset/iterable_dataset.py +416 -0
  56. careamics/dataset/patching/__init__.py +8 -0
  57. careamics/dataset/patching/patch_transform.py +44 -0
  58. careamics/dataset/patching/patching.py +212 -0
  59. careamics/dataset/patching/random_patching.py +190 -0
  60. careamics/dataset/patching/sequential_patching.py +206 -0
  61. careamics/dataset/patching/tiled_patching.py +158 -0
  62. careamics/dataset/patching/validate_patch_dimension.py +60 -0
  63. careamics/dataset/zarr_dataset.py +149 -0
  64. careamics/lightning_datamodule.py +665 -0
  65. careamics/lightning_module.py +292 -0
  66. careamics/lightning_prediction_datamodule.py +390 -0
  67. careamics/lightning_prediction_loop.py +116 -0
  68. careamics/losses/__init__.py +4 -1
  69. careamics/losses/loss_factory.py +24 -13
  70. careamics/losses/losses.py +65 -5
  71. careamics/losses/noise_model_factory.py +40 -0
  72. careamics/losses/noise_models.py +524 -0
  73. careamics/model_io/__init__.py +8 -0
  74. careamics/model_io/bioimage/__init__.py +11 -0
  75. careamics/model_io/bioimage/_readme_factory.py +120 -0
  76. careamics/model_io/bioimage/bioimage_utils.py +48 -0
  77. careamics/model_io/bioimage/model_description.py +318 -0
  78. careamics/model_io/bmz_io.py +231 -0
  79. careamics/model_io/model_io_utils.py +80 -0
  80. careamics/models/__init__.py +4 -1
  81. careamics/models/activation.py +35 -0
  82. careamics/models/layers.py +244 -0
  83. careamics/models/model_factory.py +21 -202
  84. careamics/models/unet.py +46 -20
  85. careamics/prediction/__init__.py +1 -3
  86. careamics/prediction/stitch_prediction.py +73 -0
  87. careamics/transforms/__init__.py +41 -0
  88. careamics/transforms/n2v_manipulate.py +113 -0
  89. careamics/transforms/nd_flip.py +93 -0
  90. careamics/transforms/normalize.py +109 -0
  91. careamics/transforms/pixel_manipulation.py +383 -0
  92. careamics/transforms/struct_mask_parameters.py +18 -0
  93. careamics/transforms/tta.py +74 -0
  94. careamics/transforms/xy_random_rotate90.py +95 -0
  95. careamics/utils/__init__.py +10 -13
  96. careamics/utils/base_enum.py +32 -0
  97. careamics/utils/context.py +22 -2
  98. careamics/utils/metrics.py +0 -46
  99. careamics/utils/path_utils.py +24 -0
  100. careamics/utils/ram.py +13 -0
  101. careamics/utils/receptive_field.py +102 -0
  102. careamics/utils/running_stats.py +43 -0
  103. careamics/utils/torch_utils.py +89 -56
  104. careamics-0.1.0rc3.dist-info/METADATA +122 -0
  105. careamics-0.1.0rc3.dist-info/RECORD +109 -0
  106. {careamics-0.1.0rc1.dist-info → careamics-0.1.0rc3.dist-info}/WHEEL +1 -1
  107. careamics/bioimage/__init__.py +0 -15
  108. careamics/bioimage/docs/Noise2Void.md +0 -5
  109. careamics/bioimage/docs/__init__.py +0 -1
  110. careamics/bioimage/io.py +0 -271
  111. careamics/config/algorithm.py +0 -231
  112. careamics/config/config.py +0 -296
  113. careamics/config/config_filter.py +0 -44
  114. careamics/config/data.py +0 -194
  115. careamics/config/torch_optim.py +0 -118
  116. careamics/config/training.py +0 -534
  117. careamics/dataset/dataset_utils.py +0 -115
  118. careamics/dataset/patching.py +0 -493
  119. careamics/dataset/prepare_dataset.py +0 -174
  120. careamics/dataset/tiff_dataset.py +0 -211
  121. careamics/engine.py +0 -954
  122. careamics/manipulation/__init__.py +0 -4
  123. careamics/manipulation/pixel_manipulation.py +0 -158
  124. careamics/prediction/prediction_utils.py +0 -102
  125. careamics/utils/ascii_logo.txt +0 -9
  126. careamics/utils/augment.py +0 -65
  127. careamics/utils/normalization.py +0 -55
  128. careamics/utils/validators.py +0 -156
  129. careamics/utils/wandb.py +0 -121
  130. careamics-0.1.0rc1.dist-info/METADATA +0 -80
  131. careamics-0.1.0rc1.dist-info/RECORD +0 -46
  132. {careamics-0.1.0rc1.dist-info → careamics-0.1.0rc3.dist-info}/licenses/LICENSE +0 -0
--- a/careamics/config/training.py
+++ /dev/null
@@ -1,534 +0,0 @@
-"""Training configuration."""
-from __future__ import annotations
-
-from typing import Dict, List
-
-from pydantic import (
-    BaseModel,
-    ConfigDict,
-    Field,
-    FieldValidationInfo,
-    field_validator,
-    model_validator,
-)
-from torch import optim
-
-from .config_filter import remove_default_optionals
-from .torch_optim import TorchLRScheduler, TorchOptimizer, get_parameters
-
-
-class Optimizer(BaseModel):
-    """
-    Torch optimizer.
-
-    Only parameters supported by the corresponding torch optimizer will be taken
-    into account. For more details, check:
-    https://pytorch.org/docs/stable/optim.html#algorithms
-
-    Note that mandatory parameters (see the specific Optimizer signature in the
-    link above) must be provided. For example, SGD requires `lr`.
-
-    Attributes
-    ----------
-    name : TorchOptimizer
-        Name of the optimizer.
-    parameters : dict
-        Parameters of the optimizer (see torch documentation).
-    """
-
-    # Pydantic class configuration
-    model_config = ConfigDict(
-        use_enum_values=True,
-        validate_assignment=True,
-    )
-
-    # Mandatory field
-    name: TorchOptimizer
-
-    # Optional parameters
-    parameters: dict = {}
-
-    @field_validator("parameters")
-    def filter_parameters(cls, user_params: dict, values: FieldValidationInfo) -> Dict:
-        """
-        Validate optimizer parameters.
-
-        This method filters out unknown parameters, given the optimizer name.
-
-        Parameters
-        ----------
-        user_params : dict
-            Parameters passed on to the torch optimizer.
-        values : FieldValidationInfo
-            Pydantic field validation info, used to get the optimizer name.
-
-        Returns
-        -------
-        Dict
-            Filtered optimizer parameters.
-
-        Raises
-        ------
-        ValueError
-            If the optimizer name is not specified.
-        """
-        if "name" in values.data:
-            optimizer_name = values.data["name"]
-
-            # retrieve the corresponding optimizer class
-            optimizer_class = getattr(optim, optimizer_name)
-
-            # filter the user parameters according to the optimizer's signature
-            return get_parameters(optimizer_class, user_params)
-        else:
-            raise ValueError(
-                "Cannot validate optimizer parameters without `name`, check that it "
-                "has correctly been specified."
-            )
-
-    @model_validator(mode="after")
-    def sgd_lr_parameter(cls, optimizer: Optimizer) -> Optimizer:
-        """
-        Check that SGD optimizer has the mandatory `lr` parameter specified.
-
-        Parameters
-        ----------
-        optimizer : Optimizer
-            Optimizer to validate.
-
-        Returns
-        -------
-        Optimizer
-            Validated optimizer.
-
-        Raises
-        ------
-        ValueError
-            If the optimizer is SGD and the lr parameter is not specified.
-        """
-        if optimizer.name == TorchOptimizer.SGD and "lr" not in optimizer.parameters:
-            raise ValueError(
-                "SGD optimizer requires `lr` parameter, check that it has correctly "
-                "been specified in `parameters`."
-            )
-
-        return optimizer
-
-    def model_dump(
-        self, exclude_optionals: bool = True, *args: List, **kwargs: Dict
-    ) -> Dict:
-        """
-        Override model_dump method.
-
-        The purpose of this method is to ensure smooth export to yaml. It
-        includes:
-            - removing entries with None value.
-            - removing optional values if they have the default value.
-
-        Parameters
-        ----------
-        exclude_optionals : bool, optional
-            Whether to exclude optional arguments if they are default, by default True.
-        *args : List
-            Positional arguments, unused.
-        **kwargs : Dict
-            Keyword arguments, unused.
-
-        Returns
-        -------
-        dict
-            Dictionary containing the model parameters.
-        """
-        dictionary = super().model_dump(exclude_none=True)
-
-        if exclude_optionals:
-            # remove optional arguments if they are default
-            default_optionals: dict = {"parameters": {}}
-
-            remove_default_optionals(dictionary, default_optionals)
-
-        return dictionary
-
-
-class LrScheduler(BaseModel):
-    """
-    Torch learning rate scheduler.
-
-    Only parameters supported by the corresponding torch lr scheduler will be taken
-    into account. For more details, check:
-    https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
-
-    Note that mandatory parameters (see the specific LrScheduler signature in the
-    link above) must be provided. For example, StepLR requires `step_size`.
-
-    Attributes
-    ----------
-    name : TorchLRScheduler
-        Name of the learning rate scheduler.
-    parameters : dict
-        Parameters of the learning rate scheduler (see torch documentation).
-    """
-
-    # Pydantic class configuration
-    model_config = ConfigDict(
-        use_enum_values=True,
-        validate_assignment=True,
-    )
-
-    # Mandatory field
-    name: TorchLRScheduler
-
-    # Optional parameters
-    parameters: dict = {}
-
-    @field_validator("parameters")
-    def filter_parameters(cls, user_params: dict, values: FieldValidationInfo) -> Dict:
-        """
-        Validate lr scheduler parameters.
-
-        This method filters out unknown parameters, given the lr scheduler name.
-
-        Parameters
-        ----------
-        user_params : dict
-            Parameters passed on to the torch lr scheduler.
-        values : FieldValidationInfo
-            Pydantic field validation info, used to get the lr scheduler name.
-
-        Returns
-        -------
-        Dict
-            Filtered lr scheduler parameters.
-
-        Raises
-        ------
-        ValueError
-            If the lr scheduler name is not specified.
-        """
-        if "name" in values.data:
-            lr_scheduler_name = values.data["name"]
-
-            # retrieve the corresponding lr scheduler class
-            lr_scheduler_class = getattr(optim.lr_scheduler, lr_scheduler_name)
-
-            # filter the user parameters according to the lr scheduler's signature
-            return get_parameters(lr_scheduler_class, user_params)
-        else:
-            raise ValueError(
-                "Cannot validate lr scheduler parameters without `name`, check that it "
-                "has correctly been specified."
-            )
-
-    @model_validator(mode="after")
-    def step_lr_step_size_parameter(cls, lr_scheduler: LrScheduler) -> LrScheduler:
-        """
-        Check that StepLR lr scheduler has `step_size` parameter specified.
-
-        Parameters
-        ----------
-        lr_scheduler : LrScheduler
-            Lr scheduler to validate.
-
-        Returns
-        -------
-        LrScheduler
-            Validated lr scheduler.
-
-        Raises
-        ------
-        ValueError
-            If the lr scheduler is StepLR and the step_size parameter is not specified.
-        """
-        if (
-            lr_scheduler.name == TorchLRScheduler.StepLR
-            and "step_size" not in lr_scheduler.parameters
-        ):
-            raise ValueError(
-                "StepLR lr scheduler requires `step_size` parameter, check that it has "
-                "correctly been specified in `parameters`."
-            )
-
-        return lr_scheduler
-
-    def model_dump(
-        self, exclude_optionals: bool = True, *args: List, **kwargs: Dict
-    ) -> Dict:
-        """
-        Override model_dump method.
-
-        The purpose of this method is to ensure smooth export to yaml. It includes:
-            - removing entries with None value.
-            - removing optional values if they have the default value.
-
-        Parameters
-        ----------
-        exclude_optionals : bool, optional
-            Whether to exclude optional arguments if they are default, by default True.
-        *args : List
-            Positional arguments, unused.
-        **kwargs : Dict
-            Keyword arguments, unused.
-
-        Returns
-        -------
-        dict
-            Dictionary containing the model parameters.
-        """
-        dictionary = super().model_dump(exclude_none=True)
-
-        if exclude_optionals:
-            # remove optional arguments if they are default
-            default_optionals: dict = {"parameters": {}}
-            remove_default_optionals(dictionary, default_optionals)
-
-        return dictionary
-
-
-class AMP(BaseModel):
-    """
-    Automatic mixed precision (AMP) parameters.
-
-    See: https://pytorch.org/docs/stable/amp.html.
-
-    Attributes
-    ----------
-    use : bool, optional
-        Whether to use AMP or not, default False.
-    init_scale : int, optional
-        Initial scale used for loss scaling, default 1024.
-    """
-
-    model_config = ConfigDict(
-        validate_assignment=True,
-    )
-
-    use: bool = False
-
-    # TODO review init_scale and document better
-    init_scale: int = Field(default=1024, ge=512, le=65536)
-
-    @field_validator("init_scale")
-    def power_of_two(cls, scale: int) -> int:
-        """
-        Validate that init_scale is a power of two.
-
-        Parameters
-        ----------
-        scale : int
-            Initial scale used for loss scaling.
-
-        Returns
-        -------
-        int
-            Validated initial scale.
-
-        Raises
-        ------
-        ValueError
-            If the init_scale is not a power of two.
-        """
-        if not scale & (scale - 1) == 0:
-            raise ValueError(f"Init scale must be a power of two (got {scale}).")
-
-        return scale
-
-    def model_dump(
-        self, exclude_optionals: bool = True, *args: List, **kwargs: Dict
-    ) -> Dict:
-        """
-        Override model_dump method.
-
-        The purpose is to ensure export smooth import to yaml. It includes:
-            - remove entries with None value.
-            - remove optional values if they have the default value.
-
-        Parameters
-        ----------
-        exclude_optionals : bool, optional
-            Whether to exclude optional arguments if they are default, by default True.
-        *args : List
-            Positional arguments, unused.
-        **kwargs : Dict
-            Keyword arguments, unused.
-
-        Returns
-        -------
-        dict
-            Dictionary containing the model parameters.
-        """
-        dictionary = super().model_dump(exclude_none=True)
-
-        if exclude_optionals:
-            # remove optional arguments if they are default
-            defaults = {
-                "init_scale": 1024,
-            }
-
-            remove_default_optionals(dictionary, defaults)
-
-        return dictionary
-
-
-class Training(BaseModel):
-    """
-    Parameters related to the training.
-
-    Mandatory parameters are:
-        - num_epochs: number of epochs, greater than 0.
-        - patch_size: patch size, 2D or 3D, non-zero and divisible by 2.
-        - batch_size: batch size, greater than 0.
-        - optimizer: optimizer, see `Optimizer`.
-        - lr_scheduler: learning rate scheduler, see `LrScheduler`.
-        - augmentation: whether to use data augmentation or not (True or False).
-
-    The other fields are optional:
-        - use_wandb: whether to use wandb or not (default True).
-        - num_workers: number of workers (default 0).
-        - amp: automatic mixed precision parameters (disabled by default).
-
-    Attributes
-    ----------
-    num_epochs : int
-        Number of epochs, greater than 0.
-    patch_size : conlist(int, min_length=2, max_length=3)
-        Patch size, 2D or 3D, non-zero and divisible by 2.
-    batch_size : int
-        Batch size, greater than 0.
-    optimizer : Optimizer
-        Optimizer.
-    lr_scheduler : LrScheduler
-        Learning rate scheduler.
-    augmentation : bool
-        Whether to use data augmentation or not.
-    use_wandb : bool
-        Optional, whether to use wandb or not (default True).
-    num_workers : int
-        Optional, number of workers (default 0).
-    amp : AMP
-        Optional, automatic mixed precision parameters (disabled by default).
-    """
-
-    # Pydantic class configuration
-    model_config = ConfigDict(
-        use_enum_values=True,
-        validate_assignment=True,
-    )
-
-    # Mandatory fields
-    num_epochs: int
-    patch_size: List[int] = Field(..., min_length=2, max_length=3)
-    batch_size: int
-
-    optimizer: Optimizer
-    lr_scheduler: LrScheduler
-
-    augmentation: bool
-
-    # Optional fields
-    use_wandb: bool = False
-    num_workers: int = Field(default=0, ge=0)
-    amp: AMP = AMP()
-
-    @field_validator("num_epochs", "batch_size")
-    def greater_than_0(cls, val: int) -> int:
-        """
-        Validate number of epochs.
-
-        Number of epochs must be greater than 0.
-
-        Parameters
-        ----------
-        val : int
-            Number of epochs.
-
-        Returns
-        -------
-        int
-            Validated number of epochs.
-
-        Raises
-        ------
-        ValueError
-            If the number of epochs is 0.
-        """
-        if val < 1:
-            raise ValueError(f"Number of epochs must be greater than 0 (got {val}).")
-
-        return val
-
-    @field_validator("patch_size")
-    def all_elements_non_zero_divisible_by_2(cls, patch_list: List[int]) -> List[int]:
-        """
-        Validate patch size.
-
-        Patch size must be non-zero, positive and divisible by 2.
-
-        Parameters
-        ----------
-        patch_list : List[int]
-            Patch size.
-
-        Returns
-        -------
-        List[int]
-            Validated patch size.
-
-        Raises
-        ------
-        ValueError
-            If the patch size is 0.
-        ValueError
-            If the patch size is not divisible by 2.
-        """
-        for dim in patch_list:
-            if dim < 1:
-                raise ValueError(f"Patch size must be non-zero positive (got {dim}).")
-
-            if dim % 2 != 0:
-                raise ValueError(f"Patch size must be divisible by 2 (got {dim}).")
-
-        return patch_list
-
-    def model_dump(
-        self, exclude_optionals: bool = True, *args: List, **kwargs: Dict
-    ) -> Dict:
-        """
-        Override model_dump method.
-
-        The purpose is to ensure export smooth import to yaml. It includes:
-            - remove entries with None value.
-            - remove optional values if they have the default value.
-
-        Parameters
-        ----------
-        exclude_optionals : bool, optional
-            Whether to exclude optional arguments if they are default, by default True.
-        *args : List
-            Positional arguments, unused.
-        **kwargs : Dict
-            Keyword arguments, unused.
-
-        Returns
-        -------
-        dict
-            Dictionary containing the model parameters.
-        """
-        dictionary = super().model_dump(exclude_none=True)
-
-        dictionary["optimizer"] = self.optimizer.model_dump(exclude_optionals)
-        dictionary["lr_scheduler"] = self.lr_scheduler.model_dump(exclude_optionals)
-
-        if self.amp is not None:
-            dictionary["amp"] = self.amp.model_dump(exclude_optionals)
-
-        if exclude_optionals:
-            # remove optional arguments if they are default
-            defaults = {
-                "use_wandb": False,
-                "num_workers": 0,
-                "amp": AMP().model_dump(),
-            }
-
-            remove_default_optionals(dictionary, defaults)
-
-        return dictionary
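
For context, the sketch below shows how the removed rc1 configuration models fit together. It is a hypothetical reconstruction based only on the deleted code above; the `careamics.config.training` import path and the string optimizer/scheduler names are assumptions, and this is not the rc3 API.

# Hypothetical rc1-era usage, reconstructed from the deleted module above.
# Import path and optimizer/scheduler names are assumptions.
from careamics.config.training import AMP, LrScheduler, Optimizer, Training

training = Training(
    num_epochs=100,                      # greater_than_0 validator: must be > 0
    patch_size=[64, 64],                 # 2D or 3D, each dimension positive and even
    batch_size=8,                        # must be > 0
    optimizer=Optimizer(
        name="SGD",                      # SGD requires `lr` in `parameters`
        parameters={"lr": 1e-3},         # unknown keys are filtered via get_parameters
    ),
    lr_scheduler=LrScheduler(
        name="StepLR",                   # StepLR requires `step_size`
        parameters={"step_size": 10},
    ),
    augmentation=True,
    amp=AMP(use=True, init_scale=1024),  # power of two within [512, 65536]
)

# model_dump() drops None entries and default-valued optionals,
# keeping the exported YAML configuration minimal.
print(training.model_dump())
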
--- a/careamics/dataset/dataset_utils.py
+++ /dev/null
@@ -1,115 +0,0 @@
-"""Convenience methods for datasets."""
-import logging
-from pathlib import Path
-from typing import List, Union
-
-import numpy as np
-import tifffile
-
-
-def list_files(data_path: Union[str, Path], data_format: str) -> List[Path]:
-    """
-    Return a list of path to files in a directory.
-
-    Parameters
-    ----------
-    data_path : str
-        Path to the folder containing the data.
-    data_format : str
-        Extension of the files to load, without period, e.g. `tif`.
-
-    Returns
-    -------
-    List[Path]
-        List of pathlib.Path objects.
-    """
-    files = sorted(Path(data_path).rglob(f"*.{data_format}*"))
-    return files
-
-
-def _update_axes(array: np.ndarray, axes: str) -> np.ndarray:
-    """
-    Update axes of the sample to match the config axes.
-
-    This method concatenate the S and T axes.
-
-    Parameters
-    ----------
-    array : np.ndarray
-        Input array.
-    axes : str
-        Description of axes in format STCZYX.
-
-    Returns
-    -------
-    np.ndarray
-        Updated array.
-    """
-    # concatenate ST axes to N, return NCZYX
-    if ("S" in axes or "T" in axes) and array.dtype != "O":
-        new_axes_len = len(axes.replace("Z", "").replace("YX", ""))
-        # TODO test reshape as it can scramble data, moveaxis is probably better
-        array = array.reshape(-1, *array.shape[new_axes_len:]).astype(np.float32)
-
-    elif array.dtype == "O":
-        for i in range(len(array)):
-            array[i] = np.expand_dims(array[i], axis=0).astype(np.float32)
-
-    else:
-        array = np.expand_dims(array, axis=0).astype(np.float32)
-
-    return array
-
-
-def read_tiff(file_path: Path, axes: str) -> np.ndarray:
-    """
-    Read a tiff file and return a numpy array.
-
-    Parameters
-    ----------
-    file_path : Path
-        Path to a file.
-    axes : str
-        Description of axes in format STCZYX.
-
-    Returns
-    -------
-    np.ndarray
-        Resulting array.
-
-    Raises
-    ------
-    ValueError
-        If the file failed to open.
-    OSError
-        If the file failed to open.
-    ValueError
-        If the file is not a valid tiff.
-    ValueError
-        If the data dimensions are incorrect.
-    ValueError
-        If the axes length is incorrect.
-    """
-    if file_path.suffix[:4] == ".tif":
-        try:
-            sample = tifffile.imread(file_path)
-        except (ValueError, OSError) as e:
-            logging.exception(f"Exception in file {file_path}: {e}, skipping it.")
-            raise e
-    else:
-        raise ValueError(f"File {file_path} is not a valid tiff.")
-
-    sample = sample.squeeze()
-
-    if len(sample.shape) < 2 or len(sample.shape) > 4:
-        raise ValueError(
-            f"Incorrect data dimensions. Must be 2, 3 or 4 (got {sample.shape} for"
-            f"file {file_path})."
-        )
-
-    # check number of axes
-    if len(axes) != len(sample.shape):
-        raise ValueError(f"Incorrect axes length (got {axes} for file {file_path}).")
-    sample = _update_axes(sample, axes)
-
-    return sample
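
Likewise, here is a minimal sketch of how the deleted `dataset_utils` helpers were typically chained. The import path, folder layout and axes string are illustrative assumptions; the behaviour follows the removed code above.

# Hypothetical rc1-era usage of the deleted helpers.
from careamics.dataset.dataset_utils import list_files, read_tiff

# list_files returns a sorted list of Path objects matching the extension.
files = list_files("data/train", data_format="tif")

for file_path in files:
    # read_tiff checks the extension, the number of dimensions (2-4) and the
    # axes string length, then folds S/T axes into a leading sample axis.
    sample = read_tiff(file_path, axes="SYX")
    print(file_path.name, sample.shape, sample.dtype)  # float32 after _update_axes
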