careamics 0.0.11__py3-none-any.whl → 0.0.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of careamics might be problematic.

Files changed (98)
  1. careamics/careamist.py +24 -7
  2. careamics/cli/utils.py +1 -1
  3. careamics/config/algorithms/n2v_algorithm_model.py +1 -1
  4. careamics/config/architectures/unet_model.py +3 -0
  5. careamics/config/callback_model.py +23 -34
  6. careamics/config/configuration.py +55 -4
  7. careamics/config/configuration_factories.py +288 -23
  8. careamics/config/data/__init__.py +2 -0
  9. careamics/config/data/data_model.py +41 -4
  10. careamics/config/data/ng_data_model.py +381 -0
  11. careamics/config/data/patching_strategies/__init__.py +14 -0
  12. careamics/config/data/patching_strategies/_overlapping_patched_model.py +103 -0
  13. careamics/config/data/patching_strategies/_patched_model.py +56 -0
  14. careamics/config/data/patching_strategies/random_patching_model.py +21 -0
  15. careamics/config/data/patching_strategies/sequential_patching_model.py +25 -0
  16. careamics/config/data/patching_strategies/tiled_patching_model.py +40 -0
  17. careamics/config/data/patching_strategies/whole_patching_model.py +12 -0
  18. careamics/config/inference_model.py +6 -3
  19. careamics/config/optimizer_models.py +1 -3
  20. careamics/config/support/supported_data.py +7 -0
  21. careamics/config/support/supported_patching_strategies.py +22 -0
  22. careamics/config/training_model.py +0 -2
  23. careamics/config/validators/validator_utils.py +4 -3
  24. careamics/dataset/dataset_utils/iterate_over_files.py +2 -2
  25. careamics/dataset/in_memory_dataset.py +2 -1
  26. careamics/dataset/iterable_dataset.py +2 -2
  27. careamics/dataset/iterable_pred_dataset.py +2 -2
  28. careamics/dataset/iterable_tiled_pred_dataset.py +2 -2
  29. careamics/dataset/patching/patching.py +3 -2
  30. careamics/dataset/tiling/lvae_tiled_patching.py +16 -6
  31. careamics/dataset/tiling/tiled_patching.py +2 -1
  32. careamics/dataset_ng/README.md +212 -0
  33. careamics/dataset_ng/dataset.py +229 -0
  34. careamics/dataset_ng/demos/bsd68_demo.ipynb +361 -0
  35. careamics/dataset_ng/demos/care_U2OS_demo.ipynb +330 -0
  36. careamics/dataset_ng/demos/demo_custom_image_stack.ipynb +734 -0
  37. careamics/dataset_ng/demos/demo_datamodule.ipynb +447 -0
  38. careamics/dataset_ng/{demo_dataset.ipynb → demos/demo_dataset.ipynb} +60 -53
  39. careamics/dataset_ng/{demo_patch_extractor.py → demos/demo_patch_extractor.py} +7 -9
  40. careamics/dataset_ng/demos/mouse_nuclei_demo.ipynb +292 -0
  41. careamics/dataset_ng/factory.py +451 -0
  42. careamics/dataset_ng/legacy_interoperability.py +170 -0
  43. careamics/dataset_ng/patch_extractor/__init__.py +3 -8
  44. careamics/dataset_ng/patch_extractor/demo_custom_image_stack_loader.py +7 -5
  45. careamics/dataset_ng/patch_extractor/image_stack/__init__.py +4 -1
  46. careamics/dataset_ng/patch_extractor/image_stack/czi_image_stack.py +360 -0
  47. careamics/dataset_ng/patch_extractor/image_stack/image_stack_protocol.py +5 -1
  48. careamics/dataset_ng/patch_extractor/image_stack/in_memory_image_stack.py +1 -1
  49. careamics/dataset_ng/patch_extractor/image_stack_loader.py +5 -75
  50. careamics/dataset_ng/patch_extractor/patch_extractor.py +5 -4
  51. careamics/dataset_ng/patch_extractor/patch_extractor_factory.py +114 -105
  52. careamics/dataset_ng/patching_strategies/__init__.py +6 -1
  53. careamics/dataset_ng/patching_strategies/patching_strategy_protocol.py +31 -0
  54. careamics/dataset_ng/patching_strategies/random_patching.py +5 -1
  55. careamics/dataset_ng/patching_strategies/sequential_patching.py +5 -5
  56. careamics/dataset_ng/patching_strategies/tiling_strategy.py +172 -0
  57. careamics/dataset_ng/patching_strategies/whole_sample.py +36 -0
  58. careamics/file_io/read/get_func.py +2 -1
  59. careamics/lightning/dataset_ng/__init__.py +1 -0
  60. careamics/lightning/dataset_ng/data_module.py +678 -0
  61. careamics/lightning/dataset_ng/lightning_modules/__init__.py +9 -0
  62. careamics/lightning/dataset_ng/lightning_modules/care_module.py +97 -0
  63. careamics/lightning/dataset_ng/lightning_modules/n2v_module.py +106 -0
  64. careamics/lightning/dataset_ng/lightning_modules/unet_module.py +212 -0
  65. careamics/lightning/lightning_module.py +5 -1
  66. careamics/lightning/predict_data_module.py +2 -1
  67. careamics/lightning/train_data_module.py +2 -1
  68. careamics/losses/loss_factory.py +2 -1
  69. careamics/lvae_training/dataset/__init__.py +8 -3
  70. careamics/lvae_training/dataset/config.py +3 -3
  71. careamics/lvae_training/dataset/ms_dataset_ref.py +1067 -0
  72. careamics/lvae_training/dataset/multich_dataset.py +46 -17
  73. careamics/lvae_training/dataset/multicrop_dset.py +196 -0
  74. careamics/lvae_training/dataset/types.py +3 -3
  75. careamics/lvae_training/dataset/utils/index_manager.py +259 -0
  76. careamics/lvae_training/eval_utils.py +93 -3
  77. careamics/model_io/bioimage/bioimage_utils.py +1 -1
  78. careamics/model_io/bioimage/model_description.py +1 -1
  79. careamics/model_io/bmz_io.py +1 -1
  80. careamics/model_io/model_io_utils.py +2 -2
  81. careamics/models/activation.py +2 -1
  82. careamics/prediction_utils/prediction_outputs.py +1 -1
  83. careamics/prediction_utils/stitch_prediction.py +1 -1
  84. careamics/transforms/compose.py +1 -0
  85. careamics/transforms/n2v_manipulate_torch.py +15 -9
  86. careamics/transforms/normalize.py +18 -7
  87. careamics/transforms/pixel_manipulation_torch.py +59 -92
  88. careamics/utils/lightning_utils.py +25 -11
  89. careamics/utils/metrics.py +2 -1
  90. careamics/utils/torch_utils.py +23 -0
  91. {careamics-0.0.11.dist-info → careamics-0.0.13.dist-info}/METADATA +12 -11
  92. {careamics-0.0.11.dist-info → careamics-0.0.13.dist-info}/RECORD +95 -69
  93. careamics/dataset_ng/dataset/__init__.py +0 -3
  94. careamics/dataset_ng/dataset/dataset.py +0 -184
  95. careamics/dataset_ng/demo_patch_extractor_factory.py +0 -37
  96. {careamics-0.0.11.dist-info → careamics-0.0.13.dist-info}/WHEEL +0 -0
  97. {careamics-0.0.11.dist-info → careamics-0.0.13.dist-info}/entry_points.txt +0 -0
  98. {careamics-0.0.11.dist-info → careamics-0.0.13.dist-info}/licenses/LICENSE +0 -0
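Most of the new surface in this release is the next-generation data pipeline (NGDataConfig, CareamicsDataModule, create_dataset and the patching-strategy models listed above); the notebook diffs below exercise it end to end. As a rough orientation only, a minimal prediction setup distilled from those notebook cells could look like the following sketch; the file path and the normalization statistics are placeholders, and defaults may differ in the released code.

from pathlib import Path

from careamics.config.data import NGDataConfig
from careamics.lightning.dataset_ng.data_module import CareamicsDataModule

# tiled patching configuration for prediction, mirroring the demo notebooks below
config = NGDataConfig(
    data_type="tiff",
    patching={"name": "tiled", "patch_size": (128, 128), "overlaps": (32, 32)},
    axes="YX",
    batch_size=8,
    image_means=[128.0],  # placeholder statistics
    image_stds=[30.0],    # placeholder statistics
)

data_module = CareamicsDataModule(
    data_config=config,
    pred_data=Path("path/to/test/images"),  # hypothetical path
)
data_module.setup("predict")
pred_batch = next(iter(data_module.predict_dataloader()))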
careamics/dataset_ng/demos/demo_datamodule.ipynb
@@ -0,0 +1,447 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from careamics.config.configuration_factories import (\n",
+ " _create_ng_data_configuration,\n",
+ " _list_spatial_augmentations,\n",
+ ")\n",
+ "from careamics.lightning.dataset_ng.data_module import CareamicsDataModule"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%load_ext autoreload\n",
+ "%autoreload 2\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from pathlib import Path\n",
+ "\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "import tifffile\n",
+ "from careamics_portfolio import PortfolioManager\n",
+ "\n",
+ "# instantiate data portfolio manage\n",
+ "portfolio = PortfolioManager()\n",
+ "\n",
+ "# and download the data\n",
+ "root_path = Path(\"./data\")\n",
+ "files = portfolio.denoising.N2V_BSD68.download(root_path)\n",
+ "\n",
+ "# create paths for the data\n",
+ "data_path = Path(root_path / \"denoising-N2V_BSD68.unzip/BSD68_reproducibility_data\")\n",
+ "train_path = data_path / \"train\"\n",
+ "val_path = data_path / \"val\"\n",
+ "test_path = data_path / \"test\" / \"images\"\n",
+ "gt_path = data_path / \"test\" / \"gt\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "image_std, image_mean = [], []\n",
+ "for file in train_path.glob(\"*.tiff\"):\n",
+ " image = tifffile.imread(file)\n",
+ " image_std.append(image.std())\n",
+ " image_mean.append(image.mean())\n",
+ "image_std, image_mean = np.mean(image_std), np.mean(image_mean)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# from path, train and val, no target\n",
+ "\n",
+ "config = _create_ng_data_configuration(\n",
+ " data_type=\"tiff\",\n",
+ " axes=\"SYX\",\n",
+ " patch_size=(64, 64),\n",
+ " batch_size=64,\n",
+ " augmentations=_list_spatial_augmentations()\n",
+ ")\n",
+ "\n",
+ "config.set_means_and_stds([image_mean], [image_std])\n",
+ "config.val_dataloader_params = {\"shuffle\": False}\n",
+ "\n",
+ "data_module = CareamicsDataModule(\n",
+ " data_config=config,\n",
+ " train_data=train_path,\n",
+ " val_data=val_path,\n",
+ ")\n",
+ "data_module.setup('fit')\n",
+ "data_module.setup('validate')\n",
+ "\n",
+ "train_batch = next(iter(data_module.train_dataloader()))\n",
+ "val_batch = next(iter(data_module.val_dataloader()))\n",
+ "\n",
+ "fig, ax = plt.subplots(1, 8, figsize=(10, 5))\n",
+ "\n",
+ "for i in range(8):\n",
+ " ax[i].imshow(train_batch[0].data[i][0].numpy(), cmap=\"gray\")\n",
+ "\n",
+ "\n",
+ "fig, ax = plt.subplots(1, 8, figsize=(10, 5))\n",
+ "for i in range(8):\n",
+ " ax[i].imshow(val_batch[0].data[i][0].numpy(), cmap=\"gray\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# from path, only predict\n",
+ "\n",
+ "from careamics.config.data import NGDataConfig\n",
+ "\n",
+ "config = NGDataConfig(\n",
+ " data_type=\"tiff\",\n",
+ " patching={\n",
+ " \"name\": \"tiled\",\n",
+ " \"patch_size\": (128, 128),\n",
+ " \"overlaps\": (32, 32)\n",
+ " },\n",
+ " axes=\"YX\",\n",
+ " batch_size=8,\n",
+ " image_means=[image_mean],\n",
+ " image_stds=[image_std]\n",
+ ")\n",
+ "\n",
+ "data_module = CareamicsDataModule(\n",
+ " data_config=config,\n",
+ " pred_data=test_path\n",
+ ")\n",
+ "data_module.setup('predict')\n",
+ "\n",
+ "pred_batch = next(iter(data_module.predict_dataloader()))\n",
+ "\n",
+ "fig, ax = plt.subplots(1, 8, figsize=(10, 5))\n",
+ "\n",
+ "for i in range(8):\n",
+ " ax[i].imshow(pred_batch[0].data[i][0].numpy(), cmap=\"gray\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# test from array"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "train_array = tifffile.imread(sorted(train_path.rglob('*'))[0])\n",
+ "val_array = tifffile.imread(sorted(val_path.rglob('*'))[0])\n",
+ "test_array = tifffile.imread(sorted(test_path.rglob('*'))[0])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# from array, train and val, no target\n",
+ "\n",
+ "config = _create_ng_data_configuration(\n",
+ " data_type=\"array\",\n",
+ " axes=\"SYX\",\n",
+ " patch_size=(64, 64),\n",
+ " batch_size=64,\n",
+ " augmentations=_list_spatial_augmentations()\n",
+ ")\n",
+ "\n",
+ "config.set_means_and_stds([image_mean], [image_std])\n",
+ "config.val_dataloader_params = {\"shuffle\": False}\n",
+ "\n",
+ "data_module = CareamicsDataModule(\n",
+ " data_config=config,\n",
+ " train_data=train_array,\n",
+ " val_data=val_array,\n",
+ ")\n",
+ "data_module.setup('fit')\n",
+ "data_module.setup('validate')\n",
+ "\n",
+ "train_batch = next(iter(data_module.train_dataloader()))\n",
+ "val_batch = next(iter(data_module.val_dataloader()))\n",
+ "\n",
+ "fig, ax = plt.subplots(1, 8, figsize=(10, 5))\n",
+ "\n",
+ "for i in range(8):\n",
+ " ax[i].imshow(train_batch[0].data[i][0].numpy(), cmap=\"gray\")\n",
+ "\n",
+ "\n",
+ "fig, ax = plt.subplots(1, 8, figsize=(10, 5))\n",
+ "for i in range(8):\n",
+ " ax[i].imshow(val_batch[0].data[i][0].numpy(), cmap=\"gray\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# test with target"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import skimage\n",
+ "\n",
+ "example_data = skimage.data.human_mitosis()\n",
+ "\n",
+ "markers = np.zeros_like(example_data)\n",
+ "markers[example_data < 25] = 1\n",
+ "markers[example_data > 50] = 2\n",
+ "\n",
+ "elevation_map = skimage.filters.sobel(example_data)\n",
+ "segmentation = skimage.segmentation.watershed(elevation_map, markers)\n",
+ "\n",
+ "fig, ax = plt.subplots(1, 2)\n",
+ "ax[0].imshow(example_data)\n",
+ "ax[1].imshow(segmentation)\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "config = _create_ng_data_configuration(\n",
+ " data_type=\"array\",\n",
+ " axes=\"YX\",\n",
+ " patch_size=(64, 64),\n",
+ " batch_size=64,\n",
+ " augmentations=_list_spatial_augmentations()\n",
+ ")\n",
+ "config.set_means_and_stds(\n",
+ " [example_data.mean()],\n",
+ " [example_data.std()],\n",
+ " [segmentation.mean()],\n",
+ " [segmentation.std()]\n",
+ ")\n",
+ "\n",
+ "data_module = CareamicsDataModule(\n",
+ " data_config=config,\n",
+ " train_data=[example_data],\n",
+ " train_data_target=[segmentation],\n",
+ " val_data=[example_data],\n",
+ " val_data_target=[segmentation]\n",
+ ")\n",
+ "data_module.setup('fit')\n",
+ "data_module.setup('validate')\n",
+ "\n",
+ "train_batch = next(iter(data_module.train_dataloader()))\n",
+ "val_batch = next(iter(data_module.val_dataloader()))\n",
+ "\n",
+ "fig, ax = plt.subplots(2, 8, figsize=(10, 3))\n",
+ "\n",
+ "for i in range(8):\n",
+ " ax[0][i].imshow(train_batch[0].data[i][0].numpy(), cmap=\"gray\")\n",
+ " ax[1][i].imshow(train_batch[1].data[i][0].numpy())\n",
+ "\n",
+ "\n",
+ "fig, ax = plt.subplots(2, 8, figsize=(10, 3))\n",
+ "for i in range(8):\n",
+ " ax[0][i].imshow(val_batch[0].data[i][0].numpy(), cmap=\"gray\")\n",
+ " ax[1][i].imshow(val_batch[1].data[i][0].numpy())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# from array, only predict, with target\n",
+ "\n",
+ "from careamics.config.data import NGDataConfig\n",
+ "\n",
+ "config = NGDataConfig(\n",
+ " data_type=\"array\",\n",
+ " patching={\n",
+ " \"name\": \"tiled\",\n",
+ " \"patch_size\": (128, 128),\n",
+ " \"overlaps\": (32, 32)\n",
+ " },\n",
+ " axes=\"YX\",\n",
+ " batch_size=8,\n",
+ " image_means=[image_mean],\n",
+ " image_stds=[image_std]\n",
+ ")\n",
+ "\n",
+ "data_module = CareamicsDataModule(\n",
+ " data_config=config,\n",
+ " pred_data=example_data,\n",
+ " pred_data_target=segmentation\n",
+ ")\n",
+ "data_module.setup('predict')\n",
+ "\n",
+ "pred_batch = next(iter(data_module.predict_dataloader()))\n",
+ "\n",
+ "fig, ax = plt.subplots(1, 8, figsize=(10, 5))\n",
+ "\n",
+ "for i in range(8):\n",
+ " ax[i].imshow(pred_batch[0].data[i][0].numpy(), cmap=\"gray\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# from list of paths"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "config = _create_ng_data_configuration(\n",
+ " data_type=\"tiff\",\n",
+ " axes=\"SYX\",\n",
+ " patch_size=(64, 64),\n",
+ " batch_size=64,\n",
+ " augmentations=_list_spatial_augmentations()\n",
+ ")\n",
+ "\n",
+ "config.set_means_and_stds([image_mean], [image_std])\n",
+ "config.val_dataloader_params = {\"shuffle\": False}\n",
+ "\n",
+ "data_module = CareamicsDataModule(\n",
+ " data_config=config,\n",
+ " train_data=sorted(train_path.glob(\"*.tiff\")),\n",
+ " val_data=sorted(val_path.glob(\"*.tiff\")),\n",
+ ")\n",
+ "data_module.setup('fit')\n",
+ "data_module.setup('validate')\n",
+ "\n",
+ "train_batch = next(iter(data_module.train_dataloader()))\n",
+ "val_batch = next(iter(data_module.val_dataloader()))\n",
+ "\n",
+ "fig, ax = plt.subplots(1, 8, figsize=(10, 5))\n",
+ "\n",
+ "for i in range(8):\n",
+ " ax[i].imshow(train_batch[0].data[i][0].numpy(), cmap=\"gray\")\n",
+ "\n",
+ "\n",
+ "fig, ax = plt.subplots(1, 8, figsize=(10, 5))\n",
+ "for i in range(8):\n",
+ " ax[i].imshow(val_batch[0].data[i][0].numpy(), cmap=\"gray\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# from custom"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "config = _create_ng_data_configuration(\n",
+ " data_type=\"custom\",\n",
+ " axes=\"SYX\",\n",
+ " patch_size=(64, 64),\n",
+ " batch_size=64,\n",
+ " augmentations=_list_spatial_augmentations()\n",
+ ")\n",
+ "\n",
+ "config.set_means_and_stds([image_mean], [image_std])\n",
+ "config.val_dataloader_params = {\"shuffle\": False}\n",
+ "\n",
+ "def read_source_func(path):\n",
+ " image = tifffile.imread(path)\n",
+ " image = 255 - image\n",
+ " return image\n",
+ "\n",
+ "data_module = CareamicsDataModule(\n",
+ " data_config=config,\n",
+ " train_data=sorted(train_path.glob(\"*.tiff\")),\n",
+ " val_data=sorted(val_path.glob(\"*.tiff\")),\n",
+ " read_source_func=read_source_func\n",
+ ")\n",
+ "data_module.setup('fit')\n",
+ "data_module.setup('validate')\n",
+ "\n",
+ "train_batch = next(iter(data_module.train_dataloader()))\n",
+ "val_batch = next(iter(data_module.val_dataloader()))\n",
+ "\n",
+ "fig, ax = plt.subplots(1, 8, figsize=(10, 5))\n",
+ "\n",
+ "for i in range(8):\n",
+ " ax[i].imshow(train_batch[0].data[i][0].numpy(), cmap=\"gray\")\n",
+ "\n",
+ "\n",
+ "fig, ax = plt.subplots(1, 8, figsize=(10, 5))\n",
+ "for i in range(8):\n",
+ " ax[i].imshow(val_batch[0].data[i][0].numpy(), cmap=\"gray\")"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "czi",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
careamics/dataset_ng/{demo_dataset.ipynb → demos/demo_dataset.ipynb}
@@ -14,8 +14,12 @@
  "import skimage\n",
  "import tifffile\n",
  "\n",
- "from careamics.config import create_n2n_configuration\n",
- "from careamics.dataset_ng.dataset.dataset import CareamicsDataset, Mode"
+ "from careamics.config.configuration_factories import (\n",
+ " _create_ng_data_configuration,\n",
+ " _list_spatial_augmentations,\n",
+ ")\n",
+ "from careamics.dataset_ng.dataset import Mode\n",
+ "from careamics.dataset_ng.factory import create_dataset"
  ]
  },
  {
@@ -57,37 +61,36 @@
  "source": [
  "# 1. Train val from an array\n",
  "\n",
- "train_data_config = create_n2n_configuration(\n",
- " \"test_exp\",\n",
+ "train_data_config = _create_ng_data_configuration(\n",
  " data_type=\"array\",\n",
  " axes=\"YX\",\n",
  " patch_size=(32, 32),\n",
  " batch_size=1,\n",
- " num_epochs=1,\n",
- ").data_config\n",
+ " augmentations=_list_spatial_augmentations()\n",
+ ")\n",
  "\n",
- "val_data_config = create_n2n_configuration(\n",
- " \"test_exp\",\n",
+ "val_data_config = _create_ng_data_configuration(\n",
  " data_type=\"array\",\n",
  " axes=\"YX\",\n",
  " patch_size=(32, 32),\n",
  " batch_size=1,\n",
- " num_epochs=1,\n",
  " augmentations=[],\n",
- ").data_config\n",
+ ")\n",
  "\n",
  "\n",
- "train_dataset = CareamicsDataset(\n",
- " data_config=train_data_config,\n",
+ "train_dataset = create_dataset(\n",
+ " config=train_data_config,\n",
  " mode=Mode.TRAINING,\n",
  " inputs=[example_data],\n",
  " targets=[segmentation],\n",
+ " in_memory=True,\n",
  ")\n",
- "val_dataset = CareamicsDataset(\n",
- " data_config=val_data_config,\n",
+ "val_dataset = create_dataset(\n",
+ " config=val_data_config,\n",
  " mode=Mode.VALIDATING,\n",
  " inputs=[example_data],\n",
  " targets=[segmentation],\n",
+ " in_memory=True,\n",
  ")\n",
  "\n",
  "fig, ax = plt.subplots(2, 5, figsize=(10, 5))\n",
@@ -119,32 +122,37 @@
  "tifffile.imwrite(\"example_data2.tiff\", example_data[:256, :256])\n",
  "tifffile.imwrite(\"example_target2.tiff\", segmentation[:256, :256])\n",
  "\n",
- "train_data_config = create_n2n_configuration(\n",
- " \"test_exp\",\n",
+ "train_data_config = _create_ng_data_configuration(\n",
  " data_type=\"tiff\",\n",
  " axes=\"YX\",\n",
  " patch_size=(32, 32),\n",
  " batch_size=1,\n",
- " num_epochs=1,\n",
- ").data_config\n",
+ " augmentations=_list_spatial_augmentations()\n",
+ ")\n",
  "\n",
- "val_data_config = create_n2n_configuration(\n",
- " \"test_exp\",\n",
+ "val_data_config = _create_ng_data_configuration(\n",
  " data_type=\"tiff\",\n",
  " axes=\"YX\",\n",
  " patch_size=(32, 32),\n",
  " batch_size=1,\n",
- " num_epochs=1,\n",
  " augmentations=[],\n",
- ").data_config\n",
+ ")\n",
  "\n",
  "data = sorted(Path(\"./\").glob(\"example_data*.tiff\"))\n",
  "targets = sorted(Path(\"./\").glob(\"example_target*.tiff\"))\n",
- "train_dataset = CareamicsDataset(\n",
- " data_config=train_data_config, inputs=data, targets=targets\n",
+ "train_dataset = create_dataset(\n",
+ " config=train_data_config,\n",
+ " mode=Mode.TRAINING,\n",
+ " inputs=data,\n",
+ " targets=targets,\n",
+ " in_memory=True,\n",
  ")\n",
- "val_dataset = CareamicsDataset(\n",
- " data_config=val_data_config, inputs=data, targets=targets\n",
+ "val_dataset = create_dataset(\n",
+ " config=val_data_config,\n",
+ " mode=Mode.VALIDATING,\n",
+ " inputs=data,\n",
+ " targets=targets,\n",
+ " in_memory=True,\n",
  ")\n",
  "\n",
  "fig, ax = plt.subplots(2, 5, figsize=(10, 5))\n",
@@ -171,26 +179,33 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "from careamics.config import InferenceConfig\n",
+ "from careamics.config.data import NGDataConfig\n",
  "\n",
- "prediction_config = InferenceConfig(\n",
+ "prediction_config = NGDataConfig(\n",
  " data_type=\"array\",\n",
- " tile_size=(32, 32),\n",
- " tile_overlap=(16, 16),\n",
+ " patching={\n",
+ " \"name\": \"tiled\",\n",
+ " \"patch_size\": (32, 32),\n",
+ " \"overlaps\": (16, 16),\n",
+ " },\n",
  " axes=\"YX\",\n",
- " image_means=(example_data.mean(),),\n",
- " image_stds=(example_data.std(),),\n",
- " tta_transforms=False,\n",
  " batch_size=1,\n",
+ " image_means=[example_data.mean()],\n",
+ " image_stds=[example_data.std()],\n",
  ")\n",
- "prediction_dataset = CareamicsDataset(\n",
- " data_config=prediction_config, mode=Mode.PREDICTING, inputs=[example_data]\n",
+ "\n",
+ "prediction_dataset = create_dataset(\n",
+ " config=prediction_config,\n",
+ " mode=Mode.PREDICTING,\n",
+ " inputs=[example_data],\n",
+ " targets=None,\n",
+ " in_memory=True,\n",
  ")\n",
  "\n",
  "fig, ax = plt.subplots(1, 5, figsize=(10, 5))\n",
  "ax[0].set_title(\"Prediction input\")\n",
  "for i in range(5):\n",
- " sample, _ = prediction_dataset[i]\n",
+ " sample, *_ = prediction_dataset[i]\n",
  " ax[i].imshow(sample.data[0])"
  ]
  },
@@ -209,26 +224,26 @@
  "metadata": {},
  "outputs": [],
  "source": [
- "train_data_config = create_n2n_configuration(\n",
- " \"test_exp\",\n",
+ "train_data_config = _create_ng_data_configuration(\n",
  " data_type=\"custom\",\n",
  " axes=\"YX\",\n",
  " patch_size=(32, 32),\n",
  " batch_size=1,\n",
- " num_epochs=1,\n",
- ").data_config\n",
+ " augmentations=_list_spatial_augmentations(),\n",
+ ")\n",
  "\n",
  "\n",
- "def read_data_func_test(data):\n",
+ "def read_data_func_test(example_data):\n",
  " return 255 - example_data\n",
  "\n",
  "\n",
  "fig, ax = plt.subplots(1, 5, figsize=(10, 5))\n",
- "train_dataset = CareamicsDataset(\n",
- " data_config=train_data_config,\n",
+ "train_dataset = create_dataset(\n",
+ " config=train_data_config,\n",
  " mode=Mode.TRAINING,\n",
  " inputs=[example_data],\n",
  " targets=[segmentation],\n",
+ " in_memory=True,\n",
  " read_func=read_data_func_test,\n",
  " read_kwargs={}\n",
  ")\n",
@@ -237,19 +252,11 @@
  " sample, _ = train_dataset[i]\n",
  " ax[i].imshow(sample.data[0])"
  ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "10",
- "metadata": {},
- "outputs": [],
- "source": []
  }
  ],
  "metadata": {
  "kernelspec": {
- "display_name": "Python 3",
+ "display_name": "czi",
  "language": "python",
  "name": "python3"
  },
@@ -263,7 +270,7 @@
  "name": "python",
  "nbconvert_exporter": "python",
  "pygments_lexer": "ipython3",
- "version": "3.11.11"
+ "version": "3.12.11"
  }
  },
  "nbformat": 4,
careamics/dataset_ng/{demo_patch_extractor.py → demos/demo_patch_extractor.py}
@@ -1,10 +1,12 @@
  # %%
  import numpy as np

- # %%
- from careamics.config.support import SupportedData
- from careamics.dataset_ng.patch_extractor import create_patch_extractor
  from careamics.dataset_ng.patch_extractor.image_stack import InMemoryImageStack
+
+ # %%
+ from careamics.dataset_ng.patch_extractor.patch_extractor_factory import (
+ create_array_extractor,
+ )
  from careamics.dataset_ng.patching_strategies import RandomPatchingStrategy

  # %%
@@ -30,12 +32,8 @@ print(target2)

  # %%
  # define example readers
- input_patch_extractor = create_patch_extractor(
- [array1, array2], axes="SYX", data_type=SupportedData.ARRAY
- )
- target_patch_extractor = create_patch_extractor(
- [target1, target2], axes="SYX", data_type=SupportedData.ARRAY
- )
+ input_patch_extractor = create_array_extractor([array1, array2], axes="SYX")
+ target_patch_extractor = create_array_extractor([target1, target2], axes="SYX")

  # %%
  # generate random patch specification
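For completeness, the demo_patch_extractor.py hunk above swaps the generic create_patch_extractor(..., data_type=SupportedData.ARRAY) call for the array-specific factory. A minimal sketch of the new call, using small random arrays as stand-ins, could look like this:

import numpy as np

from careamics.dataset_ng.patch_extractor.patch_extractor_factory import (
    create_array_extractor,
)

# two small stacks with sample, Y and X axes (shapes are illustrative)
array1 = np.random.rand(2, 32, 32)
array2 = np.random.rand(2, 32, 32)

# array-specific factory replacing the old create_patch_extractor call
input_patch_extractor = create_array_extractor([array1, array2], axes="SYX")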