darfix 4.2.0__py3-none-any.whl → 4.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (144) hide show
  1. darfix/core/data_selection.py +11 -2
  2. darfix/core/dataset.py +72 -157
  3. darfix/core/grainplot.py +44 -56
  4. darfix/core/{imageStack.py → image_stack.py} +9 -15
  5. darfix/core/moment_types.py +6 -0
  6. darfix/core/{noiseremoval.py → noise_removal.py} +25 -24
  7. darfix/core/noise_removal_type.py +14 -0
  8. darfix/core/positioners.py +6 -0
  9. darfix/core/rocking_curves.py +6 -3
  10. darfix/core/rocking_curves_map.py +1 -1
  11. darfix/core/{shiftcorrection.py → shift_correction.py} +1 -2
  12. darfix/core/state_of_operation.py +7 -46
  13. darfix/core/utils.py +0 -39
  14. darfix/dtypes.py +1 -9
  15. darfix/gui/{binningWidget.py → binning_widget.py} +2 -29
  16. darfix/gui/{blindSourceSeparationWidget.py → blind_source_separation_widget.py} +4 -16
  17. darfix/gui/{chooseDimensions.py → choose_dimensions.py} +1 -1
  18. darfix/gui/concatenate_scans.py +4 -4
  19. darfix/gui/data_selection/{hdf5_data_selection_widgets.py → hdf5_dataset_selection_widget.py} +3 -56
  20. darfix/gui/data_selection/line_edits.py +54 -8
  21. darfix/gui/data_selection/scan_selection_widgets.py +24 -11
  22. darfix/gui/data_selection/utils.py +11 -0
  23. darfix/gui/data_selection/{WorkingDirSelectionWidget.py → working_dir_selection_widget.py} +15 -14
  24. darfix/gui/{dimensionsWidget.py → dimensions_widget.py} +1 -1
  25. darfix/gui/{displayComponentsWidget.py → display_components_widget.py} +1 -1
  26. darfix/gui/{filterByDimension.py → filter_by_dimension.py} +1 -1
  27. darfix/gui/{grainplot/dimensionRangeSlider2D.py → grain_plot/dimension_range_slider_2d.py} +2 -2
  28. darfix/gui/{grainplot/grainPlotWidget.py → grain_plot/grain_plot_widget.py} +1 -1
  29. darfix/gui/{grainplot/mosaicityWidget.py → grain_plot/mosaicity_widget.py} +21 -23
  30. darfix/gui/{magnificationWidget.py → magnification_widget.py} +1 -1
  31. darfix/gui/{noiseremoval → noise_removal}/noise_removal_widget.py +12 -16
  32. darfix/gui/{noiseremoval → noise_removal}/operation_list_widget.py +2 -2
  33. darfix/gui/{noiseremoval → noise_removal}/parameters_widget.py +6 -6
  34. darfix/gui/{PCAWidget.py → pca_widget.py} +2 -4
  35. darfix/gui/{projectionWidget.py → projection_widget.py} +1 -1
  36. darfix/gui/rocking_curves/{rockingCurvesPlot.py → rocking_curves_plot.py} +13 -13
  37. darfix/gui/rocking_curves/{rockingCurvesWidget.py → rocking_curves_widget.py} +10 -18
  38. darfix/gui/{roiSelectionWidget.py → roi_selection_widget.py} +9 -101
  39. darfix/gui/{shiftcorrection/shiftCorrectionWidget.py → shift_correction/shift_correction_widget.py} +4 -7
  40. darfix/gui/utils/data_path_completer.py +7 -7
  41. darfix/gui/utils/data_path_selection.py +4 -4
  42. darfix/gui/utils/{rangeSlider.py → range_slider.py} +1 -1
  43. darfix/gui/{weakBeamWidget.py → weak_beam_widget.py} +13 -28
  44. darfix/gui/{zSumWidget.py → zsum_widget.py} +1 -2
  45. darfix/main.py +19 -3
  46. darfix/processing/rocking_curves.py +12 -13
  47. darfix/tasks/binning.py +6 -17
  48. darfix/tasks/blind_source_separation.py +121 -0
  49. darfix/tasks/blindsourceseparation.py +8 -131
  50. darfix/tasks/copy.py +0 -2
  51. darfix/tasks/data_partition.py +39 -0
  52. darfix/tasks/datapartition.py +8 -50
  53. darfix/tasks/dimension_definition.py +197 -0
  54. darfix/tasks/dimensiondefinition.py +8 -197
  55. darfix/tasks/grain_plot.py +93 -0
  56. darfix/tasks/grainplot.py +8 -103
  57. darfix/tasks/hdf5_data_selection.py +5 -11
  58. darfix/tasks/hdf5_scans_concatenation.py +4 -4
  59. darfix/tasks/noise_removal.py +88 -0
  60. darfix/tasks/noiseremoval.py +8 -86
  61. darfix/tasks/pca.py +1 -3
  62. darfix/tasks/projection.py +1 -6
  63. darfix/tasks/rocking_curves.py +10 -5
  64. darfix/tasks/roi.py +0 -2
  65. darfix/tasks/shift_correction.py +45 -0
  66. darfix/tasks/shiftcorrection.py +8 -43
  67. darfix/tasks/transformation.py +0 -2
  68. darfix/tasks/weak_beam.py +71 -0
  69. darfix/tasks/weakbeam.py +8 -67
  70. darfix/tasks/zsum.py +1 -1
  71. darfix/tests/conftest.py +1 -1
  72. darfix/tests/gui/test_data_path_completer.py +4 -4
  73. darfix/tests/gui/test_dimension_range_slider_2d.py +2 -2
  74. darfix/tests/gui/test_range_slider_with_spinboxes.py +1 -1
  75. darfix/tests/orange/test_ewoks.py +13 -9
  76. darfix/tests/orange/widgets/test_hdf5_data_selection.py +93 -0
  77. darfix/tests/tasks/test_data_copy.py +0 -2
  78. darfix/tests/tasks/{test_dimensiondefinition.py → test_dimension_definition.py} +1 -1
  79. darfix/tests/tasks/test_weak_beam.py +9 -0
  80. darfix/tests/test_components_matching.py +2 -2
  81. darfix/tests/test_dataset.py +2 -28
  82. darfix/tests/test_dimension.py +1 -1
  83. darfix/tests/test_generate_grain_maps_nxdict.py +4 -5
  84. darfix/tests/test_image_operations.py +4 -4
  85. darfix/tests/test_image_registration.py +17 -17
  86. darfix/tests/test_image_stack.py +2 -13
  87. darfix/tests/test_mask.py +1 -1
  88. darfix/tests/test_moments.py +2 -2
  89. darfix/tests/test_rocking_curves.py +1 -3
  90. darfix/tests/test_shift.py +7 -7
  91. darfix/tests/test_workflow.py +4 -4
  92. darfix/tests/test_zsum.py +3 -6
  93. {darfix-4.2.0.dist-info → darfix-4.3.0.dist-info}/METADATA +5 -3
  94. {darfix-4.2.0.dist-info → darfix-4.3.0.dist-info}/RECORD +141 -135
  95. orangecontrib/darfix/widgets/__init__.py +10 -1
  96. orangecontrib/darfix/widgets/binning.py +3 -3
  97. orangecontrib/darfix/widgets/blindsourceseparation.py +4 -6
  98. orangecontrib/darfix/widgets/concatenateHDF5.py +1 -1
  99. orangecontrib/darfix/widgets/datacopy.py +1 -1
  100. orangecontrib/darfix/widgets/datapartition.py +7 -102
  101. orangecontrib/darfix/widgets/{datasetWidgetBase.py → dataset_widget_base.py} +17 -5
  102. orangecontrib/darfix/widgets/dimensions.py +6 -6
  103. orangecontrib/darfix/widgets/grainplot.py +3 -3
  104. orangecontrib/darfix/widgets/hdf5dataselection.py +34 -14
  105. orangecontrib/darfix/widgets/metadata.py +2 -2
  106. orangecontrib/darfix/widgets/noiseremoval.py +4 -4
  107. orangecontrib/darfix/widgets/{operationWidgetBase.py → operation_widget_base.py} +2 -2
  108. orangecontrib/darfix/widgets/pca.py +2 -2
  109. orangecontrib/darfix/widgets/projection.py +2 -2
  110. orangecontrib/darfix/widgets/rockingcurves.py +5 -2
  111. orangecontrib/darfix/widgets/roiselection.py +24 -106
  112. orangecontrib/darfix/widgets/rsmhistogram.py +2 -2
  113. orangecontrib/darfix/widgets/shiftcorrection.py +3 -3
  114. orangecontrib/darfix/widgets/transformation.py +4 -4
  115. orangecontrib/darfix/widgets/weakbeam.py +20 -103
  116. orangecontrib/darfix/widgets/zsum.py +3 -5
  117. darfix/gui/dataPartitionWidget.py +0 -167
  118. darfix/gui/data_selection/DataSelectionBase.py +0 -59
  119. darfix/tests/tasks/test_datapartition.py +0 -52
  120. /darfix/core/{componentsMatching.py → components_matching.py} +0 -0
  121. /darfix/core/{datapathfinder.py → data_path_finder.py} +0 -0
  122. /darfix/core/{imageRegistration.py → image_registration.py} +0 -0
  123. /darfix/gui/{grainplot → grain_plot}/__init__.py +0 -0
  124. /darfix/gui/{grainplot → grain_plot}/_oridist_toolbar_buttons.py +0 -0
  125. /darfix/gui/{grainplot → grain_plot}/flashlight.py +0 -0
  126. /darfix/gui/{grainplot → grain_plot}/flashlight_mode_action.py +0 -0
  127. /darfix/gui/{grainplot → grain_plot}/oridist_toolbar.py +0 -0
  128. /darfix/gui/{grainplot → grain_plot}/utils.py +0 -0
  129. /darfix/gui/{metadataWidget.py → metadata_widget.py} +0 -0
  130. /darfix/gui/{operationProcess.py → parallel/operation_process.py} +0 -0
  131. /darfix/gui/{operationThread.py → parallel/operation_thread.py} +0 -0
  132. /darfix/gui/rocking_curves/{fitComboBox.py → fit_combobox.py} +0 -0
  133. /darfix/gui/{roiLimitsToolbar.py → roi_limits_toolbar.py} +0 -0
  134. /darfix/gui/{rsmHistogramWidget.py → rsm_histogram_widget.py} +0 -0
  135. /darfix/gui/{rsmWidget.py → rsm_widget.py} +0 -0
  136. /darfix/gui/{shiftcorrection → shift_correction}/__init__.py +0 -0
  137. /darfix/gui/{shiftcorrection/shiftInput.py → shift_correction/shift_input.py} +0 -0
  138. /darfix/gui/utils/{standardButtonBox.py → standard_buttonbox.py} +0 -0
  139. /darfix/processing/{imageOperations.py → image_operations.py} +0 -0
  140. /darfix/tests/{test_datapathfinder.py → test_data_path_finder.py} +0 -0
  141. {darfix-4.2.0.dist-info → darfix-4.3.0.dist-info}/WHEEL +0 -0
  142. {darfix-4.2.0.dist-info → darfix-4.3.0.dist-info}/entry_points.txt +0 -0
  143. {darfix-4.2.0.dist-info → darfix-4.3.0.dist-info}/licenses/LICENSE +0 -0
  144. {darfix-4.2.0.dist-info → darfix-4.3.0.dist-info}/top_level.txt +0 -0
@@ -1,52 +1,10 @@
1
- from __future__ import annotations
1
+ import warnings
2
2
 
3
- from ewokscore import Task
4
- from ewokscore.missing_data import MISSING_DATA
5
- from ewokscore.missing_data import MissingData
6
- from ewokscore.model import BaseInputModel
7
- from pydantic import ConfigDict
3
+ from .data_partition import DataPartition # noqa: F401
8
4
 
9
- from darfix import dtypes
10
-
11
-
12
- class Inputs(BaseInputModel):
13
- model_config = ConfigDict(use_attribute_docstrings=True)
14
- dataset: dtypes.Dataset
15
- """ Input dataset containing a stack of images """
16
- bins: int | MissingData = MISSING_DATA
17
- """ Number of bins to use for partitioning the data. Default is the number of frames in the dataset."""
18
- filter_bottom_bin_idx: int | MissingData = MISSING_DATA
19
- """ index of the bins to retrieve bottom threshold filter value. If not defined, no filtering is applied."""
20
- filter_top_bin_idx: int | MissingData = MISSING_DATA
21
- """ index of the bins to retrieve top threshold filter value. If not defined, no filtering is applied."""
22
-
23
-
24
- class DataPartition(
25
- Task,
26
- input_model=Inputs,
27
- output_names=["dataset"],
28
- ):
29
- """
30
- Filter frames with low intensity.
31
- """
32
-
33
- def run(self):
34
- dataset = self.inputs.dataset
35
- if not isinstance(dataset, dtypes.Dataset):
36
- raise TypeError(
37
- f"dataset is expected to be an instance of {dtypes.Dataset}. Got {type(dataset)}"
38
- )
39
-
40
- darfix_dataset = dataset.dataset
41
-
42
- indices, bg_indices = darfix_dataset.partition_by_intensity(
43
- bins=self.get_input_value("bins", None),
44
- bottom_bin=self.get_input_value("filter_bottom_bin_idx", None),
45
- top_bin=self.get_input_value("filter_top_bin_idx", None),
46
- )
47
- self.outputs.dataset = dtypes.Dataset(
48
- dataset=darfix_dataset,
49
- indices=indices,
50
- bg_indices=bg_indices,
51
- bg_dataset=dataset.bg_dataset,
52
- )
5
+ warnings.warn(
6
+ f"The '{__name__}' module is deprecated and will be removed in a future release. "
7
+ "Please replace module name `datapartition` by `data_partition`",
8
+ DeprecationWarning,
9
+ stacklevel=2,
10
+ )
@@ -0,0 +1,197 @@
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+ from typing import Any
5
+ from typing import Iterable
6
+
7
+ import numpy
8
+ from ewokscore import Task
9
+ from ewokscore.missing_data import MISSING_DATA
10
+ from ewokscore.missing_data import MissingData
11
+ from ewokscore.model import BaseInputModel
12
+ from pydantic import ConfigDict
13
+ from pydantic import Field
14
+
15
+ from darfix import dtypes
16
+ from darfix.core.dataset import ImageDataset
17
+ from darfix.core.dimension import AcquisitionDims
18
+ from darfix.core.dimension import Dimension
19
+ from darfix.core.dimension import find_dimensions_from_metadata
20
+ from darfix.core.fscan_parser import fscan_get_dimensions
21
+ from darfix.core.zigzag_mode import reorder_frames_of_zigzag_scan
22
+
23
+ _logger = logging.getLogger(__file__)
24
+
25
+
26
+ class Inputs(BaseInputModel):
27
+ model_config = ConfigDict(use_attribute_docstrings=True)
28
+ dataset: dtypes.Dataset
29
+ """ Input dataset containing a stack of images """
30
+ dims: dict[int, Any] | MissingData = Field(
31
+ default=MISSING_DATA,
32
+ examples=[
33
+ {
34
+ 0: {"name": "diffrx", "size": 5, "range": [0.0, 5.0, 1.0]},
35
+ 1: {"name": "diffry", "size": 10, "range": [0.0, 10.0, 1.0]},
36
+ }
37
+ ],
38
+ description="Dimensions to use for the dataset. If not provided, the task will try to find dimensions from metadata.",
39
+ )
40
+ tolerance: float | MissingData = MISSING_DATA
41
+ """Tolerance to use for finding dimensions from metadata. Default is 1e-9."""
42
+ is_zigzag: bool | MissingData = MISSING_DATA
43
+ """Set to True if the scan was a zigzag scan (slow motor moving back and forth). Defaults to False."""
44
+
45
+
46
+ class DimensionDefinition(
47
+ Task,
48
+ input_model=Inputs,
49
+ output_names=["dataset"],
50
+ ):
51
+ """
52
+ Fit dimension of given dataset.
53
+ If dims are provided, they will be used; otherwise 'find_dimensions' is called with the provided tolerance or the default one.
54
+ """
55
+
56
+ DEFAULT_TOLERANCE = 1e-9
57
+
58
+ def run(self):
59
+ if not isinstance(self.inputs.dataset, dtypes.Dataset):
60
+ raise TypeError(
61
+ f"'dataset' input should be an instance of {dtypes.Dataset}. Got {type(self.inputs.dataset)}"
62
+ )
63
+
64
+ dataset = self.inputs.dataset.dataset
65
+ if not isinstance(dataset, dtypes.ImageDataset):
66
+ raise TypeError(
67
+ f"self.inputs.dataset is expected to be an instance of {dtypes.ImageDataset}. Get {type(dataset)}"
68
+ )
69
+
70
+ fscan_parameters = fscan_get_dimensions(dataset)
71
+
72
+ dims = self._handle_dims(
73
+ fscan_parameters[1] if fscan_parameters else None,
74
+ dataset.metadata_dict,
75
+ user_input=self._get_dimensions_from_default_inputs(),
76
+ )
77
+ is_zigzag = self._handle_is_zigzag(
78
+ fscan_parameters[0] if fscan_parameters else None,
79
+ user_input=self.get_input_value("is_zigzag", None),
80
+ )
81
+
82
+ assert_dimensions_ok(dataset, dims.values())
83
+
84
+ if is_zigzag:
85
+ reorder_frames_of_zigzag_scan(dims, dataset)
86
+
87
+ # Reshape the dataset with the new dimensions
88
+
89
+ dataset.dims = dims
90
+ dataset.reshape_data()
91
+ self.outputs.dataset = dtypes.Dataset(
92
+ dataset=dataset,
93
+ bg_dataset=self.inputs.dataset.bg_dataset,
94
+ )
95
+
96
+ def _get_dimensions_from_default_inputs(self) -> AcquisitionDims | None:
97
+
98
+ raw_dims = self.get_input_value("dims", None)
99
+
100
+ if raw_dims is None:
101
+ return None
102
+
103
+ try:
104
+ return AcquisitionDims.from_dict(raw_dims)
105
+ except ValueError as e:
106
+ # TODO: Should we really silence the error here?
107
+ _logger.error(f"Encountered {e} when parsing default raw_dims: {raw_dims}")
108
+ return None
109
+
110
+ def _handle_is_zigzag(self, fscan_input: bool | None, user_input: bool | None):
111
+ if user_input is not None:
112
+ _logger.debug("is_zigzag set by user")
113
+ return user_input
114
+
115
+ if fscan_input is not None:
116
+ _logger.debug("is_zigzag set by fscan")
117
+ return fscan_input
118
+
119
+ _logger.debug("Using default value (False) for is_zigzag")
120
+ return False
121
+
122
+ def _handle_dims(
123
+ self,
124
+ fscan_dims: dict[int, Dimension] | None,
125
+ metadata_dict: dict,
126
+ user_input: AcquisitionDims | None,
127
+ ) -> AcquisitionDims:
128
+ if user_input is not None:
129
+ _logger.debug("dims set by user")
130
+ return user_input
131
+
132
+ if fscan_dims is not None:
133
+ _logger.debug("dims set by fscan")
134
+ return AcquisitionDims.from_dict(fscan_dims)
135
+
136
+ _logger.debug("dims computed from metadata")
137
+ return self._compute_dimensions_from_metadata(metadata_dict)
138
+
139
+ def _compute_dimensions_from_metadata(
140
+ self, metadata_dict: dict[str, numpy.ndarray]
141
+ ) -> AcquisitionDims:
142
+
143
+ tolerance = self.get_input_value("tolerance", self.DEFAULT_TOLERANCE)
144
+
145
+ return find_dimensions_from_metadata(
146
+ metadata_dict,
147
+ tolerance,
148
+ )
149
+
150
+
151
+ def assert_dimensions_ok(dataset: ImageDataset, dims: Iterable[Dimension]) -> None:
152
+
153
+ error_msg = get_dimensions_error(dataset, dims)
154
+ if error_msg:
155
+ raise RuntimeError(error_msg)
156
+
157
+
158
+ def get_dimensions_error(
159
+ dataset: ImageDataset, dims: Iterable[Dimension]
160
+ ) -> str | None:
161
+
162
+ shape = []
163
+
164
+ # Check name, min and max
165
+ for dimension in dims:
166
+
167
+ shape.append(dimension.size)
168
+
169
+ if dimension.name not in dataset.metadata_dict:
170
+ return f"Dimension with name '{dimension.name}' is not in the metadata."
171
+ metadata_min = numpy.min(dataset.metadata_dict[dimension.name])
172
+ metadata_max = numpy.max(dataset.metadata_dict[dimension.name])
173
+ tol = metadata_max - metadata_min
174
+ # Tolerance for out‑of‑range values.
175
+ # An erroneous value is considered invalid only if it exceeds the (min‑max) range by at least a factor of two.
176
+ # This tolerance prevents occasional false‑negative validation of scan‑parameter metadata.
177
+ metadata_min_with_tol = metadata_min - tol
178
+ metadata_max_with_tol = metadata_max + tol
179
+
180
+ if not (metadata_min_with_tol <= dimension.start <= metadata_max_with_tol):
181
+ return f"Dimension with name '{dimension.name}' start value = {dimension.start} but this is outside the dimension range [{metadata_min} → {metadata_max}]."
182
+
183
+ if not (metadata_min_with_tol <= dimension.stop <= metadata_max_with_tol):
184
+ return f"Dimension with name '{dimension.name}' stop value = {dimension.stop} but this is outside the dimension range [{metadata_min} → {metadata_max}]."
185
+
186
+ if len(shape) == 0:
187
+ return "None dimension are defined."
188
+
189
+ dims_size = numpy.prod(shape)
190
+
191
+ # Check size
192
+ if dims_size != dataset.nframes:
193
+ product_string = " x ".join([str(size) for size in shape])
194
+
195
+ return f"Dimensions do not match number of frames → {product_string} = {dims_size} ≠ {dataset.nframes}."
196
+
197
+ return None
@@ -1,199 +1,10 @@
1
- from __future__ import annotations
1
+ import warnings
2
2
 
3
- import logging
4
- from typing import Any
5
- from typing import Iterable
3
+ from .dimension_definition import DimensionDefinition # noqa: F401
6
4
 
7
- import numpy
8
- from ewokscore import Task
9
- from ewokscore.missing_data import MISSING_DATA
10
- from ewokscore.missing_data import MissingData
11
- from ewokscore.model import BaseInputModel
12
- from pydantic import ConfigDict
13
- from pydantic import Field
14
-
15
- from darfix import dtypes
16
- from darfix.core.dataset import ImageDataset
17
- from darfix.core.dimension import AcquisitionDims
18
- from darfix.core.dimension import Dimension
19
- from darfix.core.dimension import find_dimensions_from_metadata
20
- from darfix.core.fscan_parser import fscan_get_dimensions
21
- from darfix.core.zigzag_mode import reorder_frames_of_zigzag_scan
22
-
23
- _logger = logging.getLogger(__file__)
24
-
25
-
26
- class Inputs(BaseInputModel):
27
- model_config = ConfigDict(use_attribute_docstrings=True)
28
- dataset: dtypes.Dataset
29
- """ Input dataset containing a stack of images """
30
- dims: dict[int, Any] | MissingData = Field(
31
- default=MISSING_DATA,
32
- examples=[
33
- {
34
- 0: {"name": "diffrx", "size": 5, "range": [0.0, 5.0, 1.0]},
35
- 1: {"name": "diffry", "size": 10, "range": [0.0, 10.0, 1.0]},
36
- }
37
- ],
38
- description="Dimensions to use for the dataset. If not provided, the task will try to find dimensions from metadata.",
39
- )
40
- tolerance: float | MissingData = MISSING_DATA
41
- """Tolerance to use for finding dimensions from metadata. Default is 1e-9."""
42
- is_zigzag: bool | MissingData = MISSING_DATA
43
- """Set to True if the scan was a zigzag scan (slow motor moving back and forth). Defaults to False."""
44
-
45
-
46
- class DimensionDefinition(
47
- Task,
48
- input_model=Inputs,
49
- output_names=["dataset"],
50
- ):
51
- """
52
- Fit dimension of given dataset.
53
- If dims are provided then will use them. else will call 'find_dimensions' with the provided tolerance or the default one.
54
- """
55
-
56
- DEFAULT_TOLERANCE = 1e-9
57
-
58
- def run(self):
59
- if not isinstance(self.inputs.dataset, dtypes.Dataset):
60
- raise TypeError(
61
- f"'dataset' input should be an instance of {dtypes.Dataset}. Got {type(self.inputs.dataset)}"
62
- )
63
-
64
- dataset = self.inputs.dataset.dataset
65
- if not isinstance(dataset, dtypes.ImageDataset):
66
- raise TypeError(
67
- f"self.inputs.dataset is expected to be an instance of {dtypes.ImageDataset}. Get {type(dataset)}"
68
- )
69
-
70
- fscan_parameters = fscan_get_dimensions(dataset)
71
-
72
- dims = self._handle_dims(
73
- fscan_parameters[1] if fscan_parameters else None,
74
- dataset.metadata_dict,
75
- user_input=self._get_dimensions_from_default_inputs(),
76
- )
77
- is_zigzag = self._handle_is_zigzag(
78
- fscan_parameters[0] if fscan_parameters else None,
79
- user_input=self.get_input_value("is_zigzag", None),
80
- )
81
-
82
- assert_dimensions_ok(dataset, dims.values())
83
-
84
- if is_zigzag:
85
- reorder_frames_of_zigzag_scan(dims, dataset)
86
-
87
- # Reshape the dataset with the new dimensions
88
-
89
- dataset.dims = dims
90
- dataset.reshape_data()
91
- self.outputs.dataset = dtypes.Dataset(
92
- dataset=dataset,
93
- indices=self.inputs.dataset.indices,
94
- bg_indices=self.inputs.dataset.bg_indices,
95
- bg_dataset=self.inputs.dataset.bg_dataset,
96
- )
97
-
98
- def _get_dimensions_from_default_inputs(self) -> AcquisitionDims | None:
99
-
100
- raw_dims = self.get_input_value("dims", None)
101
-
102
- if raw_dims is None:
103
- return None
104
-
105
- try:
106
- return AcquisitionDims.from_dict(raw_dims)
107
- except ValueError as e:
108
- # TODO: Should we really silence the error here?
109
- _logger.error(f"Encountered {e} when parsing default raw_dims: {raw_dims}")
110
- return None
111
-
112
- def _handle_is_zigzag(self, fscan_input: bool | None, user_input: bool | None):
113
- if user_input:
114
- _logger.debug("is_zigzag set by user")
115
- return user_input
116
-
117
- if fscan_input:
118
- _logger.debug("is_zigzag set by fscan")
119
- return fscan_input
120
-
121
- _logger.debug("Using default value (False) for is_zigzag")
122
- return False
123
-
124
- def _handle_dims(
125
- self,
126
- fscan_dims: dict[int, Dimension] | None,
127
- metadata_dict: dict,
128
- user_input: AcquisitionDims | None,
129
- ) -> AcquisitionDims:
130
- if user_input is not None:
131
- _logger.debug("dims set by user")
132
- return user_input
133
-
134
- if fscan_dims is not None:
135
- _logger.debug("dims set by fscan")
136
- return AcquisitionDims.from_dict(fscan_dims)
137
-
138
- _logger.debug("dims computed from metadata")
139
- return self._compute_dimensions_from_metadata(metadata_dict)
140
-
141
- def _compute_dimensions_from_metadata(
142
- self, metadata_dict: dict[str, numpy.ndarray]
143
- ) -> AcquisitionDims:
144
-
145
- tolerance = self.get_input_value("tolerance", self.DEFAULT_TOLERANCE)
146
-
147
- return find_dimensions_from_metadata(
148
- metadata_dict,
149
- tolerance,
150
- )
151
-
152
-
153
- def assert_dimensions_ok(dataset: ImageDataset, dims: Iterable[Dimension]) -> None:
154
-
155
- error_msg = get_dimensions_error(dataset, dims)
156
- if error_msg:
157
- raise RuntimeError(error_msg)
158
-
159
-
160
- def get_dimensions_error(
161
- dataset: ImageDataset, dims: Iterable[Dimension]
162
- ) -> str | None:
163
-
164
- shape = []
165
-
166
- # Check name, min and max
167
- for dimension in dims:
168
-
169
- shape.append(dimension.size)
170
-
171
- if dimension.name not in dataset.metadata_dict:
172
- return f"Dimension with name '{dimension.name}' is not in the metadata."
173
- metadata_min = numpy.min(dataset.metadata_dict[dimension.name])
174
- metadata_max = numpy.max(dataset.metadata_dict[dimension.name])
175
- tol = metadata_max - metadata_min
176
- # Tolerance for out‑of‑range values.
177
- # An erroneous value is considered invalid only if it exceeds the (min‑max) range by at least a factor of two.
178
- # This tolerance prevents occasional false‑negative validation of scan‑parameter metadata.
179
- metadata_min_with_tol = metadata_min - tol
180
- metadata_max_with_tol = metadata_max + tol
181
-
182
- if not (metadata_min_with_tol <= dimension.start <= metadata_max_with_tol):
183
- return f"Dimension with name '{dimension.name}' start value = {dimension.start} but this is outside the dimension range [{metadata_min} → {metadata_max}]."
184
-
185
- if not (metadata_min_with_tol <= dimension.stop <= metadata_max_with_tol):
186
- return f"Dimension with name '{dimension.name}' stop value = {dimension.stop} but this is outside the dimension range [{metadata_min} → {metadata_max}]."
187
-
188
- if len(shape) == 0:
189
- return "None dimension are defined."
190
-
191
- dims_size = numpy.prod(shape)
192
-
193
- # Check size
194
- if dims_size != dataset.nframes:
195
- product_string = " x ".join([str(size) for size in shape])
196
-
197
- return f"Dimensions do not match number of frames → {product_string} ≠ {dataset.nframes}."
198
-
199
- return None
5
+ warnings.warn(
6
+ f"The '{__name__}' module is deprecated and will be removed in a future release. "
7
+ "Please replace module name `dimensiondefinition` by `dimension_definition`",
8
+ DeprecationWarning,
9
+ stacklevel=2,
10
+ )
@@ -0,0 +1,93 @@
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+ import os
5
+ from pathlib import Path
6
+ from typing import Literal
7
+
8
+ import numpy
9
+ from ewokscore import Task
10
+ from ewokscore.missing_data import MISSING_DATA
11
+ from ewokscore.missing_data import MissingData
12
+ from ewokscore.model import BaseInputModel
13
+ from pydantic import ConfigDict
14
+ from silx.io.dictdump import dicttonx
15
+ from silx.math.combo import min_max
16
+
17
+ from darfix import dtypes
18
+ from darfix.core.moment_types import MomentType
19
+
20
+ from ..core.grainplot import DimensionRange
21
+ from ..core.grainplot import GrainPlotData
22
+ from ..core.grainplot import GrainPlotMaps
23
+ from ..core.grainplot import generate_grain_maps_nxdict
24
+
25
+ _logger = logging.getLogger(__file__)
26
+
27
+
28
+ class Inputs(BaseInputModel):
29
+ model_config = ConfigDict(use_attribute_docstrings=True)
30
+ dataset: dtypes.Dataset
31
+ """ Input dataset containing a stack of images """
32
+ dimensions: tuple[int, int] = (0, 1)
33
+ """Dimension indices to use for the maps. Default is (0, 1), which means the first two dimensions."""
34
+ range: tuple[DimensionRange | None, DimensionRange | None] = (None, None)
35
+ """DimensionRange for the two dimensions. If None, use the Center of Mass min and max for both dimensions."""
36
+ save_maps: bool = True
37
+ """Whether to save the maps to file. Default is True."""
38
+ filename: str | MissingData = MISSING_DATA
39
+ """Only used if save_maps is True. Filename to save the maps to. Default is 'maps.h5' in the dataset directory."""
40
+ orientation_img_origin: Literal["dims", "center"] = "dims"
41
+ "Origin for the orientation distribution image. Can be 'dims' or 'center'. Default is 'dims'."
42
+
43
+
44
+ class GrainPlot(
45
+ Task,
46
+ input_model=Inputs,
47
+ output_names=["dataset"],
48
+ ):
49
+ """Generates and saves maps of Center of Mass, FWHM, Skewness, Kurtosis, Orientation distribution and Mosaicity."""
50
+
51
+ def run(self):
52
+
53
+ inputs = Inputs(**self.get_input_values())
54
+
55
+ default_filename = os.path.join(inputs.dataset.dataset._dir, "maps.h5")
56
+ filename: str = self.get_input_value("filename", default_filename)
57
+
58
+ moments = inputs.dataset.dataset.apply_moments()
59
+ grainPlotMaps = GrainPlotMaps.from_dataset(inputs.dataset)
60
+
61
+ # mosaicity and orientation can only be computed for 2D+ datasets
62
+ if grainPlotMaps.dims.ndim > 1 and inputs.save_maps:
63
+ dimension1, dimension2 = inputs.dimensions
64
+ dimension1_range, dimension2_range = inputs.range
65
+
66
+ if dimension1_range is None:
67
+ dimension1_range = _computeMinMax(moments[dimension1][MomentType.COM])
68
+ if dimension2_range is None:
69
+ dimension2_range = _computeMinMax(moments[dimension2][MomentType.COM])
70
+
71
+ orientation_dist_data = GrainPlotData(
72
+ grainPlotMaps,
73
+ x_dimension=dimension1,
74
+ y_dimension=dimension2,
75
+ x_dimension_range=dimension1_range,
76
+ y_dimension_range=dimension2_range,
77
+ )
78
+ assert orientation_dist_data is not None
79
+ else:
80
+ orientation_dist_data = None
81
+
82
+ # Save data if asked
83
+ if inputs.save_maps:
84
+ nxdict = generate_grain_maps_nxdict(grainPlotMaps, orientation_dist_data)
85
+ os.makedirs(Path(filename).parent, exist_ok=True)
86
+ dicttonx(nxdict, filename)
87
+
88
+ self.outputs.dataset = inputs.dataset
89
+
90
+
91
+ def _computeMinMax(array: numpy.ndarray) -> DimensionRange:
92
+ min_max_result = min_max(array)
93
+ return min_max_result.minimum, min_max_result.maximum
darfix/tasks/grainplot.py CHANGED
@@ -1,105 +1,10 @@
1
- from __future__ import annotations
1
+ import warnings
2
2
 
3
- import logging
4
- import os
5
- from pathlib import Path
6
- from typing import Literal
3
+ from .grain_plot import GrainPlot # noqa: F401
7
4
 
8
- import numpy
9
- from ewokscore import Task
10
- from ewokscore.missing_data import MISSING_DATA
11
- from ewokscore.missing_data import MissingData
12
- from ewokscore.model import BaseInputModel
13
- from pydantic import ConfigDict
14
- from silx.io.dictdump import dicttonx
15
- from silx.math.combo import min_max
16
-
17
- from darfix import dtypes
18
- from darfix.core.moment_types import MomentType
19
-
20
- from ..core.grainplot import DimensionRange
21
- from ..core.grainplot import GrainPlotMaps
22
- from ..core.grainplot import OrientationDistData
23
- from ..core.grainplot import compute_mosaicity
24
- from ..core.grainplot import generate_grain_maps_nxdict
25
-
26
- _logger = logging.getLogger(__file__)
27
-
28
-
29
- class Inputs(BaseInputModel):
30
- model_config = ConfigDict(use_attribute_docstrings=True)
31
- dataset: dtypes.Dataset
32
- """ Input dataset containing a stack of images """
33
- dimensions: tuple[int, int] = (0, 1)
34
- """Dimension indices to use for the maps. Default is (0, 1), which means the two first dimensions."""
35
- range: tuple[DimensionRange | None, DimensionRange | None] = (None, None)
36
- """Dimensionrange for the two dimensions. If None, use the Center of Mass min and max for the both dimensions."""
37
- save_maps: bool = True
38
- """Whether to save the maps to file. Default is True."""
39
- filename: str | MissingData = MISSING_DATA
40
- """Only used if save_maps is True. Filename to save the maps to. Default is 'maps.h5' in the dataset directory."""
41
- orientation_img_origin: Literal["dims", "center"] = "dims"
42
- "Origin for the orientation distribution image. Can be 'dims', 'center' or None. Default is 'dims'."
43
-
44
-
45
- class GrainPlot(
46
- Task,
47
- input_model=Inputs,
48
- output_names=["dataset"],
49
- ):
50
- """Generates and saves maps of Center of Mass, FWHM, Skewness, Kurtosis, Orientation distribution and Mosaicity."""
51
-
52
- def run(self):
53
-
54
- inputs = Inputs(**self.get_input_values())
55
-
56
- default_filename = os.path.join(inputs.dataset.dataset._dir, "maps.h5")
57
- filename: str = self.get_input_value("filename", default_filename)
58
-
59
- moments = inputs.dataset.dataset.apply_moments()
60
- grainPlotMaps = GrainPlotMaps.from_dataset(inputs.dataset)
61
-
62
- # mosaicity and orientation can only be computed for 2D+ datasets
63
- if grainPlotMaps.dims.ndim > 1 and inputs.save_maps:
64
- dimension1, dimension2 = inputs.dimensions
65
- dimension1_range, dimension2_range = inputs.range
66
-
67
- if dimension1_range is None:
68
- dimension1_range = _computeMinMax(moments[dimension1][MomentType.COM])
69
- if dimension2_range is None:
70
- dimension2_range = _computeMinMax(moments[dimension2][MomentType.COM])
71
-
72
- mosaicity = compute_mosaicity(
73
- moments,
74
- x_dimension=dimension1,
75
- y_dimension=dimension2,
76
- x_dimension_range=dimension1_range,
77
- y_dimension_range=dimension2_range,
78
- )
79
-
80
- orientation_dist_data = OrientationDistData(
81
- grainPlotMaps,
82
- x_dimension=dimension1,
83
- y_dimension=dimension2,
84
- x_dimension_range=dimension1_range,
85
- y_dimension_range=dimension2_range,
86
- )
87
- assert orientation_dist_data is not None
88
- else:
89
- orientation_dist_data = None
90
- mosaicity = None
91
-
92
- # Save data if asked
93
- if inputs.save_maps:
94
- nxdict = generate_grain_maps_nxdict(
95
- grainPlotMaps, mosaicity, orientation_dist_data
96
- )
97
- os.makedirs(Path(filename).parent, exist_ok=True)
98
- dicttonx(nxdict, filename)
99
-
100
- self.outputs.dataset = inputs.dataset
101
-
102
-
103
- def _computeMinMax(array: numpy.ndarray) -> DimensionRange:
104
- min_max_result = min_max(array)
105
- return min_max_result.minimum, min_max_result.maximum
5
+ warnings.warn(
6
+ f"The '{__name__}' module is deprecated and will be removed in a future release. "
7
+ "Please replace module name `grainplot` by `grain_plot`",
8
+ DeprecationWarning,
9
+ stacklevel=2,
10
+ )