nabu 2023.2.1__py3-none-any.whl → 2024.1.0rc3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (183)
  1. doc/conf.py +1 -1
  2. doc/doc_config.py +32 -0
  3. nabu/__init__.py +2 -1
  4. nabu/app/bootstrap_stitching.py +1 -1
  5. nabu/app/cli_configs.py +122 -2
  6. nabu/app/composite_cor.py +27 -2
  7. nabu/app/correct_rot.py +70 -0
  8. nabu/app/create_distortion_map_from_poly.py +42 -18
  9. nabu/app/diag_to_pix.py +358 -0
  10. nabu/app/diag_to_rot.py +449 -0
  11. nabu/app/generate_header.py +4 -3
  12. nabu/app/histogram.py +2 -2
  13. nabu/app/multicor.py +6 -1
  14. nabu/app/parse_reconstruction_log.py +151 -0
  15. nabu/app/prepare_weights_double.py +83 -22
  16. nabu/app/reconstruct.py +5 -1
  17. nabu/app/reconstruct_helical.py +7 -0
  18. nabu/app/reduce_dark_flat.py +6 -3
  19. nabu/app/rotate.py +4 -4
  20. nabu/app/stitching.py +16 -2
  21. nabu/app/tests/test_reduce_dark_flat.py +18 -2
  22. nabu/app/validator.py +4 -4
  23. nabu/cuda/convolution.py +8 -376
  24. nabu/cuda/fft.py +4 -0
  25. nabu/cuda/kernel.py +4 -4
  26. nabu/cuda/medfilt.py +5 -158
  27. nabu/cuda/padding.py +5 -71
  28. nabu/cuda/processing.py +23 -2
  29. nabu/cuda/src/ElementOp.cu +78 -0
  30. nabu/cuda/src/backproj.cu +28 -2
  31. nabu/cuda/src/fourier_wavelets.cu +2 -2
  32. nabu/cuda/src/normalization.cu +23 -0
  33. nabu/cuda/src/padding.cu +2 -2
  34. nabu/cuda/src/transpose.cu +16 -0
  35. nabu/cuda/utils.py +39 -0
  36. nabu/estimation/alignment.py +10 -1
  37. nabu/estimation/cor.py +808 -38
  38. nabu/estimation/cor_sino.py +7 -9
  39. nabu/estimation/tests/test_cor.py +85 -3
  40. nabu/io/reader.py +26 -18
  41. nabu/io/tests/test_cast_volume.py +3 -3
  42. nabu/io/tests/test_detector_distortion.py +3 -3
  43. nabu/io/tiffwriter_zmm.py +2 -2
  44. nabu/io/utils.py +14 -4
  45. nabu/io/writer.py +5 -3
  46. nabu/misc/fftshift.py +6 -0
  47. nabu/misc/histogram.py +5 -285
  48. nabu/misc/histogram_cuda.py +8 -104
  49. nabu/misc/kernel_base.py +3 -121
  50. nabu/misc/padding_base.py +5 -69
  51. nabu/misc/processing_base.py +3 -107
  52. nabu/misc/rotation.py +5 -62
  53. nabu/misc/rotation_cuda.py +5 -65
  54. nabu/misc/transpose.py +6 -0
  55. nabu/misc/unsharp.py +3 -78
  56. nabu/misc/unsharp_cuda.py +5 -52
  57. nabu/misc/unsharp_opencl.py +8 -85
  58. nabu/opencl/fft.py +6 -0
  59. nabu/opencl/kernel.py +21 -6
  60. nabu/opencl/padding.py +5 -72
  61. nabu/opencl/processing.py +27 -5
  62. nabu/opencl/src/backproj.cl +3 -3
  63. nabu/opencl/src/fftshift.cl +65 -12
  64. nabu/opencl/src/padding.cl +2 -2
  65. nabu/opencl/src/roll.cl +96 -0
  66. nabu/opencl/src/transpose.cl +16 -0
  67. nabu/pipeline/config_validators.py +63 -3
  68. nabu/pipeline/dataset_validator.py +2 -2
  69. nabu/pipeline/estimators.py +193 -35
  70. nabu/pipeline/fullfield/chunked.py +34 -17
  71. nabu/pipeline/fullfield/chunked_cuda.py +7 -5
  72. nabu/pipeline/fullfield/computations.py +48 -13
  73. nabu/pipeline/fullfield/nabu_config.py +13 -13
  74. nabu/pipeline/fullfield/processconfig.py +10 -5
  75. nabu/pipeline/fullfield/reconstruction.py +1 -2
  76. nabu/pipeline/helical/fbp.py +5 -0
  77. nabu/pipeline/helical/filtering.py +12 -9
  78. nabu/pipeline/helical/gridded_accumulator.py +179 -33
  79. nabu/pipeline/helical/helical_chunked_regridded.py +262 -151
  80. nabu/pipeline/helical/helical_chunked_regridded_cuda.py +4 -11
  81. nabu/pipeline/helical/helical_reconstruction.py +56 -18
  82. nabu/pipeline/helical/span_strategy.py +1 -1
  83. nabu/pipeline/helical/tests/test_accumulator.py +4 -0
  84. nabu/pipeline/params.py +23 -2
  85. nabu/pipeline/processconfig.py +3 -8
  86. nabu/pipeline/tests/test_chunk_reader.py +78 -0
  87. nabu/pipeline/tests/test_estimators.py +120 -2
  88. nabu/pipeline/utils.py +25 -0
  89. nabu/pipeline/writer.py +2 -0
  90. nabu/preproc/ccd_cuda.py +9 -7
  91. nabu/preproc/ctf.py +21 -26
  92. nabu/preproc/ctf_cuda.py +25 -25
  93. nabu/preproc/double_flatfield.py +14 -2
  94. nabu/preproc/double_flatfield_cuda.py +7 -11
  95. nabu/preproc/flatfield_cuda.py +23 -27
  96. nabu/preproc/phase.py +19 -24
  97. nabu/preproc/phase_cuda.py +21 -21
  98. nabu/preproc/shift_cuda.py +58 -28
  99. nabu/preproc/tests/test_ctf.py +5 -5
  100. nabu/preproc/tests/test_double_flatfield.py +2 -2
  101. nabu/preproc/tests/test_vshift.py +13 -2
  102. nabu/processing/__init__.py +0 -0
  103. nabu/processing/convolution_cuda.py +375 -0
  104. nabu/processing/fft_base.py +163 -0
  105. nabu/processing/fft_cuda.py +256 -0
  106. nabu/processing/fft_opencl.py +54 -0
  107. nabu/processing/fftshift.py +134 -0
  108. nabu/processing/histogram.py +286 -0
  109. nabu/processing/histogram_cuda.py +103 -0
  110. nabu/processing/kernel_base.py +126 -0
  111. nabu/processing/medfilt_cuda.py +159 -0
  112. nabu/processing/muladd.py +29 -0
  113. nabu/processing/muladd_cuda.py +68 -0
  114. nabu/processing/padding_base.py +71 -0
  115. nabu/processing/padding_cuda.py +75 -0
  116. nabu/processing/padding_opencl.py +77 -0
  117. nabu/processing/processing_base.py +123 -0
  118. nabu/processing/roll_opencl.py +64 -0
  119. nabu/processing/rotation.py +63 -0
  120. nabu/processing/rotation_cuda.py +66 -0
  121. nabu/processing/tests/__init__.py +0 -0
  122. nabu/processing/tests/test_fft.py +268 -0
  123. nabu/processing/tests/test_fftshift.py +71 -0
  124. nabu/{misc → processing}/tests/test_histogram.py +2 -4
  125. nabu/{cuda → processing}/tests/test_medfilt.py +1 -1
  126. nabu/processing/tests/test_muladd.py +54 -0
  127. nabu/{cuda → processing}/tests/test_padding.py +119 -75
  128. nabu/processing/tests/test_roll.py +63 -0
  129. nabu/{misc → processing}/tests/test_rotation.py +3 -2
  130. nabu/processing/tests/test_transpose.py +72 -0
  131. nabu/{misc → processing}/tests/test_unsharp.py +41 -8
  132. nabu/processing/transpose.py +126 -0
  133. nabu/processing/unsharp.py +79 -0
  134. nabu/processing/unsharp_cuda.py +53 -0
  135. nabu/processing/unsharp_opencl.py +75 -0
  136. nabu/reconstruction/fbp.py +34 -10
  137. nabu/reconstruction/fbp_base.py +35 -16
  138. nabu/reconstruction/fbp_opencl.py +7 -12
  139. nabu/reconstruction/filtering.py +2 -2
  140. nabu/reconstruction/filtering_cuda.py +13 -14
  141. nabu/reconstruction/filtering_opencl.py +3 -4
  142. nabu/reconstruction/projection.py +2 -0
  143. nabu/reconstruction/rings.py +158 -1
  144. nabu/reconstruction/rings_cuda.py +218 -58
  145. nabu/reconstruction/sinogram_cuda.py +16 -12
  146. nabu/reconstruction/tests/test_deringer.py +116 -14
  147. nabu/reconstruction/tests/test_fbp.py +22 -31
  148. nabu/reconstruction/tests/test_filtering.py +11 -2
  149. nabu/resources/dataset_analyzer.py +89 -26
  150. nabu/resources/nxflatfield.py +2 -2
  151. nabu/resources/tests/test_nxflatfield.py +1 -1
  152. nabu/resources/utils.py +9 -2
  153. nabu/stitching/alignment.py +184 -0
  154. nabu/stitching/config.py +241 -39
  155. nabu/stitching/definitions.py +6 -0
  156. nabu/stitching/frame_composition.py +4 -2
  157. nabu/stitching/overlap.py +99 -3
  158. nabu/stitching/sample_normalization.py +60 -0
  159. nabu/stitching/slurm_utils.py +10 -10
  160. nabu/stitching/tests/test_alignment.py +99 -0
  161. nabu/stitching/tests/test_config.py +16 -1
  162. nabu/stitching/tests/test_overlap.py +68 -2
  163. nabu/stitching/tests/test_sample_normalization.py +49 -0
  164. nabu/stitching/tests/test_slurm_utils.py +5 -5
  165. nabu/stitching/tests/test_utils.py +3 -33
  166. nabu/stitching/tests/test_z_stitching.py +391 -22
  167. nabu/stitching/utils.py +144 -202
  168. nabu/stitching/z_stitching.py +309 -126
  169. nabu/testutils.py +18 -0
  170. nabu/thirdparty/tomocupy_remove_stripe.py +586 -0
  171. nabu/utils.py +32 -6
  172. {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/LICENSE +1 -1
  173. {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/METADATA +5 -5
  174. nabu-2024.1.0rc3.dist-info/RECORD +296 -0
  175. {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/WHEEL +1 -1
  176. {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/entry_points.txt +5 -1
  177. nabu/conftest.py +0 -14
  178. nabu/opencl/fftshift.py +0 -92
  179. nabu/opencl/tests/test_fftshift.py +0 -55
  180. nabu/opencl/tests/test_padding.py +0 -84
  181. nabu-2023.2.1.dist-info/RECORD +0 -252
  182. /nabu/cuda/src/{fftshift.cu → dfi_fftshift.cu} +0 -0
  183. {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/top_level.txt +0 -0
nabu/misc/histogram.py CHANGED
@@ -1,286 +1,6 @@
-from math import log2, ceil
-import numpy as np
-from silx.math import Histogramnd
-from tomoscan.io import HDF5File
-from ..utils import check_supported
-from ..resources.logger import LoggerOrPrint
+from ..processing.histogram import *
+from ..utils import deprecation_warning
 
-
-class PartialHistogram:
-    """
-    A class for computing histogram progressively.
-
-    In certain cases, it is cumbersome to compute a histogram directly on a big chunk of
-    data (ex. data not fitting in memory, disk access too slow) while some parts of the
-    data are readily available in-memory.
-    """
-
-    histogram_methods = ["fixed_bins_width", "fixed_bins_number"]
-    bin_width_policies = ["uint16"]
-    backends = ["numpy", "silx"]
-
-    def __init__(self, method="fixed_bins_width", bin_width="uint16", num_bins=None, min_bins=None, backend="silx"):
-        """
-        Initialize a PartialHistogram class.
-
-        Parameters
-        ----------
-        method: str, optional
-            Partial histogram computing method. Available are:
-              - `fixed_bins_width`: all the histograms are computed with the same bin
-                width. The class adapts to the data range and computes the number of
-                bins accordingly.
-              - `fixed_bins_number`: all the histograms are computed with the same
-                number of bins. The class adapts to the data range and computes the
-                bin width accordingly.
-            Default is "fixed_bins_width"
-        bin_width: str or float, optional
-            Policy for histogram bins when method="fixed_bins_width". Available are:
-              - "uint16": The bin width is computed so that floating-point elements
-                `f1` and `f2` satisfying `|f1 - f2| < bin_width` implies
-                `f1_converted - f2_converted < 1` once cast to uint16.
-              - A number: all the bins have this fixed width.
-
-            Default is "uint16"
-        num_bins: int, optional
-            Number of bins when method = 'fixed_bins_number'.
-        min_bins: int, optional
-            Minimum number of bins when method = 'fixed_bins_width'.
-        backend: str, optional
-            Which histogram backend to use for computations. Available are "silx", "numpy".
-            Fastest is "silx".
-        """
-        check_supported(method, self.histogram_methods, "histogram computing method")
-        self.method = method
-        check_supported(backend, self.backends, "histogram backend")
-        self.backend = backend
-        self._set_bin_width(bin_width)
-        self._set_num_bins(num_bins)
-        self.min_bins = min_bins
-        self._set_histogram_methods()
-
-    def _set_bin_width(self, bin_width):
-        if self.method == "fixed_bins_number":
-            self.bin_width = None
-            return
-        if isinstance(bin_width, str):
-            check_supported(bin_width, self.bin_width_policies, "bin width policy")
-            self._fixed_bw = False
-        else:
-            bin_width = float(bin_width)
-            self._fixed_bw = True
-        self.bin_width = bin_width
-
-    def _set_num_bins(self, num_bins):
-        if self.method == "fixed_bins_width":
-            self.num_bins = None
-            return
-        if self.method == "fixed_bins_number" and num_bins is None:
-            raise ValueError("Need to specify num_bins for method='fixed_bins_number'")
-        self.num_bins = int(num_bins)
-
-    def _set_histogram_methods(self):
-        self._histogram_methods = {
-            "fixed_bins_number": {
-                "compute": self._compute_histogram_fixed_nbins,
-                "merge": self._merge_histograms_fixed_nbins,
-            },
-            "fixed_bins_width": {
-                "compute": self._compute_histogram_fixed_bw,
-                "merge": self._merge_histograms_fixed_bw,
-            },
-        }
-        assert set(self._histogram_methods.keys()) == set(self.histogram_methods)
-
-    @staticmethod
-    def _get_histograms_and_bins(histograms, center=False, dont_truncate_bins=False):
-        histos = [h[0] for h in histograms]
-        if dont_truncate_bins:
-            bins = [h[1] for h in histograms]
-        else:
-            if center:
-                bins = [0.5 * (h[1][1:] + h[1][:-1]) for h in histograms]
-            else:
-                bins = [h[1][:-1] for h in histograms]
-        return histos, bins
-
-    #
-    # Histogram with fixed number of bins
-    #
-
-    def _compute_histogram_fixed_nbins(self, data, data_range=None):
-        if data.ndim > 1:
-            data = data.ravel()
-        dmin, dmax = data.min(), data.max() if data_range is None else data_range
-        if self.backend == "numpy":
-            res = np.histogram(data, bins=self.num_bins)
-        elif self.backend == "silx":
-            histogrammer = Histogramnd(data, n_bins=self.num_bins, histo_range=(dmin, dmax), last_bin_closed=True)
-            res = histogrammer.histo, histogrammer.edges[0]  # pylint: disable=E1136
-        return res
-
-    def _merge_histograms_fixed_nbins(self, histograms, dont_truncate_bins=False):
-        histos, bins = self._get_histograms_and_bins(histograms, dont_truncate_bins=dont_truncate_bins)
-        res = np.histogram(
-            np.hstack(bins),
-            weights=np.hstack(histos),
-            bins=self.num_bins,
-        )
-        return res
-
-    #
-    # Histogram with fixed bin width
-    #
-
-    def _bin_width_u16(self, dmin, dmax):
-        return (dmax - dmin) / 65535.0
-
-    def _bin_width_fixed(self, dmin, dmax):
-        return self.bin_width
-
-    def get_bin_width(self, dmin, dmax):
-        if self._fixed_bw:
-            return self._bin_width_fixed(dmin, dmax)
-        elif self.bin_width == "uint16":
-            return self._bin_width_u16(dmin, dmax)
-        else:
-            raise ValueError()
-
-    def _compute_histogram_fixed_bw(self, data, data_range=None):
-        dmin, dmax = data.min(), data.max() if data_range is None else data_range
-        min_bins = self.min_bins or 1
-        bw_max = self.get_bin_width(dmin, dmax)
-        nbins = 0
-        bw_factor = 1
-        while nbins < min_bins:
-            bw = 2 ** round(log2(bw_max)) / bw_factor
-            nbins = int((dmax - dmin) / bw)
-            bw_factor *= 2
-        res = np.histogram(data, bins=nbins)
-        return res
-
-    def _merge_histograms_fixed_bw(self, histograms, **kwargs):
-        histos, bins = self._get_histograms_and_bins(histograms, center=False)
-        dmax = max([b[-1] for b in bins])
-        dmin = min([b[0] for b in bins])
-        bw_max = max([b[1] - b[0] for b in bins])
-        res = np.histogram(np.hstack(bins), weights=np.hstack(histos), bins=int((dmax - dmin) / bw_max))
-        return res
-
-    #
-    # Dispatch methods
-    #
-
-    def compute_histogram(self, data, data_range=None):
-        compute_hist_func = self._histogram_methods[self.method]["compute"]
-        return compute_hist_func(data, data_range=data_range)
-
-    def merge_histograms(self, histograms, **kwargs):
-        merge_hist_func = self._histogram_methods[self.method]["merge"]
-        return merge_hist_func(histograms, **kwargs)
-
-
-class VolumeHistogram:
-    """
-    A class for computing the histogram of an entire volume.
-    Unless explicitly specified, histogram is computed in several passes so that not
-    all the volume is loaded in memory.
-    """
-
-    def __init__(self, data_url, chunk_size_slices=100, chunk_size_GB=None, nbins=1e6, logger=None):
-        """
-        Initialize a VolumeHistogram object.
-
-        Parameters
-        ----------
-        fname: DataUrl
-            DataUrl to the HDF5 file.
-        chunk_size_slices: int, optional
-            Compute partial histograms of groups of slices. This is the default behavior,
-            where the groups size is 100 slices.
-            This parameter is mutually exclusive with 'chunk_size_GB'.
-        chunk_size_GB: float, optional
-            Maximum memory (in GB) to use when computing the histogram by group of slices.
-            This parameter is mutually exclusive with 'chunk_size_slices'.
-        nbins: int, optional
-            Histogram number of bins. Default is 1e6.
-        """
-        self.data_url = data_url
-        self.logger = LoggerOrPrint(logger)
-        self._get_data_info()
-        self._set_chunk_size(chunk_size_slices, chunk_size_GB)
-        self.nbins = int(nbins)
-        self._init_histogrammer()
-
-    def _get_data_info(self):
-        self.fname = self.data_url.file_path()
-        self.data_path = self.data_url.data_path()
-        with HDF5File(self.fname, "r") as fid:
-            try:
-                data_ptr = fid[self.data_path]
-            except KeyError:
-                msg = str(
-                    "Could not access HDF5 path %s in file %s. Please check that this file \
-                    actually contains a reconstruction and that the HDF5 path is correct"
-                    % (self.data_path, self.fname)
-                )
-                self.logger.fatal(msg)
-                raise ValueError(msg)
-            if data_ptr.ndim != 3:
-                msg = "Expected data to have 3 dimensions, got %d" % data_ptr.ndim
-                raise ValueError(msg)
-            self.data_shape = data_ptr.shape
-            self.data_dtype = data_ptr.dtype
-            self.data_nbytes_GB = np.prod(data_ptr.shape) * data_ptr.dtype.itemsize / 1e9
-
-    def _set_chunk_size(self, chunk_size_slices, chunk_size_GB):
-        if not ((chunk_size_slices is not None) ^ (chunk_size_GB is not None)):
-            raise ValueError("Please specify either chunk_size_slices or chunk_size_GB")
-        if chunk_size_slices is None:
-            chunk_size_slices = int(chunk_size_GB / (np.prod(self.data_shape[1:]) * self.data_dtype.itemsize / 1e9))
-        self.chunk_size = chunk_size_slices
-        self.logger.debug("Computing histograms by groups of %d slices" % self.chunk_size)
-
-    def _init_histogrammer(self):
-        self.histogrammer = PartialHistogram(method="fixed_bins_number", num_bins=self.nbins)
-
-    def _compute_histogram(self, data):
-        return self.histogrammer.compute_histogram(data.ravel())  # 1D
-
-    def compute_volume_histogram(self):
-        n_z = self.data_shape[0]
-        histograms = []
-        n_steps = ceil(n_z / self.chunk_size)
-        with HDF5File(self.fname, "r") as fid:
-            for chunk_id in range(n_steps):
-                self.logger.debug("Computing histogram %d/%d" % (chunk_id + 1, n_steps))
-                z_slice = slice(chunk_id * self.chunk_size, (chunk_id + 1) * self.chunk_size)
-                images_stack = fid[self.data_path][z_slice, :, :]
-                hist = self._compute_histogram(images_stack)
-                histograms.append(hist)
-        res = self.histogrammer.merge_histograms(histograms)
-        return res
-
-
-def hist_as_2Darray(hist, center=True, dtype="f"):
-    hist, bins = hist
-    if bins.size != hist.size:
-        # assert bins.size == hist.size +1
-        if center:
-            bins = 0.5 * (bins[1:] + bins[:-1])
-        else:
-            bins = bins[:-1]
-    res = np.zeros((2, hist.size), dtype=dtype)
-    res[0] = hist
-    res[1] = bins.astype(dtype)
-    return res
-
-
-def add_last_bin(histo_bins):
-    """
-    Add the last bin (max value) to a list of bin edges.
-    """
-    res = np.zeros(histo_bins.size + 1, dtype=histo_bins.dtype)
-    res[:-1] = histo_bins[:]
-    res[-1] = res[-2] + (res[1] - res[0])
-    return res
+deprecation_warning(
+    "nabu.misc.histogram has been moved to nabu.processing.histogram", do_print=True, func_name="histogram"
+)
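The old module is now a thin shim: it re-exports everything from nabu.processing.histogram and calls deprecation_warning at import time, so existing imports keep working while printing a warning. Below is a minimal migration sketch, assuming PartialHistogram keeps the constructor shown in the removed code at its new location:

    # Old path (still works in 2024.1, but warns at import):
    #   from nabu.misc.histogram import PartialHistogram
    # New path:
    from nabu.processing.histogram import PartialHistogram
    import numpy as np

    # Histogram a volume chunk by chunk, then merge the partial results
    hist = PartialHistogram(method="fixed_bins_number", num_bins=1024, backend="numpy")
    chunks = [np.random.rand(100, 100) for _ in range(4)]
    partials = [hist.compute_histogram(chunk.ravel()) for chunk in chunks]
    counts, bin_edges = hist.merge_histograms(partials)  # same (counts, edges) format as numpy.histogram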
nabu/misc/histogram_cuda.py CHANGED
@@ -1,104 +1,8 @@
-import numpy as np
-from ..utils import get_cuda_srcfile, updiv
-from ..cuda.utils import __has_pycuda__
-from .histogram import PartialHistogram, VolumeHistogram
-
-if __has_pycuda__:
-    import pycuda.gpuarray as garray
-    from ..cuda.kernel import CudaKernel
-    from ..cuda.processing import CudaProcessing
-
-
-class CudaPartialHistogram(PartialHistogram):
-    def __init__(
-        self,
-        method="fixed_bins_number",
-        bin_width="uint16",
-        num_bins=None,
-        min_bins=None,
-        cuda_options=None,
-    ):
-        if method == "fixed_bins_width":
-            raise NotImplementedError("Histogram with fixed bins width is not implemented with the Cuda backend")
-        super().__init__(
-            method=method,
-            bin_width=bin_width,
-            num_bins=num_bins,
-            min_bins=min_bins,
-        )
-        self.cuda_processing = CudaProcessing(**(cuda_options or {}))
-        self._init_cuda_histogram()
-
-    def _init_cuda_histogram(self):
-        self.cuda_hist = CudaKernel(
-            "histogram",
-            filename=get_cuda_srcfile("histogram.cu"),
-            signature="PiiiffPi",
-        )
-        self.d_hist = garray.zeros(self.num_bins, dtype=np.uint32)
-
-    def _compute_histogram_fixed_nbins(self, data, data_range=None):
-        if isinstance(data, np.ndarray):
-            data = garray.to_gpu(data)
-        if data_range is None:
-            # Should be possible to do both in one single pass with ReductionKernel
-            # and garray.vec.float2, but the last step in volatile shared memory
-            # still gives errors. To be investigated...
-            data_min = garray.min(data).get()[()]
-            data_max = garray.max(data).get()[()]
-        else:
-            data_min, data_max = data_range
-        Nz, Ny, Nx = data.shape
-        block = (16, 16, 4)
-        grid = (
-            updiv(Nx, block[0]),
-            updiv(Ny, block[1]),
-            updiv(Nz, block[2]),
-        )
-        self.d_hist.fill(0)
-        self.cuda_hist(
-            data,
-            Nx,
-            Ny,
-            Nz,
-            data_min,
-            data_max,
-            self.d_hist,
-            self.num_bins,
-            grid=grid,
-            block=block,
-        )
-        # Return a result in the same format as numpy.histogram
-        res_hist = self.d_hist.get()
-        res_bins = np.linspace(data_min, data_max, num=self.num_bins + 1, endpoint=True)
-        return res_hist, res_bins
-
-
-class CudaVolumeHistogram(VolumeHistogram):
-    def __init__(
-        self,
-        data_url,
-        chunk_size_slices=100,
-        chunk_size_GB=None,
-        nbins=1e6,
-        logger=None,
-        cuda_options=None,
-    ):
-        self.cuda_options = cuda_options
-        super().__init__(
-            data_url,
-            chunk_size_slices=chunk_size_slices,
-            chunk_size_GB=chunk_size_GB,
-            nbins=nbins,
-            logger=logger,
-        )
-
-    def _init_histogrammer(self):
-        self.histogrammer = CudaPartialHistogram(
-            method="fixed_bins_number",
-            num_bins=self.nbins,
-            cuda_options=self.cuda_options,
-        )
-
-    def _compute_histogram(self, data):
-        return self.histogrammer.compute_histogram(data)  # 3D
+from ..processing.histogram_cuda import *
+from ..utils import deprecation_warning
+
+deprecation_warning(
+    "nabu.misc.histogram_cuda has been moved to nabu.processing.histogram_cuda",
+    do_print=True,
+    func_name="histogram_cuda",
+)
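The removed CudaPartialHistogram (now provided by nabu.processing.histogram_cuda) sizes its 3D kernel launch with updiv, the rounding-up division helper from nabu/utils.py that is not shown in this diff. A minimal sketch of that grid computation, with updiv assumed to be plain ceiling division and re-implemented here only for illustration:

    def updiv(a, b):
        # Ceiling division: smallest n such that n * b >= a
        return (a + b - 1) // b

    Nz, Ny, Nx = 300, 2048, 2048   # volume shape (z, y, x)
    block = (16, 16, 4)            # CUDA block size (x, y, z), as in the removed code
    grid = (updiv(Nx, block[0]), updiv(Ny, block[1]), updiv(Nz, block[2]))
    print(grid)  # (128, 128, 75): enough blocks to cover every voxel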
nabu/misc/kernel_base.py CHANGED
@@ -1,122 +1,4 @@
-from ..utils import updiv
+from nabu.processing.kernel_base import KernelBase
+from ..utils import deprecated_class
 
-
-class KernelBase:
-    """
-    A base class for OpenCL and Cuda kernels.
-
-    Parameters
-    -----------
-    kernel_name: str
-        Name of the CUDA kernel.
-    filename: str, optional
-        Path to the file name containing kernels definitions
-    src: str, optional
-        Source code of kernels definitions
-    automation_params: dict, optional
-        Automation parameters, see below
-
-    Automation parameters
-    ----------------------
-    automation_params is a dictionary with the following keys and default values.
-    guess_block: bool (True)
-        If block is not specified during calls, choose a block size based on
-        the size/dimensions of the first array.
-        Mind that it is unlikely to be the optimal choice.
-    guess_grid: bool (True):
-        If the grid size is not specified during calls, choose a grid size
-        based on the size of the first array.
-    follow_device_ptr: bool (True)
-        specify gpuarray.gpudata for all cuda GPUArrays (and pyopencl.array.data for pyopencl arrays).
-        Otherwise, raise an error.
-    """
-
-    _default_automation_params = {
-        "guess_block": True,
-        "guess_grid": True,
-        "follow_device_ptr": True,
-    }
-
-    def __init__(
-        self,
-        kernel_name,
-        filename=None,
-        src=None,
-        automation_params=None,
-    ):
-        self.check_filename_src(filename, src)
-        self.set_automation_params(automation_params)
-
-    def check_filename_src(self, filename, src):
-        err_msg = "Please provide either filename or src"
-        if filename is None and src is None:
-            raise ValueError(err_msg)
-        if filename is not None and src is not None:
-            raise ValueError(err_msg)
-        if filename is not None:
-            with open(filename) as fid:
-                src = fid.read()
-        self.filename = filename
-        self.src = src
-
-    def set_automation_params(self, automation_params):
-        self.automation_params = self._default_automation_params.copy()
-        self.automation_params.update(automation_params or {})
-
-    @staticmethod
-    def guess_grid_size(shape, block_size):
-        # python: (z, y, x) -> cuda: (x, y, z)
-        res = tuple(map(lambda x: updiv(x[0], x[1]), zip(shape[::-1], block_size)))
-        if len(res) == 2:
-            res += (1,)
-        return res
-
-    @staticmethod
-    def guess_block_size(shape):
-        """
-        Guess a block size based on the shape of an array.
-        """
-        ndim = len(shape)
-        if ndim == 1:
-            return (128, 1, 1)
-        if ndim == 2:
-            return (32, 32, 1)
-        else:
-            return (16, 8, 8)
-
-    def get_block_grid(self, *args, **kwargs):
-        block = None
-        grid = None
-        if ("block" not in kwargs) or (kwargs["block"] is None):
-            if self.automation_params["guess_block"]:
-                block = self.guess_block_size(args[0].shape)
-            else:
-                raise ValueError("Please provide block size")
-        else:
-            block = kwargs["block"]
-        if ("grid" not in kwargs) or (kwargs["grid"] is None):
-            if self.automation_params["guess_grid"]:
-                grid = self.guess_grid_size(args[0].shape, block)
-            else:
-                raise ValueError("Please provide block grid")
-        else:
-            grid = kwargs["grid"]
-        self.last_block_size = block
-        self.last_grid_size = grid
-        return block, grid
-
-    def follow_device_arr(self, args):
-        raise ValueError("Base class")
-
-    def _prepare_call(self, *args, **kwargs):
-        block, grid = self.get_block_grid(*args, **kwargs)
-        # pycuda crashes when any element of block/grid is not a python int (ex. numpy.int64).
-        # A weird behavior once observed is "data.shape" returning (np.int64, int, int) (!).
-        # Ensure that everything is a python integer.
-        grid = tuple(int(x) for x in grid)
-        if block is not None:
-            block = tuple(int(x) for x in block)
-        #
-        args = self.follow_device_arr(args)
-
-        return grid, block, args, kwargs
+KernelBase = deprecated_class("KernelBase has been moved to nabu.processing", do_print=True)(KernelBase)
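Unlike the shims above, which call deprecation_warning directly at import time, this module re-imports KernelBase from nabu.processing.kernel_base and rebinds the old name through the deprecated_class wrapper. The sketch below illustrates the general shape of such a wrapper; it is an assumption for illustration only, not nabu's actual deprecated_class implementation (which lives in nabu/utils.py):

    def deprecated_class(message, do_print=False):
        # Class decorator: warn when the deprecated alias is instantiated
        def decorator(cls):
            class Deprecated(cls):
                def __init__(self, *args, **kwargs):
                    if do_print:
                        print("Deprecation warning: %s" % message)
                    super().__init__(*args, **kwargs)
            Deprecated.__name__ = cls.__name__
            return Deprecated
        return decorator

    # Usage mirroring the shim above:
    #   KernelBase = deprecated_class("KernelBase has been moved to nabu.processing", do_print=True)(KernelBase)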
nabu/misc/padding_base.py CHANGED
@@ -1,70 +1,6 @@
-import numpy as np
-from ..utils import check_supported
+from ..processing.padding_base import *
+from ..utils import deprecation_warning
 
-
-class PaddingBase:
-    """
-    A class for performing padding based on coordinate transform.
-    The Cuda and OpenCL backends will subclass this class.
-    """
-
-    supported_modes = ["constant", "edge", "reflect", "symmetric", "wrap"]
-
-    def __init__(self, shape, pad_width, mode="constant", **kwargs):
-        """
-        Initialize a Padding object.
-
-        Parameters
-        ----------
-        shape: tuple
-            Image shape
-        pad_width: tuple
-            Padding width for each axis. Please see the documentation of numpy.pad().
-        mode: str
-            Padding mode
-
-        Other parameters
-        ----------------
-        constant_values: tuple
-            Tuple containing the values to fill when mode="constant" (as in numpy.pad)
-        """
-        if len(shape) != 2:
-            raise ValueError("This class only works on images")
-        self.shape = shape
-        self._set_mode(mode, **kwargs)
-        self._get_padding_arrays(pad_width)
-
-    def _set_mode(self, mode, **kwargs):
-        # COMPAT.
-        if mode == "edges":
-            mode = "edge"
-        #
-        check_supported(mode, self.supported_modes, "padding mode")
-        self.mode = mode
-        self._kwargs = kwargs
-
-    def _get_padding_arrays(self, pad_width):
-        self.pad_width = pad_width
-        if isinstance(pad_width, tuple) and isinstance(pad_width[0], np.ndarray):
-            # user-defined coordinate transform
-            if len(pad_width) != 2:
-                raise ValueError(
-                    "pad_width must be either a scalar, a tuple in the form ((a, b), (c, d)), or a tuple of two numpy arrays"
-                )
-            if self.mode == "constant":
-                raise ValueError("Custom coordinate transform does not work with mode='constant'")
-            self.mode = "custom"
-            self.coords_rows, self.coords_cols = pad_width
-        else:
-            if self.mode == "constant":
-                # no need for coordinate transform here
-                constant_values = self._kwargs.get("constant_values", 0)
-                self.padded_array_constant = np.pad(
-                    np.zeros(self.shape, dtype="f"), self.pad_width, mode="constant", constant_values=constant_values
-                )
-                self.padded_shape = self.padded_array_constant.shape
-                return
-            R, C = np.indices(self.shape, dtype=np.int32)
-            self.coords_rows = np.pad(R, self.pad_width, mode=self.mode)
-            self.coords_cols = np.pad(C, self.pad_width, mode=self.mode)
-            self.padded_shape = self.coords_rows.shape
+deprecation_warning(
+    "nabu.misc.padding has been moved to nabu.processing.padding_base", do_print=True, func_name="padding_base"
+)
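As with the other modules, importing PaddingBase from nabu.misc.padding_base still works through the wildcard re-export but prints a deprecation warning; new code should import from nabu.processing.padding_base. A minimal usage sketch, assuming the class keeps the constructor shown in the removed code at its new location:

    from nabu.processing.padding_base import PaddingBase

    # Coordinate-transform padding of a 2D image; the Cuda/OpenCL backends
    # (nabu/processing/padding_cuda.py and nabu/processing/padding_opencl.py in
    # the file list above) subclass this base class.
    pad = PaddingBase(
        shape=(256, 256),              # 2D images only; other shapes raise ValueError
        pad_width=((8, 8), (16, 16)),  # rows then columns, same convention as numpy.pad
        mode="reflect",
    )
    print(pad.padded_shape)  # (272, 288)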