nabu 2025.1.0.dev12__py3-none-any.whl → 2025.1.0.dev14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nabu/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "2025.1.0-dev12"
+ __version__ = "2025.1.0-dev14"
  __nabu_modules__ = [
      "app",
      "cuda",
nabu/app/cli_configs.py CHANGED
@@ -162,7 +162,7 @@ MultiCorConfig = ReconstructConfig.copy()
  MultiCorConfig.update(
      {
          "cor": {
-             "help": "Positions of the center of rotation. It must be a list of comma-separated scalars, or in the form start:stop:step, where start, stop and step can all be floating-point values.",
+             "help": "Absolute positions of the center of rotation. It must be a list of comma-separated scalars, or in the form start:stop:step, where start, stop and step can all be floating-point values.",
              "default": "",
              "mandatory": True,
          },
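The updated "cor" help string describes two accepted spellings for the multi-CoR scan. As an illustration of that syntax only, here is a hypothetical parser sketch (parse_cors is not nabu's get_user_cors; it simply mirrors the documented format):

    import numpy as np

    def parse_cors(spec):
        """Illustrative only: accept 'a,b,c' or 'start:stop:step', all floating-point."""
        if ":" in spec:
            start, stop, step = (float(s) for s in spec.split(":"))
            return np.arange(start, stop, step).tolist()
        return [float(s) for s in spec.split(",")]

    # parse_cors("1020.5:1030.5:2.5") -> [1020.5, 1023.0, 1025.5, 1028.0]
    # parse_cors("1022.0,1024.5")     -> [1022.0, 1024.5]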
nabu/app/multicor.py CHANGED
@@ -59,23 +59,34 @@ def main():
      ######

      cors = get_user_cors(args["cor"])
+     options = reconstructor.process_config.processing_options["reconstruction"]
+     reconstruct_from_sinos_stack = (options["method"].lower() == "cone") or (
+         options["method"].lower() == "mlem" and options["implementation"].lower() == "corrct"
+     )
+     do_halftomo = pipeline.process_config.do_halftomo

      rec_instance = pipeline.reconstruction

+     # Get sinogram
+     if reconstruct_from_sinos_stack:
+         sino = pipeline._d_radios.transpose(axes=(1, 0, 2))
+     else:
+         # Get sinogram into contiguous array
+         # TODO Can't do memcpy2D ?! It used to work in cuda 11.
+         # For now: transfer to host... not optimal
+         sino = pipeline._d_radios[:, pipeline._d_radios.shape[1] // 2, :].get()  # pylint: disable=E1136
+
      for cor in cors:
          # Re-configure with new CoR
          pipeline.processing_options["reconstruction"]["rotation_axis_position"] = cor
          pipeline.processing_options["save"]["file_prefix"] = file_prefix + "_%.03f" % cor
          pipeline._init_writer(create_subfolder=False, single_output_file_initialized=False)

-         # Get sinogram into contiguous array
-         # TODO Can't do memcpy2D ?! It used to work in cuda 11.
-         # For now: transfer to host... not optimal
-         sino = pipeline._d_radios[:, pipeline._d_radios.shape[1] // 2, :].get()  # pylint: disable=E1136
-
-         if pipeline.process_config.do_halftomo:
+         # Reconfigure center of rotation
+         if not (do_halftomo):
+             pipeline.reconstruction.reset_rot_center(cor)
+         else:
              # re-initialize FBP object, because in half-tomography the output slice size is a function of CoR
-             options = pipeline.processing_options["reconstruction"]
              rec_instance = pipeline.FBPClass(
                  sino.shape,
                  angles=options["angles"],
@@ -92,11 +103,15 @@ def main():
                      "filter_cutoff": options["fbp_filter_cutoff"],
                  },
              )
-         else:
-             pipeline.reconstruction.reset_rot_center(cor)

          # Run reconstruction
-         rec = rec_instance.fbp(sino)
+         if reconstruct_from_sinos_stack:
+             # Need to copy the sino each time, as it is modified by FDK
+             rec = rec_instance.reconstruct(sino.copy())
+             # take the middle slice
+             rec = rec[rec.shape[0] // 2]
+         else:
+             rec = rec_instance.fbp(sino)
          # if return_all_recs:
          #     all_recs.append(rec)
          rec_3D = view_as_images_stack(rec)  # writer wants 3D data
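A note on the data handling in this hunk: the cone/MLEM branch reconstructs from the whole stack and then keeps the middle slice, while the FBP branch keeps the previous behaviour of reconstructing a single middle sinogram. A minimal NumPy sketch of the two selections, assuming radios are stacked as (n_angles, n_z, n_x) (an assumption made for illustration, not stated in this diff):

    import numpy as np

    radios = np.zeros((500, 256, 2048), dtype="f")  # hypothetical (n_angles, n_z, n_x) stack

    # Sinogram stack handed to the cone-beam / corrct-MLEM reconstructors: (n_z, n_angles, n_x)
    sinos = radios.transpose(1, 0, 2)

    # Single middle sinogram handed to FBP: (n_angles, n_x)
    sino_mid = radios[:, radios.shape[1] // 2, :]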
nabu/io/cast_volume.py CHANGED
@@ -206,6 +206,9 @@ def cast_volume(
      data_min = data_min if data_min is not None else found_data_min
      data_max = data_max if data_max is not None else found_data_max

+     if isinstance(output_volume, JP2KVolume):
+         output_volume.rescale_data = False
+
      data = []
      for input_slice, frame_dumper in zip(
          input_volume.browse_slices(),
nabu/pipeline/fullfield/processconfig.py CHANGED
@@ -577,6 +577,7 @@ class ProcessConfig(ProcessConfigBase):
              self.rec_params,
              [
                  "method",
+                 "iterations",
                  "implementation",
                  "fbp_filter_type",
                  "fbp_filter_cutoff",
@@ -613,8 +614,6 @@
              voxel_size,
          )  # pix size is in microns in dataset_info

-         rec_options["iterations"] = nabu_config["reconstruction"]["iterations"]
-
          # x/y/z position information
          def get_mean_pos(position_array):
              if position_array is None:
@@ -636,6 +635,8 @@
          rec_options["position"] = mean_positions_xyz
          if rec_options["method"] == "cone" and rec_options["sample_detector_dist"] is None:
              rec_options["sample_detector_dist"] = self.dataset_info.distance  # was checked to be not None earlier
+         if rec_options["method"].lower() == "mlem" and rec_options["implementation"] in [None, ""]:
+             rec_options["implementation"] = "corrct"

          # New key
          rec_options["cor_estimated_auto"] = isinstance(nabu_config["reconstruction"]["rotation_axis_position"], str)
nabu/preproc/tests/test_ctf.py CHANGED
@@ -223,7 +223,7 @@ class TestCtf:
          # phase_fft = ctf_fft.retrieve_phase(img)
          self.check_result(phase_r2c, self.ref_plain, "Something wrong with CtfFilter-FFT")

-     @pytest.mark.skipif(not (__has_pycuda__ and __has_cufft__), reason="pycuda and (scikit-cuda or vkfft)")
+     @pytest.mark.skipif(not (__has_pycuda__ and __has_cufft__), reason="pycuda and (cupy? or vkfft)")
      def test_cuda_ctf(self):
          data = nabu_get_data("brain_phantom.npz")["data"]
          delta_beta = 50.0
nabu/preproc/tests/test_paganin.py CHANGED
@@ -77,9 +77,7 @@ class TestPaganin:
          errmax = np.max(np.abs(res - res_tomopy) / np.max(res_tomopy))
          assert errmax < self.rtol_pag, "Max error is too high"

-     @pytest.mark.skipif(
-         not (__has_pycuda__ and __has_cufft__), reason="Need pycuda and (scikit-cuda or vkfft) for this test"
-     )
+     @pytest.mark.skipif(not (__has_pycuda__ and __has_cufft__), reason="Need pycuda and (cupy? or vkfft) for this test")
      @pytest.mark.parametrize("config", scenarios)
      def test_gpu_paganin(self, config):
          paganin, data, pag_kwargs = self.get_paganin_instance_and_data(config, self.data)
nabu/processing/fft_cuda.py CHANGED
@@ -3,9 +3,8 @@ import warnings
  from functools import lru_cache
  from multiprocessing import get_context
  from multiprocessing.pool import Pool
- import numpy as np
  from ..utils import BaseClassError, check_supported, no_decorator
- from .fft_base import _BaseFFT, _BaseVKFFT
+ from .fft_base import _BaseVKFFT

  try:
      from pyvkfft.cuda import VkFFTApp as CudaVkFFTApp
@@ -16,139 +15,9 @@ except (ImportError, OSError):
16
15
  CudaVkFFTApp = BaseClassError
17
16
  from ..cuda.processing import CudaProcessing
18
17
 
19
- Plan = None
20
- cu_fft = None
21
- cu_ifft = None
22
- __has_skcuda__ = None
23
-
24
18
  n_cached_ffts = int(os.getenv("NABU_FFT_CACHE", "0"))
25
19
 
26
20
 
27
- def init_skcuda():
28
- # This needs to be done here, because scikit-cuda creates a Cuda context at import,
29
- # which can mess things up in some cases.
30
- # Ugly solution to an ugly problem.
31
- # ruff: noqa: PLW0603
32
- global __has_skcuda__, Plan, cu_fft, cu_ifft
33
- try:
34
- from skcuda.fft import Plan
35
- from skcuda.fft import fft as cu_fft
36
- from skcuda.fft import ifft as cu_ifft
37
-
38
- __has_skcuda__ = True
39
- except ImportError:
40
- __has_skcuda__ = False
41
-
42
-
43
- class SKCUFFT(_BaseFFT):
44
- implem = "skcuda"
45
- backend = "cuda"
46
- ProcessingCls = CudaProcessing
47
-
48
- def _configure_batched_transform(self):
49
- if __has_skcuda__ is None:
50
- init_skcuda()
51
- if not (__has_skcuda__):
52
- raise ImportError("Please install pycuda and scikit-cuda to use the CUDA back-end")
53
-
54
- self.cufft_batch_size = 1
55
- self.cufft_shape = self.shape
56
- self._cufft_plan_kwargs = {}
57
- if (self.axes is not None) and (len(self.axes) < len(self.shape)):
58
- # In the easiest case, the transform is computed along the fastest dimensions:
59
- # - 1D transforms of lines of 2D data
60
- # - 2D transforms of images of 3D data (stacked along slow dim)
61
- # - 1D transforms of 3D data along fastest dim
62
- # Otherwise, we have to configure cuda "advanced memory layout".
63
- data_ndims = len(self.shape)
64
-
65
- if data_ndims == 2:
66
- n_y, n_x = self.shape
67
- along_fast_dim = self.axes[0] == 1
68
- self.cufft_shape = n_x if along_fast_dim else n_y
69
- self.cufft_batch_size = n_y if along_fast_dim else n_x
70
- if not (along_fast_dim):
71
- # Batched vertical 1D FFT on 2D data need advanced data layout
72
- # http://docs.nvidia.com/cuda/cufft/#advanced-data-layout
73
- self._cufft_plan_kwargs = {
74
- "inembed": np.int32([0]),
75
- "istride": n_x,
76
- "idist": 1,
77
- "onembed": np.int32([0]),
78
- "ostride": n_x,
79
- "odist": 1,
80
- }
81
-
82
- if data_ndims == 3:
83
- # TODO/FIXME - the following work for C2C but not R2C ?!
84
- # fast_axes = [(1, 2), (2, 1), (2,)]
85
- fast_axes = [(2,)]
86
- if self.axes not in fast_axes:
87
- raise NotImplementedError(
88
- "With the CUDA backend, batched transform on 3D data is only supported along fastest dimensions"
89
- )
90
- self.cufft_batch_size = self.shape[0]
91
- self.cufft_shape = self.shape[1:]
92
- if len(self.axes) == 1:
93
- # 1D transform on 3D data: here only supported along fast dim, so batch_size is Nx*Ny
94
- self.cufft_batch_size = np.prod(self.shape[:2])
95
- self.cufft_shape = (self.shape[-1],)
96
- if len(self.cufft_shape) == 1:
97
- self.cufft_shape = self.cufft_shape[0]
98
-
99
- def _configure_normalization(self, normalize):
100
- self.normalize = normalize
101
- if self.normalize == "ortho":
102
- # TODO
103
- raise NotImplementedError("Normalization mode 'ortho' is not implemented with CUDA backend yet.")
104
- self.cufft_scale_inverse = self.normalize == "rescale"
105
-
106
- def _compute_fft_plans(self):
107
- self.plan_forward = Plan( # pylint: disable = E1102
108
- self.cufft_shape,
109
- self.dtype,
110
- self.dtype_out,
111
- batch=self.cufft_batch_size,
112
- stream=self.processing.stream,
113
- **self._cufft_plan_kwargs,
114
- # cufft extensible plan API is only supported after 0.5.1
115
- # (commit 65288d28ca0b93e1234133f8d460dc6becb65121)
116
- # but there is still no official 0.5.2
117
- # ~ auto_allocate=True # cufft extensible plan API
118
- )
119
- self.plan_inverse = Plan( # pylint: disable = E1102
120
- self.cufft_shape, # not shape_out
121
- self.dtype_out,
122
- self.dtype,
123
- batch=self.cufft_batch_size,
124
- stream=self.processing.stream,
125
- **self._cufft_plan_kwargs,
126
- # cufft extensible plan API is only supported after 0.5.1
127
- # (commit 65288d28ca0b93e1234133f8d460dc6becb65121)
128
- # but there is still no official 0.5.2
129
- # ~ auto_allocate=True
130
- )
131
-
132
- def fft(self, array, output=None):
133
- if output is None:
134
- output = self.output_fft = self.processing.allocate_array(
135
- "output_fft", self.shape_out, dtype=self.dtype_out
136
- )
137
- cu_fft(array, output, self.plan_forward, scale=False) # pylint: disable = E1102
138
- return output
139
-
140
- def ifft(self, array, output=None):
141
- if output is None:
142
- output = self.output_ifft = self.processing.allocate_array("output_ifft", self.shape, dtype=self.dtype)
143
- cu_ifft( # pylint: disable = E1102
144
- array,
145
- output,
146
- self.plan_inverse,
147
- scale=self.cufft_scale_inverse,
148
- )
149
- return output
150
-
151
-
152
21
  maybe_cached = lru_cache(maxsize=n_cached_ffts) if n_cached_ffts > 0 else no_decorator
153
22
 
154
23
 
@@ -212,45 +81,9 @@ def has_vkfft(safe=True):
      return v


- def _has_skfft(x):
-     # should be run from within a Process
-     try:
-         from nabu.processing.fft_cuda import SKCUFFT
-
-         _ = SKCUFFT((16,), "f")
-         avail = True
-     except (ImportError, RuntimeError, OSError, NameError):
-         avail = False
-     return avail
-
-
- @lru_cache(maxsize=2)
- def has_skcuda(safe=True):
-     """
-     Determine whether scikit-cuda/CUFFT is available.
-     Currently, scikit-cuda will create a Cuda context for Cublas, which can mess up the current execution.
-     Do it in a separate thread.
-     """
-     if not safe:
-         return _has_skfft(None)
-     try:
-         ctx = get_context("spawn")
-         with Pool(1, context=ctx) as p:
-             v = p.map(_has_skfft, [1])[0]
-     except AssertionError:
-         # Can get AssertionError: daemonic processes are not allowed to have children
-         # if the calling code is already a subprocess
-         return _has_skfft(None)
-     return v
-
-
  @lru_cache(maxsize=2)
  def get_fft_class(backend="vkfft"):
      backends = {
-         "scikit-cuda": SKCUFFT,
-         "skcuda": SKCUFFT,
-         "cufft": SKCUFFT,
-         "scikit": SKCUFFT,
          "vkfft": VKCUFFT,
          "pyvkfft": VKCUFFT,
      }
@@ -266,7 +99,7 @@ def get_fft_class(backend="vkfft"):

      avail_fft_implems = get_available_fft_implems()
      if len(avail_fft_implems) == 0:
-         raise RuntimeError("Could not any Cuda FFT implementation. Please install either scikit-cuda or pyvkfft")
+         raise RuntimeError("Could not any Cuda FFT implementation. Please install pyvkfft")
      if backend not in avail_fft_implems:
          warnings.warn("Could not get FFT backend '%s'" % backend, RuntimeWarning)
          backend = avail_fft_implems[0]
@@ -279,6 +112,4 @@
      avail_implems = []
      if has_vkfft(safe=True):
          avail_implems.append("vkfft")
-     if has_skcuda(safe=True):
-         avail_implems.append("skcuda")
      return avail_implems
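With the scikit-cuda backend removed, pyvkfft is the only Cuda FFT implementation left. A minimal usage sketch based only on the functions visible in this diff; it assumes VKCUFFT keeps the same (shape, dtype) constructor and fft()/ifft() methods as the removed SKCUFFT class, and it requires a CUDA-capable environment with pycuda and pyvkfft installed:

    from nabu.processing.fft_cuda import get_available_fft_implems, get_fft_class

    print(get_available_fft_implems())  # now at most ["vkfft"]; "skcuda" is gone

    FFTClass = get_fft_class("vkfft")   # an unavailable backend falls back with a RuntimeWarning
    fft = FFTClass((16,), "f")          # same (shape, dtype) call the removed availability probe used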
nabu/processing/tests/test_fft.py CHANGED
@@ -4,14 +4,13 @@ import numpy as np
  from scipy.fft import fftn, ifftn, rfftn, irfftn
  from nabu.testutils import generate_tests_scenarios, get_data, get_array_of_given_shape, __do_long_tests__
  from nabu.cuda.utils import get_cuda_context, __has_pycuda__
- from nabu.processing.fft_cuda import SKCUFFT, VKCUFFT, get_available_fft_implems
+ from nabu.processing.fft_cuda import VKCUFFT, get_available_fft_implems
  from nabu.opencl.utils import __has_pyopencl__, get_opencl_context
  from nabu.processing.fft_opencl import VKCLFFT, has_vkfft as has_cl_vkfft
  from nabu.processing.fft_base import is_fast_axes

  available_cuda_fft = get_available_fft_implems()
  __has_vkfft__ = "vkfft" in available_cuda_fft
- __has_skcuda__ = "skcuda" in available_cuda_fft


  scenarios = {
@@ -113,67 +112,6 @@ class TestFFT:
113
112
  ref = ref_ifft_func(data, axes=axes)
114
113
  return ref
115
114
 
116
- @pytest.mark.skipif(
117
- not (__has_skcuda__ and __has_pycuda__), reason="Need pycuda and (scikit-cuda or vkfft) for this test"
118
- )
119
- @pytest.mark.parametrize("config", scenarios)
120
- def test_sckcuda(self, config):
121
- r2c = config["r2c"]
122
- shape = config["shape"]
123
- precision = config["precision"]
124
- ndim = len(shape)
125
- if ndim == 3 and not (__do_long_tests__):
126
- pytest.skip("3D FFTs are done only for long tests - use NABU_LONG_TESTS=1")
127
-
128
- data = self._get_data_array(config)
129
-
130
- res, cufft = self._do_fft(data, r2c, return_fft_obj=True, backend_cls=SKCUFFT)
131
- ref = self._do_reference_fft(data, r2c)
132
-
133
- tol = self.abs_tol[precision][ndim]
134
- self.check_result(res, ref, config, tol, name="skcuda")
135
-
136
- # Complex-to-complex can also be performed on real data (as in numpy.fft.fft(real_data))
137
- if not (r2c):
138
- res = self._do_fft(data, False, backend_cls=SKCUFFT)
139
- ref = self._do_reference_fft(data, False)
140
- self.check_result(res, ref, config, tol, name="skcuda")
141
-
142
- # IFFT
143
- res = cufft.ifft(cufft.output_fft).get()
144
- self.check_result(res, data, config, tol, name="skcuda")
145
- # Perhaps we should also check against numpy/scipy ifft,
146
- # but it does not yield the good shape for R2C on odd-sized data
147
-
148
- @pytest.mark.skipif(
149
- not (__has_skcuda__ and __has_pycuda__), reason="Need pycuda and (scikit-cuda or vkfft) for this test"
150
- )
151
- @pytest.mark.parametrize("config", scenarios)
152
- def test_skcuda_batched(self, config):
153
- shape = config["shape"]
154
- if len(shape) == 1:
155
- return
156
- elif len(shape) == 3 and not (__do_long_tests__):
157
- pytest.skip("3D FFTs are done only for long tests - use NABU_LONG_TESTS=1")
158
- r2c = config["r2c"]
159
- tol = self.abs_tol[config["precision"]][len(shape)]
160
-
161
- data = self._get_data_array(config)
162
-
163
- if data.ndim == 2:
164
- axes_to_test = [(0,), (1,)]
165
- elif data.ndim == 3:
166
- # axes_to_test = [(1, 2), (2, 1), (2,)] # See fft.py: works for C2C but not R2C ?
167
- axes_to_test = [(2,)]
168
-
169
- for axes in axes_to_test:
170
- res, cufft = self._do_fft(data, r2c, axes=axes, return_fft_obj=True, backend_cls=SKCUFFT)
171
- ref = self._do_reference_fft(data, r2c, axes=axes)
172
- self.check_result(res, ref, config, tol, name="skcuda batched axes=%s" % (str(axes)))
173
- # IFFT
174
- res = cufft.ifft(cufft.output_fft).get()
175
- self.check_result(res, data, config, tol, name="skcuda")
176
-
177
115
  @pytest.mark.parametrize("config", scenarios)
178
116
  def test_vkfft(self, config):
179
117
  backend = config["backend"]
nabu/reconstruction/cone.py CHANGED
@@ -292,6 +292,11 @@ class ConebeamReconstructor:
          # The translation is exactly the amount that brought the detector up or down, but in the opposite direction.
          vecs[:, 2] = -self.relative_z_position

+     def reset_rot_center(self, rot_center):
+         self.rot_center = rot_center
+         self._cor_shift = (self.sinos_shape[-1] - 1) / 2.0 - rot_center
+         self._create_astra_proj_geometry(self.relative_z_position)
+
      def _set_output(self, volume):
          if volume is not None:
              expected_shape = self.vol_shape  # if not (self._crop_data) else self._output_cropped_shape
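The new reset_rot_center converts an absolute rotation-axis position (in detector pixels) into the detector post-alignment shift used when the Astra geometry is re-created: the shift is measured from the detector's geometric center, (n_x - 1) / 2. A quick worked example of that arithmetic only (the values are illustrative):

    n_x = 2048           # detector width in pixels (example value)
    rot_center = 1030.0  # absolute CoR, in pixels

    cor_shift = (n_x - 1) / 2.0 - rot_center  # formula from reset_rot_center above
    print(cor_shift)     # -6.5: the axis sits 6.5 pixels to the right of the detector center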
nabu/reconstruction/mlem.py CHANGED
@@ -57,7 +57,7 @@ class MLEMReconstructor:

      def _set_shifts(self, shifts_uv, cor):
          if shifts_uv is None:
-             self.shifts_uv = np.zeros([self.n_angles, 2])
+             self.shifts_uv = None
          else:
              if shifts_uv.shape[0] != self.n_angles:
                  raise ValueError(
@@ -66,6 +66,9 @@
              self.shifts_uv = shifts_uv.copy()
          self.cor = cor

+     def reset_rot_center(self, cor):
+         self.cor = cor - (self.sinos_shape[-1] - 1) // 2
+
      def reconstruct(self, data_vwu):
          """
          data_align_vwu: numpy.ndarray or pycuda.gpuarray
@@ -79,10 +82,13 @@

          # MLEM recons
          self.vol_geom_align = cct.models.VolumeGeometry.get_default_from_data(data_vwu)
-         self.prj_geom_align = cct.models.ProjectionGeometry.get_default_parallel()
-         # Vertical shifts were handled in pipeline. Set them to ZERO
-         self.shifts_uv[:, 1] = 0.0
-         self.prj_geom_align.set_detector_shifts_vu(self.shifts_uv.T[::-1])
+         if self.shifts_uv is not None:
+             self.prj_geom_align = cct.models.ProjectionGeometry.get_default_parallel()
+             # Vertical shifts were handled in pipeline. Set them to ZERO
+             self.shifts_uv[:, 1] = 0.0
+             self.prj_geom_align.set_detector_shifts_vu(self.shifts_uv.T[::-1])
+         else:
+             self.prj_geom_align = None

          variances_align = cct.processing.compute_variance_poisson(data_vwu)
          self.weights_align = cct.processing.compute_variance_weight(variances_align, normalized=True)  # , use_std=True
nabu/reconstruction/tests/test_deringer.py CHANGED
@@ -38,7 +38,7 @@ if __do_long_tests__:
              "sigma": [1.0, 2.0],
              "wname": ["db15", "haar", "rbio4.4"],
              "padding": [None, (100, 100), (50, 71)],
-             "fft_implem": ["skcuda", "vkfft"],
+             "fft_implem": ["vkfft"],
          }
      )

@@ -107,7 +107,7 @@ class TestDeringer:

      @pytest.mark.skipif(
          not (__has_cuda_deringer__) or munchetal_filter is None,
-         reason="Need pycuda, pycudwt and (scikit-cuda or pyvkfft) for this test",
+         reason="Need pycuda, pycudwt and (cupy? or pyvkfft) for this test",
      )
      @pytest.mark.parametrize("config", fw_scenarios)
      def test_cuda_munch_deringer(self, config):
nabu/reconstruction/tests/test_fbp.py CHANGED
@@ -7,10 +7,10 @@ from nabu.testutils import get_data, generate_tests_scenarios, __do_long_tests__
  from nabu.cuda.utils import get_cuda_context, __has_pycuda__
  from nabu.opencl.utils import get_opencl_context, __has_pyopencl__

- from nabu.processing.fft_cuda import has_skcuda, has_vkfft as has_vkfft_cu
+ from nabu.processing.fft_cuda import has_vkfft as has_vkfft_cu
  from nabu.processing.fft_opencl import has_vkfft as has_vkfft_cl

- __has_pycuda__ = __has_pycuda__ and (has_skcuda() or has_vkfft_cu())
+ __has_pycuda__ = __has_pycuda__ and has_vkfft_cu()
  __has_pyopencl__ = __has_pyopencl__ and has_vkfft_cl()

  if __has_pycuda__:
@@ -62,7 +62,7 @@ class TestFBP:
      def _get_backprojector(self, config, *bp_args, **bp_kwargs):
          if config["backend"] == "cuda":
              if not (__has_pycuda__):
-                 pytest.skip("Need pycuda + (scikit-cuda or pyvkfft)")
+                 pytest.skip("Need pycuda + (cupy? or pyvkfft)")
              Backprojector = CudaBackprojector
              ctx = self.cuda_ctx
          else:
nabu/reconstruction/tests/test_halftomo.py CHANGED
@@ -42,7 +42,7 @@ class TestHalftomo:
      def _get_backprojector(self, config, *bp_args, **bp_kwargs):
          if config["backend"] == "cuda":
              if not (__has_pycuda__):
-                 pytest.skip("Need pycuda + scikit-cuda or vkfft")
+                 pytest.skip("Need pycuda + cupy? or vkfft")
              Backprojector = CudaBackprojector
              ctx = self.cuda_ctx
          else:
nabu/reconstruction/tests/test_reconstructor.py CHANGED
@@ -48,7 +48,7 @@ def bootstrap(request):
  )
  @pytest.mark.usefixtures("bootstrap")
  class TestReconstructor:
-     @pytest.mark.skipif(not (__has_cuda_fbp__), reason="need pycuda and (scikit-cuda or vkfft)")
+     @pytest.mark.skipif(not (__has_cuda_fbp__), reason="need pycuda and (cupy? or vkfft)")
      @pytest.mark.parametrize("config", scenarios)
      def test_cuda_reconstructor(self, config):
          data = self.projs
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: nabu
- Version: 2025.1.0.dev12
+ Version: 2025.1.0.dev14
  Summary: Nabu - Tomography software
  Author-email: Pierre Paleo <pierre.paleo@esrf.fr>, Henri Payno <henri.payno@esrf.fr>, Alessandro Mirone <mirone@esrf.fr>, Jérôme Lesaint <jerome.lesaint@esrf.fr>
  Maintainer-email: Pierre Paleo <pierre.paleo@esrf.fr>
@@ -1,8 +1,7 @@
1
1
  doc/conf.py,sha256=3xtCarCHrXPr50GbeRDuH-o3Jzojw7mpr7vpGfZPLAE,3787
2
2
  doc/create_conf_doc.py,sha256=IVOdP70KvbW9WS_UQu3Iyd0YfS60E2fJ5IDtQ_s4cDw,1143
3
- doc/doc_config.py,sha256=anqeOVjqE2e7eVzg7yuh9dvIneTkrA5doGl1cVBqT7Q,730
4
3
  doc/get_mathjax.py,sha256=VIvKRCdDuF2VoY8JD3mSey9XX13AZMmwTJBHdt1tUs4,1012
5
- nabu/__init__.py,sha256=SepTZaSbWOl9F3YCRbhVcmClf__zXKUWG0_aoxyUv4I,276
4
+ nabu/__init__.py,sha256=Lgb48elUmH-YevNEaVmLMkcg510J3eCdpEviDuB7MhA,276
6
5
  nabu/tests.py,sha256=hOJD1GGxn_KE1bWMoxfjnjzI7d9JBUpoc9B2_tVFiEk,1370
7
6
  nabu/testutils.py,sha256=PmQVGkQmLjq1oX8C_rE8N5yceVNbUifPuNMiE6cQq7k,8667
8
7
  nabu/utils.py,sha256=XFk8_sUupngl5g1aFEE7IL_AzL9eJUV_ikmjiGfJNDA,26727
@@ -10,7 +9,7 @@ nabu/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
10
9
  nabu/app/bootstrap.py,sha256=3yLZJmrmQBmPJMBtE2ih2cspfqOy5T_UN2U8B3i_hkI,3266
11
10
  nabu/app/bootstrap_stitching.py,sha256=wCKgugOQr6-lPMWEn4AYQeric0tCuRc9O-RnpBuTWAA,2230
12
11
  nabu/app/cast_volume.py,sha256=fBjVWOgLWS_JBF5qEdN-b-7GCOQVx15cwerh4BmAsTQ,11171
13
- nabu/app/cli_configs.py,sha256=x32idqLU2iMWxMi8ysYneUnq8mgpyAcT_-1udvAWWuM,22141
12
+ nabu/app/cli_configs.py,sha256=wM5p6cJNhsIWu4kqCiuO8UDKA3eXqkMXG0HiUjZqS7k,22150
14
13
  nabu/app/compare_volumes.py,sha256=3qm3QsxV-D_myLAkhM_LlX0DTrDmhzfhrnNak-1538Q,3337
15
14
  nabu/app/composite_cor.py,sha256=-qAbMJCFa0NmSb1hO2G1QvdW4fwEXSMBBbSFCnQXmCc,5068
16
15
  nabu/app/correct_rot.py,sha256=rIgBZZ_q-WPTy0mH22_XUXSXYqmR9DQkKlgEw_ol1kI,1988
@@ -20,7 +19,7 @@ nabu/app/diag_to_rot.py,sha256=tBaF1Oy0-bjCqkLFK-Bu_n74easzLlRkI_FTC1cAH2Q,16933
20
19
  nabu/app/double_flatfield.py,sha256=1yquRUIHYXmrIg7NGHMDIXpkUCS_muEioUeqLyWf8PI,5641
21
20
  nabu/app/generate_header.py,sha256=Voo-FAvwS_mU5gtDxyqpZnSpP_mlMSfd_6bEtgdi_tg,8919
22
21
  nabu/app/histogram.py,sha256=gyLXKwFrU5WPQMkM1k8OdpIXSwGEEKC-f8RcTHKOho4,7930
23
- nabu/app/multicor.py,sha256=25v7tmN2_p1OQNhsI0qbCn3rpBmXzpJUDhHSVwzkLKo,4068
22
+ nabu/app/multicor.py,sha256=Q9sxHSaCmahhf9hFZxX2jlfhk0me4ORBnHYfEwLo2dY,4725
24
23
  nabu/app/nx_z_splitter.py,sha256=p54jR-PAAw-AkGolM9fZE5lM2vbNLspCNCy5zBnJNP4,4976
25
24
  nabu/app/parse_reconstruction_log.py,sha256=msOtA3xaqLZpISRqS0F9_SrkvbdvKNPE99tdWhPrkY0,4745
26
25
  nabu/app/prepare_weights_double.py,sha256=jy78aP1UNKqSk82Wy6ZCkKjpYXxaGmlIj_vjB4SxS8A,5443
@@ -79,7 +78,7 @@ nabu/estimation/tests/test_focus.py,sha256=cMxEeTLlfVHOvG_4oGMLpp6bVI6okYz0u4aNc
79
78
  nabu/estimation/tests/test_tilt.py,sha256=KIgTJqQvNfWndm8f3aRSdznWFl3AdQhYXiZPKLseYOs,1672
80
79
  nabu/estimation/tests/test_translation.py,sha256=RkOnCYgk9DZGKlIka1snqTv4wbIz_nG7-EHAxnBHsJU,2999
81
80
  nabu/io/__init__.py,sha256=AbQgj4-fCCHOKynO_PyAR9ejnFSuWKgroxxhxWVpjyQ,120
82
- nabu/io/cast_volume.py,sha256=DVLCohBte7mkZooSIs3CyoDIqIY1HLDU1_2px0SiXrE,17483
81
+ nabu/io/cast_volume.py,sha256=56HDNQ3mr7krylMGWW8aBXZ6d3FAGfKK-syppDv1P8k,17573
83
82
  nabu/io/detector_distortion.py,sha256=qO1Z6gejkBrixThvU_sLwH3UfLAe8aAO63YQ8z7PH78,11750
84
83
  nabu/io/reader.py,sha256=D5pOhOPadySvl7YkZmIIb9ZUJGpv5dITvb6r5RHZAkE,40525
85
84
  nabu/io/reader_helical.py,sha256=q3LOmu6F_4Uxi3rZZWJ-rsix2Lgu_saXXdiJF8TLi24,4533
@@ -144,7 +143,7 @@ nabu/pipeline/fullfield/computations.py,sha256=uqf7LvuDPm7n51BpP8eb8vTewDgRFyzSD
144
143
  nabu/pipeline/fullfield/dataset_validator.py,sha256=HK_bmlII9pc59PXCgKJOyLv7Xu3DYv_jbH3RmQSgzvI,2933
145
144
  nabu/pipeline/fullfield/get_double_flatfield.py,sha256=uYFDAii6Nw4RCUQO_6Id6tXLdmtVbj_pxAHQWennSeE,5411
146
145
  nabu/pipeline/fullfield/nabu_config.py,sha256=gSsWXEyQsCpQnrWtqhMSYAiNZLXdYnWwCiQzmYplxWI,32700
147
- nabu/pipeline/fullfield/processconfig.py,sha256=lF874kU1_TbPCKDtB8gYZ4iZQ46fJUUyWY5JfOFDZHM,37616
146
+ nabu/pipeline/fullfield/processconfig.py,sha256=MISwEwb57hJRWr4Vih2cPA503CVMccqrsaTzmUt-BFk,37726
148
147
  nabu/pipeline/fullfield/reconstruction.py,sha256=sXI0NIFUc9nAhbAW6M52mbqalc6mFO315rrCl5r1RoY,37775
149
148
  nabu/pipeline/helical/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
150
149
  nabu/pipeline/helical/dataset_validator.py,sha256=HdKjUSj3PIpJb1dKSzJg8s4zXbAnMPWaPn8kvp_xQEs,657
@@ -182,16 +181,16 @@ nabu/preproc/shift.py,sha256=CT1i-G7Bs36hK5CGYBik05qAcKj82YpBZKNL3brcbzw,3419
182
181
  nabu/preproc/shift_cuda.py,sha256=4YOVRZ4oECEVWOQ6MzDBh8cOY2RtOOhlU7tir0Tp77M,4125
183
182
  nabu/preproc/tests/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
184
183
  nabu/preproc/tests/test_ccd_corr.py,sha256=KIpwaWYWT_6OPVXyVuLxXIIWreBkYF0rOnDMiegiLMU,2249
185
- nabu/preproc/tests/test_ctf.py,sha256=5C-PKzMcIQQFR4RsNh6au8TVGqbKhPV3_5djVaSB9p8,9992
184
+ nabu/preproc/tests/test_ctf.py,sha256=4KtjLdiMqwtv3ZJgk-q5xzxoxSnKL_wleZtEgEgEw2A,9986
186
185
  nabu/preproc/tests/test_double_flatfield.py,sha256=qUmgAktFHqzKJBHHfj3GJzorey0yk7R0e4nr8dRmbTo,2870
187
186
  nabu/preproc/tests/test_flatfield.py,sha256=8_vmTtfmrpbuKUr4fW8NmtH-PtGWkSQkMBFD6hs0ul4,21055
188
- nabu/preproc/tests/test_paganin.py,sha256=aHfNsJcVALVYkbNHrhcO9lFYfco2qYA5PnzAjKTG2BU,2998
187
+ nabu/preproc/tests/test_paganin.py,sha256=Ixlp624o0xtFs12akN6sDn-gHWXm1wVlcdumm2prqh4,2978
189
188
  nabu/preproc/tests/test_vshift.py,sha256=Gq9sDtbbAAdLTJALH2qcbeYXE5dINQ8QpBnCgZM0tFQ,2972
190
189
  nabu/processing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
191
190
  nabu/processing/azim.py,sha256=GIv_C1NYjUP3IYd51qvfECTDu-rfs3Dl2Hb5-ZhdhHY,7080
192
191
  nabu/processing/convolution_cuda.py,sha256=jOLKdEwzBwNZPTEjXIBWStZp22ieeukc6KGnuSNVnaA,15421
193
192
  nabu/processing/fft_base.py,sha256=wSLiyJEwX51chfZG3CWPl0DvKp182qAmTnRY8N1mWW0,5741
194
- nabu/processing/fft_cuda.py,sha256=9afmklpiN9F5Xqh7ikC6thBvVPJ8sR6XJEOTABf_8yo,9891
193
+ nabu/processing/fft_cuda.py,sha256=0JNzdSPPQ1xS38mjsgriN3-Cj9MXCtADLVHzIQjQjWc,3467
195
194
  nabu/processing/fft_opencl.py,sha256=Qhd9F32XCN22tet-en8S-yGkZDNsOMapqHlS0eTiPp0,2082
196
195
  nabu/processing/fftshift.py,sha256=mFOKuqQtxDPXoFPPP_G-YMA4WFmmO0oPFXjBWrrfDO8,4880
197
196
  nabu/processing/histogram.py,sha256=CumzHFjOw0JaHWlMnhCnj7I2Guam1HYqPVJeijKqC40,11376
@@ -212,7 +211,7 @@ nabu/processing/unsharp.py,sha256=XqoieKlX5sEHalxObi5JBOjauRUSbiHc6rqYT_buTa4,26
212
211
  nabu/processing/unsharp_cuda.py,sha256=uKFCEk8bRqgJzR8HB_OeKYuwC5jKqaXpFI6pRv_pDY4,2152
213
212
  nabu/processing/unsharp_opencl.py,sha256=vBVq5wA-rxOkULE-sv6WzNt5Pq9hc3gFcp4mM0kCo44,2651
214
213
  nabu/processing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
215
- nabu/processing/tests/test_fft.py,sha256=BlUvHjpLtyrn_kpAhM7rldhrdQ8tb_APY1Q2IwntoN0,10260
214
+ nabu/processing/tests/test_fft.py,sha256=9UnMcqnmzlBz5SHeJV9b8VOX-cAm35DBIbEP84l4nlM,7587
216
215
  nabu/processing/tests/test_fftshift.py,sha256=Qwg3oOwNh_MRmg9BS1kMVhQSSfzQzrCOsppsvY3v5rg,2599
217
216
  nabu/processing/tests/test_histogram.py,sha256=25CLs1WZpLF9xZ2DR82x4_YokA5Z76Qsnn6zY8YdJj8,2283
218
217
  nabu/processing/tests/test_medfilt.py,sha256=rnReK3MMCsc74-NoycYSRSMp7bN4Qdg4cZbHfWQ9ZWQ,2652
@@ -223,8 +222,7 @@ nabu/processing/tests/test_rotation.py,sha256=vedRXV9RePJywBKoyBkGANP1dhZCjphbYO
223
222
  nabu/processing/tests/test_transpose.py,sha256=hTG17wTaB5Wv6twbW3ZFhBv6BYfqJY7DTQPoO0-KdkM,2760
224
223
  nabu/processing/tests/test_unsharp.py,sha256=R3ovbwDDp3ccy2A8t6CcUVELXRWkED5EnQdN2FQOfQM,4391
225
224
  nabu/reconstruction/__init__.py,sha256=EmKVvx_-FJvzJngG4ielIC7FhMCpI1Waaflg_lF44tk,163
226
- nabu/reconstruction/astra.py,sha256=qnFYabU-Bzgys8hXjIBcwO2NazrvhNXUYFIkMHc6BmM,10444
227
- nabu/reconstruction/cone.py,sha256=JH3o5gYP3CsvMWwVwDWrhj2_BXd55-zAXUcz8_OP4sc,20338
225
+ nabu/reconstruction/cone.py,sha256=tSjaMDHeFV-h_IFbxUqSbhqlWmvlBcJQ8u89Y9Q9gg8,20559
228
226
  nabu/reconstruction/fbp.py,sha256=Tiz-CkYnDZ6YL6YB-e-BfwPdQEbw1X4GhGPX8gUs7Ng,5520
229
227
  nabu/reconstruction/fbp_base.py,sha256=NaHGF33TERTB-mCeMJPYLEn_FkZCKCUioSYkNXaA-Xg,17553
230
228
  nabu/reconstruction/fbp_opencl.py,sha256=coEGLq65PCuvWnhAbIyLPHACkWjMB0XOceMp9ZIDWtc,3274
@@ -232,7 +230,7 @@ nabu/reconstruction/filtering.py,sha256=monJnA_kk9k_Gy7bMAos9I-XgU8czWhf9vBth6ik
232
230
  nabu/reconstruction/filtering_cuda.py,sha256=_S-BZMhtnNt8ugePSmf-LF7JvMPCOyGPUMSseymgwZw,4019
233
231
  nabu/reconstruction/filtering_opencl.py,sha256=s65EWrjfUFdScicmNAGyRv8-8OQgapy_HvwMm-J7Lh4,3705
234
232
  nabu/reconstruction/hbp.py,sha256=Qll7i20LWxUo1-SHRxemkYAolBTP8HScwt1OvWmD2r0,18642
235
- nabu/reconstruction/mlem.py,sha256=66_porIK6Fn4KO6NIO3ZZHBPEEA1rf2Qk23YYwaZwzg,3568
233
+ nabu/reconstruction/mlem.py,sha256=cBnzbrT9epp4zuwBlxAty1NmFi6FpMGybvQubBkcOFw,3747
236
234
  nabu/reconstruction/projection.py,sha256=SNocaOY9HuNiHs-VxkW9IS707JPJnd3sDjAbf7aIw2E,9081
237
235
  nabu/reconstruction/reconstructor.py,sha256=16xxHcK4iie-uh-trf6x_IuvgxJKBvQRTE5B8tnc4F8,7358
238
236
  nabu/reconstruction/reconstructor_cuda.py,sha256=m_3GzG44PRyiSEfTvYjgr5atLwl26hMfZOMyqTWxp0g,1644
@@ -243,13 +241,13 @@ nabu/reconstruction/sinogram_cuda.py,sha256=DBF06ruzkZ5zn8x06Ron2NkB_FBKiZC2oM7k
243
241
  nabu/reconstruction/sinogram_opencl.py,sha256=vxJa5BeOd2NVdUayXYfQGAfO1AEbJfTGotuijT8qgCs,1486
244
242
  nabu/reconstruction/tests/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
245
243
  nabu/reconstruction/tests/test_cone.py,sha256=yRx0k1Nv1bTP2IyiwygtIoafQ6v18-qf9J1Rkg_OxSI,22732
246
- nabu/reconstruction/tests/test_deringer.py,sha256=HsEPlqa5sup1NoayjZjBATy67Y7MBlB-PsN780y02Rw,8368
247
- nabu/reconstruction/tests/test_fbp.py,sha256=rZxMke2AYoQm8bh9BhLOm2dXqumGa3jibDGfRaZqZMo,16404
244
+ nabu/reconstruction/tests/test_deringer.py,sha256=3xT7YltisnXa-NUd7aoMe88wOfTiTj-uxC6JFxGXtUA,8352
245
+ nabu/reconstruction/tests/test_fbp.py,sha256=SLdNjpWp3c7XVRUlR9nKyUvvzEcu9CkFE7WaP6nFxQc,16368
248
246
  nabu/reconstruction/tests/test_filtering.py,sha256=RXQAEvNIjwiQ-zU6xrkV4h-MkDg3uVBAUyz0tOSIfIw,5581
249
- nabu/reconstruction/tests/test_halftomo.py,sha256=FFv58sSA-5WDcJ_Y5tSm1P7QQwNX4SUIYtHQQb_G3xM,6532
247
+ nabu/reconstruction/tests/test_halftomo.py,sha256=zCAOFGRaLicr2PLw3628Kdcuj_bhGZz8QZvWBL-c7As,6526
250
248
  nabu/reconstruction/tests/test_mlem.py,sha256=JWby9nqWqockgUh2RQoqrC8BtfB4v5SCNayk4bBJ4TM,3791
251
249
  nabu/reconstruction/tests/test_projector.py,sha256=QcHLnNWQ6HC9pJo9CdalaaeEDQ_vFpNMn5yXEpfBDXI,6259
252
- nabu/reconstruction/tests/test_reconstructor.py,sha256=3p2Wk_OqgZqkNOkhK_NJWlHkOIENTJhLuwVRI8Y1_Ak,3385
250
+ nabu/reconstruction/tests/test_reconstructor.py,sha256=xzfEM0j48ScQoGqWVcAK74HG97tcF7FsVVLzvX_TgHE,3379
253
251
  nabu/reconstruction/tests/test_sino_normalization.py,sha256=qNnpVUp3UcFGyLlSP0rCzE5hxdV6YENL9AF6mo72WcQ,3669
254
252
  nabu/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
255
253
  nabu/resources/cor.py,sha256=-mcrTbj3G7o4PP5E_gIRo2j6_-ADmMkkOc_0CyQv84c,170
@@ -314,9 +312,9 @@ nabu/thirdparty/pore3d_deringer_munch.py,sha256=o4bisnFc-wMjuohWBT8wgWmfNehPQGtC
314
312
  nabu/thirdparty/tomocupy_remove_stripe.py,sha256=Khe4zFf0kRzu65Yxnvq58gt1ljOztqJGdMDhVAiM7lM,24363
315
313
  nabu/thirdparty/tomopy_phase.py,sha256=hK4oPpkogLOhv23XzzEXQY2u3r8fJvASY_bINVs6ERE,8634
316
314
  nabu/thirdparty/tomwer_load_flats_darks.py,sha256=ZNoVAinUb_wGYbfvs_4BVnWsjsQmNxSvCh1bWhR2WWg,5611
317
- nabu-2025.1.0.dev12.dist-info/licenses/LICENSE,sha256=1eAIPSnEsnSFNUODnLtNtQTs76exG3ZxJ1DJR6zoUBA,1066
318
- nabu-2025.1.0.dev12.dist-info/METADATA,sha256=T4u5bDQORSj-JCgYQ-CRSK07KKomiK3iR6HMZsIerOo,4327
319
- nabu-2025.1.0.dev12.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
320
- nabu-2025.1.0.dev12.dist-info/entry_points.txt,sha256=cJKGkBeykVL7uK3E4R0RLRqMXifTL2qdO573syPAvJc,1288
321
- nabu-2025.1.0.dev12.dist-info/top_level.txt,sha256=fsm_N3eXLRZk2QXF9OSKPNDPFXOz8FAQjHh5avT3dok,9
322
- nabu-2025.1.0.dev12.dist-info/RECORD,,
315
+ nabu-2025.1.0.dev14.dist-info/licenses/LICENSE,sha256=1eAIPSnEsnSFNUODnLtNtQTs76exG3ZxJ1DJR6zoUBA,1066
316
+ nabu-2025.1.0.dev14.dist-info/METADATA,sha256=e9px--rVeYTmFlOJhZETMdJ8GtzN8Wy_s5VZELrLZD0,4327
317
+ nabu-2025.1.0.dev14.dist-info/WHEEL,sha256=pxyMxgL8-pra_rKaQ4drOZAegBVuX-G_4nRHjjgWbmo,91
318
+ nabu-2025.1.0.dev14.dist-info/entry_points.txt,sha256=cJKGkBeykVL7uK3E4R0RLRqMXifTL2qdO573syPAvJc,1288
319
+ nabu-2025.1.0.dev14.dist-info/top_level.txt,sha256=fsm_N3eXLRZk2QXF9OSKPNDPFXOz8FAQjHh5avT3dok,9
320
+ nabu-2025.1.0.dev14.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (78.1.0)
+ Generator: setuptools (79.0.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

doc/doc_config.py DELETED
@@ -1,32 +0,0 @@
- #!/usr/bin/env python
-
- from nabu.resources.nabu_config import nabu_config
-
-
- def generate(file_):
-     def write(content):
-         print(content, file=file_)
-     for section, values in nabu_config.items():
-         if section == "about":
-             continue
-         write("## %s\n" % section)
-         for key, val in values.items():
-             if val["type"] == "unsupported":
-                 continue
-             write(val["help"] + "\n")
-             write(
-                 "```ini\n%s = %s\n```"
-                 % (key, val["default"])
-             )
-
-
-
- if __name__ == "__main__":
-
-     import sys, os
-     print(os.path.abspath(__file__))
-     exit(0)
-
-     fname = "/tmp/test.md"
-     with open(fname, "w") as f:
-         generate(f)
nabu/reconstruction/astra.py DELETED
@@ -1,245 +0,0 @@
1
- # ruff: noqa
2
- try:
3
- import astra
4
-
5
- __have_astra__ = True
6
- except ImportError:
7
- __have_astra__ = False
8
- astra = None
9
-
10
-
11
- class AstraReconstructor:
12
- """
13
- Base class for reconstructors based on the Astra toolbox
14
- """
15
-
16
- default_extra_options = {
17
- "axis_correction": None,
18
- "clip_outer_circle": False,
19
- "scale_factor": None,
20
- "filter_cutoff": 1.0,
21
- "outer_circle_value": 0.0,
22
- }
23
-
24
- def __init__(
25
- self,
26
- sinos_shape,
27
- angles=None,
28
- volume_shape=None,
29
- rot_center=None,
30
- pixel_size=None,
31
- padding_mode="zeros",
32
- filter_name=None,
33
- slice_roi=None,
34
- cuda_options=None,
35
- extra_options=None,
36
- ):
37
- self._configure_extra_options(extra_options)
38
- self._init_cuda(cuda_options)
39
- self._set_sino_shape(sinos_shape)
40
- self._orig_prog_geom = None
41
- self._init_geometry(
42
- source_origin_dist,
43
- origin_detector_dist,
44
- pixel_size,
45
- angles,
46
- volume_shape,
47
- rot_center,
48
- relative_z_position,
49
- slice_roi,
50
- )
51
- self._init_fdk(padding_mode, filter_name)
52
- self._alg_id = None
53
- self._vol_id = None
54
- self._proj_id = None
55
-
56
- def _configure_extra_options(self, extra_options):
57
- self.extra_options = self.default_extra_options.copy()
58
- self.extra_options.update(extra_options or {})
59
-
60
- def _init_cuda(self, cuda_options):
61
- cuda_options = cuda_options or {}
62
- self.cuda = CudaProcessing(**cuda_options)
63
-
64
- def _set_sino_shape(self, sinos_shape):
65
- if len(sinos_shape) != 3:
66
- raise ValueError("Expected a 3D shape")
67
- self.sinos_shape = sinos_shape
68
- self.n_sinos, self.n_angles, self.prj_width = sinos_shape
69
-
70
- def _set_pixel_size(self, pixel_size):
71
- if pixel_size is None:
72
- det_spacing_y = det_spacing_x = 1
73
- elif np.iterable(pixel_size):
74
- det_spacing_y, det_spacing_x = pixel_size
75
- else:
76
- # assuming scalar
77
- det_spacing_y = det_spacing_x = pixel_size
78
- self._det_spacing_y = det_spacing_y
79
- self._det_spacing_x = det_spacing_x
80
-
81
- def _set_slice_roi(self, slice_roi):
82
- self.slice_roi = slice_roi
83
- self._vol_geom_n_x = self.n_x
84
- self._vol_geom_n_y = self.n_y
85
- self._crop_data = True
86
- if slice_roi is None:
87
- return
88
- start_x, end_x, start_y, end_y = slice_roi
89
- if roi_is_centered(self.volume_shape[1:], (slice(start_y, end_y), slice(start_x, end_x))):
90
- # Astra can only reconstruct subregion centered around the origin
91
- self._vol_geom_n_x = self.n_x - start_x * 2
92
- self._vol_geom_n_y = self.n_y - start_y * 2
93
- else:
94
- raise NotImplementedError(
95
- "Astra supports only slice_roi centered around origin (got slice_roi=%s with n_x=%d, n_y=%d)"
96
- % (str(slice_roi), self.n_x, self.n_y)
97
- )
98
-
99
- def _init_geometry(
100
- self,
101
- source_origin_dist,
102
- origin_detector_dist,
103
- pixel_size,
104
- angles,
105
- volume_shape,
106
- rot_center,
107
- relative_z_position,
108
- slice_roi,
109
- ):
110
- if angles is None:
111
- self.angles = np.linspace(0, 2 * np.pi, self.n_angles, endpoint=True)
112
- else:
113
- self.angles = angles
114
- if volume_shape is None:
115
- volume_shape = (self.sinos_shape[0], self.sinos_shape[2], self.sinos_shape[2])
116
- self.volume_shape = volume_shape
117
- self.n_z, self.n_y, self.n_x = self.volume_shape
118
- self.source_origin_dist = source_origin_dist
119
- self.origin_detector_dist = origin_detector_dist
120
- self.magnification = 1 + origin_detector_dist / source_origin_dist
121
- self._set_slice_roi(slice_roi)
122
- self.vol_geom = astra.create_vol_geom(self._vol_geom_n_y, self._vol_geom_n_x, self.n_z)
123
- self.vol_shape = astra.geom_size(self.vol_geom)
124
- self._cor_shift = 0.0
125
- self.rot_center = rot_center
126
- if rot_center is not None:
127
- self._cor_shift = (self.sinos_shape[-1] - 1) / 2.0 - rot_center
128
- self._set_pixel_size(pixel_size)
129
- self._axis_corrections = self.extra_options.get("axis_correction", None)
130
- self._create_astra_proj_geometry(relative_z_position)
131
-
132
- def _create_astra_proj_geometry(self, relative_z_position):
133
- # This object has to be re-created each time, because once the modifications below are done,
134
- # it is no more a "cone" geometry but a "cone_vec" geometry, and cannot be updated subsequently
135
- # (see astra/functions.py:271)
136
- self.proj_geom = astra.create_proj_geom(
137
- "cone",
138
- self._det_spacing_x,
139
- self._det_spacing_y,
140
- self.n_sinos,
141
- self.prj_width,
142
- self.angles,
143
- self.source_origin_dist,
144
- self.origin_detector_dist,
145
- )
146
- self.relative_z_position = relative_z_position or 0.0
147
- # This will turn the geometry of type "cone" into a geometry of type "cone_vec"
148
- if self._orig_prog_geom is None:
149
- self._orig_prog_geom = self.proj_geom
150
- self.proj_geom = astra.geom_postalignment(self.proj_geom, (self._cor_shift, 0))
151
- # (src, detector_center, u, v) = (srcX, srcY, srcZ, dX, dY, dZ, uX, uY, uZ, vX, vY, vZ)
152
- vecs = self.proj_geom["Vectors"]
153
-
154
- # To adapt the center of rotation:
155
- # dX = cor_shift * cos(theta) - origin_detector_dist * sin(theta)
156
- # dY = origin_detector_dist * cos(theta) + cor_shift * sin(theta)
157
- if self._axis_corrections is not None:
158
- # should we check that dX and dY match the above formulas ?
159
- cor_shifts = self._cor_shift + self._axis_corrections
160
- vecs[:, 3] = cor_shifts * np.cos(self.angles) - self.origin_detector_dist * np.sin(self.angles)
161
- vecs[:, 4] = self.origin_detector_dist * np.cos(self.angles) + cor_shifts * np.sin(self.angles)
162
-
163
- # To adapt the z position:
164
- # Component 2 of vecs is the z coordinate of the source, component 5 is the z component of the detector position
165
- # We need to re-create the same inclination of the cone beam, thus we need to keep the inclination of the two z positions.
166
- # The detector is centered on the rotation axis, thus moving it up or down, just moves it out of the reconstruction volume.
167
- # We can bring back the detector in the correct volume position, by applying a rigid translation of both the detector and the source.
168
- # The translation is exactly the amount that brought the detector up or down, but in the opposite direction.
169
- vecs[:, 2] = -self.relative_z_position
170
-
171
- def _set_output(self, volume):
172
- if volume is not None:
173
- expected_shape = self.vol_shape # if not (self._crop_data) else self._output_cropped_shape
174
- self.cuda.check_array(volume, expected_shape)
175
- self.cuda.set_array("output", volume)
176
- if volume is None:
177
- self.cuda.allocate_array("output", self.vol_shape)
178
- d_volume = self.cuda.get_array("output")
179
- z, y, x = d_volume.shape
180
- self._vol_link = astra.data3d.GPULink(d_volume.ptr, x, y, z, d_volume.strides[-2])
181
- self._vol_id = astra.data3d.link("-vol", self.vol_geom, self._vol_link)
182
-
183
- def _set_input(self, sinos):
184
- self.cuda.check_array(sinos, self.sinos_shape)
185
- self.cuda.set_array("sinos", sinos) # self.cuda.sinos is now a GPU array
186
- # TODO don't create new link/proj_id if ptr is the same ?
187
- # But it seems Astra modifies the input sinogram while doing FDK, so this might be not relevant
188
- d_sinos = self.cuda.get_array("sinos")
189
-
190
- # self._proj_data_link = astra.data3d.GPULink(d_sinos.ptr, self.prj_width, self.n_angles, self.n_z, sinos.strides[-2])
191
- self._proj_data_link = astra.data3d.GPULink(
192
- d_sinos.ptr, self.prj_width, self.n_angles, self.n_sinos, d_sinos.strides[-2]
193
- )
194
- self._proj_id = astra.data3d.link("-sino", self.proj_geom, self._proj_data_link)
195
-
196
- def _preprocess_data(self):
197
- d_sinos = self.cuda.sinos
198
- for i in range(d_sinos.shape[0]):
199
- self.sino_filter.filter_sino(d_sinos[i], output=d_sinos[i])
200
-
201
- def _update_reconstruction(self):
202
- cfg = astra.astra_dict("BP3D_CUDA")
203
- cfg["ReconstructionDataId"] = self._vol_id
204
- cfg["ProjectionDataId"] = self._proj_id
205
- if self._alg_id is not None:
206
- astra.algorithm.delete(self._alg_id)
207
- self._alg_id = astra.algorithm.create(cfg)
208
-
209
- def reconstruct(self, sinos, output=None, relative_z_position=None):
210
- """
211
- sinos: numpy.ndarray or pycuda.gpuarray
212
- Sinograms, with shape (n_sinograms, n_angles, width)
213
- output: pycuda.gpuarray, optional
214
- Output array. If not provided, a new numpy array is returned
215
- relative_z_position: int, optional
216
- Position of the central slice of the slab, with respect to the full stack of slices.
217
- By default it is set to zero, meaning that the current slab is assumed in the middle of the stack
218
- """
219
- self._create_astra_proj_geometry(relative_z_position)
220
- self._set_input(sinos)
221
- self._set_output(output)
222
- self._preprocess_data()
223
- self._update_reconstruction()
224
- astra.algorithm.run(self._alg_id)
225
- #
226
- # NB: Could also be done with
227
- # from astra.experimental import direct_BP3D
228
- # projector_id = astra.create_projector("cuda3d", self.proj_geom, self.vol_geom, options=None)
229
- # direct_BP3D(projector_id, self._vol_link, self._proj_data_link)
230
- #
231
- result = self.cuda.get_array("output")
232
- if output is None:
233
- result = result.get()
234
- if self.extra_options.get("scale_factor", None) is not None:
235
- result *= np.float32(self.extra_options["scale_factor"]) # in-place for pycuda
236
- self.cuda.recover_arrays_references(["sinos", "output"])
237
- return result
238
-
239
- def __del__(self):
240
- if getattr(self, "_alg_id", None) is not None:
241
- astra.algorithm.delete(self._alg_id)
242
- if getattr(self, "_vol_id", None) is not None:
243
- astra.data3d.delete(self._vol_id)
244
- if getattr(self, "_proj_id", None) is not None:
245
- astra.data3d.delete(self._proj_id)