nabu 2022.3.0a1__py3-none-any.whl → 2023.1.0a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. nabu/__init__.py +1 -1
  2. nabu/app/bootstrap.py +7 -1
  3. nabu/app/cast_volume.py +8 -2
  4. nabu/app/cli_configs.py +69 -0
  5. nabu/app/composite_cor.py +97 -0
  6. nabu/app/create_distortion_map_from_poly.py +118 -0
  7. nabu/app/nx_z_splitter.py +1 -1
  8. nabu/app/prepare_weights_double.py +21 -16
  9. nabu/app/reconstruct_helical.py +0 -1
  10. nabu/app/utils.py +10 -5
  11. nabu/cuda/processing.py +1 -0
  12. nabu/cuda/tests/test_padding.py +1 -0
  13. nabu/cuda/utils.py +1 -0
  14. nabu/distributed/__init__.py +0 -0
  15. nabu/distributed/utils.py +57 -0
  16. nabu/distributed/worker.py +543 -0
  17. nabu/estimation/cor.py +3 -7
  18. nabu/estimation/cor_sino.py +2 -1
  19. nabu/estimation/distortion.py +6 -4
  20. nabu/io/cast_volume.py +10 -1
  21. nabu/io/detector_distortion.py +305 -0
  22. nabu/io/reader.py +37 -7
  23. nabu/io/reader_helical.py +0 -3
  24. nabu/io/tests/test_cast_volume.py +16 -4
  25. nabu/io/tests/test_detector_distortion.py +178 -0
  26. nabu/io/tests/test_writers.py +2 -2
  27. nabu/io/tiffwriter_zmm.py +2 -3
  28. nabu/io/writer.py +84 -1
  29. nabu/io/writer_BACKUP_193259.py +556 -0
  30. nabu/io/writer_BACKUP_193381.py +556 -0
  31. nabu/io/writer_BASE_193259.py +548 -0
  32. nabu/io/writer_BASE_193381.py +548 -0
  33. nabu/io/writer_LOCAL_193259.py +550 -0
  34. nabu/io/writer_LOCAL_193381.py +550 -0
  35. nabu/io/writer_REMOTE_193259.py +557 -0
  36. nabu/io/writer_REMOTE_193381.py +557 -0
  37. nabu/misc/fourier_filters.py +2 -0
  38. nabu/misc/rotation.py +0 -1
  39. nabu/misc/tests/test_rotation.py +1 -0
  40. nabu/pipeline/config_validators.py +10 -0
  41. nabu/pipeline/datadump.py +1 -1
  42. nabu/pipeline/dataset_validator.py +0 -1
  43. nabu/pipeline/detector_distortion_provider.py +20 -0
  44. nabu/pipeline/estimators.py +35 -21
  45. nabu/pipeline/fallback_utils.py +1 -1
  46. nabu/pipeline/fullfield/chunked.py +30 -15
  47. nabu/pipeline/fullfield/chunked_black.py +881 -0
  48. nabu/pipeline/fullfield/chunked_cuda.py +34 -4
  49. nabu/pipeline/fullfield/chunked_fb.py +966 -0
  50. nabu/pipeline/fullfield/chunked_google.py +921 -0
  51. nabu/pipeline/fullfield/chunked_pep8.py +920 -0
  52. nabu/pipeline/fullfield/computations.py +7 -6
  53. nabu/pipeline/fullfield/dataset_validator.py +1 -1
  54. nabu/pipeline/fullfield/grouped_cuda.py +6 -0
  55. nabu/pipeline/fullfield/nabu_config.py +15 -3
  56. nabu/pipeline/fullfield/processconfig.py +5 -0
  57. nabu/pipeline/fullfield/reconstruction.py +1 -2
  58. nabu/pipeline/helical/gridded_accumulator.py +1 -8
  59. nabu/pipeline/helical/helical_chunked_regridded.py +48 -33
  60. nabu/pipeline/helical/helical_reconstruction.py +1 -9
  61. nabu/pipeline/helical/nabu_config.py +11 -14
  62. nabu/pipeline/helical/span_strategy.py +11 -4
  63. nabu/pipeline/helical/tests/test_accumulator.py +0 -3
  64. nabu/pipeline/helical/tests/test_pipeline_elements_full.py +0 -6
  65. nabu/pipeline/helical/tests/test_strategy.py +0 -1
  66. nabu/pipeline/helical/weight_balancer.py +0 -1
  67. nabu/pipeline/params.py +4 -0
  68. nabu/pipeline/processconfig.py +6 -2
  69. nabu/pipeline/writer.py +9 -4
  70. nabu/preproc/distortion.py +4 -3
  71. nabu/preproc/double_flatfield.py +16 -4
  72. nabu/preproc/double_flatfield_cuda.py +3 -2
  73. nabu/preproc/double_flatfield_variable_region.py +13 -4
  74. nabu/preproc/flatfield.py +29 -7
  75. nabu/preproc/flatfield_cuda.py +0 -1
  76. nabu/preproc/flatfield_variable_region.py +5 -2
  77. nabu/preproc/phase.py +0 -1
  78. nabu/preproc/phase_cuda.py +0 -1
  79. nabu/preproc/tests/test_ctf.py +4 -3
  80. nabu/preproc/tests/test_flatfield.py +6 -7
  81. nabu/reconstruction/fbp_opencl.py +1 -1
  82. nabu/reconstruction/filtering.py +0 -1
  83. nabu/reconstruction/tests/test_fbp.py +1 -0
  84. nabu/resources/dataset_analyzer.py +0 -1
  85. nabu/resources/templates/bm05_pag.conf +34 -0
  86. nabu/resources/templates/id16_ctf.conf +2 -1
  87. nabu/resources/tests/test_nxflatfield.py +0 -1
  88. nabu/resources/tests/test_units.py +0 -1
  89. nabu/stitching/frame_composition.py +7 -1
  90. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/METADATA +2 -7
  91. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/RECORD +96 -75
  92. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/WHEEL +1 -1
  93. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/entry_points.txt +2 -1
  94. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/LICENSE +0 -0
  95. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/top_level.txt +0 -0
  96. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/zip-safe +0 -0
nabu/distributed/worker.py ADDED
@@ -0,0 +1,543 @@
+ """
+ This file is obsolete. Don't rely on it.
+ """
+
+ from math import ceil
+ from socket import gethostname
+ from multiprocessing import cpu_count
+
+ from psutil import virtual_memory  # used by get_worker_resources() below
+ from distributed import Client, get_worker, worker_client, LocalCluster, Nanny, SpecCluster, Scheduler
+
+ from ..utils import check_supported
+ from ..resources.computations import estimate_chunk_size
+ from ..resources.processconfig import ProcessConfig
+ from ..resources.gpu import pick_gpus
+ from ..resources.utils import get_memory_per_node, get_threads_per_node
+ from ..cuda.utils import collect_cuda_gpus
+ from ..opencl.utils import collect_opencl_gpus, collect_opencl_cpus, pick_opencl_cpu_platform
+ from ..app.logger import Logger, LoggerOrPrint
+ from ..app.process import WorkerProcess
+
+
+ def get_dataset(dataset_name, client=None):
+     if client is None:
+         with worker_client() as client:
+             res = client.datasets[dataset_name]
+     else:
+         res = client.datasets[dataset_name]
+     return res
+
+
+ def get_dask_worker():
+     try:
+         w = get_worker()
+     except Exception as exc:
+         w = None
+     return w
+
+
+ def get_worker_resources(try_cuda=True, try_opencl=True):
+     current_worker = get_worker()
+     vm = virtual_memory()
+     resources = {
+         "worker_name": current_worker.name,
+         "worker_addr": current_worker.address,
+         "host": gethostname(),
+         "mem_total_GB": vm.total / 1e9,
+         "mem_avail_GB": vm.available / 1e9,
+         "cpu_cores": cpu_count(),
+         "cuda_gpus": {},
+         "opencl_gpus": {},
+     }
+     if try_cuda:
+         resources["cuda_gpus"] = collect_cuda_gpus()
+     if try_opencl:
+         resources["opencl_gpus"] = collect_opencl_gpus()
+     return resources
+
+
+ def get_workers_resources(client, try_cuda=True, try_opencl=True):
+     workers = list(client.has_what().keys())
+     resources = {}
+     for worker_addr in workers:
+         f = client.submit(
+             get_worker_resources, try_cuda=try_cuda, try_opencl=try_opencl,
+             workers=[worker_addr], pure=False
+         )
+         resources[worker_addr] = f.result()
+     return resources
+
+
+ def get_gpu_workers(workers_resources, max_gpu_per_worker=1):
+     """
+     Get the workers that can use a GPU.
+
+     Parameters
+     ----------
+     workers_resources: dict
+         Dictionary of workers resources obtained with `get_workers_resources`
+     max_gpu_per_worker: int, optional
+         Maximum number of GPUs allowed per worker. If a worker can "see" more than
+         `max_gpu_per_worker` GPUs, an error is raised.
+     Returns
+     -------
+     gpu_workers: dict
+         Dictionary of workers that can use a GPU, along with the usable GPUs.
+     nongpu_workers: list
+         List of workers that cannot use a GPU
+     """
+     gpu_workers = {}
+     nongpu_workers = []
+     for worker_name, resources in workers_resources.items():
+         w_gpus = {"cuda_gpus": {}, "opencl_gpus": {}}
+         for gpu_type in ["cuda_gpus", "opencl_gpus"]:
+             if gpu_type not in resources:
+                 continue
+             gpus = resources[gpu_type] or {}
+             if len(gpus) > max_gpu_per_worker:
+                 raise ValueError(
+                     "Expected at most %d GPUs, got %d"
+                     % (max_gpu_per_worker, len(gpus))
+                 )
+             if len(gpus) == 0:
+                 continue
+             w_gpus[gpu_type] = gpus
+         if len(w_gpus["opencl_gpus"]) == 0 and len(w_gpus["cuda_gpus"]) == 0:
+             nongpu_workers.append(worker_name)
+         else:
+             gpu_workers[worker_name] = w_gpus
+     return gpu_workers, nongpu_workers
+
+
+
+
+ # TODO callbacks ? Several options:
+ #   - one callback after processing a chunk (i.e after this function)
+ #     either the worker or the client can do the callback (although data is on the worker side)
+ #   - one callback after each individual sub-processing (i.e phase retrieval, etc)
+ #     in this case, this should be registered in processing_options.
+ #     The callback is done on the worker side.
+ def worker_process_chunk(
+     sub_region, chunk_size, chunk_id,
+     logfile=None, loglevel="debug", extra_options=None,
+     destroy_workerprocess_class=True,
+ ):
+     """
+     Entry point for a `dask.distributed` `Worker`.
+
+     Parameters
+     -----------
+     sub_region: tuple
+         Tuple describing the region to process in the data volume, in the form
+         `(start_x, end_x, start_z, end_z)`.
+     chunk_size: int
+         Size of the chunk ("delta z")
+     chunk_id: int
+         Index of the current chunk.
+     logfile: str, optional
+         Name of the log file.
+     extra_options: dict
+         Dictionary of extra options for `WorkerProcess`.
+     """
+     worker = get_worker()
+
+     worker_conf = worker.client.datasets[worker.name]
+     process_config = worker.client.datasets["process_config"]
+
+     logname = "nabu-%s" % worker.name
+     if logfile is None:
+         logfile = logname + ".log"
+     logger = Logger(logname, level=loglevel, logfile=logfile)
+
+     ##
+     extra_options = extra_options or {}
+     extra_options["clear_gpu_memory_after_buildsino"] = True
+     ##
+
+     logger.debug("%s: spawning WorkerProcess" % worker.name)
+     W = WorkerProcess(
+         process_config, sub_region, chunk_size=chunk_size,
+         use_cuda=worker_conf["use_cuda"], use_opencl=worker_conf["use_opencl"],
+         logger=logger, extra_options=extra_options
+     )
+     logger.debug("%s: start processing chunk" % worker.name)
+     W.process_chunk()
+     # DEBUG
+     if destroy_workerprocess_class:
+         W._destroy_gpu_context()
+         del W
+         import gc
+         gc.collect()
+     #
+
+
+
+
+
+
+
+ def actor_process_chunk(sub_region, chunk_id):
+     w = get_worker()
+     worker_process = w.actors[list(w.actors.keys())[0]]
+     worker_process.logger.info("Will process subregion %s" % str(sub_region))
+     worker_process.process_chunk(sub_region=sub_region)
+
+
+
+
+
+
+
+
+
+
+ def _get_n_cpu_workers(n_cpu_workers, n_gpu_workers):
+     if n_cpu_workers < 0:
+         # (Poor) convention: total number of CPU workers
+         return -n_cpu_workers
+     # "X CPU workers per GPU worker"
+     return n_cpu_workers * n_gpu_workers
+
+
+
+
+ class WorkersManager:
+     """
+     A class for managing "Nabu workers". It has several purposes:
+       - Get the requested computing resources from user-provided configuration
+       - Get the available computing resources
+       - Spawn the workers (either local or distributed)
+       - Distribute the work to be done among workers (here chunks or images)
+     """
+
+     def __init__(self, process_config, logger=None, extra_options=None):
+         """
+         Initialize a WorkersManager.
+
+         Parameters
+         ----------
+         process_config: `nabu.resources.processing.ProcessConfig`
+             Structure describing the user parameters and dataset.
+         logger: `logging.logger` or `nabu.app.logger.Logger`, optional
+             Logging object.
+         extra_options: dict, optional
+             Dictionary of advanced options. Current options are:
+               - gpu_pick_method: "cuda" or "auto"
+               - chunk_size_method: "same" or "cpugpu"
+               - max_chunk_size: int
+         """
+         self.process_config = process_config
+         self.logger = LoggerOrPrint(logger)
+         self._set_extra_options(extra_options)
+         self._get_requested_resources()
+
+
+     def _set_extra_options(self, extra_options):
+         if extra_options is None:
+             extra_options = {}
+         advanced_options = {
+             "gpu_pick_method": "cuda",
+             "chunk_size_method": "same",
+             "max_chunk_size": None,
+         }
+         advanced_options.update(extra_options)
+         self.extra_options = advanced_options
+         self._max_chunk_size = self.extra_options["max_chunk_size"]
+
+
+     def _get_requested_resources(self):
+         resources_cfg = self.process_config.nabu_config["resources"]
+         self.distribution_method = resources_cfg["method"]
+         #
+         if self.distribution_method == "slurm":
+             raise NotImplementedError()
+         #
+         gpu_ids = resources_cfg["gpu_id"]
+         n_gpu_workers = resources_cfg["gpus"]
+         n_cpu_workers = _get_n_cpu_workers(resources_cfg["cpu_workers"], n_gpu_workers)
+         if (n_cpu_workers == 0) and (n_gpu_workers == 0):
+             raise ValueError("Got 0 CPU workers and 0 GPU workers. Need at least one worker.")
+         self.n_gpu_workers = n_gpu_workers
+         self.n_cpu_workers = n_cpu_workers
+
+
+     def _configure_chunk_size_same(self):
+         """
+         Configure the chunk size used by workers.
+         In this approach, the same chunk size is used for all workers.
+         Pros: simpler, especially when it comes to distributing work with client.submit()
+         Cons: if max_available_chunk_size differs too much between workers, might be inefficient
+         """
+         chunk_sizes = []
+         for worker_name, worker_desc in self.workers.items():
+             chunk_sizes.append(worker_desc["chunk_size"])
+         self._common_chunk_size = min(chunk_sizes)
+         if self._max_chunk_size is not None:
+             self._common_chunk_size = min(self._max_chunk_size, self._common_chunk_size)
+         self._chunk_sizes = dict.fromkeys(self.workers.keys(), self._common_chunk_size)
+
+
+     def _configure_chunk_size_cpu_gpu(self, gpu_cpu_ratio=2):
+         """
+         Configure the chunk size used by workers.
+         In this approach, we use a different chunk size for GPU workers and CPU
+         workers. The rationale is that GPU workers are likely to process their
+         chunk much faster, so we decrease the chunk size of CPU workers by a certain
+         factor in order to re-equilibrate workload.
+         """
+         res = {}
+         for worker_name, worker_desc in self.workers.items():
+             chunk_size = worker_desc["chunk_size"]
+             if worker_desc["type"] == "CPU":
+                 chunk_size = chunk_size / gpu_cpu_ratio
+             res[worker_name] = chunk_size
+         self._chunk_sizes = res
+
+
+     def _configure_chunk_size(self):
+         method = self.extra_options["chunk_size_method"]
+         if method == "same":
+             self._configure_chunk_size_same()
+         elif method == "cpugpu":
+             self._configure_chunk_size_cpu_gpu()
+         else:
+             raise ValueError("Unknown chunk size method")
+
+
+
+ class LocalWorkersManager(WorkersManager):
+
+     def __init__(self, process_config, logger=None, extra_options=None, scheduler_kwargs=None):
+         """
+         Initialize a LocalWorkersManager.
+
+         Parameters
+         ----------
+         process_config: `nabu.resources.processing.ProcessConfig`
+             Structure describing the user parameters and dataset.
+         logger: `logging.logger` or `nabu.app.logger.Logger`, optional
+             Logging object.
+         extra_options: dict, optional
+             Dictionary of advanced options. Current options are:
+               - gpu_pick_method: "cuda" or "auto"
+         scheduler_kwargs: dict, optional
+             Extra options to pass to `distributed.Scheduler`.
+         """
+         super().__init__(process_config, logger=logger, extra_options=extra_options)
+         self.scheduler_kwargs = scheduler_kwargs or {}
+         self._configure_worker_resources()
+         self._spawn_workers()
+         self._estimate_workers_chunk_size()
+         self._configure_workers_options()
+         self._create_tasks()
+         self._spawn_workers_pipelines()
+
+
+     def _configure_worker_resources(self):
+         resources_cfg = self.process_config.nabu_config["resources"]
+         # Get memory per worker
+         mem_per_node = resources_cfg["memory_per_node"]
+         memory = get_memory_per_node(mem_per_node[0], is_percentage=mem_per_node[1]) * 1e9
+         # Get threads per node
+         threads_per_node = resources_cfg["threads_per_node"]
+         threads = get_threads_per_node(threads_per_node[0], is_percentage=threads_per_node[1])
+         # Pick GPUs
+         self.gpus = pick_gpus(
+             self.extra_options["gpu_pick_method"],
+             collect_cuda_gpus(),
+             collect_opencl_gpus(),
+             self.n_gpu_workers
+         )
+         # Pick CPU
+         self.cpu = None
+         if self.n_cpu_workers > 0:
+             self.cpu = pick_opencl_cpu_platform(collect_opencl_cpus())
+         # GPU workers specification
+         n_workers = self.n_cpu_workers + self.n_gpu_workers
+         gpu_workers_spec = {}
+         workers = {}
+         mem_used_by_gpu_workers = 0
+         for i in range(self.n_gpu_workers):
+             mem_limit = min(memory/n_workers, self.gpus[i]["memory_GB"] * 1e9 * 2)
+             mem_used_by_gpu_workers += mem_limit
+             worker_name = "GPU-worker-%02d" % i
+             gpu_workers_spec[i] = {
+                 "cls": Nanny,
+                 "options": {
+                     "nthreads": 1, #2,
+                     "name": worker_name,
+                     "memory_limit": mem_limit,
+                 },
+             }
+             workers[worker_name] = {
+                 "name": worker_name,
+                 "CPU_memory_GB": mem_limit / 1e9,
+                 "GPU": self.gpus[i],
+             }
+         # CPU workers specification
+         cpu_workers_spec = {}
+         for i in range(self.n_cpu_workers):
+             mem_limit = (memory - mem_used_by_gpu_workers)/n_workers
+             worker_name = "CPU-worker-%02d" % i
+             cpu_workers_spec[self.n_gpu_workers + i] = {
+                 "cls": Nanny,
+                 "options": {
+                     "nthreads": int((threads - 2*self.n_gpu_workers)/self.n_cpu_workers),
+                     "name": worker_name,
+                     "memory_limit": mem_limit,
+                 },
+             }
+             workers[worker_name] = {
+                 "name": worker_name,
+                 "CPU_memory_GB": mem_limit / 1e9
+             }
+         self.workers = workers
+         self._gpu_workers_spec = gpu_workers_spec
+         self._cpu_workers_spec = cpu_workers_spec
+         self._workers_spec = {}
+         self._workers_spec.update(self._gpu_workers_spec)
+         self._workers_spec.update(self._cpu_workers_spec)
+
+
+     def _spawn_workers(self):
+         self.logger.debug("Creating SpecCluster()")
+         self.scheduler_spec = {
+             "cls": Scheduler,
+             "options": self.scheduler_kwargs
+         }
+         self.cluster = SpecCluster(
+             scheduler=self.scheduler_spec,
+             workers=self._workers_spec
+         )
+         self.client = Client(self.cluster.scheduler_address)
+         self._get_workers_addresses()
+
+
+     def _get_workers_addresses(self):
+         def get_worker_name_type():
+             curr_worker = get_worker()
+             worker_type = "GPU" if "GPU" in curr_worker.name else "CPU"
+             return curr_worker.name, worker_type
+         for worker_addr in self.client.has_what().keys():
+             f = self.client.submit(get_worker_name_type, workers=[worker_addr], pure=False)
+             w_name, w_type = f.result()
+             self.workers[w_name].update({"type": w_type, "address": worker_addr})
+
+
+     def _estimate_workers_chunk_size(self):
+         self.logger.debug("Estimating workers chunk size")
+         chunks = {}
+         for worker_name in self.workers.keys():
+             worker_resources = self.workers[worker_name]
+             mem = worker_resources["CPU_memory_GB"]
+             if self.workers[worker_name]["type"] == "GPU":
+                 mem = min(mem, worker_resources["GPU"]["memory_GB"]*0.95)
+             self.workers[worker_name]["chunk_size"] = estimate_chunk_size(
+                 mem, self.process_config, chunk_step=20
+             )
+
+
+     def _configure_workers_options(self):
+         self.client.datasets.clear() # not mandatory
+         workers_conf = {"process_config": self.process_config}
+         # Enable/Disable cuda for GPU/CPU workers
+         for worker_name, worker_desc in self.workers.items():
+             workers_conf[worker_name] = {}
+             use_cuda = (worker_desc["type"] == "GPU")
+             use_opencl = not(use_cuda) #
+             workers_conf[worker_name]["use_cuda"] = use_cuda
+             workers_conf[worker_name]["use_opencl"] = use_opencl
+             self.workers[worker_name]["use_cuda"] = use_cuda
+             self.workers[worker_name]["use_opencl"] = use_opencl
+         # Broadcast
+         self.client.datasets.update(workers_conf)
+
+
+     def _create_tasks(self):
+         cfg = self.process_config.nabu_config
+         start_z = cfg["reconstruction"]["start_z"]
+         end_z = cfg["reconstruction"]["end_z"]
+         self._configure_chunk_size()
+         tasks = []
+         # method == "same"
+         # TODO implement other methods
+         chunk_size = self._common_chunk_size
+         n_chunks = ceil((end_z - start_z)/chunk_size)
+         for chunk_id in range(n_chunks):
+             sub_region = (
+                 None, None,
+                 start_z + chunk_id * chunk_size,
+                 min(end_z, start_z + (chunk_id + 1) * chunk_size)
+             )
+             # ~ tasks.append((sub_region, chunk_size, chunk_id))
+             tasks.append((sub_region, chunk_id))
+         self._tasks = tasks
+
+
+     def _spawn_workers_pipelines(self):
+         actors = {}
+         for worker_name, worker_conf in self.workers.items():
+             fut = self.client.submit(
+                 WorkerProcess,
+                 self.process_config,
+                 (None, None, 0, worker_conf["chunk_size"]), # placeholder !
+                 chunk_size=worker_conf["chunk_size"],
+                 use_cuda=worker_conf["use_cuda"],
+                 use_opencl=worker_conf["use_opencl"],
+                 logger=None, # TODO
+                 extra_options={"clear_gpu_memory_when_possible": True},
+                 workers=[worker_conf["address"]],
+                 actor=True
+             )
+             actors[worker_name] = fut.result()
+         self._actors = actors
+
+
+     def get_workers_type(self, worker_type):
+         """
+         Return the workers of a given type.
+
+         Parameters
+         -----------
+         worker_type: str
+             Type of worker. Can be "CPU" or "GPU".
+         """
+         return list(filter(lambda x: x["type"] == worker_type, self.workers.values()))
+
+
+     # WIP !
+     def reconstruct_volume(self):
+         futures = []
+         for task in self._tasks:
+             f = self.client.submit(
+                 # ~ worker_process_chunk,
+                 actor_process_chunk,
+                 *task
+             )
+             futures.append(f)
+         self.futures = futures
+
+
+
+ '''
+
+ LOCAL
+ -------
+ memory needed
+   gpu worker: 2.5*gpu mem
+   cpu worker: avail_mem / n_cpu_workers
+
+ limit:
+   upper_tot = min(sys_avail_mem, user_requested_mem["memory_per_node"])
+   upper_worker = upper_tot/n_workers
+
+
+ DISTRIBUTED
+ -----------
+ one gpu worker comes with its cpu companion
+ we assume there are 2 gpus per node, so we take sys_avail_mem/2 at most
+
+
+
+
+ '''
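
The module above (which its own docstring marks as obsolete) is built on two dask.distributed idioms: broadcasting shared state through `client.datasets`, and pinning a task to a specific worker with `client.submit(..., workers=[address], pure=False)`. Below is a minimal, self-contained sketch of that pattern only; the cluster size and the `my_config` / `hello` names are illustrative, not nabu code:

    from distributed import Client, LocalCluster, get_client, get_worker

    def hello(chunk_id):
        worker = get_worker()                          # the Worker executing this task
        config = get_client().datasets["my_config"]    # read the published dataset
        return "%s processed chunk %d with %s" % (worker.name, chunk_id, config)

    if __name__ == "__main__":
        cluster = LocalCluster(n_workers=2, threads_per_worker=1)
        client = Client(cluster)
        client.datasets["my_config"] = {"use_cuda": False}    # broadcast once to all workers
        addr = list(client.has_what().keys())[0]              # pick one worker address
        future = client.submit(hello, 0, workers=[addr], pure=False)
        print(future.result())
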
nabu/estimation/cor.py CHANGED
@@ -703,7 +703,6 @@ class CenterOfRotationAdaptiveSearch(CenterOfRotation):
          found_centers = []
          x_cor = lim_1
          while x_cor < lim_2:
-
              tmp_sigma = (
                  min(
                      (img_1.shape[1] - x_cor),
@@ -729,12 +728,9 @@
                      roi_yxhw=roi_yxhw,
                  )
              except ValueError as err:
-                 message = "ValueError from base class CenterOfRotation.find_shift in CenterOfRotationAdaptiveSearch.find_shift : {err}".format(
-                     err=err
-                 )
-                 self.logger.warning(message)
-                 x_cor = min(x_cor + x_cor * self.step_fraction, x_cor + (dim_radio - x_cor) * self.step_fraction)
-                 continue
+                 if "positions are outside the input margins" in str(err):
+                     x_cor = min(x_cor + x_cor * self.step_fraction, x_cor + (dim_radio - x_cor) * self.step_fraction)
+                     continue
              except:
                  message = "Unexpected error from base class CenterOfRotation.find_shift in CenterOfRotationAdaptiveSearch.find_shift : {err}".format(
                      err=err
nabu/estimation/cor_sino.py CHANGED
@@ -181,4 +181,5 @@ class SinoCorInterface:
          cor_finder = SinoCor(sinogram, logger=self._logger)
          cor_finder.estimate_cor_coarse(side=side, window_width=window_width)
          cor = cor_finder.estimate_cor_fine(neighborhood=neighborhood, shift_value=shift_value)
-         return cor
+         # offset will be added later - keep compatibility with result from AlignmentBase.find_shift()
+         return cor - img_1.shape[1] / 2
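
With this change, `SinoCorInterface` returns the center of rotation as an offset from the middle of the detector row rather than as an absolute column index, matching the convention of `AlignmentBase.find_shift()` as the added comment states. As a worked example with hypothetical numbers: for a 2048-pixel-wide sinogram whose absolute center of rotation sits at column 1034.5, the method now returns 1034.5 - 2048 / 2 = 10.5.
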
nabu/estimation/distortion.py CHANGED
@@ -92,11 +92,13 @@ def estimate_flat_distortion(
      h_ticks = np.arange(image.shape[1]).astype(np.float32)
      v_ticks = np.arange(image.shape[0]).astype(np.float32)

-     foo = scipy.interpolate.interp2d(hp, vp, cor1, kind=interpolation_kind)
-     cor1 = foo(h_ticks, v_ticks)
+     spline_degree = {"linear": 1, "cubic": 3}[interpolation_kind]

-     foo = scipy.interpolate.interp2d(hp, vp, cor2, kind=interpolation_kind)
-     cor2 = foo(h_ticks, v_ticks)
+     interpolator = scipy.interpolate.RectBivariateSpline(vp, hp, cor1, kx=spline_degree, ky=spline_degree)
+     cor1 = interpolator(h_ticks, v_ticks)
+
+     interpolator = scipy.interpolate.RectBivariateSpline(vp, hp, cor2, kx=spline_degree, ky=spline_degree)
+     cor2 = interpolator(h_ticks, v_ticks)

      hh = np.arange(image.shape[1]).astype(np.float32)
      vv = np.arange(image.shape[0]).astype(np.float32)
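
The hunk above swaps `scipy.interpolate.interp2d` (deprecated since SciPy 1.10) for `RectBivariateSpline`, mapping the interpolation kind to a spline degree. For data sampled on a regular grid the two APIs mainly differ in axis ordering; a minimal sketch of the correspondence, using illustrative arrays rather than nabu data:

    import numpy as np
    from scipy.interpolate import RectBivariateSpline

    x = np.linspace(0.0, 1.0, 8)            # column coordinates
    y = np.linspace(0.0, 1.0, 6)            # row coordinates
    z = np.outer(np.sin(y), np.cos(x))      # gridded samples, shape (6, 8) = (len(y), len(x))

    xnew = np.linspace(0.0, 1.0, 16)
    ynew = np.linspace(0.0, 1.0, 12)

    # Legacy call: f = scipy.interpolate.interp2d(x, y, z, kind="linear"); z_new = f(xnew, ynew)
    spl = RectBivariateSpline(y, x, z, kx=1, ky=1)   # kx = ky = 1 plays the role of kind="linear"
    z_new = spl(ynew, xnew)                          # shape (12, 16): rows first, then columns
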
nabu/io/cast_volume.py CHANGED
@@ -276,7 +276,16 @@ def find_histogram(volume: VolumeBase, scan: Optional[TomoScanBase] = None) -> O
      elif isinstance(volume, HDF5Volume):
          histogram_file = volume.data_url.file_path()
          if volume.url is not None:
-             data_path = "/".join((volume.url.data_path(), "histogram/results/data"))
+             data_path = volume.url.data_path()
+             if data_path.endswith("reconstruction"):
+                 data_path = "/".join(
+                     [
+                         *data_path.split("/")[:-1],
+                         "histogram/results/data",
+                     ]
+                 )
+             else:
+                 data_path = "/".join((volume.url.data_path(), "histogram/results/data"))
          else:
              # TODO: FIXME: in some case (if the users provides the full data_url and if the 'DATA_DATASET_NAME' is not used we
              # will endup with an invalid data_path. Hope this case will not happen. Anyway this is a case that we can't handle.)
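
The new branch above derives the histogram location from the volume's data path: when the path ends with "reconstruction", that last segment is replaced by "histogram/results/data"; otherwise the suffix is simply appended. A small illustration of the same rule with hypothetical entry names (the helper function below is not part of nabu):

    def histogram_data_path(data_path):
        # Mirror of the logic in find_histogram(), for illustration only
        if data_path.endswith("reconstruction"):
            return "/".join([*data_path.split("/")[:-1], "histogram/results/data"])
        return "/".join((data_path, "histogram/results/data"))

    assert histogram_data_path("entry0000/reconstruction") == "entry0000/histogram/results/data"
    assert histogram_data_path("entry0000/volume") == "entry0000/volume/histogram/results/data"
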