rapidtide 3.0.10__py3-none-any.whl → 3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. rapidtide/Colortables.py +492 -27
  2. rapidtide/OrthoImageItem.py +1053 -47
  3. rapidtide/RapidtideDataset.py +1533 -86
  4. rapidtide/_version.py +3 -3
  5. rapidtide/calccoherence.py +196 -29
  6. rapidtide/calcnullsimfunc.py +191 -40
  7. rapidtide/calcsimfunc.py +245 -42
  8. rapidtide/correlate.py +1210 -393
  9. rapidtide/data/examples/src/testLD +56 -0
  10. rapidtide/data/examples/src/testalign +1 -1
  11. rapidtide/data/examples/src/testdelayvar +0 -1
  12. rapidtide/data/examples/src/testfmri +19 -1
  13. rapidtide/data/examples/src/testglmfilt +5 -5
  14. rapidtide/data/examples/src/testhappy +30 -1
  15. rapidtide/data/examples/src/testppgproc +17 -0
  16. rapidtide/data/examples/src/testrolloff +11 -0
  17. rapidtide/data/models/model_cnn_pytorch/best_model.pth +0 -0
  18. rapidtide/data/models/model_cnn_pytorch/loss.png +0 -0
  19. rapidtide/data/models/model_cnn_pytorch/loss.txt +1 -0
  20. rapidtide/data/models/model_cnn_pytorch/model.pth +0 -0
  21. rapidtide/data/models/model_cnn_pytorch/model_meta.json +68 -0
  22. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm.nii.gz +0 -0
  23. rapidtide/data/reference/JHU-ArterialTerritoriesNoVent-LVL1_space-MNI152NLin2009cAsym_2mm_mask.nii.gz +0 -0
  24. rapidtide/decorators.py +91 -0
  25. rapidtide/dlfilter.py +2225 -108
  26. rapidtide/dlfiltertorch.py +4843 -0
  27. rapidtide/externaltools.py +327 -12
  28. rapidtide/fMRIData_class.py +79 -40
  29. rapidtide/filter.py +1899 -810
  30. rapidtide/fit.py +2004 -574
  31. rapidtide/genericmultiproc.py +93 -18
  32. rapidtide/happy_supportfuncs.py +2044 -171
  33. rapidtide/helper_classes.py +584 -43
  34. rapidtide/io.py +2363 -370
  35. rapidtide/linfitfiltpass.py +341 -75
  36. rapidtide/makelaggedtcs.py +211 -20
  37. rapidtide/maskutil.py +423 -53
  38. rapidtide/miscmath.py +827 -121
  39. rapidtide/multiproc.py +210 -22
  40. rapidtide/patchmatch.py +234 -33
  41. rapidtide/peakeval.py +32 -30
  42. rapidtide/ppgproc.py +2203 -0
  43. rapidtide/qualitycheck.py +352 -39
  44. rapidtide/refinedelay.py +422 -57
  45. rapidtide/refineregressor.py +498 -184
  46. rapidtide/resample.py +671 -185
  47. rapidtide/scripts/applyppgproc.py +28 -0
  48. rapidtide/simFuncClasses.py +1052 -77
  49. rapidtide/simfuncfit.py +260 -46
  50. rapidtide/stats.py +540 -238
  51. rapidtide/tests/happycomp +9 -0
  52. rapidtide/tests/test_dlfiltertorch.py +627 -0
  53. rapidtide/tests/test_findmaxlag.py +24 -8
  54. rapidtide/tests/test_fullrunhappy_v1.py +0 -2
  55. rapidtide/tests/test_fullrunhappy_v2.py +0 -2
  56. rapidtide/tests/test_fullrunhappy_v3.py +1 -0
  57. rapidtide/tests/test_fullrunhappy_v4.py +2 -2
  58. rapidtide/tests/test_fullrunrapidtide_v7.py +1 -1
  59. rapidtide/tests/test_simroundtrip.py +8 -8
  60. rapidtide/tests/utils.py +9 -8
  61. rapidtide/tidepoolTemplate.py +142 -38
  62. rapidtide/tidepoolTemplate_alt.py +165 -44
  63. rapidtide/tidepoolTemplate_big.py +189 -52
  64. rapidtide/util.py +1217 -118
  65. rapidtide/voxelData.py +684 -37
  66. rapidtide/wiener.py +19 -12
  67. rapidtide/wiener2.py +113 -7
  68. rapidtide/wiener_doc.py +255 -0
  69. rapidtide/workflows/adjustoffset.py +105 -3
  70. rapidtide/workflows/aligntcs.py +85 -2
  71. rapidtide/workflows/applydlfilter.py +87 -10
  72. rapidtide/workflows/applyppgproc.py +522 -0
  73. rapidtide/workflows/atlasaverage.py +210 -47
  74. rapidtide/workflows/atlastool.py +100 -3
  75. rapidtide/workflows/calcSimFuncMap.py +294 -64
  76. rapidtide/workflows/calctexticc.py +201 -9
  77. rapidtide/workflows/ccorrica.py +97 -4
  78. rapidtide/workflows/cleanregressor.py +168 -29
  79. rapidtide/workflows/delayvar.py +163 -10
  80. rapidtide/workflows/diffrois.py +81 -3
  81. rapidtide/workflows/endtidalproc.py +144 -4
  82. rapidtide/workflows/fdica.py +195 -15
  83. rapidtide/workflows/filtnifti.py +70 -3
  84. rapidtide/workflows/filttc.py +74 -3
  85. rapidtide/workflows/fitSimFuncMap.py +206 -48
  86. rapidtide/workflows/fixtr.py +73 -3
  87. rapidtide/workflows/gmscalc.py +113 -3
  88. rapidtide/workflows/happy.py +813 -201
  89. rapidtide/workflows/happy2std.py +144 -12
  90. rapidtide/workflows/happy_parser.py +149 -8
  91. rapidtide/workflows/histnifti.py +118 -2
  92. rapidtide/workflows/histtc.py +84 -3
  93. rapidtide/workflows/linfitfilt.py +117 -4
  94. rapidtide/workflows/localflow.py +328 -28
  95. rapidtide/workflows/mergequality.py +79 -3
  96. rapidtide/workflows/niftidecomp.py +322 -18
  97. rapidtide/workflows/niftistats.py +174 -4
  98. rapidtide/workflows/pairproc.py +88 -2
  99. rapidtide/workflows/pairwisemergenifti.py +85 -2
  100. rapidtide/workflows/parser_funcs.py +1421 -40
  101. rapidtide/workflows/physiofreq.py +137 -11
  102. rapidtide/workflows/pixelcomp.py +208 -5
  103. rapidtide/workflows/plethquality.py +103 -21
  104. rapidtide/workflows/polyfitim.py +151 -11
  105. rapidtide/workflows/proj2flow.py +75 -2
  106. rapidtide/workflows/rankimage.py +111 -4
  107. rapidtide/workflows/rapidtide.py +272 -15
  108. rapidtide/workflows/rapidtide2std.py +98 -2
  109. rapidtide/workflows/rapidtide_parser.py +109 -9
  110. rapidtide/workflows/refineDelayMap.py +143 -33
  111. rapidtide/workflows/refineRegressor.py +682 -93
  112. rapidtide/workflows/regressfrommaps.py +152 -31
  113. rapidtide/workflows/resamplenifti.py +85 -3
  114. rapidtide/workflows/resampletc.py +91 -3
  115. rapidtide/workflows/retrolagtcs.py +98 -6
  116. rapidtide/workflows/retroregress.py +165 -9
  117. rapidtide/workflows/roisummarize.py +173 -5
  118. rapidtide/workflows/runqualitycheck.py +71 -3
  119. rapidtide/workflows/showarbcorr.py +147 -4
  120. rapidtide/workflows/showhist.py +86 -2
  121. rapidtide/workflows/showstxcorr.py +160 -3
  122. rapidtide/workflows/showtc.py +159 -3
  123. rapidtide/workflows/showxcorrx.py +184 -4
  124. rapidtide/workflows/showxy.py +185 -15
  125. rapidtide/workflows/simdata.py +262 -36
  126. rapidtide/workflows/spatialfit.py +77 -2
  127. rapidtide/workflows/spatialmi.py +251 -27
  128. rapidtide/workflows/spectrogram.py +305 -32
  129. rapidtide/workflows/synthASL.py +154 -3
  130. rapidtide/workflows/tcfrom2col.py +76 -2
  131. rapidtide/workflows/tcfrom3col.py +74 -2
  132. rapidtide/workflows/tidepool.py +2972 -133
  133. rapidtide/workflows/utils.py +19 -14
  134. rapidtide/workflows/utils_doc.py +293 -0
  135. rapidtide/workflows/variabilityizer.py +116 -3
  136. {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/METADATA +10 -9
  137. {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/RECORD +141 -122
  138. {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/entry_points.txt +1 -0
  139. {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/WHEEL +0 -0
  140. {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/licenses/LICENSE +0 -0
  141. {rapidtide-3.0.10.dist-info → rapidtide-3.1.dist-info}/top_level.txt +0 -0
rapidtide/multiproc.py CHANGED
@@ -20,7 +20,10 @@ import multiprocessing as mp
20
20
  import sys
21
21
  import threading as thread
22
22
  from platform import python_version, system
23
+ from typing import Any, Callable, List, Optional, Tuple
23
24
 
25
+ import numpy as np
26
+ from numpy.typing import NDArray
24
27
  from tqdm import tqdm
25
28
 
26
29
  try:
@@ -29,14 +32,86 @@ except ImportError:
29
32
  import Queue as thrQueue
30
33
 
31
34
 
32
- def maxcpus(reservecpu=True):
35
+ def maxcpus(reservecpu: bool = True) -> int:
36
+ """Return the maximum number of CPUs that can be used for parallel processing.
37
+
38
+ This function returns the total number of CPU cores available on the system,
39
+ with an option to reserve one CPU core for system operations.
40
+
41
+ Parameters
42
+ ----------
43
+ reservecpu : bool, default=True
44
+ If True, reserves one CPU core for system operations by returning
45
+ `cpu_count() - 1`. If False, returns the total number of CPU cores
46
+ available without reservation.
47
+
48
+ Returns
49
+ -------
50
+ int
51
+ The maximum number of CPUs available for parallel processing.
52
+ If `reservecpu=True`, returns `cpu_count() - 1`.
53
+ If `reservecpu=False`, returns `cpu_count()`.
54
+
55
+ Notes
56
+ -----
57
+ This function uses `multiprocessing.cpu_count()` to determine the number
58
+ of available CPU cores. The reserved CPU core helps maintain system
59
+ responsiveness during parallel processing tasks.
60
+
61
+ Examples
62
+ --------
63
+ >>> maxcpus()
64
+ 7
65
+ >>> maxcpus(reservecpu=False)
66
+ 8
67
+ """
33
68
  if reservecpu:
34
69
  return mp.cpu_count() - 1
35
70
  else:
36
71
  return mp.cpu_count()
37
72
 
38
73
 
39
- def _process_data(data_in, inQ, outQ, showprogressbar=True, chunksize=10000):
74
+ def _process_data(
75
+ data_in: List[Any], inQ: Any, outQ: Any, showprogressbar: bool = True, chunksize: int = 10000
76
+ ) -> List[Any]:
77
+ """Process input data in chunks using multiprocessing queues.
78
+
79
+ This function distributes data into chunks and processes them using
80
+ provided input and output queues. It supports progress tracking and
81
+ handles both complete chunks and a final remainder chunk.
82
+
83
+ Parameters
84
+ ----------
85
+ data_in : List[Any]
86
+ Input data to be processed.
87
+ inQ : Any
88
+ Input queue for sending data to worker processes.
89
+ outQ : Any
90
+ Output queue for receiving processed data from worker processes.
91
+ showprogressbar : bool, optional
92
+ If True, display a progress bar during processing. Default is True.
93
+ chunksize : int, optional
94
+ Size of data chunks to process at a time. Default is 10000.
95
+
96
+ Returns
97
+ -------
98
+ List[Any]
99
+ List of processed data items retrieved from the output queue.
100
+
101
+ Notes
102
+ -----
103
+ This function assumes that `inQ` and `outQ` are properly configured
104
+ multiprocessing queues and that worker processes are running and
105
+ consuming from `inQ` and producing to `outQ`.
106
+
107
+ Examples
108
+ --------
109
+ >>> from multiprocessing import Queue
110
+ >>> data = list(range(1000))
111
+ >>> in_q = Queue()
112
+ >>> out_q = Queue()
113
+ >>> result = _process_data(data, in_q, out_q)
114
+ """
40
115
  # send pos/data to workers
41
116
  data_out = []
42
117
  totalnum = len(data_in)
@@ -84,16 +159,72 @@ def _process_data(data_in, inQ, outQ, showprogressbar=True, chunksize=10000):
84
159
 
85
160
 
86
161
  def run_multiproc(
87
- consumerfunc,
88
- inputshape,
89
- maskarray,
90
- nprocs=1,
91
- verbose=True,
92
- indexaxis=0,
93
- procunit="voxels",
94
- showprogressbar=True,
95
- chunksize=1000,
96
- ):
162
+ consumerfunc: Callable[[Any, Any], None],
163
+ inputshape: Tuple[int, ...],
164
+ maskarray: Optional[NDArray] = None,
165
+ nprocs: int = 1,
166
+ verbose: bool = True,
167
+ indexaxis: int = 0,
168
+ procunit: str = "voxels",
169
+ showprogressbar: bool = True,
170
+ chunksize: int = 1000,
171
+ ) -> List[Any]:
172
+ """
173
+ Execute a function in parallel across multiple processes using multiprocessing.
174
+
175
+ This function initializes a set of worker processes and distributes input data
176
+ across them for parallel processing. It supports optional masking of data
177
+ along a specified axis and provides progress reporting.
178
+
179
+ Parameters
180
+ ----------
181
+ consumerfunc : callable
182
+ Function to be executed in parallel. Must accept two arguments: an input queue
183
+ and an output queue for inter-process communication.
184
+ inputshape : tuple of int
185
+ Shape of the input data along all axes. The dimension along `indexaxis` is
186
+ used to determine the number of items to process.
187
+ maskarray : ndarray, optional
188
+ Boolean or binary mask array used to filter indices. Only indices where
189
+ `maskarray[d] > 0.5` are processed. If None, all indices are processed.
190
+ nprocs : int, optional
191
+ Number of worker processes to use. Default is 1 (single-threaded).
192
+ verbose : bool, optional
193
+ If True, print information about the number of units being processed.
194
+ Default is True.
195
+ indexaxis : int, optional
196
+ Axis along which to iterate for processing. Default is 0.
197
+ procunit : str, optional
198
+ Unit of processing, used for logging messages. Default is "voxels".
199
+ showprogressbar : bool, optional
200
+ If True, display a progress bar during processing. Default is True.
201
+ chunksize : int, optional
202
+ Number of items to process in each chunk. Default is 1000.
203
+
204
+ Returns
205
+ -------
206
+ list
207
+ List of results returned by the worker processes.
208
+
209
+ Notes
210
+ -----
211
+ - On Python 3.8+ and non-Windows systems, the function uses the 'fork' context
212
+ for better performance.
213
+ - The function will exit with an error if `maskarray` is provided but its
214
+ length does not match the size of the `indexaxis` dimension of `inputshape`.
215
+
216
+ Examples
217
+ --------
218
+ >>> def worker_func(inQ, outQ):
219
+ ... while True:
220
+ ... item = inQ.get()
221
+ ... if item is None:
222
+ ... break
223
+ ... outQ.put(item * 2)
224
+ ...
225
+ >>> shape = (100, 100)
226
+ >>> result = run_multiproc(worker_func, shape, nprocs=4)
227
+ """
97
228
  # initialize the workers and the queues
98
229
  __spec__ = None
99
230
  n_workers = nprocs
@@ -148,16 +279,73 @@ def run_multiproc(
148
279
 
149
280
 
150
281
  def run_multithread(
151
- consumerfunc,
152
- inputshape,
153
- maskarray,
154
- verbose=True,
155
- nprocs=1,
156
- indexaxis=0,
157
- procunit="voxels",
158
- showprogressbar=True,
159
- chunksize=1000,
160
- ):
282
+ consumerfunc: Callable[[Any, Any], None],
283
+ inputshape: Tuple[int, ...],
284
+ maskarray: Optional[NDArray] = None,
285
+ verbose: bool = True,
286
+ nprocs: int = 1,
287
+ indexaxis: int = 0,
288
+ procunit: str = "voxels",
289
+ showprogressbar: bool = True,
290
+ chunksize: int = 1000,
291
+ ) -> List[Any]:
292
+ """
293
+ Execute a multithreaded processing task using a specified consumer function.
294
+
295
+ This function initializes a set of worker threads that process data in parallel
296
+ according to the provided consumer function. It supports optional masking,
297
+ progress tracking, and configurable chunking for efficient processing.
298
+
299
+ Parameters
300
+ ----------
301
+ consumerfunc : callable
302
+ A function that takes two arguments (input queue, output queue) and
303
+ processes data in a loop until signaled to stop.
304
+ inputshape : tuple of int
305
+ Shape of the input data along all axes. The dimension along `indexaxis`
306
+ determines how many items will be processed.
307
+ maskarray : ndarray, optional
308
+ Boolean or integer array used to filter which indices are processed.
309
+ Must match the size of the axis specified by `indexaxis`.
310
+ verbose : bool, optional
311
+ If True, print information about the number of items being processed
312
+ and the number of threads used. Default is True.
313
+ nprocs : int, optional
314
+ Number of worker threads to spawn. Default is 1.
315
+ indexaxis : int, optional
316
+ Axis along which the indexing is performed. Default is 0.
317
+ procunit : str, optional
318
+ Unit of processing, used in verbose output. Default is "voxels".
319
+ showprogressbar : bool, optional
320
+ If True, display a progress bar during processing. Default is True.
321
+ chunksize : int, optional
322
+ Number of items to process in each chunk. Default is 1000.
323
+
324
+ Returns
325
+ -------
326
+ list
327
+ A list of results returned by the consumer function for each processed item.
328
+
329
+ Notes
330
+ -----
331
+ - The function uses `threading.Queue` for inter-thread communication.
332
+ - If `maskarray` is provided, only indices where `maskarray[d] > 0` are processed.
333
+ - The `consumerfunc` is expected to read from `inQ` and write to `outQ` until
334
+ a `None` is received on `inQ`, signaling the end of processing.
335
+
336
+ Examples
337
+ --------
338
+ >>> def my_consumer(inQ, outQ):
339
+ ... while True:
340
+ ... item = inQ.get()
341
+ ... if item is None:
342
+ ... break
343
+ ... result = item * 2
344
+ ... outQ.put(result)
345
+ ...
346
+ >>> shape = (100, 50)
347
+ >>> result = run_multithread(my_consumer, shape, nprocs=4)
348
+ """
161
349
  # initialize the workers and the queues
162
350
  n_workers = nprocs
163
351
  inQ = thrQueue.Queue()
rapidtide/patchmatch.py CHANGED
@@ -21,8 +21,10 @@ import math
21
21
  import os
22
22
  import sys
23
23
  import warnings
24
+ from typing import Any
24
25
 
25
26
  import numpy as np
27
+ from numpy.typing import NDArray
26
28
  from scipy.interpolate import griddata
27
29
  from scipy.ndimage import distance_transform_edt, gaussian_filter1d
28
30
  from skimage.filters import threshold_multiotsu
@@ -31,7 +33,9 @@ from skimage.segmentation import flood_fill
31
33
  import rapidtide.io as tide_io
32
34
 
33
35
 
34
- def interpolate_masked_voxels(data, mask, method="linear", extrapolate=True):
36
+ def interpolate_masked_voxels(
37
+ data: NDArray, mask: NDArray, method: str = "linear", extrapolate: bool = True
38
+ ) -> NDArray:
35
39
  """
36
40
  Replaces masked voxels in a 3D numpy array with interpolated values
37
41
  from the unmasked region. Supports boundary extrapolation and multiple interpolation methods.
@@ -87,18 +91,41 @@ def interpolate_masked_voxels(data, mask, method="linear", extrapolate=True):
87
91
  return interpolated_data
88
92
 
89
93
 
90
- def get_bounding_box(mask, value, buffer=0):
94
+ def get_bounding_box(mask: NDArray, value: int, buffer: int = 0) -> tuple[tuple, tuple]:
91
95
  """
92
96
  Computes the 3D bounding box that contains all the voxels in the mask with value value.
93
97
 
94
- Parameters:
95
- mask (np.ndarray): A 3D binary mask where non-zero values indicate the masked region.
96
- value (int): The masked region value.
98
+ Parameters
99
+ ----------
100
+ mask : np.ndarray
101
+ A 3D binary mask where non-zero values indicate the masked region.
102
+ value : int
103
+ The masked region value to compute the bounding box for.
104
+ buffer : int, optional
105
+ Buffer to add around the bounding box in all directions. Default is 0.
97
106
 
98
- Returns:
99
- tuple: Two tuples defining the bounding box:
100
- ((min_x, min_y, min_z), (max_x, max_y, max_z)),
101
- where min and max are inclusive coordinates of the bounding box.
107
+ Returns
108
+ -------
109
+ tuple of tuple of int
110
+ Two tuples defining the bounding box:
111
+ ((min_x, min_y, min_z), (max_x, max_y, max_z)),
112
+ where min and max are inclusive coordinates of the bounding box.
113
+
114
+ Notes
115
+ -----
116
+ The function handles edge cases where the buffer extends beyond the mask boundaries
117
+ by clamping the coordinates to the valid range [0, shape[axis]-1].
118
+
119
+ Examples
120
+ --------
121
+ >>> import numpy as np
122
+ >>> mask = np.zeros((10, 10, 10), dtype=int)
123
+ >>> mask[3:7, 3:7, 3:7] = 1
124
+ >>> get_bounding_box(mask, 1)
125
+ ((3, 3, 3), (6, 6, 6))
126
+
127
+ >>> get_bounding_box(mask, 1, buffer=1)
128
+ ((2, 2, 2), (7, 7, 7))
102
129
  """
103
130
  if mask.ndim != 3:
104
131
  raise ValueError("Input mask must be a 3D array.")
@@ -119,21 +146,94 @@ def get_bounding_box(mask, value, buffer=0):
119
146
  return tuple(min_coords), tuple(max_coords)
120
147
 
121
148
 
122
- def flood3d(
123
- image,
124
- newvalue,
125
- ):
149
+ def flood3d(image: NDArray, newvalue: int) -> NDArray:
150
+ """
151
+ Apply flood fill to each slice of a 3D image.
152
+
153
+ This function performs a connected-component flood fill operation on each
154
+ 2D slice of a 3D image, starting from the top-left corner (0, 0).
155
+
156
+ Parameters
157
+ ----------
158
+ image : NDArray
159
+ Input 3D image array of shape (height, width, depth)
160
+ newvalue : int
161
+ The value to fill the connected component with
162
+
163
+ Returns
164
+ -------
165
+ NDArray
166
+ 3D image array of the same shape as input, with flood fill applied
167
+ to each slice
168
+
169
+ Notes
170
+ -----
171
+ - Uses 4-connectivity (rook-style connectivity) for flood fill
172
+ - Each slice is processed independently
173
+ - The fill operation starts from position (0, 0) in each slice
174
+ - Original image values are preserved in the output where fill did not occur
175
+
176
+ Examples
177
+ --------
178
+ >>> import numpy as np
179
+ >>> image = np.array([[[1, 1, 0],
180
+ ... [1, 0, 0],
181
+ ... [0, 0, 0]],
182
+ ... [[1, 1, 0],
183
+ ... [1, 0, 0],
184
+ ... [0, 0, 0]]])
185
+ >>> result = flood3d(image, 5)
186
+ >>> print(result)
187
+ """
126
188
  filledim = image * 0
127
189
  for slice in range(image.shape[2]):
128
190
  filledim[:, :, slice] = flood_fill(image[:, :, slice], (0, 0), newvalue, connectivity=1)
129
191
  return filledim
130
192
 
131
193
 
132
- def invertedflood3D(image, newvalue):
194
+ def invertedflood3D(image: NDArray, newvalue: int) -> NDArray:
195
+ """
196
+ Apply inverted flood fill operation to a 3D image.
197
+
198
+ This function performs an inverted flood fill by adding the new value to the
199
+ original image and subtracting the result of a standard flood3d operation.
200
+
201
+ Parameters
202
+ ----------
203
+ image : NDArray
204
+ Input 3D image array to process
205
+ newvalue : int
206
+ Value to be added during the inverted flood fill operation
207
+
208
+ Returns
209
+ -------
210
+ NDArray
211
+ Resulting image after inverted flood fill operation
212
+
213
+ Notes
214
+ -----
215
+ The function relies on a `flood3d` function which is assumed to be defined
216
+ elsewhere in the codebase. The inverted flood fill is computed as:
217
+ result = image + newvalue - flood3d(image, newvalue)
218
+
219
+ Examples
220
+ --------
221
+ >>> import numpy as np
222
+ >>> image = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
223
+ >>> result = invertedflood3D(image, 10)
224
+ >>> print(result)
225
+ """
133
226
  return image + newvalue - flood3d(image, newvalue)
134
227
 
135
228
 
136
- def growregion(image, location, value, separatedimage, regionsize, debug=False):
229
+ def growregion(
230
+ image: NDArray,
231
+ location: tuple[int, int, int],
232
+ value: int,
233
+ separatedimage: NDArray,
234
+ regionsize: int,
235
+ debug: bool = False,
236
+ ) -> int:
137
237
  separatedimage[location[0], location[1], location[2]] = value
138
238
  regionsize += 1
139
239
  if debug:
@@ -157,7 +257,7 @@ def growregion(image, location, value, separatedimage, regionsize, debug=False):
157
257
  return regionsize
158
258
 
159
259
 
160
- def separateclusters(image, sizethresh=0, debug=False):
260
+ def separateclusters(image: NDArray, sizethresh: int = 0, debug: bool = False) -> NDArray:
161
261
  separatedclusters = image * 0
162
262
  stop = False
163
263
  value = 1
@@ -219,25 +319,50 @@ def separateclusters(image, sizethresh=0, debug=False):
219
319
  # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
220
320
  # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
221
321
  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
222
- def clamp(low, high, value):
223
- """bound an integer to a range
322
+ def clamp(low: int, high: int, value: int) -> int:
323
+ """
324
+ Bound an integer to a range.
325
+
326
+ This function clamps a value to ensure it falls within the inclusive range [low, high].
327
+ If the value is less than low, it returns low. If the value is greater than high,
328
+ it returns high. Otherwise, it returns the value unchanged.
224
329
 
225
330
  Parameters
226
331
  ----------
227
332
  low : int
333
+ The lower bound of the range (inclusive).
228
334
  high : int
335
+ The upper bound of the range (inclusive).
229
336
  value : int
337
+ The value to be clamped.
230
338
 
231
339
  Returns
232
340
  -------
233
- result : int
341
+ int
342
+ The clamped value within the range [low, high].
343
+
344
+ Notes
345
+ -----
346
+ The function assumes that `low <= high`. If this condition is not met,
347
+ the behavior is undefined and may return unexpected results.
348
+
349
+ Examples
350
+ --------
351
+ >>> clamp(0, 10, 5)
352
+ 5
353
+ >>> clamp(0, 10, -1)
354
+ 0
355
+ >>> clamp(0, 10, 15)
356
+ 10
234
357
  """
235
358
  return max(low, min(high, value))
236
359
 
237
360
 
238
- def dehaze(fdata, level, debug=False):
239
- """use Otsu to threshold https://scikit-image.org/docs/stable/auto_examples/segmentation/plot_multiotsu.html
240
- n.b. threshold used to mask image: dark values are zeroed, but result is NOT binary
361
+ def dehaze(fdata: NDArray, level: int, debug: bool = False) -> NDArray:
362
+ """
363
+ use Otsu to threshold https://scikit-image.org/docs/stable/auto_examples/segmentation/plot_multiotsu.html
364
+ n.b. threshold used to mask image: dark values are zeroed, but result is NOT binary
365
+
241
366
  Parameters
242
367
  ----------
243
368
  fdata : numpy.memmap from Niimg-like object
@@ -272,8 +397,15 @@ def dehaze(fdata, level, debug=False):
272
397
 
273
398
 
274
399
  # https://github.com/nilearn/nilearn/blob/1607b52458c28953a87bbe6f42448b7b4e30a72f/nilearn/image/image.py#L164
275
- def _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True):
276
- """Smooth images by applying a Gaussian filter.
400
+ def _smooth_array(
401
+ arr: NDArray,
402
+ affine: NDArray | None,
403
+ fwhm: float | NDArray | tuple | list | str | None = None,
404
+ ensure_finite: bool = True,
405
+ copy: bool = True,
406
+ ) -> NDArray:
407
+ """
408
+ Smooth images by applying a Gaussian filter.
277
409
 
278
410
  Apply a Gaussian filter along the three first dimensions of `arr`.
279
411
 
@@ -355,8 +487,10 @@ def _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True):
355
487
  return arr
356
488
 
357
489
 
358
- def binary_zero_crossing(fdata):
359
- """binarize (negative voxels are zero)
490
+ def binary_zero_crossing(fdata: NDArray) -> NDArray:
491
+ """
492
+ binarize (negative voxels are zero)
493
+
360
494
  Parameters
361
495
  ----------
362
496
  fdata : numpy.memmap from Niimg-like object
@@ -372,8 +506,11 @@ def binary_zero_crossing(fdata):
372
506
  return edge
373
507
 
374
508
 
375
- def difference_of_gaussian(fdata, affine, fwhmNarrow, ratioopt=True, debug=False):
376
- """Apply Difference of Gaussian (DoG) filter.
509
+ def difference_of_gaussian(
510
+ fdata: NDArray, affine: NDArray, fwhmNarrow: float, ratioopt: bool = True, debug: bool = False
511
+ ) -> NDArray:
512
+ """
513
+ Apply Difference of Gaussian (DoG) filter.
377
514
  https://en.wikipedia.org/wiki/Difference_of_Gaussians
378
515
  https://en.wikipedia.org/wiki/Marr–Hildreth_algorithm
379
516
  D. Marr and E. C. Hildreth. Theory of edge detection. Proceedings of the Royal Society, London B, 207:187-217, 1980
@@ -415,8 +552,16 @@ def difference_of_gaussian(fdata, affine, fwhmNarrow, ratioopt=True, debug=False
415
552
  # We are operating on data in memory that are closely associated with the source
416
553
  # NIFTI files, so the affine and sizes fields are easy to come by, but unlike the
417
554
  # original library, we are not working directly with NIFTI images.
418
- def calc_DoG(thedata, theaffine, thesizes, fwhm=3, ratioopt=True, debug=False):
419
- """Find edges of a NIfTI image using the Difference of Gaussian (DoG).
555
+ def calc_DoG(
556
+ thedata: NDArray,
557
+ theaffine: NDArray,
558
+ thesizes: tuple,
559
+ fwhm: float = 3,
560
+ ratioopt: bool = True,
561
+ debug: bool = False,
562
+ ) -> NDArray:
563
+ """
564
+ Find edges of a NIfTI image using the Difference of Gaussian (DoG).
420
565
  Parameters
421
566
  ----------
422
567
  thedata : 3D data array
@@ -440,7 +585,15 @@ def calc_DoG(thedata, theaffine, thesizes, fwhm=3, ratioopt=True, debug=False):
440
585
  return difference_of_gaussian(dehazed_data, theaffine, fwhm, ratioopt=ratioopt, debug=debug)
441
586
 
442
587
 
443
- def getclusters(theimage, theaffine, thesizes, fwhm=5, ratioopt=True, sizethresh=10, debug=False):
588
+ def getclusters(
589
+ theimage: NDArray,
590
+ theaffine: NDArray,
591
+ thesizes: tuple,
592
+ fwhm: float = 5,
593
+ ratioopt: bool = True,
594
+ sizethresh: int = 10,
595
+ debug: bool = False,
596
+ ) -> NDArray:
444
597
  if debug:
445
598
  print("Detecting clusters..")
446
599
  print(f"\t{theimage.shape=}")
@@ -455,7 +608,54 @@ def getclusters(theimage, theaffine, thesizes, fwhm=5, ratioopt=True, sizethresh
455
608
  )
456
609
 
457
610
 
458
- def interppatch(img_data, separatedimage, method="linear", debug=False):
611
+ def interppatch(
612
+ img_data: NDArray, separatedimage: NDArray, method: str = "linear", debug: bool = False
613
+ ) -> tuple[NDArray, NDArray]:
614
+ """
615
+ Interpolate voxel values within labeled regions of a 3D image.
616
+
617
+ This function applies interpolation to each labeled region in a separated image,
618
+ using the specified interpolation method. It returns both the interpolated image
619
+ and a copy of the original image with the same spatial extent.
620
+
621
+ Parameters
622
+ ----------
623
+ img_data : NDArray
624
+ A 3D array representing the input image data to be interpolated.
625
+ separatedimage : NDArray
626
+ A 3D array of integers where each unique positive integer represents a
627
+ distinct region. Zero values are treated as background.
628
+ method : str, optional
629
+ The interpolation method to use. Default is "linear". Other options may
630
+ include "nearest", "cubic", etc., depending on the implementation of
631
+ `interpolate_masked_voxels`.
632
+ debug : bool, optional
633
+ If True, print debug information for each region being processed.
634
+ Default is False.
635
+
636
+ Returns
637
+ -------
638
+ tuple[NDArray, NDArray]
639
+ A tuple containing:
640
+ - `interpolated`: The image with interpolated values in each region.
641
+ - `justboxes`: A copy of the original image data, with the same shape
642
+ as `img_data`, used for reference or visualization purposes.
643
+
644
+ Notes
645
+ -----
646
+ - Each region is processed independently using its bounding box.
647
+ - The function modifies `img_data` only within the bounds of each region.
648
+ - The `interpolate_masked_voxels` function is assumed to handle the actual
649
+ interpolation logic for masked voxels.
650
+
651
+ Examples
652
+ --------
653
+ >>> import numpy as np
654
+ >>> img = np.random.rand(10, 10, 10)
655
+ >>> labels = np.zeros((10, 10, 10))
656
+ >>> labels[3:7, 3:7, 3:7] = 1
657
+ >>> interpolated, boxes = interppatch(img, labels, method="linear")
658
+ """
459
659
  interpolated = img_data + 0.0
460
660
  justboxes = img_data * 0.0
461
661
  numregions = np.max(separatedimage)
@@ -493,7 +693,8 @@ def interppatch(img_data, separatedimage, method="linear", debug=False):
493
693
 
494
694
 
495
695
  if __name__ == "__main__":
496
- """Apply Gaussian smooth to image
696
+ """
697
+ Apply Gaussian smooth to image
497
698
  Parameters
498
699
  ----------
499
700
  fnm : str