mediml 0.9.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. MEDiml/MEDscan.py +1696 -0
  2. MEDiml/__init__.py +21 -0
  3. MEDiml/biomarkers/BatchExtractor.py +806 -0
  4. MEDiml/biomarkers/BatchExtractorTexturalFilters.py +840 -0
  5. MEDiml/biomarkers/__init__.py +16 -0
  6. MEDiml/biomarkers/diagnostics.py +125 -0
  7. MEDiml/biomarkers/get_oriented_bound_box.py +158 -0
  8. MEDiml/biomarkers/glcm.py +1602 -0
  9. MEDiml/biomarkers/gldzm.py +523 -0
  10. MEDiml/biomarkers/glrlm.py +1315 -0
  11. MEDiml/biomarkers/glszm.py +555 -0
  12. MEDiml/biomarkers/int_vol_hist.py +527 -0
  13. MEDiml/biomarkers/intensity_histogram.py +615 -0
  14. MEDiml/biomarkers/local_intensity.py +89 -0
  15. MEDiml/biomarkers/morph.py +1756 -0
  16. MEDiml/biomarkers/ngldm.py +780 -0
  17. MEDiml/biomarkers/ngtdm.py +414 -0
  18. MEDiml/biomarkers/stats.py +373 -0
  19. MEDiml/biomarkers/utils.py +389 -0
  20. MEDiml/filters/TexturalFilter.py +299 -0
  21. MEDiml/filters/__init__.py +9 -0
  22. MEDiml/filters/apply_filter.py +134 -0
  23. MEDiml/filters/gabor.py +215 -0
  24. MEDiml/filters/laws.py +283 -0
  25. MEDiml/filters/log.py +147 -0
  26. MEDiml/filters/mean.py +121 -0
  27. MEDiml/filters/textural_filters_kernels.py +1738 -0
  28. MEDiml/filters/utils.py +107 -0
  29. MEDiml/filters/wavelet.py +237 -0
  30. MEDiml/learning/DataCleaner.py +198 -0
  31. MEDiml/learning/DesignExperiment.py +480 -0
  32. MEDiml/learning/FSR.py +667 -0
  33. MEDiml/learning/Normalization.py +112 -0
  34. MEDiml/learning/RadiomicsLearner.py +714 -0
  35. MEDiml/learning/Results.py +2237 -0
  36. MEDiml/learning/Stats.py +694 -0
  37. MEDiml/learning/__init__.py +10 -0
  38. MEDiml/learning/cleaning_utils.py +107 -0
  39. MEDiml/learning/ml_utils.py +1015 -0
  40. MEDiml/processing/__init__.py +6 -0
  41. MEDiml/processing/compute_suv_map.py +121 -0
  42. MEDiml/processing/discretisation.py +149 -0
  43. MEDiml/processing/interpolation.py +275 -0
  44. MEDiml/processing/resegmentation.py +66 -0
  45. MEDiml/processing/segmentation.py +912 -0
  46. MEDiml/utils/__init__.py +25 -0
  47. MEDiml/utils/batch_patients.py +45 -0
  48. MEDiml/utils/create_radiomics_table.py +131 -0
  49. MEDiml/utils/data_frame_export.py +42 -0
  50. MEDiml/utils/find_process_names.py +16 -0
  51. MEDiml/utils/get_file_paths.py +34 -0
  52. MEDiml/utils/get_full_rad_names.py +21 -0
  53. MEDiml/utils/get_institutions_from_ids.py +16 -0
  54. MEDiml/utils/get_patient_id_from_scan_name.py +22 -0
  55. MEDiml/utils/get_patient_names.py +26 -0
  56. MEDiml/utils/get_radiomic_names.py +27 -0
  57. MEDiml/utils/get_scan_name_from_rad_name.py +22 -0
  58. MEDiml/utils/image_reader_SITK.py +37 -0
  59. MEDiml/utils/image_volume_obj.py +22 -0
  60. MEDiml/utils/imref.py +340 -0
  61. MEDiml/utils/initialize_features_names.py +62 -0
  62. MEDiml/utils/inpolygon.py +159 -0
  63. MEDiml/utils/interp3.py +43 -0
  64. MEDiml/utils/json_utils.py +78 -0
  65. MEDiml/utils/mode.py +31 -0
  66. MEDiml/utils/parse_contour_string.py +58 -0
  67. MEDiml/utils/save_MEDscan.py +30 -0
  68. MEDiml/utils/strfind.py +32 -0
  69. MEDiml/utils/textureTools.py +188 -0
  70. MEDiml/utils/texture_features_names.py +115 -0
  71. MEDiml/utils/write_radiomics_csv.py +47 -0
  72. MEDiml/wrangling/DataManager.py +1724 -0
  73. MEDiml/wrangling/ProcessDICOM.py +512 -0
  74. MEDiml/wrangling/__init__.py +3 -0
  75. mediml-0.9.9.dist-info/LICENSE.md +674 -0
  76. mediml-0.9.9.dist-info/METADATA +232 -0
  77. mediml-0.9.9.dist-info/RECORD +78 -0
  78. mediml-0.9.9.dist-info/WHEEL +4 -0
@@ -0,0 +1,389 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import math
5
+ from typing import List, Tuple, Union
6
+
7
+ import numpy as np
8
+ from skimage.measure import marching_cubes
9
+
10
+
11
def find_i_x(levels: np.ndarray,
             fract_vol: np.ndarray,
             x: float) -> np.ndarray:
    """Computes intensity at volume fraction.

    Args:
        levels (ndarray): COMPLETE INTEGER grey-levels.
        fract_vol (ndarray): Fractional volume.
        x (float): Fraction percentage, between 0 and 100.

    Returns:
        ndarray: Minimum discretised intensity present in at most :math:`x` % of the volume.
    """
    # First grey level whose fractional volume has dropped to x % or below.
    first_match = np.flatnonzero(fract_vol <= x / 100)[0]
    return levels[first_match]
29
+
30
def find_v_x(fract_int: np.ndarray,
             fract_vol: np.ndarray,
             x: float) -> np.ndarray:
    """Computes volume at intensity fraction.

    Args:
        fract_int (ndarray): Intensity fraction.
        fract_vol (ndarray): Fractional volume.
        x (float): Fraction percentage, between 0 and 100.

    Returns:
        ndarray: Largest volume fraction ``fract_vol`` that has an
        intensity fraction ``fract_int`` of at least :math:`x` %.
    """
    # First position where the intensity fraction reaches x %.
    first_match = np.flatnonzero(fract_int >= x / 100)[0]
    return fract_vol[first_match]
49
+
50
def get_area_dens_approx(a: float,
                         b: float,
                         c: float,
                         n: float) -> float:
    """Computes area density - minimum volume enclosing ellipsoid.

    Args:
        a (float): Major semi-axis length.
        b (float): Minor semi-axis length.
        c (float): Least semi-axis length.
        n (int): Number of iterations (truncation order of the Legendre series).

    Returns:
        float: Area density - minimum volume enclosing ellipsoid.
    """
    alpha = np.sqrt(1 - b**2 / a**2)
    beta = np.sqrt(1 - c**2 / a**2)
    ab = alpha * beta
    point = (alpha**2 + beta**2) / (2 * ab)

    # Truncated series: sum_{v=0}^{n} ab^v / (1 - 4 v^2) * P_v(point),
    # where P_v is the Legendre polynomial of degree v.
    series = 0
    for order in range(n + 1):
        basis = [0] * order + [1]  # coefficient vector selecting P_order
        series += ab**order / (1 - 4 * order**2) * np.polynomial.legendre.legval(x=point, c=basis)

    return series * 4 * np.pi * a * b
80
+
81
def get_axis_lengths(xyz: np.ndarray) -> Tuple[float, float, float]:
    """Computes AxisLengths.

    Args:
        xyz (ndarray): Array of three column vectors, defining the [X,Y,Z]
                       positions of the points in the ROI (1's) of the mask volume. In mm.

    Returns:
        Tuple[float, float, float]: [Major axis length, Minor axis length, Least axis length]
        — the eigenvalues of the spatial covariance matrix, largest first.
    """
    # Centre the point cloud on its geometric centre of mass
    # (operates on a fresh array; the caller's data is untouched).
    centre = np.sum(xyz, 0) / np.shape(xyz)[0]  # [1 X 3] vector
    centered = xyz - centre

    # Eigenvalues of the covariance matrix, sorted ascending.
    eigenvalues, _ = np.linalg.eig(np.cov(centered, rowvar=False))
    eigenvalues = np.sort(eigenvalues)

    # Unpack as (major, minor, least).
    return eigenvalues[2], eigenvalues[1], eigenvalues[0]
115
+
116
def get_glcm_cross_diag_prob(p_ij: np.ndarray) -> np.ndarray:
    """Computes cross diagonal probabilities.

    Args:
        p_ij (ndarray): Joint probability of grey levels
            i and j occurring in neighboring voxels. (Elements
            of the probability distribution for grey level
            co-occurrences).

    Returns:
        ndarray: Array of the cross diagonal probability.
    """
    n_g = np.size(p_ij, 0)
    # k runs over 2 .. 2*n_g; the eps padding keeps the upper bound inclusive.
    val_k = np.arange(2, 2 * n_g + 100 * np.finfo(float).eps)
    p_iplusj = np.zeros(np.size(val_k))

    for idx, k in enumerate(val_k):
        # Accumulate all co-occurrence cells whose 1-based indices sum to k.
        total = 0
        for i in range(n_g):
            for j in range(n_g):
                if (k - (i + j + 2)) == 0:
                    total += p_ij[i, j]
        p_iplusj[idx] = total

    return p_iplusj
145
+
146
def get_glcm_diag_prob(p_ij: np.ndarray) -> np.ndarray:
    """Computes diagonal probabilities.

    Args:
        p_ij (ndarray): Joint probability of grey levels
            i and j occurring in neighboring voxels. (Elements
            of the probability distribution for grey level
            co-occurrences).

    Returns:
        ndarray: Array of the diagonal probability.
    """
    n_g = np.size(p_ij, 0)
    # k runs over 0 .. n_g - 1 (possible |i - j| differences).
    val_k = np.arange(0, n_g)
    p_iminusj = np.zeros(np.size(val_k))

    for idx, k in enumerate(val_k):
        # Accumulate all co-occurrence cells whose index difference is k.
        total = 0
        for i in range(n_g):
            for j in range(n_g):
                if (k - abs(i - j)) == 0:
                    total += p_ij[i, j]
        p_iminusj[idx] = total

    return p_iminusj
176
+
177
def get_com(xgl_int: np.ndarray,
            xgl_morph: np.ndarray,
            xyz_int: np.ndarray,
            xyz_morph: np.ndarray) -> Union[float,
                                            np.ndarray]:
    """Calculates center of mass shift (in mm, since resolution is in mm).

    Note:
        Row positions of "x_gl" and "xyz" must correspond for each point.

    Args:
        xgl_int (ndarray): Vector of intensity values in the volume to analyze
            (only values in the intensity mask).
        xgl_morph (ndarray): Vector of intensity values in the volume to analyze
            (only values in the morphological mask).
        xyz_int (ndarray): [n_points X 3] matrix of three column vectors, defining the [X,Y,Z]
            positions of the points in the ROI (1's) of the mask volume (In mm).
            (Mesh-based volume calculated from the ROI intensity mesh)
        xyz_morph (ndarray): [n_points X 3] matrix of three column vectors, defining the [X,Y,Z]
            positions of the points in the ROI (1's) of the mask volume (In mm).
            (Mesh-based volume calculated from the ROI morphological mesh)

    Returns:
        Union[float, np.ndarray]: The ROI volume centre of mass shift.
    """
    # BUGFIX: work on a copy — the intensity weighting below previously
    # mutated the caller's xyz_int array in place (the sibling
    # get_axis_lengths already copies its input for the same reason).
    xyz_int = xyz_int.copy()

    # Getting the geometric centre of mass
    n_v = np.size(xgl_morph)
    com_geom = np.sum(xyz_morph, 0) / n_v  # [1 X 3] vector

    # Getting the density (intensity-weighted) centre of mass
    xyz_int[:, 0] = xgl_int * xyz_int[:, 0]
    xyz_int[:, 1] = xgl_int * xyz_int[:, 1]
    xyz_int[:, 2] = xgl_int * xyz_int[:, 2]
    com_gl = np.sum(xyz_int, 0) / np.sum(xgl_int, 0)  # [1 X 3] vector

    # Calculating the shift as the Euclidean distance between the two centres
    com = np.linalg.norm(com_geom - com_gl)

    return com
219
+
220
def get_loc_peak(img_obj: np.ndarray,
                 roi_obj: np.ndarray,
                 res: np.ndarray) -> float:
    """Computes Local intensity peak.

    Note:
        This works only in 3D for now.

    Args:
        img_obj (ndarray): Continuous image intensity distribution, with no NaNs
            outside the ROI.
        roi_obj (ndarray): Array of the mask defining the ROI.
        res (List[float]): [a,b,c] vector specifying the resolution of the volume in mm.
            xyz resolution (world), or JIK resolution (intrinsic matlab).

    Returns:
        float: Value of the local intensity peak.
    """
    # INITIALIZATION
    # Radius of a 1 cm^3 sphere — about 6.2 mm, as defined in document
    dist_thresh = (3/(4*math.pi))**(1/3)*10

    # Insert -inf outside ROI so the maximum search is restricted to the ROI
    # (the original volume is restored afterwards)
    temp = img_obj.copy()
    img_obj = img_obj.copy()
    img_obj[roi_obj == 0] = -np.inf

    # Find the location(s) of the maximal voxel
    max_val = np.max(img_obj)
    I, J, K = np.nonzero(img_obj == max_val)
    n_max = np.size(I)

    # Reconverting to full object without -Inf
    img_obj = temp

    # Get a meshgrid of voxel-centre coordinates first
    x = res[0]*(np.arange(img_obj.shape[1])+0.5)
    y = res[1]*(np.arange(img_obj.shape[0])+0.5)
    z = res[2]*(np.arange(img_obj.shape[2])+0.5)
    X, Y, Z = np.meshgrid(x, y, z)  # In mm

    # Calculate the local peak
    max_val = -np.inf

    for n in range(n_max):
        temp_x = X - X[I[n], J[n], K[n]]
        temp_y = Y - Y[I[n], J[n], K[n]]
        temp_z = Z - Z[I[n], J[n], K[n]]
        temp_dist_mesh = (np.sqrt(np.power(temp_x, 2) +
                                  np.power(temp_y, 2) +
                                  np.power(temp_z, 2)))
        val = img_obj[temp_dist_mesh <= dist_thresh]
        # BUGFIX: `val[np.isnan(val)] = []` is MATLAB-style deletion and raises
        # ValueError in NumPy whenever a NaN is present; keep non-NaN values.
        val = val[~np.isnan(val)]

        if np.size(val) == 0:
            temp_local_peak = img_obj[I[n], J[n], K[n]]
        else:
            temp_local_peak = np.mean(val)
        if temp_local_peak > max_val:
            max_val = temp_local_peak

    local_peak = max_val

    return local_peak
285
+
286
def get_mesh(mask: np.ndarray,
             res: Union[np.ndarray, List]) -> Tuple[np.ndarray,
                                                    np.ndarray,
                                                    np.ndarray]:
    """Compute Mesh.

    Note:
        Make sure the `mask` is padded with a layer of 0's in all
        dimensions to reduce potential isosurface computation errors.

    Args:
        mask (ndarray): Contains only 0's and 1's.
        res (ndarray or List): [a,b,c] vector specifying the resolution of the volume in mm.
            xyz resolution (world), or JIK resolution (intrinsic matlab).

    Returns:
        Tuple[np.ndarray, np.ndarray, np.ndarray]:
            - Array of the [X,Y,Z] positions of the ROI.
            - Array of triangular faces via referencing vertex indices from vertices.
            - Array of the spatial coordinates for `mask` unique mesh vertices.
    """
    mask = mask.copy()
    res = res.copy()

    # Voxel-centre world coordinates: the coordinate reference system (0,0,0)
    # sits at the upper-left corner of the first voxel, hence the half-voxel
    # shift. Built for the whole volume, 1's and 0's alike.
    axes = [res[d] * (np.arange(1, np.shape(mask)[d] + 1) - 0.5) for d in range(3)]
    grids = np.meshgrid(*axes, indexing='ij')

    # Getting the isosurface of the mask
    vertices, faces, _, _ = marching_cubes(volume=mask, level=0.5, spacing=res)

    # Column-major flatten of each coordinate grid, then keep only the rows
    # corresponding to ROI voxels (i.e. 1's) of the mask
    columns = [np.reshape(g, (np.size(g), 1), order='F') for g in grids]
    xyz = np.concatenate(columns, axis=1)
    roi_rows = np.where(np.reshape(mask, np.size(mask), order='F') == 1)[0]

    return xyz[roi_rows, :], faces, vertices
331
+
332
def get_glob_peak(img_obj: np.ndarray,
                  roi_obj: np.ndarray,
                  res: np.ndarray) -> float:
    """Computes Global intensity peak.

    Note:
        This works only in 3D for now.

    Args:
        img_obj (ndarray): Continuous image intensity distribution, with no NaNs
            outside the ROI.
        roi_obj (ndarray): Array of the mask defining the ROI.
        res (List[float]): [a,b,c] vector specifying the resolution of the volume in mm.
            xyz resolution (world), or JIK resolution (intrinsic matlab).

    Returns:
        float: Value of the global intensity peak.
    """
    # INITIALIZATION
    # Radius of a 1 cm^3 sphere — about 6.2 mm, as defined in document
    dist_thresh = (3/(4*math.pi))**(1/3)*10

    # Find the location(s) of all voxels within the ROI
    indices = np.nonzero(np.reshape(roi_obj, np.size(roi_obj), order='F') == 1)[0]
    I, J, K = np.unravel_index(indices, np.shape(img_obj), order='F')
    n_max = np.size(I)

    # Get a meshgrid of voxel-centre coordinates first
    x = res[0]*(np.arange(img_obj.shape[1])+0.5)
    y = res[1]*(np.arange(img_obj.shape[0])+0.5)
    z = res[2]*(np.arange(img_obj.shape[2])+0.5)
    X, Y, Z = np.meshgrid(x, y, z)  # In mm

    # Calculate the global peak: the sphere mean is evaluated at every ROI
    # voxel, not only at the maximal one (contrast with get_loc_peak)
    max_val = -np.inf

    for n in range(n_max):
        temp_x = X - X[I[n], J[n], K[n]]
        temp_y = Y - Y[I[n], J[n], K[n]]
        temp_z = Z - Z[I[n], J[n], K[n]]
        temp_dist_mesh = (np.sqrt(np.power(temp_x, 2) +
                                  np.power(temp_y, 2) +
                                  np.power(temp_z, 2)))
        val = img_obj[temp_dist_mesh <= dist_thresh]
        # BUGFIX: `val[np.isnan(val)] = []` is MATLAB-style deletion and raises
        # ValueError in NumPy whenever a NaN is present; keep non-NaN values.
        val = val[~np.isnan(val)]

        if np.size(val) == 0:
            temp_local_peak = img_obj[I[n], J[n], K[n]]
        else:
            temp_local_peak = np.mean(val)
        if temp_local_peak > max_val:
            max_val = temp_local_peak

    global_peak = max_val

    return global_peak
389
+
@@ -0,0 +1,299 @@
1
+ from copy import deepcopy
2
+ from typing import Union
3
+
4
+ import numpy as np
5
# Flag consumed by TexturalFilter: True when PyCUDA (and thus GPU filtering)
# is unavailable. BUGFIX: it must be initialised to False before the try —
# the original only assigned it in the except branch, so a *successful*
# PyCUDA import left the name undefined and the first filtering call raised
# NameError at `if not import_failed:`.
import_failed = False
try:
    import pycuda.autoinit
    import pycuda.driver as cuda
    from pycuda.autoinit import context
    from pycuda.compiler import SourceModule
except Exception as e:
    print("PyCUDA is not installed. Please install it to use the textural filters.", e)
    import_failed = True
13
+
14
+ from ..processing.discretisation import discretize
15
+ from .textural_filters_kernels import glcm_kernel, single_glcm_kernel
16
+
17
+
18
class TexturalFilter():
    """The Textural filter class. This class is used to apply textural filters to an image. The textural filters are
    chosen from the following families: GLCM, NGTDM, GLDZM, GLSZM, NGLDM, GLRLM. The computation is done using CUDA."""

    def __init__(
            self,
            family: str,
            size: int = 3,
            local: bool = False
    ):
        """
        The constructor for the textural filter class.

        Args:
            family (str): The family of the textural filter.
            size (int, optional): The size of the kernel, which will define the filter kernel dimension.
            local (bool, optional): If true, the discretization will be computed locally, else globally.

        Returns:
            None.
        """
        assert size % 2 == 1 and size > 0, "size should be a positive odd number."
        assert isinstance(family, str) and family.upper() in ["GLCM", "NGTDM", "GLDZM", "GLSZM", "NGLDM", "GLRLM"],\
            "family should be a string and should be one of the following: GLCM, NGTDM, GLDZM, GLSZM, NGLDM, GLRLM."

        self.family = family
        self.size = size
        self.local = local
        # Names of the 25 GLCM features, in the order the CUDA kernel writes
        # them into the 4th dimension of the output volume.
        self.glcm_features = [
            "Fcm_joint_max",
            "Fcm_joint_avg",
            "Fcm_joint_var",
            "Fcm_joint_entr",
            "Fcm_diff_avg",
            "Fcm_diff_var",
            "Fcm_diff_entr",
            "Fcm_sum_avg",
            "Fcm_sum_var",
            "Fcm_sum_entr",
            "Fcm_energy",
            "Fcm_contrast",
            "Fcm_dissimilarity",
            "Fcm_inv_diff",
            "Fcm_inv_diff_norm",
            "Fcm_inv_diff_mom",
            "Fcm_inv_diff_mom_norm",
            "Fcm_inv_var",
            "Fcm_corr",
            "Fcm_auto_corr",
            "Fcm_clust_tend",
            "Fcm_clust_shade",
            "Fcm_clust_prom",
            "Fcm_info_corr1",
            "Fcm_info_corr2"
        ]

    def __glcm_filter(
            self,
            input_images: np.ndarray,
            discretization: dict,
            user_set_min_val: float,
            feature=None
    ) -> np.ndarray:
        """
        Apply a GLCM textural filter to the input image.

        Args:
            input_images (ndarray): The images to filter.
            discretization (dict): The discretization parameters.
            user_set_min_val (float): The minimum value to use for the discretization.
            feature (str or int, optional): The feature to extract from the family. if not specified, all the
                features of the family will be extracted.

        Returns:
            ndarray: The filtered image (3D for a single feature, 4D for all 25),
            or ``None`` when PyCUDA could not be imported.
        """
        # BUGFIX: the gates on `feature` below use `is not None` — the
        # original `if feature:` treated the valid integer index 0 as "no
        # feature selected", skipping validation and later taking the 4D
        # unpad branch on a 3D array.
        if feature is not None:
            if isinstance(feature, str):
                assert feature in self.glcm_features,\
                    "feature should be a string or an integer and should be one of the following: " + ", ".join(self.glcm_features) + "."
            elif isinstance(feature, int):
                assert feature in range(len(self.glcm_features)),\
                    "feature's index should be an integer between 0 and " + str(len(self.glcm_features) - 1) + "."
            else:
                raise TypeError("feature should be an integer or a string from the following list: " + ", ".join(self.glcm_features) + ".")

        # Pre-processing of the input volume: NaN-pad so border voxels have a
        # full kernel neighborhood.
        padding_size = (self.size - 1) // 2
        input_images = np.pad(input_images[:, :, :], padding_size, mode="constant", constant_values=np.nan)
        input_images_copy = deepcopy(input_images)

        # Force row-major (C-contiguous) strides so the flat buffer layout
        # matches the indexing performed inside the CUDA kernel.
        strides = (
            input_images_copy.shape[2] * input_images_copy.shape[1] * input_images_copy.dtype.itemsize,
            input_images_copy.shape[2] * input_images_copy.dtype.itemsize,
            input_images_copy.dtype.itemsize
        )
        input_images = np.lib.stride_tricks.as_strided(input_images, shape=input_images.shape, strides=strides)
        input_images[:, :, :] = input_images_copy[:, :, :]

        if self.local:
            # Discretization (to get the global max value)
            if discretization['type'] == "FBS":
                print("Warning: FBS local discretization is equivalent to global discretization.")
                n_q = discretization['bw']
            elif discretization['type'] == "FBN" and discretization['adapted']:
                n_q = (np.nanmax(input_images) - np.nanmin(input_images)) // discretization['bw']
                user_set_min_val = np.nanmin(input_images)
            elif discretization['type'] == "FBN":
                n_q = discretization['bn']
                user_set_min_val = np.nanmin(input_images)
            else:
                raise ValueError("Discretization should be either FBS or FBN.")

            temp_vol, _ = discretize(
                vol_re=input_images,
                discr_type=discretization['type'],
                n_q=n_q,
                user_set_min_val=user_set_min_val,
                ivh=False
            )

            # Only the discretised maximum is needed; the kernel re-discretises
            # locally per neighborhood.
            max_vol = np.nanmax(temp_vol)

            del temp_vol

        else:
            # Discretization
            if discretization['type'] == "FBS":
                n_q = discretization['bw']
            elif discretization['type'] == "FBN":
                n_q = discretization['bn']
                user_set_min_val = np.nanmin(input_images)
            else:
                raise ValueError("Discretization should be either FBS or FBN.")

            input_images, _ = discretize(
                vol_re=input_images,
                discr_type=discretization['type'],
                n_q=n_q,
                user_set_min_val=user_set_min_val,
                ivh=False
            )

            # Initialize the filtering parameters
            max_vol = np.nanmax(input_images)

        volume_copy = deepcopy(input_images)

        # Filtering
        if feature is not None:
            # Resolve a feature name to its kernel output index
            feature = self.glcm_features.index(feature) if isinstance(feature, str) else feature

            # Initialize the single-feature kernel
            kernel_glcm = single_glcm_kernel.substitute(
                max_vol=int(max_vol),
                filter_size=self.size,
                shape_volume_0=int(volume_copy.shape[0]),
                shape_volume_1=int(volume_copy.shape[1]),
                shape_volume_2=int(volume_copy.shape[2]),
                discr_type=discretization['type'],
                n_q=n_q,
                min_val=user_set_min_val,
                feature_index=feature
            )

        else:
            # Create the final 4D volume (one channel per GLCM feature)
            input_images = np.zeros((input_images.shape[0], input_images.shape[1], input_images.shape[2], 25), dtype=np.float32)

            # Fill with nan
            input_images[:] = np.nan

            # Initialize the all-features kernel
            kernel_glcm = glcm_kernel.substitute(
                max_vol=int(max_vol),
                filter_size=self.size,
                shape_volume_0=int(volume_copy.shape[0]),
                shape_volume_1=int(volume_copy.shape[1]),
                shape_volume_2=int(volume_copy.shape[2]),
                discr_type=discretization['type'],
                n_q=n_q,
                min_val=user_set_min_val
            )

        # Compile and run the CUDA kernel (only possible when PyCUDA imported)
        if not import_failed:
            mod = SourceModule(kernel_glcm, no_extern_c=True)
            if self.local:
                process_loop_kernel = mod.get_function("glcm_filter_local")
            else:
                process_loop_kernel = mod.get_function("glcm_filter_global")

            # Allocate GPU memory
            volume_gpu = cuda.mem_alloc(input_images.nbytes)
            volume_gpu_copy = cuda.mem_alloc(volume_copy.nbytes)

            # Copy data to the GPU
            cuda.memcpy_htod(volume_gpu, input_images)
            cuda.memcpy_htod(volume_gpu_copy, volume_copy)

            # Set up the grid and block dimensions
            block_dim = (16, 16, 1)  # threads per block
            grid_dim = (
                int((volume_copy.shape[0] - 1) // block_dim[0] + 1),
                int((volume_copy.shape[1] - 1) // block_dim[1] + 1),
                int((volume_copy.shape[2] - 1) // block_dim[2] + 1)
            )  # blocks in the grid

            # Run the kernel
            process_loop_kernel(volume_gpu, volume_gpu_copy, block=block_dim, grid=grid_dim)

            # Synchronize to ensure all CUDA operations are complete
            context.synchronize()

            # Copy data back to the CPU
            cuda.memcpy_dtoh(input_images, volume_gpu)

            # Free the allocated GPU memory
            volume_gpu.free()
            volume_gpu_copy.free()
            del volume_copy

            # Unpad the volume (BUGFIX: was `if feature:`, which mis-routed
            # feature index 0 into the 4D branch)
            if feature is not None:  # 3D (single-feature)
                input_images = input_images[padding_size:-padding_size, padding_size:-padding_size, padding_size:-padding_size]
            else:  # 4D (all features)
                input_images = input_images[padding_size:-padding_size, padding_size:-padding_size, padding_size:-padding_size, :]

            return input_images

        else:
            return None

    def __call__(
            self,
            input_images: np.ndarray,
            discretization: dict,
            user_set_min_val: float,
            family: str = "GLCM",
            feature: str = None,
            size: int = None,
            local: bool = False
    ) -> np.ndarray:
        """
        Apply a textural filter to the input image.

        Args:
            input_images (ndarray): The images to filter.
            discretization (dict): The discretization parameters.
            user_set_min_val (float): The minimum value to use for the discretization.
            family (str, optional): The family of the textural filter.
            feature (str, optional): The feature to extract from the family. if not specified, all the features of the
                family will be extracted.
            size (int, optional): The filter size.
            local (bool, optional): If true, the discretization will be computed locally, else globally.

        Returns:
            ndarray: The filtered image.
        """
        # Initialization. NOTE(review): falsy arguments (size=None,
        # local=False) leave the stored setting unchanged, and the default
        # family="GLCM" always overwrites the constructor value — confirm
        # this is intended before relying on constructor-set state here.
        if family:
            self.family = family
        if size:
            self.size = size
        if local:
            self.local = local

        # Filtering
        if self.family.lower() == "glcm":
            filtered_images = self.__glcm_filter(input_images, discretization, user_set_min_val, feature)
        else:
            raise NotImplementedError("Only GLCM is implemented for now.")

        return filtered_images
@@ -0,0 +1,9 @@
1
+ from . import *
2
+ from .apply_filter import *
3
+ from .gabor import *
4
+ from .laws import *
5
+ from .log import *
6
+ from .mean import *
7
+ from .TexturalFilter import *
8
+ from .utils import *
9
+ from .wavelet import *