brkraw 0.3.11__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. brkraw/__init__.py +9 -3
  2. brkraw/apps/__init__.py +12 -0
  3. brkraw/apps/addon/__init__.py +30 -0
  4. brkraw/apps/addon/core.py +35 -0
  5. brkraw/apps/addon/dependencies.py +402 -0
  6. brkraw/apps/addon/installation.py +500 -0
  7. brkraw/apps/addon/io.py +21 -0
  8. brkraw/apps/hook/__init__.py +25 -0
  9. brkraw/apps/hook/core.py +636 -0
  10. brkraw/apps/loader/__init__.py +10 -0
  11. brkraw/apps/loader/core.py +622 -0
  12. brkraw/apps/loader/formatter.py +288 -0
  13. brkraw/apps/loader/helper.py +797 -0
  14. brkraw/apps/loader/info/__init__.py +11 -0
  15. brkraw/apps/loader/info/scan.py +85 -0
  16. brkraw/apps/loader/info/scan.yaml +90 -0
  17. brkraw/apps/loader/info/study.py +69 -0
  18. brkraw/apps/loader/info/study.yaml +156 -0
  19. brkraw/apps/loader/info/transform.py +92 -0
  20. brkraw/apps/loader/types.py +220 -0
  21. brkraw/cli/__init__.py +5 -0
  22. brkraw/cli/commands/__init__.py +2 -0
  23. brkraw/cli/commands/addon.py +327 -0
  24. brkraw/cli/commands/config.py +205 -0
  25. brkraw/cli/commands/convert.py +903 -0
  26. brkraw/cli/commands/hook.py +348 -0
  27. brkraw/cli/commands/info.py +74 -0
  28. brkraw/cli/commands/init.py +214 -0
  29. brkraw/cli/commands/params.py +106 -0
  30. brkraw/cli/commands/prune.py +288 -0
  31. brkraw/cli/commands/session.py +371 -0
  32. brkraw/cli/hook_args.py +80 -0
  33. brkraw/cli/main.py +83 -0
  34. brkraw/cli/utils.py +60 -0
  35. brkraw/core/__init__.py +13 -0
  36. brkraw/core/config.py +380 -0
  37. brkraw/core/entrypoints.py +25 -0
  38. brkraw/core/formatter.py +367 -0
  39. brkraw/core/fs.py +495 -0
  40. brkraw/core/jcamp.py +600 -0
  41. brkraw/core/layout.py +451 -0
  42. brkraw/core/parameters.py +781 -0
  43. brkraw/core/zip.py +1121 -0
  44. brkraw/dataclasses/__init__.py +14 -0
  45. brkraw/dataclasses/node.py +139 -0
  46. brkraw/dataclasses/reco.py +33 -0
  47. brkraw/dataclasses/scan.py +61 -0
  48. brkraw/dataclasses/study.py +131 -0
  49. brkraw/default/__init__.py +3 -0
  50. brkraw/default/pruner_specs/deid4share.yaml +42 -0
  51. brkraw/default/rules/00_default.yaml +4 -0
  52. brkraw/default/specs/metadata_dicom.yaml +236 -0
  53. brkraw/default/specs/metadata_transforms.py +92 -0
  54. brkraw/resolver/__init__.py +7 -0
  55. brkraw/resolver/affine.py +539 -0
  56. brkraw/resolver/datatype.py +69 -0
  57. brkraw/resolver/fid.py +90 -0
  58. brkraw/resolver/helpers.py +36 -0
  59. brkraw/resolver/image.py +188 -0
  60. brkraw/resolver/nifti.py +370 -0
  61. brkraw/resolver/shape.py +235 -0
  62. brkraw/schema/__init__.py +3 -0
  63. brkraw/schema/context_map.yaml +62 -0
  64. brkraw/schema/meta.yaml +57 -0
  65. brkraw/schema/niftiheader.yaml +95 -0
  66. brkraw/schema/pruner.yaml +55 -0
  67. brkraw/schema/remapper.yaml +128 -0
  68. brkraw/schema/rules.yaml +154 -0
  69. brkraw/specs/__init__.py +10 -0
  70. brkraw/specs/hook/__init__.py +12 -0
  71. brkraw/specs/hook/logic.py +31 -0
  72. brkraw/specs/hook/validator.py +22 -0
  73. brkraw/specs/meta/__init__.py +5 -0
  74. brkraw/specs/meta/validator.py +156 -0
  75. brkraw/specs/pruner/__init__.py +15 -0
  76. brkraw/specs/pruner/logic.py +361 -0
  77. brkraw/specs/pruner/validator.py +119 -0
  78. brkraw/specs/remapper/__init__.py +27 -0
  79. brkraw/specs/remapper/logic.py +924 -0
  80. brkraw/specs/remapper/validator.py +314 -0
  81. brkraw/specs/rules/__init__.py +6 -0
  82. brkraw/specs/rules/logic.py +263 -0
  83. brkraw/specs/rules/validator.py +103 -0
  84. brkraw-0.5.0.dist-info/METADATA +81 -0
  85. brkraw-0.5.0.dist-info/RECORD +88 -0
  86. {brkraw-0.3.11.dist-info → brkraw-0.5.0.dist-info}/WHEEL +1 -2
  87. brkraw-0.5.0.dist-info/entry_points.txt +13 -0
  88. brkraw/lib/__init__.py +0 -4
  89. brkraw/lib/backup.py +0 -641
  90. brkraw/lib/bids.py +0 -0
  91. brkraw/lib/errors.py +0 -125
  92. brkraw/lib/loader.py +0 -1220
  93. brkraw/lib/orient.py +0 -194
  94. brkraw/lib/parser.py +0 -48
  95. brkraw/lib/pvobj.py +0 -301
  96. brkraw/lib/reference.py +0 -245
  97. brkraw/lib/utils.py +0 -471
  98. brkraw/scripts/__init__.py +0 -0
  99. brkraw/scripts/brk_backup.py +0 -106
  100. brkraw/scripts/brkraw.py +0 -744
  101. brkraw/ui/__init__.py +0 -0
  102. brkraw/ui/config.py +0 -17
  103. brkraw/ui/main_win.py +0 -214
  104. brkraw/ui/previewer.py +0 -225
  105. brkraw/ui/scan_info.py +0 -72
  106. brkraw/ui/scan_list.py +0 -73
  107. brkraw/ui/subj_info.py +0 -128
  108. brkraw-0.3.11.dist-info/METADATA +0 -25
  109. brkraw-0.3.11.dist-info/RECORD +0 -28
  110. brkraw-0.3.11.dist-info/entry_points.txt +0 -3
  111. brkraw-0.3.11.dist-info/top_level.txt +0 -2
  112. tests/__init__.py +0 -0
  113. {brkraw-0.3.11.dist-info → brkraw-0.5.0.dist-info/licenses}/LICENSE +0 -0
brkraw/lib/loader.py DELETED
@@ -1,1220 +0,0 @@
1
- from .errors import *
2
- from .orient import build_affine_from_orient_info, reversed_pose_correction, get_origin
3
- from .pvobj import PvDatasetDir, PvDatasetZip
4
- from .utils import *
5
- from .orient import to_matvec
6
- from .reference import ERROR_MESSAGES, ISSUE_REPORT
7
- import numpy as np
8
- import zipfile
9
- import pathlib
10
- import os
11
- import re
12
- import warnings
13
# Print floats in fixed-point (not scientific) notation when dumping arrays.
np.set_printoptions(formatter={'float_kind':'{:f}'.format})
import enum
15
-
16
-
17
@enum.unique
class DataType(enum.Enum):
    """Kinds of data containers brkraw recognizes."""
    PVDATASET = enum.auto()  # == 1
    NIFTI1 = enum.auto()     # == 2
21
-
22
-
23
def load(path):
    """Open *path* as a Paravision dataset.

    A directory is wrapped in ``PvDatasetDir``; a zip archive (including
    ``*.PvDatasets`` produced by PV 6.0.1) in ``PvDatasetZip``.  Any other
    path raises ``FileNotValidError``.
    """
    path = pathlib.Path(path)
    if os.path.isdir(path):
        return PvDatasetDir(path)
    if os.path.isfile(path) and zipfile.is_zipfile(path):
        return PvDatasetZip(path)
    # Not a directory, not a zip file (or does not exist at all).
    raise FileNotValidError(path, DataType.PVDATASET)
34
-
35
-
36
- class BrukerLoader():
37
- """ The front-end handler for Bruker PvDataset
38
-
39
- This class is designed to use for handle PvDataset and optimized for PV 6.0.1, but
40
- also provide backward compatibility with PV 5.1. This class can import naive
41
- PvDataset with directory as well as compressed dataset by zip and Paravision 6.0.1
42
- (*.zip and *.PvDatasets).
43
-
44
- Attributes:
45
- num_scans (int): The number of scan objects on the loaded dataset.
46
- num_recos (int): The number of reco objects on the loaded dataset.
47
- is_pvdataset (bool): Return True if imported path is PvDataset, else False
48
-
49
- Methods:
50
- - get method for data object
51
- get_dataobj(scan_id, reco_id)
52
- return dataobj without reshape (numpy.array)
53
- get_fid(scan_id)
54
- return binary fid object
55
- get_niftiobj(scan_id, reco_id)
56
- return nibabel's NifTi1Image object
57
-
58
- - get method for parameter objects
59
- get_acqp(scan_id)
60
- return acqp parameter object
61
- get_method(scan_id)
62
- return method parameter object
63
- get_visu_pars(scan_id, reco_id)
64
- return visu_pars parameter object
65
-
66
- - get method for image parameters
67
- get_matrix_size(scan_id, reco_id)
68
- return matrix shape to reshape dataobj
69
- get_affine(scan_id, reco_id)
70
- return affine transform matrix
71
- get_bdata(scan_id, reco_id)
72
- return bvals, bvecs, as string
73
- get_scan_time(visu_pars=None)
74
- return dictionary contains the datetime object for session initiate time
75
- if visu_pars parameter object is given, it will contains scan start time
76
-
77
- - method to generate files
78
- save_nifti(scan_id, reco_id, filename, dir='./', ext='nii.gz')
79
- generate NifTi1 file
80
- save_bdata(scan_id, filename, dir='./')
81
- generate FSL's Bdata files for DTI image processing
82
- save_json(scan_id, reco_id, filename, dir='./')
83
- generate JSON with given filename for BIDS MRI parameters
84
-
85
- - method to print meta information
86
- print_bids(scan_id, reco_id, fobj=None)
87
- print out BIDS MRI parameters defined at reference.py
88
- if fileobject is given, it will be written in file instead of stdout
89
- info(fobj=None)
90
- print out the PvDataset major parameters
91
- if fileobject is given, it will be written in file instead of stdout
92
-
93
- - method to override header
94
- override_subjtype(subjtype)
95
- override subject type (e.g. Biped)
96
- override_position(position_string)
97
- override position of subject (e.g. Head_Prone)
98
- """
99
- def __init__(self, path):
100
- """ class method to initiate object.
101
- Args:
102
- path (str): Path of PvDataset.
103
- """
104
- self._pvobj = load(path)
105
- self._override_position = None
106
- self._override_type = None
107
-
108
- if (self.num_scans > 0) and (self._subject != None):
109
- self._is_pvdataset = True
110
- else:
111
- self._is_pvdataset = False
112
-
113
    @property
    def pvobj(self):
        """Underlying low-level dataset object (PvDatasetDir or PvDatasetZip)."""
        return self._pvobj
116
-
117
- @property
118
- def num_scans(self):
119
- # [20210820] Add-paravision 360 related.
120
- len_scans = len(self._pvobj._fid.keys())
121
- if len_scans > 0:
122
- return len_scans
123
- else:
124
- return len(self._pvobj._2dseq.keys())
125
-
126
- @property
127
- def num_recos(self):
128
- return sum([len(r) for r in self._avail.values()])
129
-
130
    @property
    def is_pvdataset(self):
        # True when the loaded path contained at least one scan and subject
        # information (decided once in __init__).
        return self._is_pvdataset
133
-
134
- def override_subjtype(self, subjtype):
135
- """ override subject type
136
- Arge:
137
- subtype(str): subject type that supported by PV
138
- """
139
- err_msg = 'Unknown subject type [{}]'.format(subjtype)
140
- if subjtype not in ['Biped', 'Quadruped', 'Phantom', 'Other', 'OtherAnimal']:
141
- raise Exception(err_msg)
142
- self._override_type = subjtype
143
-
144
- def override_position(self, position_string):
145
- """ override subject position
146
- Arge:
147
- position_string: subject position that supported by PV
148
- """
149
- err_msg = 'Unknown position string [{}]'.format(position_string)
150
- try:
151
- part, side = position_string.split('_')
152
- if part not in ['Head', 'Foot', 'Tail']:
153
- raise Exception(err_msg)
154
- if side not in ['Supine', 'Prone', 'Left', 'Right']:
155
- raise Exception(err_msg)
156
- self._override_position = position_string
157
- except:
158
- raise Exception(err_msg)
159
-
160
    def close(self):
        """Close the underlying dataset and drop the reference to it.

        The loader must not be used after this call.
        """
        self._pvobj.close()
        self._pvobj = None
163
-
164
- def get_affine(self, scan_id, reco_id):
165
- visu_pars = self._get_visu_pars(scan_id, reco_id)
166
- method = self._method[scan_id]
167
- return self._get_affine(visu_pars, method)
168
-
169
- def _get_dataobj(self, scan_id, reco_id):
170
- dataobj = self._pvobj.get_dataobj(scan_id, reco_id)
171
- return dataobj
172
-
173
- def _get_dataslp(self, visu_pars):
174
- """ Return data slope and offset for value correction
175
- Args:
176
- visu_pars:
177
-
178
- Returns:
179
- data_slp
180
- data_off
181
- """
182
- data_slp = get_value(visu_pars, 'VisuCoreDataSlope')
183
- data_off = get_value(visu_pars, 'VisuCoreDataOffs')
184
- if isinstance(data_slp, list):
185
- data_slp = data_slp[0] if is_all_element_same(data_slp) else data_slp
186
- if isinstance(data_off, list):
187
- data_off = data_off[0] if is_all_element_same(data_off) else data_off
188
- return data_slp, data_off
189
-
190
    def get_dataobj(self, scan_id, reco_id, slope=True, offset=True):
        """ Return dataobj that has 3D(spatial) + extra frame
        Args:
            scan_id: scan id
            reco_id: reco id
            slope: if True, apply VisuCoreDataSlope to the array values
            offset: if True, apply VisuCoreDataOffs to the array values
        Returns:
            dataobj: numpy array reshaped to the scan's matrix size, with the
                slice/echo axes reordered for downstream (BIDS) use
        Raises:
            UnexpectedError: when a per-frame slope/offset list does not match
                the frame count, or on an unexpected frame shape
        """
        visu_pars = self._get_visu_pars(scan_id, reco_id)
        dim = self._get_dim_info(visu_pars)[0]
        fg_info = self._get_frame_group_info(visu_pars)
        matrix_size = self.get_matrix_size(scan_id, reco_id)
        dataobj = self._get_dataobj(scan_id, reco_id)
        group_id = fg_info['group_id']

        data_slp, data_off = self._get_dataslp(visu_pars)

        if slope:
            # Apply the slope to the data array directly instead of recording
            # it in the output header.
            f = fg_info['frame_size']
            if isinstance(data_slp, list):
                # Per-frame slopes: reshape to (frames, voxels) so each frame
                # is scaled by its own slope, then restore the layout.
                if f != len(data_slp):
                    raise UnexpectedError(message='data_slp mismatch;{}'.format(ISSUE_REPORT))
                else:
                    if dim == 2:
                        x, y = matrix_size[:2]
                        _dataobj = dataobj.reshape([f, x * y]).T
                    elif dim == 3:
                        x, y, z = matrix_size[:3]
                        _dataobj = dataobj.reshape([f, x * y * z]).T
                    else:
                        raise UnexpectedError(message='Unexpected frame shape on DTI image;{}'.format(ISSUE_REPORT))
                    dataobj = (_dataobj * data_slp).T
            else:
                dataobj = dataobj * data_slp

        if offset:
            # Same scheme as above, for the additive offset.
            f = fg_info['frame_size']
            if isinstance(data_off, list):
                if f != len(data_off):
                    raise UnexpectedError(message='data_off mismatch;{}'.format(ISSUE_REPORT))
                else:
                    if dim == 2:
                        x, y = matrix_size[:2]
                        _dataobj = dataobj.reshape([f, x * y]).T
                    elif dim == 3:
                        x, y, z = matrix_size[:3]
                        _dataobj = dataobj.reshape([f, x * y * z]).T
                    else:
                        raise UnexpectedError(message='Unexpected frame shape on DTI image;{}'.format(ISSUE_REPORT))
                    dataobj = (_dataobj + data_off).T
            else:
                dataobj = dataobj + data_off

        # 2dseq data is stored frame-major; reversed reshape + transpose yields
        # x, y(, z), frames ordering.
        dataobj = dataobj.reshape(matrix_size[::-1]).T

        def swap_slice_axis(group_id_, dataobj_):
            """Move the FG_SLICE frame-group axis to the third array axis."""
            slice_code = 'FG_SLICE'
            if slice_code not in group_id_:
                pass
            else:
                # +2 because the first two axes are the in-plane x/y axes.
                slice_axis_ = group_id_.index(slice_code) + 2
                dataobj_ = np.swapaxes(dataobj_, 2, slice_axis_)
            return dataobj_

        if fg_info['frame_type'] != None:
            if group_id[0] == 'FG_SLICE':
                # Slices already occupy the third axis; nothing to do.
                pass

            elif group_id[0] == 'FG_ECHO':  # multi-echo
                if self.is_multi_echo(scan_id, reco_id):
                    # push echo to last axis for BIDS
                    if 'FG_SLICE' not in group_id:
                        dataobj = np.swapaxes(dataobj, dim, -1)
                    else:
                        slice_axis = group_id.index('FG_SLICE') + 2
                        dataobj = np.swapaxes(dataobj, slice_axis, -1)
                        dataobj = np.swapaxes(dataobj, 2, -1)

            elif group_id[0] in ['FG_DIFFUSION', 'FG_DTI', 'FG_MOVIE', 'FG_COIL',
                                 'FG_CYCLE', 'FG_COMPLEX', 'FG_CARDIAC_MOVIE']:
                dataobj = swap_slice_axis(group_id, dataobj)
            else:
                # Unknown frame-group layout: return data in default order and warn.
                warnings.warn('Unexpected frame group combination;{}'.format(ISSUE_REPORT), UserWarning)
        return dataobj
279
-
280
    def get_fid(self, scan_id):
        """Return the raw (binary) fid object of the given scan."""
        return self._pvobj.get_fid(scan_id)
282
-
283
    @property
    def get_visu_pars(self):
        # Exposed as a property so callers can write
        # ``loader.get_visu_pars(scan_id, reco_id)`` like the other getters.
        return self._get_visu_pars
286
-
287
    def get_method(self, scan_id):
        """Return the method parameter object of the given scan."""
        return self._method[scan_id]
289
-
290
    def get_acqp(self, scan_id):
        """Return the acqp parameter object of the given scan."""
        return self._acqp[scan_id]
292
-
293
- def get_bdata(self, scan_id):
294
- method = self.get_method(scan_id)
295
- return self._get_bdata(method)
296
-
297
    def get_matrix_size(self, scan_id, reco_id):
        """Return the matrix shape used to reshape the raw data array."""
        visu_pars = self._get_visu_pars(scan_id, reco_id)
        dataobj = self._get_dataobj(scan_id, reco_id)
        return self._get_matrix_size(visu_pars, dataobj)
301
-
302
    def is_multi_echo(self, scan_id, reco_id):
        """Return the number of echoes if the scan is multi-echo, else False.

        FieldMap acquisitions also carry an FG_ECHO frame group but are
        handled separately, so they report False here.
        """
        visu_pars = self._get_visu_pars(scan_id, reco_id)
        fg_info = self._get_frame_group_info(visu_pars)
        group_id = fg_info['group_id']
        if 'FG_ECHO' in group_id and 'FieldMap' not in fg_info['group_comment']:  # FieldMap is treated differently
            return fg_info['matrix_shape'][group_id.index('FG_ECHO')]  # number of echoes
        else:
            return False
310
-
311
- # methods to dump data into file object
312
- ## - NifTi1
313
    def get_niftiobj(self, scan_id, reco_id, crop=None, slope=False, offset=False):
        """ return nibabel nifti object
        Args:
            scan_id:
            reco_id:
            crop: (start, end) frame crop range; either end may be None
            slope: if True, apply slope to the data; else record it in header.
                NOTE: forced to True when the dataset stores per-frame slopes
                (unless the caller passed None, which disables the upgrade).
            offset: if True, apply offset to the data; else record in header.
                Same per-frame forcing rule as *slope*.
        Returns:
            nibabel.Nifti1Image, or a list of them for multi-slice-pack or
            multi-echo data
        """
        from nibabel import Nifti1Image
        visu_pars = self._get_visu_pars(scan_id, reco_id)
        method = self._method[scan_id]
        affine = self._get_affine(visu_pars, method)

        data_slp, data_off = self._get_dataslp(visu_pars)
        # Per-frame slope/offset lists cannot be expressed in a NIfTI header,
        # so they must be applied to the data itself.
        if isinstance(data_slp, list) and slope != None:
            slope = True
        if isinstance(data_off, list) and offset != None:
            offset = True

        imgobj = self.get_dataobj(scan_id, reco_id, slope=slope, offset=offset)

        if isinstance(affine, list):
            # One affine per slice pack: split the volume into one NIfTI per pack.
            parser = []
            slice_info = self._get_slice_info(visu_pars)
            num_slice_packs = slice_info['num_slice_packs']

            for spack_idx in range(num_slice_packs):
                num_slices_each_pack = slice_info['num_slices_each_pack']
                # NOTE(review): start index assumes equally sized packs — confirm.
                start = int(spack_idx * num_slices_each_pack[spack_idx])
                end = start + num_slices_each_pack[spack_idx]
                seg_imgobj = imgobj[..., start:end]
                niiobj = Nifti1Image(seg_imgobj, affine[spack_idx])
                niiobj = self._set_nifti_header(niiobj, visu_pars, method, slope=slope, offset=offset)
                parser.append(niiobj)
            return parser

        if self.is_multi_echo(scan_id, reco_id):
            # multi-echo image must be splitted
            parser = []
            for e in range(imgobj.shape[-1]):
                imgobj_ = imgobj[..., e]
                if len(imgobj_.shape) > 4:
                    x, y, z = imgobj_.shape[:3]
                    f = multiply_all(imgobj_.shape[3:])
                    # all converted nifti must be 4D
                    imgobj_ = imgobj_.reshape([x, y, z, f])
                if crop != None:
                    # Crop along the last (frame) axis; open-ended on either side.
                    if crop[0] is None:
                        niiobj_ = Nifti1Image(imgobj_[..., :crop[1]], affine)
                    elif crop[1] is None:
                        niiobj_ = Nifti1Image(imgobj_[..., crop[0]:], affine)
                    else:
                        niiobj_ = Nifti1Image(imgobj_[..., crop[0]:crop[1]], affine)
                else:
                    niiobj_ = Nifti1Image(imgobj_, affine)
                niiobj_ = self._set_nifti_header(niiobj_, visu_pars, method, slope=slope, offset=offset)
                parser.append(niiobj_)
            return parser
        else:
            if len(imgobj.shape) > 4:
                x, y, z = imgobj.shape[:3]
                f = multiply_all(imgobj.shape[3:])
                # all converted nifti must be 4D
                imgobj = imgobj.reshape([x, y, z, f])
            if crop != None:
                if crop[0] is None:
                    niiobj = Nifti1Image(imgobj[..., :crop[1]], affine)
                elif crop[1] is None:
                    niiobj = Nifti1Image(imgobj[..., crop[0]:], affine)
                else:
                    niiobj = Nifti1Image(imgobj[..., crop[0]:crop[1]], affine)
            else:
                niiobj = Nifti1Image(imgobj, affine)
            niiobj = self._set_nifti_header(niiobj, visu_pars, method, slope=slope, offset=offset)
            return niiobj
391
-
392
    def get_sitkimg(self, scan_id, reco_id, slope=True, offset=True, is_vector=False):
        """Return a SimpleITK image object instead of a nibabel NIfTI object.

        Requires the optional SimpleITK dependency.  Returns a list of images
        when the dataset has multiple slice packs (one affine per pack).
        """
        try:
            import SimpleITK as sitk
        except ModuleNotFoundError:
            raise ModuleNotFoundError('The BrkRaw did not be installed with SimpleITK (optional requirement).\n'
                                      '\t\t\t\t\t Please install SimpleITK to activate this method.')

        visu_pars = self._get_visu_pars(scan_id, reco_id)
        method = self._method[scan_id]
        res = self._get_spatial_info(visu_pars)['spatial_resol']
        dataobj = self.get_dataobj(scan_id, reco_id, slope=slope, offset=offset)
        affine = self._get_affine(visu_pars, method)

        if isinstance(affine, list):
            # One image per slice pack, each with its own direction/origin/spacing.
            parser = []
            slice_info = self._get_slice_info(visu_pars)
            num_slice_packs = slice_info['num_slice_packs']
            for spack_idx in range(num_slice_packs):
                num_slices_each_pack = slice_info['num_slices_each_pack']
                start = int(spack_idx * num_slices_each_pack[spack_idx])
                end = start + num_slices_each_pack[spack_idx]
                seg_imgobj = dataobj[..., start:end]
                sitkobj = sitk.GetImageFromArray(seg_imgobj.T)
                # ITK uses LPS; flip the first two axes of the RAS affine.
                sitkaff = np.matmul(np.diag([-1, -1, 1, 1]), affine[spack_idx])
                sitkdir, sitkorg = to_matvec(sitkaff)
                # Direction must be a pure rotation: divide out the voxel spacing.
                sitkdir = sitkdir.dot(np.linalg.inv(np.diag(res[spack_idx])))
                sitkobj.SetDirection(sitkdir.flatten().tolist())
                sitkobj.SetOrigin(sitkorg)
                sitkobj.SetSpacing(res[spack_idx])
                parser.append(sitkobj)
            return parser

        affine = np.matmul(np.diag([-1, -1, 1, 1]), affine)  # RAS to LPS
        direction_, origin_ = to_matvec(affine)
        direction_ = direction_.dot(np.linalg.inv(np.diag(res[0])))
        imgobj = sitk.GetImageFromArray(dataobj.T, isVector=is_vector)

        if len(dataobj.shape) > 3:
            # 4D data: extend spacing with the temporal resolution and embed
            # the 3x3 direction into a 4x4 identity.
            res = [list(res[0]) + [self._get_temp_info(visu_pars)['temporal_resol']]]
            direction = np.eye(4)
            direction[:3, :3] = direction_
            direction = direction.flatten()
            origin = np.zeros([4])
            origin[:3] = origin_
        else:
            direction = direction_
            origin = origin_
        imgobj.SetDirection(direction.flatten().tolist())
        imgobj.SetOrigin(origin)
        imgobj.SetSpacing(res[0])
        # header update (currently a no-op stub; see _set_dicom_header)
        imgobj = self._set_dicom_header(imgobj, visu_pars, method, slope, offset)
        return imgobj
446
-
447
    def _set_dicom_header(self, sitk_img, visu_pars, method, slope, offset):
        """Update the SimpleITK image header with DICOM fields.

        TODO: not implemented yet; currently returns the image unchanged.
        """
        return sitk_img
450
-
451
    def save_sitk(self, io_type=None):
        """Save a SimpleITK image (mha/nrrd with header).

        TODO: not implemented yet.
        """
        pass
454
-
455
    @property
    def save_as(self):
        # Backward-compatible alias: ``loader.save_as(...)`` forwards to
        # :meth:`save_nifti`.
        return self.save_nifti
458
-
459
- def _inspect_ids(self, scan_id, reco_id):
460
- if scan_id not in self._avail.keys():
461
- print('[Error] Invalid Scan ID.\n'
462
- ' - Your input: {}\n'
463
- ' - Available Scan IDs: {}'.format(scan_id, list(self._avail.keys())))
464
- raise ValueError
465
- else:
466
- if reco_id not in self._avail[scan_id]:
467
- print('[Error] Invalid Reco ID.\n'
468
- ' - Your input: {}\n'
469
- ' - Available Reco IDs: {}'.format(reco_id, self._avail[scan_id]))
470
- raise ValueError
471
-
472
- def save_nifti(self, scan_id, reco_id, filename, dir='./', ext='nii.gz',
473
- crop=None, slope=False, offset=False):
474
- niiobj = self.get_niftiobj(scan_id, reco_id, crop=crop, slope=slope, offset=offset)
475
- if isinstance(niiobj, list):
476
- for i, nii in enumerate(niiobj):
477
- output_path = os.path.join(dir,
478
- '{}-{}.{}'.format(filename,
479
- str(i+1).zfill(2), ext))
480
- nii.to_filename(output_path)
481
- else:
482
- output_path = os.path.join(dir, '{}.{}'.format(filename, ext))
483
- niiobj.to_filename(output_path)
484
-
485
- # - FSL bval, bvec, and bmat
486
- def save_bdata(self, scan_id, filename, dir='./'):
487
- method = self._method[scan_id]
488
- # bval, bvec, bmat = self._get_bdata(method) # [220201] bmat seems not necessary
489
- bvals, bvecs = self._get_bdata(method)
490
- output_path = os.path.join(dir, filename)
491
-
492
- with open('{}.bval'.format(output_path), 'w') as bval_fobj:
493
- bval_fobj.write(' '.join(bvals.astype('str')) + '\n')
494
-
495
- with open('{}.bvec'.format(output_path), 'w') as bvec_fobj:
496
- for row in bvecs:
497
- bvec_fobj.write(' '.join(row.astype('str')) + '\n')
498
-
499
- # BIDS JSON
500
    def _parse_json(self, scan_id, reco_id, metadata=None):
        """Build a dict of BIDS metadata for the given scan/reco.

        Args:
            scan_id: scan id
            reco_id: reco id
            metadata: mapping of BIDS key -> parameter reference; defaults to
                a copy of COMMON_META_REF.
        Returns:
            dict: BIDS key -> resolved value (numpy arrays converted to lists).
        """
        acqp = self._acqp[scan_id]
        method = self._method[scan_id]
        visu_pars = self._get_visu_pars(scan_id, reco_id)

        json_obj = dict()
        # Axis index -> BIDS encoding-direction letter.
        encdir_dic = {0: 'i', 1: 'j', 2: 'k'}

        if metadata is None:
            metadata = COMMON_META_REF.copy()
        for k, v in metadata.items():
            val = meta_get_value(v, acqp, method, visu_pars)
            if k in ['PhaseEncodingDirection', 'SliceEncodingDirection']:
                # Convert the encoding direction meta data into BIDS format
                if val != None:
                    if isinstance(val, int):
                        # Already an axis index (PV 6).
                        val = encdir_dic[val]
                    else:
                        if isinstance(val, list):
                            if is_all_element_same(val):
                                # NOTE(review): a collapsed identical list keeps
                                # its raw element without code conversion — verify.
                                val = val[0]
                            else:
                                # handling condition of multiple phase encoding direction
                                updated_val = []
                                # NOTE: loop variable deliberately shadows the
                                # metadata value `v`; it is not used afterwards.
                                for v in val:
                                    if isinstance(v, int):
                                        # in PV 6 if each slice package has distinct phase encoding direction
                                        updated_val.append(encdir_dic[v])
                                    else:
                                        # in PV 5.1, element wise code conversion
                                        encdirs = encdir_code_converter(v)
                                        if 'phase_enc' in encdirs:
                                            pe_idx = encdirs.index('phase_enc')
                                            updated_val.append(encdir_dic[pe_idx])
                                        else:
                                            updated_val.append(None)
                                val = updated_val
                        elif isinstance(val, str):
                            # in PV 5.1, single value code conversion
                            encdirs = encdir_code_converter(val)
                            if 'phase_enc' in encdirs:
                                pe_idx = encdirs.index('phase_enc')
                                val = encdir_dic[pe_idx]
                            else:
                                val = None
                        else:
                            raise UnexpectedError('Unexpected phase encoding direction in PV5.1.')
            if isinstance(val, np.ndarray):
                # JSON cannot serialize numpy arrays.
                val = val.tolist()
            json_obj[k] = val
        return json_obj
551
-
552
- def save_json(self, scan_id, reco_id, filename, dir='./', metadata=None, condition=None):
553
- json_obj = self._parse_json(scan_id, reco_id, metadata)
554
- if condition != None:
555
- code, idx = condition
556
- if code == 'me': # multi-echo
557
- if 'EchoTime' in json_obj.keys():
558
- te = json_obj['EchoTime']
559
- if isinstance(te, list):
560
- json_obj['EchoTime'] = te[idx]
561
- else:
562
- raise InvalidApproach('SingleTE data')
563
- elif code == 'fm':
564
- visu_pars = self._get_visu_pars(scan_id, reco_id)
565
- json_obj['Units'] = get_value(visu_pars, 'VisuCoreDataUnits')[0]
566
- json_obj['IntendFor'] = ["func/*_bold.nii.gz"]
567
- else:
568
- raise InvalidApproach('Invalid datatype code for json creation')
569
-
570
- # remove all null fields
571
- for k, v in json_obj.items():
572
- if v is None:
573
- json_obj[k] = 'Value was not specified'
574
-
575
- # RepetitionTime is mutually exclusive with VolumeTiming, here default with RepetitionTime.
576
- # https://bids-specification.readthedocs.io/en/latest/04-modality-specific-files/01-magnetic-resonance-imaging-data.html#required-fields
577
- # To use VolumeTiming, remove the RepetitionTime item in .json file generated from bids_helper.
578
-
579
- if ('RepetitionTime' in json_obj.keys()) and ('VolumeTiming' in json_obj.keys()):
580
- if type(json_obj['RepetitionTime']) == int or type(json_obj['RepetitionTime']) == float:
581
- del json_obj['VolumeTiming']
582
- msg = "Both 'RepetitionTime' and 'VolumeTiming' exist in your .json file, removed 'VolumeTiming' to make it valid for BIDS.\
583
- \n To use VolumeTiming, remove the RepetitionTime item but keep VolumeTiming from the .json file generated from bids_helper."
584
- warnings.warn(msg)
585
-
586
- with open(os.path.join(dir, '{}.json'.format(filename)), 'w') as f:
587
- import json
588
- json.dump(json_obj, f, indent=4)
589
-
590
    def get_scan_time(self, visu_pars=None):
        """Return session timing parsed from the SUBJECT_date parameter.

        Args:
            visu_pars: optional visu_pars parameter object; when given, the
                returned dict additionally contains the scan end time.
        Returns:
            dict with keys 'date', 'start_time' and, when visu_pars is
            given, 'scan_time'.
        Raises:
            Exception: when the date string matches neither known format
                (PV 5.1 'HH:MM:SS d Mon YYYY' nor PV 6 ISO 'YYYY-MM-DDTHH:MM:SS').
        """
        import datetime as dt
        subject_date = get_value(self._subject, 'SUBJECT_date')
        subject_date = subject_date[0] if isinstance(subject_date, list) else subject_date
        # PV 5.1 style: 'HH:MM:SS  d Mon YYYY'
        pattern_1 = r'(\d{2}:\d{2}:\d{2})\s+(\d+\s\w+\s\d{4})'
        # PV 6 style: ISO 'YYYY-MM-DDTHH:MM:SS'
        pattern_2 = r'(\d{4}-\d{2}-\d{2})[T](\d{2}:\d{2}:\d{2})'
        if re.match(pattern_1, subject_date):
            # start time
            start_time = dt.time(*map(int, re.sub(pattern_1, r'\1', subject_date).split(':')))
            # date
            date = dt.datetime.strptime(re.sub(pattern_1, r'\2', subject_date), '%d %b %Y').date()
            # end time: last scan start + acquisition duration
            if visu_pars != None:
                last_scan_time = get_value(visu_pars, 'VisuAcqDate')
                last_scan_time = dt.time(*map(int, re.sub(pattern_1, r'\1', last_scan_time).split(':')))
                acq_time = get_value(visu_pars, 'VisuAcqScanTime') / 1000.0  # ms -> s
                time_delta = dt.timedelta(0, acq_time)
                scan_time = (dt.datetime.combine(date, last_scan_time) + time_delta).time()
                return dict(date=date,
                            start_time=start_time,
                            scan_time=scan_time)
        elif re.match(pattern_2, subject_date):
            # start time
            start_time = dt.time(*map(int, re.sub(pattern_2, r'\2', subject_date).split(':')))
            # date
            date = dt.date(*map(int, re.sub(pattern_2, r'\1', subject_date).split('-')))

            # end time taken directly from the reco creation date
            if visu_pars != None:
                scan_time = get_value(visu_pars, 'VisuCreationDate')[0]
                scan_time = dt.time(*map(int, re.sub(pattern_2, r'\2', scan_time).split(':')))
                return dict(date=date,
                            start_time=start_time,
                            scan_time=scan_time)
        else:
            raise Exception(ERROR_MESSAGES['NotIntegrated'])

        # Reached when visu_pars was not provided.
        return dict(date=date,
                    start_time=start_time)
630
-
631
- # printing functions / help documents
632
    def print_bids(self, scan_id, reco_id, fobj=None, metadata=None):
        """Print BIDS metadata key/value pairs, tab-aligned.

        Args:
            fobj: file object to write to; defaults to stdout.
        """
        if fobj == None:
            import sys
            fobj = sys.stdout
        json_obj = self._parse_json(scan_id, reco_id, metadata)
        for k, val in json_obj.items():
            # Pad with tabs so values line up at the fifth tab stop
            # (8-character tabs).
            n_tap = int(5 - int(len(k) / 8))
            if len(k) % 8 >= 7:
                n_tap -= 1
            tap = ''.join(['\t'] * n_tap)
            print('{}:{}{}'.format(k, tap, val), file=fobj)
643
-
644
    def info(self, io_handler=None):
        """ Prints out the information of the internal contents in Bruker raw data
        Args:
            io_handler: IO handler where to print out; defaults to stdout
        """
        if io_handler == None:
            import sys
            io_handler = sys.stdout

        # Study/subject metadata delegated to the low-level pvobj.
        pvobj = self._pvobj
        user_account = pvobj.user_account
        subj_id = pvobj.subj_id
        study_id = pvobj.study_id
        session_id = pvobj.session_id
        user_name = pvobj.user_name
        subj_entry = pvobj.subj_entry
        subj_pose = pvobj.subj_pose
        subj_sex = pvobj.subj_sex
        subj_type = pvobj.subj_type
        subj_weight = pvobj.subj_weight
        subj_dob = pvobj.subj_dob

        lines = []
        for i, (scan_id, recos) in enumerate(self._avail.items()):
            for j, reco_id in enumerate(recos):
                visu_pars = self._get_visu_pars(scan_id, reco_id)
                if i == 0 and j == 0:
                    # Header section: emitted once, before the first scan.
                    sw_version = get_value(visu_pars, 'VisuCreatorVersion')

                    title = 'Paravision {}'.format(sw_version)
                    lines.append(title)
                    lines.append('-' * len(title))

                    try:
                        datetime = self.get_scan_time()
                    except:
                        # Best-effort: unparseable SUBJECT_date falls back to 'None'.
                        datetime = dict(date='None')
                    lines.append('UserAccount:\t{}'.format(user_account))
                    lines.append('Date:\t\t{}'.format(datetime['date']))
                    lines.append('Researcher:\t{}'.format(user_name))
                    lines.append('Subject ID:\t{}'.format(subj_id))
                    lines.append('Session ID:\t{}'.format(session_id))
                    lines.append('Study ID:\t{}'.format(study_id))
                    lines.append('Date of Birth:\t{}'.format(subj_dob))
                    lines.append('Sex:\t\t{}'.format(subj_sex))
                    lines.append('Weight:\t\t{} kg'.format(subj_weight))
                    lines.append('Subject Type:\t{}'.format(subj_type))
                    lines.append('Position:\t{}\t\tEntry:\t{}'.format(subj_pose, subj_entry))

                    lines.append('\n[ScanID]\tSequence::Protocol::[Parameters]')
                # try:
                # Scan-level acquisition parameters, formatted for display.
                tr = get_value(visu_pars, 'VisuAcqRepetitionTime')
                tr = ','.join(map(str, tr)) if isinstance(tr, list) else tr
                te = get_value(visu_pars, 'VisuAcqEchoTime')
                te = 0 if te is None else te
                te = ','.join(map(str, te)) if isinstance(te, list) else te
                pixel_bw = get_value(visu_pars, 'VisuAcqPixelBandwidth')
                flip_angle = get_value(visu_pars, 'VisuAcqFlipAngle')
                acqpars = self.get_acqp(int(scan_id))
                scanname = acqpars._parameters['ACQ_scan_name']
                param_values = [tr, te, pixel_bw, flip_angle]
                for k, v in enumerate(param_values):
                    # Blank out missing values, round floats to two decimals.
                    if v is None:
                        param_values[k] = ''
                    if isinstance(v, float):
                        param_values[k] = '{0:.2f}'.format(v)
                if j == 0:
                    # Scan-level line: emitted once per scan, at its first reco.
                    params = "[ TR: {0} ms, TE: {1} ms, pixelBW: {2} Hz, FlipAngle: {3} degree]".format(
                        *param_values)
                    protocol_name = get_value(visu_pars, 'VisuAcquisitionProtocol')
                    sequence_name = get_value(visu_pars, 'VisuAcqSequenceName')
                    lines.append('[{}]\t{}::{}::{}\n\t{}'.format(str(scan_id).zfill(3),
                                                                 sequence_name,
                                                                 protocol_name,
                                                                 scanname,
                                                                 params))

                # Reco-level line: geometry summary when the reco is spatial.
                dim, cls = self._get_dim_info(visu_pars)
                if cls == 'spatial_only':
                    size = self._get_matrix_size(visu_pars)
                    size = ' x '.join(map(str, size))
                    spatial_info = self._get_spatial_info(visu_pars)
                    temp_info = self._get_temp_info(visu_pars)
                    s_resol = spatial_info['spatial_resol']
                    fov_size = spatial_info['fov_size']
                    fov_size = ' x '.join(map(str, fov_size))
                    s_unit = spatial_info['unit']
                    t_resol = '{0:.3f}'.format(temp_info['temporal_resol'])
                    t_unit = temp_info['unit']
                    s_resol = list(s_resol[0]) if is_all_element_same(s_resol) else s_resol
                    s_resol = ' x '.join(['{0:.3f}'.format(r) for r in s_resol])

                    lines.append(' [{}] dim: {}D, matrix_size: {}, fov_size: {} (unit:mm)\n'
                                 ' spatial_resol: {} (unit:{}), temporal_resol: {} (unit:{})'.format(
                                     str(reco_id).zfill(2), dim, size,
                                     fov_size,
                                     s_resol, s_unit,
                                     t_resol, t_unit))
                else:
                    lines.append(' [{}] dim: {}, {}'.format(str(reco_id).zfill(2), dim, cls))
            # Blank line between scans.
            lines.append('\n')
        print('\n'.join(lines), file=io_handler)
746
-
747
- # method to parse information of each scan
748
- # methods of protocol specific
749
-
750
- def _set_nifti_header(self, niiobj, visu_pars, method, slope, offset):
751
- slice_info = self._get_slice_info(visu_pars)
752
- niiobj.header.default_x_flip = False
753
- temporal_resol = self._get_temp_info(visu_pars)['temporal_resol']
754
- temporal_resol = float(temporal_resol) / 1000
755
- slice_order = get_value(method, 'PVM_ObjOrderScheme')
756
- acq_method = get_value(method, 'Method')
757
-
758
- data_slp, data_off = self._get_dataslp(visu_pars)
759
-
760
- if re.search('epi', acq_method, re.IGNORECASE) and not \
761
- re.search('dti', acq_method, re.IGNORECASE):
762
-
763
- niiobj.header.set_xyzt_units(xyz=2, t=8)
764
- niiobj.header['pixdim'][4] = temporal_resol
765
- niiobj.header.set_dim_info(slice=2)
766
- num_slices = slice_info['num_slices_each_pack'][0]
767
- niiobj.header['slice_duration'] = temporal_resol / num_slices
768
-
769
- if slice_order == 'User_defined_slice_scheme':
770
- niiobj.header['slice_code'] = 0
771
- elif slice_order == 'Sequential':
772
- niiobj.header['slice_code'] = 1
773
- elif slice_order == 'Reverse_sequential':
774
- niiobj.header['slice_code'] = 2
775
- elif slice_order == 'Interlaced':
776
- niiobj.header['slice_code'] = 3
777
- elif slice_order == 'Reverse_interlacesd':
778
- niiobj.header['slice_code'] = 4
779
- elif slice_order == 'Angiopraphy':
780
- niiobj.header['slice_code'] = 0
781
- else:
782
- raise Exception(ERROR_MESSAGES['NotIntegrated'])
783
- niiobj.header['slice_start'] = 0
784
- niiobj.header['slice_end'] = num_slices - 1
785
- else:
786
- niiobj.header.set_xyzt_units('mm')
787
- if not slope:
788
- if slope != None:
789
- if isinstance(data_slp, list):
790
- raise InvalidApproach('Invalid slope size;'
791
- 'The vector type scl_slope cannot be set in nifti header.')
792
- niiobj.header['scl_slope'] = data_slp
793
- else:
794
- niiobj.header['scl_slope'] = 1
795
- else:
796
- niiobj.header['scl_slope'] = 1
797
- if not offset:
798
- if offset != None:
799
- if isinstance(data_off, list):
800
- raise InvalidApproach('Invalid offset size;'
801
- 'The vector type scl_offset cannot be set in nifti header.')
802
- niiobj.header['scl_inter'] = data_off
803
- else:
804
- niiobj.header['scl_inter'] = 0
805
- else:
806
- niiobj.header['scl_inter'] = 0
807
- niiobj.set_qform(niiobj.affine, 1)
808
- niiobj.set_sform(niiobj.affine, 0)
809
- return niiobj
810
-
811
- # EPI
812
- def _get_temp_info(self, visu_pars):
813
- """return temporal resolution for each volume of image"""
814
- total_time = get_value(visu_pars, 'VisuAcqScanTime')
815
- fg_info = self._get_frame_group_info(visu_pars)
816
- parser = []
817
- if fg_info['frame_type'] != None:
818
- for id, fg in enumerate(fg_info['group_id']):
819
- if not re.search('slice', fg, re.IGNORECASE):
820
- parser.append(fg_info['matrix_shape'][id])
821
- frame_size = multiply_all(parser) if len(parser) > 0 else 1
822
- if total_time is None: # derived reco data
823
- total_time = 0
824
- return dict(temporal_resol=(total_time / frame_size),
825
- num_frames=frame_size,
826
- unit='msec')
827
-
828
- # DTI
829
- @staticmethod
830
- def _get_bdata(method):
831
- """Extract, format, and return diffusion bval and bvec"""
832
- bvals = np.array(get_value(method, 'PVM_DwEffBval'))
833
- bvecs = np.array(get_value(method, 'PVM_DwGradVec').T)
834
- # Correct for single b-vals
835
- if np.size(bvals) < 2:
836
- bvals = np.array([bvals])
837
- # Normalize bvecs
838
- bvecs_axis = 0
839
- bvecs_L2_norm = np.atleast_1d(np.linalg.norm(bvecs, 2, bvecs_axis))
840
- bvecs_L2_norm[bvecs_L2_norm < 1e-15] = 1
841
- bvecs = bvecs / np.expand_dims(bvecs_L2_norm, bvecs_axis)
842
- return bvals, bvecs
843
-
844
- # Generals
845
- @staticmethod
846
- def _get_gradient_encoding_info(visu_pars):
847
- version = get_value(visu_pars, 'VisuVersion')
848
-
849
- if version == 1: # case PV 5.1, prepare compatible form of variable
850
- phase_enc = get_value(visu_pars, 'VisuAcqImagePhaseEncDir')
851
- phase_enc = phase_enc[0] if is_all_element_same(phase_enc) else phase_enc
852
- if isinstance(phase_enc, list) and len(phase_enc) > 1:
853
- encoding_axis = []
854
- for d in phase_enc:
855
- encoding_axis.append(encdir_code_converter(d))
856
- else:
857
- encoding_axis = encdir_code_converter(phase_enc)
858
- else: # case PV 6.0.1
859
- encoding_axis = get_value(visu_pars, 'VisuAcqGradEncoding')
860
- return encoding_axis
861
-
862
- def _get_dim_info(self, visu_pars):
863
- """check if the frame contains only spatial components"""
864
- dim = get_value(visu_pars, 'VisuCoreDim')
865
- dim_desc = get_value(visu_pars, 'VisuCoreDimDesc')
866
-
867
- if not all(map(lambda x: x == 'spatial', dim_desc)):
868
- if 'spectroscopic' in dim_desc:
869
- return dim, 'contain_spectroscopic' # spectroscopic data
870
- elif 'temporal' in dim_desc:
871
- return dim, 'contain_temporal' # unexpected data
872
- else:
873
- return dim, 'spatial_only'
874
-
875
- def _get_spatial_info(self, visu_pars):
876
- dim, dim_type = self._get_dim_info(visu_pars)
877
- if dim_type != 'spatial_only':
878
- if dim != 1:
879
- raise Exception(ERROR_MESSAGES['DimType'])
880
- else:
881
- # experimental approaches
882
- matrix_size = get_value(visu_pars, 'VisuCoreSize')
883
- fov_size = get_value(visu_pars, 'VisuCoreExtent')
884
- voxel_resol = np.divide(fov_size, matrix_size).tolist()
885
- return dict(spatial_resol = [voxel_resol],
886
- matrix_size = [matrix_size],
887
- fov_size = fov_size,
888
- unit = 'mm',
889
- )
890
- else:
891
- matrix_size = get_value(visu_pars, 'VisuCoreSize')
892
- fov_size = get_value(visu_pars, 'VisuCoreExtent')
893
- voxel_resol = np.divide(fov_size, matrix_size).tolist()
894
- slice_resol = self._get_slice_info(visu_pars)
895
-
896
- if dim == 3:
897
- spatial_resol = [voxel_resol]
898
- matrix_size = [matrix_size]
899
- elif dim == 2:
900
- xr, yr = voxel_resol
901
- xm, ym = matrix_size
902
- spatial_resol = [(xr, yr, zr) for zr in slice_resol['slice_distances_each_pack']]
903
- matrix_size = [(xm, ym, zm) for zm in slice_resol['num_slices_each_pack']]
904
- else:
905
- raise Exception(ERROR_MESSAGES['DimSize'])
906
- return dict(spatial_resol = spatial_resol,
907
- matrix_size = matrix_size,
908
- fov_size=fov_size,
909
- unit = 'mm',
910
- )
911
-
912
- def _get_slice_info(self, visu_pars, method=None):
913
- version = get_value(visu_pars, 'VisuVersion')
914
- fg_info = self._get_frame_group_info(visu_pars)
915
- num_slice_packs = None
916
- num_slices_each_pack = []
917
- slice_distances_each_pack = []
918
-
919
- if fg_info['frame_type'] is None:
920
- num_slice_packs = 1
921
- # below will be 1 in 3D protocol
922
- num_slices_each_pack = [get_value(visu_pars, 'VisuCoreFrameCount')]
923
- # below will be size of slice_enc axis in 3D protocol
924
- slice_distances_each_pack = [get_value(visu_pars, 'VisuCoreFrameThickness')]
925
- else:
926
- frame_groups = fg_info['group_id']
927
- if version == 1: # PV 5.1 support
928
- try:
929
- phase_enc_dir = get_value(visu_pars, 'VisuAcqImagePhaseEncDir')
930
- phase_enc_dir = [phase_enc_dir[0]] if is_all_element_same(phase_enc_dir) else phase_enc_dir
931
- num_slice_packs = len(phase_enc_dir)
932
- except:
933
- num_slice_packs = 1
934
- matrix_shape = fg_info['matrix_shape']
935
- frame_thickness = get_value(visu_pars, 'VisuCoreFrameThickness')
936
- num_slice_frames = 0
937
- # for id, fg in enumerate(frame_groups):
938
- for _, fg in enumerate(frame_groups):
939
- if re.search('slice', fg, re.IGNORECASE):
940
- num_slice_frames += 1
941
- if num_slice_frames > 2:
942
- raise Exception(ERROR_MESSAGES['SlicePacksSlices'])
943
- if num_slice_packs > 1:
944
- for s in range(num_slice_packs):
945
- num_slices_each_pack.append(int(matrix_shape[0]/num_slice_packs))
946
- else:
947
- num_slices_each_pack.append(matrix_shape[0])
948
- slice_distances_each_pack = [frame_thickness for _ in range(num_slice_packs)]
949
- else:
950
- if version not in (3, 4, 5):
951
- warnings.warn('Unexpected version[VisuVersion];{}'.format(version), UserWarning)
952
-
953
- num_slice_packs = get_value(visu_pars, 'VisuCoreSlicePacksDef')
954
- if num_slice_packs is None:
955
- num_slice_packs = 1
956
- else:
957
- num_slice_packs = num_slice_packs[0][1]
958
-
959
- slices_info_in_pack = get_value(visu_pars, 'VisuCoreSlicePacksSlices')
960
- slice_distance = get_value(visu_pars, 'VisuCoreSlicePacksSliceDist')
961
- num_slice_frames = 0
962
- for _, fg in enumerate(frame_groups):
963
- if re.search('slice', fg, re.IGNORECASE):
964
- num_slice_frames += 1
965
- if num_slice_frames > 2:
966
- raise Exception(ERROR_MESSAGES['SlicePacksSlices'])
967
- try:
968
- num_slices_each_pack = [slices_info_in_pack[0][1] for _ in range(num_slice_packs)]
969
- except:
970
- raise Exception(ERROR_MESSAGES['SlicePacksSlices'])
971
- if isinstance(slice_distance, list):
972
- slice_distances_each_pack = [slice_distance[0] for _ in range(num_slice_packs)]
973
- elif isinstance(slice_distance, float) or isinstance(slice_distance, int):
974
- slice_distances_each_pack = [slice_distance for _ in range(num_slice_packs)]
975
- else:
976
- raise Exception(ERROR_MESSAGES['SliceDistDatatype'])
977
- if len(slice_distances_each_pack) == 0:
978
- slice_distances_each_pack = [get_value(visu_pars, 'VisuCoreFrameThickness')]
979
- else:
980
- for i, d in enumerate(slice_distances_each_pack):
981
- if d == 0:
982
- slice_distances_each_pack[i] = get_value(visu_pars, 'VisuCoreFrameThickness')
983
- if len(num_slices_each_pack) == 0:
984
- num_slices_each_pack = [1]
985
-
986
- return dict(num_slice_packs = num_slice_packs,
987
- num_slices_each_pack = num_slices_each_pack,
988
- slice_distances_each_pack = slice_distances_each_pack,
989
- unit_slice_distances = 'mm'
990
- )
991
-
992
- def _get_orient_info(self, visu_pars, method):
993
-
994
- def get_axis_orient(orient_matrix):
995
- """return indice of axis orientation profiles"""
996
- return [np.argmax(abs(orient_matrix[:, 0])),
997
- np.argmax(abs(orient_matrix[:, 1])),
998
- np.argmax(abs(orient_matrix[:, 2]))]
999
-
1000
- omatrix_parser = []
1001
- oorder_parser = []
1002
- vposition_parser = []
1003
-
1004
- orient_matrix = get_value(visu_pars, 'VisuCoreOrientation').tolist()
1005
- slice_info = self._get_slice_info(visu_pars)
1006
- slice_position = get_value(visu_pars, 'VisuCorePosition')
1007
- if self._override_position != None: # add option to override
1008
- subj_position = self._override_position
1009
- else:
1010
- subj_position = get_value(visu_pars, 'VisuSubjectPosition')
1011
- gradient_orient = get_value(method, 'PVM_SPackArrGradOrient')
1012
-
1013
- if slice_info['num_slice_packs'] > 1:
1014
- num_ori_mat = len(orient_matrix)
1015
- num_slice_packs = slice_info['num_slice_packs']
1016
- if num_ori_mat != num_slice_packs:
1017
- mpms = True
1018
- if not num_slice_packs % num_ori_mat:
1019
- raise Exception(ERROR_MESSAGES['NumOrientMatrix'])
1020
- else:
1021
- # multi slice packs and multi slices, each slice packs must be identical on element.
1022
- # TODO: If error occurred it means the existing of exception for this.
1023
- cut_idx = 0
1024
- num_slices = int(num_ori_mat / num_slice_packs)
1025
- _orient_matrix = []
1026
- _slice_position = []
1027
- for ci in range(num_slice_packs):
1028
- om_set = orient_matrix[cut_idx:cut_idx + num_slices]
1029
- sp_set = slice_position[cut_idx:cut_idx + num_slices]
1030
- if is_all_element_same(om_set):
1031
- _orient_matrix.append(om_set[0])
1032
- _slice_position.append(sp_set)
1033
- else:
1034
- raise Exception(ERROR_MESSAGES['NumOrientMatrix'])
1035
- cut_idx += num_slices
1036
- orient_matrix = _orient_matrix
1037
- slice_position = _slice_position
1038
- else:
1039
- mpms = False
1040
-
1041
- for id, _om in enumerate(orient_matrix):
1042
- om = np.asarray(_om).reshape([3, 3])
1043
- omatrix_parser.append(om)
1044
- oorder_parser.append(get_axis_orient(om))
1045
- if mpms:
1046
- vposition_parser.append(get_origin(slice_position[id], gradient_orient))
1047
- else:
1048
- vposition_parser.append(slice_position[id])
1049
-
1050
- else:
1051
- # check num_slices of first slice_pack
1052
- if is_all_element_same(orient_matrix):
1053
- orient_matrix = orient_matrix[0]
1054
- else:
1055
- raise Exception(ERROR_MESSAGES['NumOrientMatrix'])
1056
- try:
1057
- slice_position = get_origin(slice_position, gradient_orient)
1058
- except:
1059
- raise Exception(ERROR_MESSAGES['NumSlicePosition'])
1060
-
1061
- omatrix_parser = np.asarray(orient_matrix).reshape([3, 3])
1062
- oorder_parser = get_axis_orient(omatrix_parser)
1063
- vposition_parser = slice_position
1064
-
1065
- if self._override_type != None: # add option to override
1066
- subj_type = self._override_type
1067
- else:
1068
- subj_type = get_value(visu_pars, 'VisuSubjectType')
1069
-
1070
- return dict(subject_type = subj_type,
1071
- subject_position = subj_position,
1072
- volume_position = vposition_parser,
1073
- orient_matrix = omatrix_parser,
1074
- orient_order = oorder_parser,
1075
- gradient_orient = gradient_orient,
1076
- )
1077
-
1078
- def _get_affine(self, visu_pars, method):
1079
- is_reversed = True if self._get_disk_slice_order(visu_pars) == 'reverse' else False
1080
- slice_info = self._get_slice_info(visu_pars)
1081
- spatial_info = self._get_spatial_info(visu_pars)
1082
- orient_info = self._get_orient_info(visu_pars, method)
1083
- slice_orient_map = {0: 'sagital', 1: 'coronal', 2: 'axial'}
1084
- num_slice_packs = slice_info['num_slice_packs']
1085
- subj_pose = orient_info['subject_position']
1086
- subj_type = orient_info['subject_type']
1087
-
1088
- if num_slice_packs > 1:
1089
- affine = []
1090
- for slice_idx in range(num_slice_packs):
1091
- sidx = orient_info['orient_order'][slice_idx].index(2)
1092
- slice_orient = slice_orient_map[sidx]
1093
- resol = spatial_info['spatial_resol'][slice_idx]
1094
- rmat = orient_info['orient_matrix'][slice_idx]
1095
- pose = orient_info['volume_position'][slice_idx]
1096
- if is_reversed:
1097
- raise UnexpectedError('Invalid VisuCoreDiskSliceOrder;'
1098
- 'The multi-slice-packs dataset reversed is not tested data.'
1099
- '{}'.format(ISSUE_REPORT))
1100
- affine.append(build_affine_from_orient_info(resol, rmat, pose,
1101
- subj_pose, subj_type,
1102
- slice_orient))
1103
- else:
1104
- sidx = orient_info['orient_order'].index(2)
1105
- slice_orient = slice_orient_map[sidx]
1106
- resol = spatial_info['spatial_resol'][0]
1107
- rmat = orient_info['orient_matrix']
1108
- pose = orient_info['volume_position']
1109
- if is_reversed:
1110
- distance = slice_info['slice_distances_each_pack']
1111
- pose = reversed_pose_correction(pose, rmat, distance)
1112
- affine = build_affine_from_orient_info(resol, rmat, pose,
1113
- subj_pose, subj_type,
1114
- slice_orient)
1115
- return affine
1116
-
1117
- def _get_matrix_size(self, visu_pars, dataobj=None):
1118
-
1119
- spatial_info = self._get_spatial_info(visu_pars)
1120
- slice_info = self._get_slice_info(visu_pars)
1121
- temporal_info = self._get_temp_info(visu_pars)
1122
- # patch the case of multi-echo
1123
- fg_info = self._get_frame_group_info(visu_pars)
1124
-
1125
- matrix_size = spatial_info['matrix_size']
1126
- num_temporal_frame = temporal_info['num_frames']
1127
- num_slice_packs = slice_info['num_slice_packs']
1128
-
1129
- if num_slice_packs > 1:
1130
- if is_all_element_same(matrix_size):
1131
- matrix_size = list(matrix_size[0])
1132
- total_num_slices = sum(slice_info['num_slices_each_pack'])
1133
- matrix_size[-1] = total_num_slices
1134
- else:
1135
- raise UnexpectedError('Matrix size mismatch with multi-slice-packs dataobj;'
1136
- '{}{}'.format(matrix_size, ISSUE_REPORT))
1137
- else:
1138
- matrix_size = list(matrix_size[0])
1139
- if 'FG_SLICE' in fg_info['group_id']:
1140
- if fg_info['group_id'].index('FG_SLICE'): # in the case the slicing frame group happen later
1141
- matrix_size = matrix_size[:2]
1142
- matrix_size.extend(fg_info['matrix_shape'])
1143
- else:
1144
- if num_temporal_frame > 1:
1145
- matrix_size.append(num_temporal_frame)
1146
- else:
1147
- if num_temporal_frame > 1:
1148
- matrix_size.append(num_temporal_frame)
1149
-
1150
- if isinstance(dataobj, np.ndarray):
1151
- # matrix size inspection
1152
- dataobj_shape = dataobj.shape[0]
1153
- if multiply_all(matrix_size) != dataobj_shape:
1154
- raise UnexpectedError('Matrix size mismatch with dataobj;'
1155
- '{} != {}{}'.format(multiply_all(matrix_size),
1156
- dataobj_shape,
1157
- ISSUE_REPORT))
1158
- return matrix_size
1159
-
1160
- @staticmethod
1161
- def _get_disk_slice_order(visu_pars):
1162
- # check disk_slice_order #
1163
- _fo = get_value(visu_pars, 'VisuCoreDiskSliceOrder')
1164
- if _fo in [None, 'disk_normal_slice_order']:
1165
- disk_slice_order = 'normal'
1166
- elif _fo == 'disk_reverse_slice_order':
1167
- disk_slice_order = 'reverse'
1168
- else:
1169
- raise UnexpectedError('Invalid VisuCoreDiskSliceOrder:{};{}'.format(_fo, ISSUE_REPORT))
1170
- return disk_slice_order
1171
-
1172
- def _get_visu_pars(self, scan_id, reco_id):
1173
- # test validation of scan_id and reco_id here
1174
- self._inspect_ids(scan_id, reco_id)
1175
- return self._pvobj.get_visu_pars(scan_id, reco_id)
1176
-
1177
- @staticmethod
1178
- def _get_frame_group_info(visu_pars):
1179
- frame_group = get_value(visu_pars, 'VisuFGOrderDescDim')
1180
- parser = dict(frame_type=None,
1181
- frame_size=0, matrix_shape=[],
1182
- group_id=[], group_comment=[],
1183
- dependent_vals=[])
1184
- if frame_group is None:
1185
- # there are no frame group exist
1186
- return parser
1187
- else:
1188
- parser['frame_type'] = get_value(visu_pars, 'VisuCoreFrameType')
1189
- for idx, d in enumerate(get_value(visu_pars, 'VisuFGOrderDesc')):
1190
- (num_fg_elements, fg_id, fg_commt,
1191
- valsStart, valsCnt) = d
1192
- # calsCnt = Number of dependent parameters
1193
- # valsStart = index of starting of dependent parameter (described in 'VisuGroupDepVals')
1194
- # e.g. if calcCnt is 2, and valsStart is 1, parameter index will be 1, and 2
1195
- parser['matrix_shape'].append(num_fg_elements)
1196
- parser['group_id'].append(fg_id)
1197
- parser['group_comment'].append(fg_commt)
1198
- parser['dependent_vals'].append([])
1199
- if valsCnt > 0:
1200
- for i in range(valsCnt):
1201
- parser['dependent_vals'][idx].append(get_value(visu_pars, 'VisuGroupDepVals')[valsStart + i])
1202
- parser['frame_size'] = reduce(lambda x, y: x * y, parser['matrix_shape'])
1203
- return parser
1204
-
1205
- @property
1206
- def _subject(self):
1207
- return self._pvobj._subject
1208
-
1209
- @property
1210
- def _acqp(self):
1211
- return self._pvobj._acqp
1212
-
1213
- @property
1214
- def _method(self):
1215
- return self._pvobj._method
1216
-
1217
- @property
1218
- def _avail(self):
1219
- return self._pvobj.avail_reco_id
1220
-