biomedisa 2024.5.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44) hide show
  1. biomedisa/__init__.py +53 -0
  2. biomedisa/__main__.py +18 -0
  3. biomedisa/biomedisa_features/DataGenerator.py +299 -0
  4. biomedisa/biomedisa_features/DataGeneratorCrop.py +121 -0
  5. biomedisa/biomedisa_features/PredictDataGenerator.py +87 -0
  6. biomedisa/biomedisa_features/PredictDataGeneratorCrop.py +74 -0
  7. biomedisa/biomedisa_features/__init__.py +0 -0
  8. biomedisa/biomedisa_features/active_contour.py +434 -0
  9. biomedisa/biomedisa_features/amira_to_np/__init__.py +0 -0
  10. biomedisa/biomedisa_features/amira_to_np/amira_data_stream.py +980 -0
  11. biomedisa/biomedisa_features/amira_to_np/amira_grammar.py +369 -0
  12. biomedisa/biomedisa_features/amira_to_np/amira_header.py +290 -0
  13. biomedisa/biomedisa_features/amira_to_np/amira_helper.py +72 -0
  14. biomedisa/biomedisa_features/assd.py +167 -0
  15. biomedisa/biomedisa_features/biomedisa_helper.py +801 -0
  16. biomedisa/biomedisa_features/create_slices.py +286 -0
  17. biomedisa/biomedisa_features/crop_helper.py +586 -0
  18. biomedisa/biomedisa_features/curvop_numba.py +149 -0
  19. biomedisa/biomedisa_features/django_env.py +172 -0
  20. biomedisa/biomedisa_features/keras_helper.py +1219 -0
  21. biomedisa/biomedisa_features/nc_reader.py +179 -0
  22. biomedisa/biomedisa_features/pid.py +52 -0
  23. biomedisa/biomedisa_features/process_image.py +253 -0
  24. biomedisa/biomedisa_features/pycuda_test.py +84 -0
  25. biomedisa/biomedisa_features/random_walk/__init__.py +0 -0
  26. biomedisa/biomedisa_features/random_walk/gpu_kernels.py +183 -0
  27. biomedisa/biomedisa_features/random_walk/pycuda_large.py +826 -0
  28. biomedisa/biomedisa_features/random_walk/pycuda_large_allx.py +806 -0
  29. biomedisa/biomedisa_features/random_walk/pycuda_small.py +414 -0
  30. biomedisa/biomedisa_features/random_walk/pycuda_small_allx.py +493 -0
  31. biomedisa/biomedisa_features/random_walk/pyopencl_large.py +760 -0
  32. biomedisa/biomedisa_features/random_walk/pyopencl_small.py +441 -0
  33. biomedisa/biomedisa_features/random_walk/rw_large.py +390 -0
  34. biomedisa/biomedisa_features/random_walk/rw_small.py +310 -0
  35. biomedisa/biomedisa_features/remove_outlier.py +399 -0
  36. biomedisa/biomedisa_features/split_volume.py +274 -0
  37. biomedisa/deeplearning.py +519 -0
  38. biomedisa/interpolation.py +371 -0
  39. biomedisa/mesh.py +406 -0
  40. biomedisa-2024.5.14.dist-info/LICENSE +191 -0
  41. biomedisa-2024.5.14.dist-info/METADATA +306 -0
  42. biomedisa-2024.5.14.dist-info/RECORD +44 -0
  43. biomedisa-2024.5.14.dist-info/WHEEL +5 -0
  44. biomedisa-2024.5.14.dist-info/top_level.txt +1 -0
@@ -0,0 +1,369 @@
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ Grammar to parse headers in Amira files
4
+ """
5
+
6
import collections
import re
import sys
import time
import warnings

from pprint import pprint
12
+
13
# Token type tags: every token produced by Tokenizer is a 5-tuple whose first
# element is one of these strings.
TOKEN_NAME = 'name'
TOKEN_NUMBER = 'number'
TOKEN_STRING = 'string'
TOKEN_OP = 'op'
TOKEN_COMMENT = 'comment'
TOKEN_NEWLINE = 'newline'
TOKEN_COMMA = 'comma'
TOKEN_COLON = 'colon'
TOKEN_EQUALS = 'equals'
TOKEN_ENDMARKER = 'endmarker'
TOKEN_BYTEDATA_INFO = 'bytedata_info'
25
class Matcher:
    """Callable predicate: does a compiled regex match a given string?"""

    def __init__(self, rexp):
        # compiled regular expression consulted by __call__
        self.rexp = rexp

    def __call__(self, buf):
        """Return True when ``buf`` matches the stored pattern, else False."""
        return self.rexp.match(buf) is not None
31
+
32
# Matches the first-line designation comment of an Amira/Avizo file, e.g.
# "# AmiraMesh BINARY-LITTLE-ENDIAN 2.1".  Captures file type, data format
# and version; the optional "3D" marker is a non-capturing group.
re_file_info = re.compile(r'^#(\s*)(AmiraMesh|HyperSurface|Avizo)(\s*)(?:3D)?(\s+)(BINARY-LITTLE-ENDIAN|BINARY|ASCII)(\s+)(\d+\.\d*)$')
is_file_info = Matcher(re_file_info)

# Matches a double-quoted string literal occupying the whole token.
re_string_literal = re.compile(r'^".*"$')
is_string_literal = Matcher(re_string_literal)

# Matches a data-pointer declaration line such as "Lattice { byte Data } @1"
# with an optional "(format,length)" suffix after the "@N" index.
re_bytedata_info = re.compile(r'^(Lattice)(\s+)\{(\s*)(\w+)(\s+)(\w+)(\s*)\}(\s+)@(\d+)(\((\w+),(\d+)\))?$')
is_bytedata_info = Matcher(re_bytedata_info)

# Matches integer or floating-point literals, optionally in scientific notation.
re_float = re.compile(r'^[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?$')
is_number = Matcher(re_float)

# Matches bare identifiers (letters, digits, _ & : -) with an optional
# single-digit "[i]" suffix.
re_name = re.compile(r'^[a-zA-Z0-9_&:-]+(\[\d\])?$')
is_name = Matcher(re_name)

# Splits a line on unquoted whitespace while keeping quoted strings intact
# (the quoted group is returned as a single part by re.split).
re_quoted_whitespace_splitter = re.compile(r'(".*")|[ \t\n]')
48
+
49
def lim_repr(value):
    """Return ``repr(value)`` truncated to at most 100 characters.

    Longer representations are cut to 97 characters plus an ellipsis so
    they stay readable inside error messages.
    """
    text = repr(value)
    return text if len(text) <= 100 else text[:97] + '...'
54
+
55
class Tokenizer:
    """Lexer for Amira(Mesh)/Avizo header text.

    Splits the header into ``(type, value, start, end, line)`` token tuples
    and accumulates file-level metadata (designation, data pointers,
    defines and parameters) as a side effect of tokenization.
    """

    def __init__( self, str ):
        # NOTE: the parameter is named ``str`` in the original API (shadows
        # the builtin); kept unchanged for backward compatibility.
        self.buf = str                 # unconsumed header text
        self.last_tokens = []          # rolling window of the last 3 tokens
        self._designation = {}         # file type / format / version info
        self._data_pointers = {}       # "@N" byte-data declarations
        self._definitions = {}         # "define" key/value pairs
        self._parameters = {}          # top-level parameter blocks

    def add_defines(self, define_dict ):
        """Merge a parsed ``define`` block into the collected definitions."""
        self._definitions.update(define_dict)

    def add_parameters(self, dict ):
        """Merge a parsed top-level parameter block into the collection."""
        # NOTE: parameter name ``dict`` shadows the builtin; kept unchanged.
        self._parameters.update(dict)

    def get_tokens( self ):
        """Yield tokens, extracting the file designation from the first one."""
        # keep a running accumulation of the last 3 tokens
        for token_enum, token in enumerate(self._get_tokens()):
            self.last_tokens.append( token )
            while len(self.last_tokens) > 3:
                self.last_tokens.pop(0)
            if token_enum == 0:
                # The very first token should be the designation comment,
                # e.g. "# AmiraMesh BINARY-LITTLE-ENDIAN 2.1".
                if token[0] == TOKEN_COMMENT:
                    matchobj = re_file_info.match( token[1] )
                    if matchobj is not None:
                        items = list(filter(lambda x : x is not None and len(x.strip())>0, matchobj.groups()))
                        # NOTE(review): "3D" can never appear in ``items``
                        # because the (?:3D)? group in re_file_info is
                        # non-capturing — this branch looks dead; confirm
                        # before relying on 'dimension'.
                        if "3D" in items:
                            self._designation = {'filetype':items[0],
                                                 'dimension':items[1],
                                                 'format': items[-2],
                                                 'version':items[-1]}
                        else:
                            self._designation = {'filetype':items[0],
                                                 'format': items[-2],
                                                 'version':items[-1]}
                else:
                    # BUGFIX: ``warnings`` was used without being imported,
                    # turning this warning into a NameError (import added to
                    # the module imports).
                    warnings.warn('Unknown file type. Parsing may fail.')
            yield token

    def _get_tokens( self ):
        """Tokenize the buffer line by line (generator)."""
        newline = '\n'
        lineno = 0
        while len(self.buf) > 0:
            # ---- pull the next line off the buffer ------------------------
            # ROBUSTNESS FIX: tolerate a missing trailing newline on the last
            # line instead of raising ValueError from str.index().
            idx = self.buf.find(newline)
            if idx == -1:
                this_line, self.buf = self.buf + newline, ''
            else:
                this_line, self.buf = self.buf[:idx + 1], self.buf[idx + 1:]
            lineno += 1

            # ---- classify the line and emit its tokens --------------------
            if this_line.lstrip().startswith('#'):
                # whole-line comment, followed by its newline token
                yield ( TOKEN_COMMENT, this_line[:-1], (lineno,0), (lineno, len(this_line)-1), this_line )
                yield ( TOKEN_NEWLINE, this_line[-1:], (lineno,len(this_line)-1), (lineno, len(this_line)), this_line )
            elif this_line == newline:
                yield ( TOKEN_NEWLINE, this_line, (lineno,0), (lineno, 1), this_line )
            elif is_bytedata_info( this_line ):
                # e.g. "Lattice { byte Data } @1" — record the data pointer
                matchobj = re_bytedata_info.match( this_line )
                items = list(filter(lambda x : x is not None and len(x.strip())>0, matchobj.groups()))
                assert(len(items)>=4)
                esdict = {'pointer_name':items[0],
                          'data_type':items[1],
                          'data_name':items[2],
                          'data_index': int(items[3])}
                if len(items)>4:
                    # optional "(format,length)" suffix after "@N"
                    esdict['data_format'] = items[-2]
                    esdict['data_length'] = int(items[-1])
                self._data_pointers[items[3]]=esdict
                yield ( TOKEN_BYTEDATA_INFO, this_line, (lineno,0), (lineno, 1), this_line )
            else:
                parts = re_quoted_whitespace_splitter.split(this_line)
                parts.append(newline) # terminate the token list with a newline
                parts = [p for p in parts if p is not None and len(p)]

                # the token just before the trailing newline may carry a comma
                maybe_comma_part_idx = len(parts)-2 if len(parts) >= 2 else None

                colno = 0
                for part_idx, part in enumerate(parts):
                    startcol = colno
                    endcol = len(part)+startcol
                    colno = endcol + 1

                    if part_idx == maybe_comma_part_idx:
                        if len(part) > 1 and part.endswith(','):
                            # Remove the comma from further processing
                            part = part[:-1]
                            endcol -= 1
                            # Emit a comma token.
                            yield ( TOKEN_COMMA, part, (lineno,endcol), (lineno, endcol+1), this_line )
                    if part in ['{','}']:
                        yield ( TOKEN_OP, part, (lineno,startcol), (lineno, endcol), this_line )
                    elif part==newline:
                        yield ( TOKEN_NEWLINE, part, (lineno,startcol-1), (lineno, endcol-1), this_line )
                    elif part==':':
                        yield ( TOKEN_COLON, part, (lineno,startcol-1), (lineno, endcol-1), this_line )
                    elif part=='=':
                        yield ( TOKEN_EQUALS, part, (lineno,startcol-1), (lineno, endcol-1), this_line )
                    elif part==',':
                        yield ( TOKEN_COMMA, part, (lineno,startcol-1), (lineno, endcol-1), this_line )
                    elif is_number(part):
                        yield ( TOKEN_NUMBER, part, (lineno,startcol), (lineno, endcol), this_line )
                    elif is_name(part):
                        yield ( TOKEN_NAME, part, (lineno,startcol), (lineno, endcol), this_line )
                    elif is_string_literal(part):
                        yield ( TOKEN_STRING, part, (lineno,startcol), (lineno, endcol), this_line )
                    else:
                        raise NotImplementedError( 'cannot tokenize part %r (line %r)'%(lim_repr(part), lim_repr(this_line)) )
        yield ( TOKEN_ENDMARKER, '', (lineno,0), (lineno, 0), '' )
158
+
159
def atom( src, token, tokenizer, level=0, block_descent=False ):
    """Recursively consume one grammatical 'atom' from the token stream.

    :param src: token generator (as produced by ``Tokenizer.get_tokens``)
    :param tuple token: current token ``(type, value, start, end, line)``
    :param Tokenizer tokenizer: the tokenizer driving ``src``
    :param int level: recursion depth
    :param bool block_descent: when True a name is returned verbatim and
        descending into a '{' block raises RuntimeError
    :return: ``(result, end_block)`` where ``result`` is the parsed value
        (str, int/float, dict, OrderedDict or None) and ``end_block`` is
        None, 'newline' or 'block' describing what terminated the atom
    """
    space = ' '*level  # NOTE(review): computed but unused — presumably a debug aid
    end_block = None
    if token[0]==TOKEN_NAME:
        name = token[1]

        if block_descent:
            # caller forbade descending (after a colon): return the bare name
            result = name
        else:
            next_token = next(src)

            if next_token[0] == TOKEN_OP and next_token[1]=='{':
                # this name begins a '{' block
                value, ended_with = atom( src, next_token, tokenizer, level=level+1 ) # create {}
                result = {name: value}

            else:
                # continue until newline: gather the values following the name
                elements = []
                ended_with = None
                force_colon = False
                while not (next_token[0] == TOKEN_NEWLINE):

                    if next_token[0] == TOKEN_COLON:
                        # after a colon, values are taken verbatim (no descent)
                        force_colon = True
                        next_token = next(src)

                    value, ended_with = atom( src, next_token, tokenizer, level=level+1, block_descent=force_colon ) # fill element of []
                    elements.append( value )
                    if ended_with is not None:
                        break
                    next_token = next(src)

                if ended_with is not None:
                    end_block = ended_with
                else:
                    # loop ended because we hit a newline
                    end_block = 'newline'

                # drop Nones (comments, commas) and collapse 0/1/many values
                elements = [e for e in elements if e is not None]
                if len(elements)==0:
                    result = name
                elif len(elements)==1:
                    result = {name: elements[0]}
                else:
                    result = {name: elements}
    elif token[0] in [TOKEN_COMMENT, TOKEN_COMMA]:
        # comments and commas carry no value
        result = None
    elif token[0] == TOKEN_OP and token[1]=='}':
        # closing brace: signal end of the enclosing block
        result = None
        end_block = 'block'
    elif token[0] == TOKEN_NEWLINE:
        result = None
        end_block = 'newline'
    elif token[0] == TOKEN_OP and token[1]=='{':
        if block_descent:
            raise RuntimeError('descent blocked but encountered block')

        elements = []

        # parse to end of block
        next_token = next(src)
        while not (next_token[0] == TOKEN_OP and next_token[1] == '}'):
            value, ended_with = atom( src, next_token, tokenizer, level=level+1 ) # fill element of {}
            elements.append( value )
            if ended_with=='block':
                break
            next_token = next(src)

        # merge the collected elements into one ordered mapping; bare names
        # become keys with value None
        elements = [e for e in elements if e is not None]
        result = collections.OrderedDict()
        for element in elements:
            if isinstance(element,dict):
                for key in element:
                    assert key not in result
                    result[key] = element[key]
            else:
                assert isinstance(element,type(u'unicode string'))
                assert element not in result
                result[element] = None
    elif token[0]==TOKEN_NUMBER:
        # prefer int, fall back to float
        try:
            value = int(token[1])
        except ValueError:
            value = float(token[1])
        result = value
    elif token[0]==TOKEN_STRING:
        value = token[1]
        result = value
    elif token[0]==TOKEN_BYTEDATA_INFO:
        # data-pointer lines are recorded by the tokenizer itself
        result = None
    elif token[0]==TOKEN_EQUALS:
        result = None
    else:
        raise ValueError('unexpected token type: %r'%(token[0],))

    return result, end_block
256
+
257
def detect_format(fn, format_bytes=50, *args, **kwargs):
    """Detect Amira file format (AmiraMesh, Avizo or HyperSurface)

    :param str fn: file name
    :param int format_bytes: number of bytes in which to search for the format [default: 50]
    :return str file_format: ``AmiraMesh``, ``Avizo``, ``HyperSurface`` or
        ``Undefined`` when none of the keywords is found
    """
    assert format_bytes > 0

    with open(fn, 'rb') as f:
        rough_header = f.read(format_bytes)

    # Search the raw bytes directly instead of regex-matching str(bytes);
    # keyword precedence (AmiraMesh, then Avizo, then HyperSurface) is the
    # same as before.
    if b'AmiraMesh' in rough_header:
        file_format = "AmiraMesh"
    elif b'Avizo' in rough_header:
        file_format = "Avizo"
    elif b'HyperSurface' in rough_header:
        file_format = "HyperSurface"
    else:
        file_format = "Undefined"

    return file_format
279
+
280
+
281
def get_header(fn, file_format, header_bytes=536870912, *args, **kwargs):
    """Apply rules for detecting the boundary of the header

    :param str fn: file name
    :param str file_format: either ``AmiraMesh`` or ``Avizo`` or ``HyperSurface``
    :param int header_bytes: initial number of bytes in which to search for
        the header boundary; grown in steps of the same size if not found
        [default: 536870912]
    :return bytes data: the header as per the ``file_format``
    :raises ValueError: if the end-of-header marker is not present in the file
    """
    assert header_bytes > 0
    assert file_format in ['AmiraMesh', 'Avizo', 'HyperSurface']
    header_count = header_bytes

    while True:
        m = None
        with open(fn, 'rb') as f:
            rough_header = f.read(header_count)
        if file_format == "AmiraMesh" or file_format == "Avizo":
            # the data section starts right after the first "@1" marker line
            m = re.search(b'(?P<data>)\n@1\n', rough_header, flags=re.S)
        elif file_format == "HyperSurface":
            m = re.search(b'(?P<data>)\nVertices [0-9]*\n', rough_header, flags=re.S)
        elif file_format == "Undefined":
            # NOTE(review): unreachable — the assert above already rejects
            # "Undefined" with AssertionError before this branch runs.
            raise ValueError("Unable to parse undefined file")
        if m is None:
            # BUGFIX: previously this looped forever when the marker was
            # absent; a short read means the whole file has been consumed.
            if len(rough_header) < header_count:
                raise ValueError("end-of-header marker not found in %r" % fn)
            header_count += header_bytes
        else:
            # the header is everything before the matched marker
            return rough_header[:m.start()]
311
+
312
def parse_header(data, *args, **kwargs):
    """Parse a header string using the grammar specified in this module.

    :param str data: delimited header text to be parsed for metadata
    :return dict parsed_data: structured metadata with the keys
        ``designation``, ``definitions``, ``data_pointers`` and ``parameters``
    """
    lexer = Tokenizer(data)
    stream = lexer.get_tokens()

    tok = next(stream)
    while tok[0] != TOKEN_ENDMARKER:
        # consume one top-level atom per iteration; 'define' blocks are
        # routed to the definitions, everything else to the parameters
        parsed, _ended = atom(stream, tok, lexer)
        if isinstance(parsed, dict):
            if 'define' in parsed:
                lexer.add_defines(parsed['define'])
            else:
                lexer.add_parameters(parsed)
        tok = next(stream)

    return {
        'designation': lexer._designation,
        'definitions': lexer._definitions,
        'data_pointers': lexer._data_pointers,
        'parameters': lexer._parameters,
    }
339
+
340
def get_parsed_header(data, *args, **kwargs):
    """Detect, extract and parse a header in a single call.

    :param data: file name (``str``) or a raw header (``bytes``)
    :return tuple: ``(raw_header, parsed_data)`` — the header bytes and the
        structured metadata produced by :py:func:`parse_header`
    """
    assert(isinstance(data, str) or isinstance(data, bytes))

    # if data is str, it is a filename
    if isinstance(data, str):
        file_format = detect_format(data, *args, **kwargs)
        raw_header = get_header(data, file_format, *args, **kwargs)
    else: # otherwise data is raw header in bytes
        raw_header = data

    # remove the Parameters block (its arbitrary nested content is not
    # handled by the simple grammar)
    p = raw_header.find(b'Parameters {')
    x = raw_header.find(b'Lattice {')
    # BUGFIX: find() returns -1 when a marker is missing, and slicing with
    # -1 silently corrupted the header; only cut when both markers exist.
    if p != -1 and x != -1:
        header_wo_params = raw_header[:p] + raw_header[x:]
    else:
        header_wo_params = raw_header

    # BUGFIX: previously str(bytes).strip("b'") was used, which strips
    # legitimate leading/trailing 'b'/"'" characters and leaves escape
    # sequences such as "\\t" unexpanded; decode the bytes instead
    # (latin-1 maps every byte value, so this never fails).
    header_wo_params = header_wo_params.decode('ISO-8859-1')

    # the grammar expects the header to end in a newline
    if not header_wo_params.endswith('\n'):
        header_wo_params += '\n'

    parsed_data = parse_header(header_wo_params, *args, **kwargs)

    return raw_header, parsed_data
@@ -0,0 +1,290 @@
1
+ # -*- coding: utf-8 -*-
2
+ # amira_header.py
3
+ """
4
+ Module to convert parsed data from an Amira header into a set of nested objects. The key class is :py:class:``AmiraHeader``.
5
+
6
+ Usage:
7
+
8
+ ::
9
+
10
+ >>> from amira.header import AmiraHeader
11
+ >>> ah = AmiraHeader.from_file('file.am')
12
+ >>> print(ah)
13
+
14
+ Each nested object is constructed from the :py:class:``Block`` class defined in this module.
15
+
16
+ There are four top-level attributes that every ``AmiraHeader`` will have:
17
+
18
+ * designation
19
+
20
+ * definitions
21
+
22
+ * parameters
23
+
24
+ * data_pointers
25
+
26
+ Each attribute can be queried using the ``attrs`` attribute.
27
+
28
+ ::
29
+
30
+ >>> print(ah.data_pointers.attrs)
31
+ ['data_pointer_1', 'data_pointer_2', 'data_pointer_3', 'data_pointer_4', 'data_pointer_5', 'data_pointer_6']
32
+ >>> print(ah.data_pointers.data_pointer_1)
33
+ data_pointer_1
34
+ pointer_name: VERTEX
35
+ data_format: None
36
+ data_dimension: 3
37
+ data_type: float
38
+ data_name: VertexCoordinates
39
+ data_index: 1
40
+ data_length: None
41
+
42
+ Data pointers are identified by the name ``data_pointer_<n>``.
43
+
44
+ """
45
+ import sys
46
+ import pprint
47
+
48
+ from .amira_grammar import get_parsed_header
49
+
50
class Block(object):
    """Generic container onto which named attributes are loaded dynamically."""

    def __init__(self, name):
        self.name = name        # display name of this block
        self.attrs = list()     # insertion-ordered list of attribute names

    def add_attr(self, name, value):
        """Attach ``value`` to this block under attribute ``name``."""
        key = str(name)
        setattr(self, key, value)
        self.attrs.append(key)

    def __str__(self):
        pieces = ["{}\n".format(self.name)]
        for attr in self.attrs:
            value = getattr(self, str(attr))
            if isinstance(value, Block):
                # nested blocks render themselves recursively
                pieces.append("{}\n".format(value))
            else:
                pieces.append("{}: {}\n".format(attr, value))
        return "".join(pieces)

    @property
    def ids(self):
        """Ids of all Material sub-blocks (only valid on the Materials block)."""
        assert self.name == "Materials"
        return [getattr(getattr(self, attr), 'Id')
                for attr in self.attrs
                if hasattr(getattr(self, attr), 'Id')]

    def __getitem__(self, index):
        """Return the Material sub-block whose ``Id`` equals ``index``, or None."""
        assert self.name == "Materials"
        assert isinstance(index, int)
        for attr in self.attrs:
            candidate = getattr(self, attr)
            if hasattr(candidate, 'Id') and getattr(candidate, 'Id') == index:
                return candidate
        return None
+
95
+
96
class AmiraHeader(object):
    """Class to encapsulate Amira metadata.

    Wraps the raw header bytes and the parsed metadata produced by
    :py:func:`get_parsed_header` and exposes them as nested ``Block``
    objects via the ``designation``, ``definitions``, ``parameters`` and
    ``data_pointers`` properties.
    """

    def __init__(self, raw_data, parsed_data):
        self._raw_data = raw_data          # raw header bytes
        self._parsed_data = parsed_data    # dict from the grammar parser
        self._load()

    @classmethod
    def from_file(cls, fn, *args, **kwargs):
        """Constructor to build an AmiraHeader object from a file

        :param str fn: Amira file
        :return ah: object of class ``AmiraHeader`` containing header metadata
        :rtype: ah: :py:class:`ahds.header.AmiraHeader`
        """
        raw, parsed = get_parsed_header(fn, *args, **kwargs)
        return cls(raw, parsed)

    @classmethod
    def from_str(cls, raw, *args, **kwargs):
        """Constructor to build an AmiraHeader object from a string

        :param str raw: Amira raw header
        :return ah: object of class ``AmiraHeader`` containing header metadata
        :rtype: ah: :py:class:`ahds.header.AmiraHeader`
        """
        raw, parsed = get_parsed_header(raw, *args, **kwargs)
        return cls(raw, parsed)

    @property
    def raw_header(self):
        """Show the raw header data"""
        return self._raw_data

    @property
    def parsed_header(self):
        """Show the parsed header data"""
        return self._parsed_data

    def _load(self):
        self._load_designation(self._parsed_data['designation'])
        self._load_definitions(self._parsed_data['definitions'])
        self._load_data_pointers(self._parsed_data['data_pointers'])
        # BUGFIX: ``self._parameters`` was never initialised (the
        # ``_load_parameters`` call below is disabled because the grammar
        # emits a plain dict rather than the nested list structure that
        # method expects), so the ``parameters`` property and ``__str__``
        # raised AttributeError.  Expose the parsed dict as-is instead.
        self._parameters = self._parsed_data.get('parameters', {})
        # self._load_parameters(self._parsed_data['parameters'])

    @property
    def designation(self):
        """Designation of the Amira file defined in the first row

        Designations consist of some or all of the following data:

        * filetype e.g. ``AmiraMesh`` or ``Avizo`` or ``HyperSurface``

        * dimensions e.g. ``3D``

        * format e.g. ``BINARY-LITTLE-ENDIAN``

        * version e.g. ``2.1``

        * extra format e.g. ``<hxsurface>``
        """
        return self._designation

    @property
    def definitions(self):
        """Definitions consist of a key-value pair specified just after the
        designation preceded by the key-word 'define'
        """
        return self._definitions

    @property
    def parameters(self):
        """The set of parameters for each of the segments specified
        e.g. colour, data pointer etc.
        """
        return self._parameters

    @property
    def data_pointers(self):
        """The list of data pointers together with a name, data type, dimension,
        index, format and length
        """
        return self._data_pointers

    def _load_designation(self, block_data):
        # every field defaults to None when absent, matching the original
        # one-branch-per-field layout
        self._designation = Block('designation')
        for field in ('filetype', 'dimension', 'format', 'version', 'extra_format'):
            self._designation.add_attr(field, block_data.get(field))

    def _load_definitions(self, block_data):
        self._definitions = Block('definitions')
        for key in block_data:
            self._definitions.add_attr(key, block_data[key])

    def _load_parameters(self, block_data):
        # NOTE(review): currently unused (see _load); expects a list of
        # dicts with 'nested_parameter'/'inline_parameter' entries, which
        # the bundled grammar does not produce — confirm before enabling.
        self._parameters = Block('parameters')
        for parameter in block_data:
            if 'nested_parameter' in parameter:
                nested_parameter = parameter['nested_parameter']
                self._parameters.add_attr(nested_parameter['nested_parameter_name'], Block(nested_parameter['nested_parameter_name']))
                nested_parameter_obj = getattr(self._parameters, str(nested_parameter['nested_parameter_name']))
                for nested_parameter_value in nested_parameter['nested_parameter_values']:
                    if 'attributes' in nested_parameter_value:
                        if nested_parameter_value['attributes']:
                            nested_parameter_obj.add_attr(nested_parameter_value['name'], Block(nested_parameter_value['name']))
                            nested_parameter_value_obj = getattr(nested_parameter_obj, str(nested_parameter_value['name']))
                            for attribute in nested_parameter_value['attributes']:
                                nested_parameter_value_obj.add_attr(attribute['attribute_name'], attribute['attribute_value'])
                        else:
                            nested_parameter_obj.add_attr(nested_parameter_value['name'], None)
                    elif 'nested_attributes' in nested_parameter_value:
                        nested_parameter_obj.add_attr(nested_parameter_value['name'], Block(nested_parameter_value['name']))
                        for nested_attribute in nested_parameter_value['nested_attributes']:
                            nested_attribute_obj = getattr(nested_parameter_obj, str(nested_parameter_value['name']))
                            nested_attribute_obj.add_attr(nested_attribute['nested_attribute_name'], Block(nested_attribute['nested_attribute_name']))
                            nested_attribute_value_obj = getattr(nested_attribute_obj, str(nested_attribute['nested_attribute_name']))
                            for nested_attribute_value in nested_attribute['nested_attribute_values']:
                                nested_attribute_value_obj.add_attr(nested_attribute_value['nested_attribute_value_name'], nested_attribute_value['nested_attribute_value_value'])
                    else:
                        nested_parameter_obj.add_attr(nested_parameter_value['name'], nested_parameter_value['inline_parameter_value'])
            if 'inline_parameter' in parameter:
                inline_parameter = parameter['inline_parameter']
                self._parameters.add_attr(inline_parameter['inline_parameter_name'], inline_parameter['inline_parameter_value'])

    def _load_data_pointers(self, block_data):
        self._data_pointers = Block('data_pointers')
        for data_index in block_data:
            data_pointer_name = "data_pointer_{}".format(data_index)
            pointer_block = Block(data_pointer_name)
            self._data_pointers.add_attr(data_pointer_name, pointer_block)
            data_pointer = block_data[data_index]
            # missing fields default to None, matching the original layout
            for field in ('pointer_name', 'data_format', 'data_dimension',
                          'data_type', 'data_name', 'data_index', 'data_length'):
                pointer_block.add_attr(field, data_pointer.get(field))

    def __repr__(self):
        # BUGFIX: was len(self), but AmiraHeader defines no __len__ and the
        # call always raised TypeError; report the raw header size instead.
        return "<AmiraHeader with {:,} bytes>".format(len(self._raw_data))

    def __str__(self):
        string = "*" * 50 + "\n"
        string += "AMIRA HEADER\n"
        string += "-" * 50 + "\n"
        string += "{}\n".format(self.designation)
        string += "-" * 50 + "\n"
        string += "{}\n".format(self.definitions)
        string += "-" * 50 + "\n"
        string += "{}\n".format(self.parameters)
        string += "-" * 50 + "\n"
        string += "{}\n".format(self.data_pointers)
        string += "*" * 50
        return string