VTKio 0.1.0.dev2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vtkio/__init__.py +27 -0
- vtkio/_git.py +45 -0
- vtkio/helpers.py +110 -0
- vtkio/reader/__init__.py +15 -0
- vtkio/reader/hdf5.py +379 -0
- vtkio/reader/xml.py +712 -0
- vtkio/simplified.py +621 -0
- vtkio/utilities.py +222 -0
- vtkio/version.py +78 -0
- vtkio/vtk_cell_types.py +98 -0
- vtkio/vtk_structures.py +306 -0
- vtkio/writer/__init__.py +16 -0
- vtkio/writer/pvd_writer.py +132 -0
- vtkio/writer/vtkhdf.py +1184 -0
- vtkio/writer/writers.py +393 -0
- vtkio/writer/xml_writer.py +1597 -0
- vtkio-0.1.0.dev2.dist-info/METADATA +86 -0
- vtkio-0.1.0.dev2.dist-info/RECORD +20 -0
- vtkio-0.1.0.dev2.dist-info/WHEEL +4 -0
- vtkio-0.1.0.dev2.dist-info/licenses/LICENSE +28 -0
|
@@ -0,0 +1,1597 @@
|
|
|
1
|
+
#!/usr/bin/env python
|
|
2
|
+
"""
|
|
3
|
+
VTKWriter Class for creating VTK's XML based format.
|
|
4
|
+
|
|
5
|
+
Supports ASCII, Base64 and Appended Raw encoding of data.
|
|
6
|
+
|
|
7
|
+
Classes
|
|
8
|
+
_______
|
|
9
|
+
XML_MultiBlockWriter()
|
|
10
|
+
Multiblock writing class for XML files
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
Created at 13:01, 24 Feb, 2022
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
__author__ = 'J.P. Morrissey'
|
|
17
|
+
__copyright__ = 'Copyright 2022-2025'
|
|
18
|
+
__maintainer__ = 'J.P. Morrissey'
|
|
19
|
+
__email__ = 'morrissey.jp@gmail.com'
|
|
20
|
+
__status__ = 'Development'
|
|
21
|
+
|
|
22
|
+
# Standard Library
|
|
23
|
+
from dataclasses import dataclass
|
|
24
|
+
import struct
|
|
25
|
+
import sys
|
|
26
|
+
from pathlib import Path
|
|
27
|
+
|
|
28
|
+
import numpy as np
|
|
29
|
+
import pybase64
|
|
30
|
+
|
|
31
|
+
## Local Imports
|
|
32
|
+
from ..helpers import _parse_bytecount_type
|
|
33
|
+
from ..utilities import first_key, flatten
|
|
34
|
+
from ..vtk_cell_types import *
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
@dataclass
class VTK_xml_filetype:
    """Pairing of a VTK XML dataset type name with its file extension."""

    # dataset type name as used in the <VTKFile type="..."> attribute
    name: str
    # conventional file extension for this dataset type (e.g. '.vti')
    extension: str
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
# Singleton descriptors for each supported VTK XML dataset type.
VTKImageData = VTK_xml_filetype('ImageData', '.vti')
VTKPolyData = VTK_xml_filetype('PolyData', '.vtp')
VTKRectilinearGrid = VTK_xml_filetype('RectilinearGrid', '.vtr')
VTKStructuredGrid = VTK_xml_filetype('StructuredGrid', '.vts')
VTKUnstructuredGrid = VTK_xml_filetype('UnstructuredGrid', '.vtu')

# NOTE(review): 'vtk_multiblock_writer' is not defined anywhere in the visible
# part of this module — presumably it is meant to be exported from here; verify
# this __all__ entry points at a real name.
__all__ = ['vtk_multiblock_writer']
|
|
50
|
+
|
|
51
|
+
class XMLWriterBase:
    """Base class for XML VTK file writers."""

    # text encoding used when tag strings are written to the binary file handle
    _text_encoding = 'utf-8'

    # dataset type -> file extension (used to force the correct output suffix)
    _filetypes = {
        "ImageData": ".vti",
        "PolyData": ".vtp",
        "RectilinearGrid": ".vtr",
        "StructuredGrid": ".vts",
        "UnstructuredGrid": ".vtu",
        "vtkMultiBlockDataSet": ".vtm"
    }

    # numpy dtype name -> struct format character (for packing binary headers)
    _np_to_struct = {'int8': 'b',
                     'uint8': 'B',
                     'int16': 'h',
                     'uint16': 'H',
                     'int32': 'i',
                     'uint32': 'I',
                     'int64': 'q',
                     'uint64': 'Q',
                     'float32': 'f',
                     'float64': 'd'
                     }

    # Map numpy to VTK data types
    # NOTE(review): Python's builtin float is 64-bit, so "float" -> "Float32"
    # looks like a deliberate VTK default rather than a dtype equivalence —
    # confirm against callers before relying on it.
    _np_to_vtk = {
        "int8": "Int8",
        "uint8": "UInt8",
        "int16": "Int16",
        "uint16": "UInt16",
        "int": "Int32",
        "int32": "Int32",
        "uint32": "UInt32",
        "int64": "Int64",
        "uint64": "UInt64",
        "float": "Float32",
        "float32": "Float32",
        "float64": "Float64",
        "StringDType()": "String",
    }

    # Map VTK data types to numpy.
    # Duplicate values ("Int32", "Float32") collapse to the LAST numpy key seen
    # above, i.e. "Int32" -> "int32" and "Float32" -> "float32".
    _vtk_to_np = {v: k for k, v in _np_to_vtk.items()}

    # CELL TYPES: VTK cell type name -> cell type object from vtk_cell_types
    _cellType = {
        "Vertex": VTK_Vertex,
        "PolyVertex": VTK_PolyVertex,
        "Line": VTK_Line,
        "PolyLine": VTK_PolyLine,
        "Triangle": VTK_Triangle,
        "TriangleStrip": VTK_TriangleStrip,
        "Polygon": VTK_Polygon,
        "Pixel": VTK_Pixel,
        "Quad": VTK_Quad,
        "Tetra": VTK_Tetra,
        "Voxel": VTK_Voxel,
        "Hexahedron": VTK_Hexahedron,
        "Wedge": VTK_Wedge,
        "Pyramid": VTK_Pyramid,
        "Pentagonal_Prism": VTK_Pentagonal_Prism,
        "Hexagonal_Prism": VTK_Hexagonal_Prism,
        "Quadratic_Edge": VTK_Quadratic_Edge,
        "Quadratic_Triangle": VTK_Quadratic_Triangle,
        "Quadratic_Quad": VTK_Quadratic_Quad,
        "Quadratic_Tetra": VTK_Quadratic_Tetra,
        "Quadratic_Hexahedron": VTK_Quadratic_Hexahedron,
        "Quadratic_Wedge": VTK_Quadratic_Wedge,
        "Quadratic_Pyramid": VTK_Quadratic_Pyramid,
        "BiQuadratic_Quad": VTK_BiQuadratic_Quad,
        "TriQuadratic_Hexahedron": VTK_TriQuadratic_Hexahedron,
        "Quadratic_Linear_Quad": VTK_Quadratic_Linear_Quad,
        "Quadratic_Linear_Wedge": VTK_Quadratic_Linear_Wedge,
        "BiQuadratic_Quadratic_Wedge": VTK_BiQuadratic_Quadratic_Wedge,
        "BiQuadratic_Quadratic_Hexahedron": VTK_BiQuadratic_Quadratic_Hexahedron,
        "BiQuadratic_Triangle": VTK_BiQuadratic_Triangle,
    }

    # VTK data types: scalar type name -> size in bytes
    _DataType = {"Int8": 1,
                 "UInt8": 1,
                 "Int16": 2,
                 "UInt16": 2,
                 "Int32": 4,
                 "UInt32": 4,
                 "Int64": 8,
                 "UInt64": 8,
                 "Float32": 4,
                 "Float64": 8,
                 }
|
|
142
|
+
|
|
143
|
+
def __init__(self, filepath, filetype, encoding='ascii', ascii_precision=16, ascii_ncolumns=6, declaration=True,
             appended_encoding='base64', version='1.0', compression=None):
    """
    Configure an XML VTK writer.

    Parameters
    ----------
    filepath : str or pathlib.Path
        Output location; any existing suffix is replaced with the extension
        matching `filetype` (e.g. '.vti' for ImageData).
    filetype : str
        VTK dataset type; must be a key of `_filetypes`.
    encoding : {'ascii', 'binary', 'base64', 'appended'}, default 'ascii'
        Encoding used for DataArray payloads.
    ascii_precision : int, default 16
        Decimal precision used when writing ASCII float data.
    ascii_ncolumns : int, default 6
        Requested number of columns for ASCII output.
    declaration : bool, default True
        Whether to emit the leading XML declaration.
    appended_encoding : {'base64', 'raw'}, default 'base64'
        Encoding of the <AppendedData> section.
    version : {'1.0', '0.1'}, default '1.0'
        VTK XML format version; selects the block-size header type
        (UInt64 for 1.0, UInt32 for 0.1).
    compression : None, bool or int, default None
        zlib compression level; anything other than "off" currently raises
        NotImplementedError.

    Raises
    ------
    ValueError
        For an invalid encoding, appended encoding, version, or compression level.
    NotImplementedError
        If compression is requested (not yet supported).
    """
    # check if extension provided and strip - correct extension is added next
    self.path = Path(filepath).with_suffix(self._filetypes[filetype])
    self.filetype = filetype
    if encoding not in ['ascii', 'binary', 'base64', 'appended']:
        raise ValueError('Encoding must be ascii, binary, base64 or appended.')
    self.encoding = encoding
    self.ascii_precision = ascii_precision
    self.ascii_ncolumns = ascii_ncolumns
    self._add_declaration = declaration
    self.vtk_version = version
    # header type determines the width of binary block-size headers
    if self.vtk_version == '1.0':
        self.header_type = 'UInt64'
    elif self.vtk_version == '0.1':
        self.header_type = 'UInt32'
    else:
        raise ValueError('Invalid VTK version specified.')

    self._byteorder = 'LittleEndian' if sys.byteorder == "little" else 'BigEndian'
    self._byteorder_char = '<' if sys.byteorder == "little" else '>'

    # set attributes for vtk files that are not multiblock files.
    # NOTE(review): the comparison string 'VTKMultiBlockDataSet' never matches
    # the `_filetypes` key 'vtkMultiBlockDataSet' (lowercase 'v'), so this
    # branch runs for every filetype. Other methods (e.g. open_file) read
    # `_appended_encoding` unconditionally, so "fixing" the casing here would
    # break them — confirm intent before changing.
    if self.filetype != 'VTKMultiBlockDataSet':
        self._appended_data = b''
        if appended_encoding not in ['base64', 'raw']:
            raise ValueError('Appended encoding must be base64 or raw.')
        self._appended_encoding = appended_encoding
        # running byte offset used for appended DataArray "offset" attributes
        self._offset = 0

    if compression is None or compression is False or compression == 0:
        self.compression = 0
    elif compression is True:
        self.compression = -1
        raise NotImplementedError("To be implemented at a later date")
    elif compression in list(range(-1, 10)):
        self.compression = compression
        raise NotImplementedError("To be implemented at a later date")
    else:
        raise ValueError(f'compression level {compression} is not recognized by zlib')

    # set data attributes
    # num points and cells can be calculated from the extents for ImageData, StructuredGrid and RectilinearGrid
    self.npoints = 0
    self.ncells = 0

    # PolyData counts: needs to be calculated from the data first and passed at instantiation
    self.nverts = 0
    self.nlines = 0
    self.nstrips = 0
    self.npolys = 0
|
|
195
|
+
|
|
196
|
+
def open_file(self):
    """
    Open the output path for binary writing.

    Sets ``self.file`` to the open write-binary stream for ``self.path`` and,
    when the writer was configured with a declaration and base64 appended
    encoding, immediately emits the XML declaration line.

    Notes
    -----
    ``self.path``, ``self._add_declaration`` and ``self._appended_encoding``
    must already be configured (normally by ``__init__``).
    """
    self.file = open(self.path, "wb")
    wants_declaration = self._add_declaration and self._appended_encoding == 'base64'
    if wants_declaration:
        self.add_declaration()
|
|
225
|
+
|
|
226
|
+
# Move common methods here:
|
|
227
|
+
def add_declaration(self):
    """
    Write the standard XML declaration as the first line of the file.

    Safe to include in all files, although files whose data encoding is
    `appended` may be considered invalid XML regardless.
    """
    declaration = b'<?xml version="1.0"?>\n'
    self.file.write(declaration)
|
|
236
|
+
|
|
237
|
+
def add_filetype(self):
    """
    Write the <VTKFile> root element and the dataset-type element.

    For version 1.0 the root element also carries the header_type attribute;
    structured types additionally record WholeExtent (and, for ImageData,
    Origin/Spacing/Direction) on the dataset element.
    """
    # add vtk root node
    if self.vtk_version == '1.0':
        vtk_filestr = (f'<VTKFile type="{self.filetype}" version="1.0" '
                       f'byte_order="{self._byteorder}" header_type="{self.header_type}">')
    elif self.vtk_version == '0.1':
        vtk_filestr = f'<VTKFile type="{self.filetype}" version="0.1" byte_order="{self._byteorder}">'
    # NOTE(review): any other version leaves vtk_filestr unbound; __init__
    # validates the version so this is unreachable in practice.

    self.file.write((vtk_filestr + "\n").encode(self._text_encoding))

    # gather appropriate file type attributes
    attrs = []
    if self.filetype == 'ImageData' or self.filetype == 'RectilinearGrid' or self.filetype == 'StructuredGrid':
        # six-value extent: x0 x1 y0 y1 z0 z1
        fmt = ' '.join(['%.16g'] * 6)
        attrs.append(f'WholeExtent="{fmt % tuple(self.whole_extent)}"')

        if self.filetype == 'ImageData':
            fmt2 = ' '.join(['%.16g'] * 3)
            fmt3 = ' '.join(['%.16g'] * 9)
            attrs.append(f'Origin="{fmt2 % tuple(self.origin)}"')
            attrs.append(f'Spacing="{fmt2 % tuple(self.grid_spacing)}"')
            attrs.append(f'Direction="{fmt3 % tuple(self.direction)}"')

        # write file type node and attributes
        file_type_str = f"  <{self.filetype} " + " ".join(attrs) + ">\n"

    else:
        # write file type node
        file_type_str = f"  <{self.filetype}" + ">\n"

    self.file.write(file_type_str.encode(self._text_encoding))
|
|
272
|
+
|
|
273
|
+
def close_filetype(self):
    """Write the closing tag for the dataset-type element."""
    closing = f'  </{self.filetype}>\n'
    self.file.write(closing.encode(self._text_encoding))
|
|
276
|
+
|
|
277
|
+
def close_file(self, verbose=False):
    """
    Close the root <VTKFile> element and the underlying file handle.

    Parameters
    ----------
    verbose : bool, default False
        When True, print a confirmation message after closing.

    Returns
    -------
    None

    """
    root_close = "</VTKFile>".encode(self._text_encoding)
    self.file.write(root_close)
    self.file.close()

    if verbose:
        print('  File successfully written.')
|
|
291
|
+
|
|
292
|
+
def add_fielddata(self, field_data):
    """
    Write a <FieldData> section containing one DataArray per dictionary entry.

    Parameters
    ----------
    field_data : dict or None
        Mapping of field name -> associated data. When None, nothing is
        written at all (no empty <FieldData> element is produced).
    """
    if field_data is None:
        return

    self.file.write(b'      <FieldData>\n')
    for array_name, array_values in field_data.items():
        self._add_dataarray(array_values, name=array_name, data_type='field_data')
    self.file.write(b'      </FieldData>\n')
|
|
313
|
+
|
|
314
|
+
def open_piece(self):
    """
    Write the opening <Piece> tag.

    The attribute set depends on the filetype: structured types record their
    Extent; UnstructuredGrid records point/cell counts; PolyData records
    point, vertex, line, strip and poly counts.
    """
    attributes = []

    structured = self.filetype in ('ImageData', 'RectilinearGrid', 'StructuredGrid')
    if structured:
        extent_fmt = ' '.join(['%.16g'] * 6)
        attributes.append(f'Extent="{extent_fmt % tuple(self.piece_extent)}"')

    if self.filetype == 'UnstructuredGrid':
        attributes.append(f'NumberOfPoints="{self.npoints}"')
        attributes.append(f'NumberOfCells="{self.ncells}"')

    if self.filetype == 'PolyData':
        attributes.append(f'NumberOfPoints="{self.npoints}"')
        attributes.append(f'NumberOfVerts="{self.nverts}"')
        attributes.append(f'NumberOfLines="{self.nlines}"')
        attributes.append(f'NumberOfStrips="{self.nstrips}"')
        attributes.append(f'NumberOfPolys="{self.npolys}"')

    piece_tag = "    <Piece " + " ".join(attributes) + ">\n"
    self.file.write(piece_tag.encode(self._text_encoding))
|
|
342
|
+
|
|
343
|
+
def open_element_base(self, element, attrs=None, indent_lvl=2, self_closing=False):
    """
    Write an opening (or self-closing) XML tag with optional attributes.

    Parameters
    ----------
    element : str
        Tag name to open.
    attrs : list of str, optional
        Pre-formatted 'key="value"' attribute strings.
    indent_lvl : int, default 2
        Number of leading spaces.
    self_closing : bool, default False
        Emit '<element .../>' instead of '<element ...>'.
    """
    attrs = [] if attrs is None else attrs

    pieces = [' ' * indent_lvl, f'<{element}']
    if attrs:
        pieces.append(" " + " ".join(attrs))
    pieces.append("/>\n" if self_closing else ">\n")

    self.file.write(''.join(pieces).encode(self._text_encoding))
|
|
358
|
+
|
|
359
|
+
def close_element(self, tag_name, indent_lvl=0):
    """
    Write the closing tag for *tag_name* at the requested indentation level.

    Returns
    -------
    None

    """
    self.file.write((' ' * indent_lvl + f'</{tag_name}>\n').encode(self._text_encoding))
|
|
370
|
+
|
|
371
|
+
def calculate_blocksize(self, data_array):
    """
    Pack the byte size of *data_array* as a VTK block-size header.

    The byte count is packed with the struct format derived from
    ``self.header_type`` ('UInt64' for file version 1.0, 'UInt32' for 0.1)
    and the machine byte order.

    Parameters
    ----------
    data_array : numpy.ndarray
        The NumPy array for which the block size is to be calculated.

    Returns
    -------
    bytes
        The size of the data array in bytes, packed as an unsigned integer
        of the configured header width.

    Raises
    ------
    TypeError
        If `data_array` is not a NumPy array.

    """
    if not isinstance(data_array, np.ndarray):
        # BUGFIX: the original printed a message and returned None, which made
        # callers fail later with a confusing error; the docstring already
        # promised a TypeError, so raise it here.
        raise TypeError('Expected NumPy Array.')

    # struct format (e.g. '<Q') for the configured header type and byte order
    fmt_block = _parse_bytecount_type(self.header_type, self._byteorder)[0]
    return struct.pack(fmt_block, data_array.nbytes)
|
|
404
|
+
|
|
405
|
+
def convert_blockdata(self, data_array):
    """
    Return the raw bytes of *data_array*, flattened in C (row-major) order.

    Parameters
    ----------
    data_array : numpy.ndarray
        Input array; any memory layout (C- or Fortran-contiguous, or
        non-contiguous) is handled.

    Returns
    -------
    bytes
        The binary representation of the data in C-contiguous order.

    Raises
    ------
    TypeError
        If the input is not an instance of `numpy.ndarray`.
    """
    if not isinstance(data_array, np.ndarray):
        raise TypeError('Expected NumPy Array.')

    # BUGFIX/cleanup: the original branched on F_CONTIGUOUS but both branches
    # executed the identical np.ravel(..., order='C').tobytes() call, while the
    # comments contradicted each other ("VTK expects FORTRAN order" vs C-order
    # ravels). A single C-order ravel reproduces the original behavior for
    # every layout; np.ravel avoids a copy when the array is already
    # C-contiguous.
    return np.ravel(data_array, order='C').tobytes()
|
|
440
|
+
|
|
441
|
+
def ascii_encoder(self, array):
    """
    Format *array* as ASCII rows and write them to the open file.

    Integer arrays are written with zero decimals and a column count derived
    from the widest value; float arrays use ``self.ascii_precision`` decimals.
    Each row is indented by ten spaces, and a final partial row holds any
    remainder when the array size is not a multiple of the column count.

    Parameters
    ----------
    array : numpy.ndarray
        The array containing the numerical data to be formatted as ASCII text.

    Returns
    -------
    str
        The full ASCII text that was written (including indentation).

    """
    # set precision for formatting
    if np.issubdtype(array.dtype, np.integer):
        precision = 0
        try:
            int_str_length = len(str(array.max()))
        except ValueError:
            # BUGFIX: narrowed from a bare `except:`. max() on an empty array
            # raises ValueError; the wide fallback just yields a small column
            # count. A bare except would also have hidden unrelated errors.
            int_str_length = 255
        num_cols = min(120 // (int_str_length + 3), 40)
    else:
        precision = self.ascii_precision
        num_cols = 132 // (precision + 6)

    fmt = ' '.join([f'%.{precision}f'] * num_cols) + '\n'
    data_str = [' ' * 10]

    # calculate number of full rows
    nrows = 1 if array.size <= num_cols else int(array.size / num_cols)

    array = array.flatten(order='C')

    # write full rows
    if array.size > num_cols:
        for row in range(nrows):
            shift = row * num_cols
            data_str.append(fmt % tuple(array[shift:num_cols + shift]))
            data_str.append(' ' * 10)

        # write any remaining data in last non-full row
        if array.size % num_cols != 0:
            rem = array[num_cols + shift::]
            fmt = ' '.join([f'%.{precision}f'] * len(rem)) + '\n'
            data_str.append(fmt % tuple(rem))

    else:
        # everything fits on a single (possibly partial) row
        rem = array
        fmt = ' '.join([f'%.{precision}f'] * len(rem)) + '\n'
        data_str.append(fmt % tuple(rem))

    data_str = ''.join(data_str)
    self.file.write(data_str.encode(self._text_encoding))

    return data_str
|
|
504
|
+
|
|
505
|
+
def Base64Encoder(self, data_array):
    """
    Base64-encode *data_array* (with its size header) and write it to the file.

    A run of padding spaces is written first so the encoded payload lines up
    with the surrounding XML indentation.

    Parameters
    ----------
    data_array :
        The data array to be encoded into Base64 format.
    """
    padding = b'          '
    self.file.write(padding)
    self.file.write(self.b64_encode_array(data_array))
|
|
521
|
+
|
|
522
|
+
def b64_encode_array(self, data_array):
    """
    Return the Base64 encoding of *data_array* prefixed by its size header.

    The block-size header (width set by the file version) is computed first,
    the array is flattened to raw bytes, and the concatenation of the two is
    Base64 encoded.

    Parameters
    ----------
    data_array : Any
        The input data array to be encoded.

    Returns
    -------
    bytes
        The Base64 encoded representation (header + payload) as a byte string.
    """
    header = self.calculate_blocksize(data_array)
    payload = self.convert_blockdata(data_array)
    return pybase64.b64encode(header + payload)
|
|
548
|
+
|
|
549
|
+
def appended_encoder(self, data_array):
    """
    Encode *data_array* and append it to the in-memory appended-data buffer.

    Parameters
    ----------
    data_array : numpy.ndarray
        The input data array to be encoded and appended.

    Notes
    -----
    The behavior depends on ``self._appended_encoding``:

    - 'raw': a block-size header followed by the raw bytes is appended.
    - 'base64' / 'binary': the base64-encoded header+payload is appended.

    ``self._offset`` is advanced by the number of bytes appended so that the
    next DataArray's ``offset=`` attribute points at the right position, and
    ``self._appended_data`` accumulates the payload written out later by
    ``add_appended_data``.
    """
    # write appended data in raw format
    if self._appended_encoding == 'raw':
        # BUGFIX: the raw block is prefixed by a byte-count header whose width
        # depends on the file version (UInt64 -> 8 bytes for v1.0, UInt32 -> 4
        # bytes for v0.1). The original hard-coded "+ 8", which mis-computed
        # offsets for version 0.1 files.
        header_nbytes = self._DataType[self.header_type]
        self._offset += data_array.nbytes + header_nbytes
        self._appended_data += self.calculate_blocksize(data_array)
        self._appended_data += self.convert_blockdata(data_array)

    # write appended data in base64 binary
    if self._appended_encoding == 'base64' or self._appended_encoding == 'binary':
        # encode once and reuse (the original base64-encoded the array twice)
        encoded = self.b64_encode_array(data_array)
        self._offset += len(encoded)
        self._appended_data += encoded
|
|
586
|
+
|
|
587
|
+
# Common XML Operations
|
|
588
|
+
|
|
589
|
+
def add_appended_data(self):
    """
    Write the <AppendedData> section to the file.

    Emits the opening tag (with the configured encoding attribute), the '_'
    marker that precedes the payload, the accumulated ``self._appended_data``
    bytes, and the closing tag.
    """
    opening = f'  <AppendedData encoding="{self._appended_encoding}">\n'
    closing = '\n  </AppendedData>\n'

    self.file.write(opening.encode(self._text_encoding))
    self.file.write(b'   _')
    self.file.write(self._appended_data)
    self.file.write(closing.encode(self._text_encoding))
|
|
609
|
+
|
|
610
|
+
def _add_dataarray(self, data, name, data_type='CellData', vtk_type=None):
    """
    Add a data array to the VTK file with specified attributes and encoding.

    The method processes the provided data, determines its properties such as
    minimum, maximum, and number of components, and generates attributes required
    for the VTK output. It also encodes the data into the configured format
    (ascii, binary/base64, or appended) and writes it to the file. If `vtk_type`
    is not provided, it is inferred from the data's NumPy dtype; non-numeric
    data is converted to a VTK "String" array of NUL-terminated character codes.

    Parameters
    ----------
    data : numpy.ndarray
        Numerical (or string) data to be added to the VTK file. Should be
        convertible to a NumPy ndarray.
    name : str
        Name of the data array in the VTK file (e.g. 'Variable1', 'Temp').
    data_type : str, default 'CellData'
        Specifies the type of data to be added. Expected types are 'CellData',
        'PointData', or 'field_data'.
    vtk_type : str, optional
        Specific VTK data type for this array. If not provided, it will be
        inferred from the NumPy dtype of the input array.

    Raises
    ------
    TypeError
        If the input data cannot be converted to a NumPy ndarray.
    ValueError
        If `self.encoding` is not 'ascii', 'binary', or 'appended'.

    """
    # convert data to np ndarray for ease of use
    data = np.asarray(data)
    if np.issubdtype(data.dtype, np.number):
        try:
            # numpy (>1.26) seems to be quicker than bottleneck for arrays where there will definitely be no nans
            data_min = np.min(data)
            data_max = np.max(data)
        except:
            # min/max of an empty array raises ValueError; fall back to
            # sentinel range values.
            # NOTE(review): the bare except also swallows unrelated errors —
            # consider narrowing to ValueError.
            data_min = 1e+299
            data_max = -1e+299
    else:
        # non-numeric data is written as a VTK String array
        # data = np.array(data, dtype=StringDType())
        # vtk_type = self._np_to_vtk[str(data.dtype)]
        vtk_type = "String"

        # Calculate string lengths
        # string_lengths = np.array([len(s) for s in data], dtype=np.int32)

        char_array = []
        # chars = data.view('int32')
        # convert to ascii characters: each string becomes its character codes
        # followed by a 0 terminator
        for element in data:
            char_array.append(np.array(list(element)).view('int32'))
            char_array.append(0)

        data = np.hstack(char_array)

    # set attributes
    attrs = []

    # check data type
    if vtk_type is None:
        vtk_type = self._np_to_vtk[str(data.dtype)]

    # count num components (columns of a 2-D array; 1 for flat arrays)
    num_components = data.shape[1] if len(data.shape) > 1 else 1
    if data_type == 'field_data':
        attrs.append(f'NumberOfTuples="{num_components}"')
    else:
        # topology arrays carry no NumberOfComponents attribute
        if name not in ['connectivity', 'offsets', 'types']:
            attrs.append(f'NumberOfComponents="{num_components}"')

    # add remaining attributes to list for writing to file
    attrs.append(f'type="{vtk_type}" format="{self.encoding}"')
    if vtk_type != "String":
        attrs.append(f'RangeMin="{data_min}" RangeMax="{data_max}"')

    # encode array
    if self.encoding == 'ascii' or self.encoding == 'binary':
        data_opentag = f'        <DataArray Name="{name}" ' + " ".join(attrs) + ">\n"
        data_closetag = '        </DataArray>\n'

        # add opening tag to file
        self.file.write(data_opentag.encode(self._text_encoding))

        # add data to file if not appended data
        if self.encoding == 'ascii':
            self.ascii_encoder(data)

        if self.encoding == 'binary':
            self.Base64Encoder(data)
            # base64 payload has no trailing newline of its own
            data_closetag = '\n' + data_closetag

        # add closing tag to file
        self.file.write(data_closetag.encode(self._text_encoding))

    elif self.encoding == 'appended':
        # combined opening and closing tag for appended data
        data_tag = f'        <DataArray Name="{name}" ' + " ".join(attrs) + f' offset="{self._offset}"/>\n'

        self.file.write(data_tag.encode(self._text_encoding))
        self.appended_encoder(data)
    else:
        raise ValueError("Invalid encoding type. Expected 'ascii', 'binary', or 'appended'.")
|
|
717
|
+
|
|
718
|
+
@staticmethod
def _check_array_sizes(array_data):
    """
    Check that all data arrays to be written have the same leading length.

    Parameters
    ----------
    array_data : dict
        (Possibly nested) dictionary of data arrays to be written.

    Returns
    -------
    int
        The common array length.

    Raises
    ------
    ValueError
        If any entry is None, if no arrays are present, or if the lengths
        differ.

    """
    # flatten dictionary to check sizes more easily
    flattened_arrays = flatten(array_data, parent_key='', separator='_')

    sizes = []
    for _key, val in flattened_arrays.items():
        if val is None:
            raise ValueError("Warning: `None` is not a valid data array.")
        if isinstance(val, np.ndarray):
            # for 2-D arrays the leading dimension is the tuple count
            sizes.append(val.size if val.ndim == 1 else val.shape[0])
        else:
            sizes.append(len(val))

    # BUGFIX: the original used `all(sizes)`, which only checks that no size
    # is zero — it never compared the sizes to each other, so mismatched
    # arrays slipped through (and equal zero-length arrays were rejected).
    if len(set(sizes)) != 1:
        raise ValueError("Warning: Arrays provided are not all the same length. Data not written to file.")

    return sizes[0]
|
|
754
|
+
|
|
755
|
+
@staticmethod
def _convert_to_array(list_1d):
    """
    Convert a list or tuple to a NumPy array; otherwise return None.

    ``None`` and inputs that are already ndarrays fall through to ``None``
    (callers keep ndarray inputs as-is).

    Parameters
    ----------
    list_1d : list, tuple, numpy.ndarray or None
        Sequence to convert.

    Returns
    -------
    numpy.ndarray or None
        The converted array, or None when no conversion was performed.

    Raises
    ------
    AssertionError
        If *list_1d* is not None, not an ndarray, and not a list/tuple.
    """
    # idiom fix: isinstance instead of comparing type(...).__name__ strings
    if list_1d is not None and not isinstance(list_1d, np.ndarray):
        assert isinstance(list_1d, (list, tuple))
        return np.array(list_1d)
    return None
|
|
761
|
+
|
|
762
|
+
|
|
763
|
+
class XMLwriter(XMLWriterBase):
    """
    Regular XML VTK file writer.

    Adds dataset-attribute handling (point/cell/field data) on top of
    ``XMLWriterBase`` and provides the element helpers shared by the concrete
    VTI/VTR/VTS/VTU/VTP writer subclasses.
    """

    def __init__(self, filepath, filetype, point_data=None, cell_data=None, field_data=None, encoding='ascii',
                 ascii_precision=16, ascii_ncolumns=6, declaration=True, appended_encoding='base64', version='1.0'):
        # Formatting/encoding options are forwarded untouched to XMLWriterBase;
        # see the base class for their meaning.
        super().__init__(filepath, filetype, encoding, ascii_precision, ascii_ncolumns, declaration, appended_encoding,
                         version)

        # set data attributes - guard against the common mistake of passing an
        # array *name* (string) instead of a dict of name -> array
        if isinstance(point_data, str):
            raise TypeError("Expected a dictionary of data arrays to be written to the VTK file for point_data.")
        if isinstance(cell_data, str):
            raise TypeError("Expected a dictionary of data arrays to be written to the VTK file for cell_data.")
        if isinstance(field_data, str):
            raise TypeError("Expected a dictionary of data arrays to be written to the VTK file for field_data.")

        # assign data after checking type
        self.point_data = point_data
        self.cell_data = cell_data
        self.field_data = field_data

    # Keep specialized methods for regular XML writing:
    def check_array_sizes_for_cell_data(self, cell_data):
        """Compare size of cell data with the number of cells in the file."""
        # self.ncells is expected to have been set by the subclass __init__
        # before this is called
        if cell_data is not None:
            cell_data_size = self._check_array_sizes(cell_data)
            if cell_data_size != self.ncells:
                raise ValueError('Cells and cell data sizes do not match')

    def check_array_sizes_for_point_data(self, point_data):
        """Compare size of point data with the number of points in the file."""
        # self.npoints is expected to have been set by the subclass __init__
        # before this is called
        if point_data is not None:
            point_data_size = self._check_array_sizes(point_data)
            if point_data_size != self.npoints:
                raise ValueError('Points and point data sizes do not match')

    # Data Structure Methods
    def add_points(self, points):
        """
        Add a collection of points to the data structure.

        Opens a ``Points`` element, writes the coordinates through
        ``_add_dataarray`` and closes the element again.

        Parameters
        ----------
        points : Any
            The collection of points to be added. The parameter should conform
            to the data type `points` as expected by the `_add_dataarray`
            method.

        Notes
        -----
        Points are only required for `UnstructuredGrid` and `PolyData`.
        """
        self.open_element("Points")
        self._add_dataarray(points, "Points", data_type='points')
        self.close_element('Points', 3)

    def add_data_from_dict(self, data_type, data_dict):
        """
        Add data to the object using a dictionary of data and a specified data type.

        This method iterates through a dictionary of data and adds each
        individual data array to the object. The name of each property in the
        dictionary is used as the identifier for the corresponding data array.

        Parameters
        ----------
        data_type : str
            A string indicating the type of data being added. This value is
            passed to the `_add_dataarray` method to categorize the data.
        data_dict : dict
            A dictionary where keys represent property names, and values
            represent the corresponding data to be added.
        """
        for prop_name, prop_data in data_dict.items():
            self._add_dataarray(prop_data, prop_name, data_type=data_type)

    def add_data_array(self, data, data_type="PointData"):
        """
        Add data arrays to a specified vtk data type.

        This includes processing nested dictionary structures and managing data
        elements such as scalars, vectors, tensors, normals, and texture
        coordinates. The function categorizes and processes the data based on
        its structure and type while ensuring proper opening and closing of the
        relevant elements.

        Parameters
        ----------
        data : dict
            The data to be added. It can be provided either as a flat
            dictionary or a nested dictionary with keys such as 'scalars',
            'vectors', 'tensors', 'normals', and 'texture_coords'.
        data_type : str, optional
            Specifies the vtk data type to which the data should be added.
            Defaults to "PointData".
        """
        # Process Data items; a None dict writes nothing (no empty element)
        if data is not None:
            # check if sorted by variable kind (nested dict of dicts)
            is_nested = any(isinstance(i, dict) for i in data.values())

            if is_nested:
                # split data into types if provided as nested dict
                scalars = data.get('scalars')
                vectors = data.get('vectors')
                tensors = data.get('tensors')
                normals = data.get('normals')
                texture_coords = data.get('texture_coords')

                # open element, declaring the active array of each kind on the tag
                self.open_element(data_type, Scalars=scalars, Vectors=vectors, Tensors=tensors, Normals=normals,
                                  TCoords=texture_coords)

                if scalars is not None:
                    self.add_data_from_dict(data_type, scalars)

                if vectors is not None:
                    self.add_data_from_dict(data_type, vectors)

                if tensors is not None:
                    self.add_data_from_dict(data_type, tensors)

                if normals is not None:
                    self.add_data_from_dict(data_type, normals)

                if texture_coords is not None:
                    self.add_data_from_dict(data_type, texture_coords)

            else:
                # open element without setting any active data types
                self.open_element(data_type)

                # add data arrays to file
                self.add_data_from_dict(data_type, data)

            # close data array element
            self.close_element(data_type, 3)

    # Element Methods (Standard Version)
    def open_element(self, element="Points", Scalars=None, Vectors=None,
                     Tensors=None, TCoords=None, Normals=None, indent_lvl=2):
        """
        Specialized for VTK data types.

        Each keyword argument, when supplied, is a dict of data arrays; the
        name of its first entry is declared as the active array of that kind
        (e.g. ``Scalars="pressure"``) on the opening tag.
        """
        attrs = []
        if Scalars is not None:
            attrs.append(f'Scalars="{first_key(Scalars)}"')
        if Vectors is not None:
            attrs.append(f'Vectors="{first_key(Vectors)}"')
        if Tensors is not None:
            attrs.append(f'Tensors="{first_key(Tensors)}"')
        if TCoords is not None:
            attrs.append(f'TCoords="{first_key(TCoords)}"')
        if Normals is not None:
            attrs.append(f'Normals="{first_key(Normals)}"')

        self.open_element_base(element, attrs, indent_lvl=indent_lvl)
|
|
931
|
+
|
|
932
|
+
|
|
933
|
+
class XMLImageDataWriter(XMLwriter):
    """
    VTI XML Writer Class.

    Writes a VTK ImageData (.vti) file: a regular grid defined by its extents,
    spacing, origin and direction matrix rather than by explicit coordinates.
    """

    def __init__(self, filepath, whole_extent: object, piece_extent: object = None, spacing: object = (1, 1, 1),
                 origin: object = (0, 0, 0), direction: object = (1, 0, 0, 0, 1, 0, 0, 0, 1), point_data: object = None,
                 cell_data: object = None, field_data: object = None, encoding='ascii', ascii_precision=16,
                 ascii_ncolumns=6, declaration=True, appended_encoding='base64', version='1.0'):
        """
        Parameters
        ----------
        filepath : str or Path
            Destination of the .vti file.
        whole_extent : array_like of int, length 6
            (x0, x1, y0, y1, z0, z1) index extents of the full dataset.
        piece_extent : array_like of int, length 6, optional
            Extent of the piece written by this file; defaults to whole_extent.
        spacing : array_like, optional
            Grid spacing per axis.
        origin : array_like, optional
            Grid origin.
        direction : array_like, optional
            Direction matrix, flattened (9 values).
        point_data, cell_data, field_data : dict, optional
            Dictionaries of name -> array written as the corresponding data.
        encoding, ascii_precision, ascii_ncolumns, declaration,
        appended_encoding, version :
            Formatting/encoding options forwarded to the base writer.
        """
        super().__init__(filepath, 'ImageData', point_data, cell_data, field_data, encoding,
                         ascii_precision, ascii_ncolumns, declaration, appended_encoding, version)

        self.whole_extent = np.asarray(whole_extent).astype(int)
        if len(self.whole_extent) != 6:
            raise ValueError("whole_extent must be a list or array of length 6.")

        # check if piece_extent is provided, otherwise use whole_extent
        if piece_extent is None:
            self.piece_extent = np.asarray(whole_extent).astype(int)
        else:
            self.piece_extent = np.asarray(piece_extent).astype(int)

        self.grid_spacing = np.asarray(spacing)
        self.origin = np.asarray(origin)
        self.direction = np.asarray(direction)

        # derive counts from the piece extent; extents are inclusive index
        # ranges, so cells per axis = upper - lower and points = cells + 1
        self.num_cells = self.piece_extent[1::2] - self.piece_extent[0::2]
        self.ncells = np.prod(self.num_cells)
        self.npoints = np.prod(self.num_cells + 1)

        self.check_array_sizes_for_point_data(point_data)
        self.check_array_sizes_for_cell_data(cell_data)

    def write_xml_file(self):
        """Write VTI file in XML format."""
        # open xml file and write the VTKFile / ImageData header
        self.open_file()
        self.add_filetype()

        # open piece and add counts
        self.open_piece()

        # add FieldData
        self.add_fielddata(self.field_data)

        # add variables at points (PointData)
        self.add_data_array(self.point_data, data_type="PointData")

        # add variables at cells (CellData)
        self.add_data_array(self.cell_data, data_type="CellData")

        # close the Piece and the ImageData element
        self.close_element('Piece', 2)
        self.close_filetype()

        # add any appended data block after the main XML body
        if self.encoding == 'appended':
            self.add_appended_data()

        # close file
        self.close_file()
|
|
994
|
+
|
|
995
|
+
class XMLRectilinearGridWriter(XMLwriter):
    """
    VTR XML Writer Class.

    Writes a VTK RectilinearGrid (.vtr) file, defined by three 1D coordinate
    arrays (x, y, z) rather than a full set of points.
    """

    def __init__(self, filepath, x, y, z, whole_extent=None, piece_extent=None, point_data: object = None,
                 cell_data: object = None, field_data: object = None, encoding='ascii', ascii_precision=16,
                 ascii_ncolumns=6, declaration=True, appended_encoding='base64', version='1.0'):
        """
        Parameters
        ----------
        filepath : str or Path
            Destination of the .vtr file.
        x, y, z : array_like of numbers
            1D coordinate arrays along each axis.
        whole_extent, piece_extent : array_like of int, length 6, optional
            Index extents; both default to the full range implied by the
            coordinate array lengths.
        point_data, cell_data, field_data : dict, optional
            Dictionaries of name -> array written as the corresponding data.
        encoding, ascii_precision, ascii_ncolumns, declaration,
        appended_encoding, version :
            Formatting/encoding options forwarded to the base writer.
        """
        # simple check on coordinates - must first be a list or array
        if not (isinstance(x, (np.ndarray, list, tuple))):
            raise TypeError("x must be a numpy array, tuple or list")
        if not (isinstance(y, (np.ndarray, list, tuple))):
            raise TypeError("y must be a numpy array, tuple or list")
        if not (isinstance(z, (np.ndarray, list, tuple))):
            raise TypeError("z must be a numpy array, tuple or list")

        # check if list or array is numeric
        x = np.asarray(x)
        y = np.asarray(y)
        z = np.asarray(z)
        if not (np.issubdtype(x.dtype, np.number)):
            raise TypeError("x must be a numeric numpy array or list")
        if not (np.issubdtype(y.dtype, np.number)):
            raise TypeError("y must be a numeric numpy array or list")
        if not (np.issubdtype(z.dtype, np.number)):
            raise TypeError("z must be a numeric numpy array or list")

        super().__init__(filepath, 'RectilinearGrid', point_data, cell_data, field_data, encoding,
                         ascii_precision, ascii_ncolumns, declaration, appended_encoding, version)

        self.x = x
        self.y = y
        self.z = z
        # default extents cover the full coordinate arrays (inclusive indices)
        if whole_extent is None:
            self.whole_extent = np.array([0, len(x) - 1, 0, len(y) - 1, 0, len(z) - 1])
        else:
            self.whole_extent = np.asarray(whole_extent)

        if piece_extent is None:
            self.piece_extent = np.array([0, len(x) - 1, 0, len(y) - 1, 0, len(z) - 1])
        else:
            self.piece_extent = np.asarray(piece_extent)

        # derive counts from the piece extent; cells per axis = upper - lower
        self.num_cells = self.piece_extent[1::2] - self.piece_extent[0::2]
        self.ncells = np.prod(self.num_cells)
        self.npoints = np.prod(self.num_cells + 1)

        self.check_array_sizes_for_point_data(point_data)
        self.check_array_sizes_for_cell_data(cell_data)

    def write_xml_file(self):
        """Write VTR file in XML format."""
        # open xml file and write the VTKFile / RectilinearGrid header
        self.open_file()
        self.add_filetype()

        # open piece and add counts
        self.open_piece()

        # add FieldData
        self.add_fielddata(self.field_data)

        # add variables at points (PointData)
        self.add_data_array(self.point_data, data_type="PointData")

        # add variables at cells (CellData)
        self.add_data_array(self.cell_data, data_type="CellData")

        # add coordinates
        self.add_coordinates()

        # close the Piece and the RectilinearGrid element
        self.close_element('Piece', 2)
        self.close_filetype()

        # add any appended data block after the main XML body
        if self.encoding == 'appended':
            self.add_appended_data()

        # close file
        self.close_file()

    def add_coordinates(self):
        """
        Add 3D coordinate arrays to the corresponding elements in a structured data format.

        The method adds arrays representing X, Y, and Z coordinates to a data
        element while creating a hierarchical organisation. These arrays are
        tagged as 'coordinates' to denote their type of data. The method
        ensures each coordinate array is associated with its respective spatial
        dimension.
        """
        self.open_element("Coordinates")
        self._add_dataarray(self.x, "XCoordinates", data_type='coordinates')
        self._add_dataarray(self.y, "YCoordinates", data_type='coordinates')
        self._add_dataarray(self.z, "ZCoordinates", data_type='coordinates')
        self.close_element('Coordinates', 3)
|
|
1094
|
+
|
|
1095
|
+
|
|
1096
|
+
class XMLStructuredGridWriter(XMLwriter):
    """
    VTS XML Writer Class.

    Writes a VTK StructuredGrid (.vts) file: a curvilinear grid with explicit
    point coordinates laid out over a regular index extent.
    """

    def __init__(self, filepath, points, whole_extent, piece_extent=None,
                 point_data: object = None, cell_data: object = None, field_data: object = None, encoding='ascii',
                 ascii_precision=16, ascii_ncolumns=6, declaration=True, appended_encoding='base64', version='1.0'):
        """
        Parameters
        ----------
        filepath : str or Path
            Destination of the .vts file.
        points : array_like
            2D numeric array of point coordinates, one point per row. The row
            count must match the number of points implied by the piece extent.
        whole_extent : array_like of int, length 6
            (x0, x1, y0, y1, z0, z1) index extents of the full dataset.
            Required for this file type.
        piece_extent : array_like of int, length 6, optional
            Extent of the piece written by this file; defaults to whole_extent.
        point_data, cell_data, field_data : dict, optional
            Dictionaries of name -> array written as the corresponding data.
        encoding, ascii_precision, ascii_ncolumns, declaration,
        appended_encoding, version :
            Formatting/encoding options forwarded to the base writer.
        """
        if not (isinstance(points, (np.ndarray, list))):
            raise TypeError("points must be a numpy array")

        # convert to numpy array and do final checks
        points = np.asarray(points)
        if not (np.issubdtype(points.dtype, np.number)):
            raise TypeError("points must be a numeric numpy array")
        if points.ndim != 2:
            raise TypeError("points must be a 2D numpy array")

        super().__init__(filepath, 'StructuredGrid', point_data, cell_data, field_data, encoding,
                         ascii_precision, ascii_ncolumns, declaration, appended_encoding, version)

        if whole_extent is None:
            raise ValueError('Warning: The whole extent or num_cells must be provided for the VTS data type.')
        else:
            self.whole_extent = np.asarray(whole_extent).astype(int)

        if piece_extent is None:
            self.piece_extent = np.asarray(whole_extent).astype(int)
        else:
            self.piece_extent = np.asarray(piece_extent).astype(int)

        self.points = points

        # derive counts from the piece extent; cells per axis = upper - lower
        self.num_cells = self.piece_extent[1::2] - self.piece_extent[0::2]
        self.ncells = np.prod(self.num_cells)
        self.npoints = np.prod(self.num_cells + 1)

        if self.points.shape[0] != self.npoints:
            raise ValueError('Warning: The number of points does not match the number of points in the whole extent.')

        self.check_array_sizes_for_point_data(point_data)
        self.check_array_sizes_for_cell_data(cell_data)

    def write_xml_file(self):
        """Write VTS file in XML format."""
        # open xml file and write the VTKFile / StructuredGrid header
        self.open_file()
        self.add_filetype()

        # open piece and add counts
        self.open_piece()

        # add FieldData
        self.add_fielddata(self.field_data)

        # add variables at points (PointData)
        self.add_data_array(self.point_data, data_type="PointData")

        # add variables at cells (CellData)
        self.add_data_array(self.cell_data, data_type="CellData")

        # add the explicit point coordinates
        self.add_points(self.points)

        # close the Piece and the StructuredGrid element
        self.close_element('Piece', 2)
        self.close_filetype()

        # add any appended data block after the main XML body
        if self.encoding == 'appended':
            self.add_appended_data()

        # close file
        self.close_file()
|
|
1171
|
+
|
|
1172
|
+
|
|
1173
|
+
class XMLUnstructuredGridWriter(XMLwriter):
    """
    VTU XML Writer Class.

    Writes a VTK UnstructuredGrid (.vtu) file from explicit point coordinates
    and the standard VTK topology arrays (connectivity, offsets, cell types).
    """

    def __init__(self, filepath, nodes, cell_type: object = None, connectivity: object = None, offsets: object = None,
                 point_data: object = None,
                 cell_data: object = None, field_data: object = None, encoding='ascii', ascii_precision=16,
                 ascii_ncolumns=6,
                 declaration=True, appended_encoding='base64', version='1.0'):
        """
        Parameters
        ----------
        filepath : str or Path
            Destination of the .vtu file.
        nodes : array_like or None
            Point coordinates, one point per row. None allows a blank file.
        cell_type : array_like or None
            VTK cell-type id per cell.
        connectivity : array_like or None
            Flat point-index list of all cells.
        offsets : array_like or None
            End offset of each cell within the connectivity array; must have
            the same length as ``cell_type``.
        point_data, cell_data, field_data : dict, optional
            Dictionaries of name -> array written as the corresponding data.
        encoding, ascii_precision, ascii_ncolumns, declaration,
        appended_encoding, version :
            Formatting/encoding options forwarded to the base writer.
        """
        super().__init__(filepath, 'UnstructuredGrid', point_data, cell_data, field_data, encoding,
                         ascii_precision, ascii_ncolumns, declaration, appended_encoding, version)

        # check input and allow for blank file
        self.cell_types = np.asarray(cell_type) if cell_type is not None else None
        self.connectivity = np.asarray(connectivity) if connectivity is not None else None
        self.offsets = np.asarray(offsets) if offsets is not None else None
        self.nodes = np.asarray(nodes) if nodes is not None else None

        # counts are only needed for the data-size checks below
        if point_data is not None:
            self.npoints = len(nodes)

        if cell_data is not None:
            self.ncells = len(cell_type)

        # check topology: in the xml file the offsets array must have one entry
        # per cell. BUG FIX: the original compared len(self.offsets) without a
        # None guard, so a missing offsets array raised TypeError instead of a
        # clear error.
        if self.cell_types is not None:
            if self.offsets is None or len(self.cell_types) != len(self.offsets):
                raise ValueError("Offsets array must be the same length as the cell types array.")

        # check input data
        self.check_array_sizes_for_point_data(point_data)
        self.check_array_sizes_for_cell_data(cell_data)

    def write_xml_file(self):
        """Write VTU file in XML format."""
        # open xml file and write the VTKFile / UnstructuredGrid header
        self.open_file()
        self.add_filetype()

        # open piece and add counts
        self.open_piece()

        # add FieldData
        self.add_fielddata(self.field_data)

        # add variables at points (PointData)
        self.add_data_array(self.point_data, data_type="PointData")

        # add variables at cells (CellData)
        self.add_data_array(self.cell_data, data_type="CellData")

        # add points and cells
        self.add_points()
        self.add_unstruct_cells()

        # close the Piece and the UnstructuredGrid element
        self.close_element('Piece', 2)
        self.close_filetype()

        # add any appended data block after the main XML body
        if self.encoding == 'appended':
            self.add_appended_data()

        # close file
        self.close_file()

    # Unstructured Grid Methods
    def add_points(self):
        """
        Add the point coordinates to the grid piece.

        Opens a ``Points`` element, writes ``self.nodes`` through
        ``_add_dataarray`` when nodes were supplied (an empty element is
        written otherwise), and closes the element again.
        """
        self.open_element("Points")
        if self.nodes is not None:
            # Add data for points
            self._add_dataarray(self.nodes, "Points", data_type='Points')
        self.close_element('Points', 3)

    def add_unstruct_cells(self):
        """
        Add unstructured cell data to an XML VTK file.

        The method manages the addition of the connectivity, offsets, and
        types arrays required to define cells in the VTK format. If any of
        these arrays is missing, empty arrays are written instead so the file
        stays structurally valid.
        """
        # add data for cells
        self.open_element("Cells")
        # BUG FIX: the original branched only on connectivity, so a missing
        # offsets or cell_types array crashed with AttributeError on the
        # astype calls below. Require the full topology triple.
        if self.connectivity is not None and self.offsets is not None and self.cell_types is not None:
            # by default, paraview only supports 32bit ints unless it is
            # specifically compiled with the 64 bit option in cmake; therefore,
            # 32 bit is the default used here. The incoming numpy int arrays
            # are likely 64 bit, but since this is connectivity/offset data it
            # can usually be safely narrowed to 32 bit.
            # NOTE(review): data is cast to signed int32/int8 while the arrays
            # are declared as UInt32/UInt8 - same byte width, so the layout
            # matches for the non-negative values used here.
            self._add_dataarray(self.connectivity.astype(np.int32, copy=False),
                                "connectivity", data_type='cells', vtk_type="UInt32")
            self._add_dataarray(self.offsets.astype(np.int32, copy=False),
                                "offsets", data_type='cells', vtk_type="UInt32")
            self._add_dataarray(self.cell_types.astype(np.int8, copy=False),
                                "types", data_type='cells', vtk_type="UInt8")
        else:
            # blank file: write empty topology arrays
            self._add_dataarray(np.empty(0, dtype=np.int32), "connectivity", data_type='cells')
            self._add_dataarray(np.empty(0, dtype=np.int32), "offsets", data_type='cells')
            self._add_dataarray(np.empty(0, dtype=np.uint8), "types", data_type='cells')

        self.close_element('Cells', 3)
|
|
1303
|
+
|
|
1304
|
+
|
|
1305
|
+
class XMLPolyDataWriter(XMLwriter):
    """
    VTP XML Writer Class.

    Writes a VTK PolyData (.vtp) file from points plus any of the four
    primitive groups (verts, lines, strips, polys), each given as a
    (connectivity, offsets) pair.
    """

    def __init__(self, filepath, points=None, verts=None, lines=None, strips=None, polys=None,
                 point_data: object = None, cell_data: object = None, field_data: object = None,
                 encoding='ascii', ascii_precision=16, ascii_ncolumns=6, declaration=True, appended_encoding='base64',
                 version='1.0'):
        """
        Parameters
        ----------
        filepath : str or Path
            Destination of the .vtp file.
        points : array_like, optional
            Point coordinates, one point per row.
        verts, lines, strips, polys : sequence, optional
            Each is a (connectivity, offsets) pair for the corresponding
            primitive group; the offsets array length gives the primitive
            count.
        point_data, cell_data, field_data : dict, optional
            Dictionaries of name -> array written as the corresponding data.
        encoding, ascii_precision, ascii_ncolumns, declaration,
        appended_encoding, version :
            Formatting/encoding options forwarded to the base writer.
        """
        super().__init__(filepath, 'PolyData', point_data, cell_data, field_data, encoding,
                         ascii_precision, ascii_ncolumns, declaration, appended_encoding, version)

        self.points = points
        self.verts = verts
        self.lines = lines
        self.strips = strips
        self.polys = polys

        # set topology counts. BUG FIX: the original only assigned each count
        # when the corresponding input was provided, so computing ncells below
        # (or the point-data size check) raised AttributeError whenever any
        # group was absent. Default all counts to 0 instead.
        self.npoints = 0 if points is None else len(points)
        self.nverts = 0 if verts is None else len(verts[1])
        self.nlines = 0 if lines is None else len(lines[1])
        self.nstrips = 0 if strips is None else len(strips[1])
        self.npolys = 0 if polys is None else len(polys[1])

        # total cell count is the sum over all primitive groups
        if self.cell_data:
            self.ncells = self.nverts + self.nlines + self.nstrips + self.npolys

        # data size checks
        self.check_array_sizes_for_point_data(point_data)
        self.check_array_sizes_for_cell_data(cell_data)

    def write_xml_file(self):
        """Write a VTP file in XML format."""
        # open xml file and write the VTKFile / PolyData header
        self.open_file()
        self.add_filetype()

        # open piece and add counts
        self.open_piece()

        # add FieldData
        self.add_fielddata(self.field_data)

        # add variables at points (PointData)
        self.add_data_array(self.point_data, data_type="PointData")

        # add variables at cells (CellData)
        self.add_data_array(self.cell_data, data_type="CellData")

        # add polydata topology - all five elements are always written, empty
        # when the corresponding input was not supplied
        self.add_points()
        self.add_verts()
        self.add_lines()
        self.add_strips()
        self.add_polys()

        # close the Piece and the PolyData element
        self.close_element('Piece', 2)
        self.close_filetype()

        # add any appended data block after the main XML body
        if self.encoding == 'appended':
            self.add_appended_data()

        # close file
        self.close_file()

    # PolyData Methods
    def add_points(self):
        """
        Add the point coordinates to the PolyData piece.

        Opens a ``Points`` element, writes ``self.points`` through
        ``_add_dataarray`` when points were supplied (an empty element is
        written otherwise), and closes the element again.
        """
        self.open_element("Points", indent_lvl=3)
        if self.points is not None:
            # Add data for points
            self._add_dataarray(self.points, "Points", data_type='polydata')
        self.close_element('Points', 3)

    def add_lines(self):
        """
        Add polydata lines to the current XML structure.

        Writes the (connectivity, offsets) pair of ``self.lines`` inside a
        ``Lines`` element; the element is written empty when no lines were
        supplied.
        """
        self.open_element("Lines", indent_lvl=3)
        if self.lines is not None:
            # add data for Lines
            self._add_dataarray(self.lines[0], "connectivity", data_type='polydata')
            self._add_dataarray(self.lines[1], "offsets", data_type='polydata')
        self.close_element('Lines', 3)

    def add_verts(self):
        """
        Add vertex data for a polydata element in a VTK file structure.

        Writes the (connectivity, offsets) pair of ``self.verts`` inside a
        ``Verts`` element; the element is written empty when no verts were
        supplied.
        """
        self.open_element("Verts", indent_lvl=3)
        if self.verts is not None:
            # add data for Verts
            self._add_dataarray(self.verts[0], "connectivity", data_type='polydata')
            self._add_dataarray(self.verts[1], "offsets", data_type='polydata')
        self.close_element('Verts', 3)

    def add_strips(self):
        """
        Add polygonal data in the form of strips.

        Writes the (connectivity, offsets) pair of ``self.strips`` inside a
        ``Strips`` element; the element is written empty when no strips were
        supplied.
        """
        self.open_element("Strips", indent_lvl=3)
        if self.strips is not None:
            # add data for Strips
            self._add_dataarray(self.strips[0], "connectivity", data_type='polydata')
            self._add_dataarray(self.strips[1], "offsets", data_type='polydata')
        self.close_element('Strips', 3)

    def add_polys(self):
        """
        Add polygonal data to the output.

        Writes the (connectivity, offsets) pair of ``self.polys`` inside a
        ``Polys`` element; the element is written empty when no polys were
        supplied.
        """
        self.open_element("Polys", indent_lvl=3)
        if self.polys is not None:
            # add data for Polys
            self._add_dataarray(self.polys[0], "connectivity", data_type='polydata')
            self._add_dataarray(self.polys[1], "offsets", data_type='polydata')
        self.close_element('Polys', 3)
|
|
1462
|
+
|
|
1463
|
+
|
|
1464
|
+
class XML_MultiBlockWriter(XMLWriterBase):
    """
    XML writer for VTK multi-block container files.

    Built on top of ``XMLWriterBase``, this class streams its output: the file
    is opened and the VTKFile root node plus the file-type node are written
    during construction, after which callers add Block/DataSet elements with
    ``open_element``/``close_element`` and finally close the file.

    Attributes
    ----------
    filetype : str
        Type of the file being written, placed in the VTKFile root node.
    file : object
        File handle to which the XML content is being written (opened by the
        base class's ``open_file``).
    """

    def __init__(self, filepath, filetype, encoding='ascii', declaration=True):
        """
        Parameters
        ----------
        filepath : str or Path
            Destination of the multi-block file.
        filetype : str
            VTK file type written into the root node.
        encoding : str, optional
            Encoding mode, forwarded to the base writer.
        declaration : bool, optional
            Whether an XML declaration is included at the top of the file.
        """
        super().__init__(filepath, filetype, encoding, declaration)

        # open file and set header immediately - this writer streams elements
        # as they are added rather than buffering a document
        self.open_file()
        self.add_filetype()

    # Keep only MultiBlock-specific methods:
    def open_element(self, element="Block", index=0, name=None, file=None, indent_lvl=2, self_closing=False):
        """
        Open a multi-block element (e.g. Block or DataSet).

        Parameters
        ----------
        element : str, optional
            Tag name of the element.
        index : int, optional
            Value of the mandatory ``index`` attribute.
        name : str, optional
            Optional ``name`` attribute.
        file : str, optional
            Optional ``file`` attribute (path of the referenced data file).
        indent_lvl : int, optional
            Indentation level passed through to ``open_element_base``.
        self_closing : bool, optional
            Write the element as a self-closing tag.
        """
        attrs = []
        attrs.append(f'index="{index}"')
        if name is not None:
            attrs.append(f'name="{name}"')
        if file is not None:
            attrs.append(f'file="{file}"')

        self.open_element_base(element, attrs, indent_lvl, self_closing)

    def close_element(self, tag_name, indent_lvl=0):
        """
        Add element closing tag to file.

        Parameters
        ----------
        tag_name : str
            Name of the element to close.
        indent_lvl : int, optional
            Number of leading spaces before the closing tag.
            NOTE(review): this is one space per level, while the opening side
            delegates indentation to ``open_element_base`` - confirm the two
            agree.

        Returns
        -------
        None
        """
        closing_tag = ' ' * indent_lvl + f'</{tag_name}>\n'
        self.file.write(closing_tag.encode(self._text_encoding))

    # Block-specific Methods
    def add_filetype(self):
        """
        Add XML root node and file type node.
        """
        # add vtk root node; byte order and header type come from base-class
        # attributes set during __init__
        vtk_filestr = (f'<VTKFile type="{self.filetype}" version="1.0" '
                       f'byte_order="{self._byteorder}" header_type="{self.header_type}">')
        self.file.write((vtk_filestr + "\n").encode(self._text_encoding))

        # write file type node
        file_type_str = f" <{self.filetype}" + ">\n"

        self.file.write(file_type_str.encode(self._text_encoding))
|
|
1548
|
+
|
|
1549
|
+
|
|
1550
|
+
def vtk_multiblock_writer(filepath, block_data, add_declaration=True):
    """
    Write VTK `MultiBlockData` to VTK files.

    A multi-block dataset allows several datasets to be combined as a single dataset. In VTK XML format this means
    each dataset will have an independent xml file and a `.vtm` file that is a catalogue of the respective xml files.

    Parameters
    ----------
    filepath : str
        The filepath of the VTK vtm file. This can be a local file name or a complete filename and file path.
    block_data : dict
        A dictionary mapping each block name (used as the block's ``name`` attribute) to a dictionary with a
        mandatory ``'files'`` key listing the dataset file paths of the block's pieces, and an optional
        ``'names'`` key listing a display name per piece. When ``'names'`` is absent, piece names default to
        ``block_<block index>_<piece index>``.
    add_declaration : bool, default True
        Add declaration to file for valid XML file.

    """
    writer = XML_MultiBlockWriter(filepath, 'vtkMultiBlockDataSet', declaration=add_declaration)

    # add blocks — iterate items() once instead of re-indexing block_data per key
    for block_indx, (block_name, block_info) in enumerate(block_data.items()):
        writer.open_element(index=block_indx, name=block_name)

        # optional per-piece display names; hoisted out of the piece loop
        piece_names = block_info.get('names')

        # add pieces
        for piece_indx, piece_file in enumerate(block_info['files']):

            if piece_names is not None:
                piece_name = piece_names[piece_indx]
            else:
                piece_name = f'block_{block_indx}_{piece_indx}'

            writer.open_element(element="Piece", index=piece_indx, name=piece_name, indent_lvl=3)
            # each piece references exactly one dataset file (index 0)
            writer.open_element(element="DataSet", index=0, name=None, file=piece_file, indent_lvl=4,
                                self_closing=True)

            writer.close_element("Piece", indent_lvl=3)

        # close block
        writer.close_element("Block", indent_lvl=2)

    # close Multiblock filetype
    writer.close_filetype()

    # close file
    writer.close_file()
|