fiqus 2026.1.0__py3-none-any.whl → 2026.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. fiqus/MainFiQuS.py +1 -8
  2. fiqus/data/DataConductor.py +4 -8
  3. fiqus/data/DataFiQuSMultipole.py +358 -167
  4. fiqus/data/DataModelCommon.py +30 -15
  5. fiqus/data/DataMultipole.py +33 -10
  6. fiqus/data/DataWindingsCCT.py +37 -37
  7. fiqus/data/RegionsModelFiQuS.py +1 -1
  8. fiqus/geom_generators/GeometryMultipole.py +751 -54
  9. fiqus/getdp_runners/RunGetdpMultipole.py +181 -31
  10. fiqus/mains/MainMultipole.py +109 -17
  11. fiqus/mesh_generators/MeshCCT.py +209 -209
  12. fiqus/mesh_generators/MeshMultipole.py +938 -263
  13. fiqus/parsers/ParserCOND.py +2 -1
  14. fiqus/parsers/ParserDAT.py +16 -16
  15. fiqus/parsers/ParserGetDPOnSection.py +212 -212
  16. fiqus/parsers/ParserGetDPTimeTable.py +134 -134
  17. fiqus/parsers/ParserMSH.py +53 -53
  18. fiqus/parsers/ParserRES.py +142 -142
  19. fiqus/plotters/PlotPythonCCT.py +133 -133
  20. fiqus/plotters/PlotPythonMultipole.py +18 -18
  21. fiqus/post_processors/PostProcessMultipole.py +16 -6
  22. fiqus/pre_processors/PreProcessCCT.py +175 -175
  23. fiqus/pro_assemblers/ProAssembler.py +3 -3
  24. fiqus/pro_material_functions/ironBHcurves.pro +246 -246
  25. fiqus/pro_templates/combined/CC_Module.pro +1213 -0
  26. fiqus/pro_templates/combined/ConductorAC_template.pro +1025 -0
  27. fiqus/pro_templates/combined/Multipole_template.pro +2738 -1338
  28. fiqus/pro_templates/combined/TSA_materials.pro +102 -2
  29. fiqus/pro_templates/combined/materials.pro +54 -3
  30. fiqus/utils/Utils.py +18 -25
  31. fiqus/utils/update_data_settings.py +1 -1
  32. {fiqus-2026.1.0.dist-info → fiqus-2026.1.2.dist-info}/METADATA +64 -68
  33. {fiqus-2026.1.0.dist-info → fiqus-2026.1.2.dist-info}/RECORD +42 -40
  34. {fiqus-2026.1.0.dist-info → fiqus-2026.1.2.dist-info}/WHEEL +1 -1
  35. tests/test_geometry_generators.py +29 -32
  36. tests/test_mesh_generators.py +35 -34
  37. tests/test_solvers.py +32 -31
  38. tests/utils/fiqus_test_classes.py +396 -147
  39. tests/utils/generate_reference_files_ConductorAC.py +57 -57
  40. tests/utils/helpers.py +76 -1
  41. {fiqus-2026.1.0.dist-info → fiqus-2026.1.2.dist-info}/LICENSE.txt +0 -0
  42. {fiqus-2026.1.0.dist-info → fiqus-2026.1.2.dist-info}/top_level.txt +0 -0
@@ -1,134 +1,134 @@
1
- import re
2
- import math
3
-
4
-
5
- class ParserGetDPTimeTable:
6
- """
7
- This class parses GetDP's TimeTable format output files.
8
- """
9
-
10
- def __init__(self, filePath):
11
- self.time_values = []
12
- self.values = []
13
- # Parse data:
14
- with open(filePath) as file:
15
- # If the first line starts with #, we skip it.
16
- first_line = file.readline()
17
- if not first_line.startswith("#"):
18
- number_of_entries = len(
19
- re.findall(r"(-?\d+\.?\d*e?[-+]*\d*)", first_line)
20
- )
21
- # readline() moves the cursor to the next line, so we need to go back to
22
- # the beginning of the file.
23
- file.seek(0)
24
- else:
25
- second_line = file.readline()
26
- number_of_entries = len(
27
- re.findall(r"(-?\d+\.?\d*e?[-+]*\d*)", second_line)
28
- )
29
- # Seek to the second line
30
- file.seek(len(first_line) + 1)
31
-
32
- data = file.read()
33
-
34
- entries = re.findall(r"(-?\d+\.?\d*e?[-+]*\d*)", data)
35
- if number_of_entries == 2:
36
- # Global scalar value:
37
- time_index = 0
38
- value_index = 1
39
- self.data_type = "scalar"
40
- elif number_of_entries == 6:
41
- # Local scalar value probed at a point:
42
- time_index = 1
43
- value_index = 5
44
- self.data_type = "scalar"
45
- elif number_of_entries == 8:
46
- # Local vector value probed at a point:
47
- time_index = 1
48
- value_index = [5, 6, 7]
49
- self.data_type = "vector"
50
- elif number_of_entries == 14:
51
- # Local tensor value probed at a point:
52
- time_index = 1
53
- value_index = [[5, 6, 7], [8, 9, 10], [11, 12, 13]]
54
- self.data_type = "tensor"
55
- else:
56
- raise ValueError(f"{filePath} contains an unexpected type of data.")
57
-
58
- # Pack entries for each line:
59
- entries = [
60
- entries[i : i + number_of_entries]
61
- for i in range(0, len(entries), number_of_entries)
62
- ]
63
-
64
- for entry in entries:
65
- if self.data_type == "scalar":
66
- self.time_values.append(float(entry[time_index]))
67
- self.values.append(float(entry[value_index]))
68
- elif self.data_type == "vector":
69
- self.time_values.append(float(entry[time_index]))
70
- self.values.append(
71
- (
72
- float(entry[value_index[0]]),
73
- float(entry[value_index[1]]),
74
- float(entry[value_index[2]]),
75
- )
76
- )
77
- elif self.data_type == "tensor":
78
- self.time_values.append(float(entry[time_index]))
79
- self.values.append(
80
- [
81
- [
82
- float(entry[value_index[0][0]]),
83
- float(entry[value_index[0][1]]),
84
- float(entry[value_index[0][2]]),
85
- ],
86
- [
87
- float(entry[value_index[1][0]]),
88
- float(entry[value_index[1][1]]),
89
- float(entry[value_index[1][2]]),
90
- ],
91
- [
92
- float(entry[value_index[2][0]]),
93
- float(entry[value_index[2][1]]),
94
- float(entry[value_index[2][2]]),
95
- ],
96
- ]
97
- )
98
-
99
- def get_equivalent_scalar_values(self):
100
- """
101
- Returns the same scalar if self.data_type is scalar.
102
- Returns the magnitude of the vectors if self.data_type is vector.
103
- Returns the von misses equivalents of the tensors if self.data_type is tensor.
104
- """
105
-
106
- if self.data_type == "scalar":
107
- return self.values
108
- elif self.data_type == "vector":
109
- magnitudes = [
110
- math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2)
111
- for v in self.values
112
- ]
113
- return magnitudes
114
- elif self.data_type == "tensor":
115
- von_misses_equivalents = [
116
- math.sqrt(
117
- 0.5
118
- * (
119
- (v[0][0] - v[1][1]) ** 2
120
- + (v[1][1] - v[2][2]) ** 2
121
- + (v[2][2] - v[0][0]) ** 2
122
- + 6
123
- * (
124
- ((v[0][1] + v[1][0]) / 2) ** 2
125
- + ((v[1][2] + v[2][1]) / 2) ** 2
126
- + ((v[0][2] + v[2][0]) / 2) ** 2
127
- )
128
- )
129
- )
130
- for v in self.values
131
- ]
132
- return von_misses_equivalents
133
- else:
134
- raise RuntimeError("Data type not recognized.")
1
+ import re
2
+ import math
3
+
4
+
5
class ParserGetDPTimeTable:
    """Parse GetDP's TimeTable-format output files.

    After construction the instance exposes:
      - time_values: list of time stamps (floats)
      - values: list of floats, 3-tuples, or 3x3 nested lists of floats,
        depending on data_type
      - data_type: "scalar", "vector" or "tensor"
    """

    # Signed integer/float with optional exponent, e.g. "-1.2e-3".
    _NUMBER_RE = r"(-?\d+\.?\d*e?[-+]*\d*)"

    def __init__(self, filePath):
        """Read and parse the TimeTable file at filePath.

        :param filePath: full path to the TimeTable output file.
        :raises ValueError: if lines do not contain 2, 6, 8 or 14 numbers.
        """
        self.time_values = []
        self.values = []
        # Parse data:
        with open(filePath) as file:
            first_line = file.readline()
            if first_line.startswith("#"):
                # Comment header: count the columns on the first data line
                # and parse from that line onward (the header is skipped).
                # NOTE: the previous implementation did
                # file.seek(len(first_line) + 1), which both dropped the
                # first character of the first data line (off by one) and
                # assumed char count == byte count in text mode; re-using
                # the already-read line avoids seek arithmetic entirely.
                second_line = file.readline()
                number_of_entries = len(re.findall(self._NUMBER_RE, second_line))
                data = second_line + file.read()
            else:
                number_of_entries = len(re.findall(self._NUMBER_RE, first_line))
                data = first_line + file.read()

        entries = re.findall(self._NUMBER_RE, data)

        # The column count identifies the quantity type (see GetDP's
        # TimeTable output format).
        if number_of_entries == 2:
            # Global scalar value: (time, value)
            time_index = 0
            value_index = 1
            self.data_type = "scalar"
        elif number_of_entries == 6:
            # Local scalar value probed at a point: value in last column.
            time_index = 1
            value_index = 5
            self.data_type = "scalar"
        elif number_of_entries == 8:
            # Local vector value probed at a point: last three columns.
            time_index = 1
            value_index = [5, 6, 7]
            self.data_type = "vector"
        elif number_of_entries == 14:
            # Local tensor value probed at a point: last nine columns,
            # row-major.
            time_index = 1
            value_index = [[5, 6, 7], [8, 9, 10], [11, 12, 13]]
            self.data_type = "tensor"
        else:
            raise ValueError(f"{filePath} contains an unexpected type of data.")

        # Group the flat list of numbers into one row per file line:
        rows = [
            entries[i : i + number_of_entries]
            for i in range(0, len(entries), number_of_entries)
        ]

        for row in rows:
            self.time_values.append(float(row[time_index]))
            if self.data_type == "scalar":
                self.values.append(float(row[value_index]))
            elif self.data_type == "vector":
                self.values.append(tuple(float(row[i]) for i in value_index))
            else:  # tensor
                self.values.append(
                    [[float(row[i]) for i in tensor_row] for tensor_row in value_index]
                )

    def get_equivalent_scalar_values(self):
        """Return scalar equivalents of the parsed values.

        - scalar: the values unchanged
        - vector: Euclidean magnitudes of the vectors
        - tensor: von Mises equivalents of the tensors (off-diagonal
          terms symmetrized before squaring)
        """
        if self.data_type == "scalar":
            return self.values
        elif self.data_type == "vector":
            return [
                math.sqrt(v[0] ** 2 + v[1] ** 2 + v[2] ** 2) for v in self.values
            ]
        elif self.data_type == "tensor":
            return [
                math.sqrt(
                    0.5
                    * (
                        (v[0][0] - v[1][1]) ** 2
                        + (v[1][1] - v[2][2]) ** 2
                        + (v[2][2] - v[0][0]) ** 2
                        + 6
                        * (
                            ((v[0][1] + v[1][0]) / 2) ** 2
                            + ((v[1][2] + v[2][1]) / 2) ** 2
                            + ((v[0][2] + v[2][0]) / 2) ** 2
                        )
                    )
                )
                for v in self.values
            ]
        else:
            raise RuntimeError("Data type not recognized.")
@@ -1,53 +1,53 @@
1
- import gmsh
2
- import statistics
3
-
4
- class ParserMSH:
5
- def __init__(self, mesh_file_path):
6
- """
7
- Read msh file and returns mesh format and physical names as class attributes.
8
- :param mesh_file_path: Full path to .msh file, including file name and extension.
9
- """
10
- self.mesh_file_path = mesh_file_path
11
-
12
- self._mesh_format_markers = {'s': '$MeshFormat', 'e': '$EndMeshFormat'}
13
- self._physical_name_markers = {'s': 'PhysicalNames', 'e': '$EndPhysicalNames'}
14
-
15
- with open(mesh_file_path) as f:
16
- self._contents = f.read()
17
-
18
- def __get_content(self, markers_dict):
19
- """
20
- Gets text string between two markers specified in markers_dict
21
- """
22
- return self._contents[self._contents.find(markers_dict['s']) + len(markers_dict['s']):self._contents.find(markers_dict['e'])]
23
-
24
- def get_average_mesh_quality(self):
25
- """
26
- Gets the lowest mesh quality from the mesh file
27
- """
28
- gmsh.initialize()
29
- gmsh.open(self.mesh_file_path)
30
-
31
- # SICN not implemented in 1D!
32
- allElementsDim2 = gmsh.model.mesh.getElements(dim=2)[1]
33
- allElementsDim3 = gmsh.model.mesh.getElements(dim=3)[1]
34
- allElements = list(allElementsDim2[0]) + (list(allElementsDim3[0]) if allElementsDim3 else [])
35
- lowestQuality = statistics.fmean(gmsh.model.mesh.getElementQualities(allElements))
36
-
37
- gmsh.finalize()
38
-
39
- return lowestQuality
40
-
41
- @property
42
- def mesh_format(self):
43
- """
44
- Parse mesh_generators field and assign it to the class attribute
45
- """
46
- return self.__get_content(self._mesh_format_markers)
47
-
48
- @property
49
- def physical_names(self):
50
- """
51
- Parse physical_names field and assign it to the class attribute
52
- """
53
- return self.__get_content(self._physical_name_markers)
1
+ import gmsh
2
+ import statistics
3
+
4
class ParserMSH:
    def __init__(self, mesh_file_path):
        """
        Read msh file and expose its mesh format and physical names as properties.
        :param mesh_file_path: Full path to .msh file, including file name and extension.
        """
        self.mesh_file_path = mesh_file_path

        # Start ('s') and end ('e') markers delimiting each section of the file.
        self._mesh_format_markers = {'s': '$MeshFormat', 'e': '$EndMeshFormat'}
        self._physical_name_markers = {'s': 'PhysicalNames', 'e': '$EndPhysicalNames'}

        with open(mesh_file_path) as f:
            self._contents = f.read()

    def __get_content(self, markers_dict):
        """
        Gets text string between two markers specified in markers_dict
        """
        return self._contents[self._contents.find(markers_dict['s']) + len(markers_dict['s']):self._contents.find(markers_dict['e'])]

    def get_average_mesh_quality(self):
        """
        Returns the average element quality of the mesh, computed by gmsh
        over all 2D and 3D elements. (statistics.fmean is the arithmetic
        mean; the previous docstring and variable name wrongly said
        "lowest" quality.)
        """
        gmsh.initialize()
        gmsh.open(self.mesh_file_path)

        # SICN not implemented in 1D!
        allElementsDim2 = gmsh.model.mesh.getElements(dim=2)[1]
        allElementsDim3 = gmsh.model.mesh.getElements(dim=3)[1]
        # Guard both dimensions against empty element lists; the original
        # guarded only dim 3 and raised IndexError when no 2D elements exist.
        allElements = (list(allElementsDim2[0]) if allElementsDim2 else []) + (list(allElementsDim3[0]) if allElementsDim3 else [])
        averageQuality = statistics.fmean(gmsh.model.mesh.getElementQualities(allElements))

        gmsh.finalize()

        return averageQuality

    @property
    def mesh_format(self):
        """
        Return the text of the $MeshFormat section (between the markers).
        """
        return self.__get_content(self._mesh_format_markers)

    @property
    def physical_names(self):
        """
        Return the text of the $PhysicalNames section (between the markers).
        """
        return self.__get_content(self._physical_name_markers)
@@ -1,143 +1,143 @@
1
- import pandas as pd
2
- import re
3
- from collections import defaultdict
4
-
5
- class ParserRES:
6
-
7
- def __init__(self, res_file_path, write_data=None):
8
- """
9
- TO BE DONE!!
10
- Read res file and returns its content as object attribute .pqv (postprocessed quantity value) that is a float
11
- :param dat_file_path: Full path to .pos file, including file name and extension.
12
- :return: nothing, keeps attribute pqv (postprocessed quantity value)
13
- """
14
- self._res_format_markers = {'s': '$ResFormat', 'e': '$EndResFormat'}
15
- self._getdp_version_markers = {'s': '/* ', 'e': ','}
16
- self._encoding_markers = {'s': ', ', 'e': ' */'}
17
- # the 1.1 is hard-coded according to the GetDP documentation,
18
- # see https://getdp.info/doc/texinfo/getdp.html#File-formats
19
- self._res_file_format = {'s': '1.1 ', 'e': '\n$EndResFormat'}
20
-
21
- self.solution = defaultdict(dict)
22
-
23
- if write_data:
24
- self._res_file_path = res_file_path
25
- self._write_data = write_data
26
- self._write_res_file()
27
- else:
28
- # read contents of .res file
29
- with open(res_file_path) as f:
30
- self._contents = f.read()
31
- self._parse_res_file()
32
-
33
-
34
- def __get_content_between_markers(self, markers_dict):
35
- """
36
- Gets text string between two markers specified in markers_dict
37
- """
38
- return self._contents[self._contents.find(markers_dict['s']) + len(markers_dict['s']):self._contents.find(markers_dict['e'])]
39
-
40
- def _res_header(self):
41
- """
42
- Parse the header of the .res file.
43
- Add the attributes:
44
- - getdp_version: GetDP version that created the .res file
45
- - encoding: encoding of the .res file
46
- - res_file_format: format of the .res file
47
- """
48
- self.getdp_version = self.__get_content_between_markers(self._getdp_version_markers)
49
- self.encoding = self.__get_content_between_markers(self._encoding_markers)
50
- self.res_file_format = self.__get_content_between_markers(self._res_file_format)
51
-
52
- def _get_all_solution_blocks(self):
53
- """
54
- Add all unparsed solution blocks to the attribute _solution_blocks
55
- using regular expressions. It is a list of lists which each sub-list
56
- containing exactly one solution block.
57
- """
58
- solution_string = self._contents[self._contents.find('$Solution'):]
59
- self._solution_blocks = re.findall(r'\$Solution.*?\$EndSolution', solution_string, re.DOTALL)
60
-
61
- def _parse_res_file_single_solution_block(self, solution_block_split_by_line):
62
-
63
- # the first line is ignored
64
- header = solution_block_split_by_line[1]
65
- header_split = header.split()
66
- dof_data = int(header_split[0])
67
- time_real = float(header_split[1])
68
- time_imag = float(header_split[2])
69
- time_step = int(header_split[3])
70
- solution = [float(entry) for entry in solution_block_split_by_line[2:-1]]
71
-
72
- if "time_real" not in self.solution:
73
- self.solution['time_real'] = [time_real]
74
- else:
75
- self.solution['time_real'].append(time_real)
76
-
77
- if "time_imag" not in self.solution:
78
- self.solution['time_imag'] = [time_imag]
79
- else:
80
- self.solution['time_imag'].append(time_imag)
81
-
82
- if "time_step" not in self.solution:
83
- self.solution['time_step'] = [time_step]
84
- else:
85
- self.solution['time_step'].append(time_step)
86
-
87
- if "dof_data" not in self.solution:
88
- self.solution['dof_data'] = [dof_data]
89
- else:
90
- self.solution['dof_data'].append(dof_data)
91
-
92
- if "solution" not in self.solution:
93
- self.solution['solution'] = [solution]
94
- else:
95
- self.solution['solution'].append(solution)
96
-
97
- @staticmethod
98
- def __get_lines(data_str):
99
- """
100
- Converts text string into a list of lines
101
- """
102
- data_str = re.sub('\n', "'", data_str)
103
- data_str = re.sub('"', '', data_str)
104
- str_list = re.split("'", data_str)
105
- return str_list
106
-
107
- def _parse_res_file_solution_blocks(self):
108
- """
109
-
110
- """
111
- for solution_block in self._solution_blocks:
112
- # split by line
113
- solution_block_split_by_line = self.__get_lines(solution_block)
114
- self._parse_res_file_single_solution_block(solution_block_split_by_line)
115
-
116
- def _parse_res_file(self):
117
- self._res_header()
118
- self._get_all_solution_blocks()
119
- self._parse_res_file_solution_blocks()
120
-
121
- def _write_res_file(self):
122
- with open(self._res_file_path, "w") as f:
123
- # write header
124
- f.write(f"$ResFormat /* {self._write_data.getdp_version}, {self._write_data.encoding} */\n")
125
- # write res file format
126
- f.write(f"1.1 {self._write_data.res_file_format}\n")
127
- f.write(f"$EndResFormat\n")
128
-
129
- self._write_solution_block(f)
130
-
131
- def _write_solution_block(self, f):
132
- for time_real, time_imag, time_step, dof_data, solution in zip(self._write_data.solution['time_real'], self._write_data.solution['time_imag'], self._write_data.solution['time_step'], self._write_data.solution['dof_data'], self._write_data.solution['solution']):
133
-
134
- f.write(f"$Solution /* DofData #{dof_data} */\n")
135
- f.write(f"{dof_data} {time_real:.16g} {time_imag:.16g} {time_step}\n")
136
- f.write('\n'.join('{0:.16g}'.format(sol_entry) for sol_entry in solution))
137
- f.write(f"\n$EndSolution\n")
138
-
139
- # ==============================================================================
140
- #parsedRes = ParserRES('test.res')
141
- #ParserRES('test_written.res', write_data=parsedRes)
142
- #import filecmp
1
+ import pandas as pd
2
+ import re
3
+ from collections import defaultdict
4
+
5
class ParserRES:
    """
    Parser/writer for GetDP .res solution files.

    In read mode (write_data is None) the file is parsed into:
      - getdp_version, encoding, res_file_format: header fields
      - solution: mapping of lists keyed by 'time_real', 'time_imag',
        'time_step', 'dof_data' and 'solution' (one entry per $Solution
        block; 'solution' entries are lists of floats)

    In write mode (write_data given) the attributes of write_data — a
    ParserRES-like object carrying the fields above — are serialized to
    res_file_path.
    """

    def __init__(self, res_file_path, write_data=None):
        """
        :param res_file_path: Full path to the .res file to read or write.
        :param write_data: optional ParserRES-like object; when given, its
            data is written to res_file_path instead of reading the file.
        """
        self._res_format_markers = {'s': '$ResFormat', 'e': '$EndResFormat'}
        self._getdp_version_markers = {'s': '/* ', 'e': ','}
        self._encoding_markers = {'s': ', ', 'e': ' */'}
        # the 1.1 is hard-coded according to the GetDP documentation,
        # see https://getdp.info/doc/texinfo/getdp.html#File-formats
        self._res_file_format = {'s': '1.1 ', 'e': '\n$EndResFormat'}

        # defaultdict(list): every key accumulates one entry per $Solution
        # block, which removes the per-key "first time vs append" branching
        # (the previous defaultdict(dict) factory was never actually used).
        self.solution = defaultdict(list)

        if write_data:
            self._res_file_path = res_file_path
            self._write_data = write_data
            self._write_res_file()
        else:
            # read contents of .res file
            with open(res_file_path) as f:
                self._contents = f.read()
            self._parse_res_file()

    def __get_content_between_markers(self, markers_dict):
        """
        Gets text string between two markers specified in markers_dict
        """
        return self._contents[self._contents.find(markers_dict['s']) + len(markers_dict['s']):self._contents.find(markers_dict['e'])]

    def _res_header(self):
        """
        Parse the header of the .res file.
        Add the attributes:
        - getdp_version: GetDP version that created the .res file
        - encoding: encoding of the .res file
        - res_file_format: format of the .res file
        """
        self.getdp_version = self.__get_content_between_markers(self._getdp_version_markers)
        self.encoding = self.__get_content_between_markers(self._encoding_markers)
        self.res_file_format = self.__get_content_between_markers(self._res_file_format)

    def _get_all_solution_blocks(self):
        """
        Add all unparsed solution blocks to the attribute _solution_blocks
        using regular expressions: a list of strings, each holding exactly
        one $Solution...$EndSolution block.
        """
        solution_string = self._contents[self._contents.find('$Solution'):]
        self._solution_blocks = re.findall(r'\$Solution.*?\$EndSolution', solution_string, re.DOTALL)

    def _parse_res_file_single_solution_block(self, solution_block_split_by_line):
        """
        Parse one $Solution block (already split into lines) and append its
        header fields and solution vector to self.solution.
        """
        # line 0 is the "$Solution ..." marker line and is ignored;
        # line 1 is the block header: dof_data time_real time_imag time_step
        header_split = solution_block_split_by_line[1].split()
        self.solution['dof_data'].append(int(header_split[0]))
        self.solution['time_real'].append(float(header_split[1]))
        self.solution['time_imag'].append(float(header_split[2]))
        self.solution['time_step'].append(int(header_split[3]))
        # remaining lines up to the "$EndSolution" marker are the DOF values
        self.solution['solution'].append(
            [float(entry) for entry in solution_block_split_by_line[2:-1]]
        )

    @staticmethod
    def __get_lines(data_str):
        """
        Converts a text string into a list of lines (double quotes are
        stripped in the process).
        """
        data_str = re.sub('\n', "'", data_str)
        data_str = re.sub('"', '', data_str)
        str_list = re.split("'", data_str)
        return str_list

    def _parse_res_file_solution_blocks(self):
        """
        Parse every extracted $Solution block into self.solution.
        """
        for solution_block in self._solution_blocks:
            # split by line
            solution_block_split_by_line = self.__get_lines(solution_block)
            self._parse_res_file_single_solution_block(solution_block_split_by_line)

    def _parse_res_file(self):
        """Parse header and all solution blocks of the loaded file."""
        self._res_header()
        self._get_all_solution_blocks()
        self._parse_res_file_solution_blocks()

    def _write_res_file(self):
        """Serialize self._write_data to self._res_file_path."""
        with open(self._res_file_path, "w") as f:
            # write header
            f.write(f"$ResFormat /* {self._write_data.getdp_version}, {self._write_data.encoding} */\n")
            # write res file format (the leading 1.1 is mandated by GetDP)
            f.write(f"1.1 {self._write_data.res_file_format}\n")
            f.write("$EndResFormat\n")

            self._write_solution_block(f)

    def _write_solution_block(self, f):
        """Write one $Solution block per stored time step to file object f."""
        for time_real, time_imag, time_step, dof_data, solution in zip(self._write_data.solution['time_real'], self._write_data.solution['time_imag'], self._write_data.solution['time_step'], self._write_data.solution['dof_data'], self._write_data.solution['solution']):

            f.write(f"$Solution /* DofData #{dof_data} */\n")
            f.write(f"{dof_data} {time_real:.16g} {time_imag:.16g} {time_step}\n")
            f.write('\n'.join('{0:.16g}'.format(sol_entry) for sol_entry in solution))
            f.write("\n$EndSolution\n")
138
+
139
+ # ==============================================================================
140
+ #parsedRes = ParserRES('test.res')
141
+ #ParserRES('test_written.res', write_data=parsedRes)
142
+ #import filecmp
143
143
  #print(filecmp.cmp('test.res', 'test_written.res'))