fiqus 2024.7.0__py3-none-any.whl → 2024.12.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. fiqus/MainFiQuS.py +290 -134
  2. fiqus/data/DataConductor.py +301 -301
  3. fiqus/data/DataFiQuS.py +128 -84
  4. fiqus/data/DataFiQuSCCT.py +150 -150
  5. fiqus/data/DataFiQuSConductor.py +84 -84
  6. fiqus/data/DataFiQuSConductorAC_Strand.py +565 -565
  7. fiqus/data/DataFiQuSMultipole.py +716 -42
  8. fiqus/data/DataFiQuSPancake3D.py +737 -278
  9. fiqus/data/DataMultipole.py +180 -15
  10. fiqus/data/DataRoxieParser.py +90 -51
  11. fiqus/data/DataSettings.py +121 -0
  12. fiqus/data/DataWindingsCCT.py +37 -37
  13. fiqus/data/RegionsModelFiQuS.py +18 -6
  14. fiqus/geom_generators/GeometryCCT.py +905 -905
  15. fiqus/geom_generators/GeometryConductorAC_Strand.py +1391 -1391
  16. fiqus/geom_generators/GeometryMultipole.py +1827 -227
  17. fiqus/geom_generators/GeometryPancake3D.py +316 -117
  18. fiqus/geom_generators/GeometryPancake3DUtils.py +549 -0
  19. fiqus/getdp_runners/RunGetdpCCT.py +4 -4
  20. fiqus/getdp_runners/RunGetdpConductorAC_Strand.py +201 -201
  21. fiqus/getdp_runners/RunGetdpMultipole.py +115 -42
  22. fiqus/getdp_runners/RunGetdpPancake3D.py +28 -6
  23. fiqus/mains/MainCCT.py +2 -2
  24. fiqus/mains/MainConductorAC_Strand.py +132 -132
  25. fiqus/mains/MainMultipole.py +113 -62
  26. fiqus/mains/MainPancake3D.py +63 -23
  27. fiqus/mesh_generators/MeshCCT.py +209 -209
  28. fiqus/mesh_generators/MeshConductorAC_Strand.py +656 -656
  29. fiqus/mesh_generators/MeshMultipole.py +1243 -181
  30. fiqus/mesh_generators/MeshPancake3D.py +275 -192
  31. fiqus/parsers/ParserCOND.py +825 -0
  32. fiqus/parsers/ParserDAT.py +16 -16
  33. fiqus/parsers/ParserGetDPOnSection.py +212 -212
  34. fiqus/parsers/ParserGetDPTimeTable.py +134 -134
  35. fiqus/parsers/ParserMSH.py +53 -53
  36. fiqus/parsers/ParserPOS.py +214 -214
  37. fiqus/parsers/ParserRES.py +142 -142
  38. fiqus/plotters/PlotPythonCCT.py +133 -133
  39. fiqus/plotters/PlotPythonConductorAC.py +855 -855
  40. fiqus/plotters/PlotPythonMultipole.py +18 -18
  41. fiqus/post_processors/PostProcessCCT.py +440 -440
  42. fiqus/post_processors/PostProcessConductorAC.py +49 -49
  43. fiqus/post_processors/PostProcessMultipole.py +353 -229
  44. fiqus/post_processors/PostProcessPancake3D.py +8 -13
  45. fiqus/pre_processors/PreProcessCCT.py +175 -175
  46. fiqus/pro_assemblers/ProAssembler.py +14 -6
  47. fiqus/pro_material_functions/ironBHcurves.pro +246 -246
  48. fiqus/pro_templates/combined/CCT_template.pro +274 -274
  49. fiqus/pro_templates/combined/ConductorAC_template.pro +1025 -1025
  50. fiqus/pro_templates/combined/Multipole_template.pro +1694 -126
  51. fiqus/pro_templates/combined/Pancake3D_template.pro +2294 -1103
  52. fiqus/pro_templates/combined/TSA_materials.pro +162 -0
  53. fiqus/pro_templates/combined/materials.pro +36 -18
  54. fiqus/utils/Utils.py +508 -110
  55. fiqus/utils/update_data_settings.py +33 -0
  56. fiqus-2024.12.1.dist-info/METADATA +132 -0
  57. fiqus-2024.12.1.dist-info/RECORD +84 -0
  58. {fiqus-2024.7.0.dist-info → fiqus-2024.12.1.dist-info}/WHEEL +1 -1
  59. tests/test_FiQuS.py +1 -1
  60. tests/test_geometry_generators.py +101 -2
  61. tests/test_mesh_generators.py +154 -1
  62. tests/test_solvers.py +115 -21
  63. tests/utils/fiqus_test_classes.py +85 -21
  64. tests/utils/generate_reference_files_ConductorAC.py +57 -57
  65. tests/utils/generate_reference_files_Pancake3D.py +4 -5
  66. tests/utils/helpers.py +97 -97
  67. fiqus-2024.7.0.dist-info/METADATA +0 -103
  68. fiqus-2024.7.0.dist-info/RECORD +0 -79
  69. {fiqus-2024.7.0.dist-info → fiqus-2024.12.1.dist-info}/top_level.txt +0 -0
tests/test_solvers.py CHANGED
@@ -16,10 +16,19 @@ class TestSolvers(FiQuSSolverTests):
             "TEST_Pancake3D_TSA",
             "TEST_Pancake3D_TSAStructured",
             "TEST_Pancake3D_TSAInsulating",
+            "TEST_Pancake3D_TSAInsulatingJcVsLength",
+            "TEST_Pancake3D_TSAInsulatingJcVsLength_thermalOnly",
         ]
-        solve_types = ["electromagnetic", "weaklyCoupled", "stronglyCoupled"]
+        solve_types = ["electromagnetic", "weaklyCoupled", "stronglyCoupled", "thermal"]
         for model_name in model_names:
             for solve_type in solve_types:
+
+                if solve_type == "thermal" and not model_name == "TEST_Pancake3D_TSAInsulatingJcVsLength_thermalOnly":
+                    continue
+
+                if solve_type != "thermal" and model_name == "TEST_Pancake3D_TSAInsulatingJcVsLength_thermalOnly":
+                    continue
+
                 with self.subTest(model_name=model_name, solve_type=solve_type):
                     data_model: FDM = self.get_data_model(model_name)

@@ -28,7 +37,7 @@ class TestSolvers(FiQuSSolverTests):
                     data_model.run.solution = solve_type

                     if solve_type in ["weaklyCoupled", "stronglyCoupled"]:
-                        data_model.magnet.solve.save = [
+                        data_model.magnet.solve.quantitiesToBeSaved = [
                             Pancake3D.Pancake3DSolveSaveQuantity(
                                 quantity="magneticField",
                             ),
@@ -40,7 +49,7 @@ class TestSolvers(FiQuSSolverTests):
                             ),
                         ]
                     elif solve_type == "electromagnetic":
-                        data_model.magnet.solve.save = [
+                        data_model.magnet.solve.quantitiesToBeSaved = [
                             Pancake3D.Pancake3DSolveSaveQuantity(
                                 quantity="magneticField",
                             ),
@@ -48,6 +57,12 @@ class TestSolvers(FiQuSSolverTests):
                                 quantity="currentDensity",
                             ),
                         ]
+                    elif solve_type == "thermal":
+                        data_model.magnet.solve.quantitiesToBeSaved = [
+                            Pancake3D.Pancake3DSolveSaveQuantity(
+                                quantity="temperature",
+                            ),
+                        ]

                     self.solve(data_model, model_name)

@@ -65,19 +80,20 @@ class TestSolvers(FiQuSSolverTests):
                     self.compare_text_files(pro_file, reference_pro_file)

                     # Compare the results files:
-                    pos_file = self.get_path_to_generated_file(
-                        data_model=data_model,
-                        file_name="MagneticField-DefaultFormat",
-                        file_extension="pos",
-                    )
-                    reference_pos_file = self.get_path_to_reference_file(
-                        data_model=data_model,
-                        file_name="MagneticField-DefaultFormat",
-                        file_extension="pos",
-                    )
-                    self.compare_pos_files(pos_file, reference_pos_file)
+                    if solve_type in ["electromagnetic", "weaklyCoupled", "stronglyCoupled"]:
+                        pos_file = self.get_path_to_generated_file(
+                            data_model=data_model,
+                            file_name="MagneticField-DefaultFormat",
+                            file_extension="pos",
+                        )
+                        reference_pos_file = self.get_path_to_reference_file(
+                            data_model=data_model,
+                            file_name="MagneticField-DefaultFormat",
+                            file_extension="pos",
+                        )
+                        self.compare_pos_files(pos_file, reference_pos_file, rel_tolerance=1e-3, abs_tolerance=1e-3)

-                    if solve_type in ["weaklyCoupled", "stronglyCoupled"]:
+                    if solve_type in ["weaklyCoupled", "stronglyCoupled", "thermal"]:
                         pos_file = self.get_path_to_generated_file(
                             data_model=data_model,
                             file_name="Temperature-DefaultFormat",
@@ -88,15 +104,16 @@ class TestSolvers(FiQuSSolverTests):
                             file_name="Temperature-DefaultFormat",
                             file_extension="pos",
                         )
-                        self.compare_pos_files(pos_file, reference_pos_file)
+                        self.compare_pos_files(pos_file, reference_pos_file, rel_tolerance=1e-3, abs_tolerance=1e-3)

     def test_ConductorAC_Strand(self):
         """
-        Checks if ConductorAC_Strand solvers work correctly by comparing the results to the
+        Checks if CACStrand solvers work correctly by comparing the results to the
         reference results that were checked manually.
         """
         model_names = [
             "TEST_CAC_Strand_hexFilaments",
+            "TEST_CAC_Strand_adaptiveMesh",
             "TEST_CAC_wireInChannel",
         ]
         for model_name in model_names:
@@ -105,7 +122,7 @@ class TestSolvers(FiQuSSolverTests):

                 self.solve(data_model, model_name)

-                # Compare the pro files:
+                # Compare the pro files:
                 pro_file = self.get_path_to_generated_file(
                     data_model=data_model,
                     file_name=model_name,
@@ -116,7 +133,9 @@ class TestSolvers(FiQuSSolverTests):
                     file_name=model_name,
                     file_extension="pro",
                 )
-                self.compare_text_files(pro_file, reference_pro_file)
+                # This makes no sense as long as the development on the Strand model pro-template is ongoing ...
+                # Comparing the field solutions should ensure solver consistency without relying on the exact template structure. Skipping the pro-templates for now ~ AG
+                #self.compare_text_files(pro_file, reference_pro_file)

                 # Compare the magnetic flux density files:
                 pos_file = self.get_path_to_generated_file(
@@ -129,7 +148,7 @@ class TestSolvers(FiQuSSolverTests):
                     file_name="b",
                     file_extension="pos",
                 )
-                self.compare_pos_files(pos_file, reference_pos_file)
+                self.compare_pos_files(pos_file, reference_pos_file, rel_tolerance=1e-3, abs_tolerance=1e-10)

                 # Compare the current density files:
                 pos_file = self.get_path_to_generated_file(
@@ -142,7 +161,82 @@ class TestSolvers(FiQuSSolverTests):
                     file_name="j",
                     file_extension="pos",
                 )
-                self.compare_pos_files(pos_file, reference_pos_file)
+                self.compare_pos_files(pos_file, reference_pos_file, rel_tolerance=1e-3, abs_tolerance=1e-10)
+
+
+    def test_Multipole(self):
+        """
+        Checks if Multipole solvers work correctly by comparing the results to the
+        reference results that were checked manually.
+        """
+        model_names = [
+            "TEST_MULTIPOLE_MBH_1in1_TSA_withQH",
+            "TEST_MULTIPOLE_MBH_1in1_TSA",
+            "TEST_MULTIPOLE_MBH_1in1_REF",
+            "TEST_MULTIPOLE_SMC_TSA_withQH",
+            "TEST_MULTIPOLE_SMC_TSA",
+            "TEST_MULTIPOLE_SMC_REF",
+            "TEST_MULTIPOLE_4COND_TSA",
+        ]
+        for model_name in model_names:
+            with self.subTest(model_name=model_name):
+                data_model: FDM = self.get_data_model(model_name)
+
+                self.solve(data_model, model_name)
+
+                # Compare the pro files:
+                pro_file = self.get_path_to_generated_file(
+                    data_model=data_model,
+                    file_name=model_name,
+                    file_extension="pro",
+                )
+                reference_pro_file = self.get_path_to_reference_file(
+                    data_model=data_model,
+                    file_name=model_name,
+                    file_extension="pro",
+                )
+
+                self.compare_text_files(pro_file, reference_pro_file, exclude_lines_keywords=["NameOfMesh"], exclude_first_n_lines=1)
+
+                # Compare the magnetic flux density files:
+                pos_file = self.get_path_to_generated_file(
+                    data_model=data_model,
+                    file_name="b_Omega",
+                    file_extension="pos",
+                )
+                reference_pos_file = self.get_path_to_reference_file(
+                    data_model=data_model,
+                    file_name="b_Omega",
+                    file_extension="pos",
+                )
+                self.compare_pos_files(pos_file, reference_pos_file, rel_tolerance=1e-3, abs_tolerance=1e-2)
+
+                # Compare the temperature files:
+                pos_file = self.get_path_to_generated_file(
+                    data_model=data_model,
+                    file_name="T_Omega_c",
+                    file_extension="pos",
+                )
+                reference_pos_file = self.get_path_to_reference_file(
+                    data_model=data_model,
+                    file_name="T_Omega_c",
+                    file_extension="pos",
+                )
+                self.compare_pos_files(pos_file, reference_pos_file, rel_tolerance=1e-3, abs_tolerance=1e-2)
+
+                # Compare the solve yaml files
+                solve_file = self.get_path_to_generated_file(
+                    data_model=data_model,
+                    file_name="solve",
+                    file_extension="yaml",
+                )
+                reference_solve_file = self.get_path_to_reference_file(
+                    data_model=data_model,
+                    file_name="solve",
+                    file_extension="yaml",
+                )
+                self.compare_json_or_yaml_files(solve_file, reference_solve_file)
+

 if __name__ == "__main__":
     unittest.main()
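
Note on the rename visible in the hunks above: the Pancake3D solve option `save` from 2024.7.0 is called `quantitiesToBeSaved` in 2024.12.1. A minimal sketch of the new spelling, using only names that appear in this diff (the input YAML file name is hypothetical):

    import fiqus.data.DataFiQuSPancake3D as Pancake3D
    from fiqus.data.DataFiQuS import FDM
    from fiqus.utils.Utils import FilesAndFolders as Util

    # Hypothetical Pancake3D input file; 2024.7.0 code set data_model.magnet.solve.save instead.
    data_model: FDM = Util.read_data_from_yaml("TEST_Pancake3D_TSA.yaml", FDM)
    data_model.magnet.solve.quantitiesToBeSaved = [
        Pancake3D.Pancake3DSolveSaveQuantity(quantity="magneticField"),
        Pancake3D.Pancake3DSolveSaveQuantity(quantity="temperature"),
    ]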

tests/utils/fiqus_test_classes.py CHANGED
@@ -148,10 +148,10 @@ class BaseClassesForTests(unittest.TestCase):
         )

         shutil.copytree(
-            reference_geometry_folder, output_geometry_folder, dirs_exist_ok=True
+            reference_geometry_folder, output_geometry_folder, dirs_exist_ok=True, ignore=shutil.ignore_patterns('Mesh*')
         )
         shutil.copytree(
-            reference_mesh_folder, output_mesh_folder, dirs_exist_ok=True
+            reference_mesh_folder, output_mesh_folder, dirs_exist_ok=True, ignore=shutil.ignore_patterns('Solution*')
         )

         # Run FiQuS:
@@ -159,7 +159,7 @@ class BaseClassesForTests(unittest.TestCase):
             model_folder=model_folder,
             input_file_path=self.get_input_file_path(model_name),
             fdm=data_model,
-            verbose=False,
+            # verbose=False,
         )

     def get_path_to_generated_file(
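
The `ignore=shutil.ignore_patterns(...)` arguments added above rely on standard-library behaviour: `shutil.ignore_patterns` builds a callable that `copytree` consults in each directory to skip matching entries. A minimal standalone sketch with hypothetical folder names:

    import shutil

    # Copy a reference geometry tree but leave out anything starting with "Mesh",
    # mirroring the ignore pattern used in the test setup above.
    shutil.copytree(
        "reference_geometry_folder",  # hypothetical source
        "output_geometry_folder",     # hypothetical destination
        dirs_exist_ok=True,
        ignore=shutil.ignore_patterns("Mesh*"),
    )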
@@ -414,7 +414,7 @@ class BaseClassesForTests(unittest.TestCase):

         return reference_file

-    def compare_json_or_yaml_files(self, file_1, file_2, tolerance=0):
+    def compare_json_or_yaml_files(self, file_1, file_2, tolerance=0,excluded_keys=None):
         """
         This method compares the contents of two JSON or YAML files. It is used to
         check that the generated files are the same as the reference.
@@ -423,6 +427,10 @@ class BaseClassesForTests(unittest.TestCase):
         :type file_1: Union[str, os.PathLike]
         :param file_2: path to the second file
         :type file_2: Union[str, os.PathLike]
+        :param tolerance: tolerance for numeric differences (default is 0)
+        :type tolerance: int or float
+        :param excluded_keys: keys to exclude from comparison (default is None)
+        :type excluded_keys: List[str]
         """
         try:
             # YAML is a superset of JSON, so we can use the same parser for both:
@@ -435,6 +439,11 @@ class BaseClassesForTests(unittest.TestCase):
         except:
             raise ValueError("The files must be JSON or YAML files!")

+        # Remove excluded keys from both dictionaries
+        if excluded_keys:
+            file_1_dictionary = self._remove_excluded_keys(file_1_dictionary, excluded_keys)
+            file_2_dictionary = self._remove_excluded_keys(file_2_dictionary, excluded_keys)
+
         # Compare the dictionaries:
         if tolerance == 0:
             self.assertDictEqual(
@@ -445,6 +454,27 @@ class BaseClassesForTests(unittest.TestCase):
         else:
             self.compare_dicts(file_1_dictionary, file_2_dictionary, tolerance)

+
+    def _remove_excluded_keys(self, data, excluded_keys):
+        """
+        Recursively removes excluded keys from a dictionary.
+
+        :param data: the dictionary to process
+        :type data: dict
+        :param excluded_keys: the keys to remove
+        :type excluded_keys: List[str]
+        :return: the dictionary without excluded keys
+        :rtype: dict
+        """
+        if not isinstance(data, dict):
+            return data # Return non-dict types unchanged
+
+        return {
+            key: self._remove_excluded_keys(value, excluded_keys)
+            for key, value in data.items()
+            if key not in excluded_keys
+        }
+
     def compare_dicts(self, dict1, dict2, tolerance):
         """
         This method compares the contents of two dictionaries, taking into account
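
The `_remove_excluded_keys` helper above strips excluded keys at every dictionary level, recursing only into values that are themselves dictionaries. A standalone sketch of the same comprehension with made-up data, showing what the YAML comparison then ignores:

    def remove_excluded_keys(data, excluded_keys):
        # Same logic as the test helper: drop excluded keys at every dict level.
        if not isinstance(data, dict):
            return data
        return {
            key: remove_excluded_keys(value, excluded_keys)
            for key, value in data.items()
            if key not in excluded_keys
        }

    solve_summary = {"time_stamp": "2024-12-01", "losses": {"total": 1.2, "time_stamp": "n/a"}}  # made-up data
    print(remove_excluded_keys(solve_summary, ["time_stamp"]))  # -> {'losses': {'total': 1.2}}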
@@ -493,8 +523,23 @@ class BaseClassesForTests(unittest.TestCase):
             filecmp.cmp(file_1, file_2),
             msg=f"{file_1} did not match {file_2}!",
         )
+
+    def filter_content(self, file_path, keywords, n):
+        """
+        Read a file and return its content as a string,
+        excluding lines containing any of the specified keywords.
+        It also skips the first n lines.
+        This looping is slower than the filecmp.cmp method, but it is more flexible.
+        """
+        with open(file_path, 'r', encoding='utf-8') as f:
+            # Skip the first N lines
+            for _ in range(n):
+                next(f, None)
+            # Filter remaining lines
+            return ''.join(line for line in f if not any(keyword in line for keyword in keywords))

-    def compare_text_files(self, file_1, file_2):
+
+    def compare_text_files(self, file_1, file_2, exclude_lines_keywords: list = None, exclude_first_n_lines: int = 0):
         """
         This method compares the contents of two files. It is used to check that the
         generated files are the same as the reference.
@@ -504,11 +549,17 @@ class BaseClassesForTests(unittest.TestCase):
         :param file_2: path to the second file
         :type file_2: Union[str, os.PathLike]
         """
-        # Compare the files:
-        self.assertTrue(
-            filecmp.cmp(file_1, file_2),
-            msg=f"{file_1} did not match {file_2}!",
-        )
+        if exclude_lines_keywords:
+            # more complicated check that needs to loop through the lines
+            filtered_content1 = self.filter_content(file_1, exclude_lines_keywords, exclude_first_n_lines)
+            filtered_content2 = self.filter_content(file_2, exclude_lines_keywords, exclude_first_n_lines)
+            self.assertTrue(filtered_content1 == filtered_content2)
+        else:
+            # Compare the files with a binary check
+            self.assertTrue(
+                filecmp.cmp(file_1, file_2),
+                msg=f"{file_1} did not match {file_2}!",
+            )


 class FiQuSGeometryTests(BaseClassesForTests):
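
The effect of `exclude_lines_keywords` and `exclude_first_n_lines` in the comparison above can be reproduced on plain strings. A small self-contained sketch (file contents are made up), mirroring how `test_Multipole` compares .pro files while skipping the first line and any line naming the mesh:

    def filter_lines(text, keywords, skip_first_n):
        # Drop the first N lines, then drop any line containing one of the keywords.
        lines = text.splitlines(keepends=True)[skip_first_n:]
        return "".join(line for line in lines if not any(k in line for k in keywords))

    generated = "// banner v1\nNameOfMesh = run_001.msh\nFunction f = 1;\n"  # made-up contents
    reference = "// banner v2\nNameOfMesh = reference.msh\nFunction f = 1;\n"
    assert filter_lines(generated, ["NameOfMesh"], 1) == filter_lines(reference, ["NameOfMesh"], 1)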
@@ -547,7 +598,7 @@ class FiQuSGeometryTests(BaseClassesForTests):
         """
         # Initialize gmsh:
         gmsh_utils = GmshUtils(verbose=False)
-        gmsh_utils.initialize()
+        gmsh_utils.initialize(verbosity_Gmsh=0)

         # Open the geometry files and get the entities:
         model_entities = {}
@@ -619,7 +670,7 @@ class FiQuSMeshTests(BaseClassesForTests):
         """
         # Initialize gmsh:
         gmsh_utils = GmshUtils(verbose=False)
-        gmsh_utils.initialize()
+        gmsh_utils.initialize(verbosity_Gmsh=0)

         # Open the mesh files and get the average mesh quality:
         average_mesh_qualities = []
@@ -669,7 +720,7 @@ class FiQuSSolverTests(BaseClassesForTests):
         self.model_name = model_name

     def compare_pos_files(
-        self, pos_file_1: Union[str, os.PathLike], pos_file_2: Union[str, os.PathLike]
+        self, pos_file_1: Union[str, os.PathLike], pos_file_2: Union[str, os.PathLike], rel_tolerance: float = 1e-10, abs_tolerance: float = 0
     ):
         """
         This method compares the contents of two pos files. It is used to check that
@@ -682,15 +733,19 @@ class FiQuSSolverTests(BaseClassesForTests):
         """
         # Initialize gmsh:
         gmsh_utils = GmshUtils(verbose=False)
-        gmsh_utils.initialize()
+        gmsh_utils.initialize(verbosity_Gmsh=0)

         # Open the pos files and get the model data:
         model_datas = []
-        for pos_file in [pos_file_1, pos_file_2]:
+        time_steps = [0, 0]
+        for idx, pos_file in enumerate([pos_file_1, pos_file_2]):
+            # remove all old views
+            gmsh.clear()
             # Open the pos file:
             gmsh.open(pos_file)
             data_all_steps = []
-            for time_step in range(1, 100):
+
+            while True:
                 # Save all available time steps up to 100:
                 try:
                     (
@@ -699,18 +754,27 @@ class FiQuSSolverTests(BaseClassesForTests):
                         data,
                         time,
                         numComponents,
-                    ) = gmsh.view.getHomogeneousModelData(tag=0, step=time_step)
+                    ) = gmsh.view.getHomogeneousModelData(tag=0, step=time_steps[idx])
                     data_all_steps.extend(list(data))
+                    time_steps[idx] += 1 # Move to the next time step
                 except:
+                    print(f"Finished reading {pos_file} at time step {time_steps[idx]}.")
                     break

             model_datas.append(data_all_steps)

-        # Make sure the pos files are close enough:
+        # Make sure the number of time steps are the same:
+        self.assertEqual(
+            time_steps[0],
+            time_steps[1],
+            msg=f"{pos_file_1} and {pos_file_2} do not have the same number of time steps!",
+        )
+
+        # Make sure the pos files are the same length:
         self.assertEqual(
             len(model_datas[0]),
             len(model_datas[1]),
-            msg=f"{pos_file_1} and {pos_file_2} did not match!",
+            msg=f"{pos_file_1} and {pos_file_2} are not the same length!",
         )

         # Convert to numppy array:
@@ -721,8 +785,8 @@ class FiQuSSolverTests(BaseClassesForTests):
         np.testing.assert_allclose(
             model_data1,
             model_data2,
-            rtol=1e-05,
-            atol=1e-08,
+            rtol=rel_tolerance,
+            atol=abs_tolerance,
         )

     def get_path_to_generated_file(
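
The `rel_tolerance` and `abs_tolerance` values passed to `compare_pos_files` in the tests above end up in `np.testing.assert_allclose`, which accepts each entry when |generated - reference| <= abs_tolerance + rel_tolerance * |reference|. A minimal sketch with made-up field values:

    import numpy as np

    reference_field = np.array([1.0, 2.0, 0.0])            # made-up reference values
    generated_field = np.array([1.0009, 2.0018, 0.0005])   # made-up solver output

    # Passes: every entry satisfies |gen - ref| <= 1e-3 + 1e-3 * |ref|.
    np.testing.assert_allclose(generated_field, reference_field, rtol=1e-3, atol=1e-3)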

tests/utils/generate_reference_files_ConductorAC.py CHANGED
@@ -1,57 +1,57 @@
-import os
-import shutil
-import sys
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))) # Add the path to the fiqus package to the system path
-from fiqus.data.DataFiQuS import FDM
-from fiqus.utils.Utils import FilesAndFolders as Util
-from fiqus import MainFiQuS as mf
-
-
-# Generate reference files for the models below:
-model_names = [
-    "TEST_CAC_Strand_adaptiveMesh",
-    "TEST_CAC_Strand_hexFilaments",
-    "TEST_CAC_wireInChannel",
-]
-# The run types for the models above:
-run_types = [
-    'geometry_and_mesh',
-    'start_from_yaml',
-    'start_from_yaml',
-]
-
-for model_name, run_type in zip(model_names, run_types):
-    # get path to the input file:
-    input_file = os.path.join(
-        os.path.dirname(os.path.dirname(__file__)),
-        "_inputs",
-        model_name,
-        f"{model_name}.yaml",
-    )
-
-    # select _references folder as the output folder:
-    output_folder = os.path.join(
-        os.path.dirname(os.path.dirname(__file__)), "_references", model_name
-    )
-
-    # if the output folder exists, remove it:
-    if os.path.exists(output_folder):
-        shutil.rmtree(output_folder)
-
-    # Create the output folder:
-    os.makedirs(output_folder)
-
-    # Cast input yaml file to FDM
-    data_model: FDM = Util.read_data_from_yaml(input_file, FDM)
-
-    data_model.run.overwrite = True
-
-    # Make the run type start_from_yaml:
-    data_model.run.type = run_type
-
-    fiqus_instance = mf.MainFiQuS(
-        fdm=data_model, model_folder=output_folder, input_file_path=input_file
-    )
-
-    # remove fiqus_instance to avoid memory issues:
-    del fiqus_instance
+import os
+import shutil
+import sys
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))) # Add the path to the fiqus package to the system path
+from fiqus.data.DataFiQuS import FDM
+from fiqus.utils.Utils import FilesAndFolders as Util
+from fiqus import MainFiQuS as mf
+
+
+# Generate reference files for the models below:
+model_names = [
+    "TEST_CAC_Strand_adaptiveMesh",
+    "TEST_CAC_Strand_hexFilaments",
+    "TEST_CAC_wireInChannel",
+]
+# The run types for the models above:
+run_types = [
+    'geometry_and_mesh',
+    'start_from_yaml',
+    'start_from_yaml',
+]
+
+for model_name, run_type in zip(model_names, run_types):
+    # get path to the input file:
+    input_file = os.path.join(
+        os.path.dirname(os.path.dirname(__file__)),
+        "_inputs",
+        model_name,
+        f"{model_name}.yaml",
+    )
+
+    # select _references folder as the output folder:
+    output_folder = os.path.join(
+        os.path.dirname(os.path.dirname(__file__)), "_references", model_name
+    )
+
+    # if the output folder exists, remove it:
+    if os.path.exists(output_folder):
+        shutil.rmtree(output_folder)
+
+    # Create the output folder:
+    os.makedirs(output_folder)
+
+    # Cast input yaml file to FDM
+    data_model: FDM = Util.read_data_from_yaml(input_file, FDM)
+
+    data_model.run.overwrite = True
+
+    # Make the run type start_from_yaml:
+    data_model.run.type = run_type
+
+    fiqus_instance = mf.MainFiQuS(
+        fdm=data_model, model_folder=output_folder, input_file_path=input_file
+    )
+
+    # remove fiqus_instance to avoid memory issues:
+    del fiqus_instance

tests/utils/generate_reference_files_Pancake3D.py CHANGED
@@ -1,7 +1,6 @@
 import os
 import shutil
-import sys
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../'))) # Add the path to the fiqus package to the system path
+
 import fiqus.data.DataFiQuSPancake3D as Pancake3D
 from fiqus.data.DataFiQuS import FDM
 from fiqus.utils.Utils import FilesAndFolders as Util
@@ -31,7 +30,7 @@ for model_name in model_names:

     # Cast input yaml file to FDM
     data_model: FDM = Util.read_data_from_yaml(input_file, FDM)
-    data_model.magnet.postproc = Pancake3D.Pancake3DPostprocess()
+
     data_model.run.overwrite = True
     data_model.run.launch_gui = False

@@ -61,7 +60,7 @@ for model_name in model_names:
         data_model.magnet.solve.type = solve_type
         data_model.run.solution = solve_type
         if solve_type in ["weaklyCoupled", "stronglyCoupled"]:
-            data_model.magnet.solve.save = [
+            data_model.magnet.solve.quantitiesToBeSaved = [
                 Pancake3D.Pancake3DSolveSaveQuantity(
                     quantity="magneticField",
                 ),
@@ -73,7 +72,7 @@ for model_name in model_names:
                ),
            ]
        elif solve_type == "electromagnetic":
-            data_model.magnet.solve.save = [
+            data_model.magnet.solve.quantitiesToBeSaved = [
                Pancake3D.Pancake3DSolveSaveQuantity(
                    quantity="magneticField",
                ),