fiqus 2024.5.2__py3-none-any.whl → 2024.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. fiqus/MainFiQuS.py +15 -5
  2. fiqus/data/DataConductor.py +301 -0
  3. fiqus/data/DataFiQuS.py +5 -2
  4. fiqus/data/DataFiQuSConductor.py +84 -0
  5. fiqus/data/DataFiQuSConductorAC_Strand.py +565 -0
  6. fiqus/data/DataFiQuSPancake3D.py +149 -39
  7. fiqus/data/RegionsModelFiQuS.py +4 -2
  8. fiqus/geom_generators/GeometryCCT.py +19 -17
  9. fiqus/geom_generators/GeometryConductorAC_Strand.py +1391 -0
  10. fiqus/getdp_runners/RunGetdpConductorAC_Strand.py +202 -0
  11. fiqus/getdp_runners/RunGetdpMultipole.py +4 -4
  12. fiqus/mains/MainConductorAC_Strand.py +133 -0
  13. fiqus/mesh_generators/MeshCCT.py +8 -8
  14. fiqus/mesh_generators/MeshConductorAC_Strand.py +657 -0
  15. fiqus/mesh_generators/MeshMultipole.py +11 -8
  16. fiqus/mesh_generators/MeshPancake3D.py +20 -18
  17. fiqus/plotters/PlotPythonConductorAC.py +840 -0
  18. fiqus/post_processors/PostProcessConductorAC.py +49 -0
  19. fiqus/pro_assemblers/ProAssembler.py +4 -3
  20. fiqus/pro_templates/combined/CCT_template.pro +25 -25
  21. fiqus/pro_templates/combined/ConductorAC_template.pro +1025 -0
  22. fiqus/pro_templates/combined/Multipole_template.pro +5 -5
  23. fiqus/pro_templates/combined/Pancake3D_template.pro +131 -46
  24. fiqus/pro_templates/combined/materials.pro +13 -9
  25. {fiqus-2024.5.2.dist-info → fiqus-2024.6.0.dist-info}/METADATA +2 -1
  26. {fiqus-2024.5.2.dist-info → fiqus-2024.6.0.dist-info}/RECORD +34 -22
  27. {fiqus-2024.5.2.dist-info → fiqus-2024.6.0.dist-info}/WHEEL +1 -1
  28. tests/test_geometry_generators.py +41 -0
  29. tests/test_mesh_generators.py +45 -0
  30. tests/test_solvers.py +52 -0
  31. tests/utils/fiqus_test_classes.py +42 -6
  32. tests/utils/generate_reference_files_ConductorAC.py +57 -0
  33. tests/utils/generate_reference_files_Pancake3D.py +92 -0
  34. {fiqus-2024.5.2.dist-info → fiqus-2024.6.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,840 @@
1
+ import os
2
+ import numpy as np
3
+ import re
4
+ import matplotlib.pyplot as plt
5
+ from matplotlib.ticker import FuncFormatter
6
+ from matplotlib.animation import FuncAnimation
7
+ import pandas as pd
8
+ from scipy import integrate, interpolate
9
+ from ruamel.yaml import YAML
10
+
11
+ # from fiqus.utils.Utils import FilesAndFolders as Util
12
+ # from fiqus.data.DataFiQuSConductorAC_Strand import CACStrandSolve, CACStrandPostproc, CACStrandMesh, CACStrandGeometry
13
+
14
def create_non_overwriting_filepath(folder_path, base_name, extension, overwrite):
    """
    Creates a filepath that does not overwrite any existing files.

    This function checks if a file already exists at the specified filepath. If the file exists and `overwrite` is False,
    it modifies the filepath to create a new file instead of overwriting the existing one.
    If `overwrite` is True or the file does not exist, it returns the filepath as it is.

    Parameters
    ----------
    folder_path : str
        The path to the folder where the file will be created.
    base_name : str
        The base name of the file.
    extension : str
        The extension of the file (including the dot, e.g. '.png').
    overwrite : bool
        If True, the function will return the original filepath even if a file
        already exists there. If False, the filepath is modified to avoid overwriting.

    Returns
    -------
    str
        The final filepath. If `overwrite` is False and a file already exists at the
        original filepath, this is the first free path of the form
        ``<base_name>_<counter><extension>`` with counter = 1, 2, ...
    """
    candidate = os.path.join(folder_path, base_name + extension)
    if overwrite or not os.path.exists(candidate):
        return candidate

    # Probe base_name_1, base_name_2, ... until a free name is found.
    # (The original loop recomputed the same candidate once before incrementing
    # the counter; incrementing after each failed probe avoids that redundancy
    # while returning the exact same result.)
    counter = 1
    while True:
        candidate = os.path.join(folder_path, f"{base_name}_{counter}{extension}")
        if not os.path.exists(candidate):
            return candidate
        counter += 1
47
+
48
class YamlWrapper:
    """
    Dot-notation view over a nested mapping loaded from YAML.

    Nested dictionaries are wrapped recursively, so ``data.key.subkey`` works
    at any depth. Looking up a missing attribute yields None instead of
    raising AttributeError, which keeps older input files usable without a
    predefined pydantic model.
    """
    def __init__(self, data):
        for name, entry in data.items():
            wrapped = YamlWrapper(entry) if isinstance(entry, dict) else entry
            self.__dict__[name] = wrapped

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails, i.e. for keys that
        # were absent in the source mapping -> resolve to None.
        return self.__dict__.get(name)

    def __setattr__(self, name, value):
        # Store straight into the instance dict (no recursion through
        # __setattr__ machinery).
        self.__dict__[name] = value
63
+
64
def load_yaml(file_path):
    """
    Parse a YAML file and return its contents as a YamlWrapper.

    The wrapper enables dot-notation access (e.g. ``data.key.subkey``) without
    a predefined pydantic model, which preserves backwards compatibility with
    older input files.
    """
    parser = YAML()
    with open(file_path, 'r') as stream:
        raw = parser.load(stream)
    return YamlWrapper(raw)
72
+
73
+
74
class SimulationData:
    """
    Class used to store and manage data from a single simulation.

    This class is responsible for loading and organizing the data from a single simulation.
    It stores the data in various attributes and provides methods for retrieving and processing the data.

    The expected on-disk layout is
    ``<model_data_output_path>/<geometry>/<mesh>/<solution>/test_temporary/``,
    containing a ``power*.txt`` loss file plus further per-quantity text files.
    """
    def __init__(self, model_data_output_path, geometry_name, mesh_name, solution_name) -> None:
        self.model_data_output_path = model_data_output_path # This is the path to the folder where the model output data is stored (e.g. geometries)
        self.geometry_name = geometry_name # Name of the geometry folder
        self.mesh_name = mesh_name # Name of the mesh folder
        self.solution_name = solution_name # Name of the solution folder

        # Organize the folders:
        self.geometry_folder = os.path.join(self.model_data_output_path, geometry_name) # Path to the geometry folder
        self.mesh_folder = os.path.join(self.geometry_folder, mesh_name) # Path to the mesh folder
        self.solution_folder = os.path.join(self.mesh_folder, solution_name) # Path to the solution folder

        # Store the YAML input-files in a data model, fdm:
        self.geometry, self.mesh, self.solve = self.retrieve_fiqusDataModel()

        # Store losses, simulation time and check if the simulation crashed:
        temp_file_path = os.path.join(self.solution_folder, 'test_temporary')
        # The first file matching 'power*.txt' is taken as the loss file; raises
        # IndexError if none exists.
        loss_file = [f for f in os.listdir(temp_file_path) if f.startswith('power') and f.endswith('.txt')][0]
        self.power_columns = ['Time', 'FilamentLoss', 'CouplingLoss', 'EddyLoss', 'TotalLoss', 'CouplingLoss_dyn', 'TotalLoss_dyn'] # Only in the case dynamic correction is used, must be changed later
        self.power = pd.read_csv(os.path.join(self.solution_folder, 'test_temporary', loss_file), sep = ' ', names=self.power_columns) # Store instantaneous losses as pandas dataframe
        self.crash = True if 'crash_report.txt' in os.listdir(temp_file_path) else False # Presence of crash_report.txt marks a crashed run

        # Add a row of zeros at the beginning of the dataframe to account for the initial condition:
        self.power = pd.concat([pd.DataFrame({col: 0 for col in self.power_columns}, index=[0]), self.power]).reset_index(drop=True)
        # Integrate the losses to obtain the cumulative power and the total power per cycle:
        self.cumulative_power, self.total_power_per_cycle = self.integrate_power() # Store cumulative power and total cumulative power per cycle
        # Store simulation time:
        try:
            with open(os.path.join(self.solution_folder, 'test_temporary', 'simulation_time.txt'), 'r') as f:
                self.simulation_time = float(f.readline().strip())
        except:
            self.simulation_time = None # If the simulation time file does not exist, the simulation has not finished running.

        # Store the rest of the post-processing data:
        # (load_standard_data returns None for any file that is missing)
        self.time = self.power['Time']
        self.instantaneous_temperature = self.load_standard_data(os.path.join(temp_file_path, 'temperature.txt'), 1, add_initial_zero=True)
        self.temperature = self.load_standard_data(os.path.join(temp_file_path, 'temperature.txt'), 2, add_initial_zero=True)
        self.I_transport = self.load_standard_data(os.path.join(temp_file_path, 'I_transport.txt'), 1)
        self.V_transport = self.load_standard_data(os.path.join(temp_file_path, 'V_transport.txt'), 1)
        self.hs_val = self.load_standard_data(os.path.join(temp_file_path, 'hs_val.txt'), 1)
        self.magn_fil = self.load_standard_data(os.path.join(temp_file_path, 'magn_fil.txt'), [1, 2, 3])
        self.magn_matrix = self.load_standard_data(os.path.join(temp_file_path, 'magn_matrix.txt'), [1, 2, 3])
        self.I = self.load_standard_data(os.path.join(temp_file_path, 'I.txt'), 1, len(self.time))
        self.V = self.load_standard_data(os.path.join(temp_file_path, 'V.txt'), 1, len(self.time))
        self.I_integral = self.load_standard_data(os.path.join(temp_file_path, 'I_integral.txt'), 1, len(self.time))
        self.I_abs_integral = self.load_standard_data(os.path.join(temp_file_path, 'I_abs_integral.txt'), 1, len(self.time))
        self.magnetic_energy_internal = self.load_standard_data(os.path.join(temp_file_path, 'magnetic_energy_internal.txt'), 1)
        self.Ip = self.load_standard_data(os.path.join(temp_file_path, 'Ip.txt'), 1, len(self.time))
        self.Vp = self.load_standard_data(os.path.join(temp_file_path, 'Vp.txt'), 1, len(self.time))


    def load_standard_data(self, file_path, columns, reshape = None, add_initial_zero = False):
        """
        There are many output .txt-files with similar format. This function loads the data from one of these files and returns it as a numpy array.
        If the file does not exist, None is returned without raising an error.

        Parameters
        ----------
        file_path : str
            Path of the text file to load.
        columns : int or list of int
            Column index (or indices) passed to numpy.loadtxt's ``usecols``.
        reshape : int, optional
            If truthy, the flat data is reshaped to ``reshape`` columns and transposed.
        add_initial_zero : bool, optional
            If True, a leading zero entry (1-D) or row of zeros (2-D) is
            prepended, mirroring the zero-row added to the power dataframe
            for the initial condition.
        """
        try:
            data = np.loadtxt(file_path, comments='#', usecols=columns)
            if reshape:
                data = data.reshape(-1, reshape).T
        except IOError:
            return None

        if add_initial_zero:
            if len(data.shape) == 1:
                data = np.insert(data, 0, 0)
            else:
                zeros = np.zeros((1, data.shape[1]))
                data = np.vstack((zeros, data))
        return data

    def retrieve_fiqusDataModel(self):
        """
        This function reads the YAML input-files for geometry, mesh and solve and stores them in three dictionaries which are returned.
        This function is to be called only once, when the object is created.

        Returns
        -------
        tuple
            (geometry, mesh, solve) data models as YamlWrapper objects.
        """
        geometry_dataModel = load_yaml(os.path.join(self.geometry_folder, 'geometry.yaml'))
        mesh_dataModel = load_yaml(os.path.join(self.mesh_folder, 'mesh.yaml'))
        solution_dataModel = load_yaml(os.path.join(self.solution_folder, 'solve.yaml'))

        return geometry_dataModel, mesh_dataModel, solution_dataModel

    def integrate_power(self):
        """
        This function integrates the instantaneous power over time to obtain the cumulative power.
        It also calculates the total cumulative power per cycle.
        The cumulative power is returned as a pandas dataframe and the total cumulative power per cycle is returned as a dictionary.
        """
        # Index of the time sample closest to a given value:
        find_closest_idx = lambda arr, val: np.abs(arr - val).argmin()

        t = np.array(self.power['Time'])
        t_final = t[-1]
        # NOTE(review): t_init is an *index* (sample closest to half the total
        # time span); the per-cycle loss below assumes the second half of the
        # signal spans exactly half a period -- confirm against the solver setup.
        t_init = find_closest_idx(t, t_final/2)

        cumulative_power = pd.DataFrame(columns= self.power_columns)
        total_power_per_cycle = {}

        cumulative_power['Time'] = self.power["Time"]
        for column in self.power_columns[1:]:
            # Cumulative trapezoidal integral, with a leading zero so the result
            # has the same length as the time vector.
            cumulative_power[column] = np.insert(integrate.cumulative_trapezoid(self.power[column], t), 0, 0)
            total_power_per_cycle[column] = 2 * (cumulative_power[column].iloc[-1]-cumulative_power[column].iloc[t_init]) # / (np.pi*matrix_radius**2 * loss_factor) # Why do we divide by pi*matrix_radius**2*loss_factor?

        return cumulative_power, total_power_per_cycle

    def plot_instantaneous_power(self, show:bool = True, title:str = "Power", save_plot:bool = False, save_folder_path:str = None, save_file_name:str = None, overwrite:bool = False):
        """
        Plot the instantaneous power losses against time.

        The title may embed class attributes via '<< ... >>' placeholders,
        e.g. "Power (<<solve.source_parameters.sine.frequency>> Hz)".
        """
        plt.figure()
        plt.plot(self.power['Time'], self.power[self.power_columns[1:]] , label = self.power_columns[1:])
        plt.xlabel('Time [s]')
        plt.ylabel('Power [W/m]')
        plt.legend()

        # Configure title:
        # Class attributes can be accessed by using '<< ... >>' in the title string.
        commands = re.findall('<<(.*?)>>', title)
        for c in commands:
            # NOTE: eval of the placeholder text -- only use with trusted input.
            title = title.replace(f"<<{c}>>", str(eval('self.'+c)))
        plt.title(title)


        if save_plot: # Save the plot
            filePath = create_non_overwriting_filepath(save_folder_path, save_file_name, '.png', overwrite)
            plt.savefig(filePath)

        if show:
            plt.show()
        else:
            plt.close()
208
+
209
+ class PlotPython:
210
+ """
211
+ This class loads and stores the data from the simulations specified in a csv file and can apply various postprocessing operations on the data.
212
+ The data from each simulation is saved as a SimulationData object which is subsequently stored in a list in this class.
213
+ """
214
    def __init__(self, fdm, csv_filename = None, lossMap_gridData_folder = None, inputs_folder_path='', outputs_folder_path='') -> None:
        """
        Initialize the post-processor either from a batch CSV file or from
        previously saved loss-map grid data.

        Parameters
        ----------
        fdm : object
            FiQuS data model (YAML input) carrying postprocessing settings.
        csv_filename : str, optional
            Name (without extension) of the CSV file listing the simulations to load.
        lossMap_gridData_folder : str, optional
            Folder with previously saved loss-map grid data, used when no CSV is given.
        inputs_folder_path : str, optional
            Path to the folder holding input files.
        outputs_folder_path : str, optional
            Path where model output data lives and postprocessed data is written.

        Raises
        ------
        FileNotFoundError
            If the CSV file cannot be read.
        ValueError
            If neither a CSV file nor a grid-data folder is provided.
        """
        self.fdm = fdm
        self.inputs_folder_path = inputs_folder_path # This is the path to the folder where the input data is stored
        self.model_data_output_path = outputs_folder_path # This is the path to the folder where the model output data is stored (e.g. geometries)
        self.outputs_folder_path = os.path.join(outputs_folder_path, fdm.magnet.postproc.batch_postproc.output_folder) # This is the path to the folder where the postprocessed data is written


        if not os.path.exists(self.outputs_folder_path):
            os.makedirs(self.outputs_folder_path)

        if csv_filename is not None:
            try:
                self.input_csv = pd.read_csv(os.path.join(self.inputs_folder_path, f'{csv_filename}.csv')) # Read the csv file with the input data
            except:
                # NOTE(review): the message names postProc_csv while the file read
                # above is csv_filename -- these may differ; confirm which is intended.
                raise FileNotFoundError(f'No csv file with the name {fdm.magnet.postproc.batch_postproc.postProc_csv}.csv was found in the inputs folder.')

            self.simulation_collection = self.retrieve_simulation_data()

            self.avg_simulation_time = np.mean([sd.simulation_time for sd in self.simulation_collection])
            self.total_simulation_time = np.sum([sd.simulation_time for sd in self.simulation_collection])

            print('Number of simulations considered: ', len(self.simulation_collection) )
            print('Average simulation time: ', self.avg_simulation_time, 's')
            print('Total simulation time: ', self.total_simulation_time, 's')




        elif lossMap_gridData_folder is not None:
            # No simulations to load -- work from previously saved grid data instead.
            self.input_csv = None
            self.simulation_collection = None

            self.totalLoss_gridData = self.load_lossMap_gridData('TotalLoss', lossMap_gridData_folder)
            self.filamentLoss_gridData = self.load_lossMap_gridData('FilamentLoss', lossMap_gridData_folder)
            self.eddyLoss_gridData = self.load_lossMap_gridData('EddyLoss', lossMap_gridData_folder)
            self.couplingLoss_gridData = self.load_lossMap_gridData('CouplingLoss', lossMap_gridData_folder)
        else:
            raise ValueError('No input data specified. Either a csv file or a folder with loss map grid data must be provided.')
251
+
252
+
253
+
254
+
255
    def retrieve_simulation_data(self):
        """
        This function iterates over the input CSV-file (specifying which simulations to postprocess) and returns a list of SimulationData objects
        containing all the simulation data. If no CSV-file is specified, the data from the single simulation specified in the input YAML-file is returned.

        The returned collection is filtered and sorted according to the YAML
        postprocessing settings.
        """
        if self.input_csv is not None:
            simulationCollection = []
            for index, row in self.input_csv.iterrows():
                # Skip rows where geometry, mesh and solution are all empty:
                if pd.isna(row['input.run.geometry']) and pd.isna(row['input.run.mesh']) and pd.isna(row['input.run.solution']):
                    continue
                geometry_name = 'Geometry_'+str(row['input.run.geometry'])
                mesh_name = 'Mesh_'+str(row['input.run.mesh'])

                # Integer-valued floats (e.g. 3.0) become 'Solution_3', not 'Solution_3.0':
                if isinstance(row['input.run.solution'], float) and row['input.run.solution'].is_integer():
                    solution_name = 'Solution_'+str(int(row['input.run.solution']))
                else:
                    solution_name = 'Solution_'+str(row['input.run.solution'])

                # Check if the row refers to a valid simulation by checking if the solution folder exists:
                # solution_folder = os.path.join(os.getcwd(), 'tests', '_outputs', self.fdm.general.magnet_name, geometry_name, mesh_name, solution_name)
                solution_folder = os.path.join(self.model_data_output_path, geometry_name, mesh_name, solution_name)
                if os.path.exists(solution_folder): # If the solution folder exists, add the simulation to the simulationCollection
                    sd = SimulationData(self.model_data_output_path, geometry_name, mesh_name, solution_name)
                    if sd.simulation_time is not None: # Only add the simulation if it has finished running (and therefore has written the simulation time to a file)
                        simulationCollection.append(sd)
        else:
            simulationCollection = [SimulationData(self.model_data_output_path, 'Geometry_'+self.fdm.run.geometry, 'Mesh_'+self.fdm.run.mesh, 'Solution_'+self.fdm.run.solution)]

        return self.sort_simulationCollection(self.filter_simulationCollection(simulationCollection))
284
+
285
+ def filter_simulationCollection(self, simulationCollection):
286
+ """
287
+ This function is used to filter the simulationCollection based on the filter criterion specified in the yaml input file.
288
+ An example of a filter criterion is '<<solve.source_parameters.sine.frequency>> == 18', which will disregard all simulations with frequency != 18Hz.
289
+ """
290
+ if self.fdm.magnet.postproc.batch_postproc.filter.apply_filter:
291
+ filter_criterion = self.fdm.magnet.postproc.batch_postproc.filter.filter_criterion
292
+ class_params = re.findall('<<(.*?)>>', filter_criterion)
293
+ for cp in class_params:
294
+ filter_criterion = filter_criterion.replace(f"<<{cp}>>", 'sd.'+cp)
295
+ filtering_function = eval(f'lambda sd: {filter_criterion}')
296
+ return list(filter(filtering_function, simulationCollection))
297
+ else:
298
+ return simulationCollection
299
+
300
+ def sort_simulationCollection(self, simulationCollection):
301
+ """
302
+ This function is used to sort the simulationCollection based on the sort key specified in the yaml input file.
303
+ An example of a sort key is 'sd.fdm.solve.source_parameters.sine.frequency', which will sort the simulations based on frequency.
304
+ """
305
+ if self.fdm.magnet.postproc.batch_postproc.sort.apply_sort:
306
+ sorting_function = eval(f'lambda sd: sd.{self.fdm.magnet.postproc.batch_postproc.sort.sort_key}')
307
+ return sorted(simulationCollection, key=sorting_function)
308
+ else:
309
+ return simulationCollection
310
+
311
    def lossMap_createGridData(self, lossType = 'TotalLoss', x_val_to_include = None, y_val_to_include = None):
        """
        This function creates the grid data needed for the loss map, based on the yaml input file.
        Given a collection of simulations it interpolates the loss data between the datapoints to a grid and returns the grid data.

        Parameters
        ----------
        lossType : str, optional
            Key of total_power_per_cycle to grid (e.g. 'TotalLoss', 'EddyLoss').
        x_val_to_include, y_val_to_include : float, optional
            Specific axis values inserted into the grid (used for cross sections).

        Returns
        -------
        X, Y : meshgrid coordinates ('ij' indexing), log10-scaled if configured.
        V : loss values interpolated onto the grid.
        dataPoints : the (x, y) locations of the underlying simulations.
        """
        lm = self.fdm.magnet.postproc.batch_postproc.loss_map

        # Extract data from simulation collection and normalize
        # NOTE: lm.x_val / lm.y_val are attribute paths evaluated on each
        # SimulationData object via eval -- trusted YAML input only.
        x_arr = np.array([eval('sd.'+lm.x_val)/lm.x_norm for sd in self.simulation_collection])
        y_arr = np.array([eval('sd.'+lm.y_val)/lm.y_norm for sd in self.simulation_collection])
        loss = np.array([sd.total_power_per_cycle[lossType]/lm.loss_norm for sd in self.simulation_collection])

        # Logarithmic scaling
        if lm.x_log: x_arr = np.log10(x_arr)
        if lm.y_log: y_arr = np.log10(y_arr)
        if lm.loss_log: loss = np.log10(loss)

        x_arr_interpolated = np.linspace(min(x_arr), max(x_arr), lm.x_steps)
        y_arr_interpolated = np.linspace(min(y_arr), max(y_arr), lm.y_steps)
        # Insert specific values to the grid if they are not already included (useful for cross sections)
        # NOTE(review): assumes the value to include lies strictly below max(axis);
        # otherwise np.where(...)[0][0] raises IndexError.
        if x_val_to_include is not None and x_val_to_include not in x_arr_interpolated:
            x_arr_interpolated = np.insert(x_arr_interpolated, np.where(x_arr_interpolated > x_val_to_include)[0][0], x_val_to_include)
        if y_val_to_include is not None and y_val_to_include not in y_arr_interpolated:
            y_arr_interpolated = np.insert(y_arr_interpolated, np.where(y_arr_interpolated > y_val_to_include)[0][0], y_val_to_include)

        # Create grid
        X, Y = np.meshgrid(x_arr_interpolated, y_arr_interpolated, indexing='ij')
        gridPoints = np.c_[X.ravel(), Y.ravel()]
        dataPoints = np.c_[x_arr, y_arr]

        # Interpolate the simulation data onto the grid
        V = interpolate.griddata(
            dataPoints,
            loss,
            gridPoints,
            method='linear' # Cubic produces cleaner plots. Any incentive to go back to linear?
        ).reshape(X.shape)

        return X, Y, V, dataPoints
350
+
351
    def save_lossMap_gridData(self, save_folder_name = 'lossMap_gridData'):
        """
        This function calls the lossMap_createGridData function and saves the grid data.

        One text file per loss type is written with columns x, y, loss; any
        log10 scaling is undone first so files always hold linear values.
        """
        lm = self.fdm.magnet.postproc.batch_postproc.loss_map

        lossTypes = ['TotalLoss', 'FilamentLoss', 'EddyLoss', 'CouplingLoss', 'CouplingLoss_dyn', 'TotalLoss_dyn'] # Only in the case dynamic correction is used, must be changed later
        # 1) Create a folder to store the output files
        gridData_folder_path = create_non_overwriting_filepath(self.outputs_folder_path, save_folder_name, '', self.fdm.run.overwrite)
        if not os.path.exists(gridData_folder_path): os.makedirs(gridData_folder_path)
        # 2) Create the grid data for each loss type and save it
        for lossType in lossTypes:
            X, Y, V, _ = self.lossMap_createGridData(lossType)
            # Undo the log10 scaling before writing:
            if lm.x_log: X = np.power(10, X)
            if lm.y_log: Y = np.power(10, Y)
            if lm.loss_log: V = np.power(10, V)
            np.savetxt(os.path.join(gridData_folder_path, f'{lossType}.txt'), np.column_stack((X.ravel(), Y.ravel(), V.ravel())), delimiter=' ', header=f'{lm.x_val} {lm.y_val} {lossType}', comments='')
368
+
369
    def load_lossMap_gridData(self, lossType = 'TotalLoss', save_folder_name = 'lossMap_gridData'):
        """
        This function loads the grid data for a given loss type.

        Reads the three-column (x, y, loss) file written by
        save_lossMap_gridData, re-applies the configured log10 scaling, and
        reshapes the flat columns back into the grid.

        Raises
        ------
        FileNotFoundError
            If the grid-data folder does not exist.
        """
        lm = self.fdm.magnet.postproc.batch_postproc.loss_map
        gridData_folder_path = os.path.join(self.inputs_folder_path, save_folder_name)

        if not os.path.exists(gridData_folder_path):
            raise FileNotFoundError(f'The folder {gridData_folder_path} does not exist.')

        X, Y, V = np.loadtxt(os.path.join(gridData_folder_path, f'{lossType}.txt'), unpack=True, skiprows=1)

        if lm.x_log: X = np.log10(X)
        if lm.y_log: Y = np.log10(Y)
        if lm.loss_log: V = np.log10(V)

        # Get the unique counts of X and Y
        unique_X = np.unique(X)
        unique_Y = np.unique(Y)

        # Reshape the data
        # NOTE(review): assumes the file rows are x-major, matching the
        # meshgrid(..., indexing='ij').ravel() order used when saving.
        X = X.reshape((len(unique_X), len(unique_Y)))
        Y = Y.reshape((len(unique_X), len(unique_Y)))
        V = V.reshape((len(unique_X), len(unique_Y)))

        return X, Y, V
395
+
396
+ # def add_value_to_gridData(self, X, Y, V, x_val = None, y_val = None):
397
+ # """
398
+ # This function adds a value to the grid data.
399
+ # Steps:
400
+ # 1) Revert the grid data to 1D arrays
401
+ # 2) Add x or y value or both to the arrays
402
+ # 3) Reshape the arrays back to grid data, interpolating the loss to the new grid points
403
+ # """
404
+ # gridPoints
405
+
406
    def save_magnetization(self):
        """
        This function saves the magnetization data for all simulations in the simulation collection.

        For each simulation, the filament and matrix magnetizations are summed
        and written to a text file (columns t, x, y, z) named after the sine
        source's frequency, field amplitude and current amplitude.
        """
        magnetization_folder_path = create_non_overwriting_filepath(self.outputs_folder_path, 'magnetization', '', self.fdm.run.overwrite)
        if not os.path.exists(magnetization_folder_path): os.makedirs(magnetization_folder_path)
        for sd in self.simulation_collection:
            # Total magnetization = filament + matrix contributions:
            magnetization = sd.magn_fil + sd.magn_matrix
            magnetization = np.c_[sd.time, magnetization]
            np.savetxt(os.path.join(magnetization_folder_path, f'magn_f{sd.solve.source_parameters.sine.frequency}_b{sd.solve.source_parameters.sine.field_amplitude}_I{sd.solve.source_parameters.sine.current_amplitude}.txt'), magnetization, delimiter=' ', header='t x y z', comments='')
416
+
417
+
418
+
419
+
420
+
421
+
422
    def lossMap_crossSection(self, slice_value, axis_to_cut = 'x'):
        """
        This function returns the data corresponding to a cross section of the loss map, for all loss types.
        Given an axis and a value, it sweeps the other axis for the closest value and returns the data.
        Example: Given slice value 0 and axis x, it returns the data for the cross section at x = 0.

        Parameters
        ----------
        slice_value : float
            Value on `axis_to_cut` at which to slice (in grid units, i.e.
            already log10-scaled when that axis is logarithmic).
        axis_to_cut : str, optional
            'x' or 'y'. Other values are not handled and would fail below
            with UnboundLocalError.

        Returns
        -------
        tuple
            (slice_vals, totalLoss, filamentLoss, eddyLoss, couplingLoss)
            along the axis that was not cut.
        """

        lm = self.fdm.magnet.postproc.batch_postproc.loss_map
        if axis_to_cut == 'x':
            x_val_to_include = slice_value
            y_val_to_include = None
        elif axis_to_cut == 'y':
            x_val_to_include = None
            y_val_to_include = slice_value
        # The slice value is inserted into the grid so an (almost) exact row/column exists:
        X, Y, V, dataPoints = self.lossMap_createGridData('TotalLoss', x_val_to_include, y_val_to_include)
        _,_,FilamentLoss, _ = self.lossMap_createGridData('FilamentLoss', x_val_to_include, y_val_to_include)
        _,_,EddyLoss, _ = self.lossMap_createGridData('EddyLoss', x_val_to_include, y_val_to_include)
        _,_,CouplingLoss, _ = self.lossMap_createGridData('CouplingLoss', x_val_to_include, y_val_to_include)


        if axis_to_cut == 'x':
            # Grid row closest to the requested x-value:
            index = np.abs(X[:, 0] - slice_value).argmin()
            slice_vals = Y[index, :]

        elif axis_to_cut == 'y':
            # Grid column closest to the requested y-value:
            index = np.abs(Y[0, :] - slice_value).argmin()
            slice_vals = X[:, index]

        # Extract the loss values for the constant frequency across all applied fields
        totalLoss = V[index, :] if axis_to_cut == 'x' else V[:, index]
        filamentLoss = FilamentLoss[index, :] if axis_to_cut == 'x' else FilamentLoss[:, index]
        eddyLoss = EddyLoss[index, :] if axis_to_cut == 'x' else EddyLoss[:, index]
        couplingLoss = CouplingLoss[index, :] if axis_to_cut == 'x' else CouplingLoss[:, index]

        return slice_vals, totalLoss, filamentLoss, eddyLoss, couplingLoss
457
+
458
    def plot_lossMap_crossSection(self):
        """
        This function calls the lossMap_crossSection function and plots the data it returns, which is the loss for all values of one axis, given a constant value of the other axis.

        The figure can optionally be saved; it is shown only when the GUI is
        enabled in the run settings.
        """


        plt.rcParams['text.usetex'] = True
        plt.rcParams['font.family'] = 'times'
        plt.rcParams['font.size'] = 20

        lm = self.fdm.magnet.postproc.batch_postproc.loss_map
        slice_value = lm.cross_section.cut_value
        axis_to_cut = lm.cross_section.axis_to_cut

        # Work in log-space when the cut axis is logarithmic:
        if (lm.x_log and axis_to_cut == 'x') or (lm.y_log and axis_to_cut == 'y'):
            slice_value = np.log10(slice_value)

        slice_vals, totalLoss, filamentLoss, eddyLoss, couplingLoss = self.lossMap_crossSection(slice_value, axis_to_cut = axis_to_cut)

        def log_formatter(x, pos):
            """
            Format the tick labels on the plot.
            """
            return f"$10^{{{int(x)}}}$"

        # Plot the loss with respect to applied field for the constant frequency
        fig, ax = plt.subplots(figsize=(8, 6))
        ax.plot(slice_vals, totalLoss, label=f'Total Loss')
        ax.plot(slice_vals, filamentLoss, label=f'Filament Loss')
        ax.plot(slice_vals, eddyLoss, label=f'Eddy Loss')
        ax.plot(slice_vals, couplingLoss, label=f'Coupling Loss')

        tick_formatter = FuncFormatter(log_formatter)
        # Show 10^x ticks when the plotted (non-cut) axis is logarithmic:
        if lm.x_log and axis_to_cut == 'y' or lm.y_log and axis_to_cut == 'x':
            ax.xaxis.set_major_formatter(tick_formatter)
        if lm.loss_log:
            ax.yaxis.set_major_formatter(tick_formatter)


        # NOTE(review): 10**slice_value assumes the cut axis is logarithmic;
        # for a linear cut axis the printed cut value would be wrong -- confirm.
        title = lm.cross_section.title.replace('<<cut_value>>', str(round(10**slice_value, 3)))
        ax.set_title(title)
        ax.set_xlabel(lm.ylabel if axis_to_cut == 'x' else lm.xlabel)
        ax.set_ylabel(lm.cross_section.ylabel)
        ax.legend()

        # np.savetxt(os.path.join(self.outputs_folder_path, 'lossMaps_cut_0p2T_0A.txt'), np.column_stack((10**slice_vals, 10**totalLoss, 10**eddyLoss, 10**couplingLoss, 10**filamentLoss)), delimiter=' ', header='f total eddy coupling filament', comments='')

        if lm.cross_section.save_plot:
            filePath = create_non_overwriting_filepath(self.outputs_folder_path, lm.cross_section.filename, '.png', self.fdm.run.overwrite)
            plt.savefig(filePath)

        if self.fdm.run.launch_gui: plt.show()
510
+
511
    def animate_lossMap_crossSection(self):
        """
        This function is similar to the plot_lossMap_crossSection function, but instead of plotting the loss for at a constant crossection,
        it sweeps the crossection over a chosen axis and plots the loss for each crossection as an animation.

        The animation can optionally be saved as a GIF (imagemagick writer);
        it is shown only when the GUI is enabled in the run settings.
        """
        lm = self.fdm.magnet.postproc.batch_postproc.loss_map
        axis = lm.cross_section_sweep.axis_to_sweep

        X, Y, V, dataPoints = self.lossMap_createGridData('TotalLoss')
        x_vals = X[:, 0] # x-values from the loss map
        y_vals = Y[0, :] # y-values from the loss map

        # A[:, k, frame] holds loss type k along the constant axis per frame
        # (k: 0=total, 1=filament, 2=eddy, 3=coupling).
        if axis == 'x':
            A = np.zeros((lm.y_steps, 4, lm.x_steps))
            axis_to_sweep = x_vals
            constant_axis = y_vals
        elif axis == 'y':
            A = np.zeros((lm.x_steps, 4, lm.y_steps))
            axis_to_sweep = y_vals
            constant_axis = x_vals


        for i, val in enumerate(axis_to_sweep):
            # One cross section per sweep position:
            _, totalLoss, filamentLoss, eddyLoss, couplingLoss = self.lossMap_crossSection(val, axis_to_cut = axis)
            A[:, 0, i] = totalLoss
            A[:, 1, i] = filamentLoss
            A[:, 2, i] = eddyLoss
            A[:, 3, i] = couplingLoss

        # Initialize the plot
        fig, ax = plt.subplots()
        lines = ax.plot(constant_axis, A[:, :, 0], lw=2, label = ['total Loss', 'filament Loss', 'eddy Loss', 'coupling Loss'])

        # Set plot limits and labels
        ax.set_xlim(constant_axis[0], constant_axis[-1])
        ax.set_ylim(np.min(A), np.max(A))
        ax.set_xlabel(lm.ylabel if axis == 'x' else lm.xlabel)
        ax.set_ylabel(lm.cross_section_sweep.ylabel)


        # Define the animation update function
        def update(frame):
            # Update every curve with the data of the current frame:
            for i, line in enumerate(lines):
                line.set_ydata(A[:, i, frame])

            # Recover the linear-scale sweep value for the title:
            if axis == 'x':
                if lm.x_log:
                    sweep_value = 10**x_vals[frame]
                else:
                    sweep_value = x_vals[frame]
            elif axis == 'y':
                if lm.y_log:
                    sweep_value = 10**y_vals[frame]
                else:
                    sweep_value = y_vals[frame]

            title = lm.cross_section_sweep.title.replace('<<sweep_value>>', str(round(sweep_value, 3)))
            ax.set_title(title)
            return lines,# line1, line2, line3, line4

        # Create the animation
        dt = 0.1
        ani = FuncAnimation(fig, update, frames=lm.x_steps if axis == 'x' else lm.y_steps, interval=dt*1000, blit=False)

        # Show the animation
        plt.legend()
        plt.grid()

        if lm.cross_section_sweep.save_plot:
            filepath = create_non_overwriting_filepath(folder_path=self.outputs_folder_path, base_name=lm.cross_section_sweep.filename, extension='.gif', overwrite=self.fdm.run.overwrite)
            ani.save(filepath, writer='imagemagick', fps=1/dt)

        if self.fdm.run.launch_gui: plt.show()
584
+
585
+ def create_lossMap(self):
586
+ """
587
+ This function creates a loss map based on the inputs given in the loss_map section of the input file.
588
+ The loss-map can be plotted and saved as a .png file.
589
+ """
590
+ lm = self.fdm.magnet.postproc.batch_postproc.loss_map
591
+
592
+ if self.simulation_collection:
593
+ X, Y, V, dataPoints = self.lossMap_createGridData(lm.loss_type)
594
+ else:
595
+ X, Y, V = self.totalLoss_gridData
596
+
597
+ plt.rcParams['text.usetex'] = True
598
+ plt.rcParams['font.family'] = 'times'
599
+ plt.rcParams['font.size'] = 20
600
+
601
+ fig, ax = plt.subplots(figsize=(10,8))
602
+
603
+ c = plt.pcolormesh(X, Y, V, shading='gouraud', cmap='plasma_r')
604
+ c_min = min([np.ceil(np.min(V)) for V in [V]])
605
+ c_max = max([np.floor(np.max(V)) for V in [V]])
606
+ c_ticks = [int(val) for val in np.arange(c_min, c_max+1)]
607
+ cont = plt.contour(X,Y,V, c_ticks, colors='k', linestyles='dashed')
608
+
609
+ if lm.show_datapoints:
610
+ plt.scatter(dataPoints[:, 0], dataPoints[:, 1], s=50, edgecolors='k')
611
+
612
+
613
+ if lm.show_loss_type_dominance_contour:
614
+ sigmoid = lambda x: 1/(1+np.exp(-x))
615
+ if self.simulation_collection:
616
+ _, _, FilamentLoss, _ = self.lossMap_createGridData(lossType='FilamentLoss')
617
+ _, _, CouplingLoss, _ = self.lossMap_createGridData(lossType='CouplingLoss')
618
+ _, _, EddyLoss, _ = self.lossMap_createGridData(lossType='EddyLoss')
619
+ # else:
620
+ # _, _, FilamentLoss = self.filamentLoss_gridData
621
+ # _, _, CouplingLoss = self.couplingLoss_gridData
622
+ # _, _, EddyLoss = self.eddyLoss_gridData
623
+ fil_vs_coupling_loss = np.maximum(FilamentLoss, EddyLoss) - CouplingLoss
624
+ fil_vs_eddy_loss = EddyLoss - np.maximum(FilamentLoss, CouplingLoss)
625
+ plt.contour(X,Y,sigmoid(fil_vs_coupling_loss),[0.5], colors='k')
626
+ plt.contour(X,Y,sigmoid(fil_vs_eddy_loss),[0.5], colors='k')
627
+
628
+ cbar = fig.colorbar(c, ticks=c_ticks)#, labels=c_labels)
629
+ # cbar.ax.set_xticks([-7, -6, -5, -4, -3, -2, -1, 0, 1])
630
+ # cbar.ax.set_yticklabels([r'$10^{-7}$', r'$10^{-6}$', r'$10^{-5}$', r'$10^{-4}$', r'$10^{-3}$', r'$10^{-2}$', r'$10^{-1}$', r'$10^0$', r'$10^1$'])
631
+ cbar.ax.set_yticklabels([f"$10^{{{val}}}$" for val in c_ticks])
632
+ # plt.grid(alpha=0.5)
633
+ # plt.title(lm.title)
634
+ # plt.xlabel(lm.xlabel)
635
+ # plt.ylabel(lm.ylabel)
636
+ plt.title(r'Loss per cycle (J/m)')
637
+ plt.xlabel(r'Frequency $f$ (Hz)')
638
+ plt.ylabel(r'Field amplitude $b$ (T)')
639
+
640
+
641
+ # plt.annotate(r'Coupling', (np.log10(1.0), np.log10(0.007)), color='white')
642
+ # plt.annotate(r'Filament', (np.log10(0.012), np.log10(0.74)), color='white')
643
+ # plt.annotate(r'(uncoupled)', (np.log10(0.012), np.log10(0.55)), color='white')
644
+ # plt.annotate(r'Filament', (np.log10(45), np.log10(0.38)), color='white')
645
+ # plt.annotate(r'(coupled)', (np.log10(45), np.log10(0.28)), color='white')
646
+ # plt.annotate(r'Eddy', (np.log10(2000), np.log10(0.03)), color='white')
647
+
648
+ # ax.plot(np.log10(0.03), np.log10(0.2), 'o', color='white')#, xytext=(np.log10(0.03), np.log10(0.12)), arrowprops=dict(facecolor='black', shrink=0.02))
649
+ # ax.plot(np.log10(30), np.log10(1), 'o', color='white')#, xytext=(np.log10(40), np.log10(0.8)), arrowprops=dict(facecolor='black', shrink=0.02))
650
+ # ax.plot(np.log10(3), np.log10(0.2), 'o', color='white')#, xytext=(np.log10(2), np.log10(0.2)), arrowprops=dict(facecolor='black', shrink=0.02))
651
+ # ax.plot(np.log10(5000), np.log10(0.2), 'o', color='white')#, xytext=(np.log10(5000), np.log10(0.1)), arrowprops=dict(facecolor='black', shrink=0.02))
652
+
653
+ # ax.annotate('(a)', xy=(np.log10(0.03), np.log10(0.2)), xycoords='data', ha='right', va='bottom', fontsize=20, color='white')
654
+ # ax.annotate('(b)', xy=(np.log10(3), np.log10(0.2)), xycoords='data', ha='right', va='bottom', fontsize=20, color='white')
655
+ # ax.annotate('(c)', xy=(np.log10(30), np.log10(1)), xycoords='data', ha='right', va='bottom', fontsize=20, color='white')
656
+ # ax.annotate('(d)', xy=(np.log10(5000), np.log10(0.2)), xycoords='data', ha='right', va='bottom', fontsize=20, color='white')
657
+
658
+ # Define custom tick labels for x-axis
659
+ x_min_log = int(np.log10(min([eval('sd.'+lm.x_val) for sd in self.simulation_collection])))
660
+ x_max_log = int(np.log10(max([eval('sd.'+lm.x_val) for sd in self.simulation_collection])))
661
+ x = np.arange(x_min_log, x_max_log+1)
662
+ # Create a list of minor ticks
663
+ minor_x_labels = []
664
+ # 1) Add the ticks from x_min_log to ceil(x_min_log) to the minor_x_test list
665
+ new_ticks = np.linspace(10.0**np.floor(x_min_log), 10.0**np.ceil(x_min_log), 10)[:-1]
666
+ new_ticks = np.unique(new_ticks[new_ticks >= 10.0**x_min_log])
667
+ minor_x_labels.extend(new_ticks)
668
+ # 2) Add the ticks from ceil(x_min_log) to floor(x_max_log) to the minor_x_test list
669
+ for x_val in x:
670
+ new_ticks = np.linspace(10.0**x_val, 10.0**(x_val+1), 10)[1:-1]
671
+ if x_val == x[-1]:
672
+ new_ticks = new_ticks[new_ticks <= 10.0**x_max_log]
673
+ minor_x_labels.extend(new_ticks)
674
+ minor_x = [np.log10(val) for val in minor_x_labels]
675
+
676
+ new_x_labels = [f"$10^{{{val}}}$" for val in x]
677
+ plt.xticks(x, new_x_labels)
678
+ plt.xticks(minor_x, minor=True)
679
+
680
+ # Define custom tick labels for y-axis
681
+ y_min_log = np.log10(min([eval('sd.'+lm.y_val) for sd in self.simulation_collection]))
682
+ y_max_log = np.log10(max([eval('sd.'+lm.y_val) for sd in self.simulation_collection]))
683
+ y = np.arange(np.ceil(y_min_log), np.floor(y_max_log)+1)
684
+ # Create a list of minor ticks
685
+ minor_y_labels = []
686
+ # 1) Add the ticks from y_min_log to ceil(y_min_log) to the minor_y_test list
687
+ new_ticks = np.linspace(10.0**np.floor(y_min_log), 10.0**np.ceil(y_min_log), 10)[:-1]
688
+ new_ticks = np.unique(new_ticks[new_ticks >= 10.0**y_min_log])
689
+ minor_y_labels.extend(new_ticks)
690
+ # 2) Add the ticks from ceil(y_min_log) to floor(y_max_log) to the minor_y_test list
691
+ for y_val in y:
692
+ new_ticks = np.linspace(10.0**y_val, 10.0**(y_val+1), 10)[1:-1]
693
+ if y_val == y[-1]:
694
+ new_ticks = new_ticks[new_ticks <= 10.0**y_max_log]
695
+ minor_y_labels.extend(new_ticks)
696
+
697
+ new_y_labels = [f"$10^{{{int(val)}}}$" for val in y]
698
+ minor_y = [np.log10(val) for val in minor_y_labels]
699
+ plt.yticks(y, new_y_labels)
700
+ plt.yticks(minor_y, minor=True)
701
+
702
+ # plt.savefig('C:/Users/jdular/cernbox/Documents/Reports/CERN_Reports/linkedFluxPaper/fig/loss_map_54fil_noI.pdf', bbox_inches='tight')
703
+
704
+
705
+ if lm.save_plot:
706
+ filePath = create_non_overwriting_filepath(self.outputs_folder_path, lm.filename, '.pdf', self.fdm.run.overwrite)
707
+ plt.savefig(filePath, bbox_inches='tight')
708
+
709
+ if self.fdm.run.launch_gui: plt.show()
710
+
711
+
712
    def plot2d(self):
        """
        Create a flexible 2D plot from the simulation collection.

        Configured by the ``plot2d`` section of the input file
        (``self.fdm.magnet.postproc.batch_postproc.plot2d``). The ``title``,
        ``labels``, ``x_val`` and ``y_vals`` fields may contain ``<<expr>>``
        placeholders, where ``expr`` is an attribute path evaluated on each
        simulation object ``sd`` (e.g. ``<<solve.source_parameters.frequency>>``).

        Plotting modes:
        - If every y-value is a scalar float, one summary plot is drawn across
          all simulations (x from ``x_val``, one line per entry in ``y_vals``).
        - Otherwise, with ``combined_plot`` True, all simulations are drawn
          into a single figure; with ``combined_plot`` False, one figure per
          simulation is created and each is saved into a dedicated folder.
        """
        plt.rcParams['text.usetex'] = True
        plt.rcParams['font.family'] = 'times'
        # plt.rcParams['font.size'] = 20

        # Create the title (or titles if combined_plot is False)
        title = self.fdm.magnet.postproc.batch_postproc.plot2d.title
        if self.fdm.magnet.postproc.batch_postproc.plot2d.combined_plot:
            # One shared title: placeholders are resolved against the first simulation.
            # NOTE(review): eval of expressions from the input file -- trusted input assumed.
            sd = self.simulation_collection[0]
            commands = re.findall('<<(.*?)>>', title)
            for c in commands:
                title = title.replace(f"<<{c}>>", str(eval('sd.'+c)))
        else:
            # One resolved title per simulation.
            titles = []
            for sd in self.simulation_collection:
                commands = re.findall('<<(.*?)>>', title)
                title_i = title
                for c in commands:
                    title_i = title_i.replace(f"<<{c}>>", str(eval('sd.'+c)))
                titles.append(title_i)

        # Create the labels: one row of resolved label strings per simulation.
        label_list = self.fdm.magnet.postproc.batch_postproc.plot2d.labels
        labels = np.zeros((len(self.simulation_collection), len(label_list)), dtype=object)
        for i, sd in enumerate(self.simulation_collection):
            simulation_labels = []
            for l in label_list:
                commands = re.findall('<<(.*?)>>', l)
                for c in commands:
                    l = l.replace(f"<<{c}>>", str(eval('sd.'+c)))

                simulation_labels.append(l)
            labels[i, :] = simulation_labels

        # One color per simulation, sampled from the 'magma' colormap.
        colors = plt.cm.get_cmap('magma').resampled(len(self.simulation_collection)).colors

        # Load the x-values:
        # Unlike the title/labels above, the placeholder is replaced by the
        # expression text 'sd.<path>' and only evaluated per-simulation below.
        x_val = self.fdm.magnet.postproc.batch_postproc.plot2d.x_val
        commands = re.findall('<<(.*?)>>', x_val)
        for c in commands:
            x_val = x_val.replace(f"<<{c}>>", str('sd.'+c))
        x_arr = np.array([eval(x_val) for sd in self.simulation_collection], dtype=object)

        # Load the y-values: shape (n_simulations, n_y_vals), entries may be
        # scalars or array-like (hence dtype=object).
        y_vals = self.fdm.magnet.postproc.batch_postproc.plot2d.y_vals
        y_arr = np.zeros((len(self.simulation_collection), len(y_vals)), dtype=object)
        for i, sd in enumerate(self.simulation_collection):
            for j, y_val in enumerate(y_vals):
                commands = re.findall('<<(.*?)>>', y_val)
                for c in commands:
                    y_val = y_val.replace(f"<<{c}>>", str('sd.'+c))
                y_arr[i, j] = eval(y_val)

        # data = np.column_stack((x_arr, y_arr))
        # np.savetxt(os.path.join(self.outputs_folder_path, self.fdm.magnet.postproc.batch_postproc.plot2d.filename+'.txt'), data, delimiter=' ', header='f total eddy coupling filament', comments='')




        # Plot and save the data:
        if not self.fdm.magnet.postproc.batch_postproc.plot2d.combined_plot and self.fdm.magnet.postproc.batch_postproc.plot2d.save_plot:
            # Create a folder to save the plots if combined_plot is False and save_plot is True:
            filename = self.fdm.magnet.postproc.batch_postproc.plot2d.filename
            folder_path = create_non_overwriting_filepath(self.outputs_folder_path, filename, '', self.fdm.run.overwrite)
            if not os.path.exists(folder_path): os.makedirs(folder_path)

        # Check if the y-values are all floats:
        y_is_float = np.all(np.apply_along_axis(lambda arr: np.all(np.vectorize(isinstance)(arr, float)), axis=1, arr=y_arr))
        # If they are all floats, we can make a single plot, spanning all simulations, instead of one plot per simulation.
        if y_is_float:
            for column in range(y_arr.shape[1]):
                plt.plot(x_arr, y_arr[:, column], label=label_list[column])
            if self.fdm.magnet.postproc.batch_postproc.plot2d.legend:
                plt.legend()
            plt.grid()
            plt.xlabel(self.fdm.magnet.postproc.batch_postproc.plot2d.xlabel)
            plt.ylabel(self.fdm.magnet.postproc.batch_postproc.plot2d.ylabel)
            # NOTE(review): when combined_plot is False, 'title' still holds the
            # unresolved template here (only 'titles' was resolved) -- verify intended.
            plt.title(title)
            if self.fdm.magnet.postproc.batch_postproc.plot2d.x_log:
                plt.xscale('log')
            if self.fdm.magnet.postproc.batch_postproc.plot2d.y_log:
                plt.yscale('log')

            if self.fdm.magnet.postproc.batch_postproc.plot2d.save_plot:
                filename = self.fdm.magnet.postproc.batch_postproc.plot2d.filename
                filePath = create_non_overwriting_filepath(self.outputs_folder_path, filename, '.png', self.fdm.run.overwrite)
                plt.savefig(filePath)


        else:
            for i, (x, y_vals_per_sim, labels_per_sim, color) in enumerate(zip(x_arr, y_arr, labels, colors)):
                if self.fdm.magnet.postproc.batch_postproc.plot2d.combined_plot:
                    # If combined_plot is true, plot all the data in the same figure:
                    # x_sin = 2*np.sin(2*np.pi*x/x.iloc[-1]) #temporary for missing hs_val
                    for y, label in zip(y_vals_per_sim, labels_per_sim):
                        # plt.plot(x_sin, x_sin+1.6*y, self.fdm.magnet.postproc.batch_postproc.plot2d.linestyle, label=label, color = color) #temporary for missing hs_val
                        plt.plot(x, y, self.fdm.magnet.postproc.batch_postproc.plot2d.linestyle, label=label, color = color)
                else:
                    # If combined_plot is false, plot data from each simulation in a separate figure:
                    plt.figure()
                    for y, label in zip(y_vals_per_sim, labels_per_sim):
                        plt.plot(x, y, self.fdm.magnet.postproc.batch_postproc.plot2d.linestyle, label=label)

                # Set the plot options
                if self.fdm.magnet.postproc.batch_postproc.plot2d.legend:
                    plt.legend()
                plt.grid()
                plt.xlabel(self.fdm.magnet.postproc.batch_postproc.plot2d.xlabel)
                plt.ylabel(self.fdm.magnet.postproc.batch_postproc.plot2d.ylabel)
                plt.title(title if self.fdm.magnet.postproc.batch_postproc.plot2d.combined_plot else titles[i])


                if not self.fdm.magnet.postproc.batch_postproc.plot2d.combined_plot and self.fdm.magnet.postproc.batch_postproc.plot2d.save_plot:
                    # If combined_plot is false we expect a lot of plots and save them all into a folder:
                    filename = self.fdm.magnet.postproc.batch_postproc.plot2d.filename
                    plt.savefig(os.path.join(folder_path, filename+f"_{i}.png"))

            if self.fdm.magnet.postproc.batch_postproc.plot2d.combined_plot and self.fdm.magnet.postproc.batch_postproc.plot2d.save_plot:
                # If combined_plot is true we expect only one plot and save it in the main folder:
                filename = self.fdm.magnet.postproc.batch_postproc.plot2d.filename
                filePath = create_non_overwriting_filepath(self.outputs_folder_path, filename, '.png', self.fdm.run.overwrite)
                plt.savefig(filePath, dpi=300)

        if self.fdm.run.launch_gui: plt.show()