fiqus-2025.2.0-py3-none-any.whl → fiqus-2025.10.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. fiqus/MainFiQuS.py +4 -9
  2. fiqus/data/DataConductor.py +350 -301
  3. fiqus/data/DataFiQuS.py +42 -115
  4. fiqus/data/DataFiQuSCCT.py +150 -150
  5. fiqus/data/DataFiQuSConductor.py +97 -84
  6. fiqus/data/DataFiQuSConductorAC_Strand.py +701 -565
  7. fiqus/data/DataModelCommon.py +439 -0
  8. fiqus/data/DataMultipole.py +0 -13
  9. fiqus/data/DataRoxieParser.py +7 -0
  10. fiqus/data/DataWindingsCCT.py +37 -37
  11. fiqus/data/RegionsModelFiQuS.py +61 -104
  12. fiqus/geom_generators/GeometryCCT.py +904 -905
  13. fiqus/geom_generators/GeometryConductorAC_Strand.py +1863 -1391
  14. fiqus/geom_generators/GeometryMultipole.py +5 -4
  15. fiqus/geom_generators/GeometryPancake3D.py +1 -1
  16. fiqus/getdp_runners/RunGetdpCCT.py +13 -4
  17. fiqus/getdp_runners/RunGetdpConductorAC_Strand.py +341 -201
  18. fiqus/getdp_runners/RunGetdpPancake3D.py +2 -2
  19. fiqus/mains/MainConductorAC_Strand.py +141 -133
  20. fiqus/mains/MainMultipole.py +6 -5
  21. fiqus/mains/MainPancake3D.py +3 -4
  22. fiqus/mesh_generators/MeshCCT.py +209 -209
  23. fiqus/mesh_generators/MeshConductorAC_Strand.py +709 -656
  24. fiqus/mesh_generators/MeshMultipole.py +43 -46
  25. fiqus/parsers/ParserDAT.py +16 -16
  26. fiqus/parsers/ParserGetDPOnSection.py +212 -212
  27. fiqus/parsers/ParserGetDPTimeTable.py +134 -134
  28. fiqus/parsers/ParserMSH.py +53 -53
  29. fiqus/parsers/ParserPOS.py +214 -214
  30. fiqus/parsers/ParserRES.py +142 -142
  31. fiqus/plotters/PlotPythonCCT.py +133 -133
  32. fiqus/plotters/PlotPythonConductorAC.py +1079 -855
  33. fiqus/plotters/PlotPythonMultipole.py +18 -18
  34. fiqus/post_processors/PostProcessCCT.py +444 -440
  35. fiqus/post_processors/PostProcessConductorAC.py +997 -49
  36. fiqus/post_processors/PostProcessMultipole.py +19 -19
  37. fiqus/pre_processors/PreProcessCCT.py +175 -175
  38. fiqus/pro_material_functions/ironBHcurves.pro +246 -246
  39. fiqus/pro_templates/combined/CCT_template.pro +275 -274
  40. fiqus/pro_templates/combined/ConductorAC_template.pro +1474 -1025
  41. fiqus/pro_templates/combined/Multipole_template.pro +5 -5
  42. fiqus/utils/Utils.py +12 -7
  43. {fiqus-2025.2.0.dist-info → fiqus-2025.10.0.dist-info}/METADATA +65 -63
  44. fiqus-2025.10.0.dist-info/RECORD +86 -0
  45. {fiqus-2025.2.0.dist-info → fiqus-2025.10.0.dist-info}/WHEEL +1 -1
  46. tests/test_geometry_generators.py +4 -0
  47. tests/test_mesh_generators.py +5 -0
  48. tests/test_solvers.py +41 -4
  49. tests/utils/fiqus_test_classes.py +15 -6
  50. tests/utils/generate_reference_files_ConductorAC.py +57 -57
  51. tests/utils/helpers.py +97 -97
  52. fiqus-2025.2.0.dist-info/RECORD +0 -85
  53. {fiqus-2025.2.0.dist-info → fiqus-2025.10.0.dist-info}/LICENSE.txt +0 -0
  54. {fiqus-2025.2.0.dist-info → fiqus-2025.10.0.dist-info}/top_level.txt +0 -0
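
For reference, the comparison can be reproduced locally. The sketch below is illustrative only and not part of the package: it assumes `pip` is on PATH, network access to PyPI, and that both releases are still published there. It downloads the two wheels, extracts `fiqus/plotters/PlotPythonConductorAC.py` (the module whose diff is excerpted below, matching the +1079/-855 counts of entry 32) from each archive, and prints a unified diff using only the standard library.

import difflib
import subprocess
import zipfile
from pathlib import Path

VERSIONS = ("2025.2.0", "2025.10.0")
MODULE = "fiqus/plotters/PlotPythonConductorAC.py"  # any path from the table above works

def fetch_wheel(version, dest):
    """Download the fiqus wheel for `version` into `dest` and return its path."""
    dest.mkdir(parents=True, exist_ok=True)
    subprocess.run(["pip", "download", f"fiqus=={version}", "--no-deps",
                    "--only-binary", ":all:", "-d", str(dest)], check=True)
    return next(dest.glob(f"fiqus-{version}-*.whl"))

def read_module(wheel, module):
    """Wheels are zip archives; read one module's source as a list of lines."""
    with zipfile.ZipFile(wheel) as zf:
        return zf.read(module).decode("utf-8").splitlines(keepends=True)

old, new = (read_module(fetch_wheel(v, Path("wheels") / v), MODULE) for v in VERSIONS)
print("".join(difflib.unified_diff(old, new,
                                   fromfile=f"{MODULE} @ {VERSIONS[0]}",
                                   tofile=f"{MODULE} @ {VERSIONS[1]}")))

Only pip and the standard library are used; swapping MODULE for another path in the table inspects a different file.
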
fiqus/plotters/PlotPythonConductorAC.py
@@ -1,855 +1,1079 @@
1
- import os
2
- import numpy as np
3
- import re
4
- import matplotlib.pyplot as plt
5
- from matplotlib.ticker import FuncFormatter
6
- from matplotlib.animation import FuncAnimation
7
- import pandas as pd
8
- from scipy import integrate, interpolate
9
- from ruamel.yaml import YAML
10
- import subprocess
11
-
12
- # from fiqus.utils.Utils import FilesAndFolders as Util
13
- # from fiqus.data.DataFiQuSConductorAC_Strand import CACStrandSolve, CACStrandPostproc, CACStrandMesh, CACStrandGeometry
14
-
15
- def create_non_overwriting_filepath(folder_path, base_name, extension, overwrite):
16
- """
17
- Creates a filepath that does not overwrite any existing files.
18
-
19
- This function checks if a file already exists at the specified filepath. If the file exists and `overwrite` is False,
20
- it modifies the filepath to create a new file instead of overwriting the existing one.
21
- If `overwrite` is True or the file does not exist, it returns the filepath as it is.
22
-
23
- Parameters
24
- ----------
25
- folder_path : str
26
- The path to the folder where the file will be created.
27
- base_name : str
28
- The base name of the file.
29
- extension : str
30
- The extension of the file.
31
- overwrite : bool, optional
32
- If True, the function will overwrite an existing file. If False, the function will modify the filepath to avoid overwriting. Defaults to False.
33
-
34
- Returns
35
- -------
36
- str
37
- The final filepath. If `overwrite` is False and a file already exists at the original filepath, this will be a new filepath that does not overwrite any existing files.
38
- """
39
- if os.path.exists(os.path.join(folder_path, base_name+extension)) and not overwrite:
40
- counter = 1
41
- new_name = base_name + f"_{counter}" + extension
42
- while os.path.exists(os.path.join(folder_path, new_name)):
43
- new_name = base_name + f"_{counter}" + extension
44
- counter += 1
45
- return os.path.join(folder_path, new_name)
46
-
47
- return os.path.join(folder_path, base_name+extension)
48
-
49
- class YamlWrapper:
50
- """
51
- A wrapper class for YAML data that allows accessing dictionary data using dot notation.
52
- """
53
- def __init__(self, data):
54
- for key, value in data.items():
55
- if isinstance(value, dict):
56
- value = YamlWrapper(value)
57
- self.__dict__[key] = value
58
-
59
- def __getattr__(self, item):
60
- return self.__dict__.get(item)
61
-
62
- def __setattr__(self, key, value):
63
- self.__dict__[key] = value
64
-
65
- def load_yaml(file_path):
66
- """
67
- Load a YAML file and return the data as a YamlWrapper object. This enables accessing the data using dot notation (e.g., data.key.subkey), without needing a predefined pydantic model, allowing for better backwards compatibility.
68
- """
69
- yaml = YAML()
70
- with open(file_path, 'r') as file:
71
- data = yaml.load(file)
72
- return YamlWrapper(data)
73
-
74
- def is_latex_installed():
75
- """
76
- Check if LaTeX is installed on the system.
77
- """
78
- try:
79
- subprocess.run(['pdflatex', '--version'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
80
- return True
81
- except subprocess.CalledProcessError:
82
- return False
83
- except FileNotFoundError:
84
- return False
85
-
86
- class SimulationData:
87
- """
88
- Class used to store and manage data from a single simulation.
89
-
90
- This class is responsible for loading and organizing the data from a single simulation.
91
- It stores the data in various attributes and provides methods for retrieving and processing the data.
92
-
93
- """
94
- def __init__(self, model_data_output_path, geometry_name, mesh_name, solution_name) -> None:
95
- self.model_data_output_path = model_data_output_path # This is the path to the folder where the model output data is stored (e.g. geometries)
96
- self.geometry_name = geometry_name # Name of the geometry folder
97
- self.mesh_name = mesh_name # Name of the mesh folder
98
- self.solution_name = solution_name # Name of the solution folder
99
-
100
- # Organize the folders:
101
- self.geometry_folder = os.path.join(self.model_data_output_path, geometry_name) # Path to the geometry folder
102
- self.mesh_folder = os.path.join(self.geometry_folder, mesh_name) # Path to the mesh folder
103
- self.solution_folder = os.path.join(self.mesh_folder, solution_name) # Path to the solution folder
104
-
105
- # Store the YAML input-files in a data model, fdm:
106
- self.geometry, self.mesh, self.solve = self.retrieve_fiqusDataModel()
107
-
108
- # Store losses, simulation time and check if the simulation crashed:
109
- temp_file_path = os.path.join(self.solution_folder, 'test_temporary')
110
- loss_file = [f for f in os.listdir(temp_file_path) if f.startswith('power') and f.endswith('.txt')][0]
111
- self.power_columns = ['Time', 'FilamentLoss', 'CouplingLoss', 'EddyLoss', 'TotalLoss', 'CouplingLoss_dyn', 'TotalLoss_dyn'] # Only in the case dynamic correction is used, must be changed later
112
- self.power = pd.read_csv(os.path.join(self.solution_folder, 'test_temporary', loss_file), sep = ' ', names=self.power_columns) # Store instantaneous losses as pandas dataframe
113
- self.crash = True if 'crash_report.txt' in os.listdir(temp_file_path) else False
114
-
115
- # Add a row of zeros at the beginning of the dataframe to account for the initial condition:
116
- self.power = pd.concat([pd.DataFrame({col: 0 for col in self.power_columns}, index=[0]), self.power]).reset_index(drop=True)
117
- # Integrate the losses to obtain the cumulative power and the total power per cycle:
118
- self.cumulative_power, self.total_power_per_cycle = self.integrate_power() # Store cumulative power and total cumulative power per cycle
119
- # Store simulation time:
120
- try:
121
- with open(os.path.join(self.solution_folder, 'test_temporary', 'simulation_time.txt'), 'r') as f:
122
- self.simulation_time = float(f.readline().strip())
123
- except:
124
- self.simulation_time = None # If the simulation time file does not exist, the simulation has not finished running.
125
-
126
- # Store the rest of the post-processing data:
127
- self.time = self.power['Time']
128
- self.instantaneous_temperature = self.load_standard_data(os.path.join(temp_file_path, 'temperature.txt'), 1, add_initial_zero=True)
129
- self.temperature = self.load_standard_data(os.path.join(temp_file_path, 'temperature.txt'), 2, add_initial_zero=True)
130
- self.I_transport = self.load_standard_data(os.path.join(temp_file_path, 'I_transport.txt'), 1)
131
- self.V_transport = self.load_standard_data(os.path.join(temp_file_path, 'V_transport.txt'), 1)
132
- self.hs_val = self.load_standard_data(os.path.join(temp_file_path, 'hs_val.txt'), 1)
133
- self.magn_fil = self.load_standard_data(os.path.join(temp_file_path, 'magn_fil.txt'), [1, 2, 3])
134
- self.magn_matrix = self.load_standard_data(os.path.join(temp_file_path, 'magn_matrix.txt'), [1, 2, 3])
135
- self.I = self.load_standard_data(os.path.join(temp_file_path, 'I.txt'), 1, len(self.time))
136
- self.V = self.load_standard_data(os.path.join(temp_file_path, 'V.txt'), 1, len(self.time))
137
- self.I_integral = self.load_standard_data(os.path.join(temp_file_path, 'I_integral.txt'), 1, len(self.time))
138
- self.I_abs_integral = self.load_standard_data(os.path.join(temp_file_path, 'I_abs_integral.txt'), 1, len(self.time))
139
- self.magnetic_energy_internal = self.load_standard_data(os.path.join(temp_file_path, 'magnetic_energy_internal.txt'), 1)
140
- self.Ip = self.load_standard_data(os.path.join(temp_file_path, 'Ip.txt'), 1, len(self.time))
141
- self.Vp = self.load_standard_data(os.path.join(temp_file_path, 'Vp.txt'), 1, len(self.time))
142
-
143
-
144
- def load_standard_data(self, file_path, columns, reshape = None, add_initial_zero = False):
145
- """
146
- There are many output .txt-files with similar format. This function loads the data from one of these files and returns it as a numpy array.
147
- If the file does not exist, None is returned without raising an error.
148
- """
149
- try:
150
- data = np.loadtxt(file_path, comments='#', usecols=columns)
151
- if reshape:
152
- data = data.reshape(-1, reshape).T
153
- except IOError:
154
- return None
155
-
156
- if add_initial_zero:
157
- if len(data.shape) == 1:
158
- data = np.insert(data, 0, 0)
159
- else:
160
- zeros = np.zeros((1, data.shape[1]))
161
- data = np.vstack((zeros, data))
162
- return data
163
-
164
- def retrieve_fiqusDataModel(self):
165
- """
166
- This function reads the YAML input-files for geometry, mesh and solve and stores them in three dictionaries which are returned.
167
- This function is to be called only once, when the object is created.
168
- """
169
- geometry_dataModel = load_yaml(os.path.join(self.geometry_folder, 'geometry.yaml'))
170
- mesh_dataModel = load_yaml(os.path.join(self.mesh_folder, 'mesh.yaml'))
171
- solution_dataModel = load_yaml(os.path.join(self.solution_folder, 'solve.yaml'))
172
-
173
- return geometry_dataModel, mesh_dataModel, solution_dataModel
174
-
175
- def integrate_power(self):
176
- """
177
- This function integrates the instantaneous power over time to obtain the cumulative power.
178
- It also calculates the total cumulative power per cycle.
179
- The cumulative power is returned as a pandas dataframe and the total cumulative power per cycle is returned as a dictionary.
180
- """
181
- find_closest_idx = lambda arr, val: np.abs(arr - val).argmin()
182
-
183
- t = np.array(self.power['Time'])
184
- t_final = t[-1]
185
- t_init = find_closest_idx(t, t_final/2)
186
-
187
- cumulative_power = pd.DataFrame(columns= self.power_columns)
188
- total_power_per_cycle = {}
189
-
190
- cumulative_power['Time'] = self.power["Time"]
191
- for column in self.power_columns[1:]:
192
- cumulative_power[column] = np.insert(integrate.cumulative_trapezoid(self.power[column], t), 0, 0)
193
- total_power_per_cycle[column] = 2 * (cumulative_power[column].iloc[-1]-cumulative_power[column].iloc[t_init]) # / (np.pi*matrix_radius**2 * loss_factor) # Why do we divide by pi*matrix_radius**2*loss_factor?
194
-
195
- return cumulative_power, total_power_per_cycle
196
-
197
- def plot_instantaneous_power(self, show:bool = True, title:str = "Power", save_plot:bool = False, save_folder_path:str = None, save_file_name:str = None, overwrite:bool = False):
198
- plt.figure()
199
- plt.plot(self.power['Time'], self.power[self.power_columns[1:]] , label = self.power_columns[1:])
200
- plt.xlabel('Time [s]')
201
- plt.ylabel('Power [W/m]')
202
- plt.legend()
203
-
204
- # Configure title:
205
- # Class attributes can be accessed by using '<< ... >>' in the title string.
206
- commands = re.findall('<<(.*?)>>', title)
207
- for c in commands:
208
- title = title.replace(f"<<{c}>>", str(eval('self.'+c)))
209
- plt.title(title)
210
-
211
-
212
- if save_plot: # Save the plot
213
- filePath = create_non_overwriting_filepath(save_folder_path, save_file_name, '.png', overwrite)
214
- plt.savefig(filePath)
215
-
216
- if show:
217
- plt.show()
218
- else:
219
- plt.close()
220
-
221
- class PlotPython:
222
- """
223
- This class loads and stores the data from the simulations specified in a csv file and can apply various postprocessing operations on the data.
224
- The data from each simulation is saved as a SimulationData object which is subsequently stored in a list in this class.
225
- """
226
- def __init__(self, fdm, csv_filename = None, lossMap_gridData_folder = None, inputs_folder_path='', outputs_folder_path='') -> None:
227
- self.fdm = fdm
228
- self.inputs_folder_path = inputs_folder_path # This is the path to the folder where the input data is stored
229
- self.model_data_output_path = outputs_folder_path # This is the path to the folder where the model output data is stored (e.g. geometries)
230
- self.outputs_folder_path = os.path.join(outputs_folder_path, fdm.magnet.postproc.batch_postproc.output_folder) # This is the path to the folder where the postprocessed data is written
231
-
232
-
233
- if not os.path.exists(self.outputs_folder_path):
234
- os.makedirs(self.outputs_folder_path)
235
-
236
- if csv_filename is not None:
237
- try:
238
- self.input_csv = pd.read_csv(os.path.join(self.inputs_folder_path, f'{csv_filename}.csv')) # Read the csv file with the input data
239
- except:
240
- raise FileNotFoundError(f'No csv file with the name {csv_filename}.csv was found in the inputs folder.')
241
-
242
- self.simulation_collection = self.retrieve_simulation_data()
243
-
244
- self.avg_simulation_time = np.mean([sd.simulation_time for sd in self.simulation_collection])
245
- self.total_simulation_time = np.sum([sd.simulation_time for sd in self.simulation_collection])
246
-
247
- print('Number of simulations considered: ', len(self.simulation_collection) )
248
- print('Average simulation time: ', self.avg_simulation_time, 's')
249
- print('Total simulation time: ', self.total_simulation_time, 's')
250
-
251
-
252
-
253
- elif lossMap_gridData_folder is not None:
254
- self.input_csv = None
255
- self.simulation_collection = None
256
-
257
- self.totalLoss_gridData = self.load_lossMap_gridData('TotalLoss', lossMap_gridData_folder)
258
- self.filamentLoss_gridData = self.load_lossMap_gridData('FilamentLoss', lossMap_gridData_folder)
259
- self.eddyLoss_gridData = self.load_lossMap_gridData('EddyLoss', lossMap_gridData_folder)
260
- self.couplingLoss_gridData = self.load_lossMap_gridData('CouplingLoss', lossMap_gridData_folder)
261
- else:
262
- raise ValueError('No input data specified. Either a csv file or a folder with loss map grid data must be provided.')
263
-
264
-
265
-
266
-
267
- def retrieve_simulation_data(self):
268
- """
269
- This function iterates over the input CSV-file (specifying which simulations to postprocess) and returns a list of SimulationData objects
270
- containing all the simulation data. If no CSV-file is specified, the data from the single simulation specified in the input YAML-file is returned.
271
- """
272
- if self.input_csv is not None:
273
- simulationCollection = []
274
- for index, row in self.input_csv.iterrows():
275
- if pd.isna(row['input.run.geometry']) and pd.isna(row['input.run.mesh']) and pd.isna(row['input.run.solution']):
276
- continue
277
- geometry_name = 'Geometry_'+str(row['input.run.geometry'])
278
- mesh_name = 'Mesh_'+str(row['input.run.mesh'])
279
-
280
- if isinstance(row['input.run.solution'], float) and row['input.run.solution'].is_integer():
281
- solution_name = 'Solution_'+str(int(row['input.run.solution']))
282
- else:
283
- solution_name = 'Solution_'+str(row['input.run.solution'])
284
-
285
- # Check if the row refers to a valid simulation by checking if the solution folder exists:
286
- # solution_folder = os.path.join(os.getcwd(), 'tests', '_outputs', self.fdm.general.magnet_name, geometry_name, mesh_name, solution_name)
287
- solution_folder = os.path.join(self.model_data_output_path, geometry_name, mesh_name, solution_name)
288
- if os.path.exists(solution_folder): # If the solution folder exists, add the simulation to the simulationCollection
289
- sd = SimulationData(self.model_data_output_path, geometry_name, mesh_name, solution_name)
290
- if sd.simulation_time is not None: # Only add the simulation if it has finished running (and therefore has written the simulation time to a file)
291
- simulationCollection.append(sd)
292
- else:
293
- simulationCollection = [SimulationData(self.model_data_output_path, 'Geometry_'+self.fdm.run.geometry, 'Mesh_'+self.fdm.run.mesh, 'Solution_'+self.fdm.run.solution)]
294
-
295
- return self.sort_simulationCollection(self.filter_simulationCollection(simulationCollection))
296
-
297
- def filter_simulationCollection(self, simulationCollection):
298
- """
299
- This function is used to filter the simulationCollection based on the filter criterion specified in the yaml input file.
300
- An example of a filter criterion is '<<solve.source_parameters.sine.frequency>> == 18', which will disregard all simulations with frequency != 18Hz.
301
- """
302
- if self.fdm.magnet.postproc.batch_postproc.filter.apply_filter:
303
- filter_criterion = self.fdm.magnet.postproc.batch_postproc.filter.filter_criterion
304
- class_params = re.findall('<<(.*?)>>', filter_criterion)
305
- for cp in class_params:
306
- filter_criterion = filter_criterion.replace(f"<<{cp}>>", 'sd.'+cp)
307
- filtering_function = eval(f'lambda sd: {filter_criterion}')
308
- return list(filter(filtering_function, simulationCollection))
309
- else:
310
- return simulationCollection
311
-
312
- def sort_simulationCollection(self, simulationCollection):
313
- """
314
- This function is used to sort the simulationCollection based on the sort key specified in the yaml input file.
315
- An example of a sort key is 'solve.source_parameters.sine.frequency', which will sort the simulations based on frequency.
316
- """
317
- if self.fdm.magnet.postproc.batch_postproc.sort.apply_sort:
318
- sorting_function = eval(f'lambda sd: sd.{self.fdm.magnet.postproc.batch_postproc.sort.sort_key}')
319
- return sorted(simulationCollection, key=sorting_function)
320
- else:
321
- return simulationCollection
322
-
323
- def lossMap_createGridData(self, lossType = 'TotalLoss', x_val_to_include = None, y_val_to_include = None):
324
- """
325
- This function creates the grid data needed for the loss map, based on the yaml input file.
326
- Given a collection of simulations it interpolates the loss data between the datapoints to a grid and returns the grid data.
327
- """
328
- lm = self.fdm.magnet.postproc.batch_postproc.loss_map
329
-
330
- # Extract data from simulation collection and normalize
331
- x_arr = np.array([eval('sd.'+lm.x_val)/lm.x_norm for sd in self.simulation_collection])
332
- y_arr = np.array([eval('sd.'+lm.y_val)/lm.y_norm for sd in self.simulation_collection])
333
- loss = np.array([sd.total_power_per_cycle[lossType]/lm.loss_norm for sd in self.simulation_collection])
334
-
335
- # Logarithmic scaling
336
- if lm.x_log: x_arr = np.log10(x_arr)
337
- if lm.y_log: y_arr = np.log10(y_arr)
338
- if lm.loss_log: loss = np.log10(loss)
339
-
340
- x_arr_interpolated = np.linspace(min(x_arr), max(x_arr), lm.x_steps)
341
- y_arr_interpolated = np.linspace(min(y_arr), max(y_arr), lm.y_steps)
342
- # Insert specific values to the grid if they are not already included (useful for cross sections)
343
- if x_val_to_include is not None and x_val_to_include not in x_arr_interpolated:
344
- x_arr_interpolated = np.insert(x_arr_interpolated, np.where(x_arr_interpolated > x_val_to_include)[0][0], x_val_to_include)
345
- if y_val_to_include is not None and y_val_to_include not in y_arr_interpolated:
346
- y_arr_interpolated = np.insert(y_arr_interpolated, np.where(y_arr_interpolated > y_val_to_include)[0][0], y_val_to_include)
347
-
348
- # Create grid
349
- X, Y = np.meshgrid(x_arr_interpolated, y_arr_interpolated, indexing='ij')
350
- gridPoints = np.c_[X.ravel(), Y.ravel()]
351
- dataPoints = np.c_[x_arr, y_arr]
352
-
353
- # Interpolate the simulation data onto the grid
354
- V = interpolate.griddata(
355
- dataPoints,
356
- loss,
357
- gridPoints,
358
- method='linear' # 'cubic' produces cleaner-looking plots; is there any reason to keep 'linear'?
359
- ).reshape(X.shape)
360
-
361
- return X, Y, V, dataPoints
362
-
363
- def save_lossMap_gridData(self, save_folder_name = 'lossMap_gridData'):
364
- """
365
- This function calls the lossMap_createGridData function and saves the grid data.
366
- """
367
- lm = self.fdm.magnet.postproc.batch_postproc.loss_map
368
-
369
- lossTypes = ['TotalLoss', 'FilamentLoss', 'EddyLoss', 'CouplingLoss', 'CouplingLoss_dyn', 'TotalLoss_dyn'] # Only in the case dynamic correction is used, must be changed later
370
- # 1) Create a folder to store the output files
371
- gridData_folder_path = create_non_overwriting_filepath(self.outputs_folder_path, save_folder_name, '', self.fdm.run.overwrite)
372
- if not os.path.exists(gridData_folder_path): os.makedirs(gridData_folder_path)
373
- # 2) Create the grid data for each loss type and save it
374
- for lossType in lossTypes:
375
- X, Y, V, _ = self.lossMap_createGridData(lossType)
376
- if lm.x_log: X = np.power(10, X)
377
- if lm.y_log: Y = np.power(10, Y)
378
- if lm.loss_log: V = np.power(10, V)
379
- np.savetxt(os.path.join(gridData_folder_path, f'{lossType}.txt'), np.column_stack((X.ravel(), Y.ravel(), V.ravel())), delimiter=' ', header=f'{lm.x_val} {lm.y_val} {lossType}', comments='')
380
-
381
- def load_lossMap_gridData(self, lossType = 'TotalLoss', save_folder_name = 'lossMap_gridData'):
382
- """
383
- This function loads the grid data for a given loss type.
384
- """
385
- lm = self.fdm.magnet.postproc.batch_postproc.loss_map
386
- gridData_folder_path = os.path.join(self.inputs_folder_path, save_folder_name)
387
-
388
- if not os.path.exists(gridData_folder_path):
389
- raise FileNotFoundError(f'The folder {gridData_folder_path} does not exist.')
390
-
391
- X, Y, V = np.loadtxt(os.path.join(gridData_folder_path, f'{lossType}.txt'), unpack=True, skiprows=1)
392
-
393
- if lm.x_log: X = np.log10(X)
394
- if lm.y_log: Y = np.log10(Y)
395
- if lm.loss_log: V = np.log10(V)
396
-
397
- # Get the unique counts of X and Y
398
- unique_X = np.unique(X)
399
- unique_Y = np.unique(Y)
400
-
401
- # Reshape the data
402
- X = X.reshape((len(unique_X), len(unique_Y)))
403
- Y = Y.reshape((len(unique_X), len(unique_Y)))
404
- V = V.reshape((len(unique_X), len(unique_Y)))
405
-
406
- return X, Y, V
407
-
408
- # def add_value_to_gridData(self, X, Y, V, x_val = None, y_val = None):
409
- # """
410
- # This function adds a value to the grid data.
411
- # Steps:
412
- # 1) Revert the grid data to 1D arrays
413
- # 2) Add x or y value or both to the arrays
414
- # 3) Reshape the arrays back to grid data, interpolating the loss to the new grid points
415
- # """
416
- # gridPoints
417
-
418
- def save_magnetization(self):
419
- """
420
- This function saves the magnetization data for all simulations in the simulation collection.
421
- """
422
- magnetization_folder_path = create_non_overwriting_filepath(self.outputs_folder_path, 'magnetization', '', self.fdm.run.overwrite)
423
- if not os.path.exists(magnetization_folder_path): os.makedirs(magnetization_folder_path)
424
- for sd in self.simulation_collection:
425
- magnetization = sd.magn_fil + sd.magn_matrix
426
- magnetization = np.c_[sd.time, magnetization]
427
- np.savetxt(os.path.join(magnetization_folder_path, f'magn_f{sd.solve.source_parameters.sine.frequency}_b{sd.solve.source_parameters.sine.field_amplitude}_I{sd.solve.source_parameters.sine.current_amplitude}.txt'), magnetization, delimiter=' ', header='t x y z', comments='')
428
-
429
-
430
-
431
-
432
-
433
-
434
- def lossMap_crossSection(self, slice_value, axis_to_cut = 'x'):
435
- """
436
- This function returns the data corresponding to a cross section of the loss map, for all loss types.
437
- Given an axis and a value, it sweeps the other axis for the closest value and returns the data.
438
- Example: Given slice value 0 and axis x, it returns the data for the cross section at x = 0.
439
- """
440
-
441
- lm = self.fdm.magnet.postproc.batch_postproc.loss_map
442
- if axis_to_cut == 'x':
443
- x_val_to_include = slice_value
444
- y_val_to_include = None
445
- elif axis_to_cut == 'y':
446
- x_val_to_include = None
447
- y_val_to_include = slice_value
448
- X, Y, V, dataPoints = self.lossMap_createGridData('TotalLoss', x_val_to_include, y_val_to_include)
449
- _,_,FilamentLoss, _ = self.lossMap_createGridData('FilamentLoss', x_val_to_include, y_val_to_include)
450
- _,_,EddyLoss, _ = self.lossMap_createGridData('EddyLoss', x_val_to_include, y_val_to_include)
451
- _,_,CouplingLoss, _ = self.lossMap_createGridData('CouplingLoss', x_val_to_include, y_val_to_include)
452
-
453
-
454
- if axis_to_cut == 'x':
455
- index = np.abs(X[:, 0] - slice_value).argmin()
456
- slice_vals = Y[index, :]
457
-
458
- elif axis_to_cut == 'y':
459
- index = np.abs(Y[0, :] - slice_value).argmin()
460
- slice_vals = X[:, index]
461
-
462
- # Extract the loss values for the constant frequency across all applied fields
463
- totalLoss = V[index, :] if axis_to_cut == 'x' else V[:, index]
464
- filamentLoss = FilamentLoss[index, :] if axis_to_cut == 'x' else FilamentLoss[:, index]
465
- eddyLoss = EddyLoss[index, :] if axis_to_cut == 'x' else EddyLoss[:, index]
466
- couplingLoss = CouplingLoss[index, :] if axis_to_cut == 'x' else CouplingLoss[:, index]
467
-
468
- return slice_vals, totalLoss, filamentLoss, eddyLoss, couplingLoss
469
-
470
- def plot_lossMap_crossSection(self):
471
- """
472
- This function calls the lossMap_crossSection function and plots the data it returns, which is the loss for all values of one axis, given a constant value of the other axis.
473
- """
474
-
475
- if is_latex_installed():
476
- plt.rcParams['text.usetex'] = True
477
- plt.rcParams['font.family'] = 'times'
478
- plt.rcParams['font.size'] = 20
479
-
480
- lm = self.fdm.magnet.postproc.batch_postproc.loss_map
481
- slice_value = lm.cross_section.cut_value
482
- axis_to_cut = lm.cross_section.axis_to_cut
483
-
484
- if (lm.x_log and axis_to_cut == 'x') or (lm.y_log and axis_to_cut == 'y'):
485
- slice_value = np.log10(slice_value)
486
-
487
- slice_vals, totalLoss, filamentLoss, eddyLoss, couplingLoss = self.lossMap_crossSection(slice_value, axis_to_cut = axis_to_cut)
488
-
489
- def log_formatter(x, pos):
490
- """
491
- Format the tick labels on the plot.
492
- """
493
- return f"$10^{{{int(x)}}}$"
494
-
495
- # Plot the loss with respect to applied field for the constant frequency
496
- fig, ax = plt.subplots(figsize=(8, 6))
497
- ax.plot(slice_vals, totalLoss, label=f'Total Loss')
498
- ax.plot(slice_vals, filamentLoss, label=f'Filament Loss')
499
- ax.plot(slice_vals, eddyLoss, label=f'Eddy Loss')
500
- ax.plot(slice_vals, couplingLoss, label=f'Coupling Loss')
501
-
502
- tick_formatter = FuncFormatter(log_formatter)
503
- if lm.x_log and axis_to_cut == 'y' or lm.y_log and axis_to_cut == 'x':
504
- ax.xaxis.set_major_formatter(tick_formatter)
505
- if lm.loss_log:
506
- ax.yaxis.set_major_formatter(tick_formatter)
507
-
508
-
509
- title = lm.cross_section.title.replace('<<cut_value>>', str(round(10**slice_value, 3)))
510
- ax.set_title(title)
511
- ax.set_xlabel(lm.ylabel if axis_to_cut == 'x' else lm.xlabel)
512
- ax.set_ylabel(lm.cross_section.ylabel)
513
- ax.legend()
514
-
515
- # np.savetxt(os.path.join(self.outputs_folder_path, 'lossMaps_cut_0p2T_0A.txt'), np.column_stack((10**slice_vals, 10**totalLoss, 10**eddyLoss, 10**couplingLoss, 10**filamentLoss)), delimiter=' ', header='f total eddy coupling filament', comments='')
516
-
517
- if lm.cross_section.save_plot:
518
- filePath = create_non_overwriting_filepath(self.outputs_folder_path, lm.cross_section.filename, '.png', self.fdm.run.overwrite)
519
- plt.savefig(filePath)
520
-
521
- if self.fdm.run.launch_gui: plt.show()
522
-
523
- def animate_lossMap_crossSection(self):
524
- """
525
- This function is similar to the plot_lossMap_crossSection function, but instead of plotting the loss at a constant cross-section,
526
- it sweeps the cross-section along a chosen axis and plots the loss for each cross-section as an animation.
527
- """
528
- lm = self.fdm.magnet.postproc.batch_postproc.loss_map
529
- axis = lm.cross_section_sweep.axis_to_sweep
530
-
531
- X, Y, V, dataPoints = self.lossMap_createGridData('TotalLoss')
532
- x_vals = X[:, 0] # x-values from the loss map
533
- y_vals = Y[0, :] # y-values from the loss map
534
-
535
- if axis == 'x':
536
- A = np.zeros((lm.y_steps, 4, lm.x_steps))
537
- axis_to_sweep = x_vals
538
- constant_axis = y_vals
539
- elif axis == 'y':
540
- A = np.zeros((lm.x_steps, 4, lm.y_steps))
541
- axis_to_sweep = y_vals
542
- constant_axis = x_vals
543
-
544
-
545
- for i, val in enumerate(axis_to_sweep):
546
- _, totalLoss, filamentLoss, eddyLoss, couplingLoss = self.lossMap_crossSection(val, axis_to_cut = axis)
547
- A[:, 0, i] = totalLoss
548
- A[:, 1, i] = filamentLoss
549
- A[:, 2, i] = eddyLoss
550
- A[:, 3, i] = couplingLoss
551
-
552
- # Initialize the plot
553
- fig, ax = plt.subplots()
554
- lines = ax.plot(constant_axis, A[:, :, 0], lw=2, label = ['total Loss', 'filament Loss', 'eddy Loss', 'coupling Loss'])
555
-
556
- # Set plot limits and labels
557
- ax.set_xlim(constant_axis[0], constant_axis[-1])
558
- ax.set_ylim(np.min(A), np.max(A))
559
- ax.set_xlabel(lm.ylabel if axis == 'x' else lm.xlabel)
560
- ax.set_ylabel(lm.cross_section_sweep.ylabel)
561
-
562
-
563
- # Define the animation update function
564
- def update(frame):
565
- for i, line in enumerate(lines):
566
- line.set_ydata(A[:, i, frame])
567
-
568
- if axis == 'x':
569
- if lm.x_log:
570
- sweep_value = 10**x_vals[frame]
571
- else:
572
- sweep_value = x_vals[frame]
573
- elif axis == 'y':
574
- if lm.y_log:
575
- sweep_value = 10**y_vals[frame]
576
- else:
577
- sweep_value = y_vals[frame]
578
-
579
- title = lm.cross_section_sweep.title.replace('<<sweep_value>>', str(round(sweep_value, 3)))
580
- ax.set_title(title)
581
- return lines,# line1, line2, line3, line4
582
-
583
- # Create the animation
584
- dt = 0.1
585
- ani = FuncAnimation(fig, update, frames=lm.x_steps if axis == 'x' else lm.y_steps, interval=dt*1000, blit=False)
586
-
587
- # Show the animation
588
- plt.legend()
589
- plt.grid()
590
-
591
- if lm.cross_section_sweep.save_plot:
592
- filepath = create_non_overwriting_filepath(folder_path=self.outputs_folder_path, base_name=lm.cross_section_sweep.filename, extension='.gif', overwrite=self.fdm.run.overwrite)
593
- ani.save(filepath, writer='imagemagick', fps=1/dt)
594
-
595
- if self.fdm.run.launch_gui: plt.show()
596
-
597
- def create_lossMap(self):
598
- """
599
- This function creates a loss map based on the inputs given in the loss_map section of the input file.
600
- The loss map can be plotted and saved as a .pdf file.
601
- """
602
- lm = self.fdm.magnet.postproc.batch_postproc.loss_map
603
-
604
- if self.simulation_collection:
605
- X, Y, V, dataPoints = self.lossMap_createGridData(lm.loss_type)
606
- else:
607
- X, Y, V = self.totalLoss_gridData
608
-
609
- if is_latex_installed():
610
- plt.rcParams['text.usetex'] = True
611
- plt.rcParams['font.family'] = 'times'
612
- plt.rcParams['font.size'] = 20
613
-
614
- fig, ax = plt.subplots(figsize=(10,8))
615
-
616
- c = plt.pcolormesh(X, Y, V, shading='gouraud', cmap='plasma_r')
617
- c_min = min([np.ceil(np.min(V)) for V in [V]])
618
- c_max = max([np.floor(np.max(V)) for V in [V]])
619
- c_ticks = [int(val) for val in np.arange(c_min, c_max+1)]
620
- cont = plt.contour(X,Y,V, c_ticks, colors='k', linestyles='dashed')
621
-
622
- if lm.show_datapoints:
623
- plt.scatter(dataPoints[:, 0], dataPoints[:, 1], s=50, edgecolors='k')
624
-
625
-
626
- if lm.show_loss_type_dominance_contour:
627
- sigmoid = lambda x: 1/(1+np.exp(-x))
628
- if self.simulation_collection:
629
- _, _, FilamentLoss, _ = self.lossMap_createGridData(lossType='FilamentLoss')
630
- _, _, CouplingLoss, _ = self.lossMap_createGridData(lossType='CouplingLoss')
631
- _, _, EddyLoss, _ = self.lossMap_createGridData(lossType='EddyLoss')
632
- # else:
633
- # _, _, FilamentLoss = self.filamentLoss_gridData
634
- # _, _, CouplingLoss = self.couplingLoss_gridData
635
- # _, _, EddyLoss = self.eddyLoss_gridData
636
- fil_vs_coupling_loss = np.maximum(FilamentLoss, EddyLoss) - CouplingLoss
637
- fil_vs_eddy_loss = EddyLoss - np.maximum(FilamentLoss, CouplingLoss)
638
- plt.contour(X,Y,sigmoid(fil_vs_coupling_loss),[0.5], colors='k')
639
- plt.contour(X,Y,sigmoid(fil_vs_eddy_loss),[0.5], colors='k')
640
-
641
- cbar = fig.colorbar(c, ticks=c_ticks)#, labels=c_labels)
642
- # cbar.ax.set_xticks([-7, -6, -5, -4, -3, -2, -1, 0, 1])
643
- # cbar.ax.set_yticklabels([r'$10^{-7}$', r'$10^{-6}$', r'$10^{-5}$', r'$10^{-4}$', r'$10^{-3}$', r'$10^{-2}$', r'$10^{-1}$', r'$10^0$', r'$10^1$'])
644
- cbar.ax.set_yticklabels([f"$10^{{{val}}}$" for val in c_ticks])
645
- # plt.grid(alpha=0.5)
646
- # plt.title(lm.title)
647
- # plt.xlabel(lm.xlabel)
648
- # plt.ylabel(lm.ylabel)
649
- plt.title(r'Loss per cycle (J/m)')
650
- plt.xlabel(r'Frequency $f$ (Hz)')
651
- plt.ylabel(r'Field amplitude $b$ (T)')
652
-
653
-
654
- # plt.annotate(r'Coupling', (np.log10(1.0), np.log10(0.007)), color='white')
655
- # plt.annotate(r'Filament', (np.log10(0.012), np.log10(0.74)), color='white')
656
- # plt.annotate(r'(uncoupled)', (np.log10(0.012), np.log10(0.55)), color='white')
657
- # plt.annotate(r'Filament', (np.log10(45), np.log10(0.38)), color='white')
658
- # plt.annotate(r'(coupled)', (np.log10(45), np.log10(0.28)), color='white')
659
- # plt.annotate(r'Eddy', (np.log10(2000), np.log10(0.03)), color='white')
660
-
661
- # ax.plot(np.log10(0.03), np.log10(0.2), 'o', color='white')#, xytext=(np.log10(0.03), np.log10(0.12)), arrowprops=dict(facecolor='black', shrink=0.02))
662
- # ax.plot(np.log10(30), np.log10(1), 'o', color='white')#, xytext=(np.log10(40), np.log10(0.8)), arrowprops=dict(facecolor='black', shrink=0.02))
663
- # ax.plot(np.log10(3), np.log10(0.2), 'o', color='white')#, xytext=(np.log10(2), np.log10(0.2)), arrowprops=dict(facecolor='black', shrink=0.02))
664
- # ax.plot(np.log10(5000), np.log10(0.2), 'o', color='white')#, xytext=(np.log10(5000), np.log10(0.1)), arrowprops=dict(facecolor='black', shrink=0.02))
665
-
666
- # ax.annotate('(a)', xy=(np.log10(0.03), np.log10(0.2)), xycoords='data', ha='right', va='bottom', fontsize=20, color='white')
667
- # ax.annotate('(b)', xy=(np.log10(3), np.log10(0.2)), xycoords='data', ha='right', va='bottom', fontsize=20, color='white')
668
- # ax.annotate('(c)', xy=(np.log10(30), np.log10(1)), xycoords='data', ha='right', va='bottom', fontsize=20, color='white')
669
- # ax.annotate('(d)', xy=(np.log10(5000), np.log10(0.2)), xycoords='data', ha='right', va='bottom', fontsize=20, color='white')
670
-
671
- # Define custom tick labels for x-axis
672
- x_min_log = int(np.log10(min([eval('sd.'+lm.x_val) for sd in self.simulation_collection])))
673
- x_max_log = int(np.log10(max([eval('sd.'+lm.x_val) for sd in self.simulation_collection])))
674
- x = np.arange(x_min_log, x_max_log+1)
675
- # Create a list of minor ticks
676
- minor_x_labels = []
677
- # 1) Add the ticks from x_min_log to ceil(x_min_log) to the minor_x_test list
678
- new_ticks = np.linspace(10.0**np.floor(x_min_log), 10.0**np.ceil(x_min_log), 10)[:-1]
679
- new_ticks = np.unique(new_ticks[new_ticks >= 10.0**x_min_log])
680
- minor_x_labels.extend(new_ticks)
681
- # 2) Add the ticks from ceil(x_min_log) to floor(x_max_log) to the minor_x_test list
682
- for x_val in x:
683
- new_ticks = np.linspace(10.0**x_val, 10.0**(x_val+1), 10)[1:-1]
684
- if x_val == x[-1]:
685
- new_ticks = new_ticks[new_ticks <= 10.0**x_max_log]
686
- minor_x_labels.extend(new_ticks)
687
- minor_x = [np.log10(val) for val in minor_x_labels]
688
-
689
- new_x_labels = [f"$10^{{{val}}}$" for val in x]
690
- plt.xticks(x, new_x_labels)
691
- plt.xticks(minor_x, minor=True)
692
-
693
- # Define custom tick labels for y-axis
694
- y_min_log = np.log10(min([eval('sd.'+lm.y_val) for sd in self.simulation_collection]))
695
- y_max_log = np.log10(max([eval('sd.'+lm.y_val) for sd in self.simulation_collection]))
696
- y = np.arange(np.ceil(y_min_log), np.floor(y_max_log)+1)
697
- # Create a list of minor ticks
698
- minor_y_labels = []
699
- # 1) Add the ticks from y_min_log to ceil(y_min_log) to the minor_y_test list
700
- new_ticks = np.linspace(10.0**np.floor(y_min_log), 10.0**np.ceil(y_min_log), 10)[:-1]
701
- new_ticks = np.unique(new_ticks[new_ticks >= 10.0**y_min_log])
702
- minor_y_labels.extend(new_ticks)
703
- # 2) Add the ticks from ceil(y_min_log) to floor(y_max_log) to the minor_y_test list
704
- for y_val in y:
705
- new_ticks = np.linspace(10.0**y_val, 10.0**(y_val+1), 10)[1:-1]
706
- if y_val == y[-1]:
707
- new_ticks = new_ticks[new_ticks <= 10.0**y_max_log]
708
- minor_y_labels.extend(new_ticks)
709
-
710
- new_y_labels = [f"$10^{{{int(val)}}}$" for val in y]
711
- minor_y = [np.log10(val) for val in minor_y_labels]
712
- plt.yticks(y, new_y_labels)
713
- plt.yticks(minor_y, minor=True)
714
-
715
- # plt.savefig('C:/Users/jdular/cernbox/Documents/Reports/CERN_Reports/linkedFluxPaper/fig/loss_map_54fil_noI.pdf', bbox_inches='tight')
716
-
717
-
718
- if lm.save_plot:
719
- filePath = create_non_overwriting_filepath(self.outputs_folder_path, lm.filename, '.pdf', self.fdm.run.overwrite)
720
- plt.savefig(filePath, bbox_inches='tight')
721
-
722
- if self.fdm.run.launch_gui: plt.show()
723
-
724
-
725
- def plot2d(self):
726
- """
727
- This function is used to create a 2d plot. It is supposed to be flexible and work for various kinds of plots one may want to create.
728
- """
729
- if is_latex_installed():
730
- plt.rcParams['text.usetex'] = True
731
- plt.rcParams['font.family'] = 'times'
732
- # plt.rcParams['font.size'] = 20
733
-
734
- # Create the title (or titles if combined_plot is False)
735
- title = self.fdm.magnet.postproc.batch_postproc.plot2d.title
736
- if self.fdm.magnet.postproc.batch_postproc.plot2d.combined_plot:
737
- sd = self.simulation_collection[0]
738
- commands = re.findall('<<(.*?)>>', title)
739
- for c in commands:
740
- title = title.replace(f"<<{c}>>", str(eval('sd.'+c)))
741
- else:
742
- titles = []
743
- for sd in self.simulation_collection:
744
- commands = re.findall('<<(.*?)>>', title)
745
- title_i = title
746
- for c in commands:
747
- title_i = title_i.replace(f"<<{c}>>", str(eval('sd.'+c)))
748
- titles.append(title_i)
749
-
750
- # Create the labels
751
- label_list = self.fdm.magnet.postproc.batch_postproc.plot2d.labels
752
- labels = np.zeros((len(self.simulation_collection), len(label_list)), dtype=object)
753
- for i, sd in enumerate(self.simulation_collection):
754
- simulation_labels = []
755
- for l in label_list:
756
- commands = re.findall('<<(.*?)>>', l)
757
- for c in commands:
758
- l = l.replace(f"<<{c}>>", str(eval('sd.'+c)))
759
-
760
- simulation_labels.append(l)
761
- labels[i, :] = simulation_labels
762
-
763
- # colors = plt.cm.get_cmap('magma').resampled(len(self.simulation_collection)).colors
764
- colors = plt.cm.get_cmap('viridis').resampled(len(self.simulation_collection)).colors
765
-
766
- # Load the x-values:
767
- x_val = self.fdm.magnet.postproc.batch_postproc.plot2d.x_val
768
- commands = re.findall('<<(.*?)>>', x_val)
769
- for c in commands:
770
- x_val = x_val.replace(f"<<{c}>>", str('sd.'+c))
771
- x_arr = np.array([eval(x_val) for sd in self.simulation_collection], dtype=object)
772
-
773
- # Load the y-values:
774
- y_vals = self.fdm.magnet.postproc.batch_postproc.plot2d.y_vals
775
- y_arr = np.zeros((len(self.simulation_collection), len(y_vals)), dtype=object)
776
- for i, sd in enumerate(self.simulation_collection):
777
- for j, y_val in enumerate(y_vals):
778
- commands = re.findall('<<(.*?)>>', y_val)
779
- for c in commands:
780
- y_val = y_val.replace(f"<<{c}>>", str('sd.'+c))
781
- y_arr[i, j] = eval(y_val)
782
-
783
- # data = np.column_stack((x_arr, y_arr))
784
- # np.savetxt(os.path.join(self.outputs_folder_path, self.fdm.magnet.postproc.batch_postproc.plot2d.filename+'.txt'), data, delimiter=' ', header='f total eddy coupling filament', comments='')
785
-
786
-
787
-
788
-
789
- # Plot and save the data:
790
- if not self.fdm.magnet.postproc.batch_postproc.plot2d.combined_plot and self.fdm.magnet.postproc.batch_postproc.plot2d.save_plot:
791
- # Create a folder to save the plots if combined_plot is False and save_plot is True:
792
- filename = self.fdm.magnet.postproc.batch_postproc.plot2d.filename
793
- folder_path = create_non_overwriting_filepath(self.outputs_folder_path, filename, '', self.fdm.run.overwrite)
794
- if not os.path.exists(folder_path): os.makedirs(folder_path)
795
-
796
- # Check if the y-values are all floats:
797
- y_is_float = np.all(np.apply_along_axis(lambda arr: np.all(np.vectorize(isinstance)(arr, float)), axis=1, arr=y_arr))
798
- # If they are all floats (not arrays), we can make a single plot, spanning all simulations, instead of one plot per simulation.
799
- if y_is_float:
800
- for column in range(y_arr.shape[1]):
801
- plt.plot(x_arr, y_arr[:, column], label=label_list[column])
802
- if self.fdm.magnet.postproc.batch_postproc.plot2d.legend:
803
- plt.legend()
804
- plt.grid()
805
- plt.xlabel(self.fdm.magnet.postproc.batch_postproc.plot2d.xlabel)
806
- plt.ylabel(self.fdm.magnet.postproc.batch_postproc.plot2d.ylabel)
807
- plt.title(title)
808
- if self.fdm.magnet.postproc.batch_postproc.plot2d.x_log:
809
- plt.xscale('log')
810
- if self.fdm.magnet.postproc.batch_postproc.plot2d.y_log:
811
- plt.yscale('log')
812
-
813
- if self.fdm.magnet.postproc.batch_postproc.plot2d.save_plot:
814
- filename = self.fdm.magnet.postproc.batch_postproc.plot2d.filename
815
- filePath = create_non_overwriting_filepath(self.outputs_folder_path, filename, '.png', self.fdm.run.overwrite)
816
- plt.savefig(filePath)
817
-
818
-
819
- else:
820
- for i, (x, y_vals_per_sim, labels_per_sim, color) in enumerate(zip(x_arr, y_arr, labels, colors)):
821
- if self.fdm.magnet.postproc.batch_postproc.plot2d.combined_plot:
822
- # If combined_plot is true, plot all the data in the same figure:
823
- # x_sin = 2*np.sin(2*np.pi*x/x.iloc[-1]) #temporary for missing hs_val
824
- for y, label in zip(y_vals_per_sim, labels_per_sim):
825
- # plt.plot(x_sin, x_sin+1.6*y, self.fdm.magnet.postproc.batch_postproc.plot2d.linestyle, label=label, color = color) #temporary for missing hs_val
826
- plt.plot(x, y, self.fdm.magnet.postproc.batch_postproc.plot2d.linestyle, label=label, color = color)
827
- else:
828
- # If combined_plot is false, plot data from each simulation in a separate figure:
829
- plt.figure()
830
- for y, label in zip(y_vals_per_sim, labels_per_sim):
831
- plt.plot(x, y, self.fdm.magnet.postproc.batch_postproc.plot2d.linestyle, label=label)
832
-
833
- # Set the plot options
834
- if self.fdm.magnet.postproc.batch_postproc.plot2d.legend:
835
- plt.legend()
836
- plt.grid()
837
- plt.xlabel(self.fdm.magnet.postproc.batch_postproc.plot2d.xlabel)
838
- plt.ylabel(self.fdm.magnet.postproc.batch_postproc.plot2d.ylabel)
839
- plt.title(title if self.fdm.magnet.postproc.batch_postproc.plot2d.combined_plot else titles[i])
840
-
841
-
842
- if not self.fdm.magnet.postproc.batch_postproc.plot2d.combined_plot and self.fdm.magnet.postproc.batch_postproc.plot2d.save_plot:
843
- # If combined_plot is false we expect a lot of plots and save them all into a folder:
844
- filename = self.fdm.magnet.postproc.batch_postproc.plot2d.filename
845
- plt.savefig(os.path.join(folder_path, filename+f"_{i}.png"))
846
-
847
- if self.fdm.magnet.postproc.batch_postproc.plot2d.combined_plot and self.fdm.magnet.postproc.batch_postproc.plot2d.save_plot:
848
- # If combined_plot is true we expect only one plot and save it in the main folder:
849
- filename = self.fdm.magnet.postproc.batch_postproc.plot2d.filename
850
- filePath = create_non_overwriting_filepath(self.outputs_folder_path, filename, '.png', self.fdm.run.overwrite)
851
- plt.savefig(filePath, dpi=300)
852
-
853
- if self.fdm.run.launch_gui: plt.show()
854
-
855
-
1
+ import os, re, datetime, getpass, logging, sys
2
+ import numpy as np
3
+ import subprocess as sp
4
+ import matplotlib.pyplot as plt
5
+
6
+ from scipy import interpolate, constants
7
+ from dataclasses import dataclass
8
+ from matplotlib.ticker import FuncFormatter
9
+ from matplotlib.animation import FuncAnimation
10
+ from matplotlib.backends.backend_pdf import PdfPages, Stream, PdfFile
11
+ from fiqus.utils.Utils import FilesAndFolders
12
+
13
+
14
+ @dataclass
15
+ class PlotData:
16
+ x_data: np.ndarray
17
+ y_data: np.ndarray
18
+ label: str
19
+ color: np.ndarray
20
+ linestyle: str = '-'
21
+
22
+
23
+ class PDFreport:
24
+ def __init__(self, filepath, title='PostprocReport'):
25
+ self.creation_date = datetime.datetime.now()
26
+ self.author = getpass.getuser()
27
+ self.file = os.path.join(filepath, str(title) + '.pdf')
28
+ self.pdf = PdfPages(self.file)
29
+ # init metadata
30
+ self.add_metadata()
31
+
32
+ def multi_sim_report(self):
33
+ sims = self.plotter.simulation_collection
34
+
35
+ def add_table(self, title, data, row_header=None, col_header=None):
36
+ title_text = title
37
+ footer_text = self.author + ' - ' + str(datetime.datetime.now())
38
+ fig_background_color = 'white'
39
+ fig_border = 'steelblue'
40
+
41
+ column_headers = ['taus (s)', 'kappas (A)', 'alphas']
42
+ row_headers = []
43
+ # Format the table data as text and collect the row headers.
44
+ cell_text = []
45
+ for idx, row in enumerate(data):
46
+ cell_text.append([f'{x:f}' for x in row])
47
+ row_headers.append('k=' + str(idx + 1))
48
+ # Get some lists of color specs for row and column headers
49
+ rcolors = plt.cm.BuPu(np.full(len(row_headers), 0.1))
50
+ ccolors = plt.cm.BuPu(np.full(len(column_headers), 0.1))
51
+ # Create the figure. A small pad on tight_layout seems to better regulate white space;
52
+ # experimenting with an explicit figsize here can also produce a better outcome.
53
+ plt.figure(linewidth=2, edgecolor=fig_border, facecolor=fig_background_color, tight_layout={'pad': 1}, figsize=(5, 3))
54
+ the_table = plt.table(cellText=cell_text, rowLabels=row_headers, rowColours=rcolors, rowLoc='right',
55
+ colColours=ccolors, colLabels=column_headers, loc='center')
56
+ # Scaling is the only influence we have over top and bottom cell padding.
57
+ # Make the rows taller (i.e., make cell y scale larger).
58
+ the_table.scale(1, 1.5)
59
+ # Hide the axes and the axes border
60
+ ax = plt.gca()
61
+ ax.get_xaxis().set_visible(False)
62
+ ax.get_yaxis().set_visible(False)
63
+ plt.box(on=None)
64
+ plt.suptitle(title_text) # Add the title
65
+ plt.figtext(0.95, 0.05, footer_text, horizontalalignment='right', size=6, weight='light') # Add the footer
66
+ plt.draw() # Force the figure to update, so backends center objects correctly within the figure; without plt.draw() the title would center on the axes and not the figure. Note that plt.savefig ignores figure edge and face colors unless they are mapped explicitly.
67
+ fig = plt.gcf()
68
+ self.savefigs()
69
+ plt.close()
70
+
71
+ def savefigs(self):
72
+ """ Save all open figures one on each page """
73
+ for i in plt.get_fignums():
74
+ self.pdf.savefig(i)
75
+
76
+ def add_metadata(self):
77
+ d = self.pdf.infodict()
78
+ d['Title'] = 'Report'
79
+ d['Author'] = getpass.getuser()
80
+ d['CreationDate'] = datetime.datetime.now()
81
+
82
+ def __del__(self):
83
+ """ save all generated figures and close report on destruction """
84
+ self.savefigs()
85
+ self.pdf.close()
86
+
87
+
88
+ class PlotPythonConductorAC:
89
+ """
90
+ This class handles various plots for single simulation CACStrand models.
91
+ """
92
+
93
+ def __init__(self, fdm, outputs_folder_path='', simulation_data=None, flux_model=None) -> None:
94
+ self.fdm = fdm
95
+ self.outputs_folder_path = os.path.join(outputs_folder_path, fdm.magnet.postproc.output_folder) # This is the path to the folder where the postprocessed data is written
96
+ self.simulation_data = simulation_data
97
+ self.flux_model = flux_model
98
+ # self.pdf_report = PDFreport(fdm=fdm,author=getpass.getuser(),outputs_folder_path=self.simulation_data.solution_folder) if fdm.magnet.postproc.generate_pdf else None
99
+
100
+ def colors(self, length):
101
+ """ unify color scheme for all plots """
102
+ return plt.cm.rainbow(np.linspace(0.05, 0.9, length))
103
+
104
+ def export2txt(self, data_names, data_columns, filename):
105
+ """ This function can be used to export one or more arrays of simulation data in one txt file (stored in solution folder)."""
106
+ np_data = np.array(data_columns).T
107
+ np.savetxt(os.path.join(self.simulation_data.solution_folder, str(filename) + '.txt'), np_data, header=" ".join(data_names), comments="")
108
+
109
+ def plot_power_loss(self):
110
+ """ Compares the losses produced by the ROHF model S_chain to the corresponding filament losses """
111
+
112
+ t_final = np.array(self.simulation_data.time)[-1]
113
+ if self.flux_model:
114
+ _, p_irr_rohf, p_eddy_rohf, per_cycle_rohf, _ = self.flux_model.compute(self.simulation_data.time, self.simulation_data.I_transport, sim=self.simulation_data)
115
+ if self.simulation_data.f and 1 / self.simulation_data.f < t_final:
116
+ offset_time = t_final - 1 / self.simulation_data.f
117
+ offset_idx = np.abs(self.simulation_data.time - offset_time).argmin()
118
+ lastp_time = self.simulation_data.time[offset_idx:]
119
+ lastp_I = self.simulation_data.I_transport[offset_idx:]
120
+ lastp_int_flux = self.simulation_data.flux_internal[offset_idx:]
121
+ lastp_filamentloss = self.simulation_data.power["FilamentLoss"][offset_idx:]
122
+ # selffield_power_unitarea_loop_factor = 2 * selffield_power_unitarea # missing 2x radial contribution (eddy)
123
+ print("#### HYSTERETIC SURFACE ####")
124
+ print("simple flux surface: " + str(np.trapz(lastp_I, lastp_int_flux)))
125
+ print("###### PER CYCLE LOSS ######")
126
+ print("FilamentLoss " + str(self.simulation_data.total_power_per_cycle["FilamentLoss"]))
127
+ if self.flux_model: print("ROHF Pirr: " + str(per_cycle_rohf))
128
+
129
+ colorgrad = self.colors(3)
130
+ #### DYNAMIC LOSS ####
131
+ plt.figure()
132
+ plt.title("Instantaneous power losses")
133
+ plt.plot(self.simulation_data.time, self.simulation_data.power["TotalLoss"], color=colorgrad[0], linestyle='--', label="Total loss")
134
+ plt.plot(self.simulation_data.time, self.simulation_data.power["FilamentLoss"], color=colorgrad[1], linestyle='--', label="Filament loss")
135
+ plt.plot(self.simulation_data.time, self.simulation_data.power["EddyLoss"], color=colorgrad[2], linestyle='--', label="Eddy loss")
136
+ # plt.plot(self.time, graphic_loss, color=self.c_grad[-2], label=r'graphic $P_{\text{irr}}$')
137
+ if self.flux_model:
138
+ plt.plot(self.simulation_data.time, p_irr_rohf + p_eddy_rohf, color=colorgrad[0], label=r'ROHF ($N=$' + str(self.flux_model.N) + r') $P_{\mathrm{total}}$')
139
+ plt.plot(self.simulation_data.time, p_irr_rohf, color=colorgrad[1], label=r'ROHF ($N=$' + str(self.flux_model.N) + r') $P_{\mathrm{irr}}$')
140
+ plt.plot(self.simulation_data.time, p_eddy_rohf, color=colorgrad[2], label=r'ROHF ($N=$' + str(self.flux_model.N) + r') $P_{\mathrm{eddy}}$')
141
+ # plt.plot(self.simulation_data.time[:len(p_irr_rohf)], self.flux_model.Ec_0 * (abs(self.simulation_data.I_transport[:len(p_irr_rohf)])/self.flux_model.Ic_0)**(self.flux_model.n) * abs(self.simulation_data.I_transport[:len(p_irr_rohf)]), color=self.c_grad[-1], label = r'power-law Filament', linestyle='--')
142
+ # plt.plot(self.time, abs(np.gradient(selffield_loss, self.time)) * (np.pi * strand_radius**2) , label=r'TEST selffield loss power')
143
+ plt.xlabel(r'Time [s]')
144
+ plt.ylabel(r"Power per unitlength [W']")
145
+ plt.legend()
146
+ plt.grid(linestyle='dotted')
147
+
148
+ def plot_transport(self):
149
+ """ This funcion makes a nice combined plot of Transport current and voltage """
150
+ # make fancy plot
151
+
152
+ colors = self.colors(2)
153
+ fig, ax1 = plt.subplots()
154
+ color = 'tab:red'
155
+ ax1.set_title("Transport current and voltage")
156
+ ax1.set_xlabel(r'Time $(s)$')
157
+ ax1.set_ylabel(r'Current $(A)$', color=colors[0])
158
+ ax1.plot(self.simulation_data.time, self.simulation_data.I_transport, color=colors[0])
159
+ ax1.grid(axis='x', linestyle='dotted')
160
+ ax1.tick_params(axis='y', labelcolor=colors[0])
161
+
162
+ ax2 = ax1.twinx()
163
+ color = 'tab:blue'
164
+ ax2.set_ylabel(r'Voltage $(V)$', color=colors[1])
165
+ ax2.plot(self.simulation_data.time, self.simulation_data.V_transport_unitlen, color=colors[1])
166
+ ax2.tick_params(axis='y', labelcolor=colors[1])
167
+ ax2.grid(axis='y', linestyle='dotted')
168
+ fig.tight_layout()
169
+
170
+ def plot_phaseshift_voltage(self, phaseshift):
171
+ """ Removes the given phase shift (in degrees) from the transport voltage and plots it together with the transport current. """
172
+ if not self.simulation_data.f:
173
+ raise ValueError('Cannot plot the shifted voltage: the selected source type has no frequency')
174
+
175
+ time_delay = phaseshift / (360 * self.simulation_data.f)
176
+ shift_idx = (np.abs(self.simulation_data.time - time_delay)).argmin()
177
+ shifted_V = self.simulation_data.V_transport[shift_idx:]
178
+ shifted_time = self.simulation_data.time[1 + shift_idx:] - self.simulation_data.time[shift_idx]
179
+
180
+ # make fancy plot
181
+ fig, ax1 = plt.subplots()
182
+ color = 'tab:red'
183
+ ax1.set_title("shifted voltage (" + str(phaseshift) + " degree)")
184
+ ax1.set_xlabel(r'Time $(s)$')
185
+ ax1.set_ylabel(r'Current $(A)$', color=color)
186
+ ax1.plot(self.simulation_data.time, self.simulation_data.I_transport, color=color)
187
+ ax1.grid(axis='x', linestyle='dotted')
188
+ ax1.tick_params(axis='y', labelcolor=color)
189
+
190
+ ax2 = ax1.twinx()
191
+ color = 'tab:blue'
192
+ ax2.set_ylabel(r'Voltage $(V)$', color=color)
193
+ ax2.plot(shifted_time, shifted_V, color=color, linestyle='dashed')
194
+ ax2.tick_params(axis='y', labelcolor=color)
195
+ ax2.grid(axis='y', linestyle='dotted')
196
+ fig.tight_layout()
197
+
198
+ def plot_flux(self):
199
+ """ This plots the internal flux obtained through two approaches and compares them with an s_chain model """
200
+ # L_base = (self.simulation_data.flux_internal[1]-self.simulation_data.flux_internal[0])/(self.simulation_data.I_transport[1]-self.simulation_data.I_transport[0])
201
+ if self.flux_model:
202
+ flux_rohf, _, _, _, _ = self.flux_model.compute(self.simulation_data.time, self.simulation_data.I_transport, sim=self.simulation_data)
203
+
204
+ colors = self.colors(3)
205
+ ### SIMPLE FLUX HOMOGENIZATION ###
206
+ plt.figure()
207
+ plt.title("Internal flux ")
208
+ plt.xlabel(r"Transport current $[A]$")
209
+ plt.ylabel(r"Flux per unitlength $[Tm]$")
210
+ # plt.plot(self.I_transport, self.flux_external, color=self.c_grad[2], label = r'$\Phi_{\text{ext}}$')
211
+ plt.plot(self.simulation_data.I_transport, self.simulation_data.flux_internal, color=colors[0], label=r'$\Phi_{\text{int}}$')
212
+ plt.plot(self.simulation_data.I_transport, constants.mu_0 / (4 * np.pi) * self.simulation_data.I_transport, color=colors[1], label=r"$L'=\mu_0/(4 \pi)$")
213
+ if self.flux_model: plt.plot(self.simulation_data.I_transport, flux_rohf, color=colors[2], label=r'$\Phi_{\text{ROHF}}$ ($N=$' + str(self.flux_model.N) + ')')
214
+ plt.grid(linestyle='dotted')
215
+ plt.ticklabel_format(style='sci', axis='both', scilimits=(0, 0))
216
+ plt.legend()
217
+ plt.grid(axis='y', linestyle='dotted')
218
+
219
+ ### FLUX BASED VOLTAGE ###
220
+ if self.flux_model: voltage_rohf = -np.gradient((flux_rohf + self.simulation_data.flux_external), self.simulation_data.time)
221
+ plt.figure()
222
+ plt.title("flux based Voltage")
223
+ plt.xlabel(r"Time $[s]$")
224
+ plt.ylabel(r"Voltage per unit-length $\left[\frac{V}{m}\right]$")
225
+ # plt.plot(self.time, -np.gradient((self.flux_internal + self.flux_external), self.time), color=self.c_grad[3], label = r'$-\frac{d\Phi}{dt}$ graphic')
226
+ if self.flux_model: plt.plot(self.simulation_data.time, voltage_rohf, color=colors[2], label=r'$-\frac{d\Phi}{dt}$ ROHF ($N=$' + str(self.flux_model.N) + ')')
227
+ plt.plot(self.simulation_data.time, self.simulation_data.V_transport_unitlen, color=colors[0], linestyle='--', label=r'$V$ transport')
228
+ plt.grid(linestyle='dotted')
229
+ plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
230
+ plt.legend()
231
+ plt.grid(axis='y', linestyle='dotted')
232
+
233
+ V = self.simulation_data.V_transport_unitlen
234
+ I = self.simulation_data.I_transport
235
+ idx = min(range(len(V)), key=lambda i: abs(V[i] + 1e-4))
236
+ print("ciritical current: " + str(I[idx]))
237
+
238
+ # data = np.vstack([self.simulation_data.I_transport, self.simulation_data.flux_internal, flux_rohf])
239
+ # self.export2txt(['current','internal_flux','rohf_flux'], data, 'flux_hysteresis')
240
+
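# --- Editor's sketch (not part of the fiqus diff): the flux-based voltage compared against the
# --- transport voltage in plot_flux above is Faraday's law per unit length,
# --- V = -d(Phi_int + Phi_ext)/dt, evaluated numerically with np.gradient.
# --- The flux waveforms below are synthetic assumptions.
import numpy as np

t = np.linspace(0.0, 0.02, 2001)                        # time axis (assumed)
flux_internal = 1e-7 * np.sin(2 * np.pi * 50 * t)       # internal flux per unit length [Tm] (assumed)
flux_external = 2e-7 * np.sin(2 * np.pi * 50 * t)       # external flux per unit length [Tm] (assumed)

voltage_per_unit_length = -np.gradient(flux_internal + flux_external, t)   # [V/m]
print(voltage_per_unit_length[0])                       # peak magnitude ~ 2*pi*50*3e-7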
241
+ def plot_current_rings(self):
242
+ """
243
+ This function decomposes the total transport current into a reversible and an irreversible part based on the inner flux computations of the filaments.
244
+ It is essentially the dual approach to the plot_voltage_decomp() function declared hereafter, but should not suffer from its numerical instabilities.
245
+ """
246
+
247
+ t_final = np.array(self.simulation_data.time)[-1]
248
+ if not self.simulation_data.f or 1 / self.simulation_data.f > t_final:
249
+ raise ValueError('Cannot plot current rings: no frequency defined or the simulation is shorter than one period')
250
+
251
+ ### plot current threshold rings ###
252
+ if self.flux_model:
253
+ jc_real = 3.33e10
254
+ circle_radii = np.sqrt(self.simulation_data.conductor.strand.number_of_filaments * (self.simulation_data.conductor.strand.filament_diameter / 2) ** 2 - np.array(self.flux_model.kappas_bar) / (jc_real * np.pi))
255
+ circle_surfaces = np.pi * circle_radii ** 2
256
+ ringwidth = np.diff(np.flip(circle_radii), prepend=0)
257
+
258
+ plt.figure()
259
+ plt.title("ROHF single filament - field free zones")
260
+ for idx, radius in enumerate(circle_radii):
261
+ shells = [plt.Circle((2.05 * circle_radii[0] * idx, 0), radius, color=self.c_grad[idx], hatch='//', label="> " + "{:.2f}".format(self.flux_model.kappas_bar[idx]) + " A", fill=False),
262
+ plt.Circle((2.05 * circle_radii[0] * idx, 0), circle_radii[0], color='black', fill=False)] # contour
263
+ for shell in shells: plt.gca().add_patch(shell)
264
+ # core = plt.Circle((0,0), circle_radii[-1], color='white')
265
+ # plt.gca().add_patch(core)
266
+ plt.legend()
267
+ plt.ticklabel_format(style='sci', axis='both', scilimits=(0, 0))
268
+ plt.xlim(-1.1 * circle_radii[0], 2.1 * len(circle_radii) * circle_radii[0])
269
+ plt.ylim(-1.1 * circle_radii[0], 1.1 * circle_radii[0])
270
+
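# --- Editor's sketch (not part of the fiqus diff): the ring radii drawn in plot_current_rings
# --- above. If a threshold current kappa saturates an outer annulus of the homogenized filament
# --- area at a critical current density jc, then jc * pi * (R_eq**2 - r**2) = kappa with
# --- R_eq = sqrt(N_f) * d_f / 2, which gives r = sqrt(N_f * (d_f / 2)**2 - kappa / (jc * pi)).
# --- The numeric values below are illustrative assumptions, not values from the model.
import numpy as np

jc = 3.33e10                                  # critical current density [A/m^2] (assumed)
n_filaments = 54                              # number of filaments (assumed)
filament_diameter = 50e-6                     # filament diameter [m] (assumed)
kappas_bar = np.array([50.0, 150.0, 300.0])   # threshold currents [A] (assumed)

radii = np.sqrt(n_filaments * (filament_diameter / 2) ** 2 - kappas_bar / (jc * np.pi))
print(radii)                                  # field-free core radius for each threshold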
271
+ def plot_fft_voltage(self):
272
+ """ analyze the transport voltage in the frequency domain """
273
+ if self.simulation_data.f:
274
+ T = 1 / self.simulation_data.f
275
+ start_idx = abs(self.simulation_data.time - (max(self.simulation_data.time) - T)).argmin() - 2
276
+ X = np.fft.fft(self.simulation_data.V_transport_unitlen[start_idx:])
277
+ N = len(X)
278
+ n = np.arange(N)
279
+ freq = n / T
280
+
281
+ fig, axs = plt.subplots(2)
282
+ fig.suptitle(r'Frequency domain voltage')
283
+ axs[0].stem(freq, np.abs(X), 'b', markerfmt=" ", basefmt="-b")
284
+ axs[0].set_xlabel('Freq (Hz)')
285
+ axs[0].set_ylabel('FFT Amplitude |X(freq)|')
286
+ axs[0].grid(linestyle='dotted')
287
+ axs[0].set_xlim([0, 10 * self.simulation_data.f])
288
+
289
+ axs[1].plot(self.simulation_data.time[:start_idx - 1], self.simulation_data.V_transport_unitlen[:start_idx - 1], '--r')
290
+ axs[1].plot(self.simulation_data.time[start_idx:], np.fft.ifft(X).real, 'r', label='transport voltage')
291
+ axs[1].set_xlabel('Time (s)')
292
+ axs[1].set_ylabel('Amplitude')
293
+ axs[1].grid(linestyle='dotted')
294
+ axs[1].set_xlim([0, max(self.simulation_data.time)])
295
+
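# --- Editor's sketch (not part of the fiqus diff): the idea behind plot_fft_voltage above.
# --- Only the last full period is transformed, so the FFT bins fall at integer multiples of the
# --- source frequency and the harmonic content can be read off directly. The signal below is a
# --- synthetic assumption (fundamental plus a 20 % third harmonic).
import numpy as np

f = 50.0                                                   # source frequency [Hz] (assumed)
n_per_period = 1000
t = np.arange(3 * n_per_period) / (n_per_period * f)       # three periods, dt = 1/(1000*f)
v = np.sin(2 * np.pi * f * t) + 0.2 * np.sin(2 * np.pi * 3 * f * t)

last_period = v[-n_per_period:]                            # exactly one period of samples
X = np.fft.fft(last_period)
freq = np.arange(n_per_period) * f                         # bin k sits at k*f
amplitude = 2 * np.abs(X) / n_per_period                   # single-sided amplitude spectrum
print(round(amplitude[1], 3), round(amplitude[3], 3))      # -> 1.0 0.2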
296
+ def plot_instantaneous_power(self, show: bool = True, title: str = "Power", save_plot: bool = False, save_folder_path: str = None, save_file_name: str = None, overwrite: bool = False):
297
+ plt.figure()
298
+ plt.plot(self.simulation_data.power['Time'], self.simulation_data.power[self.simulation_data.power_columns[1:]], label=self.simulation_data.power_columns[1:])
299
+ plt.xlabel('Time [s]')
300
+ plt.ylabel('Power [W/m]')
301
+ plt.legend()
302
+
303
+ # Configure title:
304
+ # Class attributes can be accessed by using '<< ... >>' in the title string.
305
+ commands = re.findall('<<(.*?)>>', title)
306
+ for c in commands:
307
+ title = title.replace(f"<<{c}>>", str(eval('self.simulation_data.' + c)))
308
+ plt.title(title)
309
+
310
+ if save_plot: # Save the plot
311
+ filePath = os.path.join(save_folder_path, str(save_file_name) + '.png')
312
+ plt.savefig(filePath)
313
+
314
+ if show:
315
+ plt.show()
316
+ else:
317
+ plt.close()
318
+
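# --- Editor's sketch (not part of the fiqus diff): how the '<< ... >>' placeholders in the title
# --- of plot_instantaneous_power above (and in plot2d further down) are expanded. Each
# --- placeholder names an attribute path that is evaluated on the simulation-data object and
# --- substituted into the string. The SimData class is a stand-in assumption for that object.
import re

class SimData:
    f = 50.0            # assumed attribute, standing in for a real simulation parameter

sd = SimData()
title = "Power at <<f>> Hz"
for c in re.findall('<<(.*?)>>', title):
    title = title.replace(f"<<{c}>>", str(eval('sd.' + c)))
print(title)            # -> "Power at 50.0 Hz"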
319
+ def plot_impedance(self, frequency_range):
320
+ """ This function compares the estimated data impedance with an analytic solution for inductive tranmission """
321
+
322
+ if not self.simulation_data.f or not self.simulation_data.analytic_RL:
323
+ raise ValueError('No impedance estimate for the selected source type')
324
+ analytic_impedance = self.simulation_data.analytic_RL[0] + 1j * 2 * np.pi * frequency_range * self.simulation_data.analytic_RL[1] # R + jwL
325
+
326
+ fig, axs = plt.subplots(2)
327
+ fig.suptitle(r'Simulation Impedance $Z$')
328
+ axs[0].plot(frequency_range, np.abs(np.imag(analytic_impedance)), '-b', label="pure Inductance")
329
+ axs[0].plot(self.simulation_data.f, np.abs(self.simulation_data.data_impedance), 'r', marker="x", label="simulation")
330
+ axs[0].set_xscale('log')
331
+ axs[0].set_yscale('log')
332
+ axs[0].set_ylabel(r'Magnitude $|Z|$')
333
+ axs[0].grid(linestyle='dotted')
334
+ axs[0].legend()
335
+
336
+ axs[1].plot(frequency_range, 90 * np.ones((len(frequency_range), 1)), '-b', label="pure Inductance")
337
+ axs[1].plot(self.simulation_data.f, np.angle(self.simulation_data.data_impedance, deg=True), 'r', marker="x", label="simulation")
338
+ axs[1].set_xscale('log')
339
+ axs[1].set_ylim([-15, 105])
340
+ axs[1].set_ylabel(r'Phase $\measuredangle{Z}$' + r' $(deg)$')
341
+ axs[1].set_xlabel(r'Frequency $(Hz)$')
342
+ axs[1].grid(linestyle='dotted')
343
+ axs[1].legend()
344
+
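# --- Editor's sketch (not part of the fiqus diff): the analytic reference used in plot_impedance
# --- above. For a series R-L representation the impedance is Z(f) = R + j*2*pi*f*L; the plot
# --- compares |Z| and angle(Z) against the values extracted from the simulation. R and L below
# --- are illustrative assumptions, not values taken from analytic_RL.
import numpy as np

R, L = 1e-6, 1e-7                           # assumed resistance [Ohm] and inductance [H]
frequency_range = np.logspace(0, 5, 6)      # 1 Hz ... 100 kHz
Z = R + 1j * 2 * np.pi * frequency_range * L

print(np.abs(Z))                            # magnitude |Z|
print(np.angle(Z, deg=True))                # phase, approaching 90 deg at high frequency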
345
+ def plot_resistivity(self):
346
+
347
+ time, I, flux = self.simulation_data.first_rise(current=True)
348
+
349
+ if self.flux_model:
350
+ flux_rohf, p_irr, _, _, _ = self.flux_model.compute(time, I, sim=self.simulation_data)
351
+ # p_ohm_fil, p_ohm_mat, R_fil, R_mat = self.flux_model.ohmic_loss(I, self.simulation_data)
352
+
353
+ colors = self.colors(3)
354
+ ### SIMPLE FLUX HOMOGENIZATION ###
355
+ plt.figure(figsize=(5, 8))
356
+ plt.title("Differential resitivity")
357
+ plt.xlabel(r"Transport current $[A]$")
358
+ plt.ylabel(r"Transport voltage per unit length $[V/m]$")
359
+ # plt.plot(self.I_transport, self.flux_external, color=self.c_grad[2], label = r'$\Phi_{\text{ext}}$')
360
+ plt.plot(I, abs(self.simulation_data.V_transport_unitlen[:len(I)]), color=colors[0], label=r'FEM reference')
361
+ # plt.plot(I, (1.68e-10/(0.5*np.pi*(0.5e-3)**2))*I, color=self.c_grad[1], label='RRR100 copper')
362
+ if self.flux_model:
363
+ plt.plot(I, self.flux_model.Ec_0 * (abs(I) / self.flux_model.Ic_0) ** (self.flux_model.n), color=colors[1], label=r'Power-law only')
364
+ plt.plot(I, (p_irr) / I, color=colors[2], label=r'ROHF current sharing')
365
+ plt.axhline(y=self.flux_model.Ec_0, color='black', linestyle='--', label=r'$E_c = 10^{-4}\,\mathrm{V}/\mathrm{m}$')
366
+ # plt.axis([0.7*self.flux_model.Ic_0, 1.2*self.flux_model.Ic_0, 0, 100*self.flux_model.Ec_0])
367
+ plt.grid(linestyle='dotted')
368
+ plt.ticklabel_format(style='sci', axis='both', scilimits=(0, 0))
369
+ plt.legend()
370
+ plt.grid(axis='y', linestyle='dotted')
371
+
372
+ def show(self):
373
+ plt.show()
374
+
375
+
376
+ class BatchPlotPythonConductorAC:
377
+ """
378
+ This class loads and stores the data from the simulations specified in a csv file and can apply various postprocessing operations on the data.
379
+ The data from each simulation is saved as a SimulationData object which is subsequently stored in a list in this class.
380
+ """
381
+
382
+ def __init__(self, fdm, lossMap_gridData_folder=None, outputs_folder_path='', simulation_collection=None, fluxmodel_collection=None) -> None:
383
+ self.fdm = fdm
384
+ self.outputs_folder_path = os.path.join(outputs_folder_path, fdm.magnet.postproc.output_folder) # This is the path to the folder where the postprocessed data is written
385
+ self.simulation_collection = simulation_collection
386
+ self.fluxmodel_collection = fluxmodel_collection
387
+ try:
388
+ subprocess.run(['pdflatex', '--version'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
389
+ tex_installation = True
390
+ except OSError:  # pdflatex is not available
391
+ tex_installation = False
392
+ self.tex_installation = tex_installation
393
+
394
+ if not os.path.exists(self.outputs_folder_path):
395
+ os.makedirs(self.outputs_folder_path)
396
+
397
+ if lossMap_gridData_folder is not None:
398
+ self.totalLoss_gridData = self.load_lossMap_gridData('TotalLoss', lossMap_gridData_folder)
399
+ self.filamentLoss_gridData = self.load_lossMap_gridData('FilamentLoss', lossMap_gridData_folder)
400
+ self.eddyLoss_gridData = self.load_lossMap_gridData('EddyLoss', lossMap_gridData_folder)
401
+ self.couplingLoss_gridData = self.load_lossMap_gridData('CouplingLoss', lossMap_gridData_folder)
402
+
403
+ def colors(self, length):
404
+ """ unify color scheme for plots """
405
+ return plt.cm.rainbow(np.linspace(0.05, 0.9, length))
406
+
407
+ def create_non_overwriting_filepath(self, folder_path, base_name, extension, overwrite):
408
+ """
409
+ Creates a filepath that does not overwrite any existing files.
410
+
411
+ This function checks if a file already exists at the specified filepath. If the file exists and `overwrite` is False,
412
+ it modifies the filepath to create a new file instead of overwriting the existing one.
413
+ If `overwrite` is True or the file does not exist, it returns the filepath as it is.
414
+
415
+ Parameters
416
+ ----------
417
+ folder_path : str
418
+ The path to the folder where the file will be created.
419
+ base_name : str
420
+ The base name of the file.
421
+ extension : str
422
+ The extension of the file.
423
+ overwrite : bool, optional
424
+ If True, the function will overwrite an existing file. If False, the function will modify the filepath to avoid overwriting. Defaults to False.
425
+
426
+ Returns
427
+ -------
428
+ str
429
+ The final filepath. If `overwrite` is False and a file already exists at the original filepath, this will be a new filepath that does not overwrite any existing files.
430
+ """
431
+ if os.path.exists(os.path.join(folder_path, base_name + extension)) and not overwrite:
432
+ counter = 1
433
+ new_name = base_name + f"_{counter}" + extension
434
+ while os.path.exists(os.path.join(folder_path, new_name)):
435
+ new_name = base_name + f"_{counter}" + extension
436
+ counter += 1
437
+ return os.path.join(folder_path, new_name)
438
+
439
+ return os.path.join(folder_path, base_name + extension)
440
+
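# --- Editor's sketch (not part of the fiqus diff): expected behaviour of
# --- create_non_overwriting_filepath above. If 'loss_map.png' already exists and overwrite is
# --- False, the next free name is 'loss_map_1.png', then 'loss_map_2.png', and so on. The
# --- temporary directory below is an assumption used only to make the sketch runnable.
import os
import tempfile

with tempfile.TemporaryDirectory() as folder:
    open(os.path.join(folder, 'loss_map.png'), 'w').close()    # pretend a plot already exists
    base, ext = 'loss_map', '.png'
    candidate = os.path.join(folder, base + ext)
    counter = 0
    while os.path.exists(candidate):                            # same idea as the method above
        counter += 1
        candidate = os.path.join(folder, base + f"_{counter}" + ext)
    print(os.path.basename(candidate))                          # -> loss_map_1.png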
441
+ def lossMap_createGridData(self, lossType='TotalLoss', x_val_to_include=None, y_val_to_include=None):
442
+ """
443
+ This function creates the grid data needed for the loss map, based on the yaml input file.
444
+ Given a collection of simulations it interpolates the loss data between the datapoints to a grid and returns the grid data.
445
+ """
446
+ lm = self.fdm.magnet.postproc.batch_postproc.loss_map
447
+
448
+ # Extract data from simulation collection and normalize
449
+ x_arr = np.array([eval('sd.' + lm.x_val) / lm.x_norm for sd in self.simulation_collection])
450
+ y_arr = np.array([eval('sd.' + lm.y_val) / lm.y_norm for sd in self.simulation_collection])
451
+ loss = np.array([sd.total_power_per_cycle[lossType] / lm.loss_norm for sd in self.simulation_collection])
452
+
453
+ # Logarithmic scaling
454
+ if lm.x_log: x_arr = np.log10(x_arr)
455
+ if lm.y_log: y_arr = np.log10(y_arr)
456
+ if lm.loss_log: loss = np.log10(loss)
457
+
458
+ x_arr_interpolated = np.linspace(min(x_arr), max(x_arr), lm.x_steps)
459
+ y_arr_interpolated = np.linspace(min(y_arr), max(y_arr), lm.y_steps)
460
+ # Insert specific values to the grid if they are not already included (useful for cross sections)
461
+ if x_val_to_include is not None and x_val_to_include not in x_arr_interpolated:
462
+ x_arr_interpolated = np.insert(x_arr_interpolated, np.where(x_arr_interpolated > x_val_to_include)[0][0], x_val_to_include)
463
+ if y_val_to_include is not None and y_val_to_include not in y_arr_interpolated:
464
+ y_arr_interpolated = np.insert(y_arr_interpolated, np.where(y_arr_interpolated > y_val_to_include)[0][0], y_val_to_include)
465
+
466
+ # Create grid
467
+ X, Y = np.meshgrid(x_arr_interpolated, y_arr_interpolated, indexing='ij')
468
+ gridPoints = np.c_[X.ravel(), Y.ravel()]
469
+ dataPoints = np.c_[x_arr, y_arr]
470
+
471
+ # Interpolate the simulation data onto the grid
472
+ V = interpolate.griddata(
473
+ dataPoints,
474
+ loss,
475
+ gridPoints,
476
+ method='linear'  # 'cubic' produces smoother-looking maps, but 'linear' avoids overshoot between data points
477
+ ).reshape(X.shape)
478
+
479
+ return X, Y, V, dataPoints
480
+
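# --- Editor's sketch (not part of the fiqus diff): the interpolation step inside
# --- lossMap_createGridData above. Scattered (x, y, loss) samples from the simulation batch are
# --- interpolated onto a regular grid with scipy's griddata. The sample values below are
# --- synthetic assumptions.
import numpy as np
from scipy import interpolate

x = np.log10([1.0, 10.0, 100.0, 1.0, 10.0, 100.0])      # e.g. log10(frequency)
y = np.log10([0.1, 0.1, 0.1, 1.0, 1.0, 1.0])            # e.g. log10(field amplitude)
loss = x + y                                             # placeholder loss values

xi = np.linspace(x.min(), x.max(), 5)
yi = np.linspace(y.min(), y.max(), 4)
X, Y = np.meshgrid(xi, yi, indexing='ij')
V = interpolate.griddata(np.c_[x, y], loss, np.c_[X.ravel(), Y.ravel()], method='linear').reshape(X.shape)
print(V.shape)                                           # -> (5, 4)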
481
+ def save_lossMap_gridData(self, save_folder_name='lossMap_gridData'):
482
+ """
483
+ This function calls the lossMap_createGridData function and saves the grid data.
484
+ """
485
+ lm = self.fdm.magnet.postproc.batch_postproc.loss_map
486
+
487
+ lossTypes = ['TotalLoss', 'FilamentLoss', 'EddyLoss', 'CouplingLoss', 'CouplingLoss_dyn', 'TotalLoss_dyn']  # the '_dyn' entries only exist when the dynamic correction is used; to be generalized later
488
+ # 1) Create a folder to store the output files
489
+ gridData_folder_path = self.create_non_overwriting_filepath(self.outputs_folder_path, save_folder_name, '', self.fdm.run.overwrite)
490
+ if not os.path.exists(gridData_folder_path): os.makedirs(gridData_folder_path)
491
+ # 2) Create the grid data for each loss type and save it
492
+ for lossType in lossTypes:
493
+ X, Y, V, _ = self.lossMap_createGridData(lossType)
494
+ if lm.x_log: X = np.power(10, X)
495
+ if lm.y_log: Y = np.power(10, Y)
496
+ if lm.loss_log: V = np.power(10, V)
497
+ np.savetxt(os.path.join(gridData_folder_path, f'{lossType}.txt'), np.column_stack((X.ravel(), Y.ravel(), V.ravel())), delimiter=' ', header=f'{lm.x_val} {lm.y_val} {lossType}', comments='')
498
+
499
+ def load_lossMap_gridData(self, lossType='TotalLoss', save_folder_name='lossMap_gridData'):
500
+ """
501
+ This function loads the grid data for a given loss type.
502
+ """
503
+ lm = self.fdm.magnet.postproc.batch_postproc.loss_map
504
+ gridData_folder_path = os.path.join(self.inputs_folder_path, save_folder_name)
505
+
506
+ if not os.path.exists(gridData_folder_path):
507
+ raise FileNotFoundError(f'The folder {gridData_folder_path} does not exist.')
508
+
509
+ X, Y, V = np.loadtxt(os.path.join(gridData_folder_path, f'{lossType}.txt'), unpack=True, skiprows=1)
510
+
511
+ if lm.x_log: X = np.log10(X)
512
+ if lm.y_log: Y = np.log10(Y)
513
+ if lm.loss_log: V = np.log10(V)
514
+
515
+ # Get the unique counts of X and Y
516
+ unique_X = np.unique(X)
517
+ unique_Y = np.unique(Y)
518
+
519
+ # Reshape the data
520
+ X = X.reshape((len(unique_X), len(unique_Y)))
521
+ Y = Y.reshape((len(unique_X), len(unique_Y)))
522
+ V = V.reshape((len(unique_X), len(unique_Y)))
523
+
524
+ return X, Y, V
525
+
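# --- Editor's sketch (not part of the fiqus diff): the flat text format used by
# --- save_lossMap_gridData / load_lossMap_gridData above. The grid is written as three columns
# --- (x, y, loss) with a one-line header, and reshaped back on loading using the number of
# --- unique x and y values. The grid values and the temporary path below are assumptions.
import os
import tempfile
import numpy as np

X, Y = np.meshgrid([1.0, 10.0], [0.1, 0.2, 0.5], indexing='ij')
V = X * Y
with tempfile.TemporaryDirectory() as folder:
    path = os.path.join(folder, 'TotalLoss.txt')
    np.savetxt(path, np.column_stack((X.ravel(), Y.ravel(), V.ravel())),
               delimiter=' ', header='f b TotalLoss', comments='')
    x, y, v = np.loadtxt(path, unpack=True, skiprows=1)
    shape = (len(np.unique(x)), len(np.unique(y)))
    print(v.reshape(shape))                              # recovers the original 2 x 3 grid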
526
+ def save_magnetization(self):
527
+ """
528
+ This function saves the magnetization data for all simulations in the simulation collection.
529
+ """
530
+ magnetization_folder_path = self.create_non_overwriting_filepath(self.outputs_folder_path, 'magnetization', '', self.fdm.run.overwrite)
531
+ if not os.path.exists(magnetization_folder_path): os.makedirs(magnetization_folder_path)
532
+ for sd in self.simulation_collection:
533
+ magnetization = sd.magn_fil + sd.magn_matrix
534
+ magnetization = np.c_[sd.time, magnetization]
535
+ np.savetxt(os.path.join(magnetization_folder_path, f'magn_f{sd.solve.source_parameters.sine.frequency}_b{sd.solve.source_parameters.sine.field_amplitude}_I{sd.solve.source_parameters.sine.current_amplitude}.txt'), magnetization, delimiter=' ', header='t x y z', comments='')
536
+
537
+ def lossMap_crossSection(self, slice_value, axis_to_cut='x'):
538
+ """
539
+ This function returns the data corresponding to a cross section of the loss map, for all loss types.
540
+ Given an axis and a value, it sweeps the other axis for the closest value and returns the data.
541
+ Example: Given slice value 0 and axis x, it returns the data for the cross section at x = 0.
542
+ """
543
+
544
+ lm = self.fdm.magnet.postproc.batch_postproc.loss_map
545
+ if axis_to_cut == 'x':
546
+ x_val_to_include = slice_value
547
+ y_val_to_include = None
548
+ elif axis_to_cut == 'y':
549
+ x_val_to_include = None
550
+ y_val_to_include = slice_value
551
+ X, Y, V, dataPoints = self.lossMap_createGridData('TotalLoss', x_val_to_include, y_val_to_include)
552
+ _, _, FilamentLoss, _ = self.lossMap_createGridData('FilamentLoss', x_val_to_include, y_val_to_include)
553
+ _, _, EddyLoss, _ = self.lossMap_createGridData('EddyLoss', x_val_to_include, y_val_to_include)
554
+ _, _, CouplingLoss, _ = self.lossMap_createGridData('CouplingLoss', x_val_to_include, y_val_to_include)
555
+
556
+ if axis_to_cut == 'x':
557
+ index = np.abs(X[:, 0] - slice_value).argmin()
558
+ slice_vals = Y[index, :]
559
+
560
+ elif axis_to_cut == 'y':
561
+ index = np.abs(Y[0, :] - slice_value).argmin()
562
+ slice_vals = X[:, index]
563
+
564
+ # Extract the loss values for the constant frequency across all applied fields
565
+ totalLoss = V[index, :] if axis_to_cut == 'x' else V[:, index]
566
+ filamentLoss = FilamentLoss[index, :] if axis_to_cut == 'x' else FilamentLoss[:, index]
567
+ eddyLoss = EddyLoss[index, :] if axis_to_cut == 'x' else EddyLoss[:, index]
568
+ couplingLoss = CouplingLoss[index, :] if axis_to_cut == 'x' else CouplingLoss[:, index]
569
+
570
+ return slice_vals, totalLoss, filamentLoss, eddyLoss, couplingLoss
571
+
572
+ def plot_lossMap_crossSection(self):
573
+ """
574
+ This function calls the lossMap_crossSection function and plots the data it returns, which is the loss for all values of one axis, given a constant value of the other axis.
575
+ """
576
+
577
+ if self.tex_installation:
578
+ plt.rcParams['text.usetex'] = True
579
+ plt.rcParams['font.family'] = 'times'
580
+ plt.rcParams['font.size'] = 20
581
+
582
+ lm = self.fdm.magnet.postproc.batch_postproc.loss_map
583
+ slice_value = lm.cross_section.cut_value
584
+ axis_to_cut = lm.cross_section.axis_to_cut
585
+
586
+ if (lm.x_log and axis_to_cut == 'x') or (lm.y_log and axis_to_cut == 'y'):
587
+ slice_value = np.log10(slice_value)
588
+
589
+ slice_vals, totalLoss, filamentLoss, eddyLoss, couplingLoss = self.lossMap_crossSection(slice_value, axis_to_cut=axis_to_cut)
590
+
591
+ def log_formatter(x, pos):
592
+ """
593
+ Format the tick labels on the plot.
594
+ """
595
+ return f"$10^{{{int(x)}}}$"
596
+
597
+ # Plot the loss with respect to applied field for the constant frequency
598
+ fig, ax = plt.subplots(figsize=(8, 6))
599
+ ax.plot(slice_vals, totalLoss, label=f'Total Loss')
600
+ ax.plot(slice_vals, filamentLoss, label=f'Filament Loss')
601
+ ax.plot(slice_vals, eddyLoss, label=f'Eddy Loss')
602
+ ax.plot(slice_vals, couplingLoss, label=f'Coupling Loss')
603
+
604
+ tick_formatter = FuncFormatter(log_formatter)
605
+ if lm.x_log and axis_to_cut == 'y' or lm.y_log and axis_to_cut == 'x':
606
+ ax.xaxis.set_major_formatter(tick_formatter)
607
+ if lm.loss_log:
608
+ ax.yaxis.set_major_formatter(tick_formatter)
609
+
610
+ title = lm.cross_section.title.replace('<<cut_value>>', str(round(10 ** slice_value, 3)))
611
+ ax.set_title(title)
612
+ ax.set_xlabel(lm.ylabel if axis_to_cut == 'x' else lm.xlabel)
613
+ ax.set_ylabel(lm.cross_section.ylabel)
614
+ ax.legend()
615
+
616
+ # np.savetxt(os.path.join(self.outputs_folder_path, 'lossMaps_cut_0p2T_0A.txt'), np.column_stack((10**slice_vals, 10**totalLoss, 10**eddyLoss, 10**couplingLoss, 10**filamentLoss)), delimiter=' ', header='f total eddy coupling filament', comments='')
617
+
618
+ if lm.cross_section.save_plot:
619
+ filePath = self.create_non_overwriting_filepath(self.outputs_folder_path, lm.cross_section.filename, '.png', self.fdm.run.overwrite)
620
+ plt.savefig(filePath)
621
+
622
+ if self.fdm.run.launch_gui: plt.show()
623
+
624
+ def plot_percycle_losses(self, sims, flux_model, title):
625
+ """ This function produces a frequency to per cycle power loss plot used within the ROHF parameter optimization """
626
+
627
+ pc_irr_rohf = []
628
+ pc_eddy_rohf = []
629
+ pc_fil_sim = [sim.total_power_per_cycle['FilamentLoss'] for sim in sims]
630
+ pc_eddy_sim = [sim.total_power_per_cycle['EddyLoss'] for sim in sims]
631
+ freq = [sim.f for sim in sims]
632
+
633
+ for sim in sims:
634
+ _, _, _, irr, eddy = flux_model.compute(time=sim.time, I=sim.I_transport, sim=sim)
635
+ pc_irr_rohf.append(irr)
636
+ pc_eddy_rohf.append(eddy)
637
+
638
+ plt.figure()
639
+ plt.title(title)
640
+ plt.plot(freq, pc_fil_sim, 'o-', label='FilamentLoss')
641
+ plt.plot(freq, pc_irr_rohf, 'x-', label=r'$P_{\mathrm{irr}}$ ROHF')
642
+ plt.plot(freq, pc_eddy_sim, 'o-', label='EddyLoss')
643
+ plt.plot(freq, pc_eddy_rohf, 'x-', label=r'$P_{\mathrm{eddy}}$ ROHF')
644
+ plt.legend()
645
+ plt.xlabel(r'Frequency ($\mathrm{Hz}$)')
646
+ plt.ylabel(r'Per cycle power loss ($\mathrm{W/m}$)')
647
+ plt.xscale('log')
648
+ plt.yscale('log')
649
+
650
+ def plot_flux_time_triplet(self, flux_model, title):
651
+
652
+ amplitudes = sorted(list(set([sim.I_amp for sim in self.simulation_collection])))
653
+
654
+ high_I_sims = []
655
+ mid_I_sims = []
656
+ low_I_sims = []
657
+ for sim in self.simulation_collection:
658
+ if sim.I_amp == amplitudes[-1]:
659
+ high_I_sims.append(sim)
660
+ elif sim.I_amp == amplitudes[-2]:
661
+ mid_I_sims.append(sim)
662
+ elif sim.I_amp == amplitudes[2]:
663
+ low_I_sims.append(sim)
664
+
665
+ colors = self.colors(max([len(high_I_sims), len(mid_I_sims), len(low_I_sims)]))
666
+ plt.figure()
667
+ plt.title(title)
668
+ plt.subplot(3, 1, 1)
669
+ for i, sim in enumerate(high_I_sims):
670
+ plt.plot(sim.time * sim.f, sim.flux_internal, color=colors[i], linestyle='--')
671
+ plt.plot(sim.time * sim.f, flux_model.compute(I=sim.I_transport, time=sim.time, sim=sim)[0], color=colors[i], label=str(sim.f) + ' Hz')
672
+ plt.legend()
673
+
674
+ plt.subplot(3, 1, 2)
675
+ for i, sim in enumerate(mid_I_sims):
676
+ plt.plot(sim.time * sim.f, sim.flux_internal, color=colors[i], linestyle='--')
677
+ plt.plot(sim.time * sim.f, flux_model.compute(I=sim.I_transport, time=sim.time, sim=sim)[0], color=colors[i], label=str(sim.f) + ' Hz')
678
+
679
+ plt.subplot(3, 1, 3)
680
+ for i, sim in enumerate(low_I_sims):
681
+ plt.plot(sim.time * sim.f, sim.flux_internal, color=colors[i], linestyle='--')
682
+ plt.plot(sim.time * sim.f, flux_model.compute(I=sim.I_transport, time=sim.time, sim=sim)[0], color=colors[i], label=str(sim.f) + ' Hz')
683
+ # plt.ylabel(r'Internal Flux per unit length ($\mathrm{Tm}$)')
684
+ plt.gcf().text(0.04, 0.5, r'Internal Flux per unit length ($\mathrm{Tm}$)', va='center', rotation='vertical')  # shared y-label for all three subplots
685
+ plt.xlabel(r'Time in periods')
686
+
687
+ def animate_lossMap_crossSection(self):
688
+ """
689
+ This function is similar to the plot_lossMap_crossSection function, but instead of plotting the loss at a constant cross-section,
690
+ it sweeps the cross-section along a chosen axis and plots the loss for each cross-section as an animation.
691
+ """
692
+ lm = self.fdm.magnet.postproc.batch_postproc.loss_map
693
+ axis = lm.cross_section_sweep.axis_to_sweep
694
+
695
+ X, Y, V, dataPoints = self.lossMap_createGridData('TotalLoss')
696
+ x_vals = X[:, 0] # x-values from the loss map
697
+ y_vals = Y[0, :] # y-values from the loss map
698
+
699
+ if axis == 'x':
700
+ A = np.zeros((lm.y_steps, 4, lm.x_steps))
701
+ axis_to_sweep = x_vals
702
+ constant_axis = y_vals
703
+ elif axis == 'y':
704
+ A = np.zeros((lm.x_steps, 4, lm.y_steps))
705
+ axis_to_sweep = y_vals
706
+ constant_axis = x_vals
707
+
708
+ for i, val in enumerate(axis_to_sweep):
709
+ _, totalLoss, filamentLoss, eddyLoss, couplingLoss = self.lossMap_crossSection(val, axis_to_cut=axis)
710
+ A[:, 0, i] = totalLoss
711
+ A[:, 1, i] = filamentLoss
712
+ A[:, 2, i] = eddyLoss
713
+ A[:, 3, i] = couplingLoss
714
+
715
+ # Initialize the plot
716
+ fig, ax = plt.subplots()
717
+ lines = ax.plot(constant_axis, A[:, :, 0], lw=2, label=['total Loss', 'filament Loss', 'eddy Loss', 'coupling Loss'])
718
+
719
+ # Set plot limits and labels
720
+ ax.set_xlim(constant_axis[0], constant_axis[-1])
721
+ ax.set_ylim(np.min(A), np.max(A))
722
+ ax.set_xlabel(lm.ylabel if axis == 'x' else lm.xlabel)
723
+ ax.set_ylabel(lm.cross_section_sweep.ylabel)
724
+
725
+ # Define the animation update function
726
+ def update(frame):
727
+ for i, line in enumerate(lines):
728
+ line.set_ydata(A[:, i, frame])
729
+
730
+ if axis == 'x':
731
+ if lm.x_log:
732
+ sweep_value = 10 ** x_vals[frame]
733
+ else:
734
+ sweep_value = x_vals[frame]
735
+ elif axis == 'y':
736
+ if lm.y_log:
737
+ sweep_value = 10 ** y_vals[frame]
738
+ else:
739
+ sweep_value = y_vals[frame]
740
+
741
+ title = lm.cross_section_sweep.title.replace('<<sweep_value>>', str(round(sweep_value, 3)))
742
+ ax.set_title(title)
743
+ return lines, # line1, line2, line3, line4
744
+
745
+ # Create the animation
746
+ dt = 0.1
747
+ ani = FuncAnimation(fig, update, frames=lm.x_steps if axis == 'x' else lm.y_steps, interval=dt * 1000, blit=False)
748
+
749
+ # Show the animation
750
+ plt.legend()
751
+ plt.grid()
752
+
753
+ if lm.cross_section_sweep.save_plot:
754
+ filepath = self.create_non_overwriting_filepath(folder_path=self.outputs_folder_path, base_name=lm.cross_section_sweep.filename, extension='.gif', overwrite=self.fdm.run.overwrite)
755
+ ani.save(filepath, writer='imagemagick', fps=1 / dt)
756
+
757
+ if self.fdm.run.launch_gui: plt.show()
758
+
759
+ def create_lossMap(self):
760
+ """
761
+ This function creates a loss map based on the inputs given in the loss_map section of the input file.
762
+ The loss map can be plotted and saved to file.
763
+ """
764
+ lm = self.fdm.magnet.postproc.batch_postproc.loss_map
765
+
766
+ if self.simulation_collection:
767
+ X, Y, V, dataPoints = self.lossMap_createGridData(lm.loss_type)
768
+ else:
769
+ X, Y, V = self.totalLoss_gridData
770
+
771
+ if self.tex_installation:
772
+ plt.rcParams['text.usetex'] = True
773
+ plt.rcParams['font.family'] = 'times'
774
+ plt.rcParams['font.size'] = 20
775
+
776
+ fig, ax = plt.subplots(figsize=(10, 8))
777
+
778
+ c = plt.pcolormesh(X, Y, V, shading='gouraud', cmap='plasma_r')
779
+ c_min = np.ceil(np.min(V))
+ c_max = np.floor(np.max(V))
781
+ c_ticks = [int(val) for val in np.arange(c_min, c_max + 1)]
782
+ cont = plt.contour(X, Y, V, c_ticks, colors='k', linestyles='dashed')
783
+
784
+ if lm.show_datapoints:
785
+ plt.scatter(dataPoints[:, 0], dataPoints[:, 1], s=50, edgecolors='k')
786
+
787
+ if lm.show_loss_type_dominance_contour:
788
+ sigmoid = lambda x: 1 / (1 + np.exp(-x))
789
+ if self.simulation_collection:
790
+ _, _, FilamentLoss, _ = self.lossMap_createGridData(lossType='FilamentLoss')
791
+ _, _, CouplingLoss, _ = self.lossMap_createGridData(lossType='CouplingLoss')
792
+ _, _, EddyLoss, _ = self.lossMap_createGridData(lossType='EddyLoss')
793
+ # else:
794
+ # _, _, FilamentLoss = self.filamentLoss_gridData
795
+ # _, _, CouplingLoss = self.couplingLoss_gridData
796
+ # _, _, EddyLoss = self.eddyLoss_gridData
797
+ fil_vs_coupling_loss = np.maximum(FilamentLoss, EddyLoss) - CouplingLoss
798
+ fil_vs_eddy_loss = EddyLoss - np.maximum(FilamentLoss, CouplingLoss)
799
+ plt.contour(X, Y, sigmoid(fil_vs_coupling_loss), [0.5], colors='k')
800
+ plt.contour(X, Y, sigmoid(fil_vs_eddy_loss), [0.5], colors='k')
801
+
802
+ cbar = fig.colorbar(c, ticks=c_ticks) # , labels=c_labels)
803
+ # cbar.ax.set_xticks([-7, -6, -5, -4, -3, -2, -1, 0, 1])
804
+ # cbar.ax.set_yticklabels([r'$10^{-7}$', r'$10^{-6}$', r'$10^{-5}$', r'$10^{-4}$', r'$10^{-3}$', r'$10^{-2}$', r'$10^{-1}$', r'$10^0$', r'$10^1$'])
805
+ cbar.ax.set_yticklabels([f"$10^{{{val}}}$" for val in c_ticks])
806
+ # plt.grid(alpha=0.5)
807
+ # plt.title(lm.title)
808
+ # plt.xlabel(lm.xlabel)
809
+ # plt.ylabel(lm.ylabel)
810
+ plt.title(r'Loss per cycle (J/m)')
811
+ plt.xlabel(r'Frequency $f$ (Hz)')
812
+ plt.ylabel(r'Field amplitude $b$ (T)')
813
+
814
+ # plt.annotate(r'Coupling', (np.log10(1.0), np.log10(0.007)), color='white')
815
+ # plt.annotate(r'Filament', (np.log10(0.012), np.log10(0.74)), color='white')
816
+ # plt.annotate(r'(uncoupled)', (np.log10(0.012), np.log10(0.55)), color='white')
817
+ # plt.annotate(r'Filament', (np.log10(45), np.log10(0.38)), color='white')
818
+ # plt.annotate(r'(coupled)', (np.log10(45), np.log10(0.28)), color='white')
819
+ # plt.annotate(r'Eddy', (np.log10(2000), np.log10(0.03)), color='white')
820
+
821
+ # ax.plot(np.log10(0.03), np.log10(0.2), 'o', color='white')#, xytext=(np.log10(0.03), np.log10(0.12)), arrowprops=dict(facecolor='black', shrink=0.02))
822
+ # ax.plot(np.log10(30), np.log10(1), 'o', color='white')#, xytext=(np.log10(40), np.log10(0.8)), arrowprops=dict(facecolor='black', shrink=0.02))
823
+ # ax.plot(np.log10(3), np.log10(0.2), 'o', color='white')#, xytext=(np.log10(2), np.log10(0.2)), arrowprops=dict(facecolor='black', shrink=0.02))
824
+ # ax.plot(np.log10(5000), np.log10(0.2), 'o', color='white')#, xytext=(np.log10(5000), np.log10(0.1)), arrowprops=dict(facecolor='black', shrink=0.02))
825
+
826
+ # ax.annotate('(a)', xy=(np.log10(0.03), np.log10(0.2)), xycoords='data', ha='right', va='bottom', fontsize=20, color='white')
827
+ # ax.annotate('(b)', xy=(np.log10(3), np.log10(0.2)), xycoords='data', ha='right', va='bottom', fontsize=20, color='white')
828
+ # ax.annotate('(c)', xy=(np.log10(30), np.log10(1)), xycoords='data', ha='right', va='bottom', fontsize=20, color='white')
829
+ # ax.annotate('(d)', xy=(np.log10(5000), np.log10(0.2)), xycoords='data', ha='right', va='bottom', fontsize=20, color='white')
830
+
831
+ # Define custom tick labels for x-axis
832
+ x_min_log = int(np.log10(min([eval('sd.' + lm.x_val) for sd in self.simulation_collection])))
833
+ x_max_log = int(np.log10(max([eval('sd.' + lm.x_val) for sd in self.simulation_collection])))
834
+ x = np.arange(x_min_log, x_max_log + 1)
835
+ # Create a list of minor ticks
836
+ minor_x_labels = []
837
+ # 1) Add the ticks from x_min_log to ceil(x_min_log) to the minor_x_test list
838
+ new_ticks = np.linspace(10.0 ** np.floor(x_min_log), 10.0 ** np.ceil(x_min_log), 10)[:-1]
839
+ new_ticks = np.unique(new_ticks[new_ticks >= 10.0 ** x_min_log])
840
+ minor_x_labels.extend(new_ticks)
841
+ # 2) Add the ticks from ceil(x_min_log) to floor(x_max_log) to the minor_x_test list
842
+ for x_val in x:
843
+ new_ticks = np.linspace(10.0 ** x_val, 10.0 ** (x_val + 1), 10)[1:-1]
844
+ if x_val == x[-1]:
845
+ new_ticks = new_ticks[new_ticks <= 10.0 ** x_max_log]
846
+ minor_x_labels.extend(new_ticks)
847
+ minor_x = [np.log10(val) for val in minor_x_labels]
848
+
849
+ new_x_labels = [f"$10^{{{val}}}$" for val in x]
850
+ plt.xticks(x, new_x_labels)
851
+ plt.xticks(minor_x, minor=True)
852
+
853
+ # Define custom tick labels for y-axis
854
+ y_min_log = np.log10(min([eval('sd.' + lm.y_val) for sd in self.simulation_collection]))
855
+ y_max_log = np.log10(max([eval('sd.' + lm.y_val) for sd in self.simulation_collection]))
856
+ y = np.arange(np.ceil(y_min_log), np.floor(y_max_log) + 1)
857
+ # Create a list of minor ticks
858
+ minor_y_labels = []
859
+ # 1) Add the ticks from y_min_log to ceil(y_min_log) to the minor_y_test list
860
+ new_ticks = np.linspace(10.0 ** np.floor(y_min_log), 10.0 ** np.ceil(y_min_log), 10)[:-1]
861
+ new_ticks = np.unique(new_ticks[new_ticks >= 10.0 ** y_min_log])
862
+ minor_y_labels.extend(new_ticks)
863
+ # 2) Add the ticks from ceil(y_min_log) to floor(y_max_log) to the minor_y_test list
864
+ for y_val in y:
865
+ new_ticks = np.linspace(10.0 ** y_val, 10.0 ** (y_val + 1), 10)[1:-1]
866
+ if y_val == y[-1]:
867
+ new_ticks = new_ticks[new_ticks <= 10.0 ** y_max_log]
868
+ minor_y_labels.extend(new_ticks)
869
+
870
+ new_y_labels = [f"$10^{{{int(val)}}}$" for val in y]
871
+ minor_y = [np.log10(val) for val in minor_y_labels]
872
+ plt.yticks(y, new_y_labels)
873
+ plt.yticks(minor_y, minor=True)
874
+
875
+ # plt.savefig('C:/Users/jdular/cernbox/Documents/Reports/CERN_Reports/linkedFluxPaper/fig/loss_map_54fil_noI.pdf', bbox_inches='tight')
876
+
877
+ if lm.save_plot:
878
+ filePath = self.create_non_overwriting_filepath(self.outputs_folder_path, lm.filename, '.pdf', self.fdm.run.overwrite)
879
+ plt.savefig(filePath, bbox_inches='tight')
880
+
881
+ if self.fdm.run.launch_gui: plt.show()
882
+
883
+ def create_errorMap(self, flux_model, display=True, iteration=None):
884
+ """ This function creates a error map for a given rohf model on the space spanned by the simulations (f, I_amp) """
885
+
886
+ error_type = self.fdm.magnet.postproc.batch_postproc.rohf_on_grid.error_type
887
+
888
+ N = len(self.simulation_collection)
889
+ frequencies = np.zeros(N)
890
+ peakCurrents = np.zeros(N)
891
+ pc_error = np.zeros(N)
892
+ dyn_error = np.zeros(N)
893
+ flux_error = np.zeros(N)
894
+ for idx, simulation in enumerate(self.simulation_collection):
895
+ frequencies[idx] = simulation.f
896
+ peakCurrents[idx] = simulation.I_amp
897
+ # compute rohf metrics
898
+ rohf_flux, rohf_dyn_loss, _, rohf_pc_loss, _ = flux_model.compute(simulation.time, simulation.I_transport, sim=simulation)
899
+ # (relative) per cycle loss error
900
+ pc_error[idx] = abs(rohf_pc_loss - simulation.total_power_per_cycle['FilamentLoss']) / simulation.total_power_per_cycle['FilamentLoss']
901
+ # (relative) mean dynamic error
902
+ dyn_error[idx] = np.mean(abs((rohf_dyn_loss - simulation.power['FilamentLoss']) / simulation.power['FilamentLoss']))
903
+ # (relative) mean flux error
904
+ non_zero_idx = np.nonzero(simulation.flux_internal)
905
+ flux_error[idx] = np.mean(abs((rohf_flux[non_zero_idx] - simulation.flux_internal[non_zero_idx]) / simulation.flux_internal[non_zero_idx]))
906
+
907
+ if error_type == 'pc_loss':
908
+ error = pc_error
909
+ elif error_type == 'flux':
910
+ error = flux_error
911
+ elif error_type == 'dyn_loss':
912
+ error = dyn_error
913
+ else:
914
+ raise ValueError('Unrecognized error type')
915
+
916
+ # plot errorMap result
917
+ if display:
918
+
919
+ show_descriptions = False
920
+ freq_i, curr_i = np.linspace(min(frequencies), max(frequencies), 1000), np.linspace(min(peakCurrents), max(peakCurrents), 1000)
921
+ freq_i, curr_i = np.meshgrid(freq_i, curr_i)
922
+ plt.figure()
923
+ ax = plt.gca()
924
+ title = str(error_type) + " map"
925
+ if show_descriptions:
926
+ ax.set_title(title)
927
+ ax.set_xlabel(r'Frequency $f$ ($\mathrm{Hz}$)')
928
+ ax.set_ylabel(r"Transport current ratio $I/I_\mathrm{c}$")
929
+ else:
930
+ ax.set_axis_off()
931
+
932
+ if self.fdm.magnet.postproc.batch_postproc.rohf_on_grid.interpolate_error_map:
933
+ rbf = interpolate.Rbf(frequencies, peakCurrents, error, function='linear')
934
+ error_i = rbf(freq_i, curr_i)
935
+ plt.imshow(error_i, vmin=error.min(), vmax=error.max(), origin='lower', aspect='auto',
936
+ extent=[min(frequencies), max(frequencies), min(peakCurrents) / max(peakCurrents), 1])
937
+ plt.scatter(frequencies, peakCurrents / max(peakCurrents), c=error, edgecolor='w')
938
+ ax.set_xscale('log')
939
+ # ax.set_yscale('log')
940
+ plt.colorbar()
941
+ plt.show()
942
+ else:
943
+ # cumulative error over map
944
+ return sum(error)
945
+
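# --- Editor's sketch (not part of the fiqus diff): the relative error measures combined in
# --- create_errorMap above. The ROHF prediction is compared with the FEM reference either per
# --- cycle (a single relative error) or point-wise in time (a mean relative error). The numbers
# --- below are synthetic assumptions.
import numpy as np

fem_per_cycle = 2.0e-3                         # FEM per-cycle filament loss (assumed)
rohf_per_cycle = 2.2e-3                        # ROHF per-cycle filament loss (assumed)
pc_error = abs(rohf_per_cycle - fem_per_cycle) / fem_per_cycle          # -> 0.1

fem_power = np.array([1.0, 2.0, 3.0, 2.0, 1.0])                         # FEM power trace (assumed)
rohf_power = np.array([1.1, 1.8, 3.3, 2.0, 0.9])                        # ROHF power trace (assumed)
dyn_error = np.mean(np.abs((rohf_power - fem_power) / fem_power))       # mean point-wise error
print(round(pc_error, 3), round(dyn_error, 3))                          # -> 0.1 0.08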
946
+ def plot2d(self):
947
+ """
948
+ This function creates a 2D plot. It is configured through the plot2d section of the input file and is intended to be flexible enough to cover various kinds of plots.
949
+ """
950
+
951
+ def apply_plot_settings():
952
+ if ref:
953
+ plt.plot(ref_array[0], ref_array[1], linestyle='--', label=self.fdm.magnet.postproc.batch_postproc.plot2d.reference_label)
954
+ if self.fdm.magnet.postproc.batch_postproc.plot2d.legend:
955
+ plt.legend()
956
+ plt.grid(linestyle='dotted')
957
+ plt.xlabel(self.fdm.magnet.postproc.batch_postproc.plot2d.xlabel)
958
+ plt.ylabel(self.fdm.magnet.postproc.batch_postproc.plot2d.ylabel)
959
+ plt.title(title)
960
+ if self.fdm.magnet.postproc.batch_postproc.plot2d.x_log:
961
+ plt.xscale('log')
962
+ if self.fdm.magnet.postproc.batch_postproc.plot2d.y_log:
963
+ plt.yscale('log')
964
+
965
+ if self.tex_installation:
966
+ plt.rcParams['text.usetex'] = True
967
+ plt.rcParams['font.family'] = 'times'
968
+ # plt.rcParams['font.size'] = 20
969
+
970
+ plotdata_list = []
971
+ # use the number of labels to subdivide the simulations into sets of equal length
972
+ labellen = int(len(self.fdm.magnet.postproc.batch_postproc.plot2d.labels))
973
+ grplen = int(len(self.simulation_collection) / labellen)
974
+
975
+ # Create the title (or titles if combined_plot is False)
976
+ title = self.fdm.magnet.postproc.batch_postproc.plot2d.title
977
+ if self.fdm.magnet.postproc.batch_postproc.plot2d.combined_plot:
978
+ sd = self.simulation_collection[0]
979
+ commands = re.findall('<<(.*?)>>', title)
980
+ for c in commands:
981
+ title = title.replace(f"<<{c}>>", str(eval('sd.' + c)))
982
+ else:
983
+ titles = []
984
+ for sd in self.simulation_collection:
985
+ commands = re.findall('<<(.*?)>>', title)
986
+ title_i = title
987
+ for c in commands:
988
+ title_i = title_i.replace(f"<<{c}>>", str(eval('sd.' + c)))
989
+ titles.append(title_i)
990
+
991
+ # colors = plt.cm.get_cmap('magma').resampled(len(self.simulation_collection)).colors
992
+ max_len = max([len(self.simulation_collection), len(self.fluxmodel_collection)]) if self.fluxmodel_collection else len(self.simulation_collection)
993
+ colors = plt.cm.get_cmap('viridis').resampled(max_len).colors
994
+
995
+ # convert selected simulation parameters into list of PlotData
996
+ simulation_idx = [] # nested list with indices
997
+ fluxmodel_idx = []
998
+ style_cycle = ['--', ':', '-.']
999
+ for i, simulation in enumerate(self.simulation_collection):
1000
+ # get x_data
1001
+ x_data = eval(self.fdm.magnet.postproc.batch_postproc.plot2d.x_val)
1002
+ # base label
1003
+ if len(self.fdm.magnet.postproc.batch_postproc.plot2d.labels) == len(self.simulation_collection):
1004
+ label_idx = i
1005
+ else:
1006
+ label_idx = 0
1007
+ label_cmd = re.findall('<<(.*?)>>', self.fdm.magnet.postproc.batch_postproc.plot2d.labels[label_idx])
1008
+ sim_label = self.fdm.magnet.postproc.batch_postproc.plot2d.labels[label_idx].replace(f"<<{label_cmd[0]}>>", str(eval('simulation.' + label_cmd[0]))) if label_cmd else self.fdm.magnet.postproc.batch_postproc.plot2d.labels[label_idx]  # re.findall returns a list; use the first placeholder
1009
+ # add (multiple) y_data sets w. label
1010
+ for j in range(len(self.fdm.magnet.postproc.batch_postproc.plot2d.y_vals)):
1011
+ y_data = eval(self.fdm.magnet.postproc.batch_postproc.plot2d.y_vals[j])
1012
+
1013
+ sublabel = sim_label if len(self.fdm.magnet.postproc.batch_postproc.plot2d.y_vals) == 1 else sim_label + ' (' + str(i) + ')'
1014
+ plotdata_list.append(PlotData(x_data=x_data, y_data=y_data, label=sublabel, color=colors[i], linestyle='-'))
1015
+ simulation_idx.append(len(plotdata_list) - 1)
1016
+ # add multiple fluxmodel results
1017
+ if self.fluxmodel_collection:
1018
+ for k, fluxmodel in enumerate(self.fluxmodel_collection):
1019
+ y_data = eval(self.fdm.magnet.postproc.batch_postproc.plot2d.y_val_fluxmodel)
1020
+ plotdata_list.append(PlotData(x_data=x_data, y_data=y_data, label=fluxmodel.label + '_' + str(sim_label), color=colors[i], linestyle=style_cycle[k % len(style_cycle)]))
1021
+ fluxmodel_idx.append(len(plotdata_list) - 1)
1022
+
1023
+ # Load reference - if defined
1024
+ ref = self.fdm.magnet.postproc.batch_postproc.plot2d.reference_vals
1025
+ ref_array = []
1026
+ if ref:
1027
+ for i, ref_val in enumerate(ref):
1028
+ commands = re.findall('<<(.*?)>>', ref_val)
1029
+ for c in commands:
1030
+ ref_val = ref_val.replace(f"<<{c}>>", str('sd.' + c))
1031
+ ref_array.append(eval(ref_val))
1032
+
1033
+ # Create result folder:
1034
+ if not self.fdm.magnet.postproc.batch_postproc.plot2d.combined_plot and self.fdm.magnet.postproc.batch_postproc.plot2d.save_plot:
1035
+ # Create a folder to save the plots if combined_plot is False and save_plot is True:
1036
+ filename = self.fdm.magnet.postproc.batch_postproc.plot2d.filename
1037
+ folder_path = self.create_non_overwriting_filepath(self.outputs_folder_path, filename, '', self.fdm.run.overwrite)
1038
+ if not os.path.exists(folder_path): os.makedirs(folder_path)
1039
+
1040
+ # if all data points are scalars (one value per simulation), plot them as continuous curves grouped by label
1041
+ scalar_plotdata = any([hasattr(plotdata.x_data, "__len__") for plotdata in plotdata_list])  # True if any x_data is an array (a time series) rather than a single scalar
1042
+ if not scalar_plotdata:
1043
+ for grp_idx, grp_plotdata in enumerate([plotdata_list[i::labellen] for i in range(labellen)]):
1044
+ plt.plot([data.x_data for data in grp_plotdata], [data.y_data for data in grp_plotdata], self.fdm.magnet.postproc.batch_postproc.plot2d.linestyle, label=self.fdm.magnet.postproc.batch_postproc.plot2d.labels[grp_idx], color=colors[-grp_idx])
1045
+ if not self.fdm.magnet.postproc.batch_postproc.plot2d.combined_plot:
1046
+ plt.figure()
1047
+ if self.fluxmodel_collection:
1048
+ for grp_idx, group in enumerate([fluxmodel_idx[i:i + grplen] for i in range(0, len(fluxmodel_idx), grplen)]):
1049
+ plt.plot([plotdata_list[idx].x_data for idx in group], [plotdata_list[idx].y_data for idx in group], '-o', label="ROHF", color='red')
1050
+ apply_plot_settings()
1051
+ # otherwise the data entries are arrays: plot one curve per data set in one figure
1052
+ else:
1053
1054
+ for idx, plotdata in enumerate(plotdata_list):
1055
+ plt.plot(plotdata.x_data, plotdata.y_data, linestyle=plotdata.linestyle, label=plotdata.label, color=plotdata.color)
1056
+ apply_plot_settings()
1057
+ if self.fdm.magnet.postproc.batch_postproc.plot2d.save_plot:
1058
+ filename = self.fdm.magnet.postproc.batch_postproc.plot2d.filename
1059
+ filePath = self.create_non_overwriting_filepath(self.outputs_folder_path, filename, '.png', self.fdm.run.overwrite)
1060
+ plt.savefig(filePath, dpi=300)
1061
+
1062
+ if self.fdm.magnet.postproc.batch_postproc.plot2d.combined_plot and self.fdm.magnet.postproc.batch_postproc.plot2d.save_pgfdata:
1063
+ if not scalar_plotdata:
1064
+ data_tuple = ()
1065
+ data_names = []
1066
+ for grp_idx, grp_plotdata in enumerate([plotdata_list[i::labellen] for i in range(labellen)]):
1067
+ data_tuple = data_tuple + ([data.x_data for data in grp_plotdata], [data.y_data for data in grp_plotdata])
1068
+ data_names.extend([self.fdm.magnet.postproc.batch_postproc.plot2d.labels[grp_idx] + str("_x"), self.fdm.magnet.postproc.batch_postproc.plot2d.labels[grp_idx] + str("_y")])
1069
+ data = np.vstack(data_tuple).T
1070
+ header = " ".join(data_names)
1071
+ else:
1072
+ max_len = max([len(plotdata.x_data) for plotdata in plotdata_list])
1073
+ # pad shorter data columns with nan - is ignored by pgfplots
1074
+ data = np.vstack(tuple([np.pad(plotdata.x_data, (0, max_len - len(plotdata.x_data)), "constant", constant_values=(np.nan,)) for plotdata in plotdata_list]
1075
+ + [np.pad(plotdata.y_data, (0, max_len - len(plotdata.y_data)), "constant", constant_values=(np.nan,)) for plotdata in plotdata_list])).T
1076
+ header = " ".join([plotdata.label + '_x' for plotdata in plotdata_list]) + " " + " ".join([plotdata.label + '_y' for plotdata in plotdata_list])
1077
+ np.savetxt(os.path.join(self.outputs_folder_path, 'pgfdata.txt'), data, header=header, comments="")
1078
+
1079
+ if self.fdm.run.launch_gui: plt.show()