pyvale 2025.7.1-cp311-cp311-musllinux_1_2_aarch64.whl → 2025.8.1-cp311-cp311-musllinux_1_2_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyvale/__init__.py +12 -92
- pyvale/blender/__init__.py +23 -0
- pyvale/{pyvaleexceptions.py → blender/blenderexceptions.py} +0 -3
- pyvale/{blenderlightdata.py → blender/blenderlightdata.py} +3 -3
- pyvale/{blendermaterialdata.py → blender/blendermaterialdata.py} +1 -1
- pyvale/{blenderrenderdata.py → blender/blenderrenderdata.py} +5 -3
- pyvale/{blenderscene.py → blender/blenderscene.py} +33 -30
- pyvale/{blendertools.py → blender/blendertools.py} +14 -10
- pyvale/dataset/__init__.py +7 -0
- pyvale/dataset/dataset.py +443 -0
- pyvale/dic/__init__.py +20 -0
- pyvale/dic/cpp/dicfourier.cpp +36 -4
- pyvale/dic/cpp/dicinterpolator.cpp +56 -1
- pyvale/dic/cpp/dicmain.cpp +24 -19
- pyvale/dic/cpp/dicoptimizer.cpp +6 -1
- pyvale/dic/cpp/dicscanmethod.cpp +32 -32
- pyvale/dic/cpp/dicsignalhandler.cpp +16 -0
- pyvale/dic/cpp/dicstrain.cpp +7 -3
- pyvale/dic/cpp/dicutil.cpp +79 -23
- pyvale/{dic2d.py → dic/dic2d.py} +51 -29
- pyvale/dic/dic2dconv.py +6 -0
- pyvale/{dic2dcpp.cpython-311-aarch64-linux-musl.so → dic/dic2dcpp.cpython-311-aarch64-linux-musl.so} +0 -0
- pyvale/{dicchecks.py → dic/dicchecks.py} +28 -16
- pyvale/dic/dicdataimport.py +370 -0
- pyvale/{dicregionofinterest.py → dic/dicregionofinterest.py} +169 -12
- pyvale/{dicresults.py → dic/dicresults.py} +4 -1
- pyvale/{dicstrain.py → dic/dicstrain.py} +9 -9
- pyvale/examples/basics/{ex1_1_basicscalars_therm2d.py → ex1a_basicscalars_therm2d.py} +12 -9
- pyvale/examples/basics/{ex1_2_sensormodel_therm2d.py → ex1b_sensormodel_therm2d.py} +17 -14
- pyvale/examples/basics/{ex1_3_customsens_therm3d.py → ex1c_customsens_therm3d.py} +27 -24
- pyvale/examples/basics/{ex1_4_basicerrors_therm3d.py → ex1d_basicerrors_therm3d.py} +32 -29
- pyvale/examples/basics/{ex1_5_fielderrs_therm3d.py → ex1e_fielderrs_therm3d.py} +19 -15
- pyvale/examples/basics/{ex1_6_caliberrs_therm2d.py → ex1f_caliberrs_therm2d.py} +20 -16
- pyvale/examples/basics/{ex1_7_spatavg_therm2d.py → ex1g_spatavg_therm2d.py} +19 -16
- pyvale/examples/basics/{ex2_1_basicvectors_disp2d.py → ex2a_basicvectors_disp2d.py} +13 -10
- pyvale/examples/basics/{ex2_2_vectorsens_disp2d.py → ex2b_vectorsens_disp2d.py} +19 -15
- pyvale/examples/basics/{ex2_3_sensangle_disp2d.py → ex2c_sensangle_disp2d.py} +21 -18
- pyvale/examples/basics/{ex2_4_chainfielderrs_disp2d.py → ex2d_chainfielderrs_disp2d.py} +31 -29
- pyvale/examples/basics/{ex2_5_vectorfields3d_disp3d.py → ex2e_vectorfields3d_disp3d.py} +21 -18
- pyvale/examples/basics/{ex3_1_basictensors_strain2d.py → ex3a_basictensors_strain2d.py} +16 -14
- pyvale/examples/basics/{ex3_2_tensorsens2d_strain2d.py → ex3b_tensorsens2d_strain2d.py} +17 -14
- pyvale/examples/basics/{ex3_3_tensorsens3d_strain3d.py → ex3c_tensorsens3d_strain3d.py} +25 -22
- pyvale/examples/basics/{ex4_1_expsim2d_thermmech2d.py → ex4a_expsim2d_thermmech2d.py} +17 -14
- pyvale/examples/basics/{ex4_2_expsim3d_thermmech3d.py → ex4b_expsim3d_thermmech3d.py} +37 -34
- pyvale/examples/basics/ex5_nomesh.py +24 -0
- pyvale/examples/dic/ex1_2_blenderdeformed.py +174 -0
- pyvale/examples/dic/ex1_region_of_interest.py +6 -3
- pyvale/examples/dic/ex2_plate_with_hole.py +21 -18
- pyvale/examples/dic/ex3_plate_with_hole_strain.py +8 -6
- pyvale/examples/dic/ex4_dic_blender.py +17 -15
- pyvale/examples/dic/ex5_dic_challenge.py +19 -14
- pyvale/examples/genanalyticdata/ex1_1_scalarvisualisation.py +16 -10
- pyvale/examples/genanalyticdata/ex1_2_scalarcasebuild.py +3 -3
- pyvale/examples/genanalyticdata/ex2_1_analyticsensors.py +29 -23
- pyvale/examples/genanalyticdata/ex2_2_analyticsensors_nomesh.py +67 -0
- pyvale/examples/imagedef2d/ex_imagedef2d_todisk.py +12 -9
- pyvale/examples/mooseherder/ex0_create_moose_config.py +65 -0
- pyvale/examples/mooseherder/ex1a_modify_moose_input.py +71 -0
- pyvale/examples/mooseherder/ex1b_modify_gmsh_input.py +69 -0
- pyvale/examples/mooseherder/ex2a_run_moose_once.py +80 -0
- pyvale/examples/mooseherder/ex2b_run_gmsh_once.py +64 -0
- pyvale/examples/mooseherder/ex2c_run_both_once.py +114 -0
- pyvale/examples/mooseherder/ex3_run_moose_seq_para.py +157 -0
- pyvale/examples/mooseherder/ex4_run_gmsh-moose_seq_para.py +176 -0
- pyvale/examples/mooseherder/ex5_run_moose_paramulti.py +136 -0
- pyvale/examples/mooseherder/ex6_read_moose_exodus.py +163 -0
- pyvale/examples/mooseherder/ex7a_read_moose_herd_results.py +153 -0
- pyvale/examples/mooseherder/ex7b_read_multi_herd_results.py +116 -0
- pyvale/examples/mooseherder/ex7c_read_multi_gmshmoose_results.py +127 -0
- pyvale/examples/mooseherder/ex7d_readconfig_multi_gmshmoose_results.py +143 -0
- pyvale/examples/mooseherder/ex8_read_existing_sweep_output.py +72 -0
- pyvale/examples/renderblender/ex1_1_blenderscene.py +24 -20
- pyvale/examples/renderblender/ex1_2_blenderdeformed.py +22 -18
- pyvale/examples/renderblender/ex2_1_stereoscene.py +36 -29
- pyvale/examples/renderblender/ex2_2_stereodeformed.py +26 -20
- pyvale/examples/renderblender/ex3_1_blendercalibration.py +24 -17
- pyvale/examples/renderrasterisation/ex_rastenp.py +14 -12
- pyvale/examples/renderrasterisation/ex_rastercyth_oneframe.py +14 -15
- pyvale/examples/renderrasterisation/ex_rastercyth_static_cypara.py +13 -11
- pyvale/examples/renderrasterisation/ex_rastercyth_static_pypara.py +13 -11
- pyvale/mooseherder/__init__.py +32 -0
- pyvale/mooseherder/directorymanager.py +416 -0
- pyvale/mooseherder/exodusreader.py +763 -0
- pyvale/mooseherder/gmshrunner.py +163 -0
- pyvale/mooseherder/inputmodifier.py +236 -0
- pyvale/mooseherder/mooseconfig.py +226 -0
- pyvale/mooseherder/mooseherd.py +527 -0
- pyvale/mooseherder/mooserunner.py +303 -0
- pyvale/mooseherder/outputreader.py +22 -0
- pyvale/mooseherder/simdata.py +92 -0
- pyvale/mooseherder/simrunner.py +31 -0
- pyvale/mooseherder/sweepreader.py +356 -0
- pyvale/mooseherder/sweeptools.py +76 -0
- pyvale/sensorsim/__init__.py +82 -0
- pyvale/{camera.py → sensorsim/camera.py} +7 -7
- pyvale/{camerasensor.py → sensorsim/camerasensor.py} +7 -7
- pyvale/{camerastereo.py → sensorsim/camerastereo.py} +2 -2
- pyvale/{cameratools.py → sensorsim/cameratools.py} +4 -4
- pyvale/{cython → sensorsim/cython}/rastercyth.c +596 -596
- pyvale/sensorsim/cython/rastercyth.cpython-311-aarch64-linux-musl.so +0 -0
- pyvale/{cython → sensorsim/cython}/rastercyth.py +16 -17
- pyvale/{errorcalculator.py → sensorsim/errorcalculator.py} +1 -1
- pyvale/{errorintegrator.py → sensorsim/errorintegrator.py} +2 -2
- pyvale/{errorrand.py → sensorsim/errorrand.py} +4 -4
- pyvale/{errorsyscalib.py → sensorsim/errorsyscalib.py} +2 -2
- pyvale/{errorsysdep.py → sensorsim/errorsysdep.py} +2 -2
- pyvale/{errorsysfield.py → sensorsim/errorsysfield.py} +8 -8
- pyvale/{errorsysindep.py → sensorsim/errorsysindep.py} +3 -3
- pyvale/sensorsim/exceptions.py +8 -0
- pyvale/{experimentsimulator.py → sensorsim/experimentsimulator.py} +23 -3
- pyvale/{field.py → sensorsim/field.py} +1 -1
- pyvale/{fieldconverter.py → sensorsim/fieldconverter.py} +72 -19
- pyvale/sensorsim/fieldinterp.py +37 -0
- pyvale/sensorsim/fieldinterpmesh.py +124 -0
- pyvale/sensorsim/fieldinterppoints.py +55 -0
- pyvale/{fieldsampler.py → sensorsim/fieldsampler.py} +4 -4
- pyvale/{fieldscalar.py → sensorsim/fieldscalar.py} +28 -24
- pyvale/{fieldtensor.py → sensorsim/fieldtensor.py} +33 -31
- pyvale/{fieldvector.py → sensorsim/fieldvector.py} +33 -31
- pyvale/{imagedef2d.py → sensorsim/imagedef2d.py} +9 -5
- pyvale/{integratorfactory.py → sensorsim/integratorfactory.py} +6 -6
- pyvale/{integratorquadrature.py → sensorsim/integratorquadrature.py} +3 -3
- pyvale/{integratorrectangle.py → sensorsim/integratorrectangle.py} +3 -3
- pyvale/{integratorspatial.py → sensorsim/integratorspatial.py} +1 -1
- pyvale/{rastercy.py → sensorsim/rastercy.py} +5 -5
- pyvale/{rasternp.py → sensorsim/rasternp.py} +9 -9
- pyvale/{rasteropts.py → sensorsim/rasteropts.py} +1 -1
- pyvale/{renderer.py → sensorsim/renderer.py} +1 -1
- pyvale/{rendermesh.py → sensorsim/rendermesh.py} +5 -5
- pyvale/{renderscene.py → sensorsim/renderscene.py} +2 -2
- pyvale/{sensorarray.py → sensorsim/sensorarray.py} +1 -1
- pyvale/{sensorarrayfactory.py → sensorsim/sensorarrayfactory.py} +12 -12
- pyvale/{sensorarraypoint.py → sensorsim/sensorarraypoint.py} +10 -8
- pyvale/{sensordata.py → sensorsim/sensordata.py} +1 -1
- pyvale/{sensortools.py → sensorsim/sensortools.py} +2 -20
- pyvale/sensorsim/simtools.py +174 -0
- pyvale/{visualexpplotter.py → sensorsim/visualexpplotter.py} +3 -3
- pyvale/{visualimages.py → sensorsim/visualimages.py} +2 -2
- pyvale/{visualsimanimator.py → sensorsim/visualsimanimator.py} +4 -4
- pyvale/{visualsimplotter.py → sensorsim/visualsimplotter.py} +5 -5
- pyvale/{visualsimsensors.py → sensorsim/visualsimsensors.py} +12 -12
- pyvale/{visualtools.py → sensorsim/visualtools.py} +1 -1
- pyvale/{visualtraceplotter.py → sensorsim/visualtraceplotter.py} +2 -2
- pyvale/simcases/case17.geo +3 -0
- pyvale/simcases/case17.i +4 -4
- pyvale/simcases/run_1case.py +1 -9
- pyvale/simcases/run_all_cases.py +1 -1
- pyvale/simcases/run_build_case.py +1 -1
- pyvale/simcases/run_example_cases.py +1 -1
- pyvale/verif/__init__.py +12 -0
- pyvale/{analyticsimdatafactory.py → verif/analyticsimdatafactory.py} +2 -2
- pyvale/{analyticsimdatagenerator.py → verif/analyticsimdatagenerator.py} +2 -2
- pyvale/verif/psens.py +125 -0
- pyvale/verif/psensconst.py +18 -0
- pyvale/verif/psensmech.py +227 -0
- pyvale/verif/psensmultiphys.py +187 -0
- pyvale/verif/psensscalar.py +347 -0
- pyvale/verif/psenstensor.py +123 -0
- pyvale/verif/psensvector.py +116 -0
- {pyvale-2025.7.1.dist-info → pyvale-2025.8.1.dist-info}/METADATA +6 -7
- pyvale-2025.8.1.dist-info/RECORD +263 -0
- pyvale/cython/rastercyth.cpython-311-aarch64-linux-musl.so +0 -0
- pyvale/dataset.py +0 -415
- pyvale/dicdataimport.py +0 -247
- pyvale/simtools.py +0 -67
- pyvale-2025.7.1.dist-info/RECORD +0 -214
- /pyvale/{blendercalibrationdata.py → blender/blendercalibrationdata.py} +0 -0
- /pyvale/{dicspecklegenerator.py → dic/dicspecklegenerator.py} +0 -0
- /pyvale/{dicspecklequality.py → dic/dicspecklequality.py} +0 -0
- /pyvale/{dicstrainresults.py → dic/dicstrainresults.py} +0 -0
- /pyvale/{cameradata.py → sensorsim/cameradata.py} +0 -0
- /pyvale/{cameradata2d.py → sensorsim/cameradata2d.py} +0 -0
- /pyvale/{errordriftcalc.py → sensorsim/errordriftcalc.py} +0 -0
- /pyvale/{fieldtransform.py → sensorsim/fieldtransform.py} +0 -0
- /pyvale/{generatorsrandom.py → sensorsim/generatorsrandom.py} +0 -0
- /pyvale/{imagetools.py → sensorsim/imagetools.py} +0 -0
- /pyvale/{integratortype.py → sensorsim/integratortype.py} +0 -0
- /pyvale/{output.py → sensorsim/output.py} +0 -0
- /pyvale/{raster.py → sensorsim/raster.py} +0 -0
- /pyvale/{sensordescriptor.py → sensorsim/sensordescriptor.py} +0 -0
- /pyvale/{visualimagedef.py → sensorsim/visualimagedef.py} +0 -0
- /pyvale/{visualopts.py → sensorsim/visualopts.py} +0 -0
- /pyvale/{analyticmeshgen.py → verif/analyticmeshgen.py} +0 -0
- {pyvale-2025.7.1.dist-info → pyvale-2025.8.1.dist-info}/WHEEL +0 -0
- {pyvale-2025.7.1.dist-info → pyvale-2025.8.1.dist-info}/licenses/LICENSE +0 -0
- {pyvale-2025.7.1.dist-info → pyvale-2025.8.1.dist-info}/top_level.txt +0 -0
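The file listing above shows the 2025.8.1 release reorganising the package into subpackages (pyvale/sensorsim, pyvale/dic, pyvale/blender, pyvale/dataset, pyvale/verif) and bundling the mooseherder workflow tools as pyvale/mooseherder. The sketch below shows the import layout this implies, inferred only from the import statements used in the new example files in the hunks that follow; treat anything not shown verbatim in those hunks as an assumption about the public API rather than documented behaviour.

    # Import layout inferred from the new example files shown below
    # (assumption, not taken from the package documentation).
    import pyvale.dataset as dataset    # simulation/test-case library (new subpackage)
    import pyvale.sensorsim as sens     # former top-level sensor modules now live here
    from pyvale.mooseherder import (    # bundled mooseherder workflow tools (new)
        MooseHerd, MooseRunner, MooseConfig, InputModifier,
        DirectoryManager, SweepReader, ExodusReader, sweep_param_grid)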
pyvale/examples/mooseherder/ex5_run_moose_paramulti.py
@@ -0,0 +1,136 @@
+# ==============================================================================
+# pyvale: the python validation engine
+# License: MIT
+# Copyright (C) 2025 The Computer Aided Validation Team
+# ==============================================================================
+
+"""
+Using multiple calls to run parallel sweeps
+================================================================================
+
+In this example we demonstrate how multiple repeated calls can be made to the
+'herd' workflow manager where the simulations do not overwrite each other;
+instead they accumulate within the output directories. If you need the
+simulation output to be cleared after each call to run a sweep sequentially or
+in parallel then you will need to clear it using the directory manager.
+
+**Installing moose**: To run this example you will need to have installed moose
+on your system. As moose supports unix operating systems, windows users will need
+to use windows subsystem for linux (WSL). We use the proteus moose build which
+can be found here: https://github.com/aurora-multiphysics/proteus. Build scripts
+for common linux distributions can be found in the 'scripts' directory of the
+repo. You can also create your own moose build using instructions here:
+https://mooseframework.inl.gov/.
+
+We start by importing what we need for this example. Everything at the start
+is similar to previous examples where we have set up our herd workflow
+manager. So, if you feel confident with things so far then skip down to the
+last section.
+"""
+
+from pathlib import Path
+import numpy as np
+
+#pyvale imports
+import pyvale.dataset as dataset
+from pyvale.mooseherder import (MooseHerd,
+                                MooseRunner,
+                                MooseConfig,
+                                InputModifier,
+                                DirectoryManager,
+                                sweep_param_grid)
+
+#%%
+# First we set up an input modifier and runner for our moose simulation in
+# exactly the same way as we have done in previous examples.
+
+moose_input = dataset.element_case_input_path(dataset.EElemTest.HEX20)
+moose_modifier = InputModifier(moose_input,'#','')
+
+config = {'main_path': Path.home()/ 'moose',
+          'app_path': Path.home() / 'proteus',
+          'app_name': 'proteus-opt'}
+moose_config = MooseConfig(config)
+
+moose_runner = MooseRunner(moose_config)
+moose_runner.set_run_opts(n_tasks = 1,
+                          n_threads = 2,
+                          redirect_out = True)
+
+#%%
+# We use the moose input modifier and runner to create our herd workflow manager
+# as we have seen in previous examples.
+num_para_sims: int = 4
+dir_manager = DirectoryManager(n_dirs=num_para_sims)
+herd = MooseHerd([moose_runner],[moose_modifier],dir_manager)
+herd.set_num_para_sims(n_para=num_para_sims)
+
+#%%
+# We need somewhere to run our simulations and store the output so we create our
+# standard pyvale output directory as we have done in previous examples and then
+# pass this to our directory manager.
+output_path = Path.cwd() / "pyvale-output"
+if not output_path.is_dir():
+    output_path.mkdir(parents=True, exist_ok=True)
+
+dir_manager.set_base_dir(output_path)
+dir_manager.reset_dirs()
+
+#%%
+# We generate a grid sweep of the variables we are interested in analysing as
+# we have done previously and then print this to the console so we can check
+# all combinations of variables that we want are present and that the total
+# number of simulations makes sense.
+
+moose_params = {"nElemX": (2,3),
+                "lengX": np.array([10e-3,15e-3]),
+                "PRatio":(0.3,)}
+params = [moose_params,]
+sweep_params = sweep_param_grid(params)
+
+print("\nParameter sweep variables by simulation:")
+for ii,pp in enumerate(sweep_params):
+    print(f"Sim: {ii}, Params [moose,]: {pp}")
+
+print()
+print(f"Total simulations = {len(sweep_params)}")
+print()
+
+#%%
+# Here we are going to run the parameter sweep a certain number of times while
+# storing the total time to complete the parameter sweep each time. Once
+# we have completed all the parameter sweeps we print the time taken for each
+# sweep and the average sweep time to the console.
+#
+# Now if we inspect the simulation working directories in our pyvale-output
+# directory we will see that all runs have been stored. If we need to clear
+# the directories in between parallel sweeps we can call
+# ``dir_manager.reset_dirs()`` and then we will only be left with one copy of
+# the sweep output. Retaining all simulations is useful if we want to update
+# the parameters we are passing to the ``run_para`` function every time it
+# is called.
+
+num_para_runs: int = 3
+
+if __name__ == '__main__':
+    sweep_times = np.zeros((num_para_runs,),dtype=np.float64)
+    for rr in range(num_para_runs):
+        herd.run_para(sweep_params)
+        sweep_times[rr] = herd.get_sweep_time()
+
+
+    print(80*"-")
+    for ii,ss in enumerate(sweep_times):
+        print(f"Sweep {ii} took: {ss:.3f} seconds")
+
+    print(80*"-")
+    print(f"Average sweep time: {np.mean(sweep_times):.3f} seconds")
+    print(80*"-")
+
+
+
+
+
+
+
+
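The docstring above notes that calling ``dir_manager.reset_dirs()`` between sweeps leaves only a single copy of the output rather than letting runs accumulate. A minimal sketch of that variant is shown below; it reuses only calls that appear in the example above and assumes pyvale 2025.8.1 with a local proteus/MOOSE build, so treat it as an illustration rather than part of the package.

    # Sketch: repeated parallel sweeps that clear the working directories
    # between calls, so only the latest sweep output is kept.
    from pathlib import Path

    import pyvale.dataset as dataset
    from pyvale.mooseherder import (MooseHerd, MooseRunner, MooseConfig,
                                    InputModifier, DirectoryManager,
                                    sweep_param_grid)

    moose_input = dataset.element_case_input_path(dataset.EElemTest.HEX20)
    moose_modifier = InputModifier(moose_input, '#', '')

    moose_config = MooseConfig({'main_path': Path.home() / 'moose',
                                'app_path': Path.home() / 'proteus',
                                'app_name': 'proteus-opt'})
    moose_runner = MooseRunner(moose_config)
    moose_runner.set_run_opts(n_tasks=1, n_threads=2, redirect_out=True)

    num_para_sims = 4
    dir_manager = DirectoryManager(n_dirs=num_para_sims)
    herd = MooseHerd([moose_runner], [moose_modifier], dir_manager)
    herd.set_num_para_sims(n_para=num_para_sims)

    output_path = Path.cwd() / "pyvale-output"
    output_path.mkdir(parents=True, exist_ok=True)
    dir_manager.set_base_dir(output_path)

    sweep_params = sweep_param_grid([{"nElemX": (2, 3), "lengX": (10e-3, 15e-3)}])

    if __name__ == "__main__":
        for rr in range(3):
            dir_manager.reset_dirs()   # drop this line to let sweep output accumulate
            herd.run_para(sweep_params)
            print(f"Sweep {rr} took {herd.get_sweep_time():.3f} seconds")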
pyvale/examples/mooseherder/ex6_read_moose_exodus.py
@@ -0,0 +1,163 @@
+# ==============================================================================
+# pyvale: the python validation engine
+# License: MIT
+# Copyright (C) 2025 The Computer Aided Validation Team
+# ==============================================================================
+
+"""
+Reading exodus output from a MOOSE simulation
+================================================================================
+
+In this example we ...
+
+**Installing moose**: To run this example you will need to have installed moose
+on your system. As moose supports unix operating systems, windows users will need
+to use windows subsystem for linux (WSL). We use the proteus moose build which
+can be found here: https://github.com/aurora-multiphysics/proteus. Build scripts
+for common linux distributions can be found in the "scripts" directory of the
+repo. You can also create your own moose build using instructions here:
+https://mooseframework.inl.gov/.
+
+We start by importing what we need for this example.
+"""
+
+import time
+import shutil
+from pathlib import Path
+from typing import Any
+import dataclasses
+import numpy as np
+
+#pyvale imports
+import pyvale.dataset as dataset
+import pyvale.sensorsim as sens
+from pyvale.mooseherder import (MooseRunner,
+                                MooseConfig,
+                                ExodusReader)
+
+#%%
+# We also define a helper function that will print all attributes of a
+# dataclass so we can see what it contains. This will be useful when we inspect
+# what our ``SimData`` objects contain.
+def print_attrs(in_obj: Any) -> None:
+    for field in dataclasses.fields(in_obj):
+        if not field.name.startswith('__'):
+            print(f" {field.name}: {field.type}")
+
+#%%
+# We need to know where our simulation output is so we are going to create our
+# standard pyvale-output directory, grab our simulation input file from the
+# pyvale simulation library and then copy it to this directory to run. This
+# means the output exodus will appear in the same directory as the input file.
+
+output_path = Path.cwd() / "pyvale-output"
+if not output_path.is_dir():
+    output_path.mkdir(parents=True, exist_ok=True)
+
+moose_file = dataset.element_case_input_path(dataset.EElemTest.HEX20)
+moose_input = output_path / moose_file.name
+
+shutil.copyfile(moose_file,moose_input)
+
+
+#%%
+# We now create our moose runner with the same method we have used in previous
+# examples. We run the simulation and time it, printing the solve time to the
+# terminal.
+
+config = {"main_path": Path.home()/ "moose",
+          "app_path": Path.home() / "proteus",
+          "app_name": "proteus-opt"}
+moose_config = MooseConfig(config)
+
+moose_runner = MooseRunner(moose_config)
+
+moose_runner.set_run_opts(n_tasks=1, n_threads=4, redirect_out=True)
+
+moose_runner.set_input_file(moose_input)
+
+start_time = time.perf_counter()
+moose_runner.run()
+run_time = time.perf_counter() - start_time
+
+print("-"*80)
+print(f"MOOSE run time = {run_time:.3f} seconds")
+print("-"*80)
+
+#%%
+# Now we create our exodus reader by giving it the path to the exodus file we
+# want to read. By default moose creates an exodus output with the input file
+# name with "_out.e" appended.
+output_exodus = output_path / (moose_input.stem + "_out.e")
+exodus_reader = ExodusReader(output_exodus)
+
+print("\nReading exodus file with ExodusReader:")
+print(output_exodus.resolve())
+print()
+
+
+#%%
+# We start with the simplest method which is to just read everything in the
+# exodus file and return it as a ``SimData`` object. In some cases we will not
+# want to read everything into memory so we will show how we can control this
+# next.
+#
+# We then use a helper function to print the sim data fields to the terminal so
+# we can see the structure of the dataclass. The documentation for the
+# ``SimData`` class provides descriptions of each of the fields and we
+# recommend you check this out to understand the terminal output.
+all_sim_data = exodus_reader.read_all_sim_data()
+print("SimData from 'read_all':")
+sens.SimTools.print_sim_data(all_sim_data)
+
+#%%
+# We are now going to read specific variables from the exodus output using a
+# read configuration object. There are two ways to create this object. A good
+# way to start is to use the exodus reader to return the read config that would
+# extract all variables from the exodus as shown below. This is helpful as it
+# will pre-populate the 'node', 'elem' and 'glob' variables with the appropriate
+# dictionary keys to read based on what is already in the exodus file.
+
+read_config = exodus_reader.get_read_config()
+sens.SimTools.print_dataclass_fields(read_config)
+
+#%%
+# We set the 'node_vars' field to None to prevent the nodal variables being read
+# from the exodus file. We then use the read function to return a ``SimData``
+# object and we print the 'node_vars' field to verify that it has not been read.
+read_config.node_vars = None
+sim_data = exodus_reader.read_sim_data(read_config)
+
+print("Read config without 'node_vars':")
+print(f" {sim_data.node_vars=}")
+print()
+
+#%%
+# We can also turn off reading of the simulation time steps, nodal coordinates
+# and the connectivity table by setting these flags to False in our read config.
+read_config.time = False
+read_config.coords = False
+read_config.connect = False
+sim_data = exodus_reader.read_sim_data(read_config)
+
+print("Read config without time, coords and connectivity:")
+print(f" {sim_data.time=}")
+print(f" {sim_data.coords=}")
+print(f" {sim_data.connect=}")
+print()
+
+
+#%%
+# We can also read specific keyed fields from 'node', 'elem' and 'glob'
+# variables. Here we will read just the x displacement from the node variables.
+# Note that for element variables you also need to specify the block number
+# (corresponding to the number X in the key for the connectivity table in the
+# format "connectivityX" in the connectivity dictionary).
+
+read_config.node_vars = ("disp_x",)
+sim_data = exodus_reader.read_sim_data(read_config)
+print("Read config only extracting x displacement:")
+print(f" {sim_data.node_vars.keys()=}")
+print(f" {sim_data.node_vars['disp_x'].shape=}")
+print()
+
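A common follow-on to the example above is reading an exodus file that already exists on disk without re-running MOOSE. The sketch below uses only the ExodusReader/read-config calls shown in the hunk (assuming pyvale 2025.8.1); the file path is a placeholder for your own output, "disp_x" is assumed to be a nodal variable in that file as it is for the HEX20 case above, and moose's default naming appends "_out.e" to the input file stem as noted in the example.

    # Sketch: selective read of an existing exodus file (placeholder path).
    from pathlib import Path

    from pyvale.mooseherder import ExodusReader

    exodus_file = Path.cwd() / "pyvale-output" / "your_sim_out.e"  # placeholder
    exodus_reader = ExodusReader(exodus_file)

    read_config = exodus_reader.get_read_config()  # keys pre-populated from the file
    read_config.node_vars = ("disp_x",)            # keep a single nodal variable
    read_config.connect = False                    # skip the connectivity table

    sim_data = exodus_reader.read_sim_data(read_config)
    print(f"{sim_data.node_vars['disp_x'].shape=}")
    print(f"{sim_data.coords.shape=}")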
pyvale/examples/mooseherder/ex7a_read_moose_herd_results.py
@@ -0,0 +1,153 @@
+# ==============================================================================
+# pyvale: the python validation engine
+# License: MIT
+# Copyright (C) 2025 The Computer Aided Validation Team
+# ==============================================================================
+
+"""
+Reading exodus output from a parameter sweep
+================================================================================
+
+In this example we run a parallel sweep of a moose simulation and then read the
+results of the whole sweep using the sweep reader class.
+
+**Installing moose**: To run this example you will need to have installed moose
+on your system. As moose supports unix operating systems, windows users will need
+to use windows subsystem for linux (WSL). We use the proteus moose build which
+can be found here: https://github.com/aurora-multiphysics/proteus. Build scripts
+for common linux distributions can be found in the 'scripts' directory of the
+repo. You can also create your own moose build using instructions here:
+https://mooseframework.inl.gov/.
+
+We start by importing what we need for this example.
+"""
+
+import time
+from pathlib import Path
+import numpy as np
+
+#pyvale imports
+import pyvale.sensorsim as sens
+import pyvale.dataset as dataset
+from pyvale.mooseherder import (MooseHerd,
+                                MooseRunner,
+                                MooseConfig,
+                                InputModifier,
+                                DirectoryManager,
+                                SweepReader,
+                                sweep_param_grid)
+
+#%%
+# In this first section we set up our herd workflow manager to run a parameter
+# sweep of our moose simulation as we have done in previous examples. We run
+# the parameter sweep and print the solve time to the terminal. The sweep
+# output is in the standard pyvale-output directory we have used previously.
+# In the next section we will read the output from the parameter sweep below.
+
+moose_input = dataset.element_case_input_path(dataset.EElemTest.HEX20)
+moose_modifier = InputModifier(moose_input,'#','')
+
+config = {'main_path': Path.home()/ 'moose',
+          'app_path': Path.home() / 'proteus',
+          'app_name': 'proteus-opt'}
+moose_config = MooseConfig(config)
+
+moose_runner = MooseRunner(moose_config)
+moose_runner.set_run_opts(n_tasks = 1,
+                          n_threads = 2,
+                          redirect_out = True)
+
+num_para_sims: int = 4
+dir_manager = DirectoryManager(n_dirs=num_para_sims)
+herd = MooseHerd([moose_runner],[moose_modifier],dir_manager)
+herd.set_num_para_sims(n_para=num_para_sims)
+
+output_path = Path.cwd() / "pyvale-output"
+if not output_path.is_dir():
+    output_path.mkdir(parents=True, exist_ok=True)
+
+dir_manager.set_base_dir(output_path)
+dir_manager.reset_dirs()
+
+moose_params = {"nElemX": (2,3),
+                "lengX": np.array([10e-3,15e-3]),
+                "PRatio":(0.3,0.35)}
+params = [moose_params,]
+sweep_params = sweep_param_grid(params)
+
+
+if __name__ == "__main__":
+    print('Running simulation parameter sweep in parallel.')
+    herd.run_para(sweep_params)
+    print(f'Run time (parallel) = {herd.get_sweep_time():.3f} seconds\n')
+
+
+#%%
+# To read the sweep output files we first create our sweep reader and pass it
+# the same directory manager we used to run the sweep. We also set the number
+# of simulation outputs to read in parallel when we call the read parallel
+# function. We will see below that we can still read sequentially by calling
+# the sequential read function, and if the simulation output files are small it
+# is likely to be faster to read them sequentially.
+#
+# We first use our sweep reader to inspect the output path keys to find the
+# simulation output files that exist in the simulation working directories.
+
+sweep_reader = SweepReader(dir_manager,num_para_read=4)
+output_files = sweep_reader.read_all_output_file_keys()
+
+print('Sweep output files (from output_keys.json):')
+for ff in output_files:
+    print(f" {ff}")
+print()
+
+#%%
+# Using the sweep reader we can read the results for a single simulation chain
+# from the sweep. Our simulation chain only has a single moose simulation so
+# the list of ``SimData`` objects we are returned only has a single element.
+# We then use a helper function to print the contents of the ``SimData`` object
+# to the terminal.
+#
+# We suggest you check out the documentation for the ``SimData`` object as it
+# includes a detailed description of each of the relevant fields you might want
+# to use for post-processing.
+sim_data_list = sweep_reader.read_results_once(output_files[0])
+sens.SimTools.print_sim_data(sim_data_list[0])
+
+#%%
+# We can use the sweep reader to read results for each simulation chain in the
+# sweep sequentially with the sequential read function. The sweep results we
+# are returned are a list of lists of data classes where the outer list
+# corresponds to the unique simulation chain in the sweep and the inner list
+# corresponds to the results for the particular simulation tool in the chain.
+#
+# After reading the sweep results we print the inner and outer list lengths. We
+# have 8 unique simulation chains with a single simulation tool (moose) in the
+# chain.
+start_time = time.perf_counter()
+sweep_results_seq = sweep_reader.read_sequential()
+read_time_seq = time.perf_counter() - start_time
+
+print("Outer list = unique simulation chain:")
+print(f" {len(sweep_results_seq)=}")
+print("Inner list = particular simulation tool in the chain:")
+print(f" {len(sweep_results_seq[0])=}")
+print("'SimData' object for the particular simulation tool:")
+print(f" {type(sweep_results_seq[0][0])=}")
+print()
+
+#%%
+# Finally, we read the same sweep in parallel making sure we include a main
+# guard as we will be using the multi-processing package to do this. We then
+# print the read time to the console for the sequential and parallel reads.
+if __name__ == '__main__':
+    start_time = time.perf_counter()
+    sweep_results_para = sweep_reader.read_results_para()
+    read_time_para = time.perf_counter() - start_time
+
+    print()
+    print("-"*80)
+    print(f'Read time sequential = {read_time_seq:.6f} seconds')
+    print(f'Read time parallel = {read_time_para:.6f} seconds')
+    print("-"*80)
+    print()
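Once the sweep above has been read, the list-of-lists structure can be post-processed directly. The sketch below pulls one summary number out of each simulation chain; it reuses the ``dir_manager`` built in the example above (after the sweep has run) and assumes the HEX20 case writes a nodal variable called "disp_x", as used in the exodus reader example, so it is an illustration rather than part of the package.

    # Sketch: summarise each simulation chain returned by read_sequential().
    import numpy as np

    from pyvale.mooseherder import SweepReader

    sweep_reader = SweepReader(dir_manager)          # dir_manager from the example above
    sweep_results = sweep_reader.read_sequential()   # list[chain] of list[tool] of SimData

    for ii, chain in enumerate(sweep_results):
        sim_data = chain[0]                          # single moose tool in each chain
        max_disp = np.nanmax(sim_data.node_vars["disp_x"])
        print(f"Chain {ii}: max disp_x over all nodes and steps = {max_disp:.3e}")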
pyvale/examples/mooseherder/ex7b_read_multi_herd_results.py
@@ -0,0 +1,116 @@
+# ==============================================================================
+# pyvale: the python validation engine
+# License: MIT
+# Copyright (C) 2025 The Computer Aided Validation Team
+# ==============================================================================
+
+"""
+Read parameter sweep results for a MOOSE simulation
+================================================================================
+
+In this example we read all the simulation results from multiple calls to the
+herd workflow manager and verify that we correctly read all of the simulation
+outputs.
+
+**Installing moose**: To run this example you will need to have installed moose
+on your system. As moose supports unix operating systems, windows users will need
+to use windows subsystem for linux (WSL). We use the proteus moose build which
+can be found here: https://github.com/aurora-multiphysics/proteus. Build scripts
+for common linux distributions can be found in the 'scripts' directory of the
+repo. You can also create your own moose build using instructions here:
+https://mooseframework.inl.gov/.
+
+We start by importing what we need for this example.
+"""
+
+import time
+from pathlib import Path
+import numpy as np
+
+#pyvale imports
+import pyvale.dataset as dataset
+from pyvale.mooseherder import (MooseHerd,
+                                MooseRunner,
+                                MooseConfig,
+                                InputModifier,
+                                DirectoryManager,
+                                SweepReader,
+                                sweep_param_grid)
+
+
+#%%
+# The first part of this example is the same as our previous example called:
+# 'Using multiple calls to run parallel sweeps'. For a detailed explanation of
+# the code below head to that example. For now we use this to generate multiple
+# sets of outputs and then use a sweep reader to read this all in below.
+
+moose_input = dataset.element_case_input_path(dataset.EElemTest.HEX20)
+moose_modifier = InputModifier(moose_input,'#','')
+
+config = {'main_path': Path.home()/ 'moose',
+          'app_path': Path.home() / 'proteus',
+          'app_name': 'proteus-opt'}
+moose_config = MooseConfig(config)
+
+moose_runner = MooseRunner(moose_config)
+moose_runner.set_run_opts(n_tasks = 1,
+                          n_threads = 2,
+                          redirect_out = True)
+
+num_para_sims: int = 4
+dir_manager = DirectoryManager(n_dirs=num_para_sims)
+herd = MooseHerd([moose_runner],[moose_modifier],dir_manager)
+herd.set_num_para_sims(n_para=num_para_sims)
+
+output_path = Path.cwd() / "pyvale-output"
+if not output_path.is_dir():
+    output_path.mkdir(parents=True, exist_ok=True)
+
+dir_manager.set_base_dir(output_path)
+dir_manager.reset_dirs()
+
+moose_params = {"nElemX": (2,3),
+                "lengX": np.array([10e-3,15e-3]),
+                "PRatio":(0.3,)}
+params = [moose_params,]
+sweep_params = sweep_param_grid(params)
+
+print("\nParameter sweep variables by simulation:")
+for ii,pp in enumerate(sweep_params):
+    print(f"Sim: {ii}, Params [moose,]: {pp}")
+
+num_para_runs: int = 3
+if __name__ == '__main__':
+    sweep_times = np.zeros((num_para_runs,),dtype=np.float64)
+    for rr in range(num_para_runs):
+        herd.run_para(sweep_params)
+        sweep_times[rr] = herd.get_sweep_time()
+
+    print()
+    for ii,ss in enumerate(sweep_times):
+        print(f"Sweep {ii} took: {ss:.3f} seconds")
+    print()
+
+#%%
+# Here we will just read all the results sequentially and verify that we have
+# the number of simulation results we expect for the multiple calls above.
+# Looking at our parameter sweep we have 4 unique combinations of variables and
+# we ran this 3 times so we expect to have a total of 12 simulation results in
+# our outer list which we print below. Note that our inner list still has a
+# single ``SimData`` object as we only have a single moose simulation in our
+# simulation chain.
+sweep_reader = SweepReader(dir_manager)
+start_time = time.perf_counter()
+sweep_results_seq = sweep_reader.read_sequential()
+read_time_seq = time.perf_counter() - start_time
+
+print("Outer list = unique simulation chain:")
+print(f" {len(sweep_results_seq)=}")
+print("Inner list = particular simulation tool in the chain:")
+print(f" {len(sweep_results_seq[0])=}")
+print("'SimData' object for the particular simulation tool:")
+print(f" {type(sweep_results_seq[0][0])=}")
+print()
+
+print(f'Read time (sequential) = {read_time_seq:.6f} seconds')
+
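A quick consistency check on the accumulated output reads naturally after the example above: 4 parameter combinations run 3 times should give 12 simulation chains, each holding a single moose ``SimData`` object. The sketch below is an illustration that reuses ``sweep_params``, ``num_para_runs`` and ``sweep_results_seq`` from that example; the expected counts are tied to that specific setup.

    # Sketch: verify the number of results read back from the repeated sweeps.
    expected_chains = len(sweep_params) * num_para_runs   # 4 * 3 = 12

    assert len(sweep_results_seq) == expected_chains, (
        f"expected {expected_chains} chains, read {len(sweep_results_seq)}")
    assert all(len(chain) == 1 for chain in sweep_results_seq), (
        "each chain should contain a single moose SimData object")
    print(f"Read {len(sweep_results_seq)} simulation chains as expected.")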
pyvale/examples/mooseherder/ex7c_read_multi_gmshmoose_results.py
@@ -0,0 +1,127 @@
+# ==============================================================================
+# pyvale: the python validation engine
+# License: MIT
+# Copyright (C) 2025 The Computer Aided Validation Team
+# ==============================================================================
+
+"""
+Read parameter sweep results for a Gmsh and MOOSE simulation
+================================================================================
+
+In this example we will read the results from a sweep that includes gmsh and
+moose in the simulation chain. A key difference here in the results output is
+that gmsh does not create output we want to read, so the inner list of our
+results list of lists will have 'None' at its 0 index and the ``SimData``
+object from our moose simulation at the 1 index.
+
+**Installing moose**: To run this example you will need to have installed moose
+on your system. As moose supports unix operating systems, windows users will need
+to use windows subsystem for linux (WSL). We use the proteus moose build which
+can be found here: https://github.com/aurora-multiphysics/proteus. Build scripts
+for common linux distributions can be found in the 'scripts' directory of the
+repo. You can also create your own moose build using instructions here:
+https://mooseframework.inl.gov/.
+
+**Installing gmsh**: For this example you will need to have a gmsh executable
+which can be downloaded and installed from here: https://gmsh.info/#Download
+
+We start by importing what we need for this example.
+"""
+
+import time
+from pathlib import Path
+import numpy as np
+
+#pyvale imports
+import pyvale.dataset as dataset
+from pyvale.mooseherder import (MooseHerd,
+                                MooseRunner,
+                                GmshRunner,
+                                MooseConfig,
+                                InputModifier,
+                                DirectoryManager,
+                                SweepReader,
+                                sweep_param_grid)
+
+#%%
+# The first part of this example is the same as the previous example we have
+# seen running a parallel parameter sweep for gmsh+moose using the herd workflow
+# manager. If you are not familiar with the code below go back to the example
+# entitled 'Running a parameter sweep of a Gmsh and MOOSE simulation'.
+# Otherwise you can skip down to the next code block where we will read the
+# output of the sweep and compare it to what we have seen previously.
+sim_case: int = 17
+
+gmsh_input = dataset.sim_case_gmsh_file_path(case_num=sim_case)
+gmsh_modifier = InputModifier(gmsh_input,"//",";")
+
+gmsh_runner = GmshRunner(gmsh_path=(Path.home() / "gmsh/bin/gmsh"))
+gmsh_runner.set_input_file(gmsh_input)
+
+moose_input = dataset.sim_case_input_file_path(case_num=sim_case)
+moose_modifier = InputModifier(moose_input,"#","")
+
+moose_config = MooseConfig({'main_path': Path.home()/ 'moose',
+                            'app_path': Path.home() / 'proteus',
+                            'app_name': 'proteus-opt'})
+moose_runner = MooseRunner(moose_config)
+moose_runner.set_run_opts(n_tasks = 1,
+                          n_threads = 2,
+                          redirect_out = True)
+
+num_para_sims: int = 4
+dir_manager = DirectoryManager(n_dirs=num_para_sims)
+herd = MooseHerd([gmsh_runner,moose_runner],
+                 [gmsh_modifier,moose_modifier],
+                 dir_manager,
+                 num_para_sims)
+
+output_path = Path.cwd() / "pyvale-output"
+if not output_path.is_dir():
+    output_path.mkdir(parents=True, exist_ok=True)
+
+dir_manager.set_base_dir(output_path)
+dir_manager.reset_dirs()
+
+gmsh_params = {"plate_width": np.array([150e-3,100e-3]),
+               "plate_height": ("plate_width + 100e-3",
+                                "plate_width + 50e-3")}
+moose_params = None
+sweep_params = sweep_param_grid([gmsh_params,moose_params])
+
+if __name__ == "__main__":
+    herd.run_para(sweep_params)
+    time_run_para = herd.get_sweep_time()
+
+
+    print(f'Sweep run time (para) = {time_run_para:.3f} seconds\n')
+
+#%%
+# We now pass the directory manager we used for the sweep to our sweep reader
+# and then we read in the sweep results sequentially. As we have seen before
+# our sweep results are given as a list of lists where the outer list is the
+# unique simulation chain in our parameter sweep and the inner list corresponds
+# to each simulation tool in the chain. In this case we have gmsh and moose so
+# our inner list should have a length of 2.
+
+sweep_reader = SweepReader(dir_manager,num_para_read=4)
+
+start_time = time.perf_counter()
+sweep_results_seq = sweep_reader.read_sequential()
+read_time_seq = time.perf_counter() - start_time
+
+print(f'Read time sequential = {read_time_seq:.6f} seconds\n')
+
+print("Outer list = unique simulation chain:")
+print(f" {len(sweep_results_seq)=}")
+print("Inner list = particular simulation tool in the chain:")
+print(f" {len(sweep_results_seq[0])=}")
+print()
+
+#%%
+# As gmsh does not have any simulation output we can read as a ``SimData``
+# object we see that our inner sweep results list has 'None' in the position
+# corresponding to gmsh (the 0 index). We also see we have our moose output as
+# a ``SimData`` object at the 1 index of the inner list.
+print(f"{type(sweep_results_seq[0][0])=}")
+print(f"{type(sweep_results_seq[0][1])=}")
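When a chain contains a tool with no readable output, as gmsh does here, its slot in the inner list is None, so post-processing code needs to filter those entries. The sketch below reuses ``sweep_results_seq`` from the example above (gmsh at index 0, moose ``SimData`` at index 1 of each inner list) and is an illustration rather than part of the package.

    # Sketch: skip tools that produced no readable output (None entries).
    for ii, chain in enumerate(sweep_results_seq):
        readable = [dd for dd in chain if dd is not None]
        print(f"Chain {ii}: {len(readable)} readable SimData object(s), "
              f"{len(chain) - len(readable)} tool(s) with no output")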