idmtools-test 0.0.0.dev0__py3-none-any.whl → 0.0.2__py3-none-any.whl
This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- idmtools_test/__init__.py +16 -8
- idmtools_test/inputs/__init__.py +0 -0
- idmtools_test/inputs/assets/collections/1/a.txt +0 -0
- idmtools_test/inputs/assets/collections/1/b.txt +0 -0
- idmtools_test/inputs/assets/collections/2/c.txt +0 -0
- idmtools_test/inputs/assets/collections/d.txt +0 -0
- idmtools_test/inputs/builder/sweeps.csv +6 -0
- idmtools_test/inputs/builder/sweeps.yaml +8 -0
- idmtools_test/inputs/compsplatform/__init__.py +0 -0
- idmtools_test/inputs/compsplatform/failing_model.py +5 -0
- idmtools_test/inputs/compsplatform/mixed_model.py +10 -0
- idmtools_test/inputs/compsplatform/working_model.py +5 -0
- idmtools_test/inputs/configuration/idmtools_test.ini +71 -0
- idmtools_test/inputs/custom/Eradication.exe +0 -0
- idmtools_test/inputs/custom/Local_Migration.bin +0 -0
- idmtools_test/inputs/custom/Local_Migration.bin.json +12 -0
- idmtools_test/inputs/custom/Regional_Migration.bin +0 -0
- idmtools_test/inputs/custom/Regional_Migration.bin.json +12 -0
- idmtools_test/inputs/custom/Zambia_30arcsec_air_temperature_daily.bin +0 -0
- idmtools_test/inputs/custom/Zambia_30arcsec_air_temperature_daily.bin.json +26 -0
- idmtools_test/inputs/custom/Zambia_30arcsec_rainfall_daily.bin +0 -0
- idmtools_test/inputs/custom/Zambia_30arcsec_rainfall_daily.bin.json +26 -0
- idmtools_test/inputs/custom/Zambia_30arcsec_relative_humidity_daily.bin +0 -0
- idmtools_test/inputs/custom/Zambia_30arcsec_relative_humidity_daily.bin.json +26 -0
- idmtools_test/inputs/custom/campaign.json +95384 -0
- idmtools_test/inputs/custom/config.json +943 -0
- idmtools_test/inputs/custom/custom_reports.json +163 -0
- idmtools_test/inputs/custom/demo.json +1258 -0
- idmtools_test/inputs/custom/emodules_map.json +9 -0
- idmtools_test/inputs/custom/reporter_plugins/libReportMalariaFiltered.dll +0 -0
- idmtools_test/inputs/custom/reporter_plugins/libSpatialReportMalariaFiltered.dll +0 -0
- idmtools_test/inputs/custom/reporter_plugins/libreporteventcounter.dll +0 -0
- idmtools_test/inputs/duplicated_model/exe/Eradication +0 -0
- idmtools_test/inputs/duplicated_model/f1 +0 -0
- idmtools_test/inputs/emod/Eradication.exe +0 -0
- idmtools_test/inputs/emod_files/campaign.json +21 -0
- idmtools_test/inputs/emod_files/config.json +125 -0
- idmtools_test/inputs/emod_files/demographics.json +81 -0
- idmtools_test/inputs/fakemodels/AnotherOne +0 -0
- idmtools_test/inputs/fakemodels/Eradication +0 -0
- idmtools_test/inputs/fakemodels/Eradication-2.11.custom.exe +0 -0
- idmtools_test/inputs/fakemodels/Eradication.exe +0 -0
- idmtools_test/inputs/files/campaign.json +21 -0
- idmtools_test/inputs/files/config.json +119 -0
- idmtools_test/inputs/files/demographics.json +82 -0
- idmtools_test/inputs/files/hello.txt +1 -0
- idmtools_test/inputs/id_files/slurm.example_python_experiment.id +1 -0
- idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Brazil_Central_West_Brazil_Central_West_2.5arcmin_air_temperature_daily.bin +0 -0
- idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Brazil_Central_West_Brazil_Central_West_2.5arcmin_air_temperature_daily.bin.json +26 -0
- idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Brazil_Central_West_Brazil_Central_West_2.5arcmin_demographics.json +559 -0
- idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Brazil_Central_West_Brazil_Central_West_2.5arcmin_rainfall_daily.bin +0 -0
- idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Brazil_Central_West_Brazil_Central_West_2.5arcmin_rainfall_daily.bin.json +26 -0
- idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Brazil_Central_West_Brazil_Central_West_2.5arcmin_relative_humidity_daily.bin +0 -0
- idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Brazil_Central_West_Brazil_Central_West_2.5arcmin_relative_humidity_daily.bin.json +26 -0
- idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Eradication +0 -0
- idmtools_test/inputs/malaria_brazil_central_west_spatial/Assets/Eradication.exe +0 -0
- idmtools_test/inputs/malaria_brazil_central_west_spatial/campaign.json +4 -0
- idmtools_test/inputs/malaria_brazil_central_west_spatial/config.json +667 -0
- idmtools_test/inputs/malaria_brazil_central_west_spatial/malaria_brazil_central_west_spatial-ERA5Input_demo.csv +37 -0
- idmtools_test/inputs/python/Assets/MyExternalLibrary/__init__.py +0 -0
- idmtools_test/inputs/python/Assets/MyExternalLibrary/functions.py +15 -0
- idmtools_test/inputs/python/Assets/MyLib/functions.py +2 -0
- idmtools_test/inputs/python/Assets/MyLib/temp.py +271 -0
- idmtools_test/inputs/python/Assets/__init__.py +0 -0
- idmtools_test/inputs/python/__init__.py +0 -0
- idmtools_test/inputs/python/folder_dup_file/__init__.py +0 -0
- idmtools_test/inputs/python/folder_dup_file/model1.py +19 -0
- idmtools_test/inputs/python/hello_world.py +1 -0
- idmtools_test/inputs/python/model.py +26 -0
- idmtools_test/inputs/python/model1.py +20 -0
- idmtools_test/inputs/python/model3.py +21 -0
- idmtools_test/inputs/python/newmodel2.py +20 -0
- idmtools_test/inputs/python/output_generator/generate.py +39 -0
- idmtools_test/inputs/python/realpath_verify.py +6 -0
- idmtools_test/inputs/python/ye_seir_model/Assets/MyExternalLibrary/Python36/dtk_generic_intrahost.pyd +0 -0
- idmtools_test/inputs/python/ye_seir_model/Assets/MyExternalLibrary/Python36/dtk_nodedemog.pyd +0 -0
- idmtools_test/inputs/python/ye_seir_model/Assets/MyExternalLibrary/Python37/dtk_generic_intrahost.pyd +0 -0
- idmtools_test/inputs/python/ye_seir_model/Assets/MyExternalLibrary/Python37/dtk_nodedemog.pyd +0 -0
- idmtools_test/inputs/python/ye_seir_model/Assets/SEIR_model.py +252 -0
- idmtools_test/inputs/python/ye_seir_model/Assets/SEIR_model_slurm.py +242 -0
- idmtools_test/inputs/python/ye_seir_model/Assets/config_sim.py +48 -0
- idmtools_test/inputs/python/ye_seir_model/Assets/custom_csv_analyzer.py +133 -0
- idmtools_test/inputs/python/ye_seir_model/Assets/python.sh +4 -0
- idmtools_test/inputs/python/ye_seir_model/Assets/requirements.txt +4 -0
- idmtools_test/inputs/python/ye_seir_model/Assets/templates/config.json +68 -0
- idmtools_test/inputs/python/ye_seir_model/Assets/templates/demographics_template.json +44 -0
- idmtools_test/inputs/python/ye_seir_model/__init__.py +0 -0
- idmtools_test/inputs/python_experiments/__init__.py +0 -0
- idmtools_test/inputs/python_experiments/model.py +10 -0
- idmtools_test/inputs/r/model1.R +1 -0
- idmtools_test/inputs/r/ncov_analysis/individual_dynamics_estimates/estimate_incubation_period.R +89 -0
- idmtools_test/inputs/regression/107/Assets/__init__.py +0 -0
- idmtools_test/inputs/regression/107/Assets/model.py +1 -0
- idmtools_test/inputs/regression/107/__init__.py +0 -0
- idmtools_test/inputs/regression/125/Assets/__init__.py +0 -0
- idmtools_test/inputs/regression/125/Assets/model.py +1 -0
- idmtools_test/inputs/regression/125/Assets2/__init__.py +0 -0
- idmtools_test/inputs/regression/125/Assets2/dir1/__init__.py +0 -0
- idmtools_test/inputs/regression/125/Assets2/dir1/model.py +1 -0
- idmtools_test/inputs/regression/125/Assets2/dir2/__init__.py +0 -0
- idmtools_test/inputs/regression/125/Assets2/dir2/model.py +1 -0
- idmtools_test/inputs/regression/125/__init__.py +0 -0
- idmtools_test/inputs/regression/__init__.py +0 -0
- idmtools_test/inputs/scheduling/hpc/WorkOrder.json +7 -0
- idmtools_test/inputs/scheduling/slurm/WorkOrder.json +11 -0
- idmtools_test/inputs/scheduling/slurm/WorkOrder1.json +11 -0
- idmtools_test/inputs/scheduling/slurm/WorkOrder2.json +13 -0
- idmtools_test/inputs/scheduling/slurm/commandline_model.py +22 -0
- idmtools_test/inputs/serialization/Eradication.exe +0 -0
- idmtools_test/inputs/serialization/single_node_demographics.json +82 -0
- idmtools_test/inputs/singularity/alpine_simple/Singularity.def +28 -0
- idmtools_test/inputs/singularity/alpine_simple/run_model.py +41 -0
- idmtools_test/inputs/singularity/alpine_template/Singularity.jinja +22 -0
- idmtools_test/test_precreate_hooks.py +25 -0
- idmtools_test/utils/__init__.py +0 -0
- idmtools_test/utils/cli.py +41 -0
- idmtools_test/utils/common_experiments.py +79 -0
- idmtools_test/utils/comps.py +152 -0
- idmtools_test/utils/decorators.py +208 -0
- idmtools_test/utils/execute_operations/__init__.py +0 -0
- idmtools_test/utils/execute_operations/experiment_operations.py +237 -0
- idmtools_test/utils/execute_operations/simulate_operations.py +368 -0
- idmtools_test/utils/itest_with_persistence.py +25 -0
- idmtools_test/utils/operations/__init__.py +0 -0
- idmtools_test/utils/operations/experiment_operations.py +64 -0
- idmtools_test/utils/operations/simulation_operations.py +114 -0
- idmtools_test/utils/shared_functions.py +25 -0
- idmtools_test/utils/test_asset.py +89 -0
- idmtools_test/utils/test_asset_collection.py +223 -0
- idmtools_test/utils/test_execute_platform.py +137 -0
- idmtools_test/utils/test_platform.py +94 -0
- idmtools_test/utils/test_task.py +69 -0
- idmtools_test/utils/utils.py +146 -0
- idmtools_test-0.0.2.dist-info/METADATA +48 -0
- idmtools_test-0.0.2.dist-info/RECORD +139 -0
- idmtools_test-0.0.2.dist-info/entry_points.txt +9 -0
- idmtools_test-0.0.2.dist-info/licenses/LICENSE.TXT +3 -0
- idmtools_test-0.0.0.dev0.dist-info/METADATA +0 -41
- idmtools_test-0.0.0.dev0.dist-info/RECORD +0 -5
- {idmtools_test-0.0.0.dev0.dist-info → idmtools_test-0.0.2.dist-info}/WHEEL +0 -0
- {idmtools_test-0.0.0.dev0.dist-info → idmtools_test-0.0.2.dist-info}/top_level.txt +0 -0

idmtools_test/utils/comps.py

@@ -0,0 +1,152 @@
import functools
import os
import subprocess
import sys
import tempfile
from pathlib import PurePath
from COMPS import Data
from COMPS.Data import AssetCollection as CompsAssetCollection
from COMPS.Data import QueryCriteria, Simulation as COMPSSimulation, Experiment as COMPSExperiment
from idmtools import __version__ as core_version
from idmtools.builders import SimulationBuilder
from idmtools.core.enums import EntityStatus
from idmtools.entities.experiment import Experiment
from idmtools.entities.iplatform import IPlatform
from idmtools_models.templated_script_task import get_script_wrapper_unix_task
from idmtools_platform_comps import __version__ as platform_comps_version

CURRENT_DIR = PurePath(__file__).parent
COMPS_VERSION = platform_comps_version.replace('nightly.0', 'nightly')
if COMPS_VERSION.endswith(".0") and len(COMPS_VERSION) == 7:
    COMPS_VERSION = ".".join(COMPS_VERSION.split(".")[:3])
CORE_VERSION = core_version.replace('nightly.0', 'nightly')
if CORE_VERSION.endswith(".0") and len(CORE_VERSION) == 7:
    CORE_VERSION = ".".join(CORE_VERSION.split(".")[:3])
COMPS_PACKAGE_FILENAME = f"idmtools_platform_comps-{COMPS_VERSION}.tar.gz"
CORE_PACKAGE_FILENAME = f"idmtools-{CORE_VERSION}.tar.gz"
COMPS_LOCAL_PACKAGE = CURRENT_DIR.parent.parent.parent.joinpath("idmtools_platform_comps", "dist", COMPS_PACKAGE_FILENAME)
CORE_LOCAL_PACKAGE = CURRENT_DIR.parent.parent.parent.joinpath("idmtools_core", "dist", CORE_PACKAGE_FILENAME)
COMPS_LOAD_SSMT_PACKAGES_WRAPPER = f"""
set -o noglob
echo Running $@

echo after install of newer idmtools

export PYTHONPATH=$(pwd)/Assets/site-packages:$(pwd)/Assets/:$PYTHONPATH

echo "Installing updated versions of idmtools packages"
pip install Assets/{COMPS_PACKAGE_FILENAME} --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple
pip install Assets/{CORE_PACKAGE_FILENAME} --force --index-url=https://packages.idmod.org/api/pypi/pypi-production/simple

$@
"""


@functools.lru_cache(1)
def write_wrapper_script():
    f = tempfile.NamedTemporaryFile(suffix='.sh', mode='wb', delete=False)
    f.write(COMPS_LOAD_SSMT_PACKAGES_WRAPPER.replace("\r", "").encode('utf-8'))
    f.flush()
    return f.name


def load_library_dynamically(item, platform: IPlatform):
    fn = write_wrapper_script()
    for file in [COMPS_LOCAL_PACKAGE, CORE_LOCAL_PACKAGE]:
        item.assets.add_asset(file)
    item.task = get_script_wrapper_unix_task(task=item.task, template_content=COMPS_LOAD_SSMT_PACKAGES_WRAPPER)
    item.task.gather_common_assets()
    item.task.pre_creation(item, platform)
    # item.assets.add_assets(item.task.gather_common_assets(), fail_on_duplicate=False)


def run_package_dists():
    mk = "pymake" if sys.platform == "win32" else "make"
    print("Running Dist for core")
    subprocess.call(f"{mk} dist", cwd=CORE_LOCAL_PACKAGE.parent.parent, shell=True)
    print("Running Dist for comps")
    subprocess.call(f"{mk} dist", cwd=COMPS_LOCAL_PACKAGE.parent.parent, shell=True)


def get_asset_collection_id_for_simulation_id(sim_id):
    """
    Obtains COMPS AssetCollection id from a given simulation id.
    :param sim_id: A simulation id to retrieve assetcollection id from
    :return: COMPS AssetCollection id
    """
    simulation = COMPSSimulation.get(sim_id, query_criteria=QueryCriteria().select(
        ['id', 'experiment_id']).select_children(
        ["files", "configuration"]))

    if simulation.configuration is None:
        # check experiment
        experiment = COMPSExperiment.get(simulation.experiment_id, query_criteria=QueryCriteria().select(
            ['id']).select_children("configuration")
        )
        collection_id = experiment.configuration.asset_collection_id
    else:
        collection_id = simulation.configuration.asset_collection_id
    return collection_id


def get_asset_collection_by_id(collection_id, query_criteria=None) -> CompsAssetCollection:
    """
    Obtains COMPS AssetCollection from a given collection id.
    :param collection_id: An asset collection id to retrieve assetcollection from
    :param query_criteria: query_criteria
    :return: COMPS AssetCollection
    """
    query_criteria = query_criteria or QueryCriteria().select_children('assets')
    try:
        return Data.AssetCollection.get(collection_id, query_criteria)
    except (RuntimeError, ValueError):
        return None


def sims_from_experiment(e):
    o = e
    if isinstance(e, Experiment):
        o = e.get_platform_object()
    return o.get_simulations(QueryCriteria().select(['id', 'state']).select_children('hpc_jobs'))


def workdirs_from_simulations(sims):
    return {str(sim.id): sim.hpc_jobs[-1].working_directory for sim in sims if sim.hpc_jobs}


def get_simulation_path(simulation):
    path = workdirs_from_simulations([simulation])[str(simulation.id)]
    return path


def get_simulation_by_id(sim_id, query_criteria=None):
    return COMPSSimulation.get(id=sim_id, query_criteria=query_criteria)


def assure_running_then_wait_til_done(tst, experiment):
    tst.platform.run_items(items=[experiment])
    tst.platform.refresh_status(item=experiment)
    tst.assertFalse(experiment.done)
    tst.assertTrue(all([s.status == EntityStatus.RUNNING for s in experiment.simulations]))
    # Wait till done
    import time
    start_time = time.time()
    while time.time() - start_time < 180:
        tst.platform.refresh_status(item=experiment)
        if experiment.done:
            break
        time.sleep(3)
    tst.assertTrue(experiment.done)


def setup_test_with_platform_and_simple_sweep(tst):
    from idmtools.core.platform_factory import Platform
    tst.platform = Platform('SlurmStage')
    print(tst.case_name)

    def setP(simulation, p):
        return simulation.task.set_parameter("P", p)

    tst.builder = SimulationBuilder()
    tst.builder.add_sweep_definition(setP, [1, 2, 3])
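
For orientation, a minimal sketch of how a test might consume these helpers, assuming access to the SlurmStage environment referenced above; the test class, its case_name attribute, and the experiment passed to check_experiment are illustrative and not part of the package:

import unittest

from idmtools_test.utils.comps import (
    assure_running_then_wait_til_done,
    get_asset_collection_id_for_simulation_id,
    setup_test_with_platform_and_simple_sweep,
    sims_from_experiment,
)


class ExampleCompsTest(unittest.TestCase):
    def setUp(self):
        # The helper prints tst.case_name, so define it before calling it.
        self.case_name = self._testMethodName
        # Attaches Platform('SlurmStage') and a three-value sweep builder to the test.
        setup_test_with_platform_and_simple_sweep(self)

    def check_experiment(self, experiment):
        # 'experiment' is a hypothetical idmtools Experiment built by the test.
        # Run it, poll COMPS until it is done, then confirm every simulation
        # resolves to an asset collection.
        assure_running_then_wait_til_done(self, experiment)
        for sim in sims_from_experiment(experiment):
            self.assertIsNotNone(get_asset_collection_id_for_simulation_id(sim.id))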

idmtools_test/utils/decorators.py

@@ -0,0 +1,208 @@
import functools
import shutil

import tempfile

import os
import platform
import time
import unittest
from functools import wraps
from logging import getLogger
from typing import Callable, Union, Any, Optional
import pytest
from idmtools import IdmConfigParser
from idmtools_test import COMMON_INPUT_PATH
from idmtools_test.utils.utils import is_global_configuration_enabled

# The following decorators are used to control tests
# To allow for different use cases (dev, test, packaging, etc)
# we have switches that should allow a rich set of possible
# test combinations
#
# The default tests run with all the optional tests set to off (except Linux since that is auto-detected)
# test-external runs any tests that require external communication
# This is currently any COMPS-related test
# test-docker runs any tests that depend on docker locally (mostly local run)
# test-all runs all tests

logger = getLogger(__name__)

linux_only = unittest.skipIf(
    not platform.system() in ["Linux", "Darwin"], 'No Tests that are meant for linux'
)

windows_only = unittest.skipIf(
    platform.system() in ["Linux", "Darwin"], 'No Tests that are meant for Windows'
)

skip_if_global_configuration_is_enabled = pytest.mark.skipif(is_global_configuration_enabled(), reason=f"Either {IdmConfigParser.get_global_configuration_name()} is set or the environment variable 'IDMTOOLS_CONFIG_FILE' is set")
# this is mainly for docker in docker environments but also applies to environments
# where you must use the local ip address for connectivity vs localhost
skip_api_host = unittest.skipIf(os.getenv("API_HOST", None) is not None, "API_HOST is defined")


def run_test_in_n_seconds(n: int, print_elapsed_time: bool = False) -> Callable:
    """
    Decorator that asserts a test will run in N seconds. If it does not, it will fail.

    THIS DOES NOT MANAGE processes and it will not stop long-running processes. It simply times a test and ensures it
    ran in less than N seconds.

    Args:
        n (int): Number of seconds that is considered acceptable
        print_elapsed_time: Will print the function name and elapsed time at the end of each function it decorates

    Returns:
        (Callable) : Wrapped function
    """

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            start = time.time()
            ret = func(*args, **kwargs)
            end = time.time()
            if print_elapsed_time:
                print(f"{func.__name__} took {end - start}s to run!")
            args[0].assertLess(end - start, n, f"{func.__name__} took {end - start}s to run!")
            return ret

        return wrapper

    return decorator


def default_fixture_pickle_save(filename, args, pickler=None, write_mode='bw'):
    if pickler is None:
        import pickle
        pickler = pickle
    with open(filename, write_mode) as o:
        try:
            pickler.dump(args, o)
        except TypeError as e:
            print("Unpicklable object!")
            print(e)


def dump_function_input_for_test(output_directory: Union[str, Callable[[str, Callable], str]] = None,
                                 capture_output: bool = False, include_module_in_path: bool = True,
                                 custom_pickler=None, save_extension: str = '.pkl', write_mode: str = 'bw',
                                 custom_save_output_func: Optional[Callable[[str, Any], Any]] = None):
    """

    Args:
        output_directory: Output directory of fixture
        capture_output: Capture output of function as well
        include_module_in_path: When using the default path scheme, should the module path be part of the
            destination name
        custom_pickler: When using default save function, do you want to use a custom pickler or alternative like json
        save_extension: Save extension. Default to '.pkl'
        custom_save_output_func: Custom save function

    Examples:
        Using with a class:
        ```
        class Experiment:

            @classmethod
            @dump_function_input_for_test()
            def save(cls):
                pass

            @dump_function_input_for_test()
            def normal_funct(self, a):
                pass
        ```
    Returns:

    """
    # if no output directory, set to fixture path
    if output_directory is None:
        output_directory = COMMON_INPUT_PATH

    def decorate(func):
        # create directory for function output
        # if we have a function for directory naming, call it
        if callable(output_directory):
            out = output_directory(COMMON_INPUT_PATH, func)
        else:
            fname = f'{func.__module__}.{func.__name__}' if include_module_in_path else func.__name__
            fname = fname.replace('.', os.path.sep)
            out = os.path.abspath(os.path.join(output_directory, fname))

        # create our paths to input/output files
        out_directory = os.path.join(out, 'output')
        input_directory = os.path.join(out, 'input')

        @wraps(func)
        def wrapper(*args, **kwargs):
            call_time = int(round(time.time() * 1000))
            # if output directory is a
            # save keywords and args to files
            os.makedirs(input_directory, exist_ok=True)
            input_file_out = os.path.join(input_directory, f'{call_time}{save_extension}')
            if custom_save_output_func:
                custom_save_output_func(input_file_out, (args, kwargs))
            else:
                default_fixture_pickle_save(input_file_out, (args, kwargs), custom_pickler, write_mode)
            result = func(*args, **kwargs)
            if capture_output:
                os.makedirs(out_directory, exist_ok=True)
                output_file_out = os.path.join(out_directory, f'{call_time}{save_extension}')
                if custom_save_output_func:
                    custom_save_output_func(output_file_out, result)
                else:
                    default_fixture_pickle_save(output_file_out, result, custom_pickler, write_mode)
            return result

        return wrapper

    return decorate


def run_in_temp_dir(func):

    @wraps(func)
    def wrapper(*args, **kwargs):
        current_dir = os.getcwd()
        temp_dir = tempfile.mkdtemp()
        try:
            logger.debug(f"Running function in: {temp_dir}")
            os.chdir(temp_dir)
            func(*args, **kwargs)
        finally:
            os.chdir(current_dir)
            try:
                shutil.rmtree(temp_dir)
            except:
                pass

    return wrapper


def warn_amount_ssmt_image_decorator(func):
    """
    A decorator to warn developers about possible failures due to SSMT.

    Args:
        func: Function to wrap

    Returns:
        Wrapped function
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.error(
                "These tests can fail due to changes to idmtools-core, idmtools-models, or idmtools-platform-comps. "
                "If you have changed the code in those libraries, you will need to build a new ssmt image, publish to staging, "
                "then update idmtools_platform_comps/tests/idmtools.ini by uncommenting the 'docker_image' options. You should "
                "change the value to the new version of the SSMT image. COMPS will automatically pull the new image."
            )
            raise e

    return wrapper
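
A minimal sketch of how these decorators could be applied in a unittest.TestCase; the class and test names below are hypothetical:

import unittest

from idmtools_test.utils.decorators import (
    linux_only,
    run_in_temp_dir,
    run_test_in_n_seconds,
    windows_only,
)


class ExampleDecoratedTests(unittest.TestCase):
    @linux_only
    def test_posix_only_behavior(self):
        # Skipped automatically on Windows.
        self.assertTrue(True)

    @windows_only
    def test_windows_only_behavior(self):
        # Skipped automatically on Linux and macOS.
        self.assertTrue(True)

    @run_test_in_n_seconds(5, print_elapsed_time=True)
    def test_fast_enough(self):
        # Fails if the body takes longer than 5 seconds; the decorator only
        # times the call, it does not stop a slow test.
        self.assertEqual(sum(range(1000)), 499500)

    @run_in_temp_dir
    def test_writes_scratch_files(self):
        # Runs with the working directory switched to a throwaway directory
        # that is removed afterwards.
        with open("scratch.txt", "w") as fh:
            fh.write("temporary output")
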
File without changes

idmtools_test/utils/execute_operations/experiment_operations.py

@@ -0,0 +1,237 @@
import hashlib
import io
import json
import os
import shutil
from dataclasses import field, dataclass
from functools import partial
from logging import getLogger, DEBUG
from threading import Lock
from pathlib import Path
from typing import Any, List, Type, Dict, TYPE_CHECKING, Optional
from idmtools.assets import Asset, AssetCollection
from idmtools.core import EntityStatus, ItemType
from idmtools.core import IDMTOOLS_USER_HOME
from idmtools.entities.experiment import Experiment
from idmtools.entities.iplatform_ops.iplatform_experiment_operations import IPlatformExperimentOperations
from idmtools.utils.file import file_content_to_generator
from idmtools.utils.json import IDMJSONEncoder

if TYPE_CHECKING:  # pragma: no cover
    from idmtools_test.utils.test_execute_platform import TestExecutePlatform

logger = getLogger(__name__)
current_directory = os.path.dirname(os.path.realpath(__file__))
data_path = os.path.abspath(os.path.join(current_directory, "..", "..", "data"))
EXPERIMENTS_LOCK = Lock()


class ExperimentDict(dict):
    pass


@dataclass
class TestExecutePlatformExperimentOperation(IPlatformExperimentOperations):
    platform: 'TestExecutePlatform'
    platform_type: Type = field(default=ExperimentDict)
    experiments: Dict[str, Experiment] = field(default_factory=dict, compare=False, metadata={"pickle_ignore": True})

    def get(self, experiment_id: str, **kwargs) -> Any:
        exp_path = self.get_experiment_path(experiment_id)
        experiment_path = os.path.join(exp_path, "experiment.json")
        if not os.path.exists(experiment_path):
            logger.error(f"Cannot find experiment with id {experiment_id}")
            raise FileNotFoundError(f"Cannot find experiment with id {experiment_id}")

        simulation_metadata_path = os.path.join(exp_path, "simulation_index.json")
        if not os.path.exists(simulation_metadata_path):
            logger.error(f"Cannot find simulation index for experiment with id {experiment_id}")
            raise FileNotFoundError(f"Cannot find simulation index for experiment with id {experiment_id}")

        logger.info(f"Loading experiment metadata from {experiment_path}")
        with open(experiment_path, 'r') as metadata_in:
            metadata = json.load(metadata_in)
        logger.info(f"Loading simulation metadata from {simulation_metadata_path}")
        with open(simulation_metadata_path, 'r') as metadata_in:
            metadata['simulations'] = json.load(metadata_in)
        return ExperimentDict(metadata)

    def platform_create(self, experiment: Experiment, **kwargs) -> Any:
        if logger.isEnabledFor(DEBUG):
            logger.debug('Creating Experiment')
        EXPERIMENTS_LOCK.acquire()
        self.experiments[experiment.uid] = experiment
        EXPERIMENTS_LOCK.release()
        logger.debug(f"Created Experiment {experiment.uid}")
        self.send_assets(experiment, **kwargs)
        return experiment

    def get_children(self, experiment: ExperimentDict, **kwargs) -> List[Any]:
        children = []
        for sim in experiment['simulations']:
            children.append(self.platform.get_item(sim, ItemType.SIMULATION,
                                                   experiment_id=experiment['_uid'],
                                                   raw=True,
                                                   **kwargs
                                                   )
                            )
        return children

    def get_parent(self, experiment: Any, **kwargs) -> Experiment:
        pass

    def platform_run_item(self, experiment: Experiment, **kwargs):
        exp_path = self.get_experiment_path(experiment.uid)
        path = os.path.join(exp_path, "experiment.json")
        os.makedirs(exp_path, exist_ok=True)
        if not os.path.exists(path):
            with open(path, 'w') as out:
                out.write(json.dumps(experiment.to_dict(), cls=IDMJSONEncoder))
        for sim in experiment.simulations:
            if sim.status in [None, EntityStatus.CREATED]:
                self.platform._simulations.run_item(sim)

    def get_experiment_path(self, experiment_id: str) -> str:
        """
        Get path to experiment directory

        Args:
            experiment_id:

        Returns:

        """
        return os.path.join(self.platform.execute_directory, str(experiment_id))

    @staticmethod
    def download_asset(path):
        logger.info(f"Downloading asset from {path}")
        if not os.path.exists(path):
            logger.error(f"Cannot find the asset {path}")
            raise FileNotFoundError(f"Cannot find the asset {path}")
        with open(path, 'rb') as i:
            while True:
                res = i.read(128)
                if res:
                    yield res
                else:
                    break

    def send_assets(self, experiment: Experiment, **kwargs):
        # calculate total md5 of all files
        md5 = hashlib.md5()
        path = os.path.join(self.platform.execute_directory, str(experiment.uid), "Assets")
        if logger.isEnabledFor(DEBUG):
            logger.debug(f"Creating {path}")
        os.makedirs(path, exist_ok=True)
        for asset in experiment.assets:
            remote_path = os.path.join(path, asset.relative_path) if asset.relative_path else path
            remote_path = os.path.join(remote_path, asset.filename)
            if asset.absolute_path:
                if logger.isEnabledFor(DEBUG):
                    logger.debug(f"Copying {asset.absolute_path} to {remote_path}")
                with open(asset.absolute_path) as ifi:
                    self.__calculate_partial_md5(ifi, md5)
                shutil.copy(asset.absolute_path, remote_path)
            else:
                ifi = io.BytesIO(asset.content.encode('utf-8') if isinstance(asset.content, str) else asset.content)
                self.__calculate_partial_md5(ifi, md5)
                if logger.isEnabledFor(DEBUG):
                    logger.debug(f"Writing {asset.absolute_path} to {remote_path}")
                with open(remote_path, 'wb') as out:
                    if isinstance(asset.content, str):
                        out.write(asset.content.encode('utf-8'))
                    else:
                        out.write(asset.content)

        experiment.assets.platform_id = md5.hexdigest()

    def __calculate_partial_md5(self, ifi, md5):
        while True:
            chunk = ifi.read(8196)
            if not chunk:
                break
            else:
                if isinstance(chunk, bytes):
                    md5.update(chunk)
                else:
                    md5.update(chunk.encode('utf-8'))

    def refresh_status(self, experiment: Experiment, **kwargs):
        if logger.isEnabledFor(DEBUG):
            logger.debug(f'Refreshing status for Experiment: {experiment.uid}')
        for simulation in self.platform._simulations.simulations.get(experiment.uid):
            for esim in experiment.simulations:
                if esim == simulation:
                    logger.debug(f'Setting {simulation.uid} Status to {simulation.status}')
                    esim.status = simulation.status
                    break

    def list_assets(self, experiment: Experiment, children: bool = False,
                    **kwargs) -> List[Asset]:
        """
        List assets for the experiment

        Args:
            experiment:
            children:
            **kwargs:

        Returns:

        """
        logger.info("Listing assets for experiment")
        assets = []
        asset_path = os.path.join(self.get_experiment_path(experiment.uid), "Assets")
        for root, dirs, files in os.walk(asset_path):
            for file in files:
                fp = os.path.join(asset_path, file)
                asset = Asset(absolute_path=fp, filename=file)
                assets.append(asset)

        if children:
            for sim in experiment.simulations:
                assets.extend(self.platform._simulations.list_assets(sim))
        return assets

    def to_entity(self, data: Dict[Any, Any], parent: Optional[Any] = None, children: bool = True, **kwargs) -> \
            Experiment:
        excluded = ['platform_id', 'item_type', 'frozen', 'simulations']
        experiment = Experiment(**{k: v for k, v in data.items() if k not in excluded})
        experiment.platform_metadata = data
        experiment.task_type = data['tags']['task_type']
        if data['assets']:
            assets = AssetCollection()
            exp_path = os.path.join(self.get_experiment_path(experiment.uid), "Assets")
            for root, dirs, files in os.walk(exp_path):
                for file in files:
                    fp = os.path.abspath(os.path.join(root, file))
                    asset = Asset(absolute_path=fp, filename=file)
                    asset.download_generator_hook = partial(file_content_to_generator, fp)
                    assets.add_asset(asset)
            experiment.assets = assets
        if children:
            experiment.simulations = self.platform.get_children(
                experiment.uid,
                ItemType.EXPERIMENT,
                item=experiment,
                **kwargs
            )

        return experiment

    def platform_modify_experiment(self, experiment: Experiment, regather_common_assets: bool = False, **kwargs) -> Experiment:
        experiment.pre_creation(self.platform, gather_assets=regather_common_assets)
        EXPERIMENTS_LOCK.acquire()
        self.experiments[experiment.uid] = experiment
        EXPERIMENTS_LOCK.release()
        self.send_assets(experiment)
        return experiment

    def post_run_item(self, experiment: Experiment, **kwargs):
        exp_path = self.get_experiment_path(experiment.uid)
        sim_path = Path(exp_path, "simulation_index.json")
        with open(sim_path, "w") as f:
            json.dump([s.id for s in experiment.simulations], f)

        super().post_run_item(experiment, **kwargs)
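
A minimal standalone sketch of the on-disk contract these operations share, namely experiment.json, simulation_index.json, and an Assets directory under the platform's execute_directory; the scratch directory and metadata below are made up for illustration, and the real class works against a TestExecutePlatform instance rather than plain paths:

import hashlib
import json
import os
import tempfile

# Recreate the layout that platform_run_item, post_run_item, and send_assets
# produce, but in a scratch directory instead of a platform execute_directory.
exp_path = os.path.join(tempfile.mkdtemp(), "example-experiment")
os.makedirs(os.path.join(exp_path, "Assets"), exist_ok=True)

# platform_run_item persists the experiment metadata ...
with open(os.path.join(exp_path, "experiment.json"), "w") as fh:
    json.dump({"name": "example", "tags": {"task_type": "illustration"}}, fh)
# ... and post_run_item persists the ordered list of simulation ids.
with open(os.path.join(exp_path, "simulation_index.json"), "w") as fh:
    json.dump(["sim-1", "sim-2"], fh)

# get() merges the two files back into a single ExperimentDict-style mapping.
with open(os.path.join(exp_path, "experiment.json")) as fh:
    metadata = json.load(fh)
with open(os.path.join(exp_path, "simulation_index.json")) as fh:
    metadata["simulations"] = json.load(fh)

# send_assets fingerprints asset content with one rolling md5 (8196-byte reads)
# and stores the digest as assets.platform_id; the same idea in miniature:
md5 = hashlib.md5()
md5.update(b"asset content")
print(metadata["name"], metadata["simulations"], md5.hexdigest())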