idmtools-platform-comps 0.0.0.dev0__py3-none-any.whl → 0.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- idmtools_platform_comps/__init__.py +25 -8
- idmtools_platform_comps/cli/__init__.py +4 -0
- idmtools_platform_comps/cli/cli_functions.py +50 -0
- idmtools_platform_comps/cli/comps.py +492 -0
- idmtools_platform_comps/comps_cli.py +48 -0
- idmtools_platform_comps/comps_operations/__init__.py +6 -0
- idmtools_platform_comps/comps_operations/asset_collection_operations.py +263 -0
- idmtools_platform_comps/comps_operations/experiment_operations.py +569 -0
- idmtools_platform_comps/comps_operations/simulation_operations.py +678 -0
- idmtools_platform_comps/comps_operations/suite_operations.py +228 -0
- idmtools_platform_comps/comps_operations/workflow_item_operations.py +269 -0
- idmtools_platform_comps/comps_platform.py +309 -0
- idmtools_platform_comps/plugin_info.py +168 -0
- idmtools_platform_comps/ssmt_operations/__init__.py +6 -0
- idmtools_platform_comps/ssmt_operations/simulation_operations.py +77 -0
- idmtools_platform_comps/ssmt_operations/workflow_item_operations.py +73 -0
- idmtools_platform_comps/ssmt_platform.py +44 -0
- idmtools_platform_comps/ssmt_work_items/__init__.py +4 -0
- idmtools_platform_comps/ssmt_work_items/comps_work_order_task.py +29 -0
- idmtools_platform_comps/ssmt_work_items/comps_workitems.py +113 -0
- idmtools_platform_comps/ssmt_work_items/icomps_workflowitem.py +71 -0
- idmtools_platform_comps/ssmt_work_items/work_order.py +54 -0
- idmtools_platform_comps/utils/__init__.py +4 -0
- idmtools_platform_comps/utils/assetize_output/__init__.py +4 -0
- idmtools_platform_comps/utils/assetize_output/assetize_output.py +125 -0
- idmtools_platform_comps/utils/assetize_output/assetize_ssmt_script.py +144 -0
- idmtools_platform_comps/utils/base_singularity_work_order.json +6 -0
- idmtools_platform_comps/utils/download/__init__.py +4 -0
- idmtools_platform_comps/utils/download/download.py +178 -0
- idmtools_platform_comps/utils/download/download_ssmt.py +81 -0
- idmtools_platform_comps/utils/download_experiment.py +116 -0
- idmtools_platform_comps/utils/file_filter_workitem.py +519 -0
- idmtools_platform_comps/utils/general.py +358 -0
- idmtools_platform_comps/utils/linux_mounts.py +73 -0
- idmtools_platform_comps/utils/lookups.py +123 -0
- idmtools_platform_comps/utils/package_version.py +489 -0
- idmtools_platform_comps/utils/python_requirements_ac/__init__.py +4 -0
- idmtools_platform_comps/utils/python_requirements_ac/create_asset_collection.py +155 -0
- idmtools_platform_comps/utils/python_requirements_ac/install_requirements.py +109 -0
- idmtools_platform_comps/utils/python_requirements_ac/requirements_to_asset_collection.py +374 -0
- idmtools_platform_comps/utils/python_version.py +40 -0
- idmtools_platform_comps/utils/scheduling.py +154 -0
- idmtools_platform_comps/utils/singularity_build.py +491 -0
- idmtools_platform_comps/utils/spatial_output.py +76 -0
- idmtools_platform_comps/utils/ssmt_utils/__init__.py +6 -0
- idmtools_platform_comps/utils/ssmt_utils/common.py +70 -0
- idmtools_platform_comps/utils/ssmt_utils/file_filter.py +568 -0
- idmtools_platform_comps/utils/sweeping.py +162 -0
- idmtools_platform_comps-0.0.2.dist-info/METADATA +100 -0
- idmtools_platform_comps-0.0.2.dist-info/RECORD +62 -0
- idmtools_platform_comps-0.0.2.dist-info/entry_points.txt +9 -0
- idmtools_platform_comps-0.0.2.dist-info/licenses/LICENSE.TXT +3 -0
- {idmtools_platform_comps-0.0.0.dev0.dist-info → idmtools_platform_comps-0.0.2.dist-info}/top_level.txt +1 -0
- ssmt_image/Dockerfile +52 -0
- ssmt_image/Makefile +21 -0
- ssmt_image/__init__.py +6 -0
- ssmt_image/bootstrap.sh +30 -0
- ssmt_image/build_docker_image.py +161 -0
- ssmt_image/pip.conf +3 -0
- ssmt_image/push_docker_image.py +49 -0
- ssmt_image/requirements.txt +9 -0
- idmtools_platform_comps-0.0.0.dev0.dist-info/METADATA +0 -41
- idmtools_platform_comps-0.0.0.dev0.dist-info/RECORD +0 -5
- {idmtools_platform_comps-0.0.0.dev0.dist-info → idmtools_platform_comps-0.0.2.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,358 @@
|
|
|
1
|
+
"""idmtools general status.
|
|
2
|
+
|
|
3
|
+
Copyright 2021, Bill & Melinda Gates Foundation. All rights reserved.
|
|
4
|
+
"""
|
|
5
|
+
import os
|
|
6
|
+
import re
|
|
7
|
+
import uuid
|
|
8
|
+
import ntpath
|
|
9
|
+
from logging import getLogger, DEBUG
|
|
10
|
+
from typing import List, Dict, Union, Generator, Optional
|
|
11
|
+
from uuid import UUID
|
|
12
|
+
from COMPS import Client
|
|
13
|
+
from COMPS.Data import Simulation, SimulationFile, AssetCollectionFile, WorkItemFile, OutputFileMetadata, Experiment
|
|
14
|
+
from COMPS.Data import AssetCollection as COMPSAssetCollection
|
|
15
|
+
from COMPS.Data.AssetFile import AssetFile
|
|
16
|
+
from COMPS.Data.Simulation import SimulationState
|
|
17
|
+
from COMPS.Data.WorkItem import WorkItemState, WorkItem
|
|
18
|
+
from requests import RequestException
|
|
19
|
+
from idmtools.assets import AssetCollection, Asset
|
|
20
|
+
from idmtools.core import EntityStatus, ItemType
|
|
21
|
+
from idmtools.core.context import get_current_platform
|
|
22
|
+
from idmtools.core.interfaces.ientity import IEntity
|
|
23
|
+
from idmtools.entities.iplatform import IPlatform
|
|
24
|
+
from idmtools.utils.local_os import LocalOS
|
|
25
|
+
|
|
26
|
+
# Path prefix COMPS uses for asset files inside a simulation/work-item directory.
ASSETS_PATH = "Assets\\"
if LocalOS.is_window():
    # Path comparisons in this module are done lower-cased on Windows.
    ASSETS_PATH = ASSETS_PATH.lower()

logger = getLogger(__name__)

# Characters not allowed in COMPS experiment names; clean_experiment_name replaces each with '_'.
chars_to_replace = ['/', '\\', ':', "'", '"', '?', '<', '>', '*', '|', "\0", "(", ")", '`']
clean_names_expr = re.compile(f'[{re.escape("".join(chars_to_replace))}]')
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def fatal_code(e: Exception) -> bool:
    """
    Determine whether retrying should stop, based on the request status code.

    Used as a ``giveup`` predicate for backoff-wrapped COMPS calls.

    Args:
        e: Exception to check

    Returns:
        True if the exception is a request error carrying a 404 status code, otherwise False
    """
    if isinstance(e, RequestException):
        # Robustness fix: Timeout/ConnectionError subclasses of RequestException can
        # have response=None; guard before dereferencing status_code.
        return e.response is not None and e.response.status_code == 404
    return False
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def convert_comps_status(comps_status: SimulationState) -> EntityStatus:
    """
    Map a COMPS simulation state onto the equivalent idmtools entity status.

    Args:
        comps_status: Status in Comps

    Returns:
        EntityStatus
    """
    failed_states = (SimulationState.Canceled, SimulationState.CancelRequested, SimulationState.Failed)
    # Guard-clause style: first match wins, everything else is considered running.
    if comps_status == SimulationState.Succeeded:
        return EntityStatus.SUCCEEDED
    if comps_status in failed_states:
        return EntityStatus.FAILED
    if comps_status == SimulationState.Created:
        return EntityStatus.CREATED
    return EntityStatus.RUNNING
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def convert_comps_workitem_status(comps_status: WorkItemState) -> EntityStatus:
    """
    Convert a COMPS work item status to an IDMTools entity status.

    COMPS work item states for reference:

    Created = 0                # WorkItem has been saved to the database
    CommissionRequested = 5    # WorkItem is ready to be processed by the next available worker of the correct type
    Commissioned = 10          # WorkItem has been commissioned to a worker of the correct type and is beginning execution
    Validating = 30            # WorkItem is being validated
    Running = 40               # WorkItem is currently running
    Waiting = 50               # WorkItem is waiting for dependent items to complete
    ResumeRequested = 60       # Dependent items have completed and WorkItem is ready to be processed by the next available worker of the correct type
    CancelRequested = 80       # WorkItem cancellation was requested
    Canceled = 90              # WorkItem was successfully canceled
    Resumed = 100              # WorkItem has been claimed by a worker of the correct type and is resuming
    Canceling = 120            # WorkItem is in the process of being canceled by the worker
    Succeeded = 130            # WorkItem completed successfully
    Failed = 140               # WorkItem failed

    Args:
        comps_status: Status in Comps

    Returns:
        EntityStatus
    """
    work_item_canceled = (WorkItemState.Canceled, WorkItemState.CancelRequested, WorkItemState.Failed)
    work_item_created = (
        WorkItemState.Created, WorkItemState.Resumed, WorkItemState.CommissionRequested, WorkItemState.Commissioned
    )
    if comps_status == WorkItemState.Succeeded:
        return EntityStatus.SUCCEEDED
    elif comps_status in work_item_canceled:
        return EntityStatus.FAILED
    elif comps_status in work_item_created:
        # BUG FIX: the original used "== work_item_created", comparing the enum to the
        # list itself — always False — so created/commissioned items were reported RUNNING.
        return EntityStatus.CREATED
    else:
        return EntityStatus.RUNNING
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def clean_experiment_name(experiment_name: str) -> str:
    """
    Enforce any COMPS-specific demands on experiment names.

    Disallowed characters (see ``chars_to_replace``) become underscores, non-ASCII
    characters are dropped, and surrounding whitespace is trimmed.

    Args:
        experiment_name: name of the experiment
    Returns:
        The experiment name allowed for use
    """
    sanitized = clean_names_expr.sub("_", experiment_name)
    ascii_only = sanitized.encode("ascii", "ignore").decode('utf8')
    return ascii_only.strip()
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
def get_file_from_collection(platform: IPlatform, collection_id: UUID, file_path: str) -> bytearray:
    """
    Retrieve a file from an asset collection.

    Args:
        platform: Platform object to use
        collection_id: Asset Collection ID
        file_path: Path within collection

    Examples::
        >>> import uuid
        >>> get_file_from_collection(platform, uuid.UUID("fc461146-3b2a-441f-bc51-0bff3a9c1ba0"), "StdOut.txt")

    Returns:
        Object Byte Array. Implicitly returns None when no asset in the collection matches.
    """
    logger.debug(f"Cache miss for {collection_id} {file_path}")

    # retrieve the collection
    ac = platform.get_item(collection_id, ItemType.ASSETCOLLECTION, raw=True)

    # Look for the asset file in the collection
    file_name = ntpath.basename(file_path)
    path = ntpath.dirname(file_path)
    if LocalOS.is_window():
        # Windows matching is done lower-cased (ASSETS_PATH is already lower-cased there)
        file_name = file_name.lower()
        path = path.lower()
    # BUG FIX: the original used path.lstrip(ASSETS_PATH), which strips any run of the
    # *characters* in "Assets\" (A, s, e, t, \) from the left — mangling paths such as
    # "assets\\sub" into "ub". Strip the leading "Assets" directory component explicitly.
    assets_prefix = ASSETS_PATH.rstrip('\\')
    if path == assets_prefix:
        path = ''
    elif path.startswith((assets_prefix + '\\', assets_prefix + '/')):
        path = path[len(assets_prefix) + 1:]
    path = os.path.normpath(path.strip('/'))

    for asset_file in ac.assets:
        if LocalOS.is_window():
            if asset_file.file_name.lower() == file_name and os.path.normpath(asset_file.relative_path or '').lower() == path:
                return asset_file.retrieve()
        else:
            if asset_file.file_name == file_name and os.path.normpath(asset_file.relative_path or '') == path:
                return asset_file.retrieve()
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
def get_file_as_generator(file: Union[SimulationFile, AssetCollectionFile, AssetFile, WorkItemFile, OutputFileMetadata],
                          chunk_size: int = 128, resume_byte_pos: Optional[int] = None) -> \
        Generator[bytearray, None, None]:
    """
    Stream a COMPS file's contents as a generator of byte chunks.

    Args:
        file: File to stream contents through a generator
        chunk_size: Size of chunks to load
        resume_byte_pos: Optional start of download

    Returns:
        Generator for file content
    """
    # OutputFileMetadata exposes its location as .url; the other file types use .uri
    url = file.url if isinstance(file, OutputFileMetadata) else file.uri
    asset_index = url.find('/asset/')
    if asset_index == -1:
        raise RuntimeError('Unable to parse asset url: ' + url)

    # Request a partial download when a resume position was provided
    headers = {'Range': 'bytes=%d-' % resume_byte_pos} if resume_byte_pos else {}
    response = Client.get(url[asset_index:], headers=headers, stream=True)

    yield from response.iter_content(chunk_size=chunk_size)
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
class Workitem(object):
    """Lightweight stand-in object representing a work item for proxy purposes.

    Carries no state or behavior of its own; it exists only so code can type-hint
    against a work-item placeholder.

    Notes:
        - TODO deprecate this if possible
    """
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
def get_asset_for_comps_item(platform: IPlatform, item: IEntity, files: List[str], cache=None,
                             comps_item: Union[Experiment, Workitem, Simulation] = None) -> Dict[str, bytearray]:
    """
    Retrieve assets from an Entity(Simulation, Experiment, WorkItem).

    Requested paths are split into "asset" files (paths starting with "assets",
    case-insensitively), which are fetched from the item's asset collection, and
    "transient" files, which are fetched from the item's output directory.

    Args:
        platform: Platform Object to use
        item: Item to fetch assets from
        files: List of file names to retrieve; an empty list fetches all transient output files
        cache: Cache object to use (must expose a ``memoize()`` decorator factory, e.g. diskcache)
        comps_item: Optional comps item

    Returns:
        Dictionary in structure of filename -> bytearray
    """
    if logger.isEnabledFor(DEBUG):
        logger.debug(f"Loading the files {files} from {item}")

    # Partition requested paths into asset-collection files vs transient output files.
    if len(files) == 0:
        transients = []
        assets = None
    else:
        all_paths = set(files)
        assets = set(path for path in all_paths if path.lower().startswith("assets"))
        transients = all_paths.difference(assets)

    # Create the return dict
    ret = {}

    # Retrieve the transient if any
    # NOTE(review): with files == [], paths=[] is passed to retrieve_output_files —
    # presumably that fetches all output files; confirm against pyCOMPS behavior.
    if isinstance(comps_item, (Simulation, WorkItem)):
        if transients or len(files) == 0:
            transient_files = comps_item.retrieve_output_files(paths=transients)
            # NOTE(review): relies on retrieve_output_files returning results in the
            # same order as the 'transients' set iterates — verify this pairing.
            ret = dict(zip(transients, transient_files))
        else:
            ret = dict()

    # Take care of the assets
    # NOTE(review): comps_item.configuration is accessed even when comps_item could be
    # an Experiment/Workitem; assumes those expose a configuration attribute — confirm.
    if assets and comps_item.configuration:
        # Get the collection_id for the simulation
        collection_id = comps_item.configuration.asset_collection_id
        if collection_id:
            # Retrieve the files
            for file_path in assets:
                # Normalize the separators
                normalized_path = ntpath.normpath(file_path)
                # Memoize per (platform, collection, path) when a cache is supplied.
                if cache is not None:
                    ret[file_path] = cache.memoize()(get_file_from_collection)(platform, collection_id, normalized_path)
                else:
                    ret[file_path] = get_file_from_collection(platform, collection_id, normalized_path)
    return ret
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
def update_item(platform: IPlatform, item_id: str, item_type: ItemType, tags: dict = None, name: str = None):
    """Utility function to update an existing COMPS experiment/simulation/workitem's tags and/or name.

    For example, you can add/update a simulation's tag once its post-process is done to mark the
    simulation with more meaningful text via tag/name.

    Args:
        platform: Platform
        item_id: experiment/simulation/workitem id
        item_type: The type of the object to be retrieved
        tags: tags dict for update
        name: name of experiment/simulation/workitem

    Returns:
        None
    """
    target = platform.get_item(item_id, item_type, raw=True)
    existing_tags = target.tags
    # Merge the requested tags into whatever the item already carries.
    if tags is not None:
        existing_tags.update(tags)
        target.set_tags(existing_tags)
    if name is not None:
        target.name = name
    # Persist both tag and name changes in one save.
    target.save()
|
|
275
|
+
|
|
276
|
+
|
|
277
|
+
def generate_ac_from_asset_md5(file_name: str, asset_md5: Union[str, uuid.UUID], platform: 'IPlatform' = None,
                               tags: dict = None):
    """
    Create and save a COMPS asset collection referencing an existing asset by its md5.

    Args:
        file_name: file name string
        asset_md5: asset md5, as a string or UUID
          (FIX: the original annotation was the invalid list literal ``[str, uuid.UUID]``)
        platform: Platform object; defaults to the current platform when None
        tags: tags dict for asset collection; 'Name' and 'md5' entries are always set
          (note: a caller-supplied dict is mutated in place)

    Returns:
        idmtools AssetCollection entity wrapping the newly created COMPS AssetCollection
    """
    # Always stamp the collection with the file name and md5 so it can be located later.
    if tags is None:
        tags = {'Name': file_name, 'md5': asset_md5}
    else:
        tags['Name'] = file_name
        tags['md5'] = asset_md5

    if platform is None:
        platform = get_current_platform()

    ac = COMPSAssetCollection()
    ac.set_tags(tags)
    # Reference the existing server-side asset by checksum; no file content is uploaded.
    acf = AssetCollectionFile(file_name=file_name, md5_checksum=asset_md5)
    ac.add_asset(acf)
    ac.save()
    print('done - created AC ' + str(ac.id))
    asset_collection: AssetCollection = platform._assets.to_entity(ac)
    return asset_collection
|
|
307
|
+
|
|
308
|
+
|
|
309
|
+
def generate_ac_from_asset_md5_file(file_path: str):
    """
    Build an asset collection from a checksum file on disk.

    The file is expected to contain at least three colon-separated fields, where the
    first is the file name and the third is the md5 checksum.

    Args:
        file_path : file path

    Returns:
        COMPS AssetCollection, or None when the file is missing/unreadable/malformed
        or collection creation fails
    """
    # Read the checksum file, tolerating missing or unreadable files.
    try:
        with open(file_path, "r") as fh:
            raw = fh.read()
    except FileNotFoundError:
        logger.debug(f"Error: The file {file_path} was not found.")
        return None
    except IOError:
        logger.debug(f"Error: Could not read the file {file_path}.")
        return None

    # Validate the expected colon-separated layout.
    parts = raw.split(':')
    if len(parts) < 3:
        logger.debug("Error: The file's content is not in the expected format.")
        return None

    # Delegate creation; surface any failure as None rather than raising.
    try:
        return generate_ac_from_asset_md5(parts[0], parts[2])
    except Exception as exc:
        logger.debug(f"An error occurred while generating AC from asset ID: {exc}")
        return None
|
|
346
|
+
|
|
347
|
+
|
|
348
|
+
def save_sif_asset_md5_from_ac_id(ac_id: str):
    """
    Persist the filename and md5 of the first asset in a singularity asset collection.

    Args:
        ac_id: asset collection id
    """
    from COMPS.Data import QueryCriteria
    # Fetch the collection with its child assets populated.
    criteria = QueryCriteria().select_children(['assets'])
    comps_ac = COMPSAssetCollection.get(ac_id, criteria)
    first_asset = comps_ac.assets[0]
    sif_asset = Asset(filename=first_asset.file_name, checksum=first_asset.md5_checksum)
    # Save the asset filename and md5 checksum to local file
    sif_asset.save_md5_checksum()
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
"""idmtools set linux mounts.
|
|
2
|
+
|
|
3
|
+
Copyright 2021, Bill & Melinda Gates Foundation. All rights reserved.
|
|
4
|
+
"""
|
|
5
|
+
import os
|
|
6
|
+
from COMPS import AuthManager
|
|
7
|
+
from COMPS.Data import Simulation
|
|
8
|
+
from typing import TYPE_CHECKING, List, Dict
|
|
9
|
+
|
|
10
|
+
if TYPE_CHECKING: # pragma: no cover
|
|
11
|
+
from idmtools.entities.iplatform import IPlatform
|
|
12
|
+
|
|
13
|
+
# COMPS environments treated as linux hosts by the mount helpers below (compared upper-cased).
DEFAULT_ENVIRONMENTS = ["SLURMSTAGE", "CALCULON", "NIBBLER"]
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def set_linux_mounts(platform: 'IPlatform', linux_environment: str = None) -> None:
    """
    For COMPS Platform, check and set linux mounts.

    Args:
        platform: idmtools COMPS Platform
        linux_environment: additional platform environment to treat as linux

    Returns:
        None
    """
    # BUG FIX: copy the defaults; the original bound linux_envs to DEFAULT_ENVIRONMENTS
    # itself and appended to it, permanently growing the module-level list on every call.
    linux_envs = list(DEFAULT_ENVIRONMENTS)
    if linux_environment is not None:
        linux_envs.append(linux_environment.upper())

    if platform.environment.upper() in linux_envs:
        mounts = AuthManager.get_environment_macros(platform.environment)['DOCKER_MOUNTS']
        # DOCKER_MOUNTS is a '|'-separated list of ';'-separated mount specs.
        mounts = {v[0]: v[1:4] for v in [m.split(';') for m in mounts.split('|')]}
        # pretend I'm on Linux and set the Linux mapping environment variables
        for k, v in mounts.items():
            os.environ[k] = ';'.join([v[0], v[2]])
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def clear_linux_mounts(platform: 'IPlatform', linux_environment: str = None) -> None:
    """
    For COMPS Platform, check and clear linux mounts.

    Args:
        platform: idmtools COMPS Platform
        linux_environment: additional platform environment to treat as linux

    Returns:
        None
    """
    # BUG FIX: copy the defaults; the original appended to the shared module-level
    # DEFAULT_ENVIRONMENTS list, mutating it for every later caller.
    linux_envs = list(DEFAULT_ENVIRONMENTS)
    if linux_environment is not None:
        linux_envs.append(linux_environment.upper())

    if platform.environment.upper() in linux_envs:
        mounts = AuthManager.get_environment_macros(platform.environment)['DOCKER_MOUNTS']
        # DOCKER_MOUNTS is a '|'-separated list of ';'-separated mount specs.
        mounts = {v[0]: v[1:4] for v in [m.split(';') for m in mounts.split('|')]}
        # pretend I'm on Linux and clear the Linux mapping environment variables
        for k, v in mounts.items():
            # pop defensively so a clear without a prior matching set cannot raise KeyError
            os.environ.pop(k, None)
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def get_workdir_from_simulations(platform: 'IPlatform', comps_simulations: List[Simulation]) -> Dict[str, str]:
    """
    Get COMPS simulations' working directories.

    Args:
        platform: idmtools COMPS Platform
        comps_simulations: COMPS Simulations

    Returns:
        dictionary with simulation id as key and working directory as value
    """
    # Mount-mapping environment variables must be active while resolving directories.
    set_linux_mounts(platform)
    work_dirs = {}
    for sim in comps_simulations:
        # Simulations that never ran have no hpc_jobs and are skipped.
        if sim.hpc_jobs:
            work_dirs[str(sim.id)] = sim.hpc_jobs[-1].working_directory
    clear_linux_mounts(platform)
    return work_dirs
|
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
"""idmtools comps lookups.
|
|
2
|
+
|
|
3
|
+
Copyright 2021, Bill & Melinda Gates Foundation. All rights reserved.
|
|
4
|
+
"""
|
|
5
|
+
from datetime import datetime, timedelta
|
|
6
|
+
from logging import getLogger
|
|
7
|
+
from typing import List
|
|
8
|
+
import backoff
|
|
9
|
+
from COMPS.Data import Experiment, Simulation, QueryCriteria
|
|
10
|
+
from requests import Timeout, HTTPError
|
|
11
|
+
from idmtools_platform_comps.utils.general import fatal_code
|
|
12
|
+
|
|
13
|
+
logger = getLogger(__name__)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@backoff.on_exception(backoff.constant(1.5), (Timeout, ConnectionError, HTTPError), max_tries=5, giveup=fatal_code)
def get_experiment_by_id(exp_id, query_criteria: QueryCriteria = None) -> Experiment:
    """
    Fetch an experiment by id, retrying transient network failures.

    Args:
        exp_id: experiment id to fetch
        query_criteria: Optional QueryCriteria to refine the lookup

    Returns:
        Experiment with the given id
    """
    experiment = Experiment.get(exp_id, query_criteria=query_criteria)
    return experiment
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@backoff.on_exception(backoff.constant(1.5), (Timeout, ConnectionError, HTTPError), max_tries=5, giveup=fatal_code)
def get_simulation_by_id(sim_id, query_criteria: QueryCriteria = None) -> Simulation:
    """
    Fetch a simulation by id and optional query criteria.

    Wrapped in additional retry logic; used by other lookup methods.

    Args:
        sim_id: simulation id to fetch
        query_criteria: Optional QueryCriteria to search with

    Returns:
        Simulation with ID
    """
    simulation = Simulation.get(id=sim_id, query_criteria=query_criteria)
    return simulation
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def get_all_experiments_for_user(user: str) -> List[Experiment]:
    """
    Returns all the experiments for a specific user.

    Walks backwards in time in adaptive date windows because COMPS caps each query
    at 1000 results and offers no ordering to page with.

    Args:
        user: username to locate

    Returns:
        Experiments for a user
    """
    # COMPS limits the retrieval to 1000 so to make sure we get all experiments for a given user, we need to be clever
    # Also COMPS does not have an order_by so we have to go through all date ranges
    interval = 365  # window size in days; shrinks on full batches, grows on empty ones
    results = {}  # keyed by experiment id so overlapping windows do not duplicate
    end_date = start_date = datetime.today()
    limit_date = datetime.strptime("2014-03-31", '%Y-%m-%d')  # Oldest simulation in COMPS

    while start_date > limit_date:
        start_date = end_date - timedelta(days=interval)
        batch = Experiment.get(query_criteria=QueryCriteria().where(["owner={}".format(user),
                                                                    "date_created<={}".format(
                                                                        end_date.strftime('%Y-%m-%d')),
                                                                    "date_created>={}".format(
                                                                        start_date.strftime('%Y-%m-%d'))]))
        if len(batch) == 1000:
            # We hit a limit, reduce the interval and run again
            # (a batch of exactly 1000 may have been truncated by the server cap)
            interval = interval / 2
            continue

        if len(batch) == 0:
            # Empty window: widen the step to cross quiet periods faster.
            interval *= 2
        else:
            # Add the experiments to the dict
            for e in batch:
                results[e.id] = e

        # Go from there
        end_date = start_date

    return list(results.values())
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def get_simulations_from_big_experiments(experiment_id):
    """
    Get simulation for large experiment. This allows us to pull simulations in chunks.

    Walks forward from the experiment's creation date in adaptive time windows,
    merging results by simulation id.

    Args:
        experiment_id: Experiment id to load

    Returns:
        List of simulations

    Notes:
        - NOTE(review): actually returns a dict_values view, not a list — callers
          relying on list behavior should wrap it; confirm before changing.
    """
    e = get_experiment_by_id(experiment_id)
    start_date = end_date = e.date_created
    import pytz
    # COMPS date_created is timezone-aware; make the stop boundary aware too.
    limit_date = datetime.today().replace(tzinfo=pytz.utc)
    interval = 60  # window size in minutes; halved on errors
    stop_flag = False  # set after one empty window; a second consecutive empty window ends the scan
    results = {}  # keyed by simulation id so overlapping windows do not duplicate
    while start_date < limit_date:
        start_date = end_date + timedelta(minutes=interval)
        try:
            batch = Simulation.get(query_criteria=QueryCriteria()
                                   .select(['id', 'state', 'date_created']).select_children('tags')
                                   .where(["experiment_id={}".format(experiment_id),
                                           "date_created>={}".format(end_date.strftime('%Y-%m-%d %T')),
                                           "date_created<={}".format(start_date.strftime('%Y-%m-%d %T'))])
                                   )
        # NOTE(review): 'as e' shadows the experiment variable 'e' above; harmless
        # since the experiment is not used after the loop starts, but worth renaming.
        except Exception as e:
            logger.exception(e)
            # Shrink the window and retry the same range on failure.
            interval /= 2
            continue

        if not batch:
            if stop_flag:
                # Two consecutive empty windows: assume we are past the last simulation.
                break
            else:
                interval = 120
                stop_flag = True
        else:
            stop_flag = False
            for s in batch:
                results[s.id] = s
        end_date = start_date
    return results.values()
|